import numpy as np
import pytest
import scirs2
class TestTensorCreation:
    """Construction of Tensor objects from arrays and scalars."""

    def test_create_tensor(self):
        values = np.array([[1.0, 2.0], [3.0, 4.0]])
        t = scirs2.Tensor(values, requires_grad=True)
        assert t.shape == (2, 2)
        assert t.requires_grad is True

    def test_tensor_from_scalar(self):
        t = scirs2.Tensor(5.0, requires_grad=True)
        # A scalar may surface as a 0-d tensor or a length-1 vector.
        assert t.shape in ((), (1,))

    def test_tensor_no_grad(self):
        t = scirs2.Tensor(np.array([1.0, 2.0, 3.0]), requires_grad=False)
        assert t.requires_grad is False
class TestForwardPass:
    """Forward evaluation of elementwise ops, matmul, and reductions."""

    @staticmethod
    def _tensor(values):
        # Helper: wrap a nested list in a grad-enabled float Tensor.
        return scirs2.Tensor(np.array(values), requires_grad=True)

    def test_addition(self):
        out = self._tensor([1.0, 2.0, 3.0]) + self._tensor([4.0, 5.0, 6.0])
        assert np.allclose(out.data, [5.0, 7.0, 9.0])

    def test_subtraction(self):
        out = self._tensor([5.0, 6.0, 7.0]) - self._tensor([1.0, 2.0, 3.0])
        assert np.allclose(out.data, [4.0, 4.0, 4.0])

    def test_multiplication(self):
        out = self._tensor([2.0, 3.0, 4.0]) * self._tensor([5.0, 6.0, 7.0])
        assert np.allclose(out.data, [10.0, 18.0, 28.0])

    def test_division(self):
        out = self._tensor([10.0, 20.0, 30.0]) / self._tensor([2.0, 4.0, 5.0])
        assert np.allclose(out.data, [5.0, 5.0, 6.0])

    def test_matrix_multiplication(self):
        lhs = self._tensor([[1.0, 2.0], [3.0, 4.0]])
        rhs = self._tensor([[5.0, 6.0], [7.0, 8.0]])
        out = lhs @ rhs
        assert np.allclose(out.data, [[19.0, 22.0], [43.0, 50.0]])

    def test_power(self):
        out = self._tensor([2.0, 3.0, 4.0]) ** 2
        assert np.allclose(out.data, [4.0, 9.0, 16.0])

    def test_exponential(self):
        out = self._tensor([0.0, 1.0, 2.0]).exp()
        assert np.allclose(out.data, [1.0, np.e, np.e ** 2], atol=1e-5)

    def test_logarithm(self):
        out = self._tensor([1.0, np.e, np.e ** 2]).log()
        assert np.allclose(out.data, [0.0, 1.0, 2.0], atol=1e-5)

    def test_sum(self):
        out = self._tensor([[1.0, 2.0], [3.0, 4.0]]).sum()
        assert np.allclose(out.data, 10.0)

    def test_mean(self):
        out = self._tensor([1.0, 2.0, 3.0, 4.0]).mean()
        assert np.allclose(out.data, 2.5)
class TestBackwardPass:
    """Reverse-mode gradients for the basic operations."""

    def test_simple_gradient(self):
        # d(x^2)/dx = 2x = 4 at x = 2.
        x = scirs2.Tensor(np.array([2.0]), requires_grad=True)
        (x ** 2).backward()
        assert np.allclose(x.grad, [4.0])

    def test_addition_gradient(self):
        # Addition routes the upstream gradient to both operands unchanged.
        a = scirs2.Tensor(np.array([1.0, 2.0]), requires_grad=True)
        b = scirs2.Tensor(np.array([3.0, 4.0]), requires_grad=True)
        total = a + b
        total.backward(np.ones_like(total.data))
        assert np.allclose(a.grad, [1.0, 1.0])
        assert np.allclose(b.grad, [1.0, 1.0])

    def test_multiplication_gradient(self):
        # d(ab)/da = b and d(ab)/db = a.
        a = scirs2.Tensor(np.array([2.0]), requires_grad=True)
        b = scirs2.Tensor(np.array([3.0]), requires_grad=True)
        (a * b).backward()
        assert np.allclose(a.grad, [3.0])
        assert np.allclose(b.grad, [2.0])

    def test_chain_rule(self):
        # d(x^2 + 3x)/dx = 2x + 3 = 7 at x = 2.
        x = scirs2.Tensor(np.array([2.0]), requires_grad=True)
        (x ** 2 + 3 * x).backward()
        assert np.allclose(x.grad, [7.0], atol=1e-5)

    def test_matrix_multiplication_gradient(self):
        # Gradients of a matmul keep the shapes of their respective inputs.
        a = scirs2.Tensor(np.array([[1.0, 2.0]]), requires_grad=True)
        b = scirs2.Tensor(np.array([[3.0], [4.0]]), requires_grad=True)
        (a @ b).backward()
        assert a.grad.shape == (1, 2)
        assert b.grad.shape == (2, 1)

    def test_exp_gradient(self):
        # d(e^x)/dx = e^x = 1 at x = 0.
        x = scirs2.Tensor(np.array([0.0]), requires_grad=True)
        x.exp().backward()
        assert np.allclose(x.grad, [1.0], atol=1e-5)

    def test_log_gradient(self):
        # d(ln x)/dx = 1/x = 1 at x = 1.
        x = scirs2.Tensor(np.array([1.0]), requires_grad=True)
        x.log().backward()
        assert np.allclose(x.grad, [1.0], atol=1e-5)
class TestGradientChecking:
    """Analytic gradients cross-checked against finite differences."""

    def test_numerical_gradient_simple(self):
        point, eps = 3.0, 1e-5
        x = scirs2.Tensor(np.array([point]), requires_grad=True)
        (x ** 2).backward()
        # Central difference of f(v) = v^2 around the same point.
        f = lambda v: v ** 2
        expected = (f(point + eps) - f(point - eps)) / (2 * eps)
        assert np.allclose(x.grad, [expected], atol=1e-4)

    def test_numerical_gradient_complex(self):
        # d(x^3 + 2x^2 + 3x)/dx = 3x^2 + 4x + 3 = 23 at x = 2.
        x = scirs2.Tensor(np.array([2.0]), requires_grad=True)
        (x ** 3 + 2 * (x ** 2) + 3 * x).backward()
        assert np.allclose(x.grad, [23.0], atol=1e-4)
class TestActivationFunctions:
    """Forward values and gradients of relu / sigmoid / tanh."""

    def test_relu_forward(self):
        x = scirs2.Tensor(np.array([-1.0, 0.0, 1.0, 2.0]), requires_grad=True)
        assert np.allclose(x.relu().data, [0.0, 0.0, 1.0, 2.0])

    def test_relu_gradient(self):
        # relu' is 0 for inputs <= 0 and 1 for inputs > 0.
        x = scirs2.Tensor(np.array([-1.0, 0.0, 1.0, 2.0]), requires_grad=True)
        out = x.relu()
        out.backward(np.ones_like(out.data))
        assert np.allclose(x.grad, [0.0, 0.0, 1.0, 1.0])

    def test_sigmoid_forward(self):
        x = scirs2.Tensor(np.array([0.0]), requires_grad=True)
        assert np.allclose(x.sigmoid().data, [0.5])

    def test_sigmoid_gradient(self):
        # sigmoid'(0) = s(0) * (1 - s(0)) = 0.25.
        x = scirs2.Tensor(np.array([0.0]), requires_grad=True)
        x.sigmoid().backward()
        assert np.allclose(x.grad, [0.25], atol=1e-5)

    def test_tanh_forward(self):
        x = scirs2.Tensor(np.array([0.0]), requires_grad=True)
        assert np.allclose(x.tanh().data, [0.0])

    def test_tanh_gradient(self):
        # tanh'(0) = 1 - tanh(0)^2 = 1.
        x = scirs2.Tensor(np.array([0.0]), requires_grad=True)
        x.tanh().backward()
        assert np.allclose(x.grad, [1.0], atol=1e-5)
class TestComputationGraph:
    """Graph mechanics: fan-out, detach, and grad suppression."""

    def test_graph_multiple_uses(self):
        # x feeds two branches; contributions add: 2x + 3x^2 = 16 at x = 2.
        x = scirs2.Tensor(np.array([2.0]), requires_grad=True)
        (x ** 2 + x ** 3).backward()
        assert np.allclose(x.grad, [16.0], atol=1e-4)

    def test_detach(self):
        x = scirs2.Tensor(np.array([3.0]), requires_grad=True)
        detached = (x ** 2).detach()
        assert detached.requires_grad is False

    def test_no_grad_context(self):
        x = scirs2.Tensor(np.array([2.0]), requires_grad=True)
        with scirs2.no_grad():
            result = x ** 2
        assert result.requires_grad is False
class TestHigherOrderGradients:
    """Second-order differentiation via backward(create_graph=True)."""

    def test_second_derivative(self):
        x = scirs2.Tensor(np.array([2.0]), requires_grad=True)
        y = x ** 3
        # Keep the graph alive so the gradient itself is differentiable.
        y.backward(create_graph=True)
        first_grad = x.grad
        # dy/dx = 3x^2 = 12 at x = 2. Assert BEFORE the second backward:
        # the original checked this only afterwards, which both failed to
        # validate the second derivative and would mask any corruption of
        # first_grad caused by the second backward pass.
        assert np.allclose(first_grad.data, [12.0], atol=1e-4)
        # d2y/dx2 = 6x = 12 at x = 2 — this must at least run cleanly.
        first_grad.backward()
        # TODO(review): also assert the second-derivative value once
        # scirs2's grad-accumulation semantics (fresh vs. accumulated
        # x.grad after a second backward) are confirmed.
class TestEdgeCases:
    """Degenerate inputs: zero gradients, deep graphs, in-place mutation."""

    def test_zero_gradient(self):
        # The original built y = c and never called backward — it asserted
        # nothing. A variable that cancels out of the result must receive a
        # zero gradient: d(x - x + c)/dx = 0.
        x = scirs2.Tensor(np.array([5.0]), requires_grad=True)
        c = scirs2.Tensor(np.array([10.0]), requires_grad=False)
        y = x - x + c
        y.backward()
        assert np.allclose(x.grad, [0.0])

    def test_large_computation_graph(self):
        # 100 chained additions of a constant; dy/dx stays exactly 1.
        x = scirs2.Tensor(np.array([1.0]), requires_grad=True)
        y = x
        for _ in range(100):
            y = y + 1
        y.backward()
        assert np.allclose(x.grad, [1.0])

    def test_in_place_operations(self):
        x = scirs2.Tensor(np.array([1.0, 2.0, 3.0]), requires_grad=True)
        try:
            x.data += 1
            y = x.sum()
            y.backward()
        except Exception:
            # The engine may legitimately reject in-place mutation of a
            # leaf's data; rejecting it is acceptable behavior.
            return
        # If the mutation is allowed, the run must still be correct:
        # d(sum)/dx_i = 1 for every element. The original asserted nothing,
        # so a silently wrong gradient would have passed.
        assert np.allclose(x.grad, [1.0, 1.0, 1.0])
if __name__ == "__main__":
    # Propagate pytest's exit status: the original discarded the return
    # value, so running this file as a script always exited 0 even when
    # tests failed.
    raise SystemExit(pytest.main([__file__, "-v"]))