import numpy as np
import pytest
import scirs2
class TestClassificationMetrics:
def test_accuracy_perfect(self):
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([0, 1, 2, 0, 1, 2])
accuracy = scirs2.accuracy_score_py(y_true, y_pred)
assert accuracy == 1.0
def test_accuracy_imperfect(self):
y_true = np.array([0, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 0, 0, 1, 1])
accuracy = scirs2.accuracy_score_py(y_true, y_pred)
assert np.allclose(accuracy, 4.0 / 6.0)
def test_precision_binary(self):
y_true = np.array([0, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 1, 0, 0, 1])
precision = scirs2.precision_score_py(y_true, y_pred)
        # TP = 2 and FP = 1 for class 1, so precision = 2/3.
        assert np.allclose(precision, 2.0 / 3.0)
def test_recall_binary(self):
y_true = np.array([0, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 1, 0, 0, 1])
recall = scirs2.recall_score_py(y_true, y_pred)
        # TP = 2 and FN = 1 for class 1, so recall = 2/3.
        assert np.allclose(recall, 2.0 / 3.0)
def test_f1_score_binary(self):
y_true = np.array([0, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 1, 0, 1, 0])
f1 = scirs2.f1_score_py(y_true, y_pred)
assert np.allclose(f1, 1.0)
def test_f1_score_imperfect(self):
y_true = np.array([1, 1, 1, 0, 0, 0])
y_pred = np.array([1, 1, 0, 1, 0, 0])
f1 = scirs2.f1_score_py(y_true, y_pred)
assert 0 < f1 < 1
def test_precision_multiclass(self):
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([0, 2, 2, 0, 1, 2])
precision = scirs2.precision_score_py(y_true, y_pred, average="macro")
        # Per-class precisions are 1, 1, and 2/3, so the macro average is 8/9.
        assert np.allclose(precision, 8.0 / 9.0)
def test_recall_multiclass(self):
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([0, 2, 2, 0, 1, 2])
recall = scirs2.recall_score_py(y_true, y_pred, average="macro")
        # Per-class recalls are 1, 1/2, and 1, so the macro average is 5/6.
        assert np.allclose(recall, 5.0 / 6.0)
def test_f1_score_multiclass(self):
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([0, 1, 2, 0, 1, 2])
f1 = scirs2.f1_score_py(y_true, y_pred, average="macro")
assert np.allclose(f1, 1.0)
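    # Added cross-check (a sketch, not part of the original suite): for binary
    # inputs, F1 should equal the harmonic mean of the precision and recall
    # returned by the same binding. Only scirs2 functions already exercised
    # above are used, so no new API is assumed.
    def test_f1_consistency_with_precision_recall(self):
        y_true = np.array([1, 1, 1, 0, 0, 0])
        y_pred = np.array([1, 1, 0, 1, 0, 0])
        precision = scirs2.precision_score_py(y_true, y_pred)
        recall = scirs2.recall_score_py(y_true, y_pred)
        f1 = scirs2.f1_score_py(y_true, y_pred)
        assert np.allclose(f1, 2 * precision * recall / (precision + recall))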
class TestConfusionMatrix:
def test_confusion_matrix_binary(self):
y_true = np.array([0, 1, 0, 1])
y_pred = np.array([0, 1, 1, 1])
cm = scirs2.confusion_matrix_py(y_true, y_pred)
assert cm.shape == (2, 2)
        assert cm[0, 0] == 1
        assert cm[1, 1] == 2
def test_confusion_matrix_multiclass(self):
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([0, 2, 2, 0, 1, 1])
cm = scirs2.confusion_matrix_py(y_true, y_pred)
assert cm.shape == (3, 3)
def test_classification_report(self):
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([0, 1, 2, 0, 1, 2])
report = scirs2.classification_report_py(y_true, y_pred)
assert "precision" in report
assert "recall" in report
assert "f1_score" in report
class TestROCAndAUC:
def test_roc_curve_basic(self):
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
result = scirs2.roc_curve_py(y_true, y_score)
assert "fpr" in result
assert "tpr" in result
assert "thresholds" in result
def test_roc_auc_score_perfect(self):
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.2, 0.8, 0.9])
auc = scirs2.roc_auc_score_py(y_true, y_score)
assert np.allclose(auc, 1.0)
def test_roc_auc_score_random(self):
np.random.seed(42)
y_true = np.random.randint(0, 2, size=100)
y_score = np.random.rand(100)
auc = scirs2.roc_auc_score_py(y_true, y_score)
assert 0.3 <= auc <= 0.7
def test_pr_curve(self):
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
result = scirs2.pr_curve_py(y_true, y_score)
assert "precision" in result
assert "recall" in result
assert "thresholds" in result
def test_average_precision_score(self):
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
ap = scirs2.average_precision_score_py(y_true, y_score)
assert 0 <= ap <= 1
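    # Added property sketch: flipping the labels turns every positive/negative
    # pair around, so the two AUCs should sum to 1. This assumes the standard
    # rank-based AUC definition with label 1 as the positive class.
    def test_roc_auc_label_flip_complement(self):
        y_true = np.array([0, 0, 1, 1])
        y_score = np.array([0.1, 0.4, 0.35, 0.8])
        auc = scirs2.roc_auc_score_py(y_true, y_score)
        auc_flipped = scirs2.roc_auc_score_py(1 - y_true, y_score)
        assert np.allclose(auc + auc_flipped, 1.0)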
class TestRegressionMetrics:
def test_mean_squared_error(self):
y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 2.1, 2.9, 4.2])
mse = scirs2.mean_squared_error_py(y_true, y_pred)
        # Squared errors are 0.01, 0.01, 0.01, and 0.04, so MSE = 0.07 / 4.
        assert np.allclose(mse, 0.0175)
def test_mean_absolute_error(self):
y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.5, 2.5, 2.5, 4.5])
mae = scirs2.mean_absolute_error_py(y_true, y_pred)
assert np.allclose(mae, 0.5)
def test_root_mean_squared_error(self):
y_true = np.array([0.0, 0.0, 0.0, 0.0])
y_pred = np.array([1.0, 1.0, 1.0, 1.0])
rmse = scirs2.root_mean_squared_error_py(y_true, y_pred)
assert np.allclose(rmse, 1.0)
def test_r2_score_perfect(self):
y_true = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y_pred = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
r2 = scirs2.r2_score_py(y_true, y_pred)
assert np.allclose(r2, 1.0)
def test_r2_score_mean_prediction(self):
y_true = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y_pred = np.array([3.0, 3.0, 3.0, 3.0, 3.0])
r2 = scirs2.r2_score_py(y_true, y_pred)
assert np.allclose(r2, 0.0, atol=1e-5)
def test_mean_absolute_percentage_error(self):
y_true = np.array([100.0, 200.0, 300.0])
y_pred = np.array([110.0, 190.0, 310.0])
mape = scirs2.mean_absolute_percentage_error_py(y_true, y_pred)
        # Absolute percentage errors are 10%, 5%, and ~3.3%, so the expected
        # MAPE is about 6.1% (0.061 as a fraction); the loose bound accepts
        # either the percentage or the fraction convention.
        assert 0 <= mape <= 10
def test_explained_variance_score(self):
y_true = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y_pred = np.array([1.1, 2.1, 2.9, 4.1, 5.1])
evs = scirs2.explained_variance_score_py(y_true, y_pred)
assert 0.9 <= evs <= 1.0
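    # Added consistency sketch: RMSE should be the square root of MSE on the
    # same inputs, whatever the backend computes internally.
    def test_rmse_is_sqrt_of_mse(self):
        y_true = np.array([1.0, 2.0, 3.0, 4.0])
        y_pred = np.array([1.1, 2.1, 2.9, 4.2])
        mse = scirs2.mean_squared_error_py(y_true, y_pred)
        rmse = scirs2.root_mean_squared_error_py(y_true, y_pred)
        assert np.allclose(rmse, np.sqrt(mse))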
class TestClusteringMetrics:
def test_silhouette_score(self):
X = np.array([[1, 1], [1, 2], [5, 5], [5, 6]])
labels = np.array([0, 0, 1, 1])
score = scirs2.silhouette_score_py(X, labels)
assert 0.5 <= score <= 1.0
def test_calinski_harabasz_score(self):
X = np.array([[1, 1], [1, 2], [5, 5], [5, 6]])
labels = np.array([0, 0, 1, 1])
score = scirs2.calinski_harabasz_score_py(X, labels)
assert score > 0
def test_davies_bouldin_score(self):
X = np.array([[1, 1], [1, 2], [5, 5], [5, 6]])
labels = np.array([0, 0, 1, 1])
score = scirs2.davies_bouldin_score_py(X, labels)
assert score >= 0
assert score < 2.0
def test_adjusted_rand_score(self):
labels_true = np.array([0, 0, 1, 1])
labels_pred = np.array([0, 0, 1, 1])
ari = scirs2.adjusted_rand_score_py(labels_true, labels_pred)
assert np.allclose(ari, 1.0)
def test_adjusted_mutual_info_score(self):
labels_true = np.array([0, 0, 1, 1, 2, 2])
labels_pred = np.array([0, 0, 1, 1, 2, 2])
ami = scirs2.adjusted_mutual_info_score_py(labels_true, labels_pred)
assert 0.9 <= ami <= 1.0
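    # Added property sketch: ARI is invariant to how clusters are labeled, so
    # a perfect clustering with swapped cluster ids should still score 1.0.
    def test_adjusted_rand_score_label_permutation(self):
        labels_true = np.array([0, 0, 1, 1])
        labels_pred = np.array([1, 1, 0, 0])
        ari = scirs2.adjusted_rand_score_py(labels_true, labels_pred)
        assert np.allclose(ari, 1.0)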
class TestRankingMetrics:
def test_ndcg_score(self):
y_true = np.array([[1.0, 0.0, 0.0, 1.0, 0.0]])
y_score = np.array([[0.9, 0.1, 0.2, 0.8, 0.3]])
ndcg = scirs2.ndcg_score_py(y_true, y_score)
        # Both relevant items are ranked first by the scores, so the ranking
        # is ideal and NDCG should be exactly 1.
        assert np.allclose(ndcg, 1.0)
def test_mean_reciprocal_rank(self):
y_true = np.array([[0, 1, 0, 0, 0]])
y_score = np.array([[0.1, 0.9, 0.3, 0.2, 0.4]])
mrr = scirs2.mean_reciprocal_rank_py(y_true, y_score)
assert np.allclose(mrr, 1.0)
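    # Added sketch: with the single relevant item ranked second by score, the
    # reciprocal rank should be 1/2 under the standard MRR definition.
    def test_mean_reciprocal_rank_second_place(self):
        y_true = np.array([[0, 1, 0, 0, 0]])
        y_score = np.array([[0.9, 0.8, 0.3, 0.2, 0.1]])
        mrr = scirs2.mean_reciprocal_rank_py(y_true, y_score)
        assert np.allclose(mrr, 0.5)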
class TestMultilabelMetrics:
def test_hamming_loss(self):
y_true = np.array([[1, 0, 1], [0, 1, 0]])
y_pred = np.array([[1, 0, 1], [0, 1, 1]])
loss = scirs2.hamming_loss_py(y_true, y_pred)
assert np.allclose(loss, 1.0 / 6.0)
def test_jaccard_score(self):
y_true = np.array([[1, 0, 1], [0, 1, 0]])
y_pred = np.array([[1, 0, 1], [0, 1, 0]])
jaccard = scirs2.jaccard_score_py(y_true, y_pred)
assert np.allclose(jaccard, 1.0)
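    # Added boundary sketch: flipping every label gives the maximum Hamming
    # loss of 1.0 under the fraction-of-labels convention that
    # test_hamming_loss above already relies on.
    def test_hamming_loss_all_wrong(self):
        y_true = np.array([[1, 0, 1], [0, 1, 0]])
        y_pred = np.array([[0, 1, 0], [1, 0, 1]])
        loss = scirs2.hamming_loss_py(y_true, y_pred)
        assert np.allclose(loss, 1.0)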
class TestDistanceMetrics:
def test_euclidean_distance(self):
a = np.array([0.0, 0.0])
b = np.array([3.0, 4.0])
distance = scirs2.euclidean_distance_py(a, b)
assert np.allclose(distance, 5.0)
def test_manhattan_distance(self):
a = np.array([0.0, 0.0])
b = np.array([3.0, 4.0])
distance = scirs2.manhattan_distance_py(a, b)
assert np.allclose(distance, 7.0)
def test_cosine_similarity(self):
a = np.array([1.0, 0.0])
b = np.array([1.0, 0.0])
similarity = scirs2.cosine_similarity_py(a, b)
assert np.allclose(similarity, 1.0)
def test_cosine_distance(self):
a = np.array([1.0, 0.0])
b = np.array([0.0, 1.0])
distance = scirs2.cosine_distance_py(a, b)
assert np.allclose(distance, 1.0)
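    # Added consistency sketch, assuming the common convention
    # cosine_distance = 1 - cosine_similarity (the orthogonal and identical
    # cases above are both consistent with it).
    def test_cosine_distance_complements_similarity(self):
        a = np.array([1.0, 2.0])
        b = np.array([2.0, 1.0])
        similarity = scirs2.cosine_similarity_py(a, b)
        distance = scirs2.cosine_distance_py(a, b)
        assert np.allclose(distance, 1.0 - similarity)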
class TestEdgeCases:
def test_empty_predictions(self):
y_true = np.array([])
y_pred = np.array([])
        # Empty input may raise or return NaN/0 depending on the backend.
        # Keep the assert outside the try block so a genuine assertion
        # failure is not swallowed by the except clause.
        try:
            accuracy = scirs2.accuracy_score_py(y_true, y_pred)
        except Exception:
            return
        assert np.isnan(accuracy) or accuracy == 0
def test_single_class(self):
y_true = np.array([1, 1, 1, 1])
y_pred = np.array([1, 1, 1, 1])
accuracy = scirs2.accuracy_score_py(y_true, y_pred)
assert accuracy == 1.0
def test_all_zeros(self):
y_true = np.array([0.0, 0.0, 0.0])
y_pred = np.array([0.0, 0.0, 0.0])
mse = scirs2.mean_squared_error_py(y_true, y_pred)
assert mse == 0.0
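    # Added edge-case sketch: a single matching sample should yield perfect
    # accuracy; this only assumes accuracy_score_py accepts length-1 arrays.
    def test_single_sample(self):
        y_true = np.array([1])
        y_pred = np.array([1])
        accuracy = scirs2.accuracy_score_py(y_true, y_pred)
        assert accuracy == 1.0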
if __name__ == "__main__":
pytest.main([__file__, "-v"])