# briefcase-python 2.4.1
#
# Python bindings for Briefcase AI — see the project documentation.
"""Tests for Python bindings - core data models."""

from __future__ import annotations

import briefcase_ai
import pytest


class TestInput:
    """Unit tests for the ``briefcase_ai.Input`` data model."""

    def test_input_creation(self):
        """A freshly constructed Input exposes name, value, and data_type."""
        created = briefcase_ai.Input("test_input", "hello world", "string")

        assert created.name == "test_input"
        assert created.value == "hello world"
        assert created.data_type == "string"

    def test_input_with_json_value(self):
        """Structured (dict) values round-trip through an Input unchanged."""
        payload = {"key": "value", "number": 42}
        created = briefcase_ai.Input("json_input", payload, "object")

        assert created.value == payload

    def test_input_to_dict(self):
        """to_dict() serializes name, value, and data_type."""
        serialized = briefcase_ai.Input("test", "value", "string").to_dict()

        assert serialized["name"] == "test"
        assert serialized["value"] == "value"
        assert serialized["data_type"] == "string"


class TestOutput:
    """Unit tests for the ``briefcase_ai.Output`` data model."""

    def test_output_creation(self):
        """A new Output carries its fields and starts with no confidence."""
        out = briefcase_ai.Output("test_output", "result", "string")

        assert out.name == "test_output"
        assert out.value == "result"
        assert out.data_type == "string"
        assert out.confidence is None

    def test_output_with_confidence(self):
        """with_confidence() records the confidence score."""
        out = briefcase_ai.Output("test", "result", "string")
        out.with_confidence(0.95)

        assert out.confidence == 0.95

    def test_output_to_dict_with_confidence(self):
        """to_dict() includes a previously set confidence score."""
        out = briefcase_ai.Output("test", "result", "string")
        out.with_confidence(0.85)

        serialized = out.to_dict()
        assert serialized["name"] == "test"
        assert serialized["confidence"] == 0.85


class TestModelParameters:
    """Unit tests for the ``briefcase_ai.ModelParameters`` builder."""

    def test_model_parameters_creation(self):
        """Construction sets the model name; provider defaults to None."""
        model = briefcase_ai.ModelParameters("gpt-4")

        assert model.model_name == "gpt-4"
        assert model.provider is None

    def test_model_parameters_with_provider_and_parameters(self):
        """Builder methods attach a provider and arbitrary key/value parameters."""
        model = briefcase_ai.ModelParameters("claude-3")
        model.with_provider("anthropic")
        model.with_parameter("temperature", 0.7)
        model.with_parameter("max_tokens", 1000)

        assert model.provider == "anthropic"
        assert model.parameters["temperature"] == 0.7
        assert model.parameters["max_tokens"] == 1000


class TestExecutionContext:
    """Unit tests for the ``briefcase_ai.ExecutionContext`` builder."""

    def test_execution_context_properties(self):
        """Each with_* setter is reflected by the matching property."""
        context = briefcase_ai.ExecutionContext()
        context.with_runtime_version("python3.11")
        context.with_dependency("pydantic", "2.0.0")
        context.with_random_seed(1234)
        context.with_env_var("ENV", "test")

        assert context.runtime_version == "python3.11"
        assert context.dependencies["pydantic"] == "2.0.0"
        assert context.random_seed == 1234
        assert context.environment_variables["ENV"] == "test"


class TestDecisionSnapshot:
    """Unit tests for the ``briefcase_ai.DecisionSnapshot`` model."""

    def test_decision_snapshot_creation(self):
        """A new snapshot has only a function name; the rest is unset."""
        decision = briefcase_ai.DecisionSnapshot("my_function")

        assert decision.function_name == "my_function"
        assert decision.module_name is None
        assert decision.execution_time_ms is None

    def test_decision_snapshot_with_inputs_outputs_model_and_tags(self):
        """Inputs, outputs, model params, timing, and tags all attach."""
        decision = briefcase_ai.DecisionSnapshot("classify")
        decision.with_module("nlp")

        model = briefcase_ai.ModelParameters("gpt-4")
        model.with_parameter("temperature", 0.1)

        decision.add_input(briefcase_ai.Input("text", "hello", "string"))
        decision.add_output(briefcase_ai.Output("label", "greeting", "string"))
        decision.with_model_parameters(model)
        decision.with_execution_time(12.5)
        decision.add_tag("environment", "test")

        assert decision.function_name == "classify"
        assert decision.module_name == "nlp"
        assert decision.execution_time_ms == 12.5
        assert len(decision.inputs) == 1
        assert len(decision.outputs) == 1
        assert decision.tags["environment"] == "test"


class TestSnapshot:
    """Unit tests for the ``briefcase_ai.Snapshot`` container."""

    def test_snapshot_creation(self):
        """A 'session' snapshot starts empty with the given type."""
        container = briefcase_ai.Snapshot("session")

        assert container.snapshot_type == "session"
        assert len(container.decisions) == 0

    def test_snapshot_with_invalid_type(self):
        """An unrecognized snapshot type is rejected at construction."""
        with pytest.raises(Exception):
            briefcase_ai.Snapshot("invalid_type")

    def test_snapshot_add_decision(self):
        """add_decision() appends to the decisions collection."""
        container = briefcase_ai.Snapshot("batch")
        container.add_decision(briefcase_ai.DecisionSnapshot("test_func"))

        assert len(container.decisions) == 1


class TestSnapshotQuery:
    """Unit tests for the ``briefcase_ai.SnapshotQuery`` builder."""

    def test_snapshot_query_builder(self):
        """All builder setters apply, and the repr names the type."""
        built = briefcase_ai.SnapshotQuery()
        built.with_function_name("fn")
        built.with_module_name("module")
        built.with_limit(10)
        built.with_offset(0)
        built.with_tag("env", "test")

        assert "SnapshotQuery" in repr(built)


class TestIntegration:
    """End-to-end workflow test across the data models and SQLite backend."""

    def test_complete_storage_workflow(self):
        """Build a decision and a session, persist both in-memory, reload them."""
        # Lazily initialize the library once for the whole process.
        if not briefcase_ai.is_initialized():
            briefcase_ai.init_with_config(2)

        decision = briefcase_ai.DecisionSnapshot("text_classification")
        decision.with_module("nlp_service")
        decision.add_input(briefcase_ai.Input("text", "This is great", "string"))
        scored = briefcase_ai.Output("sentiment", "positive", "string").with_confidence(0.92)
        decision.add_output(scored)
        decision.with_execution_time(45.2)
        decision.add_tag("environment", "staging")

        session = briefcase_ai.Snapshot("session")
        session.add_decision(decision)

        # Exercise both the decision-level and snapshot-level persistence APIs.
        backend = briefcase_ai.SqliteBackend.in_memory()
        restored_decision = backend.load_decision(backend.save_decision(decision))
        restored_snapshot = backend.load(backend.save(session))

        assert restored_decision.function_name == "text_classification"
        assert restored_snapshot.snapshot_type == "session"
        assert len(restored_snapshot.decisions) == 1
        assert backend.health_check() is True


if __name__ == "__main__":
    # Propagate pytest's exit status so shells/CI see failures: a bare
    # pytest.main() call discards the return code and the script would
    # exit 0 even when tests fail.
    raise SystemExit(pytest.main([__file__]))