import ctypes
import os
import platform
import sys
from typing import Optional
from pathlib import Path
class GraphResult:
    """Result of compiling an expression via the C FFI (tl_compile_expr).

    Wraps a ``POINTER(_TLGraphResult)``; a NULL/None pointer yields an
    empty, failed result.  C strings are decoded lazily on access.

    NOTE(review): nothing here releases the pointer even though the
    library exports ``tl_free_graph_result`` — looks like a leak; confirm
    ownership with the C API before adding a free.
    """

    def __init__(self, ptr):
        # Keep the pointer itself alive alongside the dereferenced struct.
        self._ptr = ptr
        self._result = ptr.contents if ptr else None

    def is_success(self) -> bool:
        """True when a result is present and carries no error message."""
        # Explicit None check so this returns a real bool even for a NULL
        # pointer (the original `and` chain returned None in that case,
        # violating the declared -> bool contract).
        return self._result is not None and not self._result.error_message

    @property
    def graph_data(self) -> Optional[str]:
        """Compiled graph payload decoded as UTF-8, or None when absent."""
        if not self._result or not self._result.graph_data:
            return None
        return self._result.graph_data.decode('utf-8')

    @property
    def error_message(self) -> Optional[str]:
        """Error text from the C layer, or None on success."""
        if not self._result or not self._result.error_message:
            return None
        return self._result.error_message.decode('utf-8')

    @property
    def tensor_count(self) -> int:
        """Number of tensors in the compiled graph (0 for a NULL result)."""
        return self._result.tensor_count if self._result else 0

    @property
    def node_count(self) -> int:
        """Number of nodes in the compiled graph (0 for a NULL result)."""
        return self._result.node_count if self._result else 0

    def __str__(self) -> str:
        if self.is_success():
            return f"GraphResult(tensors={self.tensor_count}, nodes={self.node_count})"
        return f"GraphResult(error='{self.error_message}')"
class ExecutionResult:
    """Result of executing a graph via the C FFI (tl_execute_graph).

    Wraps a ``POINTER(_TLExecutionResult)``; a NULL/None pointer yields
    an empty, failed result.

    NOTE(review): the pointer is never passed to
    ``tl_free_execution_result`` — looks like a leak; confirm ownership.
    """

    def __init__(self, ptr):
        # Keep the pointer itself alive alongside the dereferenced struct.
        self._ptr = ptr
        self._result = ptr.contents if ptr else None

    def is_success(self) -> bool:
        """True when a result is present and carries no error message."""
        # Explicit None check so a NULL pointer yields False, not None
        # (the original `and` chain broke the -> bool contract).
        return self._result is not None and not self._result.error_message

    @property
    def output_data(self) -> Optional[str]:
        """Execution output decoded as UTF-8, or None when absent."""
        if not self._result or not self._result.output_data:
            return None
        return self._result.output_data.decode('utf-8')

    @property
    def error_message(self) -> Optional[str]:
        """Error text from the C layer, or None on success."""
        if not self._result or not self._result.error_message:
            return None
        return self._result.error_message.decode('utf-8')

    @property
    def execution_time_us(self) -> int:
        """Execution time in microseconds (0 for a NULL result)."""
        return self._result.execution_time_us if self._result else 0

    @property
    def execution_time_ms(self) -> float:
        """Execution time converted to milliseconds."""
        return self.execution_time_us / 1000.0

    def __str__(self) -> str:
        if self.is_success():
            return f"ExecutionResult(time={self.execution_time_ms:.3f}ms)"
        return f"ExecutionResult(error='{self.error_message}')"
class OptimizationResult:
    """Result of optimizing a graph via the C FFI (tl_optimize_graph).

    Wraps a ``POINTER(_TLOptimizationResult)``; a NULL/None pointer
    yields an empty, failed result.

    NOTE(review): the pointer is never passed to
    ``tl_free_optimization_result`` — looks like a leak; confirm ownership.
    """

    def __init__(self, ptr):
        # Keep the pointer itself alive alongside the dereferenced struct.
        self._ptr = ptr
        self._result = ptr.contents if ptr else None

    def is_success(self) -> bool:
        """True when a result is present and carries no error message."""
        # Explicit None check so a NULL pointer yields False, not None
        # (the original `and` chain broke the -> bool contract).
        return self._result is not None and not self._result.error_message

    @property
    def graph_data(self) -> Optional[str]:
        """Optimized graph payload decoded as UTF-8, or None when absent."""
        if not self._result or not self._result.graph_data:
            return None
        return self._result.graph_data.decode('utf-8')

    @property
    def error_message(self) -> Optional[str]:
        """Error text from the C layer, or None on success."""
        if not self._result or not self._result.error_message:
            return None
        return self._result.error_message.decode('utf-8')

    @property
    def tensors_removed(self) -> int:
        """Number of tensors eliminated by optimization (0 for NULL)."""
        return self._result.tensors_removed if self._result else 0

    @property
    def nodes_removed(self) -> int:
        """Number of nodes eliminated by optimization (0 for NULL)."""
        return self._result.nodes_removed if self._result else 0

    def __str__(self) -> str:
        if self.is_success():
            return f"OptimizationResult(tensors_removed={self.tensors_removed}, nodes_removed={self.nodes_removed})"
        return f"OptimizationResult(error='{self.error_message}')"
class BenchmarkResult:
    """Result of a compilation benchmark (tl_benchmark_compilation).

    Wraps a ``POINTER(_TLBenchmarkResult)``; a NULL/None pointer yields
    an empty, failed result.

    NOTE(review): the pointer is never passed to
    ``tl_free_benchmark_result`` — looks like a leak; confirm ownership.
    """

    def __init__(self, ptr):
        # Keep the pointer itself alive alongside the dereferenced struct.
        self._ptr = ptr
        self._result = ptr.contents if ptr else None

    def is_success(self) -> bool:
        """True when a result is present and carries no error message."""
        # Explicit None check so a NULL pointer yields False, not None
        # (the original `and` chain broke the -> bool contract).
        return self._result is not None and not self._result.error_message

    @property
    def error_message(self) -> Optional[str]:
        """Error text from the C layer, or None on success."""
        if not self._result or not self._result.error_message:
            return None
        return self._result.error_message.decode('utf-8')

    @property
    def mean_us(self) -> float:
        """Mean iteration time in microseconds (0.0 for a NULL result)."""
        return self._result.mean_us if self._result else 0.0

    @property
    def mean_ms(self) -> float:
        """Mean iteration time converted to milliseconds."""
        return self.mean_us / 1000.0

    @property
    def std_dev_us(self) -> float:
        """Standard deviation in microseconds (0.0 for a NULL result)."""
        return self._result.std_dev_us if self._result else 0.0

    @property
    def min_us(self) -> int:
        """Fastest iteration in microseconds (0 for a NULL result)."""
        return self._result.min_us if self._result else 0

    @property
    def max_us(self) -> int:
        """Slowest iteration in microseconds (0 for a NULL result)."""
        return self._result.max_us if self._result else 0

    @property
    def iterations(self) -> int:
        """Number of benchmark iterations performed (0 for NULL)."""
        return self._result.iterations if self._result else 0

    def __str__(self) -> str:
        if self.is_success():
            return f"BenchmarkResult(mean={self.mean_ms:.3f}ms±{self.std_dev_us/1000:.3f}ms, n={self.iterations})"
        return f"BenchmarkResult(error='{self.error_message}')"
class _TLGraphResult(ctypes.Structure):
    # ctypes layout for the struct returned by tl_compile_expr.
    # Field order and types must match the C side exactly — do not reorder.
    _fields_ = [
        ("graph_data", ctypes.c_char_p),     # presumably NULL on error — confirm against C API
        ("error_message", ctypes.c_char_p),  # NULL/empty treated as success by GraphResult
        ("tensor_count", ctypes.c_size_t),
        ("node_count", ctypes.c_size_t),
    ]
class _TLExecutionResult(ctypes.Structure):
    # ctypes layout for the struct returned by tl_execute_graph.
    # Field order and types must match the C side exactly — do not reorder.
    _fields_ = [
        ("output_data", ctypes.c_char_p),    # presumably NULL on error — confirm against C API
        ("error_message", ctypes.c_char_p),  # NULL/empty treated as success by ExecutionResult
        ("execution_time_us", ctypes.c_uint64),
    ]
class _TLOptimizationResult(ctypes.Structure):
    # ctypes layout for the struct returned by tl_optimize_graph.
    # Field order and types must match the C side exactly — do not reorder.
    _fields_ = [
        ("graph_data", ctypes.c_char_p),     # presumably NULL on error — confirm against C API
        ("error_message", ctypes.c_char_p),  # NULL/empty treated as success by OptimizationResult
        ("tensors_removed", ctypes.c_size_t),
        ("nodes_removed", ctypes.c_size_t),
    ]
class _TLBenchmarkResult(ctypes.Structure):
    # ctypes layout for the struct returned by tl_benchmark_compilation.
    # Field order and types must match the C side exactly — do not reorder.
    _fields_ = [
        ("error_message", ctypes.c_char_p),  # NULL/empty treated as success by BenchmarkResult
        ("mean_us", ctypes.c_double),
        ("std_dev_us", ctypes.c_double),
        ("min_us", ctypes.c_uint64),
        ("max_us", ctypes.c_uint64),
        ("iterations", ctypes.c_size_t),
    ]
def _find_library() -> Path:
if "TENSORLOGIC_LIB_PATH" in os.environ:
lib_path = Path(os.environ["TENSORLOGIC_LIB_PATH"])
if lib_path.exists():
return lib_path
system = platform.system()
if system == "Linux":
lib_name = "libtensorlogic_cli.so"
elif system == "Darwin":
lib_name = "libtensorlogic_cli.dylib"
elif system == "Windows":
lib_name = "tensorlogic_cli.dll"
else:
raise RuntimeError(f"Unsupported platform: {system}")
search_paths = [
Path(__file__).parent.parent.parent.parent / "target" / "release" / lib_name,
Path(f"/usr/local/lib/{lib_name}"),
Path(f"/usr/lib/{lib_name}"),
Path(lib_name),
]
for path in search_paths:
if path.exists():
return path
raise RuntimeError(
f"Could not find TensorLogic CLI library ({lib_name}). "
"Set TENSORLOGIC_LIB_PATH environment variable or build with: "
"cargo build --release -p tensorlogic-cli"
)
class TensorLogic:
    """ctypes wrapper around the TensorLogic CLI shared library.

    Loads the library (searching standard locations unless an explicit
    path is given) and exposes compile / execute / optimize / benchmark
    entry points returning the Python result wrappers.
    """

    def __init__(self, lib_path: Optional[Path] = None):
        """Load the shared library and declare all FFI signatures.

        Args:
            lib_path: explicit library path; when None the library is
                located via ``_find_library()``.

        Raises:
            RuntimeError: if the library cannot be located.
            OSError: if the library cannot be loaded.
        """
        if lib_path is None:
            lib_path = _find_library()
        self._lib = ctypes.CDLL(str(lib_path))
        self._setup_functions()

    def _setup_functions(self):
        """Declare argtypes/restypes for every exported C function."""
        lib = self._lib
        lib.tl_compile_expr.argtypes = [ctypes.c_char_p]
        lib.tl_compile_expr.restype = ctypes.POINTER(_TLGraphResult)
        lib.tl_execute_graph.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
        lib.tl_execute_graph.restype = ctypes.POINTER(_TLExecutionResult)
        lib.tl_optimize_graph.argtypes = [ctypes.c_char_p, ctypes.c_int32]
        lib.tl_optimize_graph.restype = ctypes.POINTER(_TLOptimizationResult)
        lib.tl_benchmark_compilation.argtypes = [ctypes.c_char_p, ctypes.c_size_t]
        lib.tl_benchmark_compilation.restype = ctypes.POINTER(_TLBenchmarkResult)
        # BUG FIX: tl_version/tl_free_string must use c_void_p, not c_char_p.
        # With restype=c_char_p, ctypes converts the return value to Python
        # bytes and discards the original C pointer; passing those bytes to
        # tl_free_string would hand Python-owned buffer memory to the C free
        # routine (heap corruption) while leaking the real C allocation.
        lib.tl_version.argtypes = []
        lib.tl_version.restype = ctypes.c_void_p
        lib.tl_free_string.argtypes = [ctypes.c_void_p]
        lib.tl_free_string.restype = None
        lib.tl_free_graph_result.argtypes = [ctypes.POINTER(_TLGraphResult)]
        lib.tl_free_graph_result.restype = None
        lib.tl_free_execution_result.argtypes = [ctypes.POINTER(_TLExecutionResult)]
        lib.tl_free_execution_result.restype = None
        lib.tl_free_optimization_result.argtypes = [ctypes.POINTER(_TLOptimizationResult)]
        lib.tl_free_optimization_result.restype = None
        lib.tl_free_benchmark_result.argtypes = [ctypes.POINTER(_TLBenchmarkResult)]
        lib.tl_free_benchmark_result.restype = None
        lib.tl_is_backend_available.argtypes = [ctypes.c_char_p]
        lib.tl_is_backend_available.restype = ctypes.c_int32

    def compile(self, expr: str) -> GraphResult:
        """Compile a TensorLogic expression into a graph.

        NOTE(review): the result pointer is never passed to
        tl_free_graph_result; the wrapper reads the C memory lazily, so
        freeing here would dangle.  This leaks — confirm ownership and
        consider copying fields eagerly so the struct can be freed.
        """
        return GraphResult(self._lib.tl_compile_expr(expr.encode('utf-8')))

    def execute(self, graph_json: str, backend: str = "cpu") -> ExecutionResult:
        """Execute a compiled graph (JSON form) on the named backend.

        NOTE(review): same leak pattern as compile() — see above.
        """
        ptr = self._lib.tl_execute_graph(
            graph_json.encode('utf-8'),
            backend.encode('utf-8')
        )
        return ExecutionResult(ptr)

    def optimize(self, graph_json: str, level: int = 2) -> OptimizationResult:
        """Optimize a compiled graph (JSON form) at the given level.

        NOTE(review): same leak pattern as compile() — see above.
        """
        ptr = self._lib.tl_optimize_graph(graph_json.encode('utf-8'), level)
        return OptimizationResult(ptr)

    def benchmark(self, expr: str, iterations: int = 100) -> BenchmarkResult:
        """Benchmark compilation of an expression over `iterations` runs.

        NOTE(review): same leak pattern as compile() — see above.
        """
        ptr = self._lib.tl_benchmark_compilation(expr.encode('utf-8'), iterations)
        return BenchmarkResult(ptr)

    def version(self) -> str:
        """Return the library version, releasing the C-allocated string."""
        ptr = self._lib.tl_version()  # raw c_void_p; may be None (NULL)
        if not ptr:
            return ""
        try:
            # Copy the bytes out before releasing the C allocation.
            return ctypes.cast(ptr, ctypes.c_char_p).value.decode('utf-8')
        finally:
            self._lib.tl_free_string(ptr)

    def is_backend_available(self, backend: str) -> bool:
        """True when the C layer reports the named backend as available."""
        return self._lib.tl_is_backend_available(backend.encode('utf-8')) == 1
if __name__ == "__main__":
    # Quick smoke-test / demo: compile one expression, then benchmark.
    def _demo():
        engine = TensorLogic()
        print(f"TensorLogic version: {engine.version()}")
        print(f"CPU backend available: {engine.is_backend_available('cpu')}")
        print()

        print("Compiling expression: friend(alice, bob)")
        compiled = engine.compile("friend(alice, bob)")
        if compiled.is_success():
            print(f"✓ Compiled successfully!")
            print(f"  Tensors: {compiled.tensor_count}, Nodes: {compiled.node_count}")
        else:
            print(f"✗ Compilation failed: {compiled.error_message}")

        print("\nBenchmarking compilation (100 iterations)...")
        stats = engine.benchmark("AND(pred1(x), pred2(x, y))", iterations=100)
        if stats.is_success():
            print(f"✓ Benchmark complete!")
            print(f"  Mean: {stats.mean_ms:.3f}ms ± {stats.std_dev_us/1000:.3f}ms")
            print(f"  Range: {stats.min_us/1000:.3f}ms - {stats.max_us/1000:.3f}ms")
        else:
            print(f"✗ Benchmark failed: {stats.error_message}")

    _demo()