#![allow(clippy::useless_conversion)]
use parking_lot::Mutex;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::{PyBytes, PyBytesMethods, PyDict, PyList};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use crate::{
config::TreeConfig,
git::{
types::{DiffOperation, StorageBackend},
versioned_store::{
FileVersionedKvStore, GitNamespacedKvStore, GitVersionedKvStore, HistoricalAccess,
HistoricalCommitAccess, InMemoryVersionedKvStore, ThreadSafeGitVersionedKvStore,
},
},
proof::Proof,
storage::{FileNodeStorage, InMemoryNodeStorage},
tree::{ProllyTree, Tree},
};
#[cfg(feature = "rocksdb_storage")]
use crate::git::versioned_store::RocksDBVersionedKvStore;
#[cfg(feature = "sql")]
use crate::sql::ProllyStorage;
#[cfg(feature = "sql")]
use gluesql_core::{data::Value as SqlValue, executor::Payload, prelude::Glue};
/// Cap on how many keys the bulk listing APIs (`list_keys`,
/// `get_keys_at_ref`) return to Python; larger result sets are truncated
/// with a warning printed to stderr.
const MAX_KEYS_LIMIT: usize = 1024;
/// Python-visible tree configuration (exposed as `TreeConfig`).
///
/// Fields mirror the chunking parameters of `crate::config::TreeConfig`
/// and are copied into it by `PyProllyTree::new`.
#[pyclass(name = "TreeConfig")]
struct PyTreeConfig {
    base: u64,             // rolling-hash base
    modulus: u64,          // rolling-hash modulus
    min_chunk_size: usize, // lower bound on chunk size
    max_chunk_size: usize, // upper bound on chunk size
    pattern: u64,          // chunk-boundary pattern
}
#[pymethods]
impl PyTreeConfig {
#[new]
#[pyo3(signature = (base=4, modulus=64, min_chunk_size=1, max_chunk_size=4096, pattern=0))]
fn new(
base: u64,
modulus: u64,
min_chunk_size: usize,
max_chunk_size: usize,
pattern: u64,
) -> Self {
PyTreeConfig {
base,
modulus,
min_chunk_size,
max_chunk_size,
pattern,
}
}
}
/// Storage-erased wrapper over the two concrete `ProllyTree` storage
/// backends exposed through `PyProllyTree`.
enum ProllyTreeWrapper {
    Memory(ProllyTree<32, InMemoryNodeStorage<32>>),
    File(ProllyTree<32, FileNodeStorage<32>>),
}
/// Evaluate `$body` with `$tree` bound to a shared reference to whichever
/// variant `$self` currently holds.
macro_rules! with_tree {
    ($self:expr, $tree:ident, $body:expr) => {
        match &*$self {
            ProllyTreeWrapper::Memory($tree) => $body,
            ProllyTreeWrapper::File($tree) => $body,
        }
    };
}
/// Mutable counterpart of `with_tree!`.
macro_rules! with_tree_mut {
    ($self:expr, $tree:ident, $body:expr) => {
        match &mut *$self {
            ProllyTreeWrapper::Memory($tree) => $body,
            ProllyTreeWrapper::File($tree) => $body,
        }
    };
}
/// Python-visible Prolly tree (exposed as `ProllyTree`).
///
/// The tree sits behind `Arc<Mutex<..>>` so methods can release the GIL
/// (`py.allow_threads`) while holding the tree lock.
#[pyclass(name = "ProllyTree")]
struct PyProllyTree {
    tree: Arc<Mutex<ProllyTreeWrapper>>,
}
#[pymethods]
impl PyProllyTree {
    /// Construct a tree backed by `"memory"` or `"file"` storage.
    ///
    /// `path` is required for `"file"` storage; `config` (a `TreeConfig`)
    /// overrides the default chunking parameters. Raises `ValueError` for an
    /// unknown storage type, a missing path, or a storage init failure.
    #[new]
    #[pyo3(signature = (storage_type="memory", path=None, config=None))]
    fn new(
        storage_type: &str,
        path: Option<String>,
        config: Option<&PyTreeConfig>,
    ) -> PyResult<Self> {
        // Translate the optional Python config into the Rust-side TreeConfig;
        // schema/root-hash fields are not exposed to Python and stay None.
        let tree_config = if let Some(py_config) = config {
            TreeConfig::<32> {
                base: py_config.base,
                modulus: py_config.modulus,
                min_chunk_size: py_config.min_chunk_size,
                max_chunk_size: py_config.max_chunk_size,
                pattern: py_config.pattern,
                root_hash: None,
                key_schema: None,
                value_schema: None,
                encode_types: vec![],
            }
        } else {
            TreeConfig::<32>::default()
        };
        // Select and initialize the storage backend.
        let tree_wrapper = match storage_type {
            "memory" => {
                let storage = InMemoryNodeStorage::<32>::new();
                let tree = ProllyTree::<32, _>::new(storage, tree_config);
                ProllyTreeWrapper::Memory(tree)
            }
            "file" => {
                let path =
                    path.ok_or_else(|| PyValueError::new_err("File storage requires a path"))?;
                let storage = FileNodeStorage::<32>::new(PathBuf::from(path)).map_err(|e| {
                    PyValueError::new_err(format!("Failed to create file storage: {e}"))
                })?;
                let tree = ProllyTree::<32, _>::new(storage, tree_config);
                ProllyTreeWrapper::File(tree)
            }
            _ => {
                return Err(PyValueError::new_err(
                    "Invalid storage type. Use 'memory' or 'file'",
                ))
            }
        };
        Ok(PyProllyTree {
            tree: Arc::new(Mutex::new(tree_wrapper)),
        })
    }
fn insert(
&mut self,
py: Python,
key: &Bound<'_, PyBytes>,
value: &Bound<'_, PyBytes>,
) -> PyResult<()> {
let key_vec = key.as_bytes().to_vec();
let value_vec = value.as_bytes().to_vec();
py.allow_threads(|| {
let mut tree_wrapper = self.tree.lock();
with_tree_mut!(tree_wrapper, tree, {
tree.insert(key_vec, value_vec);
Ok(())
})
})
}
fn insert_batch(&mut self, py: Python, items: Vec<(Vec<u8>, Vec<u8>)>) -> PyResult<()> {
let keys: Vec<Vec<u8>> = items.iter().map(|(k, _)| k.clone()).collect();
let values: Vec<Vec<u8>> = items.iter().map(|(_, v)| v.clone()).collect();
py.allow_threads(|| {
let mut tree_wrapper = self.tree.lock();
with_tree_mut!(tree_wrapper, tree, {
tree.insert_batch(&keys, &values);
Ok(())
})
})
}
fn find(&self, py: Python, key: &Bound<'_, PyBytes>) -> PyResult<Option<Py<PyBytes>>> {
let key_vec = key.as_bytes().to_vec();
let result = py.allow_threads(|| {
let tree_wrapper = self.tree.lock();
with_tree!(tree_wrapper, tree, { tree.find(&key_vec) })
});
match result {
Some(node) => {
if let Some(key_index) = node.keys.iter().position(|k| k == &key_vec) {
if key_index < node.values.len() {
Ok(Some(PyBytes::new_bound(py, &node.values[key_index]).into()))
} else {
Ok(None)
}
} else {
Ok(None)
}
}
None => Ok(None),
}
}
    /// Replace the value stored under `key`; the GIL is released while the
    /// tree lock is held. The underlying update's result is discarded.
    fn update(
        &mut self,
        py: Python,
        key: &Bound<'_, PyBytes>,
        value: &Bound<'_, PyBytes>,
    ) -> PyResult<()> {
        let key_vec = key.as_bytes().to_vec();
        let value_vec = value.as_bytes().to_vec();
        py.allow_threads(|| {
            let mut tree_wrapper = self.tree.lock();
            with_tree_mut!(tree_wrapper, tree, {
                tree.update(key_vec, value_vec);
                Ok(())
            })
        })
    }
    /// Remove `key` from the tree. The underlying delete's result is
    /// discarded, so a missing key is not an error on the Python side.
    fn delete(&mut self, py: Python, key: &Bound<'_, PyBytes>) -> PyResult<()> {
        let key_vec = key.as_bytes().to_vec();
        py.allow_threads(|| {
            let mut tree_wrapper = self.tree.lock();
            with_tree_mut!(tree_wrapper, tree, {
                tree.delete(&key_vec);
                Ok(())
            })
        })
    }
    /// Remove a batch of keys in one locked operation.
    fn delete_batch(&mut self, py: Python, keys: Vec<Vec<u8>>) -> PyResult<()> {
        let key_vecs: Vec<Vec<u8>> = keys;
        py.allow_threads(|| {
            let mut tree_wrapper = self.tree.lock();
            with_tree_mut!(tree_wrapper, tree, {
                tree.delete_batch(&key_vecs);
                Ok(())
            })
        })
    }
    /// Number of key/value pairs stored in the tree.
    fn size(&self) -> PyResult<usize> {
        let tree_wrapper = self.tree.lock();
        Ok(with_tree!(tree_wrapper, tree, tree.size()))
    }
    /// Current depth of the tree.
    fn depth(&self) -> PyResult<usize> {
        let tree_wrapper = self.tree.lock();
        Ok(with_tree!(tree_wrapper, tree, tree.depth()))
    }
    /// Root hash as 32 bytes; an empty/uninitialized tree yields 32 zero
    /// bytes rather than `None`.
    fn get_root_hash(&self, py: Python) -> PyResult<Py<PyBytes>> {
        let tree_wrapper = self.tree.lock();
        let hash_opt = with_tree!(tree_wrapper, tree, tree.get_root_hash());
        match hash_opt {
            Some(hash) => Ok(PyBytes::new_bound(py, hash.as_ref()).into()),
            // No root yet: all-zero placeholder hash.
            None => Ok(PyBytes::new_bound(py, &[0u8; 32]).into()),
        }
    }
    /// Tree statistics as a dict of counters.
    fn stats(&self) -> PyResult<HashMap<String, usize>> {
        let tree_wrapper = self.tree.lock();
        let stats = with_tree!(tree_wrapper, tree, tree.stats());
        let mut map = HashMap::new();
        map.insert("num_nodes".to_string(), stats.num_nodes);
        map.insert("num_leaves".to_string(), stats.num_leaves);
        map.insert("num_internal_nodes".to_string(), stats.num_internal_nodes);
        // NOTE(review): avg_node_size is cast to usize here, which truncates
        // any fractional average.
        map.insert("avg_node_size".to_string(), stats.avg_node_size as usize);
        map.insert(
            "total_key_value_pairs".to_string(),
            stats.total_key_value_pairs,
        );
        Ok(map)
    }
    /// Produce a bincode-serialized Merkle proof for `key`.
    fn generate_proof(&self, py: Python, key: &Bound<'_, PyBytes>) -> PyResult<Py<PyBytes>> {
        let key_vec = key.as_bytes().to_vec();
        let proof_bytes = py.allow_threads(|| {
            let tree_wrapper = self.tree.lock();
            let proof = with_tree!(tree_wrapper, tree, tree.generate_proof(&key_vec));
            bincode::serialize(&proof)
                .map_err(|e| PyValueError::new_err(format!("Proof serialization failed: {}", e)))
        })?;
        Ok(PyBytes::new_bound(py, &proof_bytes).into())
    }
    /// Verify a proof produced by `generate_proof` against this tree.
    ///
    /// `expected_value`, when given, is forwarded to `ProllyTree::verify` as
    /// the value the proof must bind `key` to.
    #[pyo3(signature = (proof_bytes, key, expected_value=None))]
    fn verify_proof(
        &self,
        py: Python,
        proof_bytes: &Bound<'_, PyBytes>,
        key: &Bound<'_, PyBytes>,
        expected_value: Option<&Bound<'_, PyBytes>>,
    ) -> PyResult<bool> {
        let key_vec = key.as_bytes().to_vec();
        let proof_vec = proof_bytes.as_bytes().to_vec();
        let value_option = expected_value.map(|v| v.as_bytes().to_vec());
        py.allow_threads(|| {
            let proof: Proof<32> = bincode::deserialize(&proof_vec).map_err(|e| {
                PyValueError::new_err(format!("Proof deserialization failed: {}", e))
            })?;
            let tree_wrapper = self.tree.lock();
            Ok(with_tree!(
                tree_wrapper,
                tree,
                tree.verify(proof, &key_vec, value_option.as_deref())
            ))
        })
    }
    /// Structural diff against another tree.
    ///
    /// NOTE(review): this is currently a stub — `_other` is ignored and the
    /// returned "added"/"removed"/"modified" dicts are always empty. Needs a
    /// real implementation or an explicit NotImplementedError.
    fn diff(&self, py: Python, _other: &PyProllyTree) -> PyResult<Py<PyDict>> {
        let dict = PyDict::new_bound(py);
        let added = PyDict::new_bound(py);
        let removed = PyDict::new_bound(py);
        let modified = PyDict::new_bound(py);
        dict.set_item("added", added)?;
        dict.set_item("removed", removed)?;
        dict.set_item("modified", modified)?;
        Ok(dict.into())
    }
    /// Human-readable dump of the tree structure.
    fn traverse(&self) -> PyResult<String> {
        let tree_wrapper = self.tree.lock();
        Ok(with_tree!(tree_wrapper, tree, tree.traverse()))
    }
fn save_config(&self, py: Python) -> PyResult<()> {
py.allow_threads(|| {
let tree_wrapper = self.tree.lock();
with_tree!(tree_wrapper, tree, {
let _ = tree.save_config();
Ok(())
})
})
}
}
/// Storage backends selectable for `VersionedKvStore` (exposed to Python
/// as `StorageBackend`; `eq`/`eq_int` enable `==` comparisons from Python).
#[pyclass(name = "StorageBackend", eq, eq_int)]
#[derive(Clone, PartialEq)]
enum PyStorageBackend {
    InMemory,
    File,
    Git,
    RocksDB,
}
#[pymethods]
impl PyStorageBackend {
    /// `str()` representation: the variant name.
    fn __str__(&self) -> &str {
        match self {
            PyStorageBackend::InMemory => "InMemory",
            PyStorageBackend::File => "File",
            PyStorageBackend::Git => "Git",
            PyStorageBackend::RocksDB => "RocksDB",
        }
    }
}
/// Map the Rust-side backend enum onto the Python-visible one. The RocksDB
/// arm is feature-gated; presumably the Rust enum's variant is gated the
/// same way, keeping this match exhaustive in both configurations.
impl From<StorageBackend> for PyStorageBackend {
    fn from(backend: StorageBackend) -> Self {
        match backend {
            StorageBackend::InMemory => PyStorageBackend::InMemory,
            StorageBackend::File => PyStorageBackend::File,
            StorageBackend::Git => PyStorageBackend::Git,
            #[cfg(feature = "rocksdb_storage")]
            StorageBackend::RocksDB => PyStorageBackend::RocksDB,
        }
    }
}
/// Backend-erased wrapper over the concrete versioned KV store types;
/// the RocksDB variant exists only with the `rocksdb_storage` feature.
enum VersionedKvStoreWrapper {
    Git(GitVersionedKvStore<32>),
    File(FileVersionedKvStore<32>),
    InMemory(InMemoryVersionedKvStore<32>),
    #[cfg(feature = "rocksdb_storage")]
    RocksDB(RocksDBVersionedKvStore<32>),
}
/// Evaluate `$body` with `$store` bound to a shared reference to whichever
/// backend `$self` currently holds.
macro_rules! with_versioned_store {
    ($self:expr, $store:ident, $body:expr) => {
        match &*$self {
            VersionedKvStoreWrapper::Git($store) => $body,
            VersionedKvStoreWrapper::File($store) => $body,
            VersionedKvStoreWrapper::InMemory($store) => $body,
            #[cfg(feature = "rocksdb_storage")]
            VersionedKvStoreWrapper::RocksDB($store) => $body,
        }
    };
}
/// Mutable counterpart of `with_versioned_store!`.
macro_rules! with_versioned_store_mut {
    ($self:expr, $store:ident, $body:expr) => {
        match &mut *$self {
            VersionedKvStoreWrapper::Git($store) => $body,
            VersionedKvStoreWrapper::File($store) => $body,
            VersionedKvStoreWrapper::InMemory($store) => $body,
            #[cfg(feature = "rocksdb_storage")]
            VersionedKvStoreWrapper::RocksDB($store) => $body,
        }
    };
}
/// A single key-level conflict reported by `try_merge` (exposed as
/// `MergeConflict`). A `None` value means the key is absent on that side.
#[pyclass(name = "MergeConflict")]
#[derive(Clone)]
struct PyMergeConflict {
    key: Vec<u8>,                       // the conflicting key
    base_value: Option<Vec<u8>>,        // value at the merge base
    source_value: Option<Vec<u8>>,      // value on the source branch
    destination_value: Option<Vec<u8>>, // value on the destination branch
}
#[pymethods]
impl PyMergeConflict {
    /// The conflicting key as `bytes`.
    #[getter]
    fn key(&self, py: Python) -> PyResult<Py<PyBytes>> {
        Ok(PyBytes::new_bound(py, &self.key).into())
    }
    /// Merge-base value, if any.
    #[getter]
    fn base_value(&self, py: Python) -> PyResult<Option<Py<PyBytes>>> {
        Ok(self
            .base_value
            .as_ref()
            .map(|v| PyBytes::new_bound(py, v).into()))
    }
    /// Source-branch value, if any.
    #[getter]
    fn source_value(&self, py: Python) -> PyResult<Option<Py<PyBytes>>> {
        Ok(self
            .source_value
            .as_ref()
            .map(|v| PyBytes::new_bound(py, v).into()))
    }
    /// Destination-branch value, if any.
    #[getter]
    fn destination_value(&self, py: Python) -> PyResult<Option<Py<PyBytes>>> {
        Ok(self
            .destination_value
            .as_ref()
            .map(|v| PyBytes::new_bound(py, v).into()))
    }
    /// Debug representation; byte values are decoded with lossy UTF-8.
    fn __repr__(&self) -> String {
        format!(
            "MergeConflict(key={:?}, base={:?}, source={:?}, dest={:?})",
            String::from_utf8_lossy(&self.key),
            self.base_value
                .as_ref()
                .map(|v| String::from_utf8_lossy(v).to_string()),
            self.source_value
                .as_ref()
                .map(|v| String::from_utf8_lossy(v).to_string()),
            self.destination_value
                .as_ref()
                .map(|v| String::from_utf8_lossy(v).to_string()),
        )
    }
}
/// Conflict-resolution strategies accepted by `merge` (exposed as
/// `ConflictResolution`); each maps to a resolver in `crate::diff`.
#[pyclass(name = "ConflictResolution", eq, eq_int)]
#[derive(Clone, PartialEq)]
enum PyConflictResolution {
    IgnoreAll,       // crate::diff::IgnoreConflictsResolver
    TakeSource,      // crate::diff::TakeSourceResolver
    TakeDestination, // crate::diff::TakeDestinationResolver
}
/// One diff entry's operation (exposed as `DiffOperation`).
///
/// `operation_type` is "Added", "Removed", or "Modified"; "Added"/"Removed"
/// populate `value`, while "Modified" populates `old_value`/`new_value`.
#[pyclass(name = "DiffOperation")]
#[derive(Clone)]
struct PyDiffOperation {
    operation_type: String,     // "Added" | "Removed" | "Modified"
    value: Option<Vec<u8>>,     // payload for Added/Removed
    old_value: Option<Vec<u8>>, // previous payload for Modified
    new_value: Option<Vec<u8>>, // new payload for Modified
}
#[pymethods]
impl PyDiffOperation {
    /// Operation kind as a string.
    #[getter]
    fn operation_type(&self) -> String {
        self.operation_type.clone()
    }
    /// Added/Removed payload, if any.
    #[getter]
    fn value(&self, py: Python) -> PyResult<Option<Py<PyBytes>>> {
        Ok(self
            .value
            .as_ref()
            .map(|v| PyBytes::new_bound(py, v).into()))
    }
    /// Pre-modification payload, if any.
    #[getter]
    fn old_value(&self, py: Python) -> PyResult<Option<Py<PyBytes>>> {
        Ok(self
            .old_value
            .as_ref()
            .map(|v| PyBytes::new_bound(py, v).into()))
    }
    /// Post-modification payload, if any.
    #[getter]
    fn new_value(&self, py: Python) -> PyResult<Option<Py<PyBytes>>> {
        Ok(self
            .new_value
            .as_ref()
            .map(|v| PyBytes::new_bound(py, v).into()))
    }
    /// Compact representation showing only payload sizes, not contents.
    fn __repr__(&self) -> String {
        match self.operation_type.as_str() {
            "Added" => format!(
                "DiffOperation.Added(value_size={})",
                self.value.as_ref().map_or(0, |v| v.len())
            ),
            "Removed" => format!(
                "DiffOperation.Removed(value_size={})",
                self.value.as_ref().map_or(0, |v| v.len())
            ),
            "Modified" => format!(
                "DiffOperation.Modified(old_size={}, new_size={})",
                self.old_value.as_ref().map_or(0, |v| v.len()),
                self.new_value.as_ref().map_or(0, |v| v.len())
            ),
            _ => "DiffOperation.Unknown".to_string(),
        }
    }
}
/// A key plus the operation applied to it (exposed as `KvDiff`); produced
/// by `VersionedKvStore.diff`.
#[pyclass(name = "KvDiff")]
#[derive(Clone)]
struct PyKvDiff {
    key: Vec<u8>,
    operation: PyDiffOperation,
}
#[pymethods]
impl PyKvDiff {
    /// The affected key as `bytes`.
    #[getter]
    fn key(&self, py: Python) -> PyResult<Py<PyBytes>> {
        Ok(PyBytes::new_bound(py, &self.key).into())
    }
    /// The operation applied to this key.
    #[getter]
    fn operation(&self) -> PyDiffOperation {
        self.operation.clone()
    }
    fn __repr__(&self) -> String {
        format!(
            "KvDiff(key={:?}, operation={})",
            String::from_utf8_lossy(&self.key),
            self.operation.__repr__()
        )
    }
}
/// Python-visible versioned key-value store (exposed as
/// `VersionedKvStore`), holding one `VersionedKvStoreWrapper` backend
/// behind `Arc<Mutex<..>>`.
#[pyclass(name = "VersionedKvStore")]
struct PyVersionedKvStore {
    inner: Arc<Mutex<VersionedKvStoreWrapper>>,
}
#[pymethods]
impl PyVersionedKvStore {
    /// Initialize a fresh store at `path` with the given backend
    /// (default: Git). Raises `ValueError` on init failure, or when RocksDB
    /// is requested without the `rocksdb_storage` feature.
    #[new]
    #[pyo3(signature = (path, storage_backend=None))]
    fn new(path: String, storage_backend: Option<PyStorageBackend>) -> PyResult<Self> {
        let backend = storage_backend.unwrap_or(PyStorageBackend::Git);
        let wrapper = match backend {
            PyStorageBackend::Git => {
                let store = GitVersionedKvStore::<32>::init(&path).map_err(|e| {
                    PyValueError::new_err(format!("Failed to initialize Git store: {}", e))
                })?;
                VersionedKvStoreWrapper::Git(store)
            }
            PyStorageBackend::File => {
                let store = FileVersionedKvStore::<32>::init(&path).map_err(|e| {
                    PyValueError::new_err(format!("Failed to initialize File store: {}", e))
                })?;
                VersionedKvStoreWrapper::File(store)
            }
            PyStorageBackend::InMemory => {
                let store = InMemoryVersionedKvStore::<32>::init(&path).map_err(|e| {
                    PyValueError::new_err(format!("Failed to initialize InMemory store: {}", e))
                })?;
                VersionedKvStoreWrapper::InMemory(store)
            }
            #[cfg(feature = "rocksdb_storage")]
            PyStorageBackend::RocksDB => {
                let store = RocksDBVersionedKvStore::<32>::init(&path).map_err(|e| {
                    PyValueError::new_err(format!("Failed to initialize RocksDB store: {}", e))
                })?;
                VersionedKvStoreWrapper::RocksDB(store)
            }
            // The Python enum always has a RocksDB variant; without the
            // feature it resolves to a runtime error instead.
            #[cfg(not(feature = "rocksdb_storage"))]
            PyStorageBackend::RocksDB => {
                return Err(PyValueError::new_err(
                    "RocksDB storage backend requires 'rocksdb_storage' feature to be enabled",
                ));
            }
        };
        Ok(PyVersionedKvStore {
            inner: Arc::new(Mutex::new(wrapper)),
        })
    }
    /// Open an existing store at `path` (default backend: Git). Mirrors
    /// `new` but calls each backend's `open` instead of `init`.
    #[staticmethod]
    #[pyo3(signature = (path, storage_backend=None))]
    fn open(path: String, storage_backend: Option<PyStorageBackend>) -> PyResult<Self> {
        let backend = storage_backend.unwrap_or(PyStorageBackend::Git);
        let wrapper = match backend {
            PyStorageBackend::Git => {
                let store = GitVersionedKvStore::<32>::open(&path).map_err(|e| {
                    PyValueError::new_err(format!("Failed to open Git store: {}", e))
                })?;
                VersionedKvStoreWrapper::Git(store)
            }
            PyStorageBackend::File => {
                let store = FileVersionedKvStore::<32>::open(&path).map_err(|e| {
                    PyValueError::new_err(format!("Failed to open File store: {}", e))
                })?;
                VersionedKvStoreWrapper::File(store)
            }
            PyStorageBackend::InMemory => {
                let store = InMemoryVersionedKvStore::<32>::open(&path).map_err(|e| {
                    PyValueError::new_err(format!("Failed to open InMemory store: {}", e))
                })?;
                VersionedKvStoreWrapper::InMemory(store)
            }
            #[cfg(feature = "rocksdb_storage")]
            PyStorageBackend::RocksDB => {
                let store = RocksDBVersionedKvStore::<32>::open(&path).map_err(|e| {
                    PyValueError::new_err(format!("Failed to open RocksDB store: {}", e))
                })?;
                VersionedKvStoreWrapper::RocksDB(store)
            }
            #[cfg(not(feature = "rocksdb_storage"))]
            PyStorageBackend::RocksDB => {
                return Err(PyValueError::new_err(
                    "RocksDB storage backend requires 'rocksdb_storage' feature to be enabled",
                ));
            }
        };
        Ok(PyVersionedKvStore {
            inner: Arc::new(Mutex::new(wrapper)),
        })
    }
fn insert(&self, key: &Bound<'_, PyBytes>, value: &Bound<'_, PyBytes>) -> PyResult<()> {
let key_vec = key.as_bytes().to_vec();
let value_vec = value.as_bytes().to_vec();
let mut guard = self.inner.lock();
with_versioned_store_mut!(guard, store, {
store
.insert(key_vec, value_vec)
.map_err(|e| PyValueError::new_err(format!("Failed to insert: {}", e)))?;
Ok(())
})
}
fn get(&self, py: Python, key: &Bound<'_, PyBytes>) -> PyResult<Option<Py<PyBytes>>> {
let key_vec = key.as_bytes().to_vec();
let guard = self.inner.lock();
with_versioned_store!(guard, store, {
match store.get(&key_vec) {
Some(value) => Ok(Some(PyBytes::new_bound(py, &value).into())),
None => Ok(None),
}
})
}
fn update(&self, key: &Bound<'_, PyBytes>, value: &Bound<'_, PyBytes>) -> PyResult<bool> {
let key_vec = key.as_bytes().to_vec();
let value_vec = value.as_bytes().to_vec();
let mut guard = self.inner.lock();
with_versioned_store_mut!(guard, store, {
store
.update(key_vec, value_vec)
.map_err(|e| PyValueError::new_err(format!("Failed to update: {}", e)))
})
}
fn delete(&self, key: &Bound<'_, PyBytes>) -> PyResult<bool> {
let key_vec = key.as_bytes().to_vec();
let mut guard = self.inner.lock();
with_versioned_store_mut!(guard, store, {
store
.delete(&key_vec)
.map_err(|e| PyValueError::new_err(format!("Failed to delete: {}", e)))
})
}
    /// All keys in the working tree, truncated to `MAX_KEYS_LIMIT` entries
    /// (a warning is printed to stderr when truncation happens).
    fn list_keys(&self, py: Python) -> PyResult<Vec<Py<PyBytes>>> {
        let guard = self.inner.lock();
        with_versioned_store!(guard, store, {
            let keys = store.list_keys();
            let total_keys = keys.len();
            if total_keys > MAX_KEYS_LIMIT {
                eprintln!(
                    "Warning: Tree contains {} keys, but only returning first {} keys due to limit. \
                    Consider using more specific queries or implementing pagination.",
                    total_keys, MAX_KEYS_LIMIT
                );
            }
            let py_keys: Vec<Py<PyBytes>> = keys
                .iter()
                .take(MAX_KEYS_LIMIT)
                .map(|key| PyBytes::new_bound(py, key).into())
                .collect();
            Ok(py_keys)
        })
    }
    /// Pending (uncommitted) changes as `(key, status-string)` pairs.
    fn status(&self, py: Python) -> PyResult<Vec<(Py<PyBytes>, String)>> {
        let guard = self.inner.lock();
        with_versioned_store!(guard, store, {
            let status = store.status();
            let py_status: Vec<(Py<PyBytes>, String)> = status
                .iter()
                .map(|(key, status_str)| (PyBytes::new_bound(py, key).into(), status_str.clone()))
                .collect();
            Ok(py_status)
        })
    }
    /// Commit staged changes with `message`; returns the commit id hex.
    fn commit(&self, message: String) -> PyResult<String> {
        let mut guard = self.inner.lock();
        with_versioned_store_mut!(guard, store, {
            let commit_id = store
                .commit(&message)
                .map_err(|e| PyValueError::new_err(format!("Failed to commit: {}", e)))?;
            Ok(commit_id.to_hex().to_string())
        })
    }
fn branch(&self, name: String) -> PyResult<()> {
let mut guard = self.inner.lock();
with_versioned_store_mut!(guard, store, {
store
.branch(&name)
.map_err(|e| PyValueError::new_err(format!("Failed to create branch: {}", e)))?;
Ok(())
})
}
fn create_branch(&self, name: String) -> PyResult<()> {
let mut guard = self.inner.lock();
with_versioned_store_mut!(guard, store, {
store.create_branch(&name).map_err(|e| {
PyValueError::new_err(format!("Failed to create and switch branch: {}", e))
})?;
Ok(())
})
}
fn checkout(&self, branch_or_commit: String) -> PyResult<()> {
let mut guard = self.inner.lock();
with_versioned_store_mut!(guard, store, {
store
.checkout_generic(&branch_or_commit)
.map_err(|e| PyValueError::new_err(format!("Failed to checkout: {}", e)))?;
Ok(())
})
}
fn current_branch(&self) -> PyResult<String> {
let guard = self.inner.lock();
with_versioned_store!(guard, store, { Ok(store.current_branch().to_string()) })
}
fn list_branches(&self) -> PyResult<Vec<String>> {
let guard = self.inner.lock();
with_versioned_store!(guard, store, {
store
.list_branches()
.map_err(|e| PyValueError::new_err(format!("Failed to list branches: {}", e)))
})
}
    /// Commit log of the current branch as a list of dicts with keys
    /// `id`, `author`, `committer`, `message`, `timestamp`.
    ///
    /// Commit data is first extracted into plain tuples while the store
    /// lock is held, then converted to Python objects under the GIL.
    /// NOTE(review): `log`, `get_commits_for_key`, and `get_commit_history`
    /// share this commit→dict conversion; candidate for a private helper.
    fn log(&self) -> PyResult<Vec<HashMap<String, Py<PyAny>>>> {
        let commits_data: Vec<(String, String, String, String, i64)> = {
            let guard = self.inner.lock();
            with_versioned_store!(guard, store, {
                let history = store
                    .log()
                    .map_err(|e| PyValueError::new_err(format!("Failed to get log: {}", e)))?;
                let data: Vec<_> = history
                    .iter()
                    .map(|commit| {
                        (
                            commit.id.to_hex().to_string(),
                            commit.author.clone(),
                            commit.committer.clone(),
                            commit.message.clone(),
                            commit.timestamp,
                        )
                    })
                    .collect();
                Ok::<_, PyErr>(data)
            })?
        };
        Python::with_gil(|py| {
            let results: PyResult<Vec<HashMap<String, Py<PyAny>>>> = commits_data
                .into_iter()
                .map(|(id, author, committer, message, timestamp)| {
                    let mut map = HashMap::new();
                    map.insert("id".to_string(), id.into_py(py));
                    map.insert("author".to_string(), author.into_py(py));
                    map.insert("committer".to_string(), committer.into_py(py));
                    map.insert("message".to_string(), message.into_py(py));
                    map.insert("timestamp".to_string(), timestamp.into_py(py));
                    Ok(map)
                })
                .collect();
            results
        })
    }
    /// Commits that touched `key`, in the same dict format as `log`.
    fn get_commits_for_key(
        &self,
        key: &Bound<'_, PyBytes>,
    ) -> PyResult<Vec<HashMap<String, Py<PyAny>>>> {
        let key_vec = key.as_bytes().to_vec();
        let commits_data: Vec<(String, String, String, String, i64)> = {
            let guard = self.inner.lock();
            with_versioned_store!(guard, store, {
                let commits = store.get_commits_for_key(&key_vec).map_err(|e| {
                    PyValueError::new_err(format!("Failed to get commits for key: {}", e))
                })?;
                let data: Vec<_> = commits
                    .iter()
                    .map(|commit| {
                        (
                            commit.id.to_hex().to_string(),
                            commit.author.clone(),
                            commit.committer.clone(),
                            commit.message.clone(),
                            commit.timestamp,
                        )
                    })
                    .collect();
                Ok::<_, PyErr>(data)
            })?
        };
        Python::with_gil(|py| {
            let results: PyResult<Vec<HashMap<String, Py<PyAny>>>> = commits_data
                .into_iter()
                .map(|(id, author, committer, message, timestamp)| {
                    let mut map = HashMap::new();
                    map.insert("id".to_string(), id.into_py(py));
                    map.insert("author".to_string(), author.into_py(py));
                    map.insert("committer".to_string(), committer.into_py(py));
                    map.insert("message".to_string(), message.into_py(py));
                    map.insert("timestamp".to_string(), timestamp.into_py(py));
                    Ok(map)
                })
                .collect();
            results
        })
    }
    /// Full commit history, in the same dict format as `log`.
    fn get_commit_history(&self) -> PyResult<Vec<HashMap<String, Py<PyAny>>>> {
        let commits_data: Vec<(String, String, String, String, i64)> = {
            let guard = self.inner.lock();
            with_versioned_store!(guard, store, {
                let commits = store.get_commit_history().map_err(|e| {
                    PyValueError::new_err(format!("Failed to get commit history: {}", e))
                })?;
                let data: Vec<_> = commits
                    .iter()
                    .map(|commit| {
                        (
                            commit.id.to_hex().to_string(),
                            commit.author.clone(),
                            commit.committer.clone(),
                            commit.message.clone(),
                            commit.timestamp,
                        )
                    })
                    .collect();
                Ok::<_, PyErr>(data)
            })?
        };
        Python::with_gil(|py| {
            let results: PyResult<Vec<HashMap<String, Py<PyAny>>>> = commits_data
                .into_iter()
                .map(|(id, author, committer, message, timestamp)| {
                    let mut map = HashMap::new();
                    map.insert("id".to_string(), id.into_py(py));
                    map.insert("author".to_string(), author.into_py(py));
                    map.insert("committer".to_string(), committer.into_py(py));
                    map.insert("message".to_string(), message.into_py(py));
                    map.insert("timestamp".to_string(), timestamp.into_py(py));
                    Ok(map)
                })
                .collect();
            results
        })
    }
    /// Merge `source_branch` into the current branch using the chosen
    /// conflict-resolution strategy (default: `IgnoreAll`); returns the
    /// merge commit id hex. Raises `ValueError` when the merge fails.
    #[pyo3(signature = (source_branch, conflict_resolution=None))]
    fn merge(
        &self,
        source_branch: String,
        conflict_resolution: Option<PyConflictResolution>,
    ) -> PyResult<String> {
        let mut guard = self.inner.lock();
        let resolution = conflict_resolution.unwrap_or(PyConflictResolution::IgnoreAll);
        with_versioned_store_mut!(guard, store, {
            // Each strategy maps to a concrete resolver type in crate::diff.
            let commit_id = match resolution {
                PyConflictResolution::IgnoreAll => {
                    let resolver = crate::diff::IgnoreConflictsResolver;
                    store
                        .merge_generic(&source_branch, &resolver)
                        .map_err(|e| PyValueError::new_err(format!("Merge failed: {}", e)))?
                }
                PyConflictResolution::TakeSource => {
                    let resolver = crate::diff::TakeSourceResolver;
                    store
                        .merge_generic(&source_branch, &resolver)
                        .map_err(|e| PyValueError::new_err(format!("Merge failed: {}", e)))?
                }
                PyConflictResolution::TakeDestination => {
                    let resolver = crate::diff::TakeDestinationResolver;
                    store
                        .merge_generic(&source_branch, &resolver)
                        .map_err(|e| PyValueError::new_err(format!("Merge failed: {}", e)))?
                }
            };
            Ok(commit_id.to_hex().to_string())
        })
    }
    /// Attempt a merge; returns `(True, [])` on success or
    /// `(False, conflicts)` when key-level conflicts are detected. Other
    /// errors raise `ValueError`.
    fn try_merge(&self, source_branch: String) -> PyResult<(bool, Vec<PyMergeConflict>)> {
        let mut guard = self.inner.lock();
        with_versioned_store_mut!(guard, store, {
            match store.try_merge_generic(&source_branch) {
                Ok(_commit_id) => {
                    Ok((true, Vec::new()))
                }
                // Conflicts are data, not an exception, on the Python side.
                Err(crate::git::types::GitKvError::MergeConflictError(conflicts)) => {
                    let py_conflicts: Vec<PyMergeConflict> = conflicts
                        .into_iter()
                        .map(|c| PyMergeConflict {
                            key: c.key,
                            base_value: c.base_value,
                            source_value: c.source_value,
                            destination_value: c.destination_value,
                        })
                        .collect();
                    Ok((false, py_conflicts))
                }
                Err(e) => Err(PyValueError::new_err(format!("Merge failed: {}", e))),
            }
        })
    }
    /// The backend this store was created with, as a `StorageBackend`.
    fn storage_backend(&self) -> PyResult<PyStorageBackend> {
        let guard = self.inner.lock();
        with_versioned_store!(guard, store, { Ok(store.storage_backend().clone().into()) })
    }
    /// Produce a bincode-serialized Merkle proof for `key` from the
    /// current tree state.
    fn generate_proof(&self, py: Python, key: &Bound<'_, PyBytes>) -> PyResult<Py<PyBytes>> {
        let key_vec = key.as_bytes().to_vec();
        let proof_bytes = py.allow_threads(|| {
            let guard = self.inner.lock();
            with_versioned_store!(guard, store, {
                let proof = store.generate_proof(&key_vec);
                bincode::serialize(&proof).map_err(|e| {
                    PyValueError::new_err(format!("Proof serialization failed: {}", e))
                })
            })
        })?;
        Ok(PyBytes::new_bound(py, &proof_bytes).into())
    }
    /// Verify a proof produced by `generate_proof`; `expected_value`, when
    /// given, is forwarded to the store's verifier.
    #[pyo3(signature = (proof_bytes, key, expected_value=None))]
    fn verify_proof(
        &self,
        py: Python,
        proof_bytes: &Bound<'_, PyBytes>,
        key: &Bound<'_, PyBytes>,
        expected_value: Option<&Bound<'_, PyBytes>>,
    ) -> PyResult<bool> {
        let key_vec = key.as_bytes().to_vec();
        let proof_vec = proof_bytes.as_bytes().to_vec();
        let value_option = expected_value.map(|v| v.as_bytes().to_vec());
        py.allow_threads(|| {
            let proof: crate::proof::Proof<32> = bincode::deserialize(&proof_vec).map_err(|e| {
                PyValueError::new_err(format!("Proof deserialization failed: {}", e))
            })?;
            let guard = self.inner.lock();
            with_versioned_store!(guard, store, {
                Ok(store.verify(proof, &key_vec, value_option.as_deref()))
            })
        })
    }
    /// All `(key, value)` pairs as of `reference` (branch or commit),
    /// truncated to `MAX_KEYS_LIMIT` entries with a stderr warning.
    fn get_keys_at_ref(
        &self,
        py: Python,
        reference: String,
    ) -> PyResult<Vec<(Py<PyBytes>, Py<PyBytes>)>> {
        let guard = self.inner.lock();
        with_versioned_store!(guard, store, {
            // Fully-qualified call disambiguates the HistoricalAccess trait method.
            let keys_map = HistoricalAccess::get_keys_at_ref(store, &reference)
                .map_err(|e| PyValueError::new_err(format!("Failed to get keys at ref: {}", e)))?;
            let total_keys = keys_map.len();
            if total_keys > MAX_KEYS_LIMIT {
                eprintln!(
                    "Warning: Tree contains {} keys, but only returning first {} keys due to limit. \
                    Consider using more specific queries or implementing pagination.",
                    total_keys, MAX_KEYS_LIMIT
                );
            }
            let py_pairs: Vec<(Py<PyBytes>, Py<PyBytes>)> = keys_map
                .into_iter()
                .take(MAX_KEYS_LIMIT)
                .map(|(key, value): (Vec<u8>, Vec<u8>)| {
                    (
                        PyBytes::new_bound(py, &key).into(),
                        PyBytes::new_bound(py, &value).into(),
                    )
                })
                .collect();
            Ok(py_pairs)
        })
    }
    /// Key-level diff between two refs, as a list of `KvDiff` objects.
    fn diff(&self, from_ref: String, to_ref: String) -> PyResult<Vec<PyKvDiff>> {
        let guard = self.inner.lock();
        with_versioned_store!(guard, store, {
            let diffs = store
                .diff(&from_ref, &to_ref)
                .map_err(|e| PyValueError::new_err(format!("Failed to compute diff: {}", e)))?;
            // Flatten the Rust DiffOperation enum into the Python-facing
            // tagged-struct form (operation_type + optional payloads).
            let py_diffs: Vec<PyKvDiff> = diffs
                .into_iter()
                .map(|diff| {
                    let operation = match diff.operation {
                        DiffOperation::Added(value) => PyDiffOperation {
                            operation_type: "Added".to_string(),
                            value: Some(value),
                            old_value: None,
                            new_value: None,
                        },
                        DiffOperation::Removed(value) => PyDiffOperation {
                            operation_type: "Removed".to_string(),
                            value: Some(value),
                            old_value: None,
                            new_value: None,
                        },
                        DiffOperation::Modified { old, new } => PyDiffOperation {
                            operation_type: "Modified".to_string(),
                            value: None,
                            old_value: Some(old),
                            new_value: Some(new),
                        },
                    };
                    PyKvDiff {
                        key: diff.key,
                        operation,
                    }
                })
                .collect();
            Ok(py_diffs)
        })
    }
    /// Id (hex) of the currently checked-out commit.
    fn current_commit(&self) -> PyResult<String> {
        let guard = self.inner.lock();
        with_versioned_store!(guard, store, {
            let commit_id = store.current_commit().map_err(|e| {
                PyValueError::new_err(format!("Failed to get current commit: {}", e))
            })?;
            Ok(commit_id.to_hex().to_string())
        })
    }
}
/// Python-visible manager of Git worktrees (exposed as `WorktreeManager`);
/// only built with the `git` feature.
#[cfg(feature = "git")]
#[pyclass(name = "WorktreeManager")]
struct PyWorktreeManager {
    inner: Arc<Mutex<crate::git::worktree::WorktreeManager>>,
}
#[cfg(feature = "git")]
#[pymethods]
impl PyWorktreeManager {
    /// Create a manager for the repository at `repo_path`.
    #[new]
    fn new(repo_path: String) -> PyResult<Self> {
        let manager = crate::git::worktree::WorktreeManager::new(repo_path).map_err(|e| {
            PyValueError::new_err(format!("Failed to create worktree manager: {}", e))
        })?;
        Ok(PyWorktreeManager {
            inner: Arc::new(Mutex::new(manager)),
        })
    }
    /// Add a worktree at `path` on `branch` (optionally creating the
    /// branch); returns a dict with `id`, `path`, `branch`, `is_linked`.
    fn add_worktree(
        &self,
        path: String,
        branch: String,
        create_branch: bool,
    ) -> PyResult<HashMap<String, Py<PyAny>>> {
        let mut manager = self.inner.lock();
        let info = manager
            .add_worktree(path, &branch, create_branch)
            .map_err(|e| PyValueError::new_err(format!("Failed to add worktree: {}", e)))?;
        Python::with_gil(|py| {
            let mut map = HashMap::new();
            map.insert("id".to_string(), info.id.into_py(py));
            map.insert("path".to_string(), info.path.to_string_lossy().into_py(py));
            map.insert("branch".to_string(), info.branch.into_py(py));
            map.insert("is_linked".to_string(), info.is_linked.into_py(py));
            Ok(map)
        })
    }
    /// Remove the worktree identified by `worktree_id`.
    fn remove_worktree(&self, worktree_id: String) -> PyResult<()> {
        let mut manager = self.inner.lock();
        manager
            .remove_worktree(&worktree_id)
            .map_err(|e| PyValueError::new_err(format!("Failed to remove worktree: {}", e)))?;
        Ok(())
    }
    /// Lock a worktree, recording `reason`.
    fn lock_worktree(&self, worktree_id: String, reason: String) -> PyResult<()> {
        let mut manager = self.inner.lock();
        manager
            .lock_worktree(&worktree_id, &reason)
            .map_err(|e| PyValueError::new_err(format!("Failed to lock worktree: {}", e)))?;
        Ok(())
    }
    /// Unlock a previously locked worktree.
    fn unlock_worktree(&self, worktree_id: String) -> PyResult<()> {
        let mut manager = self.inner.lock();
        manager
            .unlock_worktree(&worktree_id)
            .map_err(|e| PyValueError::new_err(format!("Failed to unlock worktree: {}", e)))?;
        Ok(())
    }
    /// All registered worktrees, each as a dict like `add_worktree`'s result.
    fn list_worktrees(&self) -> PyResult<Vec<HashMap<String, Py<PyAny>>>> {
        let manager = self.inner.lock();
        let worktrees = manager.list_worktrees();
        Python::with_gil(|py| {
            let results: Vec<HashMap<String, Py<PyAny>>> = worktrees
                .iter()
                .map(|info| {
                    let mut map = HashMap::new();
                    map.insert("id".to_string(), info.id.clone().into_py(py));
                    map.insert("path".to_string(), info.path.to_string_lossy().into_py(py));
                    map.insert("branch".to_string(), info.branch.clone().into_py(py));
                    map.insert("is_linked".to_string(), info.is_linked.into_py(py));
                    map
                })
                .collect();
            Ok(results)
        })
    }
    /// Whether the given worktree is currently locked.
    fn is_locked(&self, worktree_id: String) -> PyResult<bool> {
        let manager = self.inner.lock();
        Ok(manager.is_locked(&worktree_id))
    }
    /// Merge a worktree's branch into main with `commit_message`; returns
    /// the resulting commit id (as produced by the manager).
    fn merge_to_main(&self, worktree_id: String, commit_message: String) -> PyResult<String> {
        let mut manager = self.inner.lock();
        manager
            .merge_to_main(&worktree_id, &commit_message)
            .map_err(|e| PyValueError::new_err(format!("Failed to merge to main: {}", e)))
    }
    /// Merge a source worktree's branch into `target_branch`.
    fn merge_branch(
        &self,
        source_worktree_id: String,
        target_branch: String,
        commit_message: String,
    ) -> PyResult<String> {
        let mut manager = self.inner.lock();
        manager
            .merge_branch(&source_worktree_id, &target_branch, &commit_message)
            .map_err(|e| PyValueError::new_err(format!("Failed to merge branch: {}", e)))
    }
    /// Commit id a branch currently points to.
    fn get_branch_commit(&self, branch: String) -> PyResult<String> {
        let manager = self.inner.lock();
        manager
            .get_branch_commit(&branch)
            .map_err(|e| PyValueError::new_err(format!("Failed to get branch commit: {}", e)))
    }
    /// All branch names known to the repository.
    fn list_branches(&self) -> PyResult<Vec<String>> {
        let manager = self.inner.lock();
        manager
            .list_branches()
            .map_err(|e| PyValueError::new_err(format!("Failed to list branches: {}", e)))
    }
}
/// Versioned KV store bound to a specific Git worktree (exposed as
/// `WorktreeVersionedKvStore`); only built with the `git` feature.
#[cfg(feature = "git")]
#[pyclass(name = "WorktreeVersionedKvStore")]
struct PyWorktreeVersionedKvStore {
    inner: Arc<Mutex<crate::git::worktree::WorktreeVersionedKvStore<32>>>,
}
#[cfg(feature = "git")]
#[pymethods]
impl PyWorktreeVersionedKvStore {
    /// Bind a store to an existing worktree managed by `manager`.
    ///
    /// NOTE(review): the `WorktreeInfo` is reconstructed here from the
    /// caller-supplied path/id/branch with `is_linked: true` and no lock
    /// file — it is not validated against the manager's records.
    #[staticmethod]
    fn from_worktree(
        worktree_path: String,
        worktree_id: String,
        branch: String,
        manager: &PyWorktreeManager,
    ) -> PyResult<Self> {
        use std::path::PathBuf;
        let worktree_info = crate::git::worktree::WorktreeInfo {
            id: worktree_id,
            path: PathBuf::from(worktree_path),
            branch,
            is_linked: true,
            lock_file: None,
        };
        let store = crate::git::worktree::WorktreeVersionedKvStore::from_worktree(
            worktree_info,
            Arc::clone(&manager.inner),
        )
        .map_err(|e| PyValueError::new_err(format!("Failed to create worktree store: {}", e)))?;
        Ok(PyWorktreeVersionedKvStore {
            inner: Arc::new(Mutex::new(store)),
        })
    }
    /// Identifier of the bound worktree.
    fn worktree_id(&self) -> PyResult<String> {
        let store = self.inner.lock();
        Ok(store.worktree_id().to_string())
    }
    /// Branch the bound worktree is on.
    fn current_branch(&self) -> PyResult<String> {
        let store = self.inner.lock();
        Ok(store.current_branch().to_string())
    }
    /// Whether the bound worktree is locked.
    fn is_locked(&self) -> PyResult<bool> {
        let store = self.inner.lock();
        Ok(store.is_locked())
    }
    /// Lock the bound worktree, recording `reason`.
    fn lock(&self, reason: String) -> PyResult<()> {
        let store = self.inner.lock();
        store
            .lock(&reason)
            .map_err(|e| PyValueError::new_err(format!("Failed to lock worktree: {}", e)))?;
        Ok(())
    }
    /// Unlock the bound worktree.
    fn unlock(&self) -> PyResult<()> {
        let store = self.inner.lock();
        store
            .unlock()
            .map_err(|e| PyValueError::new_err(format!("Failed to unlock worktree: {}", e)))?;
        Ok(())
    }
    /// Stage an insert of `key` -> `value` in this worktree's store.
    /// (Takes/returns raw `Vec<u8>` rather than `PyBytes`, unlike
    /// `VersionedKvStore` — kept as-is for interface compatibility.)
    fn insert(&self, key: Vec<u8>, value: Vec<u8>) -> PyResult<()> {
        let mut store = self.inner.lock();
        store
            .store_mut()
            .insert(key, value)
            .map_err(|e| PyValueError::new_err(format!("Failed to insert: {}", e)))?;
        Ok(())
    }
    /// Read the current value for `key`, or `None`.
    fn get(&self, key: Vec<u8>) -> PyResult<Option<Vec<u8>>> {
        let store = self.inner.lock();
        Ok(store.store().get(&key))
    }
    /// Stage a delete; returns whether the key was present.
    fn delete(&self, key: Vec<u8>) -> PyResult<bool> {
        let mut store = self.inner.lock();
        let result = store
            .store_mut()
            .delete(&key)
            .map_err(|e| PyValueError::new_err(format!("Failed to delete: {}", e)))?;
        Ok(result)
    }
    /// Commit staged changes with `message`; returns the commit id hex.
    fn commit(&self, message: String) -> PyResult<String> {
        let mut store = self.inner.lock();
        let commit_id = store
            .store_mut()
            .commit(&message)
            .map_err(|e| PyValueError::new_err(format!("Failed to commit: {}", e)))?;
        Ok(commit_id.to_hex().to_string())
    }
    /// All keys, truncated to `MAX_KEYS_LIMIT` with a stderr warning.
    fn list_keys(&self) -> PyResult<Vec<Vec<u8>>> {
        let store = self.inner.lock();
        let keys = store.store().list_keys();
        let total_keys = keys.len();
        if total_keys > MAX_KEYS_LIMIT {
            eprintln!(
                "Warning: Tree contains {} keys, but only returning first {} keys due to limit. \
                Consider using more specific queries or implementing pagination.",
                total_keys, MAX_KEYS_LIMIT
            );
        }
        Ok(keys.into_iter().take(MAX_KEYS_LIMIT).collect())
    }
}
#[cfg(feature = "sql")]
/// Python-facing SQL interface (GlueSQL) over a prolly-tree-backed, git-versioned store.
/// Exposed to Python as `ProllySQLStore`.
#[pyclass(name = "ProllySQLStore")]
struct PyProllySQLStore {
// GlueSQL executor guarded by a mutex; one statement executes at a time.
inner: Arc<Mutex<Glue<ProllyStorage<32>>>>,
}
#[cfg(feature = "sql")]
#[pymethods]
impl PyProllySQLStore {
/// Create a store by initializing a fresh git-versioned KV store at `path`.
#[new]
fn new(path: String) -> PyResult<Self> {
let store = ThreadSafeGitVersionedKvStore::<32>::init(path)
.map_err(|e| PyValueError::new_err(format!("Failed to initialize store: {}", e)))?;
let storage = ProllyStorage::<32>::new(store);
let glue = Glue::new(storage);
Ok(PyProllySQLStore {
inner: Arc::new(Mutex::new(glue)),
})
}
/// Open a store that was previously initialized at `path`.
#[staticmethod]
fn open(path: String) -> PyResult<Self> {
let store = ThreadSafeGitVersionedKvStore::<32>::open(path)
.map_err(|e| PyValueError::new_err(format!("Failed to open store: {}", e)))?;
let storage = ProllyStorage::<32>::new(store);
let glue = Glue::new(storage);
Ok(PyProllySQLStore {
inner: Arc::new(Mutex::new(glue)),
})
}
/// Execute a SQL statement and return the result in the requested `format`:
/// "dict"/"dicts" (list of per-row dicts), "tuples" ((labels, rows) pair),
/// "json" (pretty-printed JSON string), or "csv" (CSV string).
/// Non-SELECT statements return a small status dict instead.
#[pyo3(signature = (query, format="dict"))]
fn execute(&self, py: Python, query: String, format: &str) -> PyResult<Py<PyAny>> {
// Release the GIL while the SQL runs; it is re-acquired below only to
// build the Python result objects.
py.allow_threads(|| {
// NOTE(review): a fresh tokio runtime is built per call; consider a
// shared/lazy runtime if this shows up in profiles.
let runtime = tokio::runtime::Runtime::new()
.map_err(|e| PyValueError::new_err(format!("Failed to create runtime: {}", e)))?;
let mut glue = self.inner.lock();
runtime.block_on(async {
let results = glue
.execute(&query)
.await
.map_err(|e| PyValueError::new_err(format!("SQL execution failed: {}", e)))?;
// Only the first payload is converted; later statements in a
// multi-statement query execute but their results are dropped.
let result = results
.into_iter()
.next()
.ok_or_else(|| PyValueError::new_err("No result from SQL query"))?;
Python::with_gil(|py| {
match result {
Payload::Select { labels, rows } => {
match format {
"dict" | "dicts" => {
// One dict per row, keyed by column label.
let py_list = PyList::empty_bound(py);
for row in rows {
let dict = PyDict::new_bound(py);
for (i, value) in row.iter().enumerate() {
// Guard against rows wider than the label list.
if i < labels.len() {
let py_value = sql_value_to_python(py, value)?;
dict.set_item(&labels[i], py_value)?;
}
}
py_list.append(dict)?;
}
Ok(py_list.into())
}
"tuples" => {
// Returns ([label, ...], [[cell, ...], ...]).
let py_labels = PyList::empty_bound(py);
for label in &labels {
py_labels.append(label)?;
}
let py_rows = PyList::empty_bound(py);
for row in rows {
let py_row = PyList::empty_bound(py);
for value in row {
let py_value = sql_value_to_python(py, &value)?;
py_row.append(py_value)?;
}
py_rows.append(py_row)?;
}
Ok((py_labels, py_rows).into_py(py))
}
"json" => {
// Serialize rows as an array of {label: value} objects.
let mut json_rows = Vec::new();
for row in rows {
let mut json_row = serde_json::Map::new();
for (i, value) in row.iter().enumerate() {
if i < labels.len() {
let json_value = sql_value_to_json(value);
json_row.insert(labels[i].clone(), json_value);
}
}
json_rows.push(serde_json::Value::Object(json_row));
}
let json_str = serde_json::to_string_pretty(&json_rows)
.map_err(|e| {
PyValueError::new_err(format!(
"JSON serialization failed: {}",
e
))
})?;
Ok(json_str.into_py(py))
}
"csv" => {
// NOTE(review): cells are rendered with Debug formatting (e.g.
// Str("x")) and quoted only when they contain a comma; embedded
// quotes/newlines are not escaped and headers are never quoted.
// Confirm this looser-than-RFC-4180 CSV is intended.
let mut csv_str = labels.join(",") + "\n";
for row in rows {
let row_strs: Vec<String> = row
.iter()
.map(|v| {
let s = format!("{:?}", v);
if s.contains(',') {
format!("\"{}\"", s.replace('"', "\"\""))
} else {
s
}
})
.collect();
csv_str.push_str(&row_strs.join(","));
csv_str.push('\n');
}
Ok(csv_str.into_py(py))
}
_ => Err(PyValueError::new_err(format!(
"Unknown format: {}. Use 'dict', 'tuples', 'json', or 'csv'",
format
))),
}
}
// DML/DDL payloads: report the operation type (and row count where known).
Payload::Insert(count) => {
let dict = PyDict::new_bound(py);
dict.set_item("type", "insert")?;
dict.set_item("count", count)?;
Ok(dict.into())
}
Payload::Update(count) => {
let dict = PyDict::new_bound(py);
dict.set_item("type", "update")?;
dict.set_item("count", count)?;
Ok(dict.into())
}
Payload::Delete(count) => {
let dict = PyDict::new_bound(py);
dict.set_item("type", "delete")?;
dict.set_item("count", count)?;
Ok(dict.into())
}
Payload::Create => {
let dict = PyDict::new_bound(py);
dict.set_item("type", "create")?;
dict.set_item("success", true)?;
Ok(dict.into())
}
Payload::DropTable(_) => {
let dict = PyDict::new_bound(py);
dict.set_item("type", "drop_table")?;
dict.set_item("success", true)?;
Ok(dict.into())
}
Payload::AlterTable => {
let dict = PyDict::new_bound(py);
dict.set_item("type", "alter_table")?;
dict.set_item("success", true)?;
Ok(dict.into())
}
// Any other payload kind is reported as a generic success.
_ => {
let dict = PyDict::new_bound(py);
dict.set_item("type", "success")?;
dict.set_item("success", true)?;
Ok(dict.into())
}
}
})
})
})
}
/// Run several statements in order, returning one "dict"-formatted result per
/// query. Stops at (and propagates) the first error.
fn execute_many(&self, py: Python, queries: Vec<String>) -> PyResult<Vec<Py<PyAny>>> {
let mut results = Vec::new();
for query in queries {
let result = self.execute(py, query, "dict")?;
results.push(result);
}
Ok(results)
}
/// Issue a SQL COMMIT. The `_message` argument is currently ignored —
/// presumably intended as the underlying git commit message; TODO confirm.
fn commit(&self, py: Python, _message: String) -> PyResult<String> {
py.allow_threads(|| {
let runtime = tokio::runtime::Runtime::new()
.map_err(|e| PyValueError::new_err(format!("Failed to create runtime: {}", e)))?;
let mut glue = self.inner.lock();
runtime.block_on(async {
glue.execute("COMMIT")
.await
.map_err(|e| PyValueError::new_err(format!("Failed to commit: {}", e)))?;
Ok("committed".to_string())
})
})
}
/// Convenience wrapper building `CREATE TABLE name (col type, ...)`.
/// NOTE(review): table/column names are interpolated verbatim — callers must
/// pass trusted identifiers.
fn create_table(
&self,
py: Python,
table_name: String,
columns: Vec<(String, String)>,
) -> PyResult<Py<PyAny>> {
let mut column_defs = Vec::new();
for (name, dtype) in columns {
column_defs.push(format!("{} {}", name, dtype));
}
let query = format!("CREATE TABLE {} ({})", table_name, column_defs.join(", "));
self.execute(py, query, "dict")
}
/// Convenience wrapper building a multi-row `INSERT INTO ... VALUES ...`.
/// Strings have single quotes doubled; ints/floats/None map to SQL literals.
/// NOTE(review): Python bool is an int subclass, so extract::<i64> likely
/// matches True/False first (inserting 1/0) and the bool branch may be
/// unreachable — confirm. The final fallback stringifies arbitrary objects
/// WITHOUT escaping quotes, so untrusted values can break the statement
/// (SQL injection risk).
fn insert(
&self,
py: Python,
table_name: String,
values: Vec<Vec<Py<PyAny>>>,
) -> PyResult<Py<PyAny>> {
if values.is_empty() {
return Err(PyValueError::new_err("No values to insert"));
}
let mut value_strings = Vec::new();
for row in values {
let mut row_values = Vec::new();
for value in row {
let value_str = Python::with_gil(|py| -> PyResult<String> {
if let Ok(s) = value.extract::<String>(py) {
Ok(format!("'{}'", s.replace('\'', "''")))
} else if let Ok(i) = value.extract::<i64>(py) {
Ok(i.to_string())
} else if let Ok(f) = value.extract::<f64>(py) {
Ok(f.to_string())
} else if let Ok(b) = value.extract::<bool>(py) {
Ok(b.to_string())
} else if value.is_none(py) {
Ok("NULL".to_string())
} else {
// Fallback: Display of the Python object, unescaped (see note above).
Ok(format!("'{}'", value))
}
})?;
row_values.push(value_str);
}
value_strings.push(format!("({})", row_values.join(", ")));
}
let query = format!(
"INSERT INTO {} VALUES {}",
table_name,
value_strings.join(", ")
);
self.execute(py, query, "dict")
}
/// Convenience wrapper building `SELECT cols FROM table [WHERE ...]`.
/// Defaults to `*` when `columns` is None. Identifiers and the where clause
/// are interpolated verbatim (trusted input only).
#[pyo3(signature = (table_name, columns=None, where_clause=None))]
fn select(
&self,
py: Python,
table_name: String,
columns: Option<Vec<String>>,
where_clause: Option<String>,
) -> PyResult<Py<PyAny>> {
let columns_str = columns
.map(|c| c.join(", "))
.unwrap_or_else(|| "*".to_string());
let mut query = format!("SELECT {} FROM {}", columns_str, table_name);
if let Some(where_str) = where_clause {
query.push_str(&format!(" WHERE {}", where_str));
}
self.execute(py, query, "dict")
}
}
#[cfg(feature = "sql")]
/// Convert a GlueSQL `Value` into the closest-matching Python object.
///
/// Scalars map to Python bool/int/float/str, `Bytea` to `bytes`, and
/// `Map`/`List` recurse into dicts/lists. Types Python cannot hold natively
/// without precision or representation loss (U128, Decimal, temporal values,
/// UUID, Interval, Inet) are rendered as strings.
fn sql_value_to_python(py: Python, value: &SqlValue) -> PyResult<Py<PyAny>> {
    Ok(match value {
        SqlValue::Null => py.None(),
        SqlValue::Bool(flag) => flag.into_py(py),
        SqlValue::I8(n) => n.into_py(py),
        SqlValue::I16(n) => n.into_py(py),
        SqlValue::I32(n) => n.into_py(py),
        SqlValue::I64(n) => n.into_py(py),
        SqlValue::I128(n) => n.into_py(py),
        SqlValue::U8(n) => n.into_py(py),
        SqlValue::U16(n) => n.into_py(py),
        SqlValue::U32(n) => n.into_py(py),
        SqlValue::U64(n) => n.into_py(py),
        // u128 is handed over as a string rather than a native int.
        SqlValue::U128(n) => n.to_string().into_py(py),
        SqlValue::F32(x) => x.into_py(py),
        SqlValue::F64(x) => x.into_py(py),
        SqlValue::Str(text) => text.into_py(py),
        SqlValue::Bytea(bytes) => PyBytes::new_bound(py, bytes).into(),
        SqlValue::Date(d) => d.to_string().into_py(py),
        SqlValue::Time(t) => t.to_string().into_py(py),
        SqlValue::Timestamp(ts) => ts.to_string().into_py(py),
        SqlValue::Interval(iv) => format!("{:?}", iv).into_py(py),
        SqlValue::Uuid(u) => u.to_string().into_py(py),
        SqlValue::Map(entries) => {
            // Recursively convert nested maps into Python dicts.
            let dict = PyDict::new_bound(py);
            for (name, item) in entries.iter() {
                dict.set_item(name, sql_value_to_python(py, item)?)?;
            }
            dict.into()
        }
        SqlValue::List(items) => {
            // Recursively convert nested lists into Python lists.
            let out = PyList::empty_bound(py);
            for item in items.iter() {
                out.append(sql_value_to_python(py, item)?)?;
            }
            out.into()
        }
        SqlValue::Decimal(d) => d.to_string().into_py(py),
        SqlValue::Point(p) => format!("POINT({} {})", p.x, p.y).into_py(py),
        SqlValue::Inet(addr) => addr.to_string().into_py(py),
    })
}
#[cfg(feature = "sql")]
/// Convert a GlueSQL `Value` into a `serde_json::Value` for the "json" output.
///
/// Binary data is base64-encoded; 128-bit integers and decimals become strings
/// so no precision is lost in JSON numbers; temporal values, UUIDs, intervals
/// and IPs are stringified.
fn sql_value_to_json(value: &SqlValue) -> serde_json::Value {
    use serde_json::Value as Json;
    match value {
        SqlValue::Null => Json::Null,
        SqlValue::Bool(flag) => Json::Bool(*flag),
        SqlValue::I8(n) => Json::from(*n),
        SqlValue::I16(n) => Json::from(*n),
        SqlValue::I32(n) => Json::from(*n),
        SqlValue::I64(n) => Json::from(*n),
        // 128-bit integers do not fit a JSON number; emit a string.
        SqlValue::I128(n) => Json::String(n.to_string()),
        SqlValue::U8(n) => Json::from(*n),
        SqlValue::U16(n) => Json::from(*n),
        SqlValue::U32(n) => Json::from(*n),
        SqlValue::U64(n) => Json::from(*n),
        SqlValue::U128(n) => Json::String(n.to_string()),
        SqlValue::F32(x) => serde_json::json!(x),
        SqlValue::F64(x) => serde_json::json!(x),
        SqlValue::Str(text) => Json::String(text.clone()),
        SqlValue::Bytea(bytes) => {
            use base64::Engine;
            // Raw bytes are not valid JSON; encode them as base64 text.
            Json::String(base64::engine::general_purpose::STANDARD.encode(bytes))
        }
        SqlValue::Date(d) => Json::String(d.to_string()),
        SqlValue::Time(t) => Json::String(t.to_string()),
        SqlValue::Timestamp(ts) => Json::String(ts.to_string()),
        SqlValue::Interval(iv) => Json::String(format!("{:?}", iv)),
        SqlValue::Uuid(u) => Json::String(u.to_string()),
        SqlValue::Map(entries) => Json::Object(
            entries
                .iter()
                .map(|(k, v)| (k.clone(), sql_value_to_json(v)))
                .collect(),
        ),
        SqlValue::List(items) => Json::Array(items.iter().map(sql_value_to_json).collect()),
        SqlValue::Decimal(d) => Json::String(d.to_string()),
        SqlValue::Point(p) => serde_json::json!({"x": p.x, "y": p.y}),
        SqlValue::Inet(addr) => Json::String(addr.to_string()),
    }
}
/// Python-facing namespaced KV store: keys live in named namespaces layered on
/// top of a git-versioned store. Exposed to Python as `NamespacedKvStore`.
#[pyclass(name = "NamespacedKvStore")]
struct PyNamespacedKvStore {
// Lock-protected store shared across Python threads.
inner: Arc<Mutex<GitNamespacedKvStore<32>>>,
}
#[pymethods]
impl PyNamespacedKvStore {
    /// Initialize a brand-new namespaced store rooted at `path`.
    #[new]
    fn new(path: String) -> PyResult<Self> {
        match GitNamespacedKvStore::<32>::init(&path) {
            Ok(store) => Ok(Self {
                inner: Arc::new(Mutex::new(store)),
            }),
            Err(e) => Err(PyValueError::new_err(format!(
                "Failed to initialize NamespacedKvStore: {e}"
            ))),
        }
    }
    /// Open a previously initialized store rooted at `path`.
    #[staticmethod]
    fn open(path: String) -> PyResult<Self> {
        match GitNamespacedKvStore::<32>::open(&path) {
            Ok(store) => Ok(Self {
                inner: Arc::new(Mutex::new(store)),
            }),
            Err(e) => Err(PyValueError::new_err(format!(
                "Failed to open NamespacedKvStore: {e}"
            ))),
        }
    }
    /// Insert `key` -> `value` inside the namespace named `namespace`.
    fn ns_insert(
        &self,
        namespace: &str,
        key: &Bound<'_, PyBytes>,
        value: &Bound<'_, PyBytes>,
    ) -> PyResult<()> {
        let (k, v) = (key.as_bytes().to_vec(), value.as_bytes().to_vec());
        let mut guard = self.inner.lock();
        guard
            .namespace(namespace)
            .insert(k, v)
            .map_err(|e| PyValueError::new_err(format!("Insert failed: {e}")))
    }
    /// Fetch a value from the given namespace, or None when absent.
    fn ns_get<'py>(
        &self,
        py: Python<'py>,
        namespace: &str,
        key: &Bound<'py, PyBytes>,
    ) -> PyResult<Option<Py<PyBytes>>> {
        let mut guard = self.inner.lock();
        let hit = guard.namespace(namespace).get(key.as_bytes());
        Ok(hit.map(|v| PyBytes::new_bound(py, &v).into()))
    }
    /// Delete a key from the given namespace; Ok(true) when it existed.
    fn ns_delete(&self, namespace: &str, key: &Bound<'_, PyBytes>) -> PyResult<bool> {
        let mut guard = self.inner.lock();
        guard
            .namespace(namespace)
            .delete(key.as_bytes())
            .map_err(|e| PyValueError::new_err(format!("Delete failed: {e}")))
    }
    /// All keys currently stored in the given namespace.
    fn ns_list_keys<'py>(&self, py: Python<'py>, namespace: &str) -> PyResult<Py<PyList>> {
        let mut guard = self.inner.lock();
        let raw = guard.namespace(namespace).list_keys();
        let out = PyList::empty_bound(py);
        for k in &raw {
            out.append(PyBytes::new_bound(py, k))?;
        }
        Ok(out.into())
    }
    /// Names of all namespaces currently present.
    fn list_namespaces(&self) -> Vec<String> {
        self.inner.lock().list_namespaces()
    }
    /// Remove an entire namespace; Ok(true) when it existed.
    fn delete_namespace(&self, namespace: &str) -> PyResult<bool> {
        let res = self.inner.lock().delete_namespace(namespace);
        res.map_err(|e| PyValueError::new_err(format!("Delete namespace failed: {e}")))
    }
    /// Root hash of a namespace's tree, or None when the namespace is absent.
    fn get_namespace_root_hash<'py>(
        &self,
        py: Python<'py>,
        namespace: &str,
    ) -> Option<Py<PyBytes>> {
        let hash = self.inner.lock().get_namespace_root_hash(namespace)?;
        Some(PyBytes::new_bound(py, hash.as_bytes()).into())
    }
    /// True when `namespace` differs between the two given commits.
    fn namespace_changed(&self, namespace: &str, commit_a: &str, commit_b: &str) -> PyResult<bool> {
        let res = self
            .inner
            .lock()
            .namespace_changed(namespace, commit_a, commit_b);
        res.map_err(|e| PyValueError::new_err(format!("namespace_changed failed: {e}")))
    }
    /// Insert into the default (root) namespace.
    fn insert(&self, key: &Bound<'_, PyBytes>, value: &Bound<'_, PyBytes>) -> PyResult<()> {
        let (k, v) = (key.as_bytes().to_vec(), value.as_bytes().to_vec());
        self.inner
            .lock()
            .insert(k, v)
            .map_err(|e| PyValueError::new_err(format!("Insert failed: {e}")))
    }
    /// Read from the default namespace; None when the key is missing.
    fn get<'py>(
        &self,
        py: Python<'py>,
        key: &Bound<'py, PyBytes>,
    ) -> PyResult<Option<Py<PyBytes>>> {
        let hit = self.inner.lock().get(key.as_bytes());
        Ok(hit.map(|v| PyBytes::new_bound(py, &v).into()))
    }
    /// Delete from the default namespace; Ok(true) when the key existed.
    fn delete(&self, key: &Bound<'_, PyBytes>) -> PyResult<bool> {
        let res = self.inner.lock().delete(key.as_bytes());
        res.map_err(|e| PyValueError::new_err(format!("Delete failed: {e}")))
    }
    /// All keys in the default namespace, as a list of bytes objects.
    fn list_keys<'py>(&self, py: Python<'py>) -> PyResult<Py<PyList>> {
        let raw = self.inner.lock().list_keys();
        let out = PyList::empty_bound(py);
        for k in &raw {
            out.append(PyBytes::new_bound(py, k))?;
        }
        Ok(out.into())
    }
    /// Commit staged changes; defaults to the message "commit". Returns the
    /// new commit id as a hex string.
    #[pyo3(signature = (message=None))]
    fn commit(&self, message: Option<&str>) -> PyResult<String> {
        self.inner
            .lock()
            .commit(message.unwrap_or("commit"))
            .map(|id| id.to_hex().to_string())
            .map_err(|e| PyValueError::new_err(format!("Commit failed: {e}")))
    }
    /// Check out a branch name or commit id.
    fn checkout(&self, branch_or_commit: &str) -> PyResult<()> {
        let res = self.inner.lock().checkout(branch_or_commit);
        res.map_err(|e| PyValueError::new_err(format!("Checkout failed: {e}")))
    }
    /// Create a new branch at the current commit.
    fn branch(&self, name: &str) -> PyResult<()> {
        let res = self.inner.lock().create_branch(name);
        res.map_err(|e| PyValueError::new_err(format!("Branch failed: {e}")))
    }
    /// Name of the branch currently checked out.
    #[getter]
    fn current_branch(&self) -> String {
        self.inner.lock().current_branch().to_string()
    }
    /// Commit history as a list of {id, message, author, timestamp} dicts.
    fn log(&self) -> PyResult<Vec<HashMap<String, String>>> {
        let commits = self
            .inner
            .lock()
            .log()
            .map_err(|e| PyValueError::new_err(format!("Log failed: {e}")))?;
        let mut out = Vec::with_capacity(commits.len());
        for c in commits {
            let mut entry = HashMap::new();
            entry.insert("id".to_string(), c.id.to_hex().to_string());
            entry.insert("message".to_string(), c.message);
            entry.insert("author".to_string(), c.author);
            entry.insert("timestamp".to_string(), c.timestamp.to_string());
            out.push(entry);
        }
        Ok(out)
    }
    /// Run the v1 -> v2 on-disk migration and summarize what was moved.
    fn migrate_v1_to_v2(&self) -> PyResult<HashMap<String, String>> {
        let report = self
            .inner
            .lock()
            .migrate_v1_to_v2()
            .map_err(|e| PyValueError::new_err(format!("Migration failed: {e}")))?;
        let mut out = HashMap::new();
        out.insert(
            "keys_migrated".to_string(),
            report.keys_migrated.to_string(),
        );
        out.insert(
            "namespaces_created".to_string(),
            report.namespaces_created.join(", "),
        );
        Ok(out)
    }
    /// Short, human-readable summary used by Python's repr().
    fn __repr__(&self) -> String {
        let guard = self.inner.lock();
        format!(
            "NamespacedKvStore(namespaces={}, branch='{}')",
            guard.list_namespaces().len(),
            guard.current_branch()
        )
    }
}
/// PyO3 module entry point: registers every exposed class with the Python
/// `prollytree` module. Feature-gated classes are registered only when the
/// corresponding cargo feature is enabled.
#[pymodule]
fn prollytree(m: &Bound<'_, PyModule>) -> PyResult<()> {
m.add_class::<PyTreeConfig>()?;
m.add_class::<PyProllyTree>()?;
m.add_class::<PyStorageBackend>()?;
m.add_class::<PyMergeConflict>()?;
m.add_class::<PyConflictResolution>()?;
m.add_class::<PyDiffOperation>()?;
m.add_class::<PyKvDiff>()?;
m.add_class::<PyVersionedKvStore>()?;
// NOTE(review): PyNamespacedKvStore is declared without a
// `#[cfg(feature = "git")]` gate but only registered under it — confirm the
// class still compiles (merely unregistered) when the feature is off.
#[cfg(feature = "git")]
m.add_class::<PyNamespacedKvStore>()?;
#[cfg(feature = "git")]
m.add_class::<PyWorktreeManager>()?;
#[cfg(feature = "git")]
m.add_class::<PyWorktreeVersionedKvStore>()?;
#[cfg(feature = "sql")]
m.add_class::<PyProllySQLStore>()?;
Ok(())
}