pub mod backend_resolver;
pub mod filesystem;
pub mod types;
pub use filesystem::FilesystemDocumentService;
pub use types::{
CreateOptions, DocumentDiff, DocumentSummary, DocumentVisibility, ListFilter, UpdateOptions,
};
use crate::agent::Agent;
use crate::agent::boilerplate::BoilerPlate;
use crate::agent::document::{DocumentTraits, JACSDocument};
use crate::agent::loaders::{FileLoader, fetch_remote_public_key};
use crate::agent::{DOCUMENT_AGENT_SIGNATURE_FIELDNAME, SHA256_FIELDNAME};
use crate::config::{KeyResolutionSource, get_key_resolution_order};
use crate::error::JacsError;
use crate::search::{SearchQuery, SearchResults};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use tracing::{debug, warn};
#[cfg(all(not(target_arch = "wasm32"), feature = "sqlite"))]
use crate::storage::SqliteDocumentService;
/// File name of the SQLite database used by the sqlite document backend;
/// placed inside the configured data directory (see `sqlite_database_path`).
const SQLITE_DOCUMENT_DB_FILENAME: &str = "jacs_documents.sqlite3";
/// Returns `true` when the loaded agent is the same principal as `signer_id`.
///
/// Ids may carry a `:` suffix (e.g. a version qualifier); comparison is done
/// on the portion before the first `:`. A missing or empty id on either side
/// never matches.
fn same_signer(current_agent_id: Option<&str>, signer_id: &str) -> bool {
    // Keep only the bare id before any ':' suffix.
    fn bare_id(id: &str) -> &str {
        match id.split_once(':') {
            Some((head, _)) => head,
            None => id,
        }
    }
    let Some(agent_id) = current_agent_id else {
        return false;
    };
    if agent_id.is_empty() || signer_id.is_empty() {
        return false;
    }
    bare_id(agent_id) == bare_id(signer_id)
}
/// Extract the signing agent's id (`agentID`) from the document's embedded
/// signature object, if both are present and the id is a string.
fn document_signer_id(value: &serde_json::Value) -> Option<&str> {
    let signature = value.get(DOCUMENT_AGENT_SIGNATURE_FIELDNAME)?;
    signature.get("agentID")?.as_str()
}
/// Resolve the public key (and, when known, its encryption/algorithm type)
/// needed to verify the embedded signature on `value`.
///
/// Resolution order:
/// 1. Fast path: if the signer is the currently loaded agent, use its own key.
/// 2. Otherwise walk the configured key-resolution sources in order
///    (local filesystem, DNS — currently skipped, remote registry) until one
///    yields a usable key.
///
/// # Errors
/// Returns the last error observed while trying sources, or a generic
/// `JacsError::DocumentError` when no source was applicable at all.
fn resolve_document_verification_key(
    agent: &mut Agent,
    value: &serde_json::Value,
) -> Result<(Vec<u8>, Option<String>), JacsError> {
    // Truncate a hash for log output without panicking: byte-slicing
    // `s[..16]` would panic if byte 16 is not a UTF-8 char boundary, and
    // these values come from untrusted document JSON. Back up to the
    // nearest boundary instead.
    fn log_prefix(s: &str) -> &str {
        let mut end = s.len().min(16);
        while !s.is_char_boundary(end) {
            end -= 1;
        }
        &s[..end]
    }
    // Read a string field out of the document's signature object,
    // defaulting to "" when the signature or field is absent.
    fn signature_field(value: &serde_json::Value, field: &str) -> String {
        value
            .get(DOCUMENT_AGENT_SIGNATURE_FIELDNAME)
            .and_then(|sig| sig.get(field))
            .and_then(|v| v.as_str())
            .unwrap_or_default()
            .to_string()
    }
    let signer_id = document_signer_id(value).unwrap_or_default();
    let current_agent_id = agent
        .get_value()
        .and_then(|agent_value| agent_value.get("jacsId"))
        .and_then(|id| id.as_str());
    // Fast path: the document was signed by the agent we have loaded.
    if same_signer(current_agent_id, signer_id) {
        return Ok((agent.get_public_key()?, None));
    }
    let public_key_hash = signature_field(value, "publicKeyHash");
    let signer_version = signature_field(value, "agentVersion");
    let resolution_order = get_key_resolution_order();
    let mut last_error: Option<JacsError> = None;
    for source in &resolution_order {
        debug!("Resolving document verification key via {:?}", source);
        match source {
            KeyResolutionSource::Local => match agent.fs_load_public_key(&public_key_hash) {
                Ok(key) => {
                    // A missing key-type record is not fatal; verification
                    // can proceed without it.
                    let enc_type = agent.fs_load_public_key_type(&public_key_hash).ok();
                    return Ok((key, enc_type));
                }
                Err(err) => last_error = Some(err),
            },
            // DNS-based resolution is not implemented for documents yet.
            KeyResolutionSource::Dns => {
                continue;
            }
            KeyResolutionSource::Registry => {
                if signer_id.is_empty() {
                    continue;
                }
                let requested_version = if signer_version.is_empty() {
                    "latest".to_string()
                } else {
                    signer_version.clone()
                };
                match fetch_remote_public_key(signer_id, &requested_version) {
                    Ok(key_info) => {
                        // Reject a registry key whose hash disagrees with the
                        // hash recorded in the document's own signature.
                        if !public_key_hash.is_empty()
                            && !key_info.hash.is_empty()
                            && key_info.hash != public_key_hash
                        {
                            warn!(
                                "Registry key hash mismatch for signer {}: expected {}..., got {}...",
                                signer_id,
                                log_prefix(&public_key_hash),
                                log_prefix(&key_info.hash)
                            );
                            last_error = Some(JacsError::VerificationClaimFailed {
                                claim: "registry".to_string(),
                                reason: format!(
                                    "Registry key hash mismatch for signer '{}'",
                                    signer_id
                                ),
                            });
                            continue;
                        }
                        return Ok((key_info.public_key, Some(key_info.algorithm)));
                    }
                    Err(err) => last_error = Some(err),
                }
            }
        }
    }
    Err(last_error.unwrap_or_else(|| {
        JacsError::DocumentError(format!(
            "Failed to resolve verification key for signer '{}'",
            signer_id
        ))
    }))
}
/// Verify a `JACSDocument` (hash, attached files, and signature) using
/// `agent`. Thin wrapper over [`verify_document_value_with_agent`] applied
/// to the document's JSON payload.
pub(crate) fn verify_document_with_agent(
    agent: &mut Agent,
    doc: &JACSDocument,
) -> Result<(), JacsError> {
    let payload = &doc.value;
    verify_document_value_with_agent(agent, payload)
}
/// Verify a raw document JSON value with `agent`.
///
/// Runs three checks in order, failing fast on the first error:
/// 1. the document's content hash,
/// 2. hashes of any attached files,
/// 3. the embedded agent signature, using a key resolved via
///    [`resolve_document_verification_key`].
pub(crate) fn verify_document_value_with_agent(
    agent: &mut Agent,
    value: &serde_json::Value,
) -> Result<(), JacsError> {
    // Step 1: content hash. The returned value is not needed here.
    let _ = agent.verify_hash(value)?;
    // Step 2: attached-file hashes.
    agent.verify_document_files(value)?;
    // Step 3: resolve the signer's key, then check the signature itself.
    let (key_bytes, key_enc_type) = resolve_document_verification_key(agent, value)?;
    agent.signature_verification_procedure(
        value,
        None,
        DOCUMENT_AGENT_SIGNATURE_FIELDNAME,
        key_bytes,
        key_enc_type,
        None,
        None,
    )?;
    Ok(())
}
/// Returns `true` when `value` already carries signed-document metadata:
/// either the agent-signature field or the sha256 hash field is present.
pub(crate) fn has_signed_document_headers(value: &serde_json::Value) -> bool {
    [DOCUMENT_AGENT_SIGNATURE_FIELDNAME, SHA256_FIELDNAME]
        .iter()
        .any(|field| value.get(*field).is_some())
}
/// Full path of the SQLite document database inside `base_dir`.
pub fn sqlite_database_path(base_dir: &Path) -> PathBuf {
    let mut path = base_dir.to_path_buf();
    path.push(SQLITE_DOCUMENT_DB_FILENAME);
    path
}
/// Build a [`DocumentService`] backed by the storage configured on `agent`.
///
/// Reads the agent's config (data directory, default storage backend — the
/// backend value may be a connection string), resolves the backend, and
/// returns the matching service implementation. The sqlite backend is only
/// available off-wasm with the `sqlite` feature enabled.
///
/// # Errors
/// * `JacsError::Internal` — poisoned agent lock, or the agent has no config.
/// * `JacsError::StorageError` — directory creation failed or the backend
///   is unsupported.
pub fn service_from_agent(agent: Arc<Mutex<Agent>>) -> Result<Arc<dyn DocumentService>, JacsError> {
    // Copy everything needed out of the agent under a short-lived lock so
    // the lock is released before any filesystem work happens.
    let (storage_input, base_dir, agent_storage) = {
        let agent_guard = agent.lock().map_err(|e| JacsError::Internal {
            message: format!("Failed to acquire agent lock: {}", e),
        })?;
        let config = agent_guard
            .config
            .as_ref()
            .ok_or_else(|| JacsError::Internal {
                message: "Agent has no config; load an agent first".to_string(),
            })?;
        let data_dir = config
            .jacs_data_directory()
            .as_ref()
            .cloned()
            .unwrap_or_else(|| "./jacs_data".to_string());
        let storage = config
            .jacs_default_storage()
            .as_ref()
            .cloned()
            .unwrap_or_else(|| "fs".to_string());
        let agent_storage = agent_guard.storage_ref().clone();
        (storage, PathBuf::from(data_dir), agent_storage)
    };
    let backend_config = backend_resolver::resolve(&storage_input)?;
    // Never log the raw storage string: it may be a connection string
    // embedding credentials.
    let redacted = backend_resolver::redact_connection_string(&storage_input);
    debug!(
        "Resolved storage backend: {} (from '{}')",
        backend_config.backend_type, redacted
    );
    match backend_config.backend_type.as_str() {
        "fs" => Ok(Arc::new(FilesystemDocumentService::new(
            Arc::new(agent_storage),
            agent,
            base_dir,
        ))),
        #[cfg(all(not(target_arch = "wasm32"), feature = "sqlite"))]
        "rusqlite" | "sqlite" => {
            // Use an explicit db path when the backend config supplied one;
            // otherwise place the database inside the data directory.
            let db_path = if let Some(ref path) = backend_config.path {
                PathBuf::from(path)
            } else {
                // create_dir_all creates every missing ancestor, so a
                // separate parent-directory creation pass is unnecessary.
                std::fs::create_dir_all(&base_dir).map_err(|e| {
                    JacsError::StorageError(format!(
                        "Failed to create data directory '{}': {}",
                        base_dir.display(),
                        e
                    ))
                })?;
                sqlite_database_path(&base_dir)
            };
            let service = SqliteDocumentService::with_agent(&db_path.to_string_lossy(), agent)?;
            Ok(Arc::new(service))
        }
        unsupported => Err(JacsError::StorageError(format!(
            "DocumentService is not yet wired for storage backend '{}'. \
            Supported backends: fs, sqlite, rusqlite",
            unsupported
        ))),
    }
}
/// Storage-agnostic interface for JACS document operations.
///
/// Implemented by [`FilesystemDocumentService`] and (behind the `sqlite`
/// feature) `SqliteDocumentService`; obtain an instance via
/// [`service_from_agent`]. `Send + Sync` so implementations can be shared
/// across threads as `Arc<dyn DocumentService>`.
pub trait DocumentService: Send + Sync {
    /// Create a new document from raw JSON, applying `options`.
    fn create(&self, json: &str, options: CreateOptions) -> Result<JACSDocument, JacsError>;
    /// Fetch a document by its storage key.
    fn get(&self, key: &str) -> Result<JACSDocument, JacsError>;
    /// Fetch the most recent version of the document with `document_id`.
    fn get_latest(&self, document_id: &str) -> Result<JACSDocument, JacsError>;
    /// Replace a document's content with `new_json`, producing a new version.
    fn update(
        &self,
        document_id: &str,
        new_json: &str,
        options: UpdateOptions,
    ) -> Result<JACSDocument, JacsError>;
    /// Remove the document stored under `key`, returning the removed document.
    fn remove(&self, key: &str) -> Result<JACSDocument, JacsError>;
    /// List document summaries matching `filter`.
    fn list(&self, filter: ListFilter) -> Result<Vec<DocumentSummary>, JacsError>;
    /// All stored versions of the document with `document_id`.
    fn versions(&self, document_id: &str) -> Result<Vec<JACSDocument>, JacsError>;
    /// Compute the difference between the documents under `key_a` and `key_b`.
    fn diff(&self, key_a: &str, key_b: &str) -> Result<DocumentDiff, JacsError>;
    /// Run a search query against stored documents.
    fn search(&self, query: SearchQuery) -> Result<SearchResults, JacsError>;
    /// Create several documents with the same options.
    /// On failure returns the accumulated errors instead of created documents.
    fn create_batch(
        &self,
        documents: &[&str],
        options: CreateOptions,
    ) -> Result<Vec<JACSDocument>, Vec<JacsError>>;
    /// Current visibility of the document stored under `key`.
    fn visibility(&self, key: &str) -> Result<DocumentVisibility, JacsError>;
    /// Set the visibility of the document stored under `key`.
    fn set_visibility(&self, key: &str, visibility: DocumentVisibility) -> Result<(), JacsError>;
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Compile-time guarantee: `DocumentService` stays object-safe, so it
    /// can be handed out as `Arc<dyn DocumentService>`.
    #[test]
    fn document_service_is_object_safe() {
        fn _takes_trait_object(_service: &dyn DocumentService) {}
    }
    /// Compile-time guarantee: the trait object is `Send + Sync`, so it can
    /// be shared across threads.
    #[test]
    fn document_service_is_send_sync() {
        fn _require_send_sync<T: Send + Sync + ?Sized>() {}
        _require_send_sync::<dyn DocumentService>();
    }
}