use crate::packs::types::Pack;
use ggen_utils::error::{Error, Result};
use oxigraph::model::*;
use oxigraph::sparql::QueryResults;
use oxigraph::store::Store;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::{Duration, Instant};
use tracing::{debug, info};
/// Executes SPARQL queries over pack metadata using an in-memory oxigraph
/// [`Store`], with a simple TTL-based result cache keyed per pack and query.
pub struct SparqlExecutor {
// Underlying RDF store the queries run against.
store: Store,
// Result cache keyed by "<pack id>:<query text>"; entries expire by TTL.
cache: HashMap<String, CachedResult>,
}
#[derive(Clone)]
/// A cached query result plus the bookkeeping needed for TTL expiry.
struct CachedResult {
// Materialized result handed back on cache hits.
result: SparqlResult,
// Insertion time; compared against `ttl` on lookup.
timestamp: Instant,
// How long after `timestamp` the entry remains valid.
ttl: Duration,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
/// Tabular result of a SPARQL query: column names, rows of simplified
/// values, and how long the query took to execute.
pub struct SparqlResult {
// Variable names from the SELECT clause (or "result" for ASK queries).
pub columns: Vec<String>,
// One Vec<Value> per solution, aligned with `columns`.
pub rows: Vec<Vec<Value>>,
// Wall-clock time measured around query execution.
pub execution_time: Duration,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
/// Simplified, JSON-friendly representation of an RDF term.
/// `untagged` makes each variant serialize as its bare value.
pub enum Value {
String(String),
Integer(i64),
Float(f64),
Boolean(bool),
// Used when a SELECT variable is unbound in a solution.
Null,
}
#[allow(dead_code)]
/// A validated query retained as its source text. No parsing is performed;
/// "compilation" currently only rejects empty input (see `compile_query`).
pub struct CompiledQuery {
query_string: String,
}
impl SparqlExecutor {
/// Create an executor backed by a fresh in-memory store and an empty cache.
///
/// # Errors
/// Propagates any failure from constructing the oxigraph [`Store`].
pub fn new() -> Result<Self> {
    let store = Store::new()?;
    Ok(Self {
        store,
        cache: HashMap::new(),
    })
}
/// Run `query` against the RDF representation of `pack`.
///
/// Results are cached per `(pack id, query)` for 300 seconds; a fresh
/// cache hit returns immediately without touching the store. Stale
/// entries are evicted on lookup so the cache cannot grow without bound.
///
/// # Errors
/// Returns an error if the store rejects the query or a solution row
/// cannot be converted.
pub fn execute_query(&mut self, pack: &Pack, query: &str) -> Result<SparqlResult> {
    let start = Instant::now();
    let cache_key = format!("{}:{}", pack.id, query);
    if let Some(cached) = self.cache.get(&cache_key) {
        if cached.timestamp.elapsed() < cached.ttl {
            debug!("Cache hit for query on pack '{}'", pack.id);
            return Ok(cached.result.clone());
        }
    }
    // Evict a stale entry if one exists (no-op when the key is absent).
    // Previously, expired entries were skipped but never removed, so the
    // cache leaked one entry per distinct (pack, query) pair forever.
    self.cache.remove(&cache_key);
    self.load_pack_rdf(pack)?;
    #[allow(deprecated)]
    let results = self.store.query(query)?;
    let sparql_result = self.convert_results(results, start.elapsed())?;
    self.cache.insert(
        cache_key,
        CachedResult {
            result: sparql_result.clone(),
            timestamp: Instant::now(),
            // Fixed 5-minute TTL for all entries.
            ttl: Duration::from_secs(300),
        },
    );
    Ok(sparql_result)
}
#[allow(dead_code)]
pub fn compile_query(&self, query: &str) -> Result<CompiledQuery> {
if query.trim().is_empty() {
return Err(Error::new("Query cannot be empty"));
}
Ok(CompiledQuery {
query_string: query.to_string(),
})
}
/// Render `pack` as a list of N-Triples statements describing the pack
/// and its packages, templates, dependencies, tags, and keywords.
///
/// String-valued metadata is escaped so quotes, backslashes, and newlines
/// cannot produce malformed triples (previously values were interpolated
/// verbatim into literal positions).
///
/// NOTE(review): `pack.id` is interpolated into IRIs unescaped — assumed
/// to be IRI-safe; confirm against how pack ids are validated.
///
/// # Errors
/// Currently infallible; returns `Result` for interface stability.
pub fn get_pack_rdf(&self, pack: &Pack) -> Result<Vec<String>> {
    // Escape characters significant inside an N-Triples string literal.
    fn esc(raw: &str) -> String {
        raw.replace('\\', "\\\\")
            .replace('"', "\\\"")
            .replace('\n', "\\n")
            .replace('\r', "\\r")
    }
    let mut triples = Vec::new();
    let pack_ns = format!("http://ggen.io/pack/{}/", pack.id);
    let rdf_ns = "http://www.w3.org/1999/02/22-rdf-syntax-ns#";
    let rdfs_ns = "http://www.w3.org/2000/01/rdf-schema#";
    let ggen_ns = "http://ggen.io/ontology#";
    // Core pack facts.
    triples.push(format!("<{}> <{}type> <{}Pack> .", pack_ns, rdf_ns, ggen_ns));
    triples.push(format!("<{}> <{}label> \"{}\" .", pack_ns, rdfs_ns, esc(&pack.name)));
    triples.push(format!("<{}> <{}version> \"{}\" .", pack_ns, ggen_ns, esc(&pack.version)));
    triples.push(format!("<{}> <{}description> \"{}\" .", pack_ns, ggen_ns, esc(&pack.description)));
    triples.push(format!("<{}> <{}category> \"{}\" .", pack_ns, ggen_ns, esc(&pack.category)));
    if let Some(author) = &pack.author {
        triples.push(format!("<{}> <{}author> \"{}\" .", pack_ns, ggen_ns, esc(author)));
    }
    if let Some(license) = &pack.license {
        triples.push(format!("<{}> <{}license> \"{}\" .", pack_ns, ggen_ns, esc(license)));
    }
    // Booleans render as "true"/"false"; no escaping needed.
    triples.push(format!("<{}> <{}productionReady> \"{}\" .", pack_ns, ggen_ns, pack.production_ready));
    // Child resources get index-based URIs under the pack namespace.
    for (idx, package) in pack.packages.iter().enumerate() {
        let pkg_uri = format!("{}package/{}", pack_ns, idx);
        triples.push(format!("<{}> <{}hasPackage> <{}> .", pack_ns, ggen_ns, pkg_uri));
        triples.push(format!("<{}> <{}label> \"{}\" .", pkg_uri, rdfs_ns, esc(package)));
    }
    for (idx, template) in pack.templates.iter().enumerate() {
        let tmpl_uri = format!("{}template/{}", pack_ns, idx);
        triples.push(format!("<{}> <{}hasTemplate> <{}> .", pack_ns, ggen_ns, tmpl_uri));
        triples.push(format!("<{}> <{}label> \"{}\" .", tmpl_uri, rdfs_ns, esc(&template.name)));
        triples.push(format!("<{}> <{}path> \"{}\" .", tmpl_uri, ggen_ns, esc(&template.path)));
        triples.push(format!("<{}> <{}description> \"{}\" .", tmpl_uri, ggen_ns, esc(&template.description)));
    }
    for (idx, dep) in pack.dependencies.iter().enumerate() {
        let dep_uri = format!("{}dependency/{}", pack_ns, idx);
        triples.push(format!("<{}> <{}hasDependency> <{}> .", pack_ns, ggen_ns, dep_uri));
        triples.push(format!("<{}> <{}packId> \"{}\" .", dep_uri, ggen_ns, esc(&dep.pack_id)));
        triples.push(format!("<{}> <{}version> \"{}\" .", dep_uri, ggen_ns, esc(&dep.version)));
        triples.push(format!("<{}> <{}optional> \"{}\" .", dep_uri, ggen_ns, dep.optional));
    }
    for tag in &pack.tags {
        triples.push(format!("<{}> <{}tag> \"{}\" .", pack_ns, ggen_ns, esc(tag)));
    }
    for keyword in &pack.keywords {
        triples.push(format!("<{}> <{}keyword> \"{}\" .", pack_ns, ggen_ns, esc(keyword)));
    }
    Ok(triples)
}
/// Prepare the RDF triples for `pack` before a query runs.
///
/// NOTE(review): the generated triples are only written to the debug log —
/// nothing here inserts them into `self.store`, so `execute_query` appears
/// to run against an empty store. Confirm whether insertion is intended to
/// happen elsewhere, or whether a `store.load_*`/`store.insert` call is
/// missing in this loop.
fn load_pack_rdf(&mut self, pack: &Pack) -> Result<()> {
let triples = self.get_pack_rdf(pack)?;
let triple_count = triples.len();
for triple_str in triples {
// Log-only today; see NOTE above.
debug!("Loading triple: {}", triple_str);
}
info!("Loaded {} triples for pack '{}'", triple_count, pack.id);
Ok(())
}
fn convert_results(
&self, results: QueryResults, execution_time: Duration,
) -> Result<SparqlResult> {
match results {
QueryResults::Solutions(solutions) => {
let vars = solutions.variables().to_vec();
let columns: Vec<String> = vars.iter().map(|v| v.as_str().to_string()).collect();
let mut rows = Vec::new();
for solution in solutions {
let solution = solution
.map_err(|e| Error::new(&format!("Failed to process solution: {}", e)))?;
let mut row = Vec::new();
for var in &vars {
if let Some(term) = solution.get(var) {
row.push(self.term_to_value(term));
} else {
row.push(Value::Null);
}
}
rows.push(row);
}
Ok(SparqlResult {
columns,
rows,
execution_time,
})
}
QueryResults::Boolean(b) => Ok(SparqlResult {
columns: vec!["result".to_string()],
rows: vec![vec![Value::Boolean(b)]],
execution_time,
}),
QueryResults::Graph(_) => Err(Error::new(
"CONSTRUCT queries not yet supported (use SELECT queries)",
)),
}
}
/// Map an RDF term onto the simplified [`Value`] type.
///
/// Literals are classified by a best-effort probe of their lexical form —
/// integer first, then float, then boolean — falling back to a plain
/// string (datatype IRIs are not consulted).
fn term_to_value(&self, term: &Term) -> Value {
    match term {
        Term::NamedNode(node) => Value::String(node.as_str().to_string()),
        // Blank nodes keep the conventional "_:" prefix.
        Term::BlankNode(node) => Value::String(format!("_:{}", node.as_str())),
        Term::Literal(literal) => {
            let lexical = literal.value();
            if let Ok(integer) = lexical.parse::<i64>() {
                Value::Integer(integer)
            } else if let Ok(float) = lexical.parse::<f64>() {
                Value::Float(float)
            } else {
                match lexical {
                    "true" => Value::Boolean(true),
                    "false" => Value::Boolean(false),
                    other => Value::String(other.to_string()),
                }
            }
        }
        // Any other term kind (e.g. RDF-star triples) via Display.
        _ => Value::String(term.to_string()),
    }
}
/// Drop every cached query result, fresh or expired.
pub fn clear_cache(&mut self) {
self.cache.clear();
}
/// Summarize the cache: total entries, plus how many are still within
/// their TTL versus already expired (expired entries are counted, not
/// removed).
pub fn cache_stats(&self) -> CacheStats {
    let total_entries = self.cache.len();
    let valid_entries = self
        .cache
        .values()
        .filter(|entry| entry.timestamp.elapsed() < entry.ttl)
        .count();
    CacheStats {
        total_entries,
        valid_entries,
        expired_entries: total_entries - valid_entries,
    }
}
}
#[derive(Debug, Clone)]
/// Snapshot of cache occupancy returned by `SparqlExecutor::cache_stats`.
/// `total_entries == valid_entries + expired_entries`.
pub struct CacheStats {
pub total_entries: usize,
pub valid_entries: usize,
pub expired_entries: usize,
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::packs::types::{PackDependency, PackMetadata, PackTemplate};
    use std::collections::HashMap;

    /// Build the fully-populated pack fixture shared by the tests below.
    fn create_test_pack() -> Pack {
        let template = PackTemplate {
            name: "main".to_string(),
            path: "templates/main.tmpl".to_string(),
            description: "Main template".to_string(),
            variables: vec!["project_name".to_string()],
        };
        let dependency = PackDependency {
            pack_id: "dep-pack".to_string(),
            version: "1.0.0".to_string(),
            optional: false,
        };
        Pack {
            id: "test-pack".to_string(),
            name: "Test Pack".to_string(),
            version: "1.0.0".to_string(),
            description: "A test pack for SPARQL".to_string(),
            category: "testing".to_string(),
            author: Some("Test Author".to_string()),
            repository: Some("https://github.com/test/pack".to_string()),
            license: Some("MIT".to_string()),
            packages: vec!["pkg1".to_string(), "pkg2".to_string()],
            templates: vec![template],
            sparql_queries: HashMap::new(),
            dependencies: vec![dependency],
            tags: vec!["test".to_string(), "sparql".to_string()],
            keywords: vec!["testing".to_string()],
            production_ready: true,
            metadata: PackMetadata::default(),
        }
    }

    #[test]
    fn test_sparql_executor_creation() {
        assert!(SparqlExecutor::new().is_ok());
    }

    #[test]
    fn test_get_pack_rdf() {
        let exec = SparqlExecutor::new().unwrap();
        let triples = exec.get_pack_rdf(&create_test_pack()).unwrap();
        assert!(!triples.is_empty());
        // Spot-check that key metadata made it into the serialization.
        let joined = triples.join("\n");
        assert!(joined.contains("Test Pack"));
        assert!(joined.contains("1.0.0"));
        assert!(joined.contains("testing"));
        assert!(joined.contains("pkg1"));
        assert!(joined.contains("pkg2"));
    }

    #[test]
    fn test_compile_query_valid() {
        let exec = SparqlExecutor::new().unwrap();
        let source = "SELECT ?s ?p ?o WHERE { ?s ?p ?o }";
        let outcome = exec.compile_query(source);
        assert!(outcome.is_ok());
        assert_eq!(outcome.unwrap().query_string, source);
    }

    #[test]
    fn test_compile_query_invalid() {
        let exec = SparqlExecutor::new().unwrap();
        // An empty query must be rejected.
        assert!(exec.compile_query("").is_err());
    }

    #[test]
    fn test_cache_stats() {
        let exec = SparqlExecutor::new().unwrap();
        let stats = exec.cache_stats();
        assert_eq!(stats.total_entries, 0);
        assert_eq!(stats.valid_entries, 0);
        assert_eq!(stats.expired_entries, 0);
    }

    #[test]
    fn test_clear_cache() {
        let mut exec = SparqlExecutor::new().unwrap();
        let entry = CachedResult {
            result: SparqlResult {
                columns: vec![],
                rows: vec![],
                execution_time: Duration::from_millis(10),
            },
            timestamp: Instant::now(),
            ttl: Duration::from_secs(300),
        };
        exec.cache.insert("test-key".to_string(), entry);
        assert_eq!(exec.cache.len(), 1);
        exec.clear_cache();
        assert_eq!(exec.cache.len(), 0);
    }
}