use crate::args::Cli;
use crate::index_discovery::{augment_query_with_scope, find_nearest_index};
use crate::output::{
DisplaySymbol, OutputStreams, call_identity_from_qualified_name, create_formatter,
};
use crate::plugin_defaults::{self, PluginSelectionMode};
use anyhow::{Context, Result, bail};
use sqry_core::graph::{
AcquisitionOperation, AutoBuildHook, FilesystemGraphProvider, GraphAcquirer, GraphAcquisition,
GraphAcquisitionError, GraphAcquisitionRequest, MissingGraphPolicy, PathPolicy,
PluginSelectionPolicy, PluginSelectionStatus, StalePolicy,
};
use sqry_core::query::QueryExecutor;
use sqry_core::query::parser_new::Parser as QueryParser;
use sqry_core::query::results::QueryResults;
use sqry_core::query::security::QuerySecurityConfig;
use sqry_core::query::types::{Expr, Value};
use sqry_core::query::validator::ValidationOptions;
use sqry_core::relations::CallIdentityMetadata;
use sqry_core::search::Match as TextMatch;
use sqry_core::search::classifier::{QueryClassifier, QueryType};
use sqry_core::search::fallback::{FallbackConfig, FallbackSearchEngine, SearchResults};
use sqry_core::session::{SessionManager, SessionStats};
use std::env;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
/// Process-wide cache of the session manager so repeated `--session` queries
/// in the same process reuse already-loaded graphs (see `run_query_with_session`).
static QUERY_SESSION: std::sync::LazyLock<Mutex<Option<SessionManager>>> =
    std::sync::LazyLock::new(|| Mutex::new(None));
/// Default cap on displayed results when `--limit` is not supplied.
const DEFAULT_QUERY_LIMIT: usize = 1000;
/// Minimal per-query statistics surfaced in the human-readable summary line.
#[derive(Debug, Clone, Default)]
struct SimpleQueryStats {
    /// True when the query ran against a prebuilt index/graph.
    used_index: bool,
}
/// Converts raw query matches into the CLI's display representation.
fn query_results_to_display_symbols(results: &QueryResults) -> Vec<DisplaySymbol> {
    let mut symbols = Vec::new();
    for m in results.iter() {
        symbols.push(DisplaySymbol::from_query_match(&m));
    }
    symbols
}
/// Payload for a query that produced symbols still awaiting rendering.
struct QueryExecution {
    stats: SimpleQueryStats,
    symbols: Vec<DisplaySymbol>,
    /// Retained so verbose/debug paths can report executor cache statistics;
    /// `None` when the hybrid engine consumed the executor.
    executor: Option<QueryExecutor>,
}
/// Whether execution already wrote its output (`Terminal`) or the caller
/// still needs to render the collected symbols (`Continue`).
enum QueryExecutionOutcome {
    Terminal,
    Continue(Box<QueryExecution>),
}
/// Inputs for a one-shot (non `--session`) query run.
struct NonSessionQueryParams<'a> {
    cli: &'a Cli,
    query_string: &'a str,
    search_path: &'a str,
    validation_options: ValidationOptions,
    verbose: bool,
    no_parallel: bool,
    relation_context: &'a RelationDisplayContext,
    /// Pre-parsed `--var` bindings; `None` when no variables were given.
    variables: Option<&'a std::collections::HashMap<String, String>>,
}
/// Inputs threaded from `run_query_non_session` into `execute_query_mode`.
struct QueryExecutionParams<'a> {
    cli: &'a Cli,
    query_string: &'a str,
    search_path: &'a Path,
    validation_options: ValidationOptions,
    no_parallel: bool,
    /// Wall-clock start of execution, used for the timing summary.
    start: Instant,
    query_type: QueryType,
    variables: Option<&'a std::collections::HashMap<String, String>>,
    acquisition: &'a GraphAcquisition,
}
/// Inputs for rendering a completed query's results.
struct QueryRenderParams<'a> {
    cli: &'a Cli,
    query_string: &'a str,
    verbose: bool,
    start: Instant,
    relation_context: &'a RelationDisplayContext,
    index_info: IndexDiagnosticInfo,
}
/// Inputs for the hybrid (semantic-first with text fallback) path.
/// NOTE(review): field-for-field identical to `QueryExecutionParams`;
/// consider sharing one type unless divergence is planned — confirm.
struct HybridQueryParams<'a> {
    cli: &'a Cli,
    query_string: &'a str,
    search_path: &'a Path,
    validation_options: ValidationOptions,
    no_parallel: bool,
    start: Instant,
    query_type: QueryType,
    variables: Option<&'a std::collections::HashMap<String, String>>,
    acquisition: &'a GraphAcquisition,
}
/// Entry point for `sqry query`: routes to the pipeline, join, explain,
/// session, or plain execution path, then flushes the (possibly paged)
/// output streams.
#[allow(clippy::too_many_arguments)]
#[allow(clippy::fn_params_excessive_bools)]
pub fn run_query(
    cli: &Cli,
    query_string: &str,
    search_path: &str,
    explain: bool,
    verbose: bool,
    session_mode: bool,
    no_parallel: bool,
    timeout_secs: Option<u64>,
    result_limit: Option<usize>,
    variables: &[String],
) -> Result<()> {
    let mut streams = OutputStreams::with_pager(cli.pager_config());
    // repo: filters belong to the multi-repo `workspace query` command.
    ensure_repo_predicate_not_present(query_string)?;
    let validation_options = build_validation_options(cli);
    let security_config = build_security_config(timeout_secs, result_limit);
    maybe_emit_security_diagnostics(&mut streams, &security_config, verbose)?;
    // NOTE(review): security_config is only used for the diagnostic line
    // above; the explicit borrow silences "unused" — confirm it is
    // intentionally not threaded into execution.
    let _ = &security_config;
    let parsed_variables = parse_variable_args(variables)?;
    let variables_opt = if parsed_variables.is_empty() {
        None
    } else {
        Some(&parsed_variables)
    };
    // --explain never touches the filesystem, so path validation is skipped.
    if !explain {
        validate_query_path_strict(Path::new(search_path))?;
    }
    if let Some(pipeline) = detect_pipeline_query(query_string)? {
        run_pipeline_query(
            cli,
            &mut streams,
            query_string,
            search_path,
            &pipeline,
            no_parallel,
            variables_opt,
        )?;
        return streams.finish_checked();
    }
    if is_join_query(query_string) {
        run_join_query(
            cli,
            &mut streams,
            query_string,
            search_path,
            no_parallel,
            variables_opt,
        )?;
        return streams.finish_checked();
    }
    if explain {
        run_query_explain(query_string, validation_options, no_parallel, &mut streams)?;
        return streams.finish_checked();
    }
    // Pre-compute which callers:/callees: targets appear, for identity display.
    let relation_context = RelationDisplayContext::from_query(query_string);
    if session_mode {
        let result = run_query_with_session(
            cli,
            &mut streams,
            query_string,
            search_path,
            verbose,
            no_parallel,
            &relation_context,
        );
        result?;
        return streams.finish_checked();
    }
    let params = NonSessionQueryParams {
        cli,
        query_string,
        search_path,
        validation_options,
        verbose,
        no_parallel,
        relation_context: &relation_context,
        variables: variables_opt,
    };
    run_query_non_session(&mut streams, &params)?;
    streams.finish_checked()
}
/// Derives query-validation options from the CLI's fuzzy-matching flags.
fn build_validation_options(cli: &Cli) -> ValidationOptions {
    let fuzzy_fields = cli.fuzzy_fields;
    let fuzzy_field_distance = cli.fuzzy_field_distance;
    ValidationOptions {
        fuzzy_fields,
        fuzzy_field_distance,
    }
}
/// Builds the query security configuration, overriding the default timeout
/// and result cap only when the caller supplied explicit values.
fn build_security_config(
    timeout_secs: Option<u64>,
    result_limit: Option<usize>,
) -> QuerySecurityConfig {
    let base = QuerySecurityConfig::default();
    let with_timeout = match timeout_secs {
        Some(secs) => base.with_timeout(Duration::from_secs(secs)),
        None => base,
    };
    match result_limit {
        Some(limit) => with_timeout.with_result_cap(limit),
        None => with_timeout,
    }
}
/// Writes the active security limits as a diagnostic line when `--verbose`.
fn maybe_emit_security_diagnostics(
    streams: &mut OutputStreams,
    security_config: &QuerySecurityConfig,
    verbose: bool,
) -> Result<()> {
    if !verbose {
        return Ok(());
    }
    let line = format!(
        "[Security] timeout={}s, limit={}, memory={}MB",
        security_config.timeout().as_secs(),
        security_config.result_cap(),
        security_config.memory_limit() / (1024 * 1024),
    );
    streams.write_diagnostic(&line)?;
    Ok(())
}
/// Prints the optimizer's query plan (original vs. optimized query, step
/// timings, index usage, and cache status) without running the query's
/// full output pipeline.
fn run_query_explain(
    query_string: &str,
    validation_options: ValidationOptions,
    no_parallel: bool,
    streams: &mut OutputStreams,
) -> Result<()> {
    let mut executor = create_executor_with_plugins().with_validation_options(validation_options);
    if no_parallel {
        executor = executor.without_parallel();
    }
    let plan = executor.get_query_plan(query_string)?;
    let explain_output = format!(
        "Query Plan:\n Original: {}\n Optimized: {}\n\nExecution:\n{}\n\nPerformance:\n Execution time: {}ms\n Index-aware: {}\n Cache: {}",
        plan.original_query,
        plan.optimized_query,
        format_execution_steps(&plan.steps),
        plan.execution_time_ms,
        if plan.used_index { "Yes" } else { "No" },
        format_cache_status(&plan.cache_status),
    );
    streams.write_diagnostic(&explain_output)?;
    Ok(())
}
/// The index root, possibly rewritten query, and diagnostics produced by
/// `resolve_effective_index_root`.
struct EffectiveIndexResolution {
    index_root: PathBuf,
    query: String,
    info: IndexDiagnosticInfo,
}
/// Finds the nearest index for `search_path` and, when that index covers a
/// broader tree than the requested path, narrows the query to the original
/// scope (a single file, or `dir/**` for directories).
fn resolve_effective_index_root(
    search_path: &Path,
    query_string: &str,
) -> EffectiveIndexResolution {
    let index_location = find_nearest_index(search_path);
    if let Some(ref loc) = index_location {
        let root = loc.index_root.clone();
        let (query, filtered_to) = if loc.requires_scope_filter {
            if let Some(relative_scope) = loc.relative_scope() {
                // Human-readable form of the scope for the summary footer.
                let scope_str = if loc.is_file_query {
                    relative_scope.to_string_lossy().into_owned()
                } else {
                    format!("{}/**", relative_scope.display())
                };
                let augmented =
                    augment_query_with_scope(query_string, &relative_scope, loc.is_file_query);
                (augmented, Some(scope_str))
            } else {
                (query_string.to_string(), None)
            }
        } else {
            (query_string.to_string(), None)
        };
        let info = IndexDiagnosticInfo {
            index_root: Some(root.clone()),
            filtered_to,
            used_ancestor_index: loc.is_ancestor,
        };
        EffectiveIndexResolution {
            index_root: root,
            query,
            info,
        }
    } else {
        // No index anywhere up the tree: fall back to the raw search path.
        EffectiveIndexResolution {
            index_root: search_path.to_path_buf(),
            query: query_string.to_string(),
            info: IndexDiagnosticInfo::default(),
        }
    }
}
/// Drives a one-shot query: validates syntax, acquires the graph, resolves
/// the effective index/query, executes, and renders the outcome.
fn run_query_non_session(
    streams: &mut OutputStreams,
    params: &NonSessionQueryParams<'_>,
) -> Result<()> {
    let NonSessionQueryParams {
        cli,
        query_string,
        search_path,
        validation_options,
        verbose,
        no_parallel,
        relation_context,
        variables,
    } = *params;
    let search_path_path = Path::new(search_path);
    // --text bypasses graph acquisition entirely.
    if cli.text {
        return run_query_text_only(streams, params);
    }
    // Surface syntax errors before the (potentially slow) graph load.
    if QueryClassifier::classify(query_string) == QueryType::Semantic {
        probe_validate_query_syntax(cli, search_path_path, query_string, validation_options)?;
    }
    let acquisition = acquire_graph_for_cli(cli, search_path_path)?;
    let resolution = resolve_effective_index_root(search_path_path, query_string);
    let EffectiveIndexResolution {
        index_root: effective_index_root,
        query: effective_query,
        info: index_info,
    } = resolution;
    // Re-classify because scope augmentation may have rewritten the query.
    let query_type = QueryClassifier::classify(&effective_query);
    let start = Instant::now();
    let execution_params = QueryExecutionParams {
        cli,
        query_string: &effective_query,
        search_path: &effective_index_root,
        validation_options,
        no_parallel,
        start,
        query_type,
        variables,
        acquisition: &acquisition,
    };
    let outcome = execute_query_mode(streams, &execution_params)?;
    let render_params = QueryRenderParams {
        cli,
        query_string: &effective_query,
        verbose,
        start,
        relation_context,
        index_info,
    };
    render_query_outcome(streams, outcome, render_params)
}
fn execute_query_mode(
streams: &mut OutputStreams,
params: &QueryExecutionParams<'_>,
) -> Result<QueryExecutionOutcome> {
let cli = params.cli;
let query_string = params.query_string;
let search_path = params.search_path;
let validation_options = params.validation_options;
let no_parallel = params.no_parallel;
let start = params.start;
let query_type = params.query_type;
let variables = params.variables;
let acquisition = params.acquisition;
if should_use_hybrid_search(cli) {
let params = HybridQueryParams {
cli,
query_string,
search_path,
validation_options,
no_parallel,
start,
query_type,
variables,
acquisition,
};
execute_hybrid_query(streams, ¶ms)
} else {
execute_semantic_query(
cli,
query_string,
search_path,
validation_options,
no_parallel,
variables,
acquisition,
)
}
}
/// Renders a `Continue` outcome's symbols; `Terminal` outcomes have already
/// produced their output and are ignored.
fn render_query_outcome(
    streams: &mut OutputStreams,
    outcome: QueryExecutionOutcome,
    params: QueryRenderParams<'_>,
) -> Result<()> {
    let QueryRenderParams {
        cli,
        query_string,
        verbose,
        start,
        relation_context,
        index_info,
    } = params;
    if let QueryExecutionOutcome::Continue(mut execution) = outcome {
        // Measure before rendering so formatting cost is excluded from the
        // reported execution time.
        let elapsed = start.elapsed();
        let execution = &mut *execution;
        let diagnostics = QueryDiagnostics::Standard { index_info };
        render_semantic_results(
            cli,
            streams,
            query_string,
            &mut execution.symbols,
            &execution.stats,
            elapsed,
            verbose,
            execution.executor.as_ref(),
            &diagnostics,
            relation_context,
        )?;
    }
    Ok(())
}
/// Runs a `--text` query through the fallback engine, bypassing the graph.
fn run_query_text_only(
    streams: &mut OutputStreams,
    params: &NonSessionQueryParams<'_>,
) -> Result<()> {
    let NonSessionQueryParams {
        cli,
        query_string,
        search_path,
        ..
    } = *params;
    let search_path_path = Path::new(search_path);
    let config = build_hybrid_config(cli);
    let mut engine = FallbackSearchEngine::with_config(config)?;
    let start = Instant::now();
    let results = engine.search_text_only(query_string, search_path_path)?;
    let elapsed = start.elapsed();
    match results {
        SearchResults::Text { matches, .. } => {
            render_text_results(cli, streams, &matches, elapsed)?;
        }
        // NOTE(review): a text-only search is not expected to yield semantic
        // results, but the engine's result type permits it, so render them
        // without index diagnostics — confirm this branch is reachable.
        SearchResults::Semantic { results, .. } => {
            let mut symbols = query_results_to_display_symbols(&results);
            let stats = SimpleQueryStats { used_index: false };
            let diagnostics = QueryDiagnostics::Standard {
                index_info: IndexDiagnosticInfo::default(),
            };
            render_semantic_results(
                cli,
                streams,
                query_string,
                &mut symbols,
                &stats,
                elapsed,
                params.verbose,
                None,
                &diagnostics,
                params.relation_context,
            )?;
        }
    }
    Ok(())
}
/// Executes a query through the hybrid engine: semantic-first with optional
/// text fallback, honouring `--text`/`--semantic` overrides.
fn execute_hybrid_query(
    streams: &mut OutputStreams,
    params: &HybridQueryParams<'_>,
) -> Result<QueryExecutionOutcome> {
    let cli = params.cli;
    let query_string = params.query_string;
    let search_path = params.search_path;
    let validation_options = params.validation_options;
    let no_parallel = params.no_parallel;
    let start = params.start;
    let query_type = params.query_type;
    let variables = params.variables;
    let acquisition = params.acquisition;
    // Substitute --var bindings by parsing, resolving, and re-serializing
    // the query up-front; the engine itself has no variable support.
    let effective_query = if let Some(vars) = variables {
        let ast = QueryParser::parse_query(query_string)
            .map_err(|e| anyhow::anyhow!("Failed to parse query for variable resolution: {e}"))?;
        let resolved = sqry_core::query::types::resolve_variables(&ast.root, vars)
            .map_err(|e| anyhow::anyhow!("{e}"))?;
        let resolved_ast = sqry_core::query::types::Query {
            root: resolved,
            span: ast.span,
        };
        std::borrow::Cow::Owned(sqry_core::query::parsed_query::serialize_query(
            &resolved_ast,
        ))
    } else {
        std::borrow::Cow::Borrowed(query_string)
    };
    let config = build_hybrid_config(cli);
    let mut executor = create_executor_with_plugins_for_cli(cli, search_path)?
        .with_validation_options(validation_options);
    if no_parallel {
        executor = executor.without_parallel();
    }
    let mut engine = FallbackSearchEngine::with_config_and_executor(config.clone(), executor)?;
    emit_search_mode_diagnostic(cli, streams, query_type, &config)?;
    let results = run_hybrid_search(cli, &mut engine, &effective_query, search_path, acquisition)?;
    let elapsed = start.elapsed();
    match results {
        // Semantic hits are rendered later by the caller.
        SearchResults::Semantic { results, .. } => {
            let symbols = query_results_to_display_symbols(&results);
            Ok(QueryExecutionOutcome::Continue(Box::new(QueryExecution {
                stats: build_query_stats(true, symbols.len()),
                symbols,
                // The engine took ownership of the executor, so cache
                // statistics are unavailable on this path.
                executor: None,
            })))
        }
        // Text fallback renders immediately; nothing left for the caller.
        SearchResults::Text { matches, .. } => {
            render_text_results(cli, streams, &matches, elapsed)?;
            Ok(QueryExecutionOutcome::Terminal)
        }
    }
}
/// Executes a purely semantic query against the already-acquired graph.
fn execute_semantic_query(
    cli: &Cli,
    query_string: &str,
    search_path: &Path,
    validation_options: ValidationOptions,
    no_parallel: bool,
    variables: Option<&std::collections::HashMap<String, String>>,
    acquisition: &GraphAcquisition,
) -> Result<QueryExecutionOutcome> {
    let mut executor = create_executor_with_plugins_for_cli(cli, search_path)?
        .with_validation_options(validation_options);
    if no_parallel {
        executor = executor.without_parallel();
    }
    let query_results = executor.execute_on_preloaded_graph(
        Arc::clone(&acquisition.graph),
        query_string,
        &acquisition.workspace_root,
        variables,
    )?;
    let symbols = query_results_to_display_symbols(&query_results);
    let stats = SimpleQueryStats { used_index: true };
    // Keep the executor so verbose/debug output can report cache statistics.
    Ok(QueryExecutionOutcome::Continue(Box::new(QueryExecution {
        stats,
        symbols,
        executor: Some(executor),
    })))
}
/// Announces which search mode will run, unless suppressed by config or
/// JSON output mode.
fn emit_search_mode_diagnostic(
    cli: &Cli,
    streams: &mut OutputStreams,
    query_type: QueryType,
    config: &FallbackConfig,
) -> Result<()> {
    if config.show_search_mode && !cli.json {
        let message = match query_type {
            QueryType::Semantic => "[Semantic search mode]",
            QueryType::Text => "[Text search mode]",
            QueryType::Hybrid => "[Hybrid mode: trying semantic first...]",
        };
        streams.write_diagnostic(message)?;
    }
    Ok(())
}
/// Picks the engine entry point matching the CLI's mode flags:
/// `--text` forces text, `--semantic` forces semantic, otherwise hybrid.
fn run_hybrid_search(
    cli: &Cli,
    engine: &mut FallbackSearchEngine,
    query_string: &str,
    search_path: &Path,
    acquisition: &GraphAcquisition,
) -> Result<SearchResults> {
    match (cli.text, cli.semantic) {
        (true, _) => engine.search_text_only(query_string, search_path),
        (false, true) => engine.search_semantic_only_with_preloaded_graph(
            query_string,
            Arc::clone(&acquisition.graph),
            search_path,
        ),
        (false, false) => engine.search_with_preloaded_graph(
            query_string,
            Arc::clone(&acquisition.graph),
            search_path,
        ),
    }
}
/// Builds summary stats for a query run; `_symbol_count` is accepted for
/// signature stability but currently unused.
fn build_query_stats(used_index: bool, _symbol_count: usize) -> SimpleQueryStats {
    SimpleQueryStats { used_index }
}
/// Renders text-search matches as JSON, a bare count, or grep-style lines
/// depending on the output flags.
fn render_text_results(
    cli: &Cli,
    streams: &mut OutputStreams,
    matches: &[TextMatch],
    elapsed: Duration,
) -> Result<()> {
    if cli.json {
        let json_output = serde_json::json!({
            "text_matches": matches,
            "match_count": matches.len(),
            "execution_time_ms": elapsed.as_millis(),
        });
        streams.write_result(&serde_json::to_string_pretty(&json_output)?)?;
    } else if cli.count {
        streams.write_result(&matches.len().to_string())?;
    } else {
        // grep-style `path:line:text` lines followed by a timing footer.
        for m in matches {
            streams.write_result(&format!(
                "{}:{}:{}",
                m.path.display(),
                m.line,
                m.line_text.trim()
            ))?;
        }
        streams.write_diagnostic(&format!(
            "\nQuery executed ({}ms) - {} text matches found",
            elapsed.as_millis(),
            matches.len()
        ))?;
    }
    Ok(())
}
/// Runs a query through the process-wide session cache so repeated queries
/// reuse loaded graphs; only semantic queries are supported.
fn run_query_with_session(
    cli: &Cli,
    streams: &mut OutputStreams,
    query_string: &str,
    search_path: &str,
    verbose: bool,
    _no_parallel: bool,
    relation_ctx: &RelationDisplayContext,
) -> Result<()> {
    if cli.text {
        bail!("--session is only available for semantic queries (remove --text)");
    }
    let search_path_path = Path::new(search_path);
    // Surface syntax errors before any graph work.
    if QueryClassifier::classify(query_string) == QueryType::Semantic {
        probe_validate_query_syntax(
            cli,
            search_path_path,
            query_string,
            build_validation_options(cli),
        )?;
    }
    // NOTE(review): the acquisition result is discarded — this call appears
    // to run only for its side effects, and a missing graph is tolerated
    // because resolve_session_index below reports a clearer error. Confirm
    // this is intentional.
    match acquire_graph_for_cli_typed(cli, search_path_path, MissingGraphPolicy::Error)? {
        Ok(_acquisition) => {}
        Err(GraphAcquisitionError::NoGraph { .. }) => {
        }
        Err(other) => return Err(map_acquisition_error(other)),
    }
    let (workspace, relative_scope, is_file_query, is_ancestor) =
        resolve_session_index(search_path_path)?;
    // Build diagnostics describing any ancestor-index scoping applied.
    let index_info = if is_ancestor || relative_scope.is_some() {
        let filtered_to = relative_scope.as_ref().map(|p| {
            if is_file_query {
                p.to_string_lossy().into_owned()
            } else {
                format!("{}/**", p.display())
            }
        });
        IndexDiagnosticInfo {
            index_root: Some(workspace.clone()),
            filtered_to,
            used_ancestor_index: is_ancestor,
        }
    } else {
        IndexDiagnosticInfo::default()
    };
    let effective_query: std::borrow::Cow<'_, str> = if let Some(ref scope) = relative_scope {
        std::borrow::Cow::Owned(augment_query_with_scope(query_string, scope, is_file_query))
    } else {
        std::borrow::Cow::Borrowed(query_string)
    };
    // Lazily initialise the process-wide session manager on first use.
    let mut guard = QUERY_SESSION
        .lock()
        .expect("global session cache mutex poisoned");
    if guard.is_none() {
        let config = sqry_core::session::SessionConfig::default();
        *guard = Some(
            SessionManager::with_config(config).context("failed to initialise session manager")?,
        );
    }
    let session = guard.as_ref().expect("session manager must be initialised");
    // Compare hit counters before/after to detect whether this query hit
    // the session's graph cache.
    let before = session.stats();
    let start = Instant::now();
    let query_results = session
        .query(&workspace, &effective_query)
        .with_context(|| format!("failed to execute query \"{}\"", &effective_query))?;
    let elapsed = start.elapsed();
    let after = session.stats();
    let cache_hit = after.cache_hits > before.cache_hits;
    let mut symbols = query_results_to_display_symbols(&query_results);
    let stats = SimpleQueryStats { used_index: true };
    let diagnostics = QueryDiagnostics::Session {
        cache_hit,
        stats: after,
        index_info,
    };
    render_semantic_results(
        cli,
        streams,
        &effective_query,
        &mut symbols,
        &stats,
        elapsed,
        verbose,
        None,
        &diagnostics,
        relation_ctx,
    )
}
/// Resolves the indexed workspace for session mode, which requires an
/// existing directory covered by an index. Returns
/// `(index_root, optional scope relative to it, is_file_query, is_ancestor)`.
fn resolve_session_index(path: &Path) -> Result<(PathBuf, Option<PathBuf>, bool, bool)> {
    if !path.exists() {
        bail!(
            "session mode requires a directory ({} does not exist)",
            path.display()
        );
    }
    if path.is_file() {
        bail!(
            "session mode requires a directory path ({} is a file). \
            For file-specific queries, omit --session.",
            path.display()
        );
    }
    if let Some(loc) = find_nearest_index(path) {
        // Only constrain the query when the index is broader than the
        // requested path.
        let relative_scope = if loc.requires_scope_filter {
            loc.relative_scope()
        } else {
            None
        };
        Ok((
            loc.index_root,
            relative_scope,
            loc.is_file_query,
            loc.is_ancestor,
        ))
    } else {
        bail!(
            "no index found at {} or any parent directory. \
            Run `sqry index <root>` first.",
            path.display()
        );
    }
}
/// Rejects queries using `repo:` filters, which are only valid for the
/// multi-repo `sqry workspace query` command. Falls back to a substring
/// check when the query does not parse.
fn ensure_repo_predicate_not_present(query_string: &str) -> Result<()> {
    match QueryParser::parse_query(query_string) {
        Ok(query) => {
            if expr_has_repo_predicate(&query.root) {
                bail!(
                    "repo: filters are only supported via `sqry workspace query` (multi-repo command)"
                );
            }
        }
        Err(_) => {
            if query_string.contains("repo:") {
                bail!("repo: filters are only supported via `sqry workspace query` (multi-repo command)");
            }
        }
    }
    Ok(())
}
/// Recursively checks whether any condition in the expression tree filters
/// on the `repo` field.
fn expr_has_repo_predicate(expr: &Expr) -> bool {
    match expr {
        Expr::Condition(condition) => condition.field.as_str() == "repo",
        Expr::Not(inner) => expr_has_repo_predicate(inner),
        Expr::And(children) | Expr::Or(children) => children.iter().any(expr_has_repo_predicate),
        Expr::Join(join) => {
            expr_has_repo_predicate(&join.left) || expr_has_repo_predicate(&join.right)
        }
    }
}
/// Diagnostics about which index served a query and any scope narrowing.
#[derive(Default)]
struct IndexDiagnosticInfo {
    /// Root directory of the index actually used, when one was found.
    index_root: Option<PathBuf>,
    /// Human-readable scope filter (file path or `dir/**`), if applied.
    filtered_to: Option<String>,
    /// True when the index was found at an ancestor of the search path.
    used_ancestor_index: bool,
}
/// Extra information rendered in the post-query summary, varying by mode.
enum QueryDiagnostics {
    Standard {
        index_info: IndexDiagnosticInfo,
    },
    Session {
        cache_hit: bool,
        stats: SessionStats,
        index_info: IndexDiagnosticInfo,
    },
}
/// Result of applying the display limit to a result set.
struct QueryLimitInfo {
    total_matches: usize,
    limit: usize,
    truncated: bool,
}
/// Sorts, limits, formats, and summarises semantic query results; the
/// symbols vector may be truncated in place by the display limit.
#[allow(clippy::too_many_arguments)]
fn render_semantic_results(
    cli: &Cli,
    streams: &mut OutputStreams,
    query_string: &str,
    symbols: &mut Vec<DisplaySymbol>,
    stats: &SimpleQueryStats,
    elapsed: Duration,
    verbose: bool,
    executor_opt: Option<&QueryExecutor>,
    diagnostics: &QueryDiagnostics,
    relation_ctx: &RelationDisplayContext,
) -> Result<()> {
    apply_sorting(cli, symbols);
    let limit_info = apply_symbol_limit(symbols, cli.limit.unwrap_or(DEFAULT_QUERY_LIMIT));
    let index_info = match diagnostics {
        QueryDiagnostics::Standard { index_info }
        | QueryDiagnostics::Session { index_info, .. } => index_info,
    };
    let metadata =
        build_formatter_metadata(query_string, limit_info.total_matches, elapsed, index_info);
    let identity_overrides = build_identity_overrides(cli, symbols, relation_ctx);
    let display_symbols =
        build_display_symbols_with_identities(symbols, identity_overrides.as_ref());
    format_semantic_output(cli, streams, &display_symbols, &metadata)?;
    maybe_emit_truncation_notice(cli, &limit_info);
    // Machine-readable modes get no human summary footer.
    if cli.json || cli.count {
        return Ok(());
    }
    write_query_summary(streams, stats, elapsed, symbols.len(), diagnostics)?;
    if verbose {
        emit_verbose_cache_stats(streams, stats, executor_opt, diagnostics)?;
    }
    maybe_emit_debug_cache(cli, streams, executor_opt, stats)?;
    Ok(())
}
/// Sorts the symbols in place when `--sort` was given; otherwise a no-op.
fn apply_sorting(cli: &Cli, symbols: &mut [DisplaySymbol]) {
    let Some(sort_field) = cli.sort else {
        return;
    };
    crate::commands::sort::sort_symbols(symbols, sort_field);
}
/// Truncates the symbol list to `limit` entries in place and reports the
/// original total plus whether anything was dropped.
fn apply_symbol_limit(symbols: &mut Vec<DisplaySymbol>, limit: usize) -> QueryLimitInfo {
    let total_matches = symbols.len();
    if total_matches <= limit {
        return QueryLimitInfo {
            total_matches,
            limit,
            truncated: false,
        };
    }
    symbols.truncate(limit);
    QueryLimitInfo {
        total_matches,
        limit,
        truncated: true,
    }
}
/// Assembles formatter metadata for the output layer; kind/lang filters are
/// always empty here because filtering happens inside the query itself.
fn build_formatter_metadata(
    query_string: &str,
    total_matches: usize,
    elapsed: Duration,
    index_info: &IndexDiagnosticInfo,
) -> crate::output::FormatterMetadata {
    crate::output::FormatterMetadata {
        pattern: Some(query_string.to_string()),
        total_matches,
        execution_time: elapsed,
        filters: sqry_core::json_response::Filters {
            kind: None,
            lang: None,
            ignore_case: false,
            exact: false,
            fuzzy: None,
        },
        index_age_seconds: None,
        // Only report ancestor-index usage when relevant, i.e. an ancestor
        // index or a scope filter was involved.
        used_ancestor_index: if index_info.used_ancestor_index || index_info.filtered_to.is_some() {
            Some(index_info.used_ancestor_index)
        } else {
            None
        },
        filtered_to: index_info.filtered_to.clone(),
    }
}
/// Computes caller/callee identity metadata only for output modes that
/// display qualified names (`--qualified-names` or JSON).
fn build_identity_overrides(
    cli: &Cli,
    symbols: &[DisplaySymbol],
    relation_ctx: &RelationDisplayContext,
) -> Option<DisplayIdentities> {
    let wants_identities = cli.qualified_names || cli.json;
    wants_identities.then(|| compute_display_identities(symbols, relation_ctx))
}
/// Formats the display symbols through the CLI-selected formatter.
fn format_semantic_output(
    cli: &Cli,
    streams: &mut OutputStreams,
    display_symbols: &[DisplaySymbol],
    metadata: &crate::output::FormatterMetadata,
) -> Result<()> {
    create_formatter(cli).format(display_symbols, Some(metadata), streams)?;
    Ok(())
}
/// Prints a truncation notice when the display limit dropped matches.
/// NOTE(review): writes to stderr directly instead of going through
/// `OutputStreams` like other diagnostics — confirm bypassing the pager
/// here is intentional.
fn maybe_emit_truncation_notice(cli: &Cli, limit_info: &QueryLimitInfo) {
    if !cli.json && limit_info.truncated {
        eprintln!(
            "\nShowing {} of {} matches (use --limit to adjust)",
            limit_info.limit, limit_info.total_matches
        );
    }
}
/// Clones the symbols, attaching per-index caller or callee identity
/// metadata when overrides were computed; a caller identity takes priority
/// over a callee identity for the same symbol.
fn build_display_symbols_with_identities(
    symbols: &[DisplaySymbol],
    identity_overrides: Option<&DisplayIdentities>,
) -> Vec<DisplaySymbol> {
    let Some(identities) = identity_overrides else {
        return symbols.to_vec();
    };
    symbols
        .iter()
        .enumerate()
        .map(|(idx, symbol)| {
            let invoker = identities
                .invoker_identities
                .get(idx)
                .and_then(Clone::clone);
            if invoker.is_some() {
                return symbol.clone().with_caller_identity(invoker);
            }
            let target = identities.target_identities.get(idx).and_then(Clone::clone);
            if target.is_some() {
                return symbol.clone().with_callee_identity(target);
            }
            symbol.clone()
        })
        .collect()
}
/// Writes the human-readable footer: index usage, timing, match count, any
/// scope filter, and (in session mode) cache hit/miss state.
fn write_query_summary(
    streams: &mut OutputStreams,
    stats: &SimpleQueryStats,
    elapsed: Duration,
    symbol_count: usize,
    diagnostics: &QueryDiagnostics,
) -> Result<()> {
    use std::fmt::Write as _;
    // Blank line separates the footer from the result listing.
    streams.write_diagnostic("")?;
    let index_info = match diagnostics {
        QueryDiagnostics::Standard { index_info }
        | QueryDiagnostics::Session { index_info, .. } => index_info,
    };
    // Mention the index root explicitly when an ancestor index was used.
    let index_status = if stats.used_index {
        if index_info.used_ancestor_index {
            if let Some(ref root) = index_info.index_root {
                format!("✓ Using index from {}", root.display())
            } else {
                "✓ Used index".to_string()
            }
        } else {
            "✓ Used index".to_string()
        }
    } else {
        "ℹ No index found".to_string()
    };
    let mut msg = format!(
        "{} - Query executed ({}ms) - {} symbols found",
        index_status,
        elapsed.as_millis(),
        symbol_count
    );
    if let Some(ref filtered_to) = index_info.filtered_to {
        let _ = write!(msg, " (filtered to {filtered_to})");
    }
    if let QueryDiagnostics::Session { cache_hit, .. } = diagnostics {
        let cache_state = if *cache_hit {
            "session cache hit"
        } else {
            "session cache miss"
        };
        let _ = write!(msg, " [{cache_state}]");
    }
    streams.write_diagnostic(&msg)?;
    Ok(())
}
/// Routes verbose cache reporting to whichever statistics source exists:
/// a retained executor, the session stats, or a hybrid-mode notice.
fn emit_verbose_cache_stats(
    streams: &mut OutputStreams,
    _stats: &SimpleQueryStats,
    executor_opt: Option<&QueryExecutor>,
    diagnostics: &QueryDiagnostics,
) -> Result<()> {
    match (executor_opt, diagnostics) {
        (Some(executor), _) => emit_executor_cache_stats(streams, executor),
        (None, QueryDiagnostics::Session { stats, .. }) => emit_session_cache_stats(streams, stats),
        // Hybrid search consumed the executor, so no stats are available.
        _ => emit_hybrid_cache_notice(streams),
    }
}
/// Prints parse- and result-cache hit rates for a retained executor.
fn emit_executor_cache_stats(streams: &mut OutputStreams, executor: &QueryExecutor) -> Result<()> {
    let (parse_stats, result_stats) = executor.cache_stats();
    streams.write_diagnostic("")?;
    streams.write_diagnostic("Cache Statistics:")?;
    let parse_msg = format!(
        " Parse cache: {:.1}% hit rate ({} hits, {} misses, {} evictions)",
        parse_stats.hit_rate() * 100.0,
        parse_stats.hits,
        parse_stats.misses,
        parse_stats.evictions,
    );
    streams.write_diagnostic(&parse_msg)?;
    let result_msg = format!(
        " Result cache: {:.1}% hit rate ({} hits, {} misses, {} evictions)",
        result_stats.hit_rate() * 100.0,
        result_stats.hits,
        result_stats.misses,
        result_stats.evictions,
    );
    streams.write_diagnostic(&result_msg)?;
    Ok(())
}
/// Prints session-level statistics: cached graph count, query totals,
/// cache hit rate, and estimated memory.
///
/// Fix: the trailing writes previously discarded their `Result`s via
/// `let _ =` while the first two propagated with `?`; all diagnostic
/// writes now propagate errors consistently, matching the rest of the file.
fn emit_session_cache_stats(streams: &mut OutputStreams, stats: &SessionStats) -> Result<()> {
    let total_cache_events = stats.cache_hits + stats.cache_misses;
    // Guard against division by zero before any cache activity.
    let hit_rate = if total_cache_events > 0 {
        (u64_to_f64_lossy(stats.cache_hits) / u64_to_f64_lossy(total_cache_events)) * 100.0
    } else {
        0.0
    };
    streams.write_diagnostic("")?;
    streams.write_diagnostic("Session statistics:")?;
    streams.write_diagnostic(&format!(" Cached indexes : {}", stats.cached_graphs))?;
    streams.write_diagnostic(&format!(" Total queries : {}", stats.total_queries))?;
    streams.write_diagnostic(&format!(
        " Cache hits : {} ({hit_rate:.1}% hit rate)",
        stats.cache_hits
    ))?;
    streams.write_diagnostic(&format!(" Cache misses : {}", stats.cache_misses))?;
    streams.write_diagnostic(&format!(
        " Estimated memory: ~{} MB",
        stats.total_memory_mb
    ))?;
    Ok(())
}
/// Explains why cache statistics are unavailable after a hybrid search.
fn emit_hybrid_cache_notice(streams: &mut OutputStreams) -> Result<()> {
    for line in ["", "Cache statistics not available in hybrid search mode"] {
        streams.write_diagnostic(line)?;
    }
    Ok(())
}
/// Per-symbol identity metadata, indexed in parallel with the symbol list;
/// only one of the two vectors carries values for any given query.
struct DisplayIdentities {
    invoker_identities: Vec<Option<CallIdentityMetadata>>,
    target_identities: Vec<Option<CallIdentityMetadata>>,
}
/// Derives identity metadata for every symbol and assigns it to the invoker
/// slot for `callers:` queries or the target slot for `callees:` queries;
/// `callers:` wins when both kinds of targets are present.
fn compute_display_identities(
    symbols: &[DisplaySymbol],
    relation_ctx: &RelationDisplayContext,
) -> DisplayIdentities {
    let identities: Vec<Option<CallIdentityMetadata>> = symbols
        .iter()
        .map(build_identity_from_display_symbol)
        .collect();
    let blanks: Vec<Option<CallIdentityMetadata>> = vec![None; symbols.len()];
    if !relation_ctx.caller_targets.is_empty() {
        DisplayIdentities {
            invoker_identities: identities,
            target_identities: blanks,
        }
    } else if !relation_ctx.callee_targets.is_empty() {
        DisplayIdentities {
            invoker_identities: blanks,
            target_identities: identities,
        }
    } else {
        DisplayIdentities {
            invoker_identities: blanks.clone(),
            target_identities: blanks,
        }
    }
}
/// Extracts the raw language and static-ness from the symbol's metadata and
/// builds its call-identity record from the qualified name.
fn build_identity_from_display_symbol(symbol: &DisplaySymbol) -> Option<CallIdentityMetadata> {
    let language = symbol.metadata.get("__raw_language").map(String::as_str);
    let is_static = matches!(symbol.metadata.get("static"), Some(value) if value == "true");
    build_identity_from_qualified_name(&symbol.qualified_name, &symbol.kind, language, is_static)
}
/// Thin local alias over the output module's identity parser, kept so call
/// sites in this file read uniformly.
fn build_identity_from_qualified_name(
    qualified: &str,
    kind: &str,
    language: Option<&str>,
    is_static: bool,
) -> Option<CallIdentityMetadata> {
    call_identity_from_qualified_name(qualified, kind, language, is_static)
}
/// Renders the plan's execution steps as newline-separated numbered lines.
fn format_execution_steps(steps: &[sqry_core::query::ExecutionStep]) -> String {
    use std::fmt::Write as _;
    let mut rendered = String::new();
    for (idx, step) in steps.iter().enumerate() {
        if idx > 0 {
            rendered.push('\n');
        }
        let _ = write!(
            rendered,
            " {}. {} ({}ms)",
            step.step_num, step.operation, step.time_ms
        );
    }
    rendered
}
/// Describes the plan's cache behaviour from the parse/result hit flags.
fn format_cache_status(status: &sqry_core::query::CacheStatus) -> String {
    let label = match (status.parse_cache_hit, status.result_cache_hit) {
        (true, true) => "HIT (100% cached)",
        (true, false) => "PARTIAL HIT (query cached, results computed)",
        (false, true) => "PARTIAL HIT (query parsed, results cached)",
        (false, false) => "MISS (first run)",
    };
    label.to_string()
}
/// True when SQRY_CACHE_DEBUG is set to "1" or (case-insensitive) "true".
fn env_debug_cache_enabled() -> bool {
    env::var("SQRY_CACHE_DEBUG")
        .map_or(false, |value| {
            value == "1" || value.eq_ignore_ascii_case("true")
        })
}
/// Targets of `callers:`/`callees:` predicates found in the query, used to
/// decide which identity column to populate in the output.
#[derive(Default)]
struct RelationDisplayContext {
    caller_targets: Vec<String>,
    callee_targets: Vec<String>,
}
impl RelationDisplayContext {
    /// Parses the query and collects relation targets; an unparseable query
    /// yields an empty context rather than an error.
    fn from_query(query_str: &str) -> Self {
        let mut ctx = Self::default();
        if let Ok(ast) = QueryParser::parse_query(query_str) {
            collect_relation_targets(&ast.root, &mut ctx);
        }
        ctx
    }
}
/// Walks the query AST and records non-empty string targets of `callers:`
/// and `callees:` conditions into the context.
fn collect_relation_targets(expr: &Expr, ctx: &mut RelationDisplayContext) {
    match expr {
        Expr::And(operands) | Expr::Or(operands) => {
            for child in operands {
                collect_relation_targets(child, ctx);
            }
        }
        Expr::Not(inner) => collect_relation_targets(inner, ctx),
        Expr::Join(join) => {
            collect_relation_targets(&join.left, ctx);
            collect_relation_targets(&join.right, ctx);
        }
        Expr::Condition(condition) => {
            // Pick the bucket for this field; other fields are ignored.
            let bucket = match condition.field.as_str() {
                "callers" => Some(&mut ctx.caller_targets),
                "callees" => Some(&mut ctx.callee_targets),
                _ => None,
            };
            if let Some(bucket) = bucket {
                if let Value::String(value) = &condition.value {
                    if !value.is_empty() {
                        bucket.push(value.clone());
                    }
                }
            }
        }
    }
}
/// True when cache debugging was requested via `--debug-cache` or the
/// SQRY_CACHE_DEBUG environment variable.
fn should_debug_cache(cli: &Cli) -> bool {
    cli.debug_cache || env_debug_cache_enabled()
}
/// Emits a compact, greppable cache-stats line when cache debugging is
/// enabled; reports unavailability when no executor was retained.
fn maybe_emit_debug_cache(
    cli: &Cli,
    streams: &mut OutputStreams,
    executor_opt: Option<&QueryExecutor>,
    _stats: &SimpleQueryStats,
) -> Result<()> {
    if !should_debug_cache(cli) {
        return Ok(());
    }
    let Some(executor) = executor_opt else {
        streams.write_diagnostic("CacheStats unavailable in this mode")?;
        return Ok(());
    };
    let (parse_stats, result_stats) = executor.cache_stats();
    let debug_line = format!(
        "CacheStats{{parse_hits={}, parse_misses={}, result_hits={}, result_misses={}}}",
        parse_stats.hits, parse_stats.misses, result_stats.hits, result_stats.misses,
    );
    streams.write_diagnostic(&debug_line)?;
    Ok(())
}
/// Builds the fallback-engine configuration from the environment, then
/// applies CLI overrides (context lines, text-result cap, fallback and
/// search-mode-banner toggles).
fn build_hybrid_config(cli: &Cli) -> FallbackConfig {
    let mut config = FallbackConfig::from_env();
    config.text_context_lines = cli.context;
    config.max_text_results = cli.max_text_results;
    if cli.no_fallback {
        config.fallback_enabled = false;
    }
    // JSON output must stay machine-parseable, so suppress the mode banner.
    if cli.json {
        config.show_search_mode = false;
    }
    config
}
/// Hybrid search is the default execution mode; it is disabled only when
/// cache debugging is active, since the hybrid engine consumes the executor
/// and cannot report cache statistics (see `maybe_emit_debug_cache`).
///
/// Fix: replaced `if cond { return false; } true` with a direct negation
/// (clippy `needless_bool` idiom).
fn should_use_hybrid_search(cli: &Cli) -> bool {
    !should_debug_cache(cli)
}
/// Builds a query executor with the default plugin set (no CLI overrides).
pub(crate) fn create_executor_with_plugins() -> QueryExecutor {
    let plugin_manager = crate::plugin_defaults::create_plugin_manager();
    QueryExecutor::with_plugin_manager(plugin_manager)
}
/// Builds a query executor whose plugin selection honours CLI flags and any
/// plugin configuration stored alongside the nearest index.
pub(crate) fn create_executor_with_plugins_for_cli(
    cli: &Cli,
    search_path: &Path,
) -> Result<QueryExecutor> {
    // Resolve plugins relative to the index root, not the raw search path,
    // so subdirectory queries pick up the workspace's plugin settings.
    let effective_root = find_nearest_index(search_path)
        .map_or_else(|| search_path.to_path_buf(), |location| location.index_root);
    let resolved_plugins = plugin_defaults::resolve_plugin_selection(
        cli,
        &effective_root,
        PluginSelectionMode::ReadOnly,
    )?;
    Ok(QueryExecutor::with_plugin_manager(
        resolved_plugins.plugin_manager,
    ))
}
/// Parses `query_string` without executing it, to surface syntax errors early.
///
/// Falls back to the default plugin set when CLI-specific plugin resolution
/// fails, since a parse-only probe does not need index-matched plugins.
fn probe_validate_query_syntax(
    cli: &Cli,
    search_path: &Path,
    query_string: &str,
    validation_options: ValidationOptions,
) -> Result<()> {
    let executor = create_executor_with_plugins_for_cli(cli, search_path)
        .unwrap_or_else(|_| create_executor_with_plugins())
        .with_validation_options(validation_options);
    executor.parse_query_ast(query_string)?;
    Ok(())
}
fn validate_query_path_strict(search_path: &Path) -> Result<PathBuf> {
if !search_path.exists() {
bail!(
"invalid path {}: path does not exist",
search_path.display()
);
}
match search_path.canonicalize() {
Ok(canonical) => Ok(canonical),
Err(err) => bail!(
"invalid path {}: path cannot be canonicalized: {err}",
search_path.display()
),
}
}
/// Acquires the graph for `search_path` using the default missing-graph
/// policy (`AutoBuildIfEnabled`), which auto-builds an index when none exists
/// and auto-indexing is not disabled via `SQRY_AUTO_INDEX`.
pub(crate) fn acquire_graph_for_cli(cli: &Cli, search_path: &Path) -> Result<GraphAcquisition> {
    acquire_graph_for_cli_with_policy(cli, search_path, MissingGraphPolicy::AutoBuildIfEnabled)
}
pub(crate) fn acquire_graph_for_cli_with_policy(
cli: &Cli,
search_path: &Path,
missing_graph_policy: MissingGraphPolicy,
) -> Result<GraphAcquisition> {
let (provider, request) =
build_cli_provider_and_request(cli, search_path, missing_graph_policy)?;
provider.acquire(request).map_err(map_acquisition_error)
}
/// Like `acquire_graph_for_cli_with_policy`, but preserves the typed
/// `GraphAcquisitionError` in the inner `Result` so callers can branch on the
/// specific failure instead of a stringly-typed anyhow error.
pub(crate) fn acquire_graph_for_cli_typed(
    cli: &Cli,
    search_path: &Path,
    missing_graph_policy: MissingGraphPolicy,
) -> Result<std::result::Result<GraphAcquisition, GraphAcquisitionError>> {
    build_cli_provider_and_request(cli, search_path, missing_graph_policy)
        .map(|(provider, request)| provider.acquire(request))
}
/// Builds the filesystem graph provider plus the acquisition request for a
/// read-only CLI query against `search_path`.
///
/// Plugin selection is resolved once against the nearest existing index root
/// and the resulting plugin manager is shared (via `Arc`) between the
/// provider and the optional auto-build hook — the original code resolved the
/// identical selection a second time just for the hook.
fn build_cli_provider_and_request(
    cli: &Cli,
    search_path: &Path,
    missing_graph_policy: MissingGraphPolicy,
) -> Result<(FilesystemGraphProvider, GraphAcquisitionRequest)> {
    let plugin_root = find_nearest_index(search_path)
        .map_or_else(|| search_path.to_path_buf(), |location| location.index_root);
    let resolved_plugins = plugin_defaults::resolve_plugin_selection(
        cli,
        &plugin_root,
        PluginSelectionMode::ReadOnly,
    )?;
    let plugin_manager = Arc::new(resolved_plugins.plugin_manager);
    let mut provider = FilesystemGraphProvider::new(Arc::clone(&plugin_manager));
    if matches!(missing_graph_policy, MissingGraphPolicy::AutoBuildIfEnabled) {
        // The hook captures its own handle to the shared plugin manager.
        let hook_plugin_manager = Arc::clone(&plugin_manager);
        let auto_build_hook: AutoBuildHook = Arc::new(move |canonical_request: &Path| {
            // Respect the SQRY_AUTO_INDEX opt-out: report "no graph" instead
            // of building one.
            if !is_auto_index_enabled() {
                return Err(GraphAcquisitionError::NoGraph {
                    workspace_root: canonical_request.to_path_buf(),
                });
            }
            log::info!(
                "No graph found at {}, auto-building index",
                canonical_request.display()
            );
            let config = sqry_core::graph::unified::build::BuildConfig::default();
            let (graph, _build_result) = sqry_core::graph::unified::build::build_and_persist_graph(
                canonical_request,
                &hook_plugin_manager,
                &config,
                "cli:auto_index",
            )
            .map_err(|e| GraphAcquisitionError::BuildFailed {
                workspace_root: canonical_request.to_path_buf(),
                reason: format!("{e}"),
            })?;
            Ok(Arc::new(graph))
        });
        provider = provider.with_auto_build_hook(auto_build_hook);
    }
    let request = GraphAcquisitionRequest {
        requested_path: search_path.to_path_buf(),
        operation: AcquisitionOperation::ReadOnlyQuery,
        path_policy: PathPolicy::default(),
        missing_graph_policy,
        stale_policy: StalePolicy::default(),
        plugin_selection_policy: PluginSelectionPolicy::default(),
        tool_name: Some("sqry_query"),
    };
    Ok((provider, request))
}
/// Auto-indexing is opt-out: enabled unless `SQRY_AUTO_INDEX` is set to the
/// literal string "false" or "0". Unset (or unreadable) means enabled.
fn is_auto_index_enabled() -> bool {
    std::env::var("SQRY_AUTO_INDEX").map_or(true, |val| val != "false" && val != "0")
}
/// Converts a typed `GraphAcquisitionError` into a user-facing `anyhow` error
/// with an actionable message (usually suggesting an `sqry index` command).
///
/// The `IncompatibleGraph` arm goes further: for unknown plugin ids it
/// consults the plugin registry to suggest either a feature-flagged rebuild
/// of the binary or a forced re-index, depending on whether the unknown ids
/// map to known cargo features.
fn map_acquisition_error(err: GraphAcquisitionError) -> anyhow::Error {
    match err {
        GraphAcquisitionError::InvalidPath { path, reason } => {
            anyhow::anyhow!("invalid path {}: {}", path.display(), reason)
        }
        GraphAcquisitionError::NoGraph { workspace_root } => {
            anyhow::anyhow!(
                "No graph found for {}. Run `sqry index {}` first.",
                workspace_root.display(),
                workspace_root.display()
            )
        }
        GraphAcquisitionError::IncompatibleGraph {
            source_root,
            status,
        } => match status {
            PluginSelectionStatus::IncompatibleUnknownPluginIds {
                unknown_plugin_ids,
                manifest_path,
            } => {
                // Which cargo features would provide the unknown plugin ids,
                // and whether *every* unknown id has a matching feature.
                let suggested = sqry_plugin_registry::missing_features_for(&unknown_plugin_ids);
                let all_have_features =
                    sqry_plugin_registry::all_unknown_ids_have_features(&unknown_plugin_ids);
                let manifest_str = manifest_path
                    .as_ref()
                    .map(|p| p.display().to_string())
                    .unwrap_or_else(|| "<unknown>".to_string());
                // Three cases: (1) features exist for the missing ids →
                // suggest rebuilding the binary with them; (2) all ids are
                // feature-backed in general → suggest re-indexing with the
                // producing binary; (3) ids are unrecognized → likely a newer
                // sqry version produced the manifest.
                let suggestion = if !suggested.is_empty() {
                    format!(
                        "Rebuild this binary with `cargo install --path sqry-cli --features {}` \
or rebuild the index: `sqry index {} --force`.",
                        suggested.join(","),
                        source_root.display(),
                    )
                } else if all_have_features {
                    format!(
                        "Rebuild the index with the binary that produced it: \
`sqry index {} --force`.",
                        source_root.display(),
                    )
                } else {
                    format!(
                        "The unknown ids do not match any known feature flag — \
the manifest may be from a newer sqry version. \
Rebuild the index: `sqry index {} --force`.",
                        source_root.display(),
                    )
                };
                anyhow::anyhow!(
                    "Incompatible graph at {} — manifest references plugins this binary \
cannot load: {}. Manifest: {}. {}",
                    source_root.display(),
                    unknown_plugin_ids.join(", "),
                    manifest_str,
                    suggestion,
                )
            }
            PluginSelectionStatus::IncompatibleSnapshotFormat { reason } => anyhow::anyhow!(
                "Incompatible graph at {}: {}. Run `sqry index {} --force` to rebuild.",
                source_root.display(),
                reason,
                source_root.display()
            ),
            // NOTE(review): `Exact` normally means a compatible selection, so
            // reaching it inside IncompatibleGraph looks like a defensive
            // fallback — hence the "(no detail)" wording.
            PluginSelectionStatus::Exact => {
                anyhow::anyhow!(
                    "Incompatible graph at {} (no detail); rerun `sqry index --force`",
                    source_root.display()
                )
            }
            other => anyhow::anyhow!(
                "Incompatible graph at {}: {other:?}. Rebuild with `sqry index {} --force`.",
                source_root.display(),
                source_root.display(),
            ),
        },
        GraphAcquisitionError::LoadFailed {
            source_root,
            reason,
        } => anyhow::anyhow!(
            "Failed to load graph at {}: {}",
            source_root.display(),
            reason
        ),
        other => anyhow::anyhow!("graph acquisition failed: {other}"),
    }
}
/// Converts a `u64` to `f64` by first clamping to `u32::MAX`.
///
/// Every value this can produce fits exactly in an `f64`, so the conversion
/// itself is lossless; the "lossy" part is the deliberate clamp of values
/// above `u32::MAX`.
fn u64_to_f64_lossy(value: u64) -> f64 {
    f64::from(u32::try_from(value).unwrap_or(u32::MAX))
}
/// Parses `--var KEY=VALUE` arguments into a map.
///
/// The first `=` separates key from value, so values may themselves contain
/// `=`. Empty values are allowed; empty keys and arguments without `=` are
/// errors. Later duplicates overwrite earlier ones.
fn parse_variable_args(args: &[String]) -> Result<std::collections::HashMap<String, String>> {
    args.iter()
        .map(|arg| {
            let (key, value) = arg.split_once('=').ok_or_else(|| {
                anyhow::anyhow!("Invalid --var format: '{arg}'. Expected KEY=VALUE")
            })?;
            if key.is_empty() {
                bail!("Variable name cannot be empty in --var '{arg}'");
            }
            Ok((key.to_string(), value.to_string()))
        })
        .collect()
}
/// Returns true when `query_str` parses as a join expression (e.g.
/// `(A) CALLS (B)`); unparseable queries are treated as non-joins.
fn is_join_query(query_str: &str) -> bool {
    QueryParser::parse_query(query_str)
        .map(|ast| matches!(ast.root, Expr::Join(_)))
        .unwrap_or(false)
}
fn detect_pipeline_query(
query_str: &str,
) -> Result<Option<sqry_core::query::types::PipelineQuery>> {
match QueryParser::parse_pipeline_query(query_str) {
Ok(result) => Ok(result),
Err(e) => {
if query_str.contains('|') {
Err(anyhow::anyhow!("Pipeline parse error: {e}"))
} else {
Ok(None)
}
}
}
}
/// Executes a join query and prints one line (or one JSON object with
/// `--json`) per matched left/edge/right pair, preceded by a diagnostic
/// summary of how many pairs matched.
fn run_join_query(
    cli: &Cli,
    streams: &mut OutputStreams,
    query_string: &str,
    search_path: &str,
    no_parallel: bool,
    variables: Option<&std::collections::HashMap<String, String>>,
) -> Result<()> {
    let validation_options = build_validation_options(cli);
    let mut executor = create_executor_with_plugins_for_cli(cli, Path::new(search_path))?
        .with_validation_options(validation_options);
    if no_parallel {
        executor = executor.without_parallel();
    }
    let resolved_path = Path::new(search_path);
    let join_results = executor.execute_join(query_string, resolved_path, variables)?;
    // One summary line; the truncation note is appended as a suffix instead
    // of duplicating the entire diagnostic in two branches.
    let truncation_note = if join_results.truncated() {
        " (results truncated — cap reached)"
    } else {
        ""
    };
    streams.write_diagnostic(&format!(
        "Join query: {} pairs matched via {}{}",
        join_results.len(),
        join_results.edge_kind(),
        truncation_note,
    ))?;
    for pair in join_results.iter() {
        let left_name = pair.left.name().unwrap_or_default();
        let left_path = pair
            .left
            .relative_path()
            .map_or_else(|| "<unknown>".to_string(), |p| p.display().to_string());
        let right_name = pair.right.name().unwrap_or_default();
        let right_path = pair
            .right
            .relative_path()
            .map_or_else(|| "<unknown>".to_string(), |p| p.display().to_string());
        if cli.json {
            let json = serde_json::json!({
                "left": {
                    "name": left_name.as_ref(),
                    "kind": pair.left.kind().as_str(),
                    "path": left_path,
                    "line": pair.left.start_line(),
                },
                "edge": pair.edge_kind.to_string(),
                "right": {
                    "name": right_name.as_ref(),
                    "kind": pair.right.kind().as_str(),
                    "path": right_path,
                    "line": pair.right.start_line(),
                },
            });
            streams.write_result(&json.to_string())?;
        } else {
            streams.write_result(&format!(
                "{} ({}:{}) {} {} ({}:{})",
                left_name,
                left_path,
                pair.left.start_line(),
                pair.edge_kind,
                right_name,
                right_path,
                pair.right.start_line(),
            ))?;
        }
    }
    Ok(())
}
/// Runs a pipeline query: executes the base query once, then feeds its
/// results through each aggregation stage, printing one aggregation per
/// stage (JSON when `--json` is set).
fn run_pipeline_query(
    cli: &Cli,
    streams: &mut OutputStreams,
    _query_string: &str,
    search_path: &str,
    pipeline: &sqry_core::query::types::PipelineQuery,
    no_parallel: bool,
    variables: Option<&std::collections::HashMap<String, String>>,
) -> Result<()> {
    let resolved_path = Path::new(search_path);
    let validation_options = build_validation_options(cli);
    let executor = {
        let base = create_executor_with_plugins_for_cli(cli, resolved_path)?
            .with_validation_options(validation_options);
        if no_parallel {
            base.without_parallel()
        } else {
            base
        }
    };
    // The base query is re-serialized from the parsed pipeline so stages
    // operate on exactly the query the parser recognized.
    let base_query = sqry_core::query::parsed_query::serialize_query(&pipeline.query);
    let results =
        executor.execute_on_graph_with_variables(&base_query, resolved_path, variables)?;
    for stage in &pipeline.stages {
        let aggregation = sqry_core::query::execute_pipeline_stage(&results, stage);
        if cli.json {
            render_aggregation_json(streams, &aggregation)?;
        } else {
            streams.write_result(&aggregation.to_string())?;
        }
    }
    Ok(())
}
/// Serializes one pipeline aggregation result as a single JSON line.
///
/// Each variant is tagged via a `"type"` field; grouped counts are emitted as
/// arrays of `{"value": …, "count": …}` objects so consumers get a stable,
/// order-preserving shape.
fn render_aggregation_json(
    streams: &mut OutputStreams,
    aggregation: &sqry_core::query::pipeline::AggregationResult,
) -> Result<()> {
    use sqry_core::query::pipeline::AggregationResult;
    let json = match aggregation {
        AggregationResult::Count(r) => serde_json::json!({
            "type": "count",
            "total": r.total,
        }),
        AggregationResult::GroupBy(r) => serde_json::json!({
            "type": "group_by",
            "field": r.field,
            "groups": r.groups.iter().map(|(k, v)| serde_json::json!({"value": k, "count": v})).collect::<Vec<_>>(),
        }),
        AggregationResult::Top(r) => serde_json::json!({
            "type": "top",
            "field": r.field,
            "n": r.n,
            "entries": r.entries.iter().map(|(k, v)| serde_json::json!({"value": k, "count": v})).collect::<Vec<_>>(),
        }),
        AggregationResult::Stats(r) => serde_json::json!({
            "type": "stats",
            "total": r.total,
            "by_kind": r.by_kind.iter().map(|(k, v)| serde_json::json!({"value": k, "count": v})).collect::<Vec<_>>(),
            "by_lang": r.by_lang.iter().map(|(k, v)| serde_json::json!({"value": k, "count": v})).collect::<Vec<_>>(),
            "by_visibility": r.by_visibility.iter().map(|(k, v)| serde_json::json!({"value": k, "count": v})).collect::<Vec<_>>(),
        }),
    };
    streams.write_result(&json.to_string())?;
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use sqry_core::relations::CallIdentityKind;

    /// Builds `count` placeholder symbols for the `apply_symbol_limit` tests;
    /// replaces three previously duplicated inline constructions.
    fn make_display_symbols(count: usize) -> Vec<DisplaySymbol> {
        (0..count)
            .map(|i| DisplaySymbol {
                name: format!("sym{i}"),
                qualified_name: format!("sym{i}"),
                kind: "function".to_string(),
                file_path: std::path::PathBuf::from("a.rs"),
                start_line: i as _,
                start_column: 0,
                end_line: i as _,
                end_column: 0,
                metadata: std::collections::HashMap::new(),
                caller_identity: None,
                callee_identity: None,
            })
            .collect()
    }

    /// Sets `SQRY_CACHE_DEBUG` to `value`, samples the flag, then removes the
    /// variable again. Callers must run under `#[serial_test::serial]`.
    fn env_debug_with(value: &str) -> bool {
        unsafe {
            std::env::set_var("SQRY_CACHE_DEBUG", value);
        }
        let result = env_debug_cache_enabled();
        unsafe {
            std::env::remove_var("SQRY_CACHE_DEBUG");
        }
        result
    }

    #[test]
    fn test_u64_to_f64_lossy_zero() {
        assert!((u64_to_f64_lossy(0) - 0.0).abs() < f64::EPSILON);
    }

    #[test]
    fn test_u64_to_f64_lossy_small_values() {
        assert!((u64_to_f64_lossy(1) - 1.0).abs() < f64::EPSILON);
        assert!((u64_to_f64_lossy(100) - 100.0).abs() < f64::EPSILON);
        assert!((u64_to_f64_lossy(1000) - 1000.0).abs() < f64::EPSILON);
    }

    #[test]
    fn test_u64_to_f64_lossy_u32_max() {
        let u32_max = u64::from(u32::MAX);
        assert!((u64_to_f64_lossy(u32_max) - f64::from(u32::MAX)).abs() < f64::EPSILON);
    }

    #[test]
    fn test_u64_to_f64_lossy_overflow_clamps_to_u32_max() {
        let large_value = u64::from(u32::MAX) + 1;
        assert!((u64_to_f64_lossy(large_value) - f64::from(u32::MAX)).abs() < f64::EPSILON);
    }

    #[test]
    fn test_format_cache_status_full_hit() {
        let status = sqry_core::query::CacheStatus {
            parse_cache_hit: true,
            result_cache_hit: true,
        };
        assert_eq!(format_cache_status(&status), "HIT (100% cached)");
    }

    #[test]
    fn test_format_cache_status_parse_hit_only() {
        let status = sqry_core::query::CacheStatus {
            parse_cache_hit: true,
            result_cache_hit: false,
        };
        assert_eq!(
            format_cache_status(&status),
            "PARTIAL HIT (query cached, results computed)"
        );
    }

    #[test]
    fn test_format_cache_status_result_hit_only() {
        let status = sqry_core::query::CacheStatus {
            parse_cache_hit: false,
            result_cache_hit: true,
        };
        assert_eq!(
            format_cache_status(&status),
            "PARTIAL HIT (query parsed, results cached)"
        );
    }

    #[test]
    fn test_format_cache_status_full_miss() {
        let status = sqry_core::query::CacheStatus {
            parse_cache_hit: false,
            result_cache_hit: false,
        };
        assert_eq!(format_cache_status(&status), "MISS (first run)");
    }

    #[test]
    fn test_format_execution_steps_empty() {
        let steps: Vec<sqry_core::query::ExecutionStep> = vec![];
        assert_eq!(format_execution_steps(&steps), "");
    }

    #[test]
    fn test_format_execution_steps_single() {
        let steps = vec![sqry_core::query::ExecutionStep {
            step_num: 1,
            operation: "Parse query".to_string(),
            result_count: 0,
            time_ms: 5,
        }];
        assert_eq!(format_execution_steps(&steps), " 1. Parse query (5ms)");
    }

    #[test]
    fn test_format_execution_steps_multiple() {
        let steps = vec![
            sqry_core::query::ExecutionStep {
                step_num: 1,
                operation: "Parse".to_string(),
                result_count: 100,
                time_ms: 2,
            },
            sqry_core::query::ExecutionStep {
                step_num: 2,
                operation: "Optimize".to_string(),
                result_count: 50,
                time_ms: 3,
            },
            sqry_core::query::ExecutionStep {
                step_num: 3,
                operation: "Execute".to_string(),
                result_count: 25,
                time_ms: 10,
            },
        ];
        let expected = " 1. Parse (2ms)\n 2. Optimize (3ms)\n 3. Execute (10ms)";
        assert_eq!(format_execution_steps(&steps), expected);
    }

    #[test]
    fn test_expr_has_repo_predicate_simple_repo() {
        let query = QueryParser::parse_query("repo:myrepo").unwrap();
        assert!(expr_has_repo_predicate(&query.root));
    }

    #[test]
    fn test_expr_has_repo_predicate_no_repo() {
        let query = QueryParser::parse_query("kind:function").unwrap();
        assert!(!expr_has_repo_predicate(&query.root));
    }

    #[test]
    fn test_expr_has_repo_predicate_nested_and() {
        let query = QueryParser::parse_query("kind:function AND repo:myrepo").unwrap();
        assert!(expr_has_repo_predicate(&query.root));
    }

    #[test]
    fn test_expr_has_repo_predicate_nested_or() {
        let query = QueryParser::parse_query("kind:function OR repo:myrepo").unwrap();
        assert!(expr_has_repo_predicate(&query.root));
    }

    #[test]
    fn test_expr_has_repo_predicate_nested_not() {
        let query = QueryParser::parse_query("NOT repo:myrepo").unwrap();
        assert!(expr_has_repo_predicate(&query.root));
    }

    #[test]
    fn test_expr_has_repo_predicate_complex_no_repo() {
        let query = QueryParser::parse_query("kind:function AND name:foo OR lang:rust").unwrap();
        assert!(!expr_has_repo_predicate(&query.root));
    }

    #[test]
    fn test_relation_context_no_relations() {
        let ctx = RelationDisplayContext::from_query("kind:function");
        assert!(ctx.caller_targets.is_empty());
        assert!(ctx.callee_targets.is_empty());
    }

    #[test]
    fn test_relation_context_with_callers() {
        let ctx = RelationDisplayContext::from_query("callers:foo");
        assert_eq!(ctx.caller_targets, vec!["foo"]);
        assert!(ctx.callee_targets.is_empty());
    }

    #[test]
    fn test_relation_context_with_callees() {
        let ctx = RelationDisplayContext::from_query("callees:bar");
        assert!(ctx.caller_targets.is_empty());
        assert_eq!(ctx.callee_targets, vec!["bar"]);
    }

    #[test]
    fn test_relation_context_with_both() {
        let ctx = RelationDisplayContext::from_query("callers:foo AND callees:bar");
        assert_eq!(ctx.caller_targets, vec!["foo"]);
        assert_eq!(ctx.callee_targets, vec!["bar"]);
    }

    #[test]
    fn test_relation_context_invalid_query() {
        let ctx = RelationDisplayContext::from_query("invalid query syntax ???");
        assert!(ctx.caller_targets.is_empty());
        assert!(ctx.callee_targets.is_empty());
    }

    #[test]
    fn test_build_identity_from_qualified_name_preserves_ruby_instance_display() {
        let identity = build_identity_from_qualified_name(
            "Admin::Users::Controller::show",
            "method",
            Some("ruby"),
            false,
        )
        .expect("ruby instance identity");
        assert_eq!(identity.qualified, "Admin::Users::Controller#show");
        assert_eq!(identity.method_kind, CallIdentityKind::Instance);
    }

    #[test]
    fn test_build_identity_from_qualified_name_preserves_ruby_singleton_display() {
        let identity = build_identity_from_qualified_name(
            "Admin::Users::Controller::show",
            "method",
            Some("ruby"),
            true,
        )
        .expect("ruby singleton identity");
        assert_eq!(identity.qualified, "Admin::Users::Controller.show");
        assert_eq!(identity.method_kind, CallIdentityKind::Singleton);
    }

    #[test]
    fn test_ensure_repo_not_present_ok() {
        let result = ensure_repo_predicate_not_present("kind:function");
        assert!(result.is_ok());
    }

    #[test]
    fn test_ensure_repo_not_present_fails_with_repo() {
        let result = ensure_repo_predicate_not_present("repo:myrepo");
        assert!(result.is_err());
        assert!(
            result
                .unwrap_err()
                .to_string()
                .contains("repo: filters are only supported")
        );
    }

    #[test]
    fn test_ensure_repo_not_present_fails_with_nested_repo() {
        let result = ensure_repo_predicate_not_present("kind:function AND repo:myrepo");
        assert!(result.is_err());
    }

    #[test]
    fn test_ensure_repo_not_present_fallback_text_check() {
        let result = ensure_repo_predicate_not_present("invalid??? repo:something");
        assert!(result.is_err());
    }

    #[test]
    fn test_parse_variable_args_empty() {
        let result = parse_variable_args(&[]).unwrap();
        assert!(result.is_empty());
    }

    #[test]
    fn test_parse_variable_args_single_key_value() {
        let args = vec!["FOO=bar".to_string()];
        let result = parse_variable_args(&args).unwrap();
        assert_eq!(result.len(), 1);
        assert_eq!(result.get("FOO"), Some(&"bar".to_string()));
    }

    #[test]
    fn test_parse_variable_args_multiple() {
        let args = vec!["A=1".to_string(), "B=hello world".to_string()];
        let result = parse_variable_args(&args).unwrap();
        assert_eq!(result.len(), 2);
        assert_eq!(result.get("A"), Some(&"1".to_string()));
        assert_eq!(result.get("B"), Some(&"hello world".to_string()));
    }

    #[test]
    fn test_parse_variable_args_value_with_equals() {
        let args = vec!["KEY=val=ue".to_string()];
        let result = parse_variable_args(&args).unwrap();
        assert_eq!(result.get("KEY"), Some(&"val=ue".to_string()));
    }

    #[test]
    fn test_parse_variable_args_no_equals_errors() {
        let args = vec!["NOEQUALS".to_string()];
        let err = parse_variable_args(&args).unwrap_err();
        assert!(
            err.to_string().contains("Invalid --var format"),
            "Unexpected error: {err}"
        );
    }

    #[test]
    fn test_parse_variable_args_empty_key_errors() {
        let args = vec!["=value".to_string()];
        let err = parse_variable_args(&args).unwrap_err();
        assert!(
            err.to_string().contains("Variable name cannot be empty"),
            "Unexpected error: {err}"
        );
    }

    #[test]
    fn test_parse_variable_args_empty_value_allowed() {
        let args = vec!["KEY=".to_string()];
        let result = parse_variable_args(&args).unwrap();
        assert_eq!(result.get("KEY"), Some(&String::new()));
    }

    #[test]
    fn test_is_join_query_non_join() {
        assert!(!is_join_query("kind:function"));
        assert!(!is_join_query("name:foo AND kind:method"));
    }

    #[test]
    fn test_is_join_query_invalid_query_returns_false() {
        assert!(!is_join_query("invalid ??? syntax {{{"));
    }

    #[test]
    fn test_is_join_query_positive() {
        assert!(
            is_join_query("(kind:function) CALLS (kind:function)"),
            "CALLS join expression must be detected as a join query"
        );
    }

    #[test]
    fn test_detect_pipeline_query_no_pipe_returns_none() {
        let result = detect_pipeline_query("kind:function").unwrap();
        assert!(result.is_none());
    }

    #[test]
    fn test_detect_pipeline_query_invalid_without_pipe_returns_none() {
        let result = detect_pipeline_query("invalid query !!!").unwrap();
        assert!(result.is_none());
    }

    // Renamed from `test_detect_pipeline_query_invalid_with_pipe_errors`:
    // the body exercises a *valid* pipeline query and asserts Ok(Some(_)),
    // so the old name contradicted the behavior under test.
    #[test]
    fn test_detect_pipeline_query_valid_with_pipe_returns_some() {
        let result = detect_pipeline_query("kind:function | count");
        assert!(
            result.is_ok(),
            "A valid pipeline query must return Ok, got: {result:?}"
        );
        assert!(
            result.unwrap().is_some(),
            "A valid pipeline query must return Ok(Some(_))"
        );
    }

    #[test]
    fn test_apply_symbol_limit_no_truncation() {
        let mut symbols = make_display_symbols(5);
        let info = apply_symbol_limit(&mut symbols, 10);
        assert_eq!(symbols.len(), 5);
        assert!(!info.truncated);
        assert_eq!(info.total_matches, 5);
        assert_eq!(info.limit, 10);
    }

    #[test]
    fn test_apply_symbol_limit_truncates() {
        let mut symbols = make_display_symbols(20);
        let info = apply_symbol_limit(&mut symbols, 5);
        assert_eq!(symbols.len(), 5);
        assert!(info.truncated);
        assert_eq!(info.total_matches, 20);
        assert_eq!(info.limit, 5);
    }

    #[test]
    fn test_apply_symbol_limit_exact_boundary() {
        let mut symbols = make_display_symbols(5);
        let info = apply_symbol_limit(&mut symbols, 5);
        assert_eq!(symbols.len(), 5);
        assert!(!info.truncated, "Exact boundary should not truncate");
    }

    #[test]
    fn test_u64_to_f64_lossy_large_values_clamp_to_u32_max() {
        let very_large = u64::MAX;
        let result = u64_to_f64_lossy(very_large);
        assert!((result - f64::from(u32::MAX)).abs() < f64::EPSILON);
    }

    #[serial_test::serial]
    #[test]
    fn test_env_debug_cache_disabled_by_default() {
        unsafe {
            std::env::remove_var("SQRY_CACHE_DEBUG");
        }
        assert!(!env_debug_cache_enabled());
    }

    #[serial_test::serial]
    #[test]
    fn test_env_debug_cache_enabled_with_1() {
        assert!(env_debug_with("1"));
    }

    #[serial_test::serial]
    #[test]
    fn test_env_debug_cache_enabled_with_true() {
        assert!(env_debug_with("true"));
    }

    #[serial_test::serial]
    #[test]
    fn test_env_debug_cache_enabled_with_true_uppercase() {
        assert!(env_debug_with("TRUE"));
    }

    #[serial_test::serial]
    #[test]
    fn test_env_debug_cache_disabled_with_zero() {
        assert!(!env_debug_with("0"));
    }

    #[test]
    fn test_build_query_stats_with_index() {
        let stats = build_query_stats(true, 10);
        assert!(stats.used_index);
    }

    #[test]
    fn test_build_query_stats_without_index() {
        let stats = build_query_stats(false, 10);
        assert!(!stats.used_index);
    }
}