1use std::{fs, path::Path};
6
7use anyhow::{Context, Result};
8use fraiseql_core::schema::{CURRENT_SCHEMA_FORMAT_VERSION, CompiledSchema, FieldType};
9use tracing::{info, warn};
10
11use crate::{
12 config::TomlProjectConfig,
13 schema::{
14 IntermediateSchema, OptimizationReport, SchemaConverter, SchemaOptimizer, SchemaValidator,
15 database_validator::validate_schema_against_database,
16 },
17};
18
/// Options controlling a single schema compilation run.
///
/// Construct with [`CompileOptions::new`] and refine with the `with_*`
/// builder methods, or fill the public fields directly.
#[derive(Debug, Default)]
pub struct CompileOptions<'a> {
    /// Path to the primary input file (`fraiseql.toml` or a legacy `schema.json`).
    pub input: &'a str,
    /// Optional path to a `types.json` produced by a language frontend.
    pub types: Option<&'a str>,
    /// Optional directory to auto-discover schema files from.
    pub schema_dir: Option<&'a str>,
    /// Explicit type-definition files; any non-empty explicit list overrides discovery.
    pub type_files: Vec<String>,
    /// Explicit query-definition files.
    pub query_files: Vec<String>,
    /// Explicit mutation-definition files.
    pub mutation_files: Vec<String>,
    /// Optional database URL used for indexed/native-column validation.
    pub database: Option<&'a str>,
}
37
38impl<'a> CompileOptions<'a> {
39 #[must_use]
41 pub fn new(input: &'a str) -> Self {
42 Self {
43 input,
44 ..Default::default()
45 }
46 }
47
48 #[must_use]
50 pub fn with_types(mut self, types: &'a str) -> Self {
51 self.types = Some(types);
52 self
53 }
54
55 #[must_use]
57 pub fn with_schema_dir(mut self, schema_dir: &'a str) -> Self {
58 self.schema_dir = Some(schema_dir);
59 self
60 }
61
62 #[must_use]
64 pub fn with_database(mut self, database: &'a str) -> Self {
65 self.database = Some(database);
66 self
67 }
68}
69
70#[allow(clippy::cognitive_complexity)] fn load_intermediate_schema(
79 toml_path: &str,
80 type_files: &[String],
81 query_files: &[String],
82 mutation_files: &[String],
83 schema_dir: Option<&str>,
84 types_path: Option<&str>,
85) -> Result<IntermediateSchema> {
86 if !type_files.is_empty() || !query_files.is_empty() || !mutation_files.is_empty() {
87 info!("Mode: Explicit file lists");
88 return crate::schema::SchemaMerger::merge_explicit_files(
89 toml_path,
90 type_files,
91 query_files,
92 mutation_files,
93 )
94 .context("Failed to load explicit schema files");
95 }
96 if let Some(dir) = schema_dir {
97 info!("Mode: Auto-discovery from directory: {}", dir);
98 return crate::schema::SchemaMerger::merge_from_directory(toml_path, dir)
99 .context("Failed to load schema from directory");
100 }
101 if let Some(types) = types_path {
102 info!("Mode: Language + TOML (types.json + fraiseql.toml)");
103 return crate::schema::SchemaMerger::merge_files(types, toml_path)
104 .context("Failed to merge types.json with TOML");
105 }
106 info!("Mode: TOML-based (checking for domain discovery...)");
107 if let Ok(schema) = crate::schema::SchemaMerger::merge_from_domains(toml_path) {
108 return Ok(schema);
109 }
110 info!("No domains configured, checking for TOML includes...");
111 if let Ok(schema) = crate::schema::SchemaMerger::merge_with_includes(toml_path) {
112 return Ok(schema);
113 }
114 info!("No includes configured, using TOML-only definitions");
115 crate::schema::SchemaMerger::merge_toml_only(toml_path)
116 .context("Failed to load schema from TOML")
117}
118
/// Compiles the schema at `opts.input` into a [`CompiledSchema`] plus an
/// [`OptimizationReport`].
///
/// Pipeline: load intermediate schema (TOML or legacy JSON) → apply security
/// config → validate → convert → optimize → stamp format version →
/// infer/validate native columns → emit compatibility warnings.
///
/// # Errors
///
/// Fails when the input is missing or unparsable, validation reports errors,
/// conversion/optimization fails, or (when `opts.database` is set) database
/// validation cannot connect.
#[allow(clippy::cognitive_complexity)]
pub async fn compile_to_schema(
    opts: CompileOptions<'_>,
) -> Result<(CompiledSchema, OptimizationReport)> {
    info!("Compiling schema: {}", opts.input);

    let input_path = Path::new(opts.input);
    if !input_path.exists() {
        anyhow::bail!("Input file not found: {}", opts.input);
    }

    // The file extension (case-insensitive) selects between the TOML
    // workflow and the legacy schema.json workflow.
    let is_toml = input_path
        .extension()
        .and_then(|ext| ext.to_str())
        .is_some_and(|ext| ext.eq_ignore_ascii_case("toml"));
    let mut intermediate: IntermediateSchema = if is_toml {
        info!("Using TOML-based workflow");
        load_intermediate_schema(
            opts.input,
            &opts.type_files,
            &opts.query_files,
            &opts.mutation_files,
            opts.schema_dir,
            opts.types,
        )?
    } else {
        info!("Using legacy JSON workflow");
        let schema_json = fs::read_to_string(input_path).context("Failed to read schema.json")?;

        info!("Parsing intermediate schema...");
        serde_json::from_str(&schema_json).context("Failed to parse schema.json")?
    };

    // Legacy JSON workflow only: a sibling fraiseql.toml (if present in the
    // working directory) supplies the security configuration.
    if !is_toml && Path::new("fraiseql.toml").exists() {
        info!("Loading security configuration from fraiseql.toml...");
        match TomlProjectConfig::from_file("fraiseql.toml") {
            Ok(config) => {
                info!("Validating security configuration...");
                config.validate()?;

                info!("Applying security configuration to schema...");
                let security_json = config.fraiseql.security.to_json();
                intermediate.security = Some(security_json);

                info!("Security configuration applied successfully");
            },
            Err(e) => {
                // A present-but-broken config is a hard error rather than a
                // silent fallback to defaults.
                anyhow::bail!(
                    "Failed to parse fraiseql.toml: {e}\n\
                     Fix the configuration file or remove it to use defaults."
                );
            },
        }
    } else {
        // NOTE(review): this branch is also taken when the input itself is
        // TOML, so the "No fraiseql.toml found" message can be misleading
        // in that case — confirm intent.
        info!("No fraiseql.toml found, using default security configuration");
    }

    info!("Validating schema structure...");
    let validation_report =
        SchemaValidator::validate(&intermediate).context("Failed to validate schema")?;

    if !validation_report.is_valid() {
        validation_report.print();
        anyhow::bail!("Schema validation failed with {} error(s)", validation_report.error_count());
    }

    // Warnings alone do not fail the build, but they are still surfaced.
    if validation_report.warning_count() > 0 {
        validation_report.print();
    }

    info!("Converting to compiled format...");
    let mut schema = SchemaConverter::convert(intermediate)
        .context("Failed to convert schema to compiled format")?;

    info!("Analyzing schema for optimization opportunities...");
    let report = SchemaOptimizer::optimize(&mut schema).context("Failed to optimize schema")?;

    schema.schema_format_version = Some(CURRENT_SCHEMA_FORMAT_VERSION);

    // Static inference: ID/UUID-typed arguments become `uuid` native columns.
    infer_native_columns_from_arg_types(&mut schema);

    if let Some(db_url) = opts.database {
        info!("Validating indexed columns against database...");
        validate_indexed_columns(&schema, db_url).await?;

        info!("Validating native columns for direct query arguments...");
        let pg_introspector =
            build_postgres_introspector(db_url).context("Failed to connect for native column validation")?;
        let db_report = validate_schema_against_database(&schema, &pg_introspector).await?;
        for w in &db_report.warnings {
            warn!("{w}");
        }
        // Database introspection results replace the statically inferred
        // native-column maps for any query the report covers.
        for query in &mut schema.queries {
            if let Some(cols) = db_report.native_columns.get(&query.name) {
                query.native_columns = cols.clone();
            }
        }
    } else {
        // Without a database connection, only warn about arguments that
        // remain unresolved after static inference.
        for query in &schema.queries {
            if query.sql_source.is_none() {
                continue;
            }
            let unresolved: Vec<_> = query
                .arguments
                .iter()
                .filter(|a| !NATIVE_COLUMN_SKIP_ARGS.contains(&a.name.as_str()))
                .filter(|a| !query.native_columns.contains_key(&a.name))
                .collect();
            if !unresolved.is_empty() {
                let names: Vec<_> = unresolved.iter().map(|a| a.name.as_str()).collect();
                warn!(
                    "query `{}`: argument(s) {:?} on `{}` could not be resolved to native \
                     columns — no --database URL provided. These filters will use JSONB \
                     extraction. Provide --database or annotate with native_columns.",
                    query.name,
                    names,
                    query.sql_source.as_deref().unwrap_or("?"),
                );
            }
        }
    }

    check_sqlite_compatibility_warnings(&schema, opts.input, is_toml, opts.database);

    warn_wide_cascade_mutations(&schema);

    Ok((schema, report))
}
282
283#[allow(clippy::too_many_arguments)] pub async fn run(
315 input: &str,
316 types: Option<&str>,
317 schema_dir: Option<&str>,
318 type_files: Vec<String>,
319 query_files: Vec<String>,
320 mutation_files: Vec<String>,
321 output: &str,
322 check: bool,
323 database: Option<&str>,
324) -> Result<()> {
325 let opts = CompileOptions {
326 input,
327 types,
328 schema_dir,
329 type_files,
330 query_files,
331 mutation_files,
332 database,
333 };
334 let (schema, optimization_report) = compile_to_schema(opts).await?;
335
336 if check {
338 println!("✓ Schema is valid");
339 println!(" Types: {}", schema.types.len());
340 println!(" Queries: {}", schema.queries.len());
341 println!(" Mutations: {}", schema.mutations.len());
342 optimization_report.print();
343 return Ok(());
344 }
345
346 info!("Writing compiled schema to: {output}");
348 let output_json =
349 serde_json::to_string_pretty(&schema).context("Failed to serialize compiled schema")?;
350 fs::write(output, output_json).context("Failed to write compiled schema")?;
351
352 println!("✓ Schema compiled successfully");
354 println!(" Input: {input}");
355 println!(" Output: {output}");
356 println!(" Types: {}", schema.types.len());
357 println!(" Queries: {}", schema.queries.len());
358 println!(" Mutations: {}", schema.mutations.len());
359 optimization_report.print();
360
361 Ok(())
362}
363
364fn check_sqlite_compatibility_warnings(
369 schema: &CompiledSchema,
370 input_path: &str,
371 is_toml: bool,
372 database_url: Option<&str>,
373) {
374 let target_is_sqlite = database_url
375 .is_some_and(|url| url.to_ascii_lowercase().starts_with("sqlite://"))
376 || is_toml && detect_sqlite_target_in_toml(input_path);
377
378 if !target_is_sqlite {
379 return;
380 }
381
382 let mutation_count = schema.mutations.len();
383 let relay_count = schema.queries.iter().filter(|q| q.relay).count();
384 let subscription_count = schema.subscriptions.len();
385
386 if mutation_count > 0 {
387 warn!(
388 "Schema contains {} mutation(s) but target database is SQLite. \
389 Mutations are not supported on SQLite. \
390 See: https://fraiseql.dev/docs/database-compatibility",
391 mutation_count,
392 );
393 }
394 if relay_count > 0 {
395 warn!(
396 "Schema contains {} relay query/queries but target database is SQLite. \
397 Relay (keyset pagination) is not supported on SQLite. \
398 See: https://fraiseql.dev/docs/database-compatibility",
399 relay_count,
400 );
401 }
402 if subscription_count > 0 {
403 warn!(
404 "Schema contains {} subscription(s) but target database is SQLite. \
405 Subscriptions are not supported on SQLite. \
406 See: https://fraiseql.dev/docs/database-compatibility",
407 subscription_count,
408 );
409 }
410}
411
412fn detect_sqlite_target_in_toml(toml_path: &str) -> bool {
417 let Ok(content) = fs::read_to_string(toml_path) else {
418 return false;
419 };
420 let Ok(toml_schema) = toml::from_str::<crate::config::toml_schema::TomlSchema>(&content) else {
421 return false;
422 };
423 toml_schema.schema.database_target.to_ascii_lowercase().contains("sqlite")
424}
425
/// Minimum combined number of invalidated views + fact tables at which a
/// mutation is flagged as having a "wide" cascade fan-out.
const WIDE_FANOUT_THRESHOLD: usize = 3;
429
430fn wide_cascade_mutations(
436 schema: &CompiledSchema,
437 threshold: usize,
438) -> Vec<&fraiseql_core::schema::MutationDefinition> {
439 schema
440 .mutations
441 .iter()
442 .filter(|m| m.invalidates_views.len() + m.invalidates_fact_tables.len() >= threshold)
443 .collect()
444}
445
446fn warn_wide_cascade_mutations(schema: &CompiledSchema) {
459 for mutation in wide_cascade_mutations(schema, WIDE_FANOUT_THRESHOLD) {
460 let total = mutation.invalidates_views.len() + mutation.invalidates_fact_tables.len();
461
462 let mut targets: Vec<&str> = mutation
464 .invalidates_views
465 .iter()
466 .chain(mutation.invalidates_fact_tables.iter())
467 .map(String::as_str)
468 .collect();
469 targets.sort_unstable();
470 targets.dedup();
471
472 let alter_stmts: Vec<String> = targets
475 .iter()
476 .map(|&name| {
477 let table = name
478 .strip_prefix("tv_")
479 .or_else(|| name.strip_prefix("v_"))
480 .map_or_else(|| name.to_string(), |rest| format!("tb_{rest}"));
481 format!("ALTER TABLE {table} SET (fillfactor = 75);")
482 })
483 .collect();
484
485 warn!(
486 "mutation '{}' has a wide invalidation fan-out ({} targets: [{}]). \
487 Under high write load, HOT-update page slots on these tables may be \
488 exhausted, forcing full-page writes and reducing mutation throughput. \
489 Set fillfactor=70-80 on the backing tables: {} \
490 Monitor HOT efficiency: SELECT relname, \
491 n_tup_hot_upd * 100 / NULLIF(n_tup_upd, 0) AS hot_pct \
492 FROM pg_stat_user_tables WHERE n_tup_upd > 0 ORDER BY hot_pct;",
493 mutation.name,
494 total,
495 targets.join(", "),
496 alter_stmts.join(" "),
497 );
498 }
499}
500
/// Builds a [`fraiseql_core::db::postgres::PostgresIntrospector`] backed by a
/// small deadpool connection pool for `db_url`.
///
/// # Errors
///
/// Returns an error when the connection pool cannot be created.
fn build_postgres_introspector(
    db_url: &str,
) -> Result<fraiseql_core::db::postgres::PostgresIntrospector> {
    use deadpool_postgres::{Config, ManagerConfig, RecyclingMethod, Runtime};
    use tokio_postgres::NoTls;

    let mut cfg = Config::new();
    cfg.url = Some(db_url.to_string());
    cfg.manager = Some(ManagerConfig {
        // Fast recycling method (see deadpool docs for exact semantics).
        recycling_method: RecyclingMethod::Fast,
    });
    // Validation work is short-lived; a 2-connection pool is sufficient.
    cfg.pool = Some(deadpool_postgres::PoolConfig::new(2));

    let pool = cfg
        .create_pool(Some(Runtime::Tokio1), NoTls)
        .context("Failed to create connection pool for database validation")?;

    Ok(fraiseql_core::db::postgres::PostgresIntrospector::new(pool))
}
527
528async fn validate_indexed_columns(schema: &CompiledSchema, db_url: &str) -> Result<()> {
543 use deadpool_postgres::{Config, ManagerConfig, RecyclingMethod, Runtime};
544 use fraiseql_core::db::postgres::PostgresIntrospector;
545 use tokio_postgres::NoTls;
546
547 let mut cfg = Config::new();
549 cfg.url = Some(db_url.to_string());
550 cfg.manager = Some(ManagerConfig {
551 recycling_method: RecyclingMethod::Fast,
552 });
553 cfg.pool = Some(deadpool_postgres::PoolConfig::new(2));
554
555 let pool = cfg
556 .create_pool(Some(Runtime::Tokio1), NoTls)
557 .context("Failed to create connection pool for indexed column validation")?;
558
559 let introspector = PostgresIntrospector::new(pool);
560
561 let mut total_indexed = 0;
562 let mut total_views = 0;
563
564 for query in &schema.queries {
566 if let Some(view_name) = &query.sql_source {
567 total_views += 1;
568
569 match introspector.get_indexed_nested_columns(view_name).await {
571 Ok(indexed_cols) => {
572 if !indexed_cols.is_empty() {
573 info!(
574 "View '{}': found {} indexed column(s): {:?}",
575 view_name,
576 indexed_cols.len(),
577 indexed_cols
578 );
579 total_indexed += indexed_cols.len();
580 }
581 },
582 Err(e) => {
583 warn!(
584 "Could not introspect view '{}': {}. Skipping indexed column check.",
585 view_name, e
586 );
587 },
588 }
589 }
590 }
591
592 println!("✓ Indexed column validation complete");
593 println!(" Views checked: {total_views}");
594 println!(" Indexed columns found: {total_indexed}");
595
596 Ok(())
597}
598
/// Argument names that are pagination/filtering auto-params rather than
/// entity fields; these must never be mapped to native columns.
const NATIVE_COLUMN_SKIP_ARGS: &[&str] =
    &["where", "limit", "offset", "orderBy", "first", "last", "after", "before"];
602
603fn infer_native_columns_from_arg_types(schema: &mut CompiledSchema) {
615 for query in &mut schema.queries {
616 if query.sql_source.is_none() || query.jsonb_column.is_empty() {
617 continue;
618 }
619 for arg in &query.arguments {
620 if NATIVE_COLUMN_SKIP_ARGS.contains(&arg.name.as_str()) {
621 continue;
622 }
623 if query.native_columns.contains_key(&arg.name) {
624 continue; }
626 if matches!(arg.arg_type, FieldType::Id | FieldType::Uuid) {
627 query.native_columns.insert(arg.name.clone(), "uuid".to_string());
628 }
629 }
630 }
631}
632
#[cfg(test)]
mod tests {
    //! Unit tests for wide-cascade detection and native-column inference,
    //! plus smoke tests that representative `CompiledSchema` values can be
    //! constructed.

    use std::collections::HashMap;

    use fraiseql_core::{
        schema::{
            ArgumentDefinition, AutoParams, CompiledSchema, CursorType, FieldDefinition,
            FieldDenyPolicy, FieldType, QueryDefinition, TypeDefinition,
        },
        validation::CustomTypeRegistry,
    };
    use indexmap::IndexMap;

    use super::infer_native_columns_from_arg_types;

    use fraiseql_core::schema::MutationDefinition;

    use super::{WIDE_FANOUT_THRESHOLD, wide_cascade_mutations};

    /// Builds a mutation fixture with the given invalidated views and fact
    /// tables (the inputs to the fan-out threshold check).
    fn mutation_with_fanout(
        name: &str,
        views: &[&str],
        fact_tables: &[&str],
    ) -> MutationDefinition {
        let mut m = MutationDefinition::new(name, "SomeResult");
        m.invalidates_views = views.iter().map(|s| (*s).to_string()).collect();
        m.invalidates_fact_tables = fact_tables.iter().map(|s| (*s).to_string()).collect();
        m
    }

    #[test]
    fn test_wide_cascade_below_threshold_not_flagged() {
        let schema = CompiledSchema {
            mutations: vec![mutation_with_fanout("update", &["tv_user", "tv_post"], &[])],
            ..Default::default()
        };
        assert!(
            wide_cascade_mutations(&schema, WIDE_FANOUT_THRESHOLD).is_empty(),
            "2 targets is below threshold of 3"
        );
    }

    #[test]
    fn test_wide_cascade_at_threshold_flagged() {
        // The threshold is inclusive: exactly WIDE_FANOUT_THRESHOLD targets flags.
        let schema = CompiledSchema {
            mutations: vec![mutation_with_fanout(
                "updateUserWithPosts",
                &["tv_user", "tv_post", "tv_comment"],
                &[],
            )],
            ..Default::default()
        };
        let flagged = wide_cascade_mutations(&schema, WIDE_FANOUT_THRESHOLD);
        assert_eq!(flagged.len(), 1);
        assert_eq!(flagged[0].name, "updateUserWithPosts");
    }

    #[test]
    fn test_wide_cascade_views_plus_fact_tables_counted_together() {
        let schema = CompiledSchema {
            mutations: vec![mutation_with_fanout(
                "createOrder",
                &["tv_order", "tv_order_item"],
                &["tf_sales"],
            )],
            ..Default::default()
        };
        let flagged = wide_cascade_mutations(&schema, WIDE_FANOUT_THRESHOLD);
        assert_eq!(flagged.len(), 1, "2 views + 1 fact table = 3 total, meets threshold");
    }

    #[test]
    fn test_wide_cascade_only_wide_mutations_flagged() {
        let schema = CompiledSchema {
            mutations: vec![
                mutation_with_fanout("narrow", &["tv_user"], &[]),
                mutation_with_fanout("wide", &["tv_user", "tv_post", "tv_comment"], &[]),
            ],
            ..Default::default()
        };
        let flagged = wide_cascade_mutations(&schema, WIDE_FANOUT_THRESHOLD);
        assert_eq!(flagged.len(), 1);
        assert_eq!(flagged[0].name, "wide");
    }

    #[test]
    fn test_wide_cascade_no_mutations_no_warnings() {
        let schema = CompiledSchema::default();
        assert!(wide_cascade_mutations(&schema, WIDE_FANOUT_THRESHOLD).is_empty());
    }

    #[test]
    fn test_validate_schema_success() {
        // Smoke test: a fully specified one-type/one-query schema can be
        // constructed (it does not invoke SchemaValidator here).
        let schema = CompiledSchema {
            types: vec![TypeDefinition {
                name: "User".into(),
                fields: vec![
                    FieldDefinition {
                        name: "id".into(),
                        field_type: FieldType::Int,
                        nullable: false,
                        default_value: None,
                        description: None,
                        vector_config: None,
                        alias: None,
                        deprecation: None,
                        requires_scope: None,
                        on_deny: FieldDenyPolicy::default(),
                        encryption: None,
                    },
                    FieldDefinition {
                        name: "name".into(),
                        field_type: FieldType::String,
                        nullable: false,
                        default_value: None,
                        description: None,
                        vector_config: None,
                        alias: None,
                        deprecation: None,
                        requires_scope: None,
                        on_deny: FieldDenyPolicy::default(),
                        encryption: None,
                    },
                ],
                description: Some("User type".to_string()),
                sql_source: String::new().into(),
                jsonb_column: String::new(),
                sql_projection_hint: None,
                implements: vec![],
                requires_role: None,
                is_error: false,
                relay: false,
                relationships: Vec::new(),
            }],
            queries: vec![QueryDefinition {
                name: "users".to_string(),
                return_type: "User".to_string(),
                returns_list: true,
                nullable: false,
                arguments: vec![],
                sql_source: Some("v_user".to_string()),
                description: Some("Get users".to_string()),
                auto_params: AutoParams::default(),
                deprecation: None,
                jsonb_column: "data".to_string(),
                relay: false,
                relay_cursor_column: None,
                relay_cursor_type: CursorType::default(),
                inject_params: IndexMap::default(),
                cache_ttl_seconds: None,
                additional_views: vec![],
                requires_role: None,
                rest_path: None,
                rest_method: None,
                native_columns: HashMap::new(),
            }],
            enums: vec![],
            input_types: vec![],
            interfaces: vec![],
            unions: vec![],
            mutations: vec![],
            subscriptions: vec![],
            directives: vec![],
            observers: Vec::new(),
            fact_tables: HashMap::default(),
            federation: None,
            security: None,
            observers_config: None,
            subscriptions_config: None,
            validation_config: None,
            debug_config: None,
            mcp_config: None,
            schema_sdl: None,
            schema_format_version: None,
            custom_scalars: CustomTypeRegistry::default(),
            ..Default::default()
        };

        assert_eq!(schema.types.len(), 1);
        assert_eq!(schema.queries.len(), 1);
    }

    #[test]
    fn test_validate_schema_unknown_type() {
        // Smoke test: a query may reference a return type absent from
        // `types` at construction time (validation happens elsewhere).
        let schema = CompiledSchema {
            types: vec![],
            enums: vec![],
            input_types: vec![],
            interfaces: vec![],
            unions: vec![],
            queries: vec![QueryDefinition {
                name: "users".to_string(),
                return_type: "UnknownType".to_string(),
                returns_list: true,
                nullable: false,
                arguments: vec![],
                sql_source: Some("v_user".to_string()),
                description: Some("Get users".to_string()),
                auto_params: AutoParams::default(),
                deprecation: None,
                jsonb_column: "data".to_string(),
                relay: false,
                relay_cursor_column: None,
                relay_cursor_type: CursorType::default(),
                inject_params: IndexMap::default(),
                cache_ttl_seconds: None,
                additional_views: vec![],
                requires_role: None,
                rest_path: None,
                rest_method: None,
                native_columns: HashMap::new(),
            }],
            mutations: vec![],
            subscriptions: vec![],
            directives: vec![],
            observers: Vec::new(),
            fact_tables: HashMap::default(),
            federation: None,
            security: None,
            observers_config: None,
            subscriptions_config: None,
            validation_config: None,
            debug_config: None,
            mcp_config: None,
            schema_sdl: None,
            schema_format_version: None,
            custom_scalars: CustomTypeRegistry::default(),
            ..Default::default()
        };

        assert_eq!(schema.types.len(), 0);
        assert_eq!(schema.queries[0].return_type, "UnknownType");
    }

    /// Builds a query fixture for the native-column inference tests.
    fn make_query(
        name: &str,
        sql_source: Option<&str>,
        jsonb_column: &str,
        args: Vec<(&str, FieldType)>,
        native_columns: std::collections::HashMap<String, String>,
    ) -> QueryDefinition {
        QueryDefinition {
            name: name.to_string(),
            return_type: "T".to_string(),
            returns_list: false,
            nullable: true,
            arguments: args
                .into_iter()
                .map(|(n, t)| ArgumentDefinition::new(n, t))
                .collect(),
            sql_source: sql_source.map(str::to_string),
            jsonb_column: jsonb_column.to_string(),
            native_columns,
            auto_params: AutoParams::default(),
            ..Default::default()
        }
    }

    #[test]
    fn test_infer_id_arg_becomes_uuid_native_column() {
        let mut schema = CompiledSchema {
            queries: vec![make_query(
                "user",
                Some("tv_user"),
                "data",
                vec![("id", FieldType::Id)],
                std::collections::HashMap::new(),
            )],
            ..Default::default()
        };
        infer_native_columns_from_arg_types(&mut schema);
        assert_eq!(
            schema.queries[0].native_columns.get("id").map(String::as_str),
            Some("uuid"),
            "ID-typed arg should be inferred as uuid native column"
        );
    }

    #[test]
    fn test_infer_uuid_arg_becomes_uuid_native_column() {
        let mut schema = CompiledSchema {
            queries: vec![make_query(
                "user",
                Some("tv_user"),
                "data",
                vec![("userId", FieldType::Uuid)],
                std::collections::HashMap::new(),
            )],
            ..Default::default()
        };
        infer_native_columns_from_arg_types(&mut schema);
        assert_eq!(
            schema.queries[0].native_columns.get("userId").map(String::as_str),
            Some("uuid")
        );
    }

    #[test]
    fn test_infer_does_not_override_explicit_declaration() {
        let mut explicit = std::collections::HashMap::new();
        // Explicit mapping that the inference pass must not overwrite.
        explicit.insert("id".to_string(), "text".to_string());
        let mut schema = CompiledSchema {
            queries: vec![make_query(
                "user",
                Some("tv_user"),
                "data",
                vec![("id", FieldType::Id)],
                explicit,
            )],
            ..Default::default()
        };
        infer_native_columns_from_arg_types(&mut schema);
        assert_eq!(
            schema.queries[0].native_columns.get("id").map(String::as_str),
            Some("text"),
            "explicit native_columns declaration must win over inference"
        );
    }

    #[test]
    fn test_infer_skips_queries_without_sql_source() {
        let mut schema = CompiledSchema {
            queries: vec![make_query(
                "user",
                None,
                "data",
                vec![("id", FieldType::Id)],
                std::collections::HashMap::new(),
            )],
            ..Default::default()
        };
        infer_native_columns_from_arg_types(&mut schema);
        assert!(
            schema.queries[0].native_columns.is_empty(),
            "queries without sql_source must not get inferred native_columns"
        );
    }

    #[test]
    fn test_infer_skips_queries_without_jsonb_column() {
        let mut schema = CompiledSchema {
            queries: vec![make_query(
                "user",
                Some("v_user"),
                "", // empty jsonb_column disables inference
                vec![("id", FieldType::Id)],
                std::collections::HashMap::new(),
            )],
            ..Default::default()
        };
        infer_native_columns_from_arg_types(&mut schema);
        assert!(
            schema.queries[0].native_columns.is_empty(),
            "queries without jsonb_column must not get inferred native_columns"
        );
    }

    #[test]
    fn test_infer_skips_non_id_types() {
        let mut schema = CompiledSchema {
            queries: vec![make_query(
                "user",
                Some("tv_user"),
                "data",
                vec![("username", FieldType::String), ("age", FieldType::Int)],
                std::collections::HashMap::new(),
            )],
            ..Default::default()
        };
        infer_native_columns_from_arg_types(&mut schema);
        assert!(
            schema.queries[0].native_columns.is_empty(),
            "String/Int args must not be inferred as native columns"
        );
    }

    #[test]
    fn test_infer_skips_auto_param_names() {
        let mut schema = CompiledSchema {
            queries: vec![make_query(
                "users",
                Some("tv_user"),
                "data",
                vec![
                    ("where", FieldType::Id),
                    ("limit", FieldType::Id),
                    ("orderBy", FieldType::Id),
                ],
                std::collections::HashMap::new(),
            )],
            ..Default::default()
        };
        infer_native_columns_from_arg_types(&mut schema);
        assert!(
            schema.queries[0].native_columns.is_empty(),
            "auto-param names must never be inferred as native columns even if typed ID"
        );
    }
}