use std::path::PathBuf;
use anyhow::Result;
use serde::{Deserialize, Serialize};

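/// Scaffold a new HELIX project file from a named template.
///
/// Writes the template to `dir` (default: the current directory) under
/// `name` (default: a filename derived from the template), refusing to
/// overwrite an existing file unless `force` is set.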
#[allow(dead_code)]
pub fn init_project(
    template: String,
    dir: Option<PathBuf>,
    name: Option<String>,
    force: bool,
    verbose: bool,
) -> Result<()> {
    let template_content = get_template_content(&template);
    let output_dir = dir.unwrap_or_else(|| {
        std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."))
    });
    let filename = name.unwrap_or_else(|| {
        match template.as_str() {
            "ai-dev" => "ai_development_team.hlx".to_string(),
            "data-pipeline" => "data_pipeline.hlx".to_string(),
            _ => format!("{}.hlx", template),
        }
    });
    let output_path = output_dir.join(&filename);
    if output_path.exists() && !force {
        return Err(anyhow::anyhow!(
            "File '{}' already exists. Use --force to overwrite.",
            output_path.display()
        ));
    }
    if verbose {
        println!("🚀 Initializing HELIX project:");
        println!("   Template: {}", template);
        println!("   Output: {}", output_path.display());
        println!("   Force: {}", force);
    }
    if let Some(parent) = output_path.parent() {
        std::fs::create_dir_all(parent)?;
    }
    std::fs::write(&output_path, template_content)?;
    println!("✅ HELIX project initialized successfully!");
    println!("   Created: {}", output_path.display());
    println!("   Template: {}", template);
    if verbose {
        let content_size = template_content.len();
        println!("   Size: {} bytes", content_size);
        let description = match template.as_str() {
            "minimal" => "Simple hlx configuration with basic agent and workflow",
            "ai-dev" => {
                "Complete AI development team with specialized agents for full-stack development"
            }
            "support" => {
                "Multi-tier customer support system with escalation and knowledge management"
            }
            "data-pipeline" => {
                "High-throughput data processing pipeline with ML integration"
            }
            "research" => {
                "AI-powered research assistant for literature review and paper writing"
            }
            _ => "HELIX configuration template",
        };
        println!("   Description: {}", description);
    }
    println!("\n📝 Next steps:");
    println!("   1. Review and customize the configuration");
    println!("   2. Set up your API keys and environment variables");
    println!("   3. Compile with: helix compile {}", filename);
    println!("   4. Run with your hlx runtime");
    Ok(())
}
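/// Add a runtime or dev dependency to the project. Currently a stub: it
/// reports what would be added, since HELIX dependency management is not
/// yet implemented.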
pub fn add_dependency(
    dependency: String,
    version: Option<String>,
    dev: bool,
    verbose: bool,
) -> Result<()> {
    if verbose {
        println!("📦 Adding dependency: {}", dependency);
        if let Some(v) = &version {
            println!("   Version: {}", v);
        }
        println!("   Dev dependency: {}", dev);
    }
    let version_str = version.unwrap_or_else(|| "*".to_string());
    let dep_type = if dev { "dev" } else { "runtime" };
    println!("✅ Would add {} dependency: {} v{}", dep_type, dependency, version_str);
    println!("   Note: HELIX dependency management not yet implemented");
    Ok(())
}
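/// Remove a runtime or dev dependency from the project. Currently a stub:
/// it reports what would be removed.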
pub fn remove_dependency(dependency: String, dev: bool, verbose: bool) -> Result<()> {
    if verbose {
        println!("🗑️ Removing dependency: {}", dependency);
        println!("   Dev dependency: {}", dev);
    }
    let dep_type = if dev { "dev" } else { "runtime" };
    println!("✅ Would remove {} dependency: {}", dep_type, dependency);
    println!("   Note: HELIX dependency management not yet implemented");
    Ok(())
}
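/// Remove build artifacts: deletes the `target` directory and, when
/// `cache` is set, the `.helix-cache` directory. The `all` flag is
/// currently only reported, not acted on.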
pub fn clean_project(all: bool, cache: bool, verbose: bool) -> Result<()> {
    if verbose {
        println!("🧹 Cleaning project artifacts");
        println!("   Clean all: {}", all);
        println!("   Clean cache: {}", cache);
    }
    let target_dir = std::env::current_dir()?.join("target");
    if target_dir.exists() {
        std::fs::remove_dir_all(&target_dir)?;
        println!("✅ Removed target directory");
    }
    if cache {
        let cache_dir = std::env::current_dir()?.join(".helix-cache");
        if cache_dir.exists() {
            std::fs::remove_dir_all(&cache_dir)?;
            println!("✅ Removed cache directory");
        }
    }
    Ok(())
}
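/// Reset the project by removing all artifacts and caches. Requires
/// `force` as confirmation; otherwise it prints a warning and returns.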
pub fn reset_project(force: bool, verbose: bool) -> Result<()> {
    if verbose {
        println!("🔄 Resetting project");
        println!("   Force: {}", force);
    }
    if !force {
        println!("⚠️ Use --force to confirm project reset");
        return Ok(());
    }
    clean_project(true, true, verbose)?;
    println!("✅ Project reset successfully");
    Ok(())
}
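/// Run a compiled project binary. Currently a stub: it locates the
/// `.hlxb` binary under the project's `target/` directory and reports
/// what would be executed.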
pub fn run_project(
    input: Option<PathBuf>,
    args: Vec<String>,
    optimize: u8,
    verbose: bool,
) -> Result<()> {
    if verbose {
        println!("🚀 Running project");
        if let Some(i) = &input {
            println!("   Input: {}", i.display());
        }
        println!("   Args: {:?}", args);
        println!("   Optimization: {}", optimize);
    }
    let project_root = find_project_root()?;
    let target_dir = project_root.join("target");
    let binary_name = input
        .as_ref()
        .and_then(|p| p.file_stem())
        .and_then(|s| s.to_str())
        .unwrap_or("main");
    let binary_path = target_dir.join(format!("{}.hlxb", binary_name));
    if !binary_path.exists() {
        println!("❌ Compiled binary not found: {}", binary_path.display());
        println!("   Run 'helix build' first to compile the project");
        return Ok(());
    }
    println!("✅ Would execute: {}", binary_path.display());
    println!("   Note: HELIX runtime execution not yet implemented");
    Ok(())
}
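/// Run the project's test suite. Currently a stub that simulates a
/// passing unit or integration run.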
pub fn run_tests(
    pattern: Option<String>,
    verbose: bool,
    integration: bool,
) -> Result<()> {
    if verbose {
        println!("🧪 Running tests");
        if let Some(p) = &pattern {
            println!("   Pattern: {}", p);
        }
        println!("   Integration tests: {}", integration);
    }
    let test_type = if integration { "integration" } else { "unit" };
    println!("✅ All {} tests passed (simulated)", test_type);
    println!("   Note: HELIX test runner not yet implemented");
    Ok(())
}
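/// Run benchmarks. Currently a stub that simulates a run with the given
/// iteration count (default 100).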
pub fn run_benchmarks(
    pattern: Option<String>,
    iterations: Option<usize>,
    verbose: bool,
) -> Result<()> {
    if verbose {
        println!("⚡ Running benchmarks");
        if let Some(p) = &pattern {
            println!("   Pattern: {}", p);
        }
        if let Some(i) = iterations {
            println!("   Iterations: {}", i);
        }
    }
    let iters = iterations.unwrap_or(100);
    println!("✅ Benchmarks completed (simulated with {} iterations)", iters);
    println!("   Note: HELIX benchmark runner not yet implemented");
    Ok(())
}
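/// Serve compiled artifacts over HTTP. Currently a stub: it resolves the
/// host, port, and directory and reports what would be served.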
pub fn serve_project(
    port: Option<u16>,
    host: Option<String>,
    directory: Option<PathBuf>,
    verbose: bool,
) -> Result<()> {
    let port = port.unwrap_or(8080);
    let host = host.unwrap_or_else(|| "localhost".to_string());
    let dir = directory.unwrap_or_else(|| {
        std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")).join("target")
    });
    if verbose {
        println!("🌐 Serving project");
        println!("   Host: {}", host);
        println!("   Port: {}", port);
        println!("   Directory: {}", dir.display());
    }
    println!("✅ Would start server at http://{}:{}", host, port);
    println!("   Note: HELIX development server not yet implemented");
    Ok(())
}
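/// Walk up from the current directory until a `project.hlx` manifest is
/// found, returning the directory that contains it.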
fn find_project_root() -> Result<PathBuf> {
    let mut current_dir = std::env::current_dir()?;
    loop {
        let manifest_path = current_dir.join("project.hlx");
        if manifest_path.exists() {
            return Ok(current_dir);
        }
        if let Some(parent) = current_dir.parent() {
            current_dir = parent.to_path_buf();
        } else {
            break;
        }
    }
    Err(anyhow::anyhow!("No HELIX project found. Run 'helix init' first."))
}
const MINIMAL_TEMPLATE: &str = r#"# Minimal HELIX Configuration Example
# Demonstrates the simplest valid HELIX file

project "minimal-example" {
    version = "0.1.0"
    author = "Example"
}

agent "simple-assistant" {
    model = "gpt-3.5-turbo"
    role = "Assistant"
    temperature = 0.7
}

workflow "basic-task" {
    trigger = "manual"

    step "process" {
        agent = "simple-assistant"
        task = "Process user request"
        timeout = 5m
    }
}"#;
const AI_DEV_TEMPLATE: &str = r#"# AI Development Team Configuration
# C.3.R.B.H.F
# Complete AI development workflow with specialized agents

project "ai-development-system" {
    version = "3.0.0"
    author = "B"
    description = "Full-stack AI development team for building production systems"
    created = "2024-01-15"
    license = "MIT"
}

# Senior architect for system design
agent "senior-architect" {
    model = "claude-3-opus"
    role = "Systems Architect"
    temperature = 0.7
    max_tokens = 150000

    capabilities [
        "system-design"
        "architecture-patterns"
        "scalability-planning"
        "api-design"
        "database-modeling"
        "microservices"
        "event-driven-architecture"
    ]

    backstory {
        20 years of distributed systems experience
        Designed systems handling billions of requests
        Expert in domain-driven design
        Published author on software architecture
    }

    tools = [
        "draw.io"
        "plantUML"
        "kubernetes"
        "terraform"
    ]
}

# Rust engineer for core systems
agent "rust-engineer" {
    model = "gpt-4"
    role = "Senior Rust Developer"
    temperature = 0.6
    max_tokens = 100000

    capabilities [
        "rust-async"
        "tokio-runtime"
        "memory-optimization"
        "zero-copy-networking"
        "unsafe-rust"
        "macro-development"
        "wasm-compilation"
    ]

    backstory {
        Rust contributor since 2015
        Built high-frequency trading systems
        Optimized systems to microsecond latency
        Core maintainer of popular Rust crates
    }

    tools = [
        "cargo"
        "rustc"
        "clippy"
        "miri"
        "valgrind"
        "perf"
    ]
}

# Frontend specialist for UI
agent "frontend-engineer" {
    model = "claude-3-sonnet"
    role = "Senior Frontend Developer"
    temperature = 0.8
    max_tokens = 80000

    capabilities [
        "react-nextjs"
        "typescript"
        "tailwind-css"
        "state-management"
        "web-performance"
        "accessibility"
        "responsive-design"
    ]

    backstory {
        12 years building user interfaces
        Led frontend for Fortune 500 companies
        Expert in modern JavaScript frameworks
        Passionate about user experience
    }

    tools = [
        "vscode"
        "webpack"
        "babel"
        "jest"
        "cypress"
        "lighthouse"
    ]
}

# QA engineer for testing
agent "qa-engineer" {
    model = "gpt-4"
    role = "Quality Assurance Lead"
    temperature = 0.5
    max_tokens = 50000

    capabilities [
        "test-strategy"
        "automation-frameworks"
        "performance-testing"
        "security-testing"
        "chaos-engineering"
        "regression-testing"
    ]

    backstory {
        15 years in quality assurance
        Implemented testing for mission-critical systems
        Zero-defect deployment record
        Expert in test automation
    }

    tools = [
        "selenium"
        "postman"
        "jmeter"
        "pytest"
        "locust"
        "burp-suite"
    ]
}

# Main development workflow
workflow "full-stack-development" {
    trigger = "manual"

    step "requirements-analysis" {
        agent = "senior-architect"
        task = "Analyze requirements and create system design"
        timeout = 2h
    }

    step "backend-implementation" {
        agent = "rust-engineer"
        task = "Implement core backend services in Rust"
        timeout = 4h
        depends_on = ["requirements-analysis"]

        retry {
            max_attempts = 2
            delay = 5m
            backoff = "linear"
        }
    }

    step "frontend-implementation" {
        agent = "frontend-engineer"
        task = "Build React/Next.js frontend"
        timeout = 3h
        depends_on = ["requirements-analysis"]
    }

    step "integration-testing" {
        agent = "qa-engineer"
        task = "Run comprehensive integration tests"
        timeout = 90m
        depends_on = ["backend-implementation", "frontend-implementation"]

        retry {
            max_attempts = 3
            delay = 2m
            backoff = "exponential"
        }
    }

    step "production-deployment" {
        crew = ["rust-engineer", "frontend-engineer", "qa-engineer"]
        task = "Coordinate production deployment with rollback plan"
        timeout = 1h
        depends_on = ["integration-testing"]
        parallel = false
    }

    pipeline {
        requirements-analysis -> backend-implementation -> integration-testing -> production-deployment
    }
}

# Development crew configuration
crew "dev-team" {
    agents [
        "senior-architect"
        "rust-engineer"
        "frontend-engineer"
        "qa-engineer"
    ]

    process = "hierarchical"
    manager = "senior-architect"
    max_iterations = 10
    verbose = true
}

# Memory configuration for knowledge persistence
memory {
    provider = "postgres"
    connection = "postgresql://localhost:5432/helix_memory"

    embeddings {
        model = "text-embedding-3-small"
        dimensions = 1536
        batch_size = 100
    }

    cache_size = 10000
    persistence = true
}

# Production context
context "production" {
    environment = "prod"
    debug = false
    max_tokens = 200000

    secrets {
        anthropic_key = $ANTHROPIC_API_KEY
        openai_key = $OPENAI_API_KEY
        github_token = $GITHUB_TOKEN
        database_url = "vault:database/prod/connection_string"
    }

    variables {
        api_endpoint = "https://api.production.ai"
        monitoring_endpoint = "https://metrics.production.ai"
        log_level = "info"
        rate_limit = 1000
        timeout = 30s
        retry_count = 3
    }
}"#;
const CUSTOMER_SUPPORT_TEMPLATE: &str = r#"# Customer Support AI Configuration
# AI-powered customer service system

project "customer-support-system" {
    version = "2.0.0"
    author = "Support Team"
    description = "AI-driven customer support with multi-channel capabilities"
}

agent "support-specialist" {
    model = "claude-3-sonnet"
    role = "Customer Support Specialist"
    temperature = 0.7
    max_tokens = 100000

    capabilities [
        "customer-service"
        "problem-solving"
        "empathy"
        "multi-language"
        "escalation-handling"
    ]

    backstory {
        8 years in customer support leadership
        Handled 100K+ customer interactions
        Expert in de-escalation techniques
        Trained support teams worldwide
    }

    tools = [
        "zendesk"
        "intercom"
        "slack"
        "email-client"
        "knowledge-base"
    ]
}

agent "technical-expert" {
    model = "gpt-4"
    role = "Technical Support Engineer"
    temperature = 0.6
    max_tokens = 80000

    capabilities [
        "technical-troubleshooting"
        "bug-analysis"
        "system-diagnostics"
        "code-review"
        "api-debugging"
    ]

    backstory {
        12 years in software engineering
        Specialized in distributed systems
        Published technical documentation
        Led incident response teams
    }

    tools = [
        "terminal"
        "database-client"
        "monitoring-tools"
        "api-tester"
        "log-analyzer"
    ]
}

workflow "customer-inquiry-handling" {
    trigger = "webhook"

    step "triage" {
        agent = "support-specialist"
        task = "Analyze customer inquiry and determine priority level"
        timeout = 5m
    }

    step "initial-response" {
        agent = "support-specialist"
        task = "Provide immediate acknowledgment and gather more details"
        timeout = 10m
        depends_on = ["triage"]
    }

    step "technical-analysis" {
        agent = "technical-expert"
        task = "Investigate technical aspects of the issue"
        timeout = 15m
        depends_on = ["triage"]

        retry {
            max_attempts = 2
            delay = 2m
            backoff = "exponential"
        }
    }

    step "resolution" {
        crew = ["support-specialist", "technical-expert"]
        task = "Develop and implement solution"
        timeout = 30m
        depends_on = ["initial-response", "technical-analysis"]
    }

    step "follow-up" {
        agent = "support-specialist"
        task = "Ensure customer satisfaction and document resolution"
        timeout = 10m
        depends_on = ["resolution"]
    }

    pipeline {
        triage -> initial-response -> technical-analysis -> resolution -> follow-up
    }
}

crew "support-team" {
    agents [
        "support-specialist"
        "technical-expert"
    ]

    process = "hierarchical"
    manager = "technical-expert"
    max_iterations = 5
    verbose = true
}

memory {
    provider = "redis"
    connection = "redis://localhost:6379"

    embeddings {
        model = "text-embedding-ada-002"
        dimensions = 1536
        batch_size = 50
    }

    cache_size = 5000
    persistence = false
}

context "production" {
    environment = "prod"
    debug = false
    max_tokens = 150000

    secrets {
        zendesk_token = $ZENDESK_API_TOKEN
        intercom_token = $INTERCOM_API_TOKEN
        slack_token = $SLACK_API_TOKEN
    }

    variables {
        support_email = "support@company.com"
        response_timeout = 4h
        escalation_threshold = 24h
        max_concurrent_tickets = 50
    }
}"#;
const DATA_PIPELINE_TEMPLATE: &str = r#"# Data Processing Pipeline Configuration
# Real-time data ingestion and analysis system

project "data-pipeline-system" {
    version = "2.1.0"
    author = "DataOps Team"
    description = "High-throughput data processing pipeline with ML integration"
}

agent "data-ingester" {
    model = "gpt-4"
    role = "Data Ingestion Specialist"
    temperature = 0.3
    max_tokens = 50000

    capabilities [
        "kafka-streaming"
        "data-validation"
        "schema-registry"
        "batch-processing"
        "real-time-ingestion"
    ]

    backstory {
        10 years of big data experience
        Processed petabytes of data
        Expert in Apache Kafka and streaming systems
        Built high-throughput data pipelines
    }

    tools = [
        "kafka"
        "apache-nifi"
        "debezium"
        "schema-registry"
        "data-quality-tools"
    ]
}

agent "data-transformer" {
    model = "claude-3-sonnet"
    role = "ETL Engineer"
    temperature = 0.5
    max_tokens = 75000

    capabilities [
        "sql-optimization"
        "data-cleansing"
        "feature-engineering"
        "data-normalization"
        "complex-joins"
    ]

    backstory {
        8 years in data engineering
        Expert in Apache Spark and distributed computing
        Optimized queries reducing processing time by 80%
        Led data warehouse migrations
    }

    tools = [
        "spark"
        "hive"
        "presto"
        "airflow"
        "dbt"
    ]
}

agent "ml-engineer" {
    model = "claude-3-opus"
    role = "Machine Learning Engineer"
    temperature = 0.6
    max_tokens = 100000

    capabilities [
        "feature-selection"
        "model-training"
        "hyperparameter-tuning"
        "model-validation"
        "prediction-pipelines"
    ]

    backstory {
        PhD in Machine Learning
        Published 20+ papers on ML systems
        Built ML pipelines processing billions of predictions daily
        Expert in production ML deployment
    }

    tools = [
        "python"
        "scikit-learn"
        "tensorflow"
        "mlflow"
        "kubernetes"
    ]
}

workflow "data-processing-pipeline" {
    trigger = "schedule:daily"

    step "data-ingestion" {
        agent = "data-ingester"
        task = "Ingest streaming data from multiple sources"
        timeout = 30m
        parallel = true
    }

    step "data-validation" {
        agent = "data-ingester"
        task = "Validate data quality and schema compliance"
        timeout = 15m
        depends_on = ["data-ingestion"]
    }

    step "data-transformation" {
        agent = "data-transformer"
        task = "Clean and transform data for analysis"
        timeout = 45m
        depends_on = ["data-validation"]

        retry {
            max_attempts = 3
            delay = 5m
            backoff = "exponential"
        }
    }

    step "feature-engineering" {
        agent = "ml-engineer"
        task = "Create features for ML models"
        timeout = 1h
        depends_on = ["data-transformation"]
    }

    step "model-inference" {
        agent = "ml-engineer"
        task = "Run ML models for predictions and insights"
        timeout = 30m
        depends_on = ["feature-engineering"]
        parallel = true
    }

    step "results-storage" {
        agent = "data-transformer"
        task = "Store processed results and insights"
        timeout = 20m
        depends_on = ["model-inference"]
    }

    pipeline {
        data-ingestion -> data-validation -> data-transformation -> feature-engineering -> model-inference -> results-storage
    }
}

crew "data-team" {
    agents [
        "data-ingester"
        "data-transformer"
        "ml-engineer"
    ]

    process = "parallel"
    max_iterations = 5
    verbose = true
}

memory {
    provider = "mongodb"
    connection = "mongodb://localhost:27017/data_pipeline"

    embeddings {
        model = "text-embedding-3-small"
        dimensions = 1536
        batch_size = 100
    }

    cache_size = 10000
    persistence = true
}

context "production" {
    environment = "prod"
    debug = false
    max_tokens = 200000

    secrets {
        kafka_credentials = $KAFKA_CREDENTIALS
        database_password = $DATABASE_PASSWORD
        mlflow_token = $MLFLOW_API_TOKEN
    }

    variables {
        kafka_brokers = "kafka-cluster.company.com:9092"
        mongodb_uri = "mongodb://prod-db.company.com:27017"
        batch_size = 1000
        processing_timeout = 2h
        retry_attempts = 5
    }
}"#;
const RESEARCH_TEMPLATE: &str = r#"# Research Assistant AI Configuration
# Academic and scientific research support system

project "research-assistant-system" {
    version = "1.5.0"
    author = "Research Team"
    description = "AI-powered research assistant for literature review and analysis"
}

agent "literature-reviewer" {
    model = "claude-3-opus"
    role = "Literature Review Specialist"
    temperature = 0.4
    max_tokens = 150000

    capabilities [
        "academic-research"
        "paper-analysis"
        "citation-management"
        "methodology-review"
        "gap-identification"
        "systematic-review"
    ]

    backstory {
        PhD in Computer Science
        Published 50+ papers in top conferences
        Expert reviewer for major journals
        Led systematic literature reviews
    }

    tools = [
        "google-scholar"
        "semantic-scholar"
        "zotero"
        "mendeley"
        "pubmed"
        "arxiv"
    ]
}

agent "data-analyst" {
    model = "gpt-4"
    role = "Research Data Analyst"
    temperature = 0.3
    max_tokens = 100000

    capabilities [
        "statistical-analysis"
        "data-visualization"
        "hypothesis-testing"
        "correlation-analysis"
        "regression-modeling"
        "experimental-design"
    ]

    backstory {
        PhD in Statistics
        15 years in research data analysis
        Expert in R, Python, and statistical methods
        Published methodological papers
    }

    tools = [
        "r-studio"
        "python-jupyter"
        "tableau"
        "sas"
        "spss"
        "mathematica"
    ]
}

agent "methodology-expert" {
    model = "claude-3-sonnet"
    role = "Research Methodology Consultant"
    temperature = 0.5
    max_tokens = 80000

    capabilities [
        "research-design"
        "methodology-selection"
        "validity-assessment"
        "bias-analysis"
        "ethical-review"
        "peer-review"
    ]

    backstory {
        Professor of Research Methods
        25 years teaching research methodology
        Consultant for major research institutions
        Expert in qualitative and quantitative methods
    }

    tools = [
        "nvivo"
        "atlas-ti"
        "qualtrics"
        "survey-monkey"
        "ethics-review-tools"
    ]
}

workflow "research-project-workflow" {
    trigger = "manual"

    step "topic-definition" {
        agent = "literature-reviewer"
        task = "Define research topic and objectives clearly"
        timeout = 1h
    }

    step "literature-search" {
        agent = "literature-reviewer"
        task = "Conduct comprehensive literature search and screening"
        timeout = 4h
        depends_on = ["topic-definition"]

        retry {
            max_attempts = 2
            delay = 10m
            backoff = "linear"
        }
    }

    step "methodology-design" {
        agent = "methodology-expert"
        task = "Design appropriate research methodology"
        timeout = 2h
        depends_on = ["literature-search"]
    }

    step "data-collection-planning" {
        agent = "data-analyst"
        task = "Plan data collection and analysis procedures"
        timeout = 3h
        depends_on = ["methodology-design"]
    }

    step "pilot-study" {
        agent = "methodology-expert"
        task = "Conduct pilot study and refine methodology"
        timeout = 1h
        depends_on = ["data-collection-planning"]
    }

    step "full-data-analysis" {
        agent = "data-analyst"
        task = "Conduct comprehensive data analysis"
        timeout = 6h
        depends_on = ["pilot-study"]

        retry {
            max_attempts = 3
            delay = 30m
            backoff = "exponential"
        }
    }

    step "results-interpretation" {
        crew = ["data-analyst", "literature-reviewer", "methodology-expert"]
        task = "Interpret results and draw conclusions"
        timeout = 4h
        depends_on = ["full-data-analysis"]
    }

    step "manuscript-preparation" {
        agent = "literature-reviewer"
        task = "Prepare manuscript for publication"
        timeout = 8h
        depends_on = ["results-interpretation"]
    }

    pipeline {
        topic-definition -> literature-search -> methodology-design -> data-collection-planning -> pilot-study -> full-data-analysis -> results-interpretation -> manuscript-preparation
    }
}

crew "research-team" {
    agents [
        "literature-reviewer"
        "data-analyst"
        "methodology-expert"
    ]

    process = "hierarchical"
    manager = "methodology-expert"
    max_iterations = 8
    verbose = true
}

memory {
    provider = "elasticsearch"
    connection = "http://localhost:9200"

    embeddings {
        model = "text-embedding-3-large"
        dimensions = 3072
        batch_size = 25
    }

    cache_size = 50000
    persistence = true
}

context "academic" {
    environment = "research"
    debug = true
    max_tokens = 200000

    secrets {
        google_scholar_api = $GOOGLE_SCHOLAR_API_KEY
        semantic_scholar_api = $SEMANTIC_SCHOLAR_API_KEY
        pubmed_api = $PUBMED_API_KEY
        database_access = $RESEARCH_DATABASE_ACCESS
    }

    variables {
        literature_database = "research-literature-db"
        citation_style = "apa"
        peer_review_rounds = 3
        statistical_power = 0.8
        confidence_level = 0.95
        sample_size_min = 100
    }
}"#;
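/// Map a template name to its embedded HELIX source. Unknown names fall
/// back to the minimal template.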
fn get_template_content(template: &str) -> &'static str {
    match template {
        "minimal" => MINIMAL_TEMPLATE,
        "ai-dev" => AI_DEV_TEMPLATE,
        "support" => CUSTOMER_SUPPORT_TEMPLATE,
        "data-pipeline" => DATA_PIPELINE_TEMPLATE,
        "research" => RESEARCH_TEMPLATE,
        _ => MINIMAL_TEMPLATE,
    }
}

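/// Optional build settings from a project manifest. Every field defaults
/// to `None`, so a partial manifest deserializes cleanly.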
#[derive(Deserialize, Serialize, Debug, Default)]
pub struct ProjectManifest {
    #[serde(default)]
    compress: Option<bool>,
    #[serde(default)]
    optimize: Option<u8>,
    #[serde(default)]
    cache: Option<bool>,
    #[serde(default)]
    output_dir: Option<PathBuf>,
}
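
// A minimal sketch of how `ProjectManifest` is expected to deserialize.
// The JSON document below is an assumption for illustration; nothing in
// this module parses a manifest yet.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn partial_manifest_deserializes() {
        // All fields are optional, so a document with only `optimize`
        // set should parse, leaving the rest as `None`.
        let manifest: ProjectManifest = serde_json::from_str(r#"{ "optimize": 2 }"#)
            .expect("partial manifest should parse");
        assert_eq!(manifest.optimize, Some(2));
        assert!(manifest.compress.is_none());
        assert!(manifest.output_dir.is_none());
    }
}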