// aether_macros/lib.rs

1//! # Aether Macros
2//!
3//! Procedural macros for one-line AI code injection.
4//!
5//! This crate provides the `ai!` macro for compile-time code generation markers
6//! and runtime injection helpers.
7
8use proc_macro::TokenStream;
9use quote::quote;
10use syn::{parse_macro_input, LitStr};
11
12/// Generate a template slot marker.
13///
14/// This macro creates a slot definition that will be filled by AI at runtime.
15///
16/// # Example
17///
18/// ```rust,ignore
19/// use aether_macros::ai_slot;
20///
21/// // Creates a slot named "button" with the given prompt
22/// let code = ai_slot!("button", "Create a submit button with hover effects");
23/// ```
24#[proc_macro]
25pub fn ai_slot(input: TokenStream) -> TokenStream {
26    let input_str = parse_macro_input!(input as LitStr);
27    let prompt = input_str.value();
28
29    let output = quote! {
30        aether_core::Slot::new("generated", #prompt)
31    };
32
33    output.into()
34}
35
36/// Create an AI injection template inline.
37///
38/// # Example
39///
40/// ```rust,ignore
41/// use aether_macros::ai_template;
42///
43/// let template = ai_template!("<div>{{AI:content}}</div>");
44/// ```
45#[proc_macro]
46pub fn ai_template(input: TokenStream) -> TokenStream {
47    let input_str = parse_macro_input!(input as LitStr);
48    let content = input_str.value();
49
50    let output = quote! {
51        aether_core::Template::new(#content)
52    };
53
54    output.into()
55}
56
57/// One-line AI code generation (async).
58///
59/// This macro creates a future that generates code using the specified
60/// provider and prompt.
61///
62/// # Example
63///
64/// ```rust,ignore
65/// use aether_macros::ai;
66/// use aether_ai::OpenAiProvider;
67///
68/// async fn example() {
69///     let provider = OpenAiProvider::from_env().unwrap();
70///     let code = ai!("Create a login form", provider).await.unwrap();
71///     println!("{}", code);
72/// }
73/// ```
74#[proc_macro]
75pub fn ai(input: TokenStream) -> TokenStream {
76    let input_tokens: proc_macro2::TokenStream = input.into();
77
78    // Parse as: prompt, provider
79    let output = quote! {
80        {
81            async {
82                use aether_core::{InjectionEngine, Template};
83
84                let (prompt, provider) = (#input_tokens);
85                let template = Template::new("{{AI:generated}}")
86                    .with_slot("generated", prompt);
87
88                let engine = InjectionEngine::new(provider);
89                engine.render(&template).await
90            }
91        }
92    };
93
94    output.into()
95}
96
97/// Mark a code section for AI generation (placeholder).
98///
99/// This attribute marks a function or item for AI-assisted generation.
100/// Use with build tools that preprocess source files.
101///
102/// # Example
103///
104/// ```rust,ignore
105/// #[ai_generate("Implement a function that validates email addresses")]
106/// fn validate_email(email: &str) -> bool {
107///     // AI will generate this implementation
108///     todo!()
109/// }
110/// ```
111#[proc_macro_attribute]
112pub fn ai_generate(attr: TokenStream, item: TokenStream) -> TokenStream {
113    let _prompt = parse_macro_input!(attr as LitStr);
114    let item_tokens: proc_macro2::TokenStream = item.into();
115
116    let output = quote! {
117        // AI Generation prompt: #prompt
118        #item_tokens
119    };
120
121    output.into()
122}
123
124/// Transform a function into a secure, polymorphic AI-powered runtime call.
125/// 
126/// This macro removes the function body and replaces it with logic that:
127/// 1. Fetches a script from AI at runtime.
128/// 2. Executes it using the AetherRuntime (Rhai).
129/// 
130/// # Example
131/// 
132/// ```rust,ignore
133/// #[aether_secure(prompt = "Calculate complex score based on inputs", temp = 0.0)]
134/// fn calculate_score(a: i64, b: i64) -> i64;
135/// ```
136#[proc_macro_attribute]
137pub fn aether_secure(attr: TokenStream, item: TokenStream) -> TokenStream {
138    let input = parse_macro_input!(item as syn::ItemFn);
139    let fn_name = &input.sig.ident;
140    let fn_vis = &input.vis;
141    let fn_args = &input.sig.inputs;
142    let fn_output = &input.sig.output;
143
144    // Simplified attribute parsing (in production use syn::AttributeArgs)
145    let attr_str = attr.to_string();
146    let prompt = if let Some(p) = attr_str.split("prompt =").nth(1).and_then(|s| s.split('"').nth(1)) {
147        p.to_string()
148    } else {
149        "Generate logic for this function".to_string()
150    };
151
152    let arg_names: Vec<_> = fn_args.iter().filter_map(|arg| {
153        if let syn::FnArg::Typed(pat_type) = arg {
154            if let syn::Pat::Ident(pat_id) = &*pat_type.pat {
155                return Some(&pat_id.ident);
156            }
157        }
158        None
159    }).collect();
160
161    let output = quote! {
162        #fn_vis async fn #fn_name(#fn_args) #fn_output {
163            use aether_core::prelude::*;
164            use aether_core::AetherRuntime;
165            use std::collections::HashMap;
166
167            // 1. Setup Engine & Request Script (Dynamic Provider Selection)
168            // We must handle rendering inside match arms because InjectionEngine<P> types differ.
169            
170            let provider_type = std::env::var("AETHER_PROVIDER").unwrap_or_else(|_| "openai".to_string());
171            
172            // Prepare template (common logic)
173            let script_prompt = format!(
174                "Implement this logic in Rhai script: {}. Output ONLY the raw Rhai script code. The inputs available are: {:?}. Return the result directly. Do not wrap in markdown.",
175                #prompt,
176                vec![#(stringify!(#arg_names)),*]
177            );
178            
179            let template = Template::new("{{AI:script}}")
180                .configure_slot(Slot::new("script", script_prompt).with_temperature(0.0));
181
182            let script = match provider_type.to_lowercase().as_str() {
183                "anthropic" | "claude" => {
184                    let p = aether_ai::AnthropicProvider::from_env().expect("Anthropic Provider not configured");
185                    let engine = InjectionEngine::new(p);
186                    engine.render(&template).await.expect("AI script generation failed")
187                },
188                "gemini" => {
189                    let p = aether_ai::GeminiProvider::from_env().expect("Gemini Provider not configured");
190                    let engine = InjectionEngine::new(p);
191                    engine.render(&template).await.expect("AI script generation failed")
192                },
193                "ollama" => {
194                    let model = std::env::var("AETHER_MODEL").unwrap_or_else(|_| "llama3".to_string());
195                    let p = aether_ai::OllamaProvider::new(&model);
196                    let engine = InjectionEngine::new(p);
197                    engine.render(&template).await.expect("AI script generation failed")
198                },
199                _ => {
200                   let p = aether_ai::OpenAiProvider::from_env().expect("OpenAI Provider not configured");
201                   let engine = InjectionEngine::new(p);
202                   engine.render(&template).await.expect("AI script generation failed")
203                }
204            };
205
206            // 3. Execute in Runtime
207            let runtime = AetherRuntime::new();
208            let mut inputs = HashMap::new();
209            #(
210                inputs.insert(stringify!(#arg_names).to_string(), rhai::Dynamic::from(#arg_names));
211             )*
212
213            let result = runtime.execute(&script, inputs).expect("Runtime execution failed");
214            
215            // 4. Return result (simplified cast, needs more robust handling for varied types)
216            result.cast()
217        }
218    };
219
220    output.into()
221}