// eenn 0.1.0
// A hybrid neural-symbolic constraint solver with cognitive reasoning capabilities
//! EENN CLI Solver - Command-line math problem solver using hybrid neuro-symbolic AI
//!
//! This CLI tool uses a combination of LLM and EENN's Lightning Strike algorithm
//! to solve mathematical problems from natural language input.

use anyhow::{Context, Result, anyhow};
use clap::{Parser, Subcommand};
use console::style;
use dialoguer::{Input, theme::ColorfulTheme};
use directories::ProjectDirs;
use eenn::{SelfLearningConfig, SelfLearningLightningStrike, constraint_parser};
use hf_hub::api::sync::Api;
use lazy_static::lazy_static;
use llama_cpp_2::{
    llama_backend::LlamaBackend,
    model::{AddBos, LlamaModel},
};
use parking_lot::Mutex;
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::time::Instant;

// Import theory core for constraint creation
use theory_core::{ConstraintIR, VarId};

/// Model information - HuggingFace repo and filename
// HuggingFace repository hosting the quantized Phi-3 Mini GGUF builds.
const PHI3_REPO_ID: &str = "microsoft/Phi-3-mini-4k-instruct-gguf";
// Q4-quantized, 4k-context model file (~2.4GB download; fetched by `initialize_solver`).
const PHI3_MINI_MODEL_FILENAME: &str = "Phi-3-mini-4k-instruct-q4.gguf";

/// Cached model and backend - loaded once and reused across solves
// The (backend, model) pair is populated lazily on the first call to
// `extract_constraints_with_llm` and never evicted, so repeated solves in one
// process skip the multi-second model load.
// NOTE(review): on modern Rust, `std::sync::OnceLock`/`LazyLock` could replace
// the `lazy_static` dependency — worth migrating when next touching this.
lazy_static! {
    static ref MODEL_CACHE: Mutex<Option<(LlamaBackend, LlamaModel)>> = Mutex::new(None);
}

/// EENN Math Solver CLI - Solve math problems using hybrid neuro-symbolic AI
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Cli {
    // Optional so a bare invocation works: `main` falls back to an interactive
    // `Solve` when no subcommand is given. (Plain `//` comment on purpose —
    // a `///` doc comment here would change clap's generated --help text.)
    #[command(subcommand)]
    command: Option<Commands>,
}

// Subcommands of the CLI. The `///` doc comments below double as clap help
// text, so additional review notes use plain `//` comments to leave the
// generated --help output unchanged.
#[derive(Subcommand)]
enum Commands {
    /// Solve a math problem from natural language
    Solve {
        /// The math problem to solve, in natural language
        #[arg(short, long)]
        // When absent, `main` prompts for the problem interactively.
        problem: Option<String>,

        /// Use verbose output showing reasoning steps
        #[arg(short, long)]
        verbose: bool,
    },

    /// Initialize the solver by downloading necessary models
    Init {
        /// Force re-download of models even if they already exist
        #[arg(short, long)]
        force: bool,
    },

    /// View training statistics from the neural solver
    Stats,
}

/// Main entry point: parse CLI arguments and dispatch to the chosen subcommand.
fn main() -> Result<()> {
    let cli = Cli::parse();

    // A bare `eenn-solve` with no subcommand defaults to interactive solving.
    let command = cli.command.unwrap_or(Commands::Solve {
        problem: None,
        verbose: false,
    });

    match command {
        Commands::Solve { problem, verbose } => {
            // Use the problem given on the command line, or prompt for one.
            let problem_text = if let Some(p) = problem {
                p
            } else {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Enter math problem to solve")
                    .interact_text()?
            };

            solve_problem(&problem_text, verbose)
        }
        Commands::Init { force } => initialize_solver(force),
        Commands::Stats => show_statistics(),
    }
}

/// Initialize the solver by downloading necessary models using HuggingFace Hub
fn initialize_solver(force: bool) -> Result<()> {
    println!(
        "{}",
        style("🧠 Initializing EENN Math Solver").cyan().bold()
    );

    // Make sure the data directory exists before placing the model in it.
    let data_dir = get_data_directory()?;
    fs::create_dir_all(&data_dir).context("Failed to create data directory")?;

    let model_path = data_dir.join(PHI3_MINI_MODEL_FILENAME);

    // Skip the download entirely when the model is already on disk, unless the
    // user asked for a forced re-download.
    if !force && model_path.exists() {
        println!("✅ Model already downloaded at: {}", model_path.display());
        return Ok(());
    }

    println!("📥 Downloading Phi-3 Mini model from HuggingFace...");
    println!("   Repository: {}", PHI3_REPO_ID);
    println!("   File: {}", PHI3_MINI_MODEL_FILENAME);

    // Fetch the GGUF file through the hf-hub synchronous API.
    let api = Api::new().context("Failed to initialize HuggingFace API")?;
    let repo = api.model(PHI3_REPO_ID.to_string());

    println!("⏳ Downloading... (this may take a while, ~2.4GB)");
    let downloaded_path = repo
        .get(PHI3_MINI_MODEL_FILENAME)
        .context("Failed to download model from HuggingFace")?;

    // hf-hub stores downloads in its own cache; copy into our data directory.
    fs::copy(&downloaded_path, &model_path).context("Failed to copy model to data directory")?;

    println!(
        "✅ Model downloaded successfully to: {}",
        model_path.display()
    );
    println!("🚀 EENN Math Solver is ready to use!");

    Ok(())
}

/// Solve a math problem using the hybrid neuro-symbolic approach
fn solve_problem(problem_text: &str, verbose: bool) -> Result<()> {
    // Verbose: Show problem and thinking indicator
    if verbose {
        println!("{}", style("🧮 Math Problem:").cyan().bold());
        println!("{}\n", problem_text);
        println!("{}", style("🤔 Thinking...").yellow());
    }

    // Step 1: Extract constraints using LLM (no more heuristics!)
    let start_time = Instant::now();
    let model_path = get_data_directory()?.join(PHI3_MINI_MODEL_FILENAME);

    if !model_path.exists() {
        return Err(anyhow!(
            "Model not found. Please run 'eenn-solve init' to download the Phi-3 model."
        ));
    }

    // Extract constraints and get variable name mapping
    let (constraints, var_name_map) =
        extract_constraints_with_llm(problem_text, &model_path, verbose)?;

    // Step 2: Use EENN's Lightning Strike to solve the constraints
    if verbose {
        println!(
            "{}",
            style("⚡ Applying Lightning Strike algorithm...").yellow()
        );
    }

    // Create Self-Learning Lightning Strike engine with persistence
    let mut config = SelfLearningConfig::default();
    config.neural_save_path = Some(get_data_directory()?.join("neural_weights.json"));
    config.verbose = verbose; // Pass through verbosity flag
    let mut solver = SelfLearningLightningStrike::with_config(config)?;

    // Solve constraints with enhanced error handling
    let solution_result = match solver.solve_and_learn(&constraints, verbose) {
        Ok(result) => result,
        Err(e) => {
            let err_msg = e.to_string();
            if err_msg.contains("Division by zero") {
                eprintln!("{}", style("❌ Error: Division by zero detected").red().bold());
                eprintln!("💡 Hint: Check your problem for division by zero (e.g., x/0)");
            } else if err_msg.contains("Square root of negative") {
                eprintln!("{}", style("❌ Error: Square root of negative number").red().bold());
                eprintln!("💡 Hint: Square roots of negative numbers are not supported");
            } else if err_msg.contains("Exponent too large") {
                eprintln!("{}", style("❌ Error: Exponent too large").red().bold());
                eprintln!("💡 Hint: Try using smaller exponents (max: 100)");
            } else {
                eprintln!("{}", style(format!("❌ Error: {}", err_msg)).red().bold());
            }
            return Err(e);
        }
    };
    let solve_time = start_time.elapsed();

    // Print results - clean and simple by default
    if solution_result.satisfiable {
        if verbose {
            println!("\n{}", style("🎯 Solution:").green().bold());
            println!(
                "{} (in {}ms)",
                style("SATISFIABLE").green().bold(),
                solve_time.as_millis()
            );
        }

        // Print variables with their original names
        // If we have ranges, display them; otherwise display single values
        if let Some(ranges) = &solution_result.variable_ranges {
            // Display ranges for inequality solutions
            for (var_name, range_str) in ranges {
                let display_name = var_name_map
                    .iter()
                    .find(|(_, id_str)| id_str.as_str() == var_name)
                    .map(|(name, _)| name.as_str())
                    .unwrap_or(var_name);

                // Replace var name in range string with display name
                let display_range = range_str.replace(var_name, display_name);

                if verbose {
                    println!("  {}", style(&display_range).cyan());
                } else {
                    println!("{}", display_range);
                }
            }
        } else {
            // Display single values for equality solutions
            for (var_name, value) in &solution_result.assignment {
                // Map var_0, var_1 etc back to original names
                let display_name = var_name_map
                    .iter()
                    .find(|(_, id_str)| id_str.as_str() == var_name)
                    .map(|(name, _)| name.as_str())
                    .unwrap_or(var_name);

                if verbose {
                    println!(
                        "  {} = {}",
                        style(display_name).cyan(),
                        style(value).yellow()
                    );
                } else {
                    // Clean output: just the answer
                    println!("{} = {}", display_name, value);
                }
            }
        }

        // Verbose: Show solving strategy and timing
        if verbose {
            if solution_result.winning_strategy.as_deref() == Some("neural") {
                println!(
                    "\n{} (confidence: {:.1}%)",
                    style("✓ Used neural prediction").green(),
                    solution_result.confidence * 100.0
                );
            } else {
                println!("\n{}", style("✓ Used SMT solving").yellow());
            }
        }
    } else {
        // Better error messages for unsatisfiable constraints
        if verbose {
            println!("\n{}", style("🎯 Solution:").green().bold());
            println!(
                "{} (in {}ms)",
                style("UNSATISFIABLE").red().bold(),
                solve_time.as_millis()
            );
            println!("\n💡 {}", style("Possible reasons:").yellow());
            println!("   • The constraints may be contradictory (e.g., x = 5 and x = 10)");
            println!("   • The problem may have no solution in the given domain");
            println!("   • Try rephrasing or simplifying your constraints");
        } else {
            println!("No solution exists.");
            println!("💡 Hint: Use -v flag for more details on why this is unsatisfiable.");
        }
    }

    Ok(())
}

/// Extract constraints using LLM (Phi-3 Mini) with model caching
fn extract_constraints_with_llm(
    problem_text: &str,
    model_path: &Path,
    verbose: bool,
) -> Result<(ConstraintIR, HashMap<String, String>)> {
    use llama_cpp_2::{
        LogOptions, context::params::LlamaContextParams, llama_batch::LlamaBatch, model::Special,
        send_logs_to_tracing,
    };
    use std::num::NonZeroU32;

    // Configure llama-cpp-2 logging based on verbosity
    send_logs_to_tracing(LogOptions::default().with_logs_enabled(verbose));

    if verbose {
        println!("🤖 Using Phi-3 Mini for natural language understanding...");
    }

    // Get or initialize cached model
    let mut cache = MODEL_CACHE.lock();

    if cache.is_none() {
        if verbose {
            println!("   Loading model (first time only)...");
        }
        let backend = LlamaBackend::init().context("Failed to initialize LLaMA backend")?;
        let model = LlamaModel::load_from_file(&backend, model_path, &Default::default())
            .context("Failed to load model")?;
        *cache = Some((backend, model));
    } else if verbose {
        println!("   ✓ Using cached model (instant load)");
    }

    // Now safely unwrap since we know it exists
    let (backend, model) = cache.as_ref().unwrap();

    // Create context with proper API
    let ctx_params = LlamaContextParams::default().with_n_ctx(NonZeroU32::new(2048));

    let mut ctx = model
        .new_context(&backend, ctx_params)
        .context("Failed to create context")?;

    // Create a structured prompt
    let prompt = format!(
        "<|system|>You are a mathematical problem analyzer. Extract variables and constraints from word problems.
Format your response EXACTLY as:
Variables:
- name: result, type: integer, domain: 0..1000

Constraints:
- result = <expression><|end|>
<|user|>{}?<|end|>
<|assistant|>",
        problem_text.trim_end_matches('?')
    );

    // Tokenize the prompt
    let tokens_list = model
        .str_to_token(&prompt, llama_cpp_2::model::AddBos::Always)
        .context("Failed to tokenize prompt")?;

    if verbose {
        println!("   Prompt: {} tokens", tokens_list.len());
    }

    // Create batch and add prompt tokens
    let mut batch = LlamaBatch::new(512, 1);
    let last_index = (tokens_list.len() - 1) as i32;

    for (i, token) in (0_i32..).zip(tokens_list.into_iter()) {
        let is_last = i == last_index;
        batch.add(token, i, &[0], is_last)?;
    }

    // Decode the prompt
    ctx.decode(&mut batch).context("Failed to decode prompt")?;

    // Generate tokens one by one
    let mut n_cur = batch.n_tokens();
    let max_tokens = 256;
    let mut response_parts = Vec::new();

    for _ in 0..max_tokens {
        // Get candidates and find best token (greedy sampling)
        let candidates: Vec<_> = ctx.candidates().collect();

        if candidates.is_empty() {
            break;
        }

        let mut best_token = candidates[0].id();
        let mut best_logit = candidates[0].logit();

        for candidate in &candidates {
            if candidate.logit() > best_logit {
                best_logit = candidate.logit();
                best_token = candidate.id();
            }
        }

        // Check for end-of-generation
        if model.is_eog_token(best_token) {
            break;
        }

        // Convert and collect token
        if let Ok(token_str) = model.token_to_str(best_token, Special::Tokenize) {
            response_parts.push(token_str);
        }

        // Feed token back into model
        batch.clear();
        batch.add(best_token, n_cur, &[0], true)?;

        ctx.decode(&mut batch).context("Failed to decode token")?;
        n_cur += 1;
    }

    // Collect response text
    let response = response_parts.join("");

    if verbose {
        println!("✓ LLM response: {} tokens", response_parts.len());
        println!("{}", response.trim());
    }

    // Parse the LLM response into ConstraintIR with variable mapping
    constraint_parser::parse_llm_response_with_mapping(&response, verbose)
}

// Heuristics removed! LLM handles all natural language understanding.
// Phi-3 Mini is proven to handle:
// - Simple arithmetic: "What is 2 + 2?"
// - Different phrasings: "Calculate 7 multiplied by 6"
// - Word problems: "Alice has 12 apples. Bob has twice as many..."
// - Equations: "Solve for x: 2x + 5 = 15"
// - Systems of equations: "x + y = 10, x - y = 2"

/// Show statistics about the neural network training
fn show_statistics() -> Result<()> {
    let stats_path = get_data_directory()?.join("learning_progress.txt");

    if stats_path.exists() {
        // Progress is written by the solver as plain text; print it verbatim
        // under a styled header.
        let stats_content =
            fs::read_to_string(&stats_path).context("Failed to read statistics file")?;

        println!(
            "{}",
            style("📊 Neural Network Training Statistics").cyan().bold()
        );
        println!("{}", stats_content);
    } else {
        // No progress file yet — nothing has been solved in this installation.
        println!("No statistics available yet. Solve some problems first!");
    }

    Ok(())
}

/// Get the data directory for storing models and neural weights
fn get_data_directory() -> Result<PathBuf> {
    // Resolve the platform-appropriate per-user data directory for this app.
    let data_dir = ProjectDirs::from("com", "eenn", "math-solver")
        .ok_or_else(|| anyhow!("Failed to determine project directories"))?
        .data_dir()
        .to_path_buf();

    // Create it eagerly so callers can write into it right away.
    fs::create_dir_all(&data_dir).context("Failed to create data directory")?;

    Ok(data_dir)
}