// converge_analytics/model.rs
1// Copyright (c) 2026 Aprio One AB
2// Author: Kenneth Pernyer, kenneth@pernyer.se
3
4use crate::engine::FeatureVector;
5use burn::{
6    nn::{Linear, LinearConfig, Relu},
7    prelude::*,
8    tensor::{Tensor, backend::Backend},
9};
10use converge_core::{Agent, AgentEffect, Context, ContextKey, Fact};
11use serde_json;
12
// NOTE: `FeatureVector` lives in `engine.rs`; if it is not public there, it
// should move to a shared module. For this example we assume we can
// deserialize fact content directly into that struct.
15
/// Simple two-layer MLP (fully connected -> ReLU -> fully connected).
///
/// Generic over the Burn backend `B`, so the same definition runs on any
/// supported device (e.g. NdArray on CPU).
#[derive(Module, Debug)]
pub struct Model<B: Backend> {
    // First linear layer: input features -> hidden units.
    fc1: Linear<B>,
    // Second linear layer: hidden units -> output.
    fc2: Linear<B>,
    // Non-linearity applied between fc1 and fc2 (stateless).
    activation: Relu,
}
23
24impl<B: Backend> Model<B> {
25    pub fn new(device: &B::Device) -> Self {
26        // Initialize with default config for demo
27        let config = ModelConfig::new(3, 16, 1);
28        config.init(device)
29    }
30
31    pub fn forward(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
32        let x = self.fc1.forward(input);
33        let x = self.activation.forward(x);
34        self.fc2.forward(x)
35    }
36}
37
/// Configuration for [`Model`]: layer sizes of the two-layer MLP.
///
/// The `Config` derive provides a `new(input_size, hidden_size, output_size)`
/// constructor plus (de)serialization support.
#[derive(Config, Debug)]
pub struct ModelConfig {
    // Number of input features expected by fc1.
    input_size: usize,
    // Width of the hidden layer (fc1 output / fc2 input).
    hidden_size: usize,
    // Number of outputs produced by fc2.
    output_size: usize,
}
44
45impl ModelConfig {
46    pub fn init<B: Backend>(&self, device: &B::Device) -> Model<B> {
47        Model {
48            fc1: LinearConfig::new(self.input_size, self.hidden_size).init(device),
49            fc2: LinearConfig::new(self.hidden_size, self.output_size).init(device),
50            activation: Relu::new(),
51        }
52    }
53}
54
/// Agent that runs Burn model inference over feature facts in the context.
///
/// Deliberately stateless: holding a `Model<B>` here would force a backend
/// generic onto the `Agent` trait object. Instead a concrete backend
/// (NdArray) model is instantiated inside `execute` for this demo. In a real
/// application the model would be loaded once and shared (Burn modules are
/// cheap to clone when weights are reference-counted).
pub struct InferenceAgent {}
62
63impl InferenceAgent {
64    pub fn new() -> Self {
65        Self {}
66    }
67}
68
69impl Agent for InferenceAgent {
70    fn name(&self) -> &str {
71        "InferenceAgent (Burn)"
72    }
73
74    fn dependencies(&self) -> &[ContextKey] {
75        &[ContextKey::Proposals]
76    }
77
78    fn accepts(&self, ctx: &Context) -> bool {
79        // Run if there are proposals (features) but no hypothesis yet
80        ctx.has(ContextKey::Proposals) && !ctx.has(ContextKey::Hypotheses)
81    }
82
83    fn execute(&self, ctx: &Context) -> AgentEffect {
84        // 1. Find the feature proposal
85        // In reality, filtered by provenance "polars-engine"
86        let _proposals = ctx.get(ContextKey::Proposals); // wait, ctx.get returns Fact, but proposals are ProposedFacts?
87        // Ah, ctx.get(ContextKey) returns FACTs (promoted).
88        // If FeatureAgent emits PROPOSALS, they are in `ContextKey::Proposals`?
89        // Wait, ContextKey::Proposals is a key where Validated Proposals might live?
90        // OR does FeatureAgent emit *Facts* directly if trusted?
91
92        // In the `engine.rs` implementation I sent `ProposedFact` with key `ContextKey::Proposals`.
93        // If they are not promoted to Facts, they are not in `ctx.get()`.
94        // `Context` only stores `facts`.
95        // Proposals usually sit in a queue in the Engine or are added to Context if Key::Proposals is a storage for them?
96        // Looking at `ContextKey` definition: "Internal storage for proposed facts before validation."
97        // So they ARE stored as FACTS under the key `Proposals` if the system works that way?
98        // OR `ProposedFact`s are converted to `Fact`s by the engine.
99        // `ProposedFact::try_from` converts to `Fact`.
100        // If the engine accepts the proposal, it adds it as a Fact.
101
102        // Let's assume the engine validated it and stored it.
103        // So we look for Facts in `ContextKey::Proposals`?
104        // Actually, normally `Proposals` key is for... proposals.
105        // But `FeatureAgent` intended to propose `context.key = Proposals`?
106        // No, `FeatureAgent` sent `proposal.key = Proposals`.
107
108        // Let's assume we find the features in `ContextKey::Proposals` (as stored Facts).
109
110        // We iterate and find one we haven't processed? For now just take the first.
111
112        // This logic is simplified for demo.
113
114        let facts = ctx.get(ContextKey::Proposals);
115        if facts.is_empty() {
116            return AgentEffect::empty();
117        }
118
119        let fact_content = &facts[0].content;
120
121        // 2. Deserialize features
122        let features: FeatureVector = match serde_json::from_str(fact_content) {
123            Ok(f) => f,
124            Err(_) => return AgentEffect::empty(),
125        };
126
127        // 3. Run Inference (Burn)
128        type B = burn::backend::NdArray;
129        let device = Default::default();
130        let model: Model<B> = ModelConfig::new(3, 16, 1).init(&device);
131
132        let input = Tensor::<B, 1>::from_floats(features.data.as_slice(), &device)
133            .reshape([features.shape[0], features.shape[1]]);
134
135        let output = model.forward(input);
136
137        // 4. Emit Hypothesis
138        let values: Vec<f32> = output.into_data().to_vec::<f32>().unwrap_or_default();
139        let prediction = values[0]; // Assume single output
140
141        let hypo_content = format!("Prediction: {:.4} (based on {})", prediction, facts[0].id);
142
143        let hypothesis = Fact::new(
144            ContextKey::Hypotheses,
145            format!("hypo-{}", facts[0].id),
146            hypo_content,
147        );
148
149        AgentEffect::with_fact(hypothesis)
150    }
151}