use super::{ThinkToolContext, ThinkToolModule, ThinkToolModuleConfig, ThinkToolOutput};
use serde::{Deserialize, Serialize};
/// Tuning knobs for BedRock's first-principles decomposition.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BedRockConfig {
/// Maximum recursion depth when decomposing a claim into sub-principles.
pub max_depth: usize,
/// Fundamentality score at/above which a principle is reclassified as an axiom.
pub axiom_threshold: f64,
/// Intended number of sub-principles per decomposition step.
/// NOTE(review): not read anywhere in this file — confirm intended use.
pub branching_factor: usize,
/// Principles with confidence below this are reported as gaps.
pub min_confidence: f64,
/// Presumably gates lenient treatment of assumptions.
/// NOTE(review): not read anywhere in this file — confirm intended use.
pub strict_assumptions: bool,
/// Hard cap on the total number of principles produced.
pub max_principles: usize,
}
impl Default for BedRockConfig {
fn default() -> Self {
Self {
max_depth: 3,
axiom_threshold: 0.85,
branching_factor: 3,
min_confidence: 0.5,
strict_assumptions: true,
max_principles: 20,
}
}
}
/// Frontier-selection strategies for Tree-of-Thoughts exploration.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "snake_case")]
pub enum ExplorationStrategy {
/// Expand the shallowest frontier node first (the default).
#[default]
BreadthFirst,
/// Expand the deepest frontier node first.
DepthFirst,
/// Expand the highest-value frontier node regardless of depth.
BestFirst,
/// A*-style search; currently handled the same as `BestFirst` in `next_to_expand`.
AStar,
/// Expand the best node at the deepest level, keeping a bounded beam.
BeamSearch,
}
impl ExplorationStrategy {
pub fn use_case(&self) -> &'static str {
match self {
Self::BreadthFirst => "Unknown solution depth, want all solutions at each level",
Self::DepthFirst => "Deep solutions, good pruning heuristics available",
Self::BestFirst => "Reliable value function, want fastest path to good solution",
Self::AStar => "Need optimal solution, have admissible heuristic",
Self::BeamSearch => "Limited compute budget, want diverse high-quality solutions",
}
}
}
/// Configuration for a Tree-of-Thoughts exploration run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToTConfig {
/// Frontier-selection strategy used by `ToTExplorer::next_to_expand`.
pub strategy: ExplorationStrategy,
/// Target number of children generated per expanded node.
pub branching_factor: usize,
/// Number of nodes kept per level by `ToTExplorer::get_beam_frontier`.
pub beam_width: usize,
/// Hard cap on total nodes; `should_continue` stops at this size.
pub max_nodes: usize,
/// Children with value below this are marked pruned on insertion.
pub pruning_threshold: f64,
/// Whether backtracking is permitted.
/// NOTE(review): not read anywhere in this file — confirm intended use.
pub enable_backtracking: bool,
/// Maximum tree depth considered for expansion.
pub max_depth: usize,
/// Whether multiple solution paths should be aggregated.
/// NOTE(review): not read anywhere in this file — confirm intended use.
pub aggregate_paths: bool,
/// Number of voting samples.
/// NOTE(review): not read anywhere in this file — confirm intended use.
pub voting_samples: usize,
}
impl Default for ToTConfig {
fn default() -> Self {
Self {
strategy: ExplorationStrategy::default(),
branching_factor: 3,
beam_width: 5,
max_nodes: 100,
pruning_threshold: 0.3,
enable_backtracking: true,
max_depth: 4,
aggregate_paths: true,
voting_samples: 0, }
}
}
impl ToTConfig {
    /// Preset tuned for breadth-first search: wider branching and beam,
    /// larger node budget.
    pub fn bfs() -> Self {
        let mut cfg = Self::default();
        cfg.strategy = ExplorationStrategy::BreadthFirst;
        cfg.branching_factor = 4;
        cfg.beam_width = 10;
        cfg.max_nodes = 200;
        cfg
    }
    /// Preset tuned for depth-first search: narrow branching, deeper limit,
    /// more aggressive pruning.
    pub fn dfs() -> Self {
        let mut cfg = Self::default();
        cfg.strategy = ExplorationStrategy::DepthFirst;
        cfg.branching_factor = 2;
        cfg.max_depth = 6;
        cfg.pruning_threshold = 0.4;
        cfg
    }
    /// Preset for beam search with the caller-supplied beam `width`.
    pub fn beam(width: usize) -> Self {
        let mut cfg = Self::default();
        cfg.strategy = ExplorationStrategy::BeamSearch;
        cfg.beam_width = width;
        cfg.branching_factor = 3;
        cfg.aggregate_paths = true;
        cfg
    }
}
/// A single node in the Tree-of-Thoughts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ThoughtNode {
/// Unique id; by construction also this node's index in the explorer's node list.
pub id: usize,
/// The thought text itself.
pub thought: String,
/// Evaluated quality of this thought (higher is better; the root gets 1.0).
pub value: f64,
/// Distance from the root (root is 0).
pub depth: usize,
/// Parent node id, or `None` for the root.
pub parent_id: Option<usize>,
/// Ids of direct children.
pub children: Vec<usize>,
/// Whether this node has been marked as a solution.
pub is_terminal: bool,
/// Whether this node was pruned at insertion time.
pub is_pruned: bool,
/// Node ids from the root down to this node, inclusive.
pub path: Vec<usize>,
/// How this thought was generated.
pub generation_method: ThoughtGenerationMethod,
}
/// How a thought came to exist.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "snake_case")]
pub enum ThoughtGenerationMethod {
/// Independently sampled — the default used by `ThoughtNode::child`.
#[default]
Sampled,
/// Proposed thought (semantics defined by callers; not used in this file).
Proposed,
/// Result of decomposition — used for root nodes.
Decomposed,
/// Derived from earlier thoughts (semantics defined by callers).
Derived,
}
impl ThoughtNode {
pub fn root(thought: impl Into<String>) -> Self {
Self {
id: 0,
thought: thought.into(),
value: 1.0, depth: 0,
parent_id: None,
children: Vec::new(),
is_terminal: false,
is_pruned: false,
path: vec![0],
generation_method: ThoughtGenerationMethod::Decomposed,
}
}
pub fn child(id: usize, thought: impl Into<String>, value: f64, parent: &ThoughtNode) -> Self {
let mut path = parent.path.clone();
path.push(id);
Self {
id,
thought: thought.into(),
value,
depth: parent.depth + 1,
parent_id: Some(parent.id),
children: Vec::new(),
is_terminal: false,
is_pruned: false,
path,
generation_method: ThoughtGenerationMethod::default(),
}
}
pub fn should_prune(&self, threshold: f64) -> bool {
self.value < threshold
}
pub fn path_cost(&self, nodes: &[ThoughtNode]) -> f64 {
self.path
.iter()
.filter_map(|&id| nodes.get(id))
.map(|n| 1.0 - n.value)
.sum()
}
}
/// Final output of a Tree-of-Thoughts run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToTResult {
/// All nodes created during exploration (a node's index equals its id).
pub nodes: Vec<ThoughtNode>,
/// Path (node ids, root first) of the best terminal node; `[0]` if none exists.
pub best_path: Vec<usize>,
/// Value of the best terminal node; 0.0 when no terminal node exists.
pub best_value: f64,
/// Ids of all nodes marked terminal.
pub terminal_nodes: Vec<usize>,
/// Aggregate statistics for the run.
pub stats: ToTStats,
}
/// Counters describing a Tree-of-Thoughts run.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ToTStats {
/// Total nodes added to the tree (including pruned ones).
pub nodes_created: usize,
/// Nodes that ended up with at least one child.
pub nodes_expanded: usize,
/// Nodes pruned on insertion for falling below the threshold.
pub nodes_pruned: usize,
/// Deepest level reached by any node.
pub max_depth_reached: usize,
/// Count of nodes marked terminal.
pub solutions_found: usize,
/// Debug-formatted name of the strategy used.
pub strategy: String,
/// Average child count over nodes that have children.
pub effective_branching_factor: f64,
}
impl ToTResult {
    /// Thought texts along the best path, in root-to-leaf order.
    pub fn best_path_thoughts(&self) -> Vec<&str> {
        let mut thoughts = Vec::with_capacity(self.best_path.len());
        for &id in &self.best_path {
            if let Some(node) = self.nodes.get(id) {
                thoughts.push(node.thought.as_str());
            }
        }
        thoughts
    }
    /// Every terminal node paired with its value, sorted best-first.
    pub fn all_solutions(&self) -> Vec<(&ThoughtNode, f64)> {
        let mut solutions: Vec<(&ThoughtNode, f64)> = self
            .terminal_nodes
            .iter()
            .filter_map(|&id| self.nodes.get(id).map(|n| (n, n.value)))
            .collect();
        // Descending by value; NaN compares as equal so ordering stays total.
        solutions.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
        solutions
    }
}
/// Incremental builder/driver for a Tree-of-Thoughts search.
pub struct ToTExplorer {
// Exploration configuration (strategy, limits, thresholds).
config: ToTConfig,
// All nodes, stored so that a node's id equals its index.
nodes: Vec<ThoughtNode>,
// Next id to assign to a new node.
next_id: usize,
// Running statistics, finalized in `finish`.
stats: ToTStats,
}
impl ToTExplorer {
    /// Create an explorer for `config` with an empty tree.
    pub fn new(config: ToTConfig) -> Self {
        Self {
            stats: ToTStats {
                strategy: format!("{:?}", config.strategy),
                ..Default::default()
            },
            config,
            nodes: Vec::new(),
            next_id: 0,
        }
    }
    /// Explorer preconfigured for breadth-first search.
    pub fn bfs() -> Self {
        Self::new(ToTConfig::bfs())
    }
    /// Explorer preconfigured for depth-first search.
    pub fn dfs() -> Self {
        Self::new(ToTConfig::dfs())
    }
    /// Explorer preconfigured for beam search with the given beam width.
    pub fn beam(width: usize) -> Self {
        Self::new(ToTConfig::beam(width))
    }
    /// Reset the tree and statistics, then seed it with a root thought (id 0).
    pub fn initialize(&mut self, root_thought: impl Into<String>) {
        self.nodes.clear();
        self.next_id = 0;
        self.stats = ToTStats {
            strategy: format!("{:?}", self.config.strategy),
            ..Default::default()
        };
        let root = ThoughtNode::root(root_thought);
        self.nodes.push(root);
        self.next_id = 1;
        self.stats.nodes_created = 1;
    }
    /// Add a child thought under `parent_id`. If the child's value is below
    /// the pruning threshold it is stored pruned (it still occupies an id so
    /// ids keep matching indices). Returns the new node's id, or 0 (the root
    /// id — a legacy sentinel) when `parent_id` does not exist.
    pub fn add_child(&mut self, parent_id: usize, thought: impl Into<String>, value: f64) -> usize {
        let parent = match self.nodes.get(parent_id) {
            Some(p) => p.clone(),
            // Unknown parent: keep the legacy sentinel return value.
            None => return 0,
        };
        let mut child = ThoughtNode::child(self.next_id, thought, value, &parent);
        let child_id = child.id;
        let child_depth = child.depth;
        if child.should_prune(self.config.pruning_threshold) {
            child.is_pruned = true;
            self.stats.nodes_pruned += 1;
        }
        self.nodes.push(child);
        if let Some(parent_node) = self.nodes.get_mut(parent_id) {
            parent_node.children.push(child_id);
        }
        self.next_id += 1;
        self.stats.nodes_created += 1;
        // Use the depth captured before the push rather than indexing
        // `self.nodes[child_id]`, which would panic if the id/index
        // invariant were ever broken.
        self.stats.max_depth_reached = self.stats.max_depth_reached.max(child_depth);
        child_id
    }
    /// Mark a node as a solution. Idempotent: marking an already-terminal
    /// node again no longer inflates `solutions_found`.
    pub fn mark_terminal(&mut self, node_id: usize) {
        if let Some(node) = self.nodes.get_mut(node_id) {
            if !node.is_terminal {
                node.is_terminal = true;
                self.stats.solutions_found += 1;
            }
        }
    }
    /// Choose the next unexpanded, unpruned, non-terminal leaf according to
    /// the configured strategy, or `None` when the frontier is exhausted.
    pub fn next_to_expand(&self) -> Option<usize> {
        let candidates: Vec<_> = self
            .nodes
            .iter()
            .filter(|n| !n.is_terminal && !n.is_pruned && n.children.is_empty())
            .filter(|n| n.depth < self.config.max_depth)
            .collect();
        if candidates.is_empty() {
            return None;
        }
        match self.config.strategy {
            // Shallowest candidate first.
            ExplorationStrategy::BreadthFirst => {
                candidates.iter().min_by_key(|n| n.depth).map(|n| n.id)
            }
            // Deepest candidate first.
            ExplorationStrategy::DepthFirst => {
                candidates.iter().max_by_key(|n| n.depth).map(|n| n.id)
            }
            // Highest value anywhere in the frontier. A* currently shares the
            // best-first rule (no separate heuristic term is modeled).
            ExplorationStrategy::BestFirst | ExplorationStrategy::AStar => candidates
                .iter()
                .max_by(|a, b| {
                    a.value
                        .partial_cmp(&b.value)
                        .unwrap_or(std::cmp::Ordering::Equal)
                })
                .map(|n| n.id),
            // Highest value among only the deepest candidates.
            ExplorationStrategy::BeamSearch => {
                let max_depth = candidates.iter().map(|n| n.depth).max().unwrap_or(0);
                candidates
                    .iter()
                    .filter(|n| n.depth == max_depth)
                    .max_by(|a, b| {
                        a.value
                            .partial_cmp(&b.value)
                            .unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .map(|n| n.id)
            }
        }
    }
    /// Ids of the top `beam_width` live nodes at the deepest level,
    /// best value first.
    pub fn get_beam_frontier(&self) -> Vec<usize> {
        let max_depth = self.nodes.iter().map(|n| n.depth).max().unwrap_or(0);
        let mut frontier: Vec<_> = self
            .nodes
            .iter()
            .filter(|n| n.depth == max_depth && !n.is_pruned && !n.is_terminal)
            .map(|n| (n.id, n.value))
            .collect();
        frontier.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
        frontier
            .into_iter()
            .take(self.config.beam_width)
            .map(|(id, _)| id)
            .collect()
    }
    /// Consume the explorer, compute derived statistics, and produce the
    /// final result. Falls back to path `[0]` / value 0.0 when no terminal
    /// node exists.
    pub fn finish(mut self) -> ToTResult {
        let (best_path, best_value) = self
            .nodes
            .iter()
            .filter(|n| n.is_terminal)
            .max_by(|a, b| {
                a.value
                    .partial_cmp(&b.value)
                    .unwrap_or(std::cmp::Ordering::Equal)
            })
            .map(|n| (n.path.clone(), n.value))
            .unwrap_or_else(|| (vec![0], 0.0));
        let terminal_nodes: Vec<_> = self
            .nodes
            .iter()
            .filter(|n| n.is_terminal)
            .map(|n| n.id)
            .collect();
        let nodes_with_children = self.nodes.iter().filter(|n| !n.children.is_empty()).count();
        let total_children: usize = self.nodes.iter().map(|n| n.children.len()).sum();
        // Effective branching factor = average children over expanded nodes.
        self.stats.effective_branching_factor = if nodes_with_children > 0 {
            total_children as f64 / nodes_with_children as f64
        } else {
            0.0
        };
        self.stats.nodes_expanded = nodes_with_children;
        ToTResult {
            nodes: self.nodes,
            best_path,
            best_value,
            terminal_nodes,
            stats: self.stats,
        }
    }
    /// Node lookup by id.
    pub fn get_node(&self, id: usize) -> Option<&ThoughtNode> {
        self.nodes.get(id)
    }
    /// Total nodes created so far (including pruned ones).
    pub fn node_count(&self) -> usize {
        self.nodes.len()
    }
    /// True while the node budget is not exhausted and a candidate remains.
    pub fn should_continue(&self) -> bool {
        self.nodes.len() < self.config.max_nodes && self.next_to_expand().is_some()
    }
}
/// Epistemic classification of a principle.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PrincipleType {
/// Self-evident or logically necessary truth.
Axiom,
/// Derived from other principles.
Derived,
/// Taken for granted without supporting evidence.
Assumption,
/// Grounded in observation or data.
Empirical,
/// True by definition of terms.
Definition,
/// Debatable; subject to disagreement.
Contested,
}
impl PrincipleType {
pub fn reliability_weight(&self) -> f64 {
match self {
PrincipleType::Axiom => 1.0,
PrincipleType::Definition => 0.95,
PrincipleType::Empirical => 0.80,
PrincipleType::Derived => 0.75,
PrincipleType::Assumption => 0.50,
PrincipleType::Contested => 0.30,
}
}
}
/// A single principle produced by decomposition.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Principle {
/// Unique id within one analysis.
pub id: usize,
/// The principle's statement text.
pub statement: String,
/// Epistemic classification.
pub principle_type: PrincipleType,
/// How fundamental the principle is (0.0 = the surface claim, 1.0 = bedrock).
pub fundamentality: f64,
/// Confidence that the principle holds.
pub confidence: f64,
/// Id of the principle this one was decomposed from, if any.
pub parent_id: Option<usize>,
/// Ids of principles decomposed from this one.
pub child_ids: Vec<usize>,
/// Supporting evidence statements.
pub evidence: Vec<String>,
/// Known challenges to this principle.
pub challenges: Vec<String>,
/// Decomposition depth (0 = the original claim or injected context).
pub depth: usize,
}
impl Principle {
    /// Combined weight: fundamentality × confidence × type reliability.
    pub fn effective_weight(&self) -> f64 {
        let type_weight = self.principle_type.reliability_weight();
        self.fundamentality * self.confidence * type_weight
    }
    /// True for axioms whose fundamentality meets `threshold`.
    pub fn is_axiomatic(&self, threshold: f64) -> bool {
        matches!(self.principle_type, PrincipleType::Axiom) && self.fundamentality >= threshold
    }
}
/// A chain from an axiom back toward the root claim.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReconstructionPath {
/// Principle ids, starting at an axiom and walking parent links upward.
pub principle_chain: Vec<usize>,
/// Logical connective between consecutive chain entries.
pub connectives: Vec<String>,
/// Confidence in the reconstruction.
pub confidence: f64,
/// Whether the chain reaches a depth-0 principle.
pub is_complete: bool,
/// Problems detected while building the chain.
pub gaps: Vec<String>,
}
/// A weakness detected in the overall analysis.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalysisGap {
/// Human-readable description of the gap.
pub description: String,
/// How serious the gap is (0.0–1.0; >= 0.8 is treated as critical).
pub severity: f64,
/// Optional remediation hint.
pub suggestion: Option<String>,
/// Ids of the principles implicated by this gap.
pub affected_principles: Vec<usize>,
}
/// Complete output of one BedRock decomposition.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BedRockResult {
/// The analyzed query, verbatim.
pub query: String,
/// Every principle discovered (root claim, sub-principles, context).
pub principles: Vec<Principle>,
/// Axiom-to-claim reconstruction chains.
pub reconstructions: Vec<ReconstructionPath>,
/// Weaknesses detected in the analysis.
pub gaps: Vec<AnalysisGap>,
/// Human-readable takeaways.
pub insights: Vec<String>,
/// Overall confidence in the analysis (0.0–1.0).
pub confidence: f64,
/// Summary counters.
pub metadata: BedRockMetadata,
}
/// Summary counters for a BedRock analysis.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BedRockMetadata {
/// Deepest decomposition level reached.
pub max_depth_reached: usize,
/// Total principles produced.
pub total_principles: usize,
/// Number of axioms.
pub axiom_count: usize,
/// Number of assumptions.
pub assumption_count: usize,
/// Number of contested principles.
pub contested_count: usize,
/// Heuristic completeness score (0.0–1.0).
pub completeness: f64,
}
impl BedRockResult {
pub fn axioms(&self) -> Vec<&Principle> {
self.principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Axiom)
.collect()
}
pub fn assumptions(&self) -> Vec<&Principle> {
self.principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Assumption)
.collect()
}
pub fn contested(&self) -> Vec<&Principle> {
self.principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Contested)
.collect()
}
pub fn at_depth(&self, depth: usize) -> Vec<&Principle> {
self.principles
.iter()
.filter(|p| p.depth == depth)
.collect()
}
pub fn is_complete(&self, threshold: f64) -> bool {
self.metadata.completeness >= threshold && self.gaps.iter().all(|g| g.severity < 0.8)
}
pub fn to_json(&self) -> serde_json::Value {
serde_json::json!({
"query": self.query,
"axioms": self.axioms().iter().map(|p| {
serde_json::json!({
"id": p.id,
"statement": p.statement,
"fundamentality": p.fundamentality,
"confidence": p.confidence,
"evidence": p.evidence
})
}).collect::<Vec<_>>(),
"assumptions": self.assumptions().iter().map(|p| {
serde_json::json!({
"id": p.id,
"statement": p.statement,
"confidence": p.confidence,
"challenges": p.challenges
})
}).collect::<Vec<_>>(),
"decomposition": self.principles.iter().map(|p| {
serde_json::json!({
"id": p.id,
"statement": p.statement,
"type": format!("{:?}", p.principle_type),
"fundamentality": p.fundamentality,
"confidence": p.confidence,
"depth": p.depth,
"parent_id": p.parent_id
})
}).collect::<Vec<_>>(),
"reconstruction": self.reconstructions.iter().map(|r| {
serde_json::json!({
"path": r.principle_chain,
"confidence": r.confidence,
"complete": r.is_complete,
"gaps": r.gaps
})
}).collect::<Vec<_>>(),
"gaps": self.gaps.iter().map(|g| {
serde_json::json!({
"description": g.description,
"severity": g.severity,
"suggestion": g.suggestion
})
}).collect::<Vec<_>>(),
"insights": self.insights,
"confidence": self.confidence,
"metadata": {
"max_depth": self.metadata.max_depth_reached,
"total_principles": self.metadata.total_principles,
"axioms": self.metadata.axiom_count,
"assumptions": self.metadata.assumption_count,
"contested": self.metadata.contested_count,
"completeness": self.metadata.completeness
}
})
}
}
/// First-principles decomposition module ("BedRock").
pub struct BedRock {
// Module descriptor exposed through the `ThinkToolModule` trait.
config: ThinkToolModuleConfig,
// Analysis tuning parameters.
analysis_config: BedRockConfig,
}
impl Default for BedRock {
fn default() -> Self {
Self::new()
}
}
impl BedRock {
pub fn new() -> Self {
Self {
config: ThinkToolModuleConfig {
name: "BedRock".to_string(),
version: "3.0.0".to_string(),
description: "First principles decomposition with Tree-of-Thoughts reconstruction"
.to_string(),
confidence_weight: 0.25,
},
analysis_config: BedRockConfig::default(),
}
}
pub fn with_config(analysis_config: BedRockConfig) -> Self {
Self {
config: ThinkToolModuleConfig {
name: "BedRock".to_string(),
version: "3.0.0".to_string(),
description: "First principles decomposition with Tree-of-Thoughts reconstruction"
.to_string(),
confidence_weight: 0.25,
},
analysis_config,
}
}
pub fn analysis_config(&self) -> &BedRockConfig {
&self.analysis_config
}
pub fn decompose(&self, query: &str, previous_steps: &[String]) -> BedRockResult {
let mut principles = Vec::new();
let mut next_id = 0;
let root_principle = self.create_root_principle(query, &mut next_id);
principles.push(root_principle);
self.decompose_recursive(&mut principles, 0, 0, &mut next_id);
self.incorporate_context(&mut principles, previous_steps, &mut next_id);
self.classify_principles(&mut principles);
let reconstructions = self.build_reconstructions(&principles);
let gaps = self.identify_gaps(&principles, &reconstructions);
let insights = self.extract_insights(&principles, &gaps);
let confidence = self.calculate_confidence(&principles, &gaps);
let metadata = BedRockMetadata {
max_depth_reached: principles.iter().map(|p| p.depth).max().unwrap_or(0),
total_principles: principles.len(),
axiom_count: principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Axiom)
.count(),
assumption_count: principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Assumption)
.count(),
contested_count: principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Contested)
.count(),
completeness: self.calculate_completeness(&principles, &gaps),
};
BedRockResult {
query: query.to_string(),
principles,
reconstructions,
gaps,
insights,
confidence,
metadata,
}
}
fn create_root_principle(&self, query: &str, next_id: &mut usize) -> Principle {
let id = *next_id;
*next_id += 1;
let principle_type = self.classify_query(query);
Principle {
id,
statement: query.to_string(),
principle_type,
fundamentality: 0.0, confidence: 1.0, parent_id: None,
child_ids: Vec::new(),
evidence: Vec::new(),
challenges: Vec::new(),
depth: 0,
}
}
fn classify_query(&self, query: &str) -> PrincipleType {
let lower = query.to_lowercase();
if lower.contains("what is")
|| lower.contains("define")
|| lower.contains("meaning of")
|| lower.contains("definition")
{
return PrincipleType::Definition;
}
if lower.contains("how many")
|| lower.contains("when did")
|| lower.contains("data shows")
|| lower.contains("research")
|| lower.contains("study")
|| lower.contains("evidence")
{
return PrincipleType::Empirical;
}
if lower.contains("always true")
|| lower.contains("by definition")
|| lower.contains("necessarily")
|| lower.contains("logically")
|| lower.contains("mathematically")
{
return PrincipleType::Axiom;
}
if lower.contains("assume")
|| lower.contains("suppose")
|| lower.contains("if we")
|| lower.contains("given that")
{
return PrincipleType::Assumption;
}
if lower.contains("better")
|| lower.contains("worse")
|| lower.contains("should")
|| lower.contains("ought")
|| lower.contains("believe")
|| lower.contains("think")
{
return PrincipleType::Contested;
}
PrincipleType::Derived
}
fn decompose_recursive(
&self,
principles: &mut Vec<Principle>,
parent_idx: usize,
current_depth: usize,
next_id: &mut usize,
) {
if current_depth >= self.analysis_config.max_depth {
return;
}
if principles.len() >= self.analysis_config.max_principles {
return;
}
let parent_statement = principles[parent_idx].statement.clone();
let sub_principles = self.extract_sub_principles(&parent_statement, current_depth);
let mut child_ids = Vec::new();
for (statement, principle_type, fundamentality) in sub_principles {
if principles.len() >= self.analysis_config.max_principles {
break;
}
let id = *next_id;
*next_id += 1;
child_ids.push(id);
let confidence = self.estimate_confidence(&statement, principle_type);
let principle = Principle {
id,
statement,
principle_type,
fundamentality,
confidence,
parent_id: Some(principles[parent_idx].id),
child_ids: Vec::new(),
evidence: Vec::new(),
challenges: self.identify_challenges(principle_type),
depth: current_depth + 1,
};
let new_idx = principles.len();
principles.push(principle);
if principle_type != PrincipleType::Axiom
&& fundamentality < self.analysis_config.axiom_threshold
{
self.decompose_recursive(principles, new_idx, current_depth + 1, next_id);
}
}
principles[parent_idx].child_ids = child_ids;
}
fn extract_sub_principles(
&self,
statement: &str,
depth: usize,
) -> Vec<(String, PrincipleType, f64)> {
let mut sub_principles = Vec::new();
let lower = statement.to_lowercase();
if lower.contains("better") || lower.contains("worse") || lower.contains("more") {
sub_principles.push((
"Comparison requires a defined metric or criterion".to_string(),
PrincipleType::Definition,
0.9,
));
sub_principles.push((
"Both alternatives must be well-understood".to_string(),
PrincipleType::Assumption,
0.7,
));
}
if lower.contains("because") || lower.contains("causes") || lower.contains("leads to") {
sub_principles.push((
"Causal relationships require evidence of mechanism".to_string(),
PrincipleType::Empirical,
0.6,
));
sub_principles.push((
"Correlation does not imply causation".to_string(),
PrincipleType::Axiom,
1.0,
));
}
if lower.contains("all")
|| lower.contains("every")
|| lower.contains("none")
|| lower.contains("never")
{
sub_principles.push((
"Universal claims require exhaustive verification".to_string(),
PrincipleType::Axiom,
1.0,
));
sub_principles.push((
"A single counterexample disproves a universal claim".to_string(),
PrincipleType::Axiom,
1.0,
));
}
if lower.contains("good")
|| lower.contains("bad")
|| lower.contains("right")
|| lower.contains("wrong")
{
sub_principles.push((
"Value judgments require a defined value framework".to_string(),
PrincipleType::Definition,
0.85,
));
sub_principles.push((
"Different stakeholders may have different values".to_string(),
PrincipleType::Assumption,
0.75,
));
}
if lower.contains("will") || lower.contains("future") || lower.contains("predict") {
sub_principles.push((
"Future predictions carry inherent uncertainty".to_string(),
PrincipleType::Axiom,
1.0,
));
sub_principles.push((
"Past patterns may not continue".to_string(),
PrincipleType::Assumption,
0.6,
));
}
if sub_principles.is_empty() && depth < self.analysis_config.max_depth {
sub_principles.push((
"The claim contains implicit assumptions".to_string(),
PrincipleType::Assumption,
0.5,
));
sub_principles.push((
"Terms used may have multiple interpretations".to_string(),
PrincipleType::Definition,
0.6,
));
}
sub_principles
}
fn estimate_confidence(&self, _statement: &str, principle_type: PrincipleType) -> f64 {
match principle_type {
PrincipleType::Axiom => 0.95,
PrincipleType::Definition => 0.90,
PrincipleType::Empirical => 0.75,
PrincipleType::Derived => 0.70,
PrincipleType::Assumption => 0.55,
PrincipleType::Contested => 0.40,
}
}
fn identify_challenges(&self, principle_type: PrincipleType) -> Vec<String> {
match principle_type {
PrincipleType::Axiom => vec![],
PrincipleType::Definition => {
vec!["Alternative definitions may exist".to_string()]
}
PrincipleType::Empirical => vec![
"Data may be outdated".to_string(),
"Sample may not be representative".to_string(),
],
PrincipleType::Derived => vec![
"Derivation logic may have flaws".to_string(),
"Missing intermediate steps".to_string(),
],
PrincipleType::Assumption => vec![
"Assumption may not hold in all contexts".to_string(),
"Implicit bias may be present".to_string(),
],
PrincipleType::Contested => vec![
"Subject to debate".to_string(),
"Evidence may support opposing views".to_string(),
],
}
}
fn incorporate_context(
&self,
principles: &mut Vec<Principle>,
previous_steps: &[String],
next_id: &mut usize,
) {
for step in previous_steps {
if principles.len() >= self.analysis_config.max_principles {
break;
}
let principle_type = self.classify_query(step);
let id = *next_id;
*next_id += 1;
let principle = Principle {
id,
statement: format!("Prior context: {}", step),
principle_type,
fundamentality: 0.3, confidence: 0.7, parent_id: None,
child_ids: Vec::new(),
evidence: vec!["From previous reasoning step".to_string()],
challenges: vec!["May need re-evaluation in new context".to_string()],
depth: 0, };
principles.push(principle);
}
}
fn classify_principles(&self, principles: &mut [Principle]) {
for principle in principles.iter_mut() {
if principle.fundamentality >= self.analysis_config.axiom_threshold
&& principle.principle_type != PrincipleType::Axiom
&& principle.principle_type != PrincipleType::Contested
{
principle.principle_type = PrincipleType::Axiom;
principle.challenges.clear();
}
if principle.evidence.is_empty() && principle.principle_type == PrincipleType::Empirical
{
principle.principle_type = PrincipleType::Assumption;
principle.confidence *= 0.8;
}
}
}
fn build_reconstructions(&self, principles: &[Principle]) -> Vec<ReconstructionPath> {
let mut reconstructions = Vec::new();
let axioms: Vec<_> = principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Axiom)
.collect();
for axiom in axioms {
let mut path = vec![axiom.id];
let mut connectives = Vec::new();
let mut current_id = axiom.id;
let mut gaps = Vec::new();
while let Some(principle) = principles.iter().find(|p| p.id == current_id) {
if let Some(parent_idx) = principles.iter().position(|p| {
p.child_ids.contains(¤t_id) || Some(p.id) == principle.parent_id
}) {
let parent = &principles[parent_idx];
path.push(parent.id);
connectives.push("implies".to_string());
current_id = parent.id;
} else {
break;
}
if path.len() > principles.len() {
gaps.push("Circular dependency detected".to_string());
break;
}
}
let is_complete = principles
.iter()
.any(|p| path.contains(&p.id) && p.depth == 0);
if !is_complete {
gaps.push("Path does not reach the original claim".to_string());
}
let confidence = if is_complete && gaps.is_empty() {
axiom.confidence * 0.9
} else {
axiom.confidence * 0.5
};
reconstructions.push(ReconstructionPath {
principle_chain: path,
connectives,
confidence,
is_complete,
gaps,
});
}
reconstructions
}
fn identify_gaps(
&self,
principles: &[Principle],
reconstructions: &[ReconstructionPath],
) -> Vec<AnalysisGap> {
let mut gaps = Vec::new();
if reconstructions.is_empty() {
gaps.push(AnalysisGap {
description: "No axiomatic foundation identified".to_string(),
severity: 0.9,
suggestion: Some("Decompose further to find self-evident truths".to_string()),
affected_principles: principles.iter().map(|p| p.id).collect(),
});
}
let incomplete_paths: Vec<_> = reconstructions.iter().filter(|r| !r.is_complete).collect();
if !incomplete_paths.is_empty() {
gaps.push(AnalysisGap {
description: format!(
"{} reconstruction path(s) do not reach the root claim",
incomplete_paths.len()
),
severity: 0.7,
suggestion: Some("Add intermediate principles to complete the chain".to_string()),
affected_principles: incomplete_paths
.iter()
.flat_map(|r| r.principle_chain.clone())
.collect(),
});
}
let unsupported_assumptions: Vec<_> = principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Assumption && p.evidence.is_empty())
.collect();
if !unsupported_assumptions.is_empty() {
gaps.push(AnalysisGap {
description: format!(
"{} assumption(s) lack supporting evidence",
unsupported_assumptions.len()
),
severity: 0.6,
suggestion: Some("Provide evidence or acknowledge as unverified".to_string()),
affected_principles: unsupported_assumptions.iter().map(|p| p.id).collect(),
});
}
let low_confidence: Vec<_> = principles
.iter()
.filter(|p| p.confidence < self.analysis_config.min_confidence)
.collect();
if !low_confidence.is_empty() {
gaps.push(AnalysisGap {
description: format!(
"{} principle(s) have confidence below threshold",
low_confidence.len()
),
severity: 0.5,
suggestion: Some("Verify or remove low-confidence principles".to_string()),
affected_principles: low_confidence.iter().map(|p| p.id).collect(),
});
}
let unresolved_contested: Vec<_> = principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Contested && !p.challenges.is_empty())
.collect();
if !unresolved_contested.is_empty() {
gaps.push(AnalysisGap {
description: format!(
"{} contested claim(s) require resolution",
unresolved_contested.len()
),
severity: 0.8,
suggestion: Some("Provide evidence to resolve contested claims".to_string()),
affected_principles: unresolved_contested.iter().map(|p| p.id).collect(),
});
}
gaps
}
fn extract_insights(&self, principles: &[Principle], gaps: &[AnalysisGap]) -> Vec<String> {
let mut insights = Vec::new();
let axiom_count = principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Axiom)
.count();
if axiom_count > 0 {
insights.push(format!(
"Analysis rests on {} axiomatic foundation(s)",
axiom_count
));
} else {
insights.push(
"No self-evident axioms identified - claim relies on assumptions".to_string(),
);
}
let assumption_count = principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Assumption)
.count();
if assumption_count > 0 {
insights.push(format!(
"{} hidden assumption(s) identified that could be challenged",
assumption_count
));
}
let critical_gaps: Vec<_> = gaps.iter().filter(|g| g.severity >= 0.8).collect();
if !critical_gaps.is_empty() {
insights.push(format!(
"{} critical gap(s) in reasoning require attention",
critical_gaps.len()
));
}
let max_depth = principles.iter().map(|p| p.depth).max().unwrap_or(0);
if max_depth > 0 {
insights.push(format!(
"Decomposition reached {} level(s) of depth",
max_depth
));
}
let contested_count = principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Contested)
.count();
if contested_count > 0 {
insights.push(format!(
"{} contested claim(s) identified - these are debatable",
contested_count
));
}
insights
}
fn calculate_confidence(&self, principles: &[Principle], gaps: &[AnalysisGap]) -> f64 {
if principles.is_empty() {
return 0.0;
}
let principle_confidence: f64 =
principles.iter().map(|p| p.effective_weight()).sum::<f64>() / principles.len() as f64;
let gap_penalty: f64 = gaps.iter().map(|g| g.severity * 0.1).sum();
let axiom_count = principles
.iter()
.filter(|p| p.principle_type == PrincipleType::Axiom)
.count();
let axiom_bonus = (axiom_count as f64 * 0.05).min(0.2);
(principle_confidence + axiom_bonus - gap_penalty).clamp(0.0, 1.0)
}
fn calculate_completeness(&self, principles: &[Principle], gaps: &[AnalysisGap]) -> f64 {
if principles.is_empty() {
return 0.0;
}
let has_axiom = principles
.iter()
.any(|p| p.principle_type == PrincipleType::Axiom);
let has_definitions = principles
.iter()
.any(|p| p.principle_type == PrincipleType::Definition);
let assumptions_identified = principles
.iter()
.any(|p| p.principle_type == PrincipleType::Assumption);
let mut completeness = 0.0;
if has_axiom {
completeness += 0.3;
}
if has_definitions {
completeness += 0.2;
}
if assumptions_identified {
completeness += 0.2;
}
let max_depth = principles.iter().map(|p| p.depth).max().unwrap_or(0);
completeness += (max_depth as f64 * 0.1).min(0.2);
let critical_gaps = gaps.iter().filter(|g| g.severity >= 0.8).count();
completeness -= critical_gaps as f64 * 0.1;
completeness.clamp(0.0, 1.0)
}
}
impl ThinkToolModule for BedRock {
    /// Module descriptor (name, version, description, weight).
    fn config(&self) -> &ThinkToolModuleConfig {
        &self.config
    }
    /// Run decomposition over the context's query and prior steps, and wrap
    /// the result as a `ThinkToolOutput`.
    fn execute(&self, context: &ThinkToolContext) -> Result<ThinkToolOutput, crate::error::Error> {
        let analysis = self.decompose(&context.query, &context.previous_steps);
        let output = ThinkToolOutput {
            module: self.config.name.clone(),
            confidence: analysis.confidence,
            output: analysis.to_json(),
        };
        Ok(output)
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_bedrock_new() {
let bedrock = BedRock::new();
assert_eq!(bedrock.config().name, "BedRock");
assert_eq!(bedrock.config().version, "3.0.0");
}
#[test]
fn test_bedrock_with_config() {
let config = BedRockConfig {
max_depth: 5,
axiom_threshold: 0.9,
..Default::default()
};
let bedrock = BedRock::with_config(config);
assert_eq!(bedrock.analysis_config().max_depth, 5);
assert_eq!(bedrock.analysis_config().axiom_threshold, 0.9);
}
#[test]
fn test_principle_type_reliability() {
assert_eq!(PrincipleType::Axiom.reliability_weight(), 1.0);
assert_eq!(PrincipleType::Contested.reliability_weight(), 0.30);
assert!(
PrincipleType::Assumption.reliability_weight()
< PrincipleType::Derived.reliability_weight()
);
}
#[test]
fn test_decompose_simple_query() {
let bedrock = BedRock::new();
let result = bedrock.decompose("Electric vehicles are better than gas cars", &[]);
assert!(!result.principles.is_empty());
assert_eq!(result.query, "Electric vehicles are better than gas cars");
assert!(result.confidence > 0.0);
assert!(!result.insights.is_empty());
}
#[test]
fn test_decompose_with_comparison() {
let bedrock = BedRock::new();
let result = bedrock.decompose("Python is better than JavaScript for data science", &[]);
let has_definition = result
.principles
.iter()
.any(|p| p.principle_type == PrincipleType::Definition);
assert!(has_definition, "Should identify need for comparison metric");
}
#[test]
fn test_decompose_with_causation() {
let bedrock = BedRock::new();
let result = bedrock.decompose("Smoking causes cancer", &[]);
let has_axiom = result
.principles
.iter()
.any(|p| p.principle_type == PrincipleType::Axiom);
assert!(
has_axiom,
"Should identify axiomatic principles about causation"
);
}
#[test]
fn test_execute_trait() {
let bedrock = BedRock::new();
let context = ThinkToolContext {
query: "What is the best programming language?".into(),
previous_steps: vec!["Prior analysis: Consider use case".into()],
};
let output = bedrock.execute(&context).expect("Execution should succeed");
assert_eq!(output.module, "BedRock");
assert!(output.confidence > 0.0);
assert!(output.output.get("axioms").is_some());
assert!(output.output.get("assumptions").is_some());
assert!(output.output.get("decomposition").is_some());
assert!(output.output.get("insights").is_some());
}
#[test]
fn test_classify_query() {
let bedrock = BedRock::new();
let def_type = bedrock.classify_query("What is machine learning?");
assert_eq!(def_type, PrincipleType::Definition);
let emp_type = bedrock.classify_query("Research shows that exercise improves health");
assert_eq!(emp_type, PrincipleType::Empirical);
let contested_type = bedrock.classify_query("Rust is better than C++");
assert_eq!(contested_type, PrincipleType::Contested);
}
#[test]
fn test_result_accessors() {
let bedrock = BedRock::new();
let result = bedrock.decompose("All birds can fly", &[]);
let _axioms = result.axioms();
let _assumptions = result.assumptions();
let root_principles = result.at_depth(0);
assert!(!root_principles.is_empty());
assert!(result.metadata.completeness >= 0.0);
assert!(result.metadata.completeness <= 1.0);
}
#[test]
fn test_principle_effective_weight() {
    // Build a maximally fundamental axiom and check its derived weight.
    let principle = Principle {
        id: 0,
        statement: "Test axiom".into(),
        principle_type: PrincipleType::Axiom,
        fundamentality: 1.0,
        confidence: 0.95,
        parent_id: None,
        child_ids: vec![],
        evidence: vec![],
        challenges: vec![],
        depth: 0,
    };
    let weight = principle.effective_weight();
    // Use an epsilon comparison instead of exact float equality, consistent
    // with every other float assertion in this module (clippy::float_cmp).
    assert!((weight - 0.95).abs() < f64::EPSILON);
    // Confidence 0.95 clears the 0.85 axiom threshold.
    assert!(principle.is_axiomatic(0.85));
}
#[test]
fn test_gap_identification() {
    // Smoke test: decomposing a vague statement must not panic,
    // and the gaps field must be reachable on the result.
    let analyzer = BedRock::new();
    let outcome = analyzer.decompose("This is a vague statement", &[]);
    let _ = &outcome.gaps;
}
#[test]
fn test_max_principles_limit() {
    // A configured cap on principle count must never be exceeded.
    let limited = BedRockConfig {
        max_principles: 5,
        ..Default::default()
    };
    let analyzer = BedRock::with_config(limited);
    let outcome = analyzer.decompose("Complex multi-part query about many things", &[]);
    assert!(outcome.principles.len() <= 5);
}
#[test]
fn test_json_output_structure() {
    let analyzer = BedRock::new();
    let json = analyzer.decompose("Test query for JSON", &[]).to_json();
    // Every expected top-level key must be present in the JSON view.
    let expected_keys = [
        "query",
        "axioms",
        "assumptions",
        "decomposition",
        "reconstruction",
        "gaps",
        "insights",
        "confidence",
        "metadata",
    ];
    for key in expected_keys {
        assert!(json.get(key).is_some());
    }
}
#[test]
fn test_exploration_strategy_use_cases() {
    // Each strategy advertises a distinct, stable use-case description.
    let cases = [
        (
            ExplorationStrategy::BreadthFirst,
            "Unknown solution depth, want all solutions at each level",
        ),
        (
            ExplorationStrategy::DepthFirst,
            "Deep solutions, good pruning heuristics available",
        ),
        (
            ExplorationStrategy::BestFirst,
            "Reliable value function, want fastest path to good solution",
        ),
        (
            ExplorationStrategy::AStar,
            "Need optimal solution, have admissible heuristic",
        ),
        (
            ExplorationStrategy::BeamSearch,
            "Limited compute budget, want diverse high-quality solutions",
        ),
    ];
    for (strategy, expected) in cases {
        assert_eq!(strategy.use_case(), expected);
    }
}
#[test]
fn test_tot_config_defaults() {
    // Pin down the documented default exploration parameters.
    let defaults = ToTConfig::default();
    assert_eq!(defaults.strategy, ExplorationStrategy::BreadthFirst);
    assert_eq!(defaults.branching_factor, 3);
    assert_eq!(defaults.beam_width, 5);
    assert_eq!(defaults.max_nodes, 100);
    assert!((defaults.pruning_threshold - 0.3).abs() < f64::EPSILON);
    assert!(defaults.enable_backtracking);
    assert_eq!(defaults.max_depth, 4);
    assert!(defaults.aggregate_paths);
    // Voting is disabled by default.
    assert_eq!(defaults.voting_samples, 0);
}
#[test]
fn test_tot_config_presets() {
    // The BFS preset widens the search.
    let bfs_cfg = ToTConfig::bfs();
    assert_eq!(bfs_cfg.strategy, ExplorationStrategy::BreadthFirst);
    assert_eq!(bfs_cfg.branching_factor, 4);
    assert_eq!(bfs_cfg.beam_width, 10);
    assert_eq!(bfs_cfg.max_nodes, 200);
    // The DFS preset narrows branching but digs deeper.
    let dfs_cfg = ToTConfig::dfs();
    assert_eq!(dfs_cfg.strategy, ExplorationStrategy::DepthFirst);
    assert_eq!(dfs_cfg.branching_factor, 2);
    assert_eq!(dfs_cfg.max_depth, 6);
    assert!((dfs_cfg.pruning_threshold - 0.4).abs() < f64::EPSILON);
    // The beam preset honors the requested width.
    let beam_cfg = ToTConfig::beam(8);
    assert_eq!(beam_cfg.strategy, ExplorationStrategy::BeamSearch);
    assert_eq!(beam_cfg.beam_width, 8);
    assert!(beam_cfg.aggregate_paths);
}
#[test]
fn test_thought_node_root() {
    // A fresh root sits at depth 0 with full value and no lineage.
    let node = ThoughtNode::root("What is the optimal solution?");
    assert_eq!(node.id, 0);
    assert_eq!(node.thought, "What is the optimal solution?");
    assert!((node.value - 1.0).abs() < f64::EPSILON);
    assert_eq!(node.depth, 0);
    assert!(node.parent_id.is_none());
    assert!(node.children.is_empty());
    assert!(!node.is_terminal);
    assert!(!node.is_pruned);
    // The path of a root is just itself.
    assert_eq!(node.path, vec![0]);
    assert_eq!(node.generation_method, ThoughtGenerationMethod::Decomposed);
}
#[test]
fn test_thought_node_child() {
    // A child inherits lineage: parent's depth + 1 and an extended path.
    let parent = ThoughtNode::root("Root thought");
    let node = ThoughtNode::child(1, "Child thought", 0.8, &parent);
    assert_eq!(node.id, 1);
    assert_eq!(node.thought, "Child thought");
    assert!((node.value - 0.8).abs() < f64::EPSILON);
    assert_eq!(node.depth, 1);
    assert_eq!(node.parent_id, Some(0));
    assert!(node.children.is_empty());
    assert_eq!(node.path, vec![0, 1]);
}
#[test]
fn test_thought_node_pruning_threshold() {
    // Only nodes whose value falls below the threshold are prunable.
    let parent = ThoughtNode::root("Root");
    let strong = ThoughtNode::child(1, "High value", 0.9, &parent);
    let weak = ThoughtNode::child(2, "Low value", 0.2, &parent);
    assert!(!strong.should_prune(0.3));
    assert!(weak.should_prune(0.3));
}
#[test]
fn test_tot_explorer_bfs_initialization() {
    // Initialization seeds exactly one node: the root with the given thought.
    let mut tree = ToTExplorer::bfs();
    tree.initialize("Problem: Find optimal path");
    assert_eq!(tree.node_count(), 1);
    assert!(tree.should_continue());
    let root = tree.get_node(0).unwrap();
    assert_eq!(root.thought, "Problem: Find optimal path");
}
#[test]
fn test_tot_explorer_add_children() {
    let mut tree = ToTExplorer::bfs();
    tree.initialize("Root problem");
    let ids = [
        tree.add_child(0, "Approach A", 0.8),
        tree.add_child(0, "Approach B", 0.7),
        tree.add_child(0, "Approach C", 0.6),
    ];
    // Root plus three children.
    assert_eq!(tree.node_count(), 4);
    let root = tree.get_node(0).unwrap();
    assert_eq!(root.children.len(), 3);
    // Every returned id must be registered on the root.
    for id in ids {
        assert!(root.children.contains(&id));
    }
}
#[test]
fn test_tot_explorer_pruning() {
    // A child scored below the pruning threshold is flagged immediately.
    let strict = ToTConfig {
        pruning_threshold: 0.5,
        ..ToTConfig::default()
    };
    let mut tree = ToTExplorer::new(strict);
    tree.initialize("Root");
    let weak_id = tree.add_child(0, "Low value thought", 0.2);
    assert!(tree.get_node(weak_id).unwrap().is_pruned);
}
#[test]
fn test_tot_explorer_terminal_marking() {
    let mut tree = ToTExplorer::bfs();
    tree.initialize("Root");
    let candidate = tree.add_child(0, "Solution candidate", 0.9);
    tree.mark_terminal(candidate);
    // The marked node must report itself as terminal.
    assert!(tree.get_node(candidate).unwrap().is_terminal);
}
#[test]
fn test_tot_explorer_bfs_order() {
    let mut tree = ToTExplorer::bfs();
    tree.initialize("Root");
    let first = tree.add_child(0, "Level 1 - A", 0.8);
    let second = tree.add_child(0, "Level 1 - B", 0.7);
    // Breadth-first expansion must pick one of the level-1 siblings next.
    let next = tree.next_to_expand();
    assert!(matches!(next, Some(id) if id == first || id == second));
}
#[test]
fn test_tot_explorer_dfs_order() {
    // Depth-first expansion dives into the deepest frontier node first.
    let mut tree = ToTExplorer::dfs();
    tree.initialize("Root");
    let first = tree.add_child(0, "Level 1 - A", 0.8);
    let _second = tree.add_child(0, "Level 1 - B", 0.7);
    let deepest = tree.add_child(first, "Level 2 - A", 0.75);
    assert_eq!(tree.next_to_expand(), Some(deepest));
}
#[test]
fn test_tot_explorer_beam_frontier() {
    let mut tree = ToTExplorer::beam(2);
    tree.initialize("Root");
    for (label, score) in [("A", 0.9), ("B", 0.6), ("C", 0.8), ("D", 0.5)] {
        tree.add_child(0, label, score);
    }
    // A beam width of 2 caps the frontier at two candidates.
    assert_eq!(tree.get_beam_frontier().len(), 2);
}
#[test]
fn test_tot_explorer_finish_results() {
    let mut tree = ToTExplorer::bfs();
    tree.initialize("Problem: 2+2");
    let step = tree.add_child(0, "Try addition", 0.8);
    let answer = tree.add_child(step, "Answer: 4", 0.95);
    tree.mark_terminal(answer);
    let outcome = tree.finish();
    // Exactly one terminal solution, reachable along the best path.
    assert_eq!(outcome.terminal_nodes.len(), 1);
    assert!(outcome.terminal_nodes.contains(&answer));
    assert_eq!(outcome.best_path, vec![0, step, answer]);
    assert!((outcome.best_value - 0.95).abs() < f64::EPSILON);
    assert!(outcome.stats.nodes_created >= 3);
    assert_eq!(outcome.stats.solutions_found, 1);
}
#[test]
fn test_tot_result_best_path_thoughts() {
    let mut tree = ToTExplorer::bfs();
    tree.initialize("Root thought");
    let mid = tree.add_child(0, "Middle thought", 0.8);
    let leaf = tree.add_child(mid, "Final thought", 0.9);
    tree.mark_terminal(leaf);
    let outcome = tree.finish();
    // The best path reads root-to-leaf in order.
    let thoughts = outcome.best_path_thoughts();
    assert_eq!(thoughts.len(), 3);
    assert_eq!(thoughts[0], "Root thought");
    assert_eq!(thoughts[1], "Middle thought");
    assert_eq!(thoughts[2], "Final thought");
}
#[test]
fn test_tot_result_all_solutions() {
    let mut tree = ToTExplorer::bfs();
    tree.initialize("Root");
    let branch_a = tree.add_child(0, "Path 1", 0.7);
    let sol_a = tree.add_child(branch_a, "Solution 1", 0.8);
    tree.mark_terminal(sol_a);
    let branch_b = tree.add_child(0, "Path 2", 0.8);
    let sol_b = tree.add_child(branch_b, "Solution 2", 0.95);
    tree.mark_terminal(sol_b);
    let outcome = tree.finish();
    // Both terminals are reported, best value first.
    let solutions = outcome.all_solutions();
    assert_eq!(solutions.len(), 2);
    assert!((solutions[0].1 - 0.95).abs() < f64::EPSILON);
    assert!((solutions[1].1 - 0.8).abs() < f64::EPSILON);
}
#[test]
fn test_tot_stats_effective_branching_factor() {
    // One expanded parent with three children yields a branching factor of 3.
    let mut tree = ToTExplorer::bfs();
    tree.initialize("Root");
    for (label, score) in [("A", 0.8), ("B", 0.7), ("C", 0.6)] {
        tree.add_child(0, label, score);
    }
    let outcome = tree.finish();
    assert!((outcome.stats.effective_branching_factor - 3.0).abs() < f64::EPSILON);
}
#[test]
fn test_tot_max_depth_limit() {
    // With max_depth = 2, no node at or beyond depth 2 should be expandable.
    let shallow = ToTConfig {
        max_depth: 2,
        ..ToTConfig::default()
    };
    let mut tree = ToTExplorer::new(shallow);
    tree.initialize("Root");
    let depth1 = tree.add_child(0, "Depth 1", 0.8);
    let depth2 = tree.add_child(depth1, "Depth 2", 0.8);
    let _ = tree.add_child(depth2, "Depth 3", 0.8);
    if let Some(id) = tree.next_to_expand() {
        assert!(tree.get_node(id).unwrap().depth < 2);
    }
}
#[test]
fn test_tot_serialization() {
    // A config must round-trip through its JSON representation intact.
    let original = ToTConfig::beam(3);
    let encoded = serde_json::to_string(&original).unwrap();
    let decoded: ToTConfig = serde_json::from_str(&encoded).unwrap();
    assert_eq!(decoded.beam_width, 3);
    assert_eq!(decoded.strategy, ExplorationStrategy::BeamSearch);
}
#[test]
fn test_thought_generation_method_default() {
    // Sampled is the canonical default generation method.
    let default_method = ThoughtGenerationMethod::default();
    assert_eq!(default_method, ThoughtGenerationMethod::Sampled);
}
#[test]
fn test_tot_explorer_should_continue_max_nodes() {
    // Hitting the node budget must halt exploration.
    let tight = ToTConfig {
        max_nodes: 3,
        ..ToTConfig::default()
    };
    let mut tree = ToTExplorer::new(tight);
    tree.initialize("Root");
    tree.add_child(0, "A", 0.8);
    tree.add_child(0, "B", 0.7);
    // Root + two children == max_nodes, so the explorer is exhausted.
    assert!(!tree.should_continue());
}
}