//! harn_vm — crate root: module declarations, public re-exports, and
//! top-level compile/schema helpers.
// Large error payloads and cloned-ref patterns are accepted crate-wide here;
// see individual modules before tightening these lints.
#![allow(clippy::result_large_err, clippy::cloned_ref_to_slice_refs)]

pub mod a2a;
pub mod agent_events;
pub mod agent_sessions;
pub mod bridge;
pub mod checkpoint;
mod chunk;
mod compiler;
pub mod connectors;
pub mod event_log;
pub mod events;
mod http;
pub mod jsonrpc;
pub mod llm;
pub mod llm_config;
pub mod mcp;
pub mod mcp_card;
pub mod mcp_registry;
pub mod mcp_server;
pub mod metadata;
pub mod observability;
pub mod orchestration;
pub mod record_filter;
pub mod runtime_paths;
pub mod schema;
pub mod secrets;
pub mod skills;
pub mod stdlib;
pub mod stdlib_modules;
pub mod store;
pub mod tool_annotations;
pub mod tracing;
pub mod triggers;
pub mod trust_graph;
pub mod value;
pub mod visible_text;
mod vm;
pub mod waitpoints;
pub mod workspace_path;

pub use checkpoint::register_checkpoint_builtins;
pub use chunk::*;
pub use compiler::*;
pub use connectors::{
    active_connector_client, clear_active_connector_clients,
    cron::{CatchupMode, CronConnector},
    harn_module::{
        load_contract as load_harn_connector_contract, HarnConnector, HarnConnectorContract,
    },
    hmac::verify_hmac_signed,
    install_active_connector_clients, load_pending_webhook_handshakes,
    postprocess_normalized_event, ActivationHandle, ClientError, Connector, ConnectorClient,
    ConnectorCtx, ConnectorError, ConnectorMetricsSnapshot, ConnectorRegistry,
    GenericWebhookConnector, GitHubConnector, LinearConnector, MetricsRegistry, NotionConnector,
    PersistedNotionWebhookHandshake, PostNormalizeOutcome, ProviderPayloadSchema, RateLimitConfig,
    RateLimiterFactory, RawInbound, SlackConnector, TriggerBinding, TriggerKind, TriggerRegistry,
    WebhookSignatureVariant,
};
pub use http::{register_http_builtins, reset_http_state};
pub use llm::register_llm_builtins;
pub use llm::trigger_predicate::TriggerPredicateBudget;
pub use mcp::{
    connect_mcp_server, connect_mcp_server_from_json, connect_mcp_server_from_spec,
    register_mcp_builtins,
};
pub use mcp_card::{fetch_server_card, load_server_card_from_path, CardError};
pub use mcp_registry::{
    active_handle as mcp_active_handle, ensure_active as mcp_ensure_active,
    get_registration as mcp_get_registration, install_active as mcp_install_active,
    is_registered as mcp_is_registered, register_servers as mcp_register_servers,
    release as mcp_release, reset as mcp_reset_registry, snapshot_status as mcp_snapshot_status,
    sweep_expired as mcp_sweep_expired, RegisteredMcpServer, RegistryStatus,
};
pub use mcp_server::{
    take_mcp_serve_prompts, take_mcp_serve_registry, take_mcp_serve_resource_templates,
    take_mcp_serve_resources, tool_registry_to_mcp_tools, McpServer,
};
pub use metadata::{register_metadata_builtins, register_scan_builtins};
pub use record_filter::{normalize_record_filter_expression, CompiledRecordFilter};
pub use schema::json_to_vm_value;
pub use stdlib::hitl::{
    append_hitl_response, HitlHostResponse, HITL_APPROVALS_TOPIC, HITL_DUAL_CONTROL_TOPIC,
    HITL_ESCALATIONS_TOPIC, HITL_QUESTIONS_TOPIC,
};
pub use stdlib::host::{clear_host_call_bridge, set_host_call_bridge, HostCallBridge};
pub use stdlib::secret_scan::{
    append_secret_scan_audit, audit_secret_scan_active, scan_content as secret_scan_content,
    SecretFinding, SECRET_SCAN_AUDIT_TOPIC,
};
pub use stdlib::template::{
    lookup_prompt_consumers, lookup_prompt_span, prompt_render_indices, record_prompt_render_index,
    PromptSourceSpan, PromptSpanKind,
};
pub use stdlib::waitpoint::{
    process_waitpoint_resume_event, service_waitpoints_once, WAITPOINT_RESUME_TOPIC,
};
pub use stdlib::workflow_messages::{
    workflow_pause_for_base, workflow_publish_query_for_base, workflow_query_for_base,
    workflow_respond_update_for_base, workflow_resume_for_base, workflow_signal_for_base,
    workflow_update_for_base, WorkflowMailboxState,
};
pub use stdlib::{
    register_agent_stdlib, register_core_stdlib, register_io_stdlib, register_vm_stdlib,
};
pub use store::register_store_builtins;
pub use triggers::{
    append_dispatch_cancel_request, begin_in_flight, binding_version_as_of, clear_dispatcher_state,
    clear_trigger_registry, drain, dynamic_deregister, dynamic_register, finish_in_flight,
    install_manifest_triggers, parse_flow_control_duration, pin_trigger_binding, provider_metadata,
    redact_headers, register_provider_schema, registered_provider_metadata,
    registered_provider_schema_names, reset_provider_catalog, resolve_live_or_as_of,
    resolve_live_trigger_binding, resolve_trigger_binding_as_of, run_trigger_harness_fixture,
    snapshot_dispatcher_stats, snapshot_trigger_bindings, unpin_trigger_binding,
    worker_claims_topic_name, worker_job_topic_name, worker_response_topic_name, ClaimedWorkerJob,
    DispatchCancelRequest, DispatchError, DispatchOutcome, DispatchStatus, Dispatcher,
    DispatcherDrainReport, DispatcherStatsSnapshot, HeaderRedactionPolicy, InboxIndex,
    NotionPolledChangeEvent, ProviderCatalog, ProviderCatalogError, ProviderId, ProviderMetadata,
    ProviderOutboundMethod, ProviderPayload, ProviderRuntimeMetadata, ProviderSchema,
    ProviderSecretRequirement, RecordedTriggerBinding, RetryPolicy, SignatureStatus,
    SignatureVerificationMetadata, StreamEventPayload, TenantId, TraceId, TriggerBatchConfig,
    TriggerBindingSnapshot, TriggerBindingSource, TriggerBindingSpec, TriggerConcurrencyConfig,
    TriggerDebounceConfig, TriggerDispatchOutcome, TriggerEvent, TriggerEventId,
    TriggerExpressionSpec, TriggerFlowControlConfig, TriggerHandlerSpec, TriggerHarnessResult,
    TriggerId, TriggerMetricsSnapshot, TriggerPredicateSpec, TriggerPriorityOrderConfig,
    TriggerRateLimitConfig, TriggerRegistryError, TriggerRetryConfig, TriggerSingletonConfig,
    TriggerState, TriggerThrottleConfig, WorkerQueue, WorkerQueueClaimHandle,
    WorkerQueueEnqueueReceipt, WorkerQueueJob, WorkerQueueJobState, WorkerQueuePriority,
    WorkerQueueResponseRecord, WorkerQueueState, WorkerQueueSummary, DEFAULT_INBOX_RETENTION_DAYS,
    TRIGGERS_LIFECYCLE_TOPIC, TRIGGER_ATTEMPTS_TOPIC, TRIGGER_CANCEL_REQUESTS_TOPIC,
    TRIGGER_DLQ_TOPIC, TRIGGER_INBOX_CLAIMS_TOPIC, TRIGGER_INBOX_ENVELOPES_TOPIC,
    TRIGGER_INBOX_LEGACY_TOPIC, TRIGGER_OPERATION_AUDIT_TOPIC, TRIGGER_OUTBOX_TOPIC,
    TRIGGER_TEST_FIXTURES, WORKER_QUEUE_CATALOG_TOPIC,
};
pub use trust_graph::{
    append_active_trust_record, append_trust_record, group_trust_records_by_trace,
    query_trust_records, resolve_agent_autonomy_tier, summarize_trust_records, topic_for_agent,
    AutonomyTier, TrustAgentSummary, TrustOutcome, TrustQueryFilters, TrustRecord, TrustTraceGroup,
    OPENTRUSTGRAPH_SCHEMA_V0, TRUST_GRAPH_GLOBAL_TOPIC, TRUST_GRAPH_TOPIC_PREFIX,
};
pub use value::*;
pub use vm::*;

144/// Lex, parse, type-check, and compile source to bytecode in one call.
145/// Bails on the first type error. For callers that need diagnostics
146/// rather than early exit, use `harn_parser::check_source` directly
147/// and then call `Compiler::new().compile(&program)`.
148pub fn compile_source(source: &str) -> Result<Chunk, String> {
149    let program = harn_parser::check_source_strict(source).map_err(|e| e.to_string())?;
150    Compiler::new().compile(&program).map_err(|e| e.to_string())
151}
152
153pub fn json_schema_for_type_expr(type_expr: &harn_parser::TypeExpr) -> Option<serde_json::Value> {
154    let schema = compiler::Compiler::type_expr_to_schema_value(type_expr)?;
155    let json_schema = schema::schema_to_json_schema_value(&schema).ok()?;
156    Some(llm::vm_value_to_json(&json_schema))
157}
158
159pub fn json_schema_for_typed_params(params: &[harn_parser::TypedParam]) -> serde_json::Value {
160    let mut properties = serde_json::Map::new();
161    let mut required = Vec::new();
162
163    for param in params {
164        let param_schema = param
165            .type_expr
166            .as_ref()
167            .and_then(json_schema_for_type_expr)
168            .unwrap_or_else(|| serde_json::json!({}));
169        if param.default_value.is_none() {
170            required.push(serde_json::Value::String(param.name.clone()));
171        }
172        properties.insert(param.name.clone(), param_schema);
173    }
174
175    let mut schema = serde_json::Map::new();
176    schema.insert(
177        "type".to_string(),
178        serde_json::Value::String("object".to_string()),
179    );
180    schema.insert(
181        "properties".to_string(),
182        serde_json::Value::Object(properties),
183    );
184    if !required.is_empty() {
185        schema.insert("required".to_string(), serde_json::Value::Array(required));
186    }
187    serde_json::Value::Object(schema)
188}
189
/// Reset all thread-local state that can leak between test runs.
///
/// Clears per-thread caches/registries across the crate's subsystems in a
/// fixed order: LLM state and user config overrides, HTTP state, the active
/// event log, stdlib state, active connector clients, orchestration runtime
/// hooks, trigger dispatcher + registry, event/agent-event sinks, the agent
/// session store, and finally the MCP registry. Intended to be called
/// between test cases for isolation.
///
/// NOTE(review): the call order looks deliberate (dispatcher cleared before
/// its registry; sinks after producers) — confirm before reordering.
pub fn reset_thread_local_state() {
    llm::reset_llm_state();
    llm_config::clear_user_overrides();
    http::reset_http_state();
    event_log::reset_active_event_log();
    stdlib::reset_stdlib_state();
    connectors::clear_active_connector_clients();
    orchestration::clear_runtime_hooks();
    triggers::clear_dispatcher_state();
    triggers::clear_trigger_registry();
    events::reset_event_sinks();
    agent_events::reset_all_sinks();
    agent_sessions::reset_session_store();
    mcp_registry::reset();
}
205}