mod adapter;
mod auth;
mod event;
mod no_streaming;
mod scheduled_recovery;
mod stream_recovery;
#[cfg(feature = "sqs")]
mod durable;
#[cfg(test)]
mod builder_tests;
#[cfg(test)]
mod scheduled_recovery_tests;
#[cfg(test)]
mod stream_recovery_tests;
pub use adapter::{axum_to_lambda_response, lambda_to_axum_request};
pub use auth::{AuthorizerMapping, LambdaAuthorizerMiddleware};
pub use event::{LambdaEvent, classify_event};
pub use no_streaming::NoStreamingLayer;
pub use scheduled_recovery::{
LambdaScheduledRecoveryConfig, LambdaScheduledRecoveryHandler, LambdaScheduledRecoveryResponse,
};
pub use stream_recovery::LambdaStreamRecoveryHandler;
#[cfg(feature = "sqs")]
pub use durable::{SqsDurableExecutorQueue, drive_sqs_batch};
use std::sync::Arc;
use turul_a2a::error::A2aError;
use turul_a2a::executor::AgentExecutor;
use turul_a2a::middleware::{A2aMiddleware, MiddlewareStack};
use turul_a2a::push::A2aPushDeliveryStore;
use turul_a2a::router::{AppState, build_router};
use turul_a2a::server::RuntimeConfig;
use turul_a2a::storage::{
A2aAtomicStore, A2aCancellationSupervisor, A2aEventStore, A2aPushNotificationStorage,
A2aTaskStorage,
};
use turul_a2a::streaming::TaskEventBroker;
/// Builder that wires an [`AgentExecutor`], storage backends, middleware and
/// runtime configuration into a [`LambdaA2aHandler`] via [`Self::build`].
pub struct LambdaA2aServerBuilder {
    /// Agent executor that runs task messages; required by `build()`.
    executor: Option<Arc<dyn AgentExecutor>>,
    /// Task storage backend; required by `build()`.
    task_storage: Option<Arc<dyn A2aTaskStorage>>,
    /// Push-notification configuration storage; required by `build()`.
    push_storage: Option<Arc<dyn A2aPushNotificationStorage>>,
    /// Event store; required by `build()`.
    event_store: Option<Arc<dyn A2aEventStore>>,
    /// Atomic store; required by `build()`.
    atomic_store: Option<Arc<dyn A2aAtomicStore>>,
    /// Cancellation supervisor; required by `build()` so cross-instance
    /// cancel markers are observable.
    cancellation_supervisor: Option<Arc<dyn A2aCancellationSupervisor>>,
    /// Optional push-delivery store; when present, `build()` constructs a
    /// push dispatcher around it.
    push_delivery_store: Option<Arc<dyn A2aPushDeliveryStore>>,
    /// Middleware stack entries, in the order they were added.
    middleware: Vec<Arc<dyn A2aMiddleware>>,
    /// Runtime tuning knobs; starts at `RuntimeConfig::default()`.
    runtime_config: RuntimeConfig,
    /// Optional durable queue; its presence gates
    /// `supports_return_immediately` in `build()`.
    durable_executor_queue: Option<Arc<dyn turul_a2a::durable_executor::DurableExecutorQueue>>,
    /// Optional request-path prefix to strip (e.g. an API Gateway stage);
    /// validated in `build()`.
    path_prefix: Option<String>,
}
impl LambdaA2aServerBuilder {
    /// Creates an empty builder: no executor, no storage, no middleware, a
    /// default [`RuntimeConfig`], and no path prefix.
    pub fn new() -> Self {
        Self {
            executor: None,
            task_storage: None,
            push_storage: None,
            event_store: None,
            atomic_store: None,
            cancellation_supervisor: None,
            push_delivery_store: None,
            middleware: vec![],
            runtime_config: RuntimeConfig::default(),
            durable_executor_queue: None,
            path_prefix: None,
        }
    }

    /// Strips `prefix` (for example an API Gateway stage such as `/dev`)
    /// from incoming request paths before routing. The prefix is validated
    /// in [`Self::build`]: it must start with `/` and must not end with `/`.
    pub fn strip_path_prefix(mut self, prefix: impl Into<String>) -> Self {
        self.path_prefix = Some(prefix.into());
        self
    }

    /// Wires a durable executor queue and enables `return_immediately`
    /// support in the runtime configuration.
    pub fn with_durable_executor(
        mut self,
        queue: Arc<dyn turul_a2a::durable_executor::DurableExecutorQueue>,
    ) -> Self {
        self.durable_executor_queue = Some(queue);
        self.runtime_config.supports_return_immediately = true;
        self
    }

    /// Convenience wrapper around [`Self::with_durable_executor`] backed by
    /// an SQS queue.
    #[cfg(feature = "sqs")]
    pub fn with_sqs_return_immediately(
        self,
        queue_url: impl Into<String>,
        sqs_client: Arc<aws_sdk_sqs::Client>,
    ) -> Self {
        let queue = Arc::new(SqsDurableExecutorQueue::new(queue_url, sqs_client));
        self.with_durable_executor(queue)
    }

    /// Sets the agent executor. Required before [`Self::build`].
    pub fn executor(mut self, exec: impl AgentExecutor + 'static) -> Self {
        self.executor = Some(Arc::new(exec));
        self
    }

    /// Wires a single storage value as the backend for all five storage
    /// traits at once, which guarantees the backend-name consistency that
    /// [`Self::build`] enforces.
    pub fn storage<S>(mut self, storage: S) -> Self
    where
        S: A2aTaskStorage
            + A2aPushNotificationStorage
            + A2aEventStore
            + A2aAtomicStore
            + A2aCancellationSupervisor
            + Clone
            + 'static,
    {
        self.task_storage = Some(Arc::new(storage.clone()));
        self.push_storage = Some(Arc::new(storage.clone()));
        self.event_store = Some(Arc::new(storage.clone()));
        self.atomic_store = Some(Arc::new(storage.clone()));
        self.cancellation_supervisor = Some(Arc::new(storage));
        self
    }

    /// Sets the task storage individually (see also [`Self::storage`]).
    pub fn task_storage(mut self, s: impl A2aTaskStorage + 'static) -> Self {
        self.task_storage = Some(Arc::new(s));
        self
    }

    /// Sets the push-notification storage individually.
    pub fn push_storage(mut self, s: impl A2aPushNotificationStorage + 'static) -> Self {
        self.push_storage = Some(Arc::new(s));
        self
    }

    /// Sets the event store individually.
    pub fn event_store(mut self, s: impl A2aEventStore + 'static) -> Self {
        self.event_store = Some(Arc::new(s));
        self
    }

    /// Sets the atomic store individually.
    pub fn atomic_store(mut self, s: impl A2aAtomicStore + 'static) -> Self {
        self.atomic_store = Some(Arc::new(s));
        self
    }

    /// Sets the cancellation supervisor individually.
    pub fn cancellation_supervisor(mut self, s: impl A2aCancellationSupervisor + 'static) -> Self {
        self.cancellation_supervisor = Some(Arc::new(s));
        self
    }

    /// Sets the push-delivery store, enabling push dispatch in
    /// [`Self::build`] (the atomic store must agree — see `build`).
    pub fn push_delivery_store(mut self, store: impl A2aPushDeliveryStore + 'static) -> Self {
        self.push_delivery_store = Some(Arc::new(store));
        self
    }

    /// Replaces the runtime configuration wholesale.
    pub fn runtime_config(mut self, cfg: RuntimeConfig) -> Self {
        self.runtime_config = cfg;
        self
    }

    /// Appends one middleware to the stack (applied in insertion order).
    pub fn middleware(mut self, mw: Arc<dyn A2aMiddleware>) -> Self {
        self.middleware.push(mw);
        self
    }

    /// Validates the wiring and assembles router, application state and
    /// (optionally) a push dispatcher into a [`LambdaA2aHandler`].
    ///
    /// # Errors
    ///
    /// Returns [`A2aError::Internal`] when a required component is missing,
    /// when storage backends disagree, or when push-delivery configuration
    /// is inconsistent; returns [`A2aError::InvalidRequest`] for a malformed
    /// path prefix.
    pub fn build(self) -> Result<LambdaA2aHandler, A2aError> {
        // `ok_or_else` defers constructing (and allocating) the error
        // message until a component is actually missing, instead of paying
        // for it on every successful build (clippy `or_fun_call`).
        let executor = self
            .executor
            .ok_or_else(|| A2aError::Internal("executor is required".into()))?;
        let task_storage = self
            .task_storage
            .ok_or_else(|| A2aError::Internal("task_storage is required for Lambda".into()))?;
        let push_storage = self
            .push_storage
            .ok_or_else(|| A2aError::Internal("push_storage is required for Lambda".into()))?;
        let event_store: Arc<dyn A2aEventStore> = self.event_store.ok_or_else(|| {
            A2aError::Internal(
                "event_store is required for Lambda (use .storage() for unified backend)".into(),
            )
        })?;
        let atomic_store: Arc<dyn A2aAtomicStore> = self.atomic_store.ok_or_else(|| {
            A2aError::Internal(
                "atomic_store is required for Lambda (use .storage() for unified backend)".into(),
            )
        })?;
        let cancellation_supervisor: Arc<dyn A2aCancellationSupervisor> =
            self.cancellation_supervisor.ok_or_else(|| {
                A2aError::Internal(
                    "cancellation_supervisor is required for Lambda. Use .storage() for unified \
backend wiring, or .cancellation_supervisor(...) alongside the individual \
storage setters. Omitting it silently breaks cross-instance cancellation."
                        .into(),
                )
            })?;
        // All five storage traits must resolve to the same backend so that
        // cross-instance state (tasks, events, cancel markers) is shared.
        let task_backend = task_storage.backend_name();
        let push_backend = push_storage.backend_name();
        let event_backend = event_store.backend_name();
        let atomic_backend = atomic_store.backend_name();
        let supervisor_backend = cancellation_supervisor.backend_name();
        if task_backend != push_backend
            || task_backend != event_backend
            || task_backend != atomic_backend
            || task_backend != supervisor_backend
        {
            return Err(A2aError::Internal(format!(
                "Storage backend mismatch: task={task_backend}, push={push_backend}, \
event={event_backend}, atomic={atomic_backend}, \
cancellation_supervisor={supervisor_backend}. \
requires all storage traits to share the same backend. \
requires the cancellation supervisor on the same backend \
so cross-instance cancel markers are observable. \
Use .storage() for unified backend."
            )));
        }
        let push_delivery_store = self.push_delivery_store;
        // Push-dispatch wiring must be all-or-nothing: a delivery store
        // without dispatch enabled (or vice versa) is a misconfiguration.
        match (
            push_delivery_store.is_some(),
            atomic_store.push_dispatch_enabled(),
        ) {
            (true, true) | (false, false) => {}
            (true, false) => {
                return Err(A2aError::Internal(
                    "push_delivery_store wired but atomic_store.push_dispatch_enabled() \
is false. Call .with_push_dispatch_enabled(true) on the backend \
storage before passing it to .storage()."
                        .into(),
                ));
            }
            (false, true) => {
                return Err(A2aError::Internal(
                    "atomic_store.push_dispatch_enabled() is true but no \
push_delivery_store is wired. Pending-dispatch markers would be \
written with no consumer, imposing load-bearing infra for no \
benefit. If you need to populate markers for an external \
consumer, open an issue for a distinctly-named opt-in — for now, \
this configuration is rejected."
                        .into(),
                ));
            }
        }
        let mut runtime_config = self.runtime_config;
        // `return_immediately` is only honoured when a durable queue exists
        // to pick the deferred work back up.
        if self.durable_executor_queue.is_none() {
            runtime_config.supports_return_immediately = false;
        }
        let push_dispatcher: Option<Arc<turul_a2a::push::PushDispatcher>> =
            if let Some(delivery) = push_delivery_store.as_ref() {
                let push_delivery_backend = delivery.backend_name();
                if task_backend != push_delivery_backend {
                    return Err(A2aError::Internal(format!(
                        "Storage backend mismatch: task={task_backend}, \
push_delivery={push_delivery_backend}. \
requires all storage traits to share the same backend."
                    )));
                }
                // A claim must outlive the worst-case retry schedule, or a
                // still-retrying delivery could be claimed twice.
                let retry_horizon = runtime_config
                    .push_backoff_cap
                    .saturating_mul(runtime_config.push_max_attempts as u32);
                if runtime_config.push_claim_expiry <= retry_horizon {
                    return Err(A2aError::Internal(format!(
                        "push_claim_expiry ({:?}) must be greater than retry horizon \
(push_max_attempts={} * push_backoff_cap={:?} = {:?}). \
Raise push_claim_expiry or lower push_max_attempts/push_backoff_cap.",
                        runtime_config.push_claim_expiry,
                        runtime_config.push_max_attempts,
                        runtime_config.push_backoff_cap,
                        retry_horizon
                    )));
                }
                // Copy the push tuning knobs from RuntimeConfig into the
                // delivery worker's own config type.
                let mut delivery_cfg = turul_a2a::push::delivery::PushDeliveryConfig::default();
                delivery_cfg.max_attempts = runtime_config.push_max_attempts as u32;
                delivery_cfg.backoff_base = runtime_config.push_backoff_base;
                delivery_cfg.backoff_cap = runtime_config.push_backoff_cap;
                delivery_cfg.backoff_jitter = runtime_config.push_backoff_jitter;
                delivery_cfg.request_timeout = runtime_config.push_request_timeout;
                delivery_cfg.connect_timeout = runtime_config.push_connect_timeout;
                delivery_cfg.read_timeout = runtime_config.push_read_timeout;
                delivery_cfg.claim_expiry = runtime_config.push_claim_expiry;
                delivery_cfg.max_payload_bytes = runtime_config.push_max_payload_bytes;
                delivery_cfg.allow_insecure_urls = runtime_config.allow_insecure_push_urls;
                // Unique per-instance id so delivery claims are attributable.
                let instance_id = format!("a2a-lambda-{}", uuid::Uuid::now_v7());
                let worker = turul_a2a::push::delivery::PushDeliveryWorker::new(
                    delivery.clone(),
                    delivery_cfg,
                    None,
                    instance_id,
                )
                .map_err(|e| A2aError::Internal(format!("push worker build failed: {e}")))?;
                Some(Arc::new(turul_a2a::push::PushDispatcher::new(
                    Arc::new(worker),
                    push_storage.clone(),
                    task_storage.clone(),
                )))
            } else {
                None
            };
        let state = AppState {
            executor,
            task_storage,
            push_storage,
            event_store,
            atomic_store,
            event_broker: TaskEventBroker::new(),
            middleware_stack: Arc::new(MiddlewareStack::new(self.middleware)),
            runtime_config,
            in_flight: Arc::new(turul_a2a::server::in_flight::InFlightRegistry::new()),
            cancellation_supervisor,
            push_delivery_store,
            push_dispatcher,
            durable_executor_queue: self.durable_executor_queue,
        };
        let router = build_router(state.clone());
        // Normalise the path prefix: `None`, "" and "/" all mean "strip
        // nothing"; everything else must be "/seg[/seg…]" with no trailing
        // slash.
        let path_prefix = match self.path_prefix {
            None => None,
            Some(p) if p.is_empty() || p == "/" => None,
            Some(p) => {
                if !p.starts_with('/') {
                    return Err(A2aError::InvalidRequest {
                        message: format!(
                            "LambdaA2aServerBuilder::strip_path_prefix: prefix must start with '/'; got {p:?}"
                        ),
                    });
                }
                if p.ends_with('/') {
                    return Err(A2aError::InvalidRequest {
                        message: format!(
                            "LambdaA2aServerBuilder::strip_path_prefix: prefix must not end with '/' (except '/'); got {p:?}"
                        ),
                    });
                }
                Some(Arc::from(p))
            }
        };
        Ok(LambdaA2aHandler {
            router,
            state,
            path_prefix,
        })
    }
}
impl Default for LambdaA2aServerBuilder {
    /// Equivalent to [`LambdaA2aServerBuilder::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Cloneable Lambda entry point produced by [`LambdaA2aServerBuilder::build`].
#[derive(Clone)]
pub struct LambdaA2aHandler {
    /// Axum router serving the A2A HTTP surface.
    router: axum::Router,
    /// Shared application state. Only read directly by the SQS entry point
    /// (`handle_sqs`), hence dead-code-allowed without the `sqs` feature.
    #[cfg_attr(not(feature = "sqs"), allow(dead_code))]
    state: AppState,
    /// Normalised path prefix to strip from inbound requests, if any.
    path_prefix: Option<Arc<str>>,
}
impl LambdaA2aHandler {
    /// Returns a fresh [`LambdaA2aServerBuilder`].
    pub fn builder() -> LambdaA2aServerBuilder {
        LambdaA2aServerBuilder::new()
    }

    /// Handles one `lambda_http` request: converts it to an axum request,
    /// optionally strips the configured path prefix, drives the router,
    /// and converts the response back.
    pub async fn handle(
        &self,
        event: lambda_http::Request,
    ) -> Result<lambda_http::Response<lambda_http::Body>, lambda_http::Error> {
        let axum_req = lambda_to_axum_request(event)?;
        let axum_req = match &self.path_prefix {
            Some(prefix) => adapter::strip_request_path_prefix(axum_req, prefix),
            None => axum_req,
        };
        // `oneshot` drives a single request through a clone of the router.
        let axum_resp = tower::ServiceExt::oneshot(self.router.clone(), axum_req)
            .await
            .map_err(|e| lambda_http::Error::from(format!("Router error: {e}")))?;
        axum_to_lambda_response(axum_resp).await
    }

    /// Handles a raw JSON Lambda HTTP event and re-serialises the response
    /// into the SAME envelope shape the event arrived in (API Gateway v1,
    /// API Gateway v2, or ALB).
    ///
    /// # Errors
    ///
    /// Fails on unparseable events, unsupported event variants (WebSocket),
    /// handler errors, or response (de)serialisation failures.
    pub async fn handle_http_event_value(
        &self,
        value: serde_json::Value,
    ) -> Result<serde_json::Value, lambda_runtime::Error> {
        use base64::Engine;
        use http_body_util::BodyExt;
        let lambda_req: lambda_http::request::LambdaRequest = serde_json::from_value(value)
            .map_err(|e| lambda_runtime::Error::from(format!("invalid HTTP event: {e}")))?;
        // Record the inbound envelope shape so the response can mirror it.
        let shape = match &lambda_req {
            lambda_http::request::LambdaRequest::ApiGatewayV1(_) => HttpEventShape::ApiGatewayV1,
            lambda_http::request::LambdaRequest::ApiGatewayV2(_) => HttpEventShape::ApiGatewayV2,
            lambda_http::request::LambdaRequest::Alb(_) => HttpEventShape::Alb,
            lambda_http::request::LambdaRequest::WebSocket(_) => {
                return Err(lambda_runtime::Error::from(
                    "WebSocket Lambda events are not supported by this adapter",
                ));
            }
            // `LambdaRequest` is an external enum; any future variant is
            // rejected explicitly rather than misrouted.
            _ => {
                return Err(lambda_runtime::Error::from(
                    "unsupported Lambda HTTP event variant",
                ));
            }
        };
        let req: lambda_http::Request = lambda_req.into();
        let resp = self
            .handle(req)
            .await
            .map_err(|e| lambda_runtime::Error::from(format!("handler error: {e}")))?;
        let (parts, body) = resp.into_parts();
        // Buffer the full response body before serialising the envelope.
        let bytes = body
            .collect()
            .await
            .map_err(|e| lambda_runtime::Error::from(format!("body collect: {e}")))?
            .to_bytes();
        // Heuristic: treat common textual content types (or anything with an
        // explicit charset) as text; everything else gets base64-encoded.
        let ct = parts
            .headers
            .get(http::header::CONTENT_TYPE)
            .and_then(|v| v.to_str().ok())
            .unwrap_or("application/octet-stream")
            .to_ascii_lowercase();
        let is_text = ct.starts_with("text/")
            || ct.starts_with("application/json")
            || ct.starts_with("application/xml")
            || ct.starts_with("application/javascript")
            || ct.contains("charset=");
        let (body_str, is_base64) = if bytes.is_empty() {
            (None, false)
        } else if is_text {
            // A "textual" content type with invalid UTF-8 still falls back
            // to base64 rather than corrupting the payload.
            match std::str::from_utf8(&bytes) {
                Ok(s) => (Some(s.to_string()), false),
                Err(_) => (
                    Some(base64::engine::general_purpose::STANDARD.encode(&bytes)),
                    true,
                ),
            }
        } else {
            (
                Some(base64::engine::general_purpose::STANDARD.encode(&bytes)),
                true,
            )
        };
        let status = parts.status.as_u16() as i64;
        let headers = parts.headers;
        let body_lambda = body_str.map(aws_lambda_events::encodings::Body::Text);
        // Serialise into the envelope matching the inbound event shape.
        match shape {
            HttpEventShape::ApiGatewayV1 => {
                let mut api_resp = aws_lambda_events::apigw::ApiGatewayProxyResponse::default();
                api_resp.status_code = status;
                api_resp.headers = headers;
                api_resp.body = body_lambda;
                api_resp.is_base64_encoded = is_base64;
                serde_json::to_value(api_resp).map_err(|e| {
                    lambda_runtime::Error::from(format!("serialise APIGW v1 response: {e}"))
                })
            }
            HttpEventShape::ApiGatewayV2 => {
                let mut api_resp = aws_lambda_events::apigw::ApiGatewayV2httpResponse::default();
                api_resp.status_code = status;
                api_resp.headers = headers;
                api_resp.body = body_lambda;
                api_resp.is_base64_encoded = is_base64;
                serde_json::to_value(api_resp).map_err(|e| {
                    lambda_runtime::Error::from(format!("serialise APIGW v2 response: {e}"))
                })
            }
            HttpEventShape::Alb => {
                let mut api_resp = aws_lambda_events::alb::AlbTargetGroupResponse::default();
                api_resp.status_code = status;
                api_resp.headers = headers;
                api_resp.body = body_lambda;
                api_resp.is_base64_encoded = is_base64;
                serde_json::to_value(api_resp).map_err(|e| {
                    lambda_runtime::Error::from(format!("serialise ALB response: {e}"))
                })
            }
        }
    }
}
/// Which Lambda HTTP envelope an inbound event used; the response in
/// `handle_http_event_value` is serialised back into the matching envelope.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum HttpEventShape {
    ApiGatewayV1,
    ApiGatewayV2,
    Alb,
}
impl LambdaA2aHandler {
    /// Drives an SQS batch through the durable executor, returning an
    /// `SqsBatchResponse` (the partial-batch-failure envelope).
    #[cfg(feature = "sqs")]
    pub async fn handle_sqs(
        &self,
        event: aws_lambda_events::event::sqs::SqsEvent,
    ) -> aws_lambda_events::event::sqs::SqsBatchResponse {
        durable::drive_sqs_batch(&self.state, event).await
    }

    /// Runs the Lambda HTTP runtime loop, serving each event through
    /// [`Self::handle`]. Consumes the handler; the `Arc` lets the `move`
    /// closure share it across invocations.
    pub async fn run_http_only(self) -> Result<(), lambda_runtime::Error> {
        let handler = Arc::new(self);
        lambda_http::run(lambda_http::service_fn(move |event| {
            let h = Arc::clone(&handler);
            async move { h.handle(event).await }
        }))
        .await
    }
}
// Without the `sqs` feature there is no queue entry point, so `run` is
// plain HTTP serving.
#[cfg(not(feature = "sqs"))]
impl LambdaA2aHandler {
    /// Runs the Lambda runtime loop (HTTP events only).
    pub async fn run(self) -> Result<(), lambda_runtime::Error> {
        self.run_http_only().await
    }
}
impl LambdaA2aServerBuilder {
    /// Convenience: `build()` the handler and run the Lambda runtime loop,
    /// mapping build errors into `lambda_runtime::Error`.
    pub async fn run(self) -> Result<(), lambda_runtime::Error> {
        let handler = self.build().map_err(|e| {
            lambda_runtime::Error::from(format!("LambdaA2aServerBuilder::run: {e}"))
        })?;
        handler.run().await
    }
}
#[cfg(test)]
mod handle_http_event_value_shape_tests {
    //! Verifies that `handle_http_event_value` mirrors the inbound event's
    //! envelope shape (APIGW v1, APIGW v2, ALB) in its JSON response.
    use super::*;
    use std::sync::Arc;
    use turul_a2a::executor::{AgentExecutor, ExecutionContext};
    use turul_a2a::server::RuntimeConfig;
    use turul_a2a::server::in_flight::InFlightRegistry;
    use turul_a2a::storage::InMemoryA2aStorage;
    use turul_a2a::streaming::TaskEventBroker;
    use turul_a2a_types::{Message, Task};

    // Executor stub: marks every task completed without doing any work.
    struct NoOpExecutor;
    #[async_trait::async_trait]
    impl AgentExecutor for NoOpExecutor {
        async fn execute(
            &self,
            task: &mut Task,
            _msg: &Message,
            _ctx: &ExecutionContext,
        ) -> Result<(), turul_a2a::error::A2aError> {
            let mut proto = task.as_proto().clone();
            proto.status = Some(turul_a2a_proto::TaskStatus {
                state: turul_a2a_proto::TaskState::Completed.into(),
                message: None,
                timestamp: None,
            });
            *task = Task::try_from(proto).unwrap();
            Ok(())
        }
        fn agent_card(&self) -> turul_a2a_proto::AgentCard {
            turul_a2a_proto::AgentCard {
                name: "test-agent".to_string(),
                ..Default::default()
            }
        }
    }

    // Builds a handler over in-memory storage with no path prefix,
    // bypassing the builder's validation path.
    fn build_handler() -> LambdaA2aHandler {
        let s = Arc::new(InMemoryA2aStorage::new());
        let state = AppState {
            executor: Arc::new(NoOpExecutor),
            task_storage: s.clone(),
            push_storage: s.clone(),
            event_store: s.clone(),
            atomic_store: s.clone(),
            event_broker: TaskEventBroker::new(),
            middleware_stack: Arc::new(MiddlewareStack::new(vec![])),
            runtime_config: RuntimeConfig::default(),
            in_flight: Arc::new(InFlightRegistry::new()),
            cancellation_supervisor: s.clone(),
            push_delivery_store: None,
            push_dispatcher: None,
            durable_executor_queue: None,
        };
        let router = build_router(state.clone());
        LambdaA2aHandler {
            router,
            state,
            path_prefix: None,
        }
    }

    // Minimal APIGW v2 GET event for the agent-card endpoint.
    fn apigw_v2_agent_card_get() -> serde_json::Value {
        serde_json::json!({
            "version": "2.0",
            "routeKey": "GET /.well-known/agent-card.json",
            "rawPath": "/.well-known/agent-card.json",
            "rawQueryString": "",
            "headers": {"accept": "application/json", "a2a-version": "1.0"},
            "requestContext": {
                "accountId": "000000000000",
                "apiId": "api",
                "domainName": "fake.execute-api",
                "domainPrefix": "fake",
                "http": {
                    "method": "GET",
                    "path": "/.well-known/agent-card.json",
                    "protocol": "HTTP/1.1",
                    "sourceIp": "127.0.0.1",
                    "userAgent": "test"
                },
                "requestId": "rid",
                "routeKey": "GET /.well-known/agent-card.json",
                "stage": "$default",
                "time": "01/Jan/2026:00:00:00 +0000",
                "timeEpoch": 1_735_689_600_000i64
            },
            "isBase64Encoded": false
        })
    }

    // APIGW v1 GET event; `stage` controls the requestContext path prefix
    // (non-"$default" stages prepend "/{stage}" in requestContext.path).
    fn apigw_v1_agent_card_get_with_stage(stage: &str, path: &str) -> serde_json::Value {
        let request_context_path = if stage == "$default" {
            path.to_string()
        } else {
            format!("/{stage}{path}")
        };
        serde_json::json!({
            "resource": "/{proxy+}",
            "path": path,
            "httpMethod": "GET",
            "headers": {"accept": "application/json", "a2a-version": "1.0"},
            "multiValueHeaders": {},
            "queryStringParameters": null,
            "multiValueQueryStringParameters": null,
            "pathParameters": {"proxy": path.trim_start_matches('/').to_string()},
            "stageVariables": null,
            "requestContext": {
                "accountId": "000000000000",
                "apiId": "abc123",
                "domainName": "fake.execute-api",
                "domainPrefix": "fake",
                "extendedRequestId": "xrid",
                "httpMethod": "GET",
                "identity": {
                    "sourceIp": "127.0.0.1",
                    "userAgent": "test"
                },
                "path": request_context_path,
                "protocol": "HTTP/1.1",
                "requestId": "rid",
                "requestTime": "01/Jan/2026:00:00:00 +0000",
                "requestTimeEpoch": 1_735_689_600_000i64,
                "resourceId": "rsrc",
                "resourcePath": "/{proxy+}",
                "stage": stage
            },
            "body": null,
            "isBase64Encoded": false
        })
    }

    // Default-stage APIGW v1 event for the agent-card endpoint.
    fn apigw_v1_agent_card_get() -> serde_json::Value {
        apigw_v1_agent_card_get_with_stage("$default", "/.well-known/agent-card.json")
    }

    // Minimal ALB target-group GET event for the agent-card endpoint.
    fn alb_agent_card_get() -> serde_json::Value {
        serde_json::json!({
            "requestContext": {
                "elb": {
                    "targetGroupArn": "arn:aws:elasticloadbalancing:ap-southeast-2:000:targetgroup/fake/00"
                }
            },
            "httpMethod": "GET",
            "path": "/.well-known/agent-card.json",
            "queryStringParameters": {},
            "headers": {"accept": "application/json", "a2a-version": "1.0"},
            "body": "",
            "isBase64Encoded": false
        })
    }

    #[tokio::test]
    async fn v1_inbound_event_emits_v1_response_envelope() {
        let handler = build_handler();
        let resp_value = handler
            .handle_http_event_value(apigw_v1_agent_card_get())
            .await
            .expect("handle_http_event_value(v1) must succeed");
        let v1: aws_lambda_events::apigw::ApiGatewayProxyResponse =
            serde_json::from_value(resp_value.clone())
                .expect("response must deserialize as ApiGatewayProxyResponse (v1)");
        assert_eq!(v1.status_code, 200);
        match v1.body {
            Some(aws_lambda_events::encodings::Body::Text(s)) => {
                assert!(s.contains("\"name\""), "v1 body shape: {s}");
            }
            other => panic!("expected text body, got {other:?}"),
        }
        // `cookies` exists only on the v2 envelope; its absence proves the
        // response was serialised as v1.
        assert!(
            resp_value.get("cookies").is_none(),
            "v1 response must not carry v2-only `cookies` field; payload: {resp_value}"
        );
    }

    #[tokio::test]
    async fn v2_inbound_event_emits_v2_response_envelope() {
        let handler = build_handler();
        let resp_value = handler
            .handle_http_event_value(apigw_v2_agent_card_get())
            .await
            .expect("handle_http_event_value(v2) must succeed");
        let v2: aws_lambda_events::apigw::ApiGatewayV2httpResponse =
            serde_json::from_value(resp_value.clone())
                .expect("response must deserialize as ApiGatewayV2httpResponse (v2)");
        assert_eq!(v2.status_code, 200);
        match v2.body {
            Some(aws_lambda_events::encodings::Body::Text(s)) => {
                assert!(s.contains("\"name\""), "v2 body shape: {s}");
            }
            other => panic!("expected text body, got {other:?}"),
        }
    }

    #[tokio::test]
    async fn alb_inbound_event_emits_alb_response_envelope() {
        let handler = build_handler();
        let resp_value = handler
            .handle_http_event_value(alb_agent_card_get())
            .await
            .expect("handle_http_event_value(alb) must succeed");
        let alb: aws_lambda_events::alb::AlbTargetGroupResponse =
            serde_json::from_value(resp_value)
                .expect("response must deserialize as AlbTargetGroupResponse");
        assert_eq!(alb.status_code, 200);
    }

    // A v1 event whose requestContext carries the "/dev" stage prefix must
    // route correctly when the handler is configured to strip "/dev".
    #[tokio::test]
    async fn v1_stage_prefix_inbound_with_strip_emits_v1_response() {
        let s = Arc::new(InMemoryA2aStorage::new());
        let state = AppState {
            executor: Arc::new(NoOpExecutor),
            task_storage: s.clone(),
            push_storage: s.clone(),
            event_store: s.clone(),
            atomic_store: s.clone(),
            event_broker: TaskEventBroker::new(),
            middleware_stack: Arc::new(MiddlewareStack::new(vec![])),
            runtime_config: RuntimeConfig::default(),
            in_flight: Arc::new(InFlightRegistry::new()),
            cancellation_supervisor: s.clone(),
            push_delivery_store: None,
            push_dispatcher: None,
            durable_executor_queue: None,
        };
        let router = build_router(state.clone());
        let handler = LambdaA2aHandler {
            router,
            state,
            path_prefix: Some(Arc::from("/dev")),
        };
        let event = apigw_v1_agent_card_get_with_stage("dev", "/.well-known/agent-card.json");
        let resp_value = handler
            .handle_http_event_value(event)
            .await
            .expect("v1 stage-prefixed event must succeed under strip_path_prefix");
        let v1: aws_lambda_events::apigw::ApiGatewayProxyResponse =
            serde_json::from_value(resp_value)
                .expect("response must deserialize as ApiGatewayProxyResponse");
        assert_eq!(v1.status_code, 200);
        match v1.body {
            Some(aws_lambda_events::encodings::Body::Text(s)) => {
                assert!(s.contains("\"name\""), "v1 stage-prefix body: {s}");
            }
            other => panic!("expected text body, got {other:?}"),
        }
    }

    // Cross-check: v1 responses omit `cookies`, v2 responses include it, so
    // the two envelopes are distinguishable in the serialized JSON.
    #[tokio::test]
    async fn v1_response_does_not_match_v2_envelope_strictly() {
        let handler = build_handler();
        let v1_value = handler
            .handle_http_event_value(apigw_v1_agent_card_get())
            .await
            .unwrap();
        assert!(v1_value.get("cookies").is_none());
        let v2_value = handler
            .handle_http_event_value(apigw_v2_agent_card_get())
            .await
            .unwrap();
        assert!(
            v2_value.get("cookies").is_some(),
            "v2 response should carry the v2-only cookies field"
        );
    }
}