use crate::{column_mapping::ColumnMapping, ArrowChunk};
use anyhow::{anyhow, Context, Result};
use arrow2::datatypes::SchemaRef;
use serde::{Deserialize, Serialize};
use skar_net_types::RollbackGuard;

/// Query result data in Arrow format, grouped per table.
#[derive(Debug, Clone)]
pub struct QueryResponseData {
    pub blocks: Vec<ArrowBatch>,
    pub transactions: Vec<ArrowBatch>,
    pub logs: Vec<ArrowBatch>,
}

/// Query response returned by the server.
#[derive(Debug, Clone)]
pub struct QueryResponse {
    /// Current height of the source archive, if known.
    pub archive_height: Option<u64>,
    /// Next block to query from. Responses are paginated, so the caller
    /// should resume from this block if the response did not cover the
    /// whole requested range.
    pub next_block: u64,
    /// Total time the server took to execute the query.
    pub total_execution_time: u64,
    /// Response data, grouped per table.
    pub data: QueryResponseData,
    /// Rollback guard, if the server sent one.
    pub rollback_guard: Option<RollbackGuard>,
}
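
// A minimal pagination sketch, assuming some hypothetical `run_query` function
// (not part of this file) that sends a query starting at `from_block` and
// returns a `QueryResponse`. `next_block` tells the caller where to resume,
// since a single response might not cover the whole requested range:
//
//     let mut from_block = 0;
//     loop {
//         let res = run_query(from_block).await?;
//         // ... consume res.data ...
//         match res.archive_height {
//             Some(height) if res.next_block > height => break,
//             _ => from_block = res.next_block,
//         }
//     }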

/// Configuration for streaming query results.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamConfig {
    /// Batch size used for individual requests; defaults to 400.
    #[serde(default = "default_batch_size")]
    pub batch_size: u64,
    /// Number of concurrent requests; defaults to 10.
    #[serde(default = "default_concurrency")]
    pub concurrency: usize,
    /// Whether to retry failed requests; defaults to false.
    #[serde(default)]
    pub retry: bool,
}
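
// A minimal sketch of how the `serde(default)` attributes above behave,
// assuming `serde_json` is available as a dev-dependency; the JSON input is
// hypothetical. Fields missing from the input fall back to the defaults
// (batch_size = 400, concurrency = 10, retry = false).
#[cfg(test)]
mod stream_config_defaults {
    use super::*;

    #[test]
    fn missing_fields_fall_back_to_defaults() {
        // Only batch_size is given; the rest come from the default functions.
        let cfg: StreamConfig = serde_json::from_str(r#"{"batch_size": 1000}"#).unwrap();
        assert_eq!(cfg.batch_size, 1000);
        assert_eq!(cfg.concurrency, 10);
        assert!(!cfg.retry);
    }
}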

/// Configuration for writing query results to Parquet.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParquetConfig {
    /// Path to write the Parquet output to. Required.
    pub path: String,
    /// Column mapping to apply to the output; defaults to
    /// `ColumnMapping::default()`.
    #[serde(default)]
    pub column_mapping: ColumnMapping,
    /// Whether to output binary columns as hex strings; defaults to false.
    #[serde(default)]
    pub hex_output: bool,
    /// Batch size used for individual requests; defaults to 400.
    #[serde(default = "default_batch_size")]
    pub batch_size: u64,
    /// Number of concurrent requests; defaults to 10.
    #[serde(default = "default_concurrency")]
    pub concurrency: usize,
    /// Whether to retry failed requests; defaults to false.
    #[serde(default)]
    pub retry: bool,
}

fn default_batch_size() -> u64 {
    400
}

fn default_concurrency() -> usize {
    10
}
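
// A similar sketch for `ParquetConfig`, again assuming `serde_json` is
// available and using a hypothetical path value. `path` is the only required
// field, since every other field has a `serde(default)`.
#[cfg(test)]
mod parquet_config_defaults {
    use super::*;

    #[test]
    fn only_path_is_required() {
        let cfg: ParquetConfig = serde_json::from_str(r#"{"path": "out/data"}"#).unwrap();
        assert_eq!(cfg.path, "out/data");
        assert_eq!(cfg.batch_size, 400);
        assert_eq!(cfg.concurrency, 10);
        assert!(!cfg.retry);
    }
}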

/// A single Arrow record batch: a chunk of columns plus the schema that
/// describes them.
#[derive(Debug, Clone)]
pub struct ArrowBatch {
    pub chunk: ArrowChunk,
    pub schema: SchemaRef,
}

impl ArrowBatch {
    /// Get the column with the given name, downcast to the concrete array
    /// type `T`. Fails if the field is not in the schema, the chunk has no
    /// column at the field's index, or the column is not of type `T`.
    pub fn column<T: 'static>(&self, name: &str) -> Result<&T> {
        match self
            .schema
            .fields
            .iter()
            .enumerate()
            .find(|(_, f)| f.name == name)
        {
            Some((idx, _)) => {
                let col = self
                    .chunk
                    .columns()
                    .get(idx)
                    .context("get column")?
                    .as_any()
                    .downcast_ref::<T>()
                    .context("cast column type")?;
                Ok(col)
            }
            None => Err(anyhow!("field {} not found in schema", name)),
        }
    }
}
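
// A minimal sketch of `ArrowBatch::column`, assuming `ArrowChunk` (imported
// from this crate above) is a type alias for
// `arrow2::chunk::Chunk<Box<dyn Array>>`. The schema, field name, and values
// are hypothetical; the type parameter is the concrete arrow2 array type to
// downcast to.
#[cfg(test)]
mod arrow_batch_column {
    use super::*;
    use arrow2::array::{Array, PrimitiveArray};
    use arrow2::chunk::Chunk;
    use arrow2::datatypes::{DataType, Field, Schema};
    use std::sync::Arc;

    #[test]
    fn downcast_column_by_name() {
        let schema = Arc::new(Schema::from(vec![Field::new(
            "block_number",
            DataType::UInt64,
            false,
        )]));
        let array = PrimitiveArray::<u64>::from_slice([1, 2, 3]);
        let chunk = Chunk::new(vec![Box::new(array) as Box<dyn Array>]);
        let batch = ArrowBatch { chunk, schema };

        // Downcast succeeds when the requested type matches the column.
        let col = batch.column::<PrimitiveArray<u64>>("block_number").unwrap();
        assert_eq!(col.len(), 3);

        // A field name missing from the schema is an error.
        assert!(batch.column::<PrimitiveArray<u64>>("missing").is_err());
    }
}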