Client

Struct Client 

Source
pub struct Client { /* private fields */ }
Expand description

Client to handle HTTP requests and retries.

Implementations§

Source§

impl Client

Source

pub fn new(cfg: ClientConfig) -> Result<Self>

Creates a new client with the given configuration.

Configuration must include the url and api_token fields.

§Example
use hypersync_client::{Client, ClientConfig};

let config = ClientConfig {
    url: "https://eth.hypersync.xyz".to_string(),
    api_token: std::env::var("ENVIO_API_TOKEN")?,
    ..Default::default()
};
let client = Client::new(config)?;
§Errors

This method fails if the config is invalid.

Source

pub fn builder() -> ClientBuilder

Creates a new client builder.

§Example
use hypersync_client::Client;

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN").unwrap())
    .build()
    .unwrap();
Source

pub async fn collect( &self, query: Query, config: StreamConfig, ) -> Result<QueryResponse>

Retrieves blocks, transactions, traces, and logs through a stream using the provided query and stream configuration.

§Implementation

Runs multiple queries simultaneously based on config.concurrency.

Each query runs until it reaches query.to, the server height, any max_num_* query param limit, or until execution is timed out by the server.

§⚠️ Important Warning

This method will continue executing until the query has run to completion from beginning to the end of the block range defined in the query. For heavy queries with large block ranges or high data volumes, consider:

  • Use stream() to interact with each streamed chunk individually
  • Use get() which returns a next_block that can be paginated for the next query
  • Break large queries into smaller block ranges
§Example
use hypersync_client::{Client, net_types::{Query, LogFilter, LogField}, StreamConfig};

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Query ERC20 transfer events
let query = Query::new()
    .from_block(19000000)
    .to_block_excl(19000010)
    .where_logs(
        LogFilter::all()
            .and_topic0(["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"])?
    )
    .select_log_fields([LogField::Address, LogField::Data]);
let response = client.collect(query, StreamConfig::default()).await?;

println!("Collected {} events", response.data.logs.len());
Source

pub async fn collect_events( &self, query: Query, config: StreamConfig, ) -> Result<QueryResponse<Vec<Event>>>

Retrieves events through a stream using the provided query and stream configuration.

§⚠️ Important Warning

This method will continue executing until the query has run to completion from beginning to the end of the block range defined in the query. For heavy queries with large block ranges or high data volumes, consider:

  • Use stream_events() to interact with each streamed chunk individually
  • Use get_events() which returns a next_block that can be paginated for the next query
  • Break large queries into smaller block ranges
§Example
use hypersync_client::{Client, net_types::{Query, TransactionFilter, TransactionField}, StreamConfig};

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Query transactions to a specific address
let query = Query::new()
    .from_block(19000000)
    .to_block_excl(19000100)
    .where_transactions(
        TransactionFilter::all()
            .and_to(["0xA0b86a33E6411b87Fd9D3DF822C8698FC06BBe4c"])?
    )
    .select_transaction_fields([TransactionField::Hash, TransactionField::From, TransactionField::Value]);
let response = client.collect_events(query, StreamConfig::default()).await?;

println!("Collected {} events", response.data.len());
Source

pub async fn collect_arrow( &self, query: Query, config: StreamConfig, ) -> Result<ArrowResponse>

Retrieves blocks, transactions, traces, and logs in Arrow format through a stream using the provided query and stream configuration.

Returns data in Apache Arrow format for high-performance columnar processing. Useful for analytics workloads or when working with Arrow-compatible tools.

§⚠️ Important Warning

This method will continue executing until the query has run to completion from beginning to the end of the block range defined in the query. For heavy queries with large block ranges or high data volumes, consider:

  • Use stream_arrow() to interact with each streamed chunk individually
  • Use get_arrow() which returns a next_block that can be paginated for the next query
  • Break large queries into smaller block ranges
§Example
use hypersync_client::{Client, net_types::{Query, BlockFilter, BlockField}, StreamConfig};

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Get block data in Arrow format for analytics
let query = Query::new()
    .from_block(19000000)
    .to_block_excl(19000100)
    .include_all_blocks()
    .select_block_fields([BlockField::Number, BlockField::Timestamp, BlockField::GasUsed]);
let response = client.collect_arrow(query, StreamConfig::default()).await?;

println!("Retrieved {} Arrow batches for blocks", response.data.blocks.len());
Source

pub async fn collect_parquet( &self, path: &str, query: Query, config: StreamConfig, ) -> Result<()>

Writes parquet file getting data through a stream using the provided path, query, and stream configuration.

Streams data directly to a Parquet file for efficient storage and later analysis. Perfect for data exports or ETL pipelines.

§⚠️ Important Warning

This method will continue executing until the query has run to completion from beginning to the end of the block range defined in the query. For heavy queries with large block ranges or high data volumes, consider:

  • Use stream_arrow() and write to Parquet incrementally
  • Use get_arrow() with pagination and append to Parquet files
  • Break large queries into smaller block ranges
§Example
use hypersync_client::{Client, net_types::{Query, LogFilter, LogField}, StreamConfig};

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Export all DEX trades to Parquet for analysis
let query = Query::new()
    .from_block(19000000)
    .to_block_excl(19010000)
    .where_logs(
        LogFilter::all()
            .and_topic0(["0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"])?
    )
    .select_log_fields([LogField::Address, LogField::Data, LogField::BlockNumber]);
client.collect_parquet("./trades.parquet", query, StreamConfig::default()).await?;

println!("Trade data exported to trades.parquet");
Source

pub async fn get_chain_id(&self) -> Result<u64>

Get the chain_id from the server with retries.

§Example
use hypersync_client::Client;

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

let chain_id = client.get_chain_id().await?;
println!("Connected to chain ID: {}", chain_id);
Source

pub async fn get_height(&self) -> Result<u64>

Get the height from the server with retries.

§Example
use hypersync_client::Client;

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

let height = client.get_height().await?;
println!("Current block height: {}", height);
Source

pub async fn health_check( &self, http_req_timeout: Option<Duration>, ) -> Result<u64>

Get the height of the Client instance for health checks.

Doesn’t do any retries and the http_req_timeout parameter will override the http timeout config set when creating the client.

§Example
use hypersync_client::Client;
use std::time::Duration;

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Quick health check with 5 second timeout
let height = client.health_check(Some(Duration::from_secs(5))).await?;
println!("Server is healthy at block: {}", height);
Source

pub async fn get(&self, query: &Query) -> Result<QueryResponse>

Executes query with retries and returns the response.

§Example
use hypersync_client::{Client, net_types::{Query, BlockFilter, BlockField}};

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Query all blocks from a specific range
let query = Query::new()
    .from_block(19000000)
    .to_block_excl(19000010)
    .include_all_blocks()
    .select_block_fields([BlockField::Number, BlockField::Hash, BlockField::Timestamp]);
let response = client.get(&query).await?;

println!("Retrieved {} blocks", response.data.blocks.len());
Source

pub async fn get_events( &self, query: Query, ) -> Result<QueryResponse<Vec<Event>>>

Add block, transaction and log fields selection to the query, executes it with retries and returns the response.

This method automatically joins blocks, transactions, and logs into unified events, making it easier to work with related blockchain data.

§Example
use hypersync_client::{Client, net_types::{Query, LogFilter, LogField, TransactionField}};

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Query ERC20 transfers with transaction context
let query = Query::new()
    .from_block(19000000)
    .to_block_excl(19000010)
    .where_logs(
        LogFilter::all()
            .and_topic0(["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"])?
    )
    .select_log_fields([LogField::Address, LogField::Data])
    .select_transaction_fields([TransactionField::Hash, TransactionField::From]);
let response = client.get_events(query).await?;

println!("Retrieved {} joined events", response.data.len());
Source

pub async fn get_arrow(&self, query: &Query) -> Result<ArrowResponse>

Executes query with retries and returns the response in Arrow format.

Source

pub async fn stream( &self, query: Query, config: StreamConfig, ) -> Result<Receiver<Result<QueryResponse>>>

Spawns a task to execute the query and returns data via a channel.

§Example
use hypersync_client::{Client, net_types::{Query, LogFilter, LogField}, StreamConfig};

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Stream all ERC20 transfer events
let query = Query::new()
    .from_block(19000000)
    .where_logs(
        LogFilter::all()
            .and_topic0(["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"])?
    )
    .select_log_fields([LogField::Address, LogField::Topic1, LogField::Topic2, LogField::Data]);
let mut receiver = client.stream(query, StreamConfig::default()).await?;

while let Some(response) = receiver.recv().await {
    let response = response?;
    println!("Got {} events up to block: {}", response.data.logs.len(), response.next_block);
}
Source

pub async fn stream_events( &self, query: Query, config: StreamConfig, ) -> Result<Receiver<Result<QueryResponse<Vec<Event>>>>>

Add block, transaction and log fields selection to the query and spawns task to execute it, returning data via a channel.

This method automatically joins blocks, transactions, and logs into unified events, then streams them via a channel for real-time processing.

§Example
use hypersync_client::{Client, net_types::{Query, LogFilter, LogField, TransactionField}, StreamConfig};

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Stream NFT transfer events with transaction context
let query = Query::new()
    .from_block(19000000)
    .where_logs(
        LogFilter::all()
            .and_topic0(["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"])?
    )
    .select_log_fields([LogField::Address, LogField::Topic1, LogField::Topic2])
    .select_transaction_fields([TransactionField::Hash, TransactionField::From]);
let mut receiver = client.stream_events(query, StreamConfig::default()).await?;

while let Some(response) = receiver.recv().await {
    let response = response?;
    println!("Got {} joined events up to block: {}", response.data.len(), response.next_block);
}
Source

pub async fn stream_arrow( &self, query: Query, config: StreamConfig, ) -> Result<Receiver<Result<ArrowResponse>>>

Spawns task to execute query and return data via a channel in Arrow format.

Returns raw Apache Arrow data via a channel for high-performance processing. Ideal for applications that need to work directly with columnar data.

§Example
use hypersync_client::{Client, net_types::{Query, TransactionFilter, TransactionField}, StreamConfig};

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN")?)
    .build()?;

// Stream transaction data in Arrow format for analytics
let query = Query::new()
    .from_block(19000000)
    .to_block_excl(19000100)
    .where_transactions(
        TransactionFilter::all()
            .and_contract_address(["0xA0b86a33E6411b87Fd9D3DF822C8698FC06BBe4c"])?
    )
    .select_transaction_fields([TransactionField::Hash, TransactionField::From, TransactionField::Value]);
let mut receiver = client.stream_arrow(query, StreamConfig::default()).await?;

while let Some(response) = receiver.recv().await {
    let response = response?;
    println!("Got {} Arrow batches for transactions", response.data.transactions.len());
}
Source

pub fn url(&self) -> &Url

Getter for url field.

§Example
use hypersync_client::Client;

let client = Client::builder()
    .chain_id(1)
    .api_token(std::env::var("ENVIO_API_TOKEN").unwrap())
    .build()
    .unwrap();

println!("Client URL: {}", client.url());
Source§

impl Client

Source

pub fn stream_height(&self) -> Receiver<HeightStreamEvent>

Streams archive height updates from the server via Server-Sent Events.

Establishes a long-lived SSE connection to /height/sse that automatically reconnects on disconnection with exponential backoff (200ms → 400ms → … → max 30s).

The stream emits HeightStreamEvent to notify consumers of connection state changes and height updates. This allows applications to display connection status to users.

§Returns

Channel receiver yielding HeightStreamEvents. The background task handles connection lifecycle and sends events through this channel.

§Example
let client = Client::builder()
    .url("https://eth.hypersync.xyz")
    .api_token(std::env::var("ENVIO_API_TOKEN").unwrap())
    .build()?;

let mut rx = client.stream_height();

while let Some(event) = rx.recv().await {
    match event {
        HeightStreamEvent::Connected => println!("Connected to stream"),
        HeightStreamEvent::Height(h) => println!("Height: {}", h),
        HeightStreamEvent::Reconnecting { delay } => {
            println!("Reconnecting in {:?}...", delay)
        }
    }
}

Trait Implementations§

Source§

impl Clone for Client

Source§

fn clone(&self) -> Client

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for Client

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

§

impl Freeze for Client

§

impl !RefUnwindSafe for Client

§

impl Send for Client

§

impl Sync for Client

§

impl Unpin for Client

§

impl !UnwindSafe for Client

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> DynClone for T
where T: Clone,

Source§

fn __clone_box(&self, _: Private) -> *mut ()

Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more