Struct june_analytics::batcher::Batcher
pub struct Batcher { /* private fields */ }
A batcher can accept messages into an internal buffer, and report when messages must be flushed.
The recommended usage pattern looks something like this:
use analytics::batcher::Batcher;
use analytics::client::Client;
use analytics::http::HttpClient;
use analytics::message::{BatchMessage, Track, User};
use serde_json::json;
let mut batcher = Batcher::new(None);
let client = HttpClient::default();
for i in 0..100 {
    let msg = BatchMessage::Track(Track {
        user: User::UserId { user_id: format!("user-{}", i) },
        event: "Example".to_owned(),
        properties: json!({ "foo": "bar" }),
        ..Default::default()
    });

    // Batcher returns back ownership of a message if the internal buffer
    // would overflow.
    //
    // When this occurs, we flush the batcher, create a new batcher, and add
    // the message into the new batcher.
    if let Some(msg) = batcher.push(msg).unwrap() {
        client.send("your_write_key", &batcher.into_message()).unwrap();
        batcher = Batcher::new(None);
        batcher.push(msg).unwrap();
    }
}

Batching means a message can sit in the internal buffer for some time before the batch fills up and is flushed. If this delay is a concern, it is recommended that you periodically flush the batcher on your own by calling into_message.
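As a rough sketch of such a periodic flush (reusing the imports and types from the example above; the write key placeholder and the flush threshold of 25 are purely illustrative and should be tuned for your workload):

use analytics::batcher::Batcher;
use analytics::client::Client;
use analytics::http::HttpClient;
use analytics::message::{BatchMessage, Track, User};
use serde_json::json;

fn main() {
    let client = HttpClient::default();
    let mut batcher = Batcher::new(None);
    let mut buffered = 0usize;

    for i in 0..1000 {
        let msg = BatchMessage::Track(Track {
            user: User::UserId { user_id: format!("user-{}", i) },
            event: "Example".to_owned(),
            properties: json!({ "foo": "bar" }),
            ..Default::default()
        });

        // Flush when the batcher reports it is full, exactly as above.
        if let Some(msg) = batcher.push(msg).unwrap() {
            client.send("your_write_key", &batcher.into_message()).unwrap();
            batcher = Batcher::new(None);
            buffered = 0;
            batcher.push(msg).unwrap();
        }
        buffered += 1;

        // Also flush every 25 messages so events are not held indefinitely
        // when traffic is slow. The threshold here is arbitrary.
        if buffered >= 25 {
            client.send("your_write_key", &batcher.into_message()).unwrap();
            batcher = Batcher::new(None);
            buffered = 0;
        }
    }

    // Flush anything left over before exiting.
    if buffered > 0 {
        client.send("your_write_key", &batcher.into_message()).unwrap();
    }
}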
Implementations

impl Batcher
pub fn new(context: Option<Value>) -> Self
Construct a new, empty batcher.
Optionally, you may specify a context that should be set on every
batch returned by into_message.
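For example, a sketch of constructing a batcher with a context (the fields in the json! value below are only illustrative; pass whatever context your pipeline needs):

use analytics::batcher::Batcher;
use serde_json::json;

// Illustrative context payload; any serde_json::Value may be supplied.
let context = json!({
    "library": {
        "name": "example-pipeline",
        "version": "0.1.0"
    }
});

// Every batch produced by into_message will carry this context.
let mut batcher = Batcher::new(Some(context));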
Examples found in repository:
fn main() {
    let write_key = "YOUR_WRITE_KEY";
    let client = HttpClient::default();
    let mut batcher = Batcher::new(None);

    // Pretend this is reading off of a queue, a file, or some other data
    // source.
    for i in 0..100 {
        let msg = BatchMessage::Track(Track {
            user: User::UserId {
                user_id: format!("user-{}", i),
            },
            event: "Example Event".to_owned(),
            properties: json!({
                "foo": format!("bar-{}", i),
            }),
            ..Default::default()
        });

        // An error here indicates a message is too large. In real life, you
        // would probably want to put this message in a deadletter queue or
        // some equivalent.
        if let Some(msg) = batcher.push(msg).unwrap() {
            client.send(write_key, &batcher.into_message()).unwrap();
            batcher = Batcher::new(None);
            batcher.push(msg).unwrap(); // Same error condition as above.
        }
    }
}

pub fn push(&mut self, msg: BatchMessage) -> Result<Option<BatchMessage>, Error>
Push a message into the batcher.
Returns Ok(None) if the message was accepted and is now owned by the
batcher.
Returns Ok(Some(msg)) if the message was rejected because accepting it would make the current batch oversized. The message is handed back to the caller, and it is recommended that you flush the current batch before pushing msg again.
Returns an error if the message is too large to be sent to June’s API.
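A rough sketch of handling all three outcomes explicitly (the helper function and its error handling are illustrative, not part of the crate):

use analytics::batcher::Batcher;
use analytics::client::Client;
use analytics::http::HttpClient;
use analytics::message::BatchMessage;

fn push_or_flush(client: &HttpClient, write_key: &str, batcher: &mut Batcher, msg: BatchMessage) {
    match batcher.push(msg) {
        // Accepted: the batcher now owns the message.
        Ok(None) => {}
        // Rejected: the current batch would be oversized. Flush it, start a
        // fresh batcher, and retry the returned message.
        Ok(Some(msg)) => {
            let full = std::mem::replace(batcher, Batcher::new(None));
            client.send(write_key, &full.into_message()).unwrap();
            batcher.push(msg).expect("a single valid message should fit an empty batch");
        }
        // The message by itself exceeds the API's size limit and can never be
        // sent; route it to a dead-letter queue or log and drop it.
        Err(err) => eprintln!("dropping oversized message: {}", err),
    }
}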
pub fn into_message(self) -> Message
Consumes this batcher and converts it into a Message that can be sent to June's API.
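A minimal sketch of flushing manually; note that into_message takes self, so the batcher is consumed and a fresh one must be created to continue batching. This assumes batcher and client were set up as in the examples above:

// Consume the accumulated messages into a single batch and send it.
let batch = batcher.into_message();
client.send("your_write_key", &batch).unwrap();

// The old batcher is gone; start a new one to keep accepting messages.
let mut batcher = Batcher::new(None);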