#[cfg(feature = "btls-backend")]
use std::io::Write as _;
#[cfg(feature = "btls-backend")]
use crate::browser_emulation::{BoringCertCompression, BoringTlsFingerprint};
use crate::error::{Error, ErrorKind, Result};
#[cfg(any(feature = "native-tls", feature = "btls-backend"))]
use crate::tls::TlsConfig;
/// Performs a TLS handshake over `stream` with the rustls backend, optionally
/// bounded by `timeout`.
///
/// Returns a transport error when the handshake fails and a timeout error when
/// the deadline elapses first.
#[cfg(feature = "rustls")]
pub(crate) async fn connect_async_tls_with_config(
    stream: async_net::TcpStream,
    host: &str,
    config: std::sync::Arc<rustls::ClientConfig>,
    timeout: Option<std::time::Duration>,
) -> Result<async_tls::client::TlsStream<async_net::TcpStream>> {
    let connector = async_tls::TlsConnector::from(config);
    // Build the handshake future once; it is awaited either directly or raced
    // against the timer below.
    let handshake = async move {
        connector
            .connect(host, stream)
            .await
            .map_err(|err| Error::with_source(ErrorKind::Transport, "failed tls handshake", err))
    };
    if let Some(duration) = timeout {
        // Whichever future resolves first wins the race.
        futures_lite::future::or(handshake, async move {
            async_io::Timer::after(duration).await;
            Err(Error::new(ErrorKind::Timeout, "tls handshake timed out"))
        })
        .await
    } else {
        handshake.await
    }
}
/// Performs a TLS handshake over `stream` with the native-tls backend,
/// optionally bounded by `timeout`.
///
/// Returns a transport error when the handshake fails and a timeout error when
/// the deadline elapses first.
#[cfg(feature = "native-tls")]
pub(crate) async fn connect_native_tls_with_connector(
    stream: async_net::TcpStream,
    host: &str,
    connector: async_native_tls::TlsConnector,
    timeout: Option<std::time::Duration>,
) -> Result<async_native_tls::TlsStream<async_net::TcpStream>> {
    // The handshake future is created once and consumed on exactly one path.
    let handshake = async move {
        connector
            .connect(host, stream)
            .await
            .map_err(|err| Error::with_source(ErrorKind::Transport, "failed tls handshake", err))
    };
    match timeout {
        // Race the handshake against a timer; the first to finish wins.
        Some(duration) => {
            futures_lite::future::or(handshake, async move {
                async_io::Timer::after(duration).await;
                Err(Error::new(ErrorKind::Timeout, "tls handshake timed out"))
            })
            .await
        }
        None => handshake.await,
    }
}
/// Builds a native-tls connector honouring the crate [`TlsConfig`] and the
/// requested ALPN `protocols`.
///
/// Returns a transport error when the root-store override is unsupported by
/// native-tls or its PEM contents cannot be parsed.
#[cfg(feature = "native-tls")]
pub(crate) fn build_native_tls_connector_for_protocols(
    tls_config: &TlsConfig,
    protocols: &[String],
) -> Result<async_native_tls::TlsConnector> {
    // Start from defaults, honouring the "dangerous" cert-validation escape hatch.
    let mut builder = async_native_tls::TlsConnector::new()
        .danger_accept_invalid_certs(tls_config.accept_invalid_certs);
    // Optional root-store override. native-tls can only stack PEM roots on top
    // of the platform store; a pure webpki store cannot be expressed.
    match tls_config.root_store.as_ref() {
        None | Some(crate::RootStore::System) => {}
        Some(crate::RootStore::WebPki) => {
            return Err(Error::new(
                ErrorKind::Transport,
                "native-tls backend does not support webpki root store override; use system roots or PEM",
            ));
        }
        Some(root_store @ (crate::RootStore::PemFile(_) | crate::RootStore::Pem(_))) => {
            if let Some(pem) = root_store.pem_bytes()? {
                let cert = async_native_tls::Certificate::from_pem(&pem).map_err(|err| {
                    Error::with_source(
                        ErrorKind::Transport,
                        "failed to parse native tls root certificate PEM",
                        err,
                    )
                })?;
                builder = builder.add_root_certificate(cert);
            }
        }
    }
    // Only advertise ALPN when the caller asked for at least one protocol.
    if !protocols.is_empty() {
        let refs: Vec<&str> = protocols.iter().map(String::as_str).collect();
        builder = builder.request_alpns(&refs);
    }
    Ok(builder)
}
// ---- BoringSSL backend (via btls) ------------------------------------------
//
// This backend is optional and is primarily intended for "browser-like" TLS
// fingerprinting experiments. The implementation is best-effort: it focuses on
// ALPN + version/cipher selection, and provides an async wrapper for the
// non-blocking SSL state machine.
/// Pre-computed configuration for a BoringSSL connector, derived from
/// [`TlsConfig`] before any builder mutation happens, so the configuration
/// step is a plain, comparable data value.
#[cfg(feature = "btls-backend")]
#[derive(Clone, Debug, Eq, PartialEq)]
struct BoringTlsBuildPlan {
    // When true, peer certificate verification is disabled entirely.
    accept_invalid_certs: bool,
    // ALPN protocols already encoded in TLS wire format (length-prefixed ids).
    alpn_wire: Vec<u8>,
    // Optional protocol version bounds; `None` keeps BoringSSL's defaults.
    min_proto_version: Option<btls::ssl::SslVersion>,
    max_proto_version: Option<btls::ssl::SslVersion>,
    // Optional browser-emulation fingerprint knobs.
    fingerprint: Option<BoringTlsFingerprint>,
    // Explicit colon-separated cipher list; applied after the fingerprint, so
    // it takes precedence over a fingerprint-supplied cipher list.
    explicit_cipher_list: Option<String>,
}
/// Certificate-compression hook that lets BoringSSL advertise and use the TLS
/// Brotli certificate compression algorithm.
#[cfg(all(feature = "btls-backend", feature = "brotli"))]
#[derive(Default)]
struct BoringBrotliCertificateCompressor;
#[cfg(all(feature = "btls-backend", feature = "brotli"))]
impl btls::ssl::CertificateCompressor for BoringBrotliCertificateCompressor {
    const ALGORITHM: btls::ssl::CertificateCompressionAlgorithm =
        btls::ssl::CertificateCompressionAlgorithm::BROTLI;
    const CAN_COMPRESS: bool = true;
    const CAN_DECOMPRESS: bool = true;
    /// Compresses `input` into `output` with Brotli (1 KiB buffer, quality 11,
    /// window 22).
    fn compress<W>(&self, input: &[u8], output: &mut W) -> std::io::Result<()>
    where
        W: std::io::Write,
    {
        let mut writer = brotli::CompressorWriter::new(output, 1024, 11, 22);
        writer.write_all(input)?;
        // NOTE(review): the compressed stream is finalized when `writer` drops
        // here, and an error during that final flush is silently swallowed —
        // confirm this is acceptable or flush explicitly before returning.
        Ok(())
    }
    /// Decompresses a Brotli-compressed certificate message into `output`.
    fn decompress<W>(&self, input: &[u8], output: &mut W) -> std::io::Result<()>
    where
        W: std::io::Write,
    {
        brotli::BrotliDecompress(&mut std::io::Cursor::new(input), output)
    }
}
/// Applies the optional browser-fingerprint knobs from a
/// [`BoringTlsFingerprint`] to the connector builder.
///
/// Each field is applied only when set; BoringSSL's rejection of a
/// configuration string is surfaced as a transport error.
#[cfg(feature = "btls-backend")]
fn apply_boring_tls_fingerprint(
    builder: &mut btls::ssl::SslConnectorBuilder,
    fingerprint: &BoringTlsFingerprint,
) -> Result<()> {
    if let Some(enabled) = fingerprint.grease_enabled {
        builder.set_grease_enabled(enabled);
    }
    if let Some(enabled) = fingerprint.permute_extensions {
        builder.set_permute_extensions(enabled);
    }
    if let Some(curves) = &fingerprint.curves_list {
        builder.set_curves_list(curves).map_err(|err| {
            Error::with_source(
                ErrorKind::Transport,
                "failed to set boring curves list",
                err,
            )
        })?;
    }
    if let Some(sigalgs) = &fingerprint.sigalgs_list {
        builder.set_sigalgs_list(sigalgs).map_err(|err| {
            Error::with_source(
                ErrorKind::Transport,
                "failed to set boring signature algorithms",
                err,
            )
        })?;
    }
    if let Some(cipher_list) = &fingerprint.cipher_list {
        // Preserve BoringSSL's TLS 1.3 cipher list while overriding the rest.
        builder.set_preserve_tls13_cipher_list(true);
        builder.set_cipher_list(cipher_list).map_err(|err| {
            Error::with_source(
                ErrorKind::Transport,
                "failed to set boring cipher list",
                err,
            )
        })?;
    }
    if fingerprint.enable_ocsp_stapling {
        builder.enable_ocsp_stapling();
    }
    if fingerprint.enable_signed_cert_timestamps {
        builder.enable_signed_cert_timestamps();
    }
    // Compression algorithms are registered in the fingerprint's order.
    for compression in &fingerprint.certificate_compression {
        match compression {
            #[cfg(feature = "brotli")]
            BoringCertCompression::Brotli => builder
                .add_certificate_compression_algorithm(BoringBrotliCertificateCompressor)
                .map_err(|err| {
                    Error::with_source(
                        ErrorKind::Transport,
                        "failed to enable boring Brotli certificate compression",
                        err,
                    )
                })?,
            // Without the brotli feature the variant cannot be honoured.
            #[cfg(not(feature = "brotli"))]
            BoringCertCompression::Brotli => {
                return Err(Error::new(
                    ErrorKind::Transport,
                    "Brotli certificate compression requires the brotli feature",
                ));
            }
        }
    }
    Ok(())
}
/// Maps a human-readable TLS version string ("1.2" / "1.3", surrounding
/// whitespace ignored) to the BoringSSL version constant; anything else
/// yields `None`.
#[cfg(feature = "btls-backend")]
fn parse_boring_tls_version(version: &str) -> Option<btls::ssl::SslVersion> {
    let version = version.trim();
    if version == "1.2" {
        Some(btls::ssl::SslVersion::TLS1_2)
    } else if version == "1.3" {
        Some(btls::ssl::SslVersion::TLS1_3)
    } else {
        None
    }
}
/// Encodes ALPN protocol ids into TLS wire format: each id is a one-byte
/// length followed by its bytes.
///
/// Fails with a transport error when an id exceeds 255 bytes.
#[cfg(feature = "btls-backend")]
fn encode_boring_alpn_protocols(protocols: &[String]) -> Result<Vec<u8>> {
    protocols.iter().try_fold(Vec::new(), |mut wire, proto| {
        let bytes = proto.as_bytes();
        // The length prefix is a single byte, so ids longer than 255 bytes
        // cannot be represented on the wire.
        let Ok(len) = u8::try_from(bytes.len()) else {
            return Err(Error::new(
                ErrorKind::Transport,
                "ALPN protocol id is too long for TLS",
            ));
        };
        wire.push(len);
        wire.extend_from_slice(bytes);
        Ok(wire)
    })
}
/// Collects everything needed to configure a BoringSSL connector from the
/// [`TlsConfig`] and ALPN `protocols` into a [`BoringTlsBuildPlan`].
#[cfg(feature = "btls-backend")]
fn build_boring_tls_plan(
    tls_config: &TlsConfig,
    protocols: &[String],
) -> Result<BoringTlsBuildPlan> {
    let alpn_wire = encode_boring_alpn_protocols(protocols)?;
    let min_proto_version = tls_config
        .min_tls_version
        .as_deref()
        .and_then(parse_boring_tls_version);
    let max_proto_version = tls_config
        .max_tls_version
        .as_deref()
        .and_then(parse_boring_tls_version);
    // A configured but empty cipher-suite list counts as "not configured";
    // otherwise join into OpenSSL's colon-separated format.
    let explicit_cipher_list = match tls_config.cipher_suites.as_ref() {
        Some(suites) if !suites.is_empty() => Some(suites.join(":")),
        _ => None,
    };
    Ok(BoringTlsBuildPlan {
        accept_invalid_certs: tls_config.accept_invalid_certs,
        alpn_wire,
        min_proto_version,
        max_proto_version,
        fingerprint: tls_config.boring_tls_fingerprint().cloned(),
        explicit_cipher_list,
    })
}
/// Applies a [`BoringTlsBuildPlan`] to a BoringSSL connector builder.
///
/// Order matters: the fingerprint is applied before `explicit_cipher_list`,
/// so an explicitly configured cipher list overrides one supplied by the
/// fingerprint.
#[cfg(feature = "btls-backend")]
fn apply_boring_tls_plan(
    builder: &mut btls::ssl::SslConnectorBuilder,
    plan: &BoringTlsBuildPlan,
) -> Result<()> {
    use btls::ssl::SslVerifyMode;
    if plan.accept_invalid_certs {
        // Disable peer certificate verification entirely (dangerous, opt-in).
        builder.set_verify(SslVerifyMode::NONE);
    }
    if !plan.alpn_wire.is_empty() {
        builder.set_alpn_protos(&plan.alpn_wire).map_err(|err| {
            Error::with_source(ErrorKind::Transport, "failed to set boring ALPN", err)
        })?;
    }
    if let Some(version) = plan.min_proto_version {
        builder
            .set_min_proto_version(Some(version))
            .map_err(|err| {
                Error::with_source(
                    ErrorKind::Transport,
                    "failed to set boring min tls version",
                    err,
                )
            })?;
    }
    if let Some(version) = plan.max_proto_version {
        builder
            .set_max_proto_version(Some(version))
            .map_err(|err| {
                Error::with_source(
                    ErrorKind::Transport,
                    "failed to set boring max tls version",
                    err,
                )
            })?;
    }
    if let Some(fingerprint) = &plan.fingerprint {
        apply_boring_tls_fingerprint(builder, fingerprint)?;
    }
    if let Some(cipher_list) = &plan.explicit_cipher_list {
        // Preserve BoringSSL's TLS 1.3 cipher list while overriding the rest.
        builder.set_preserve_tls13_cipher_list(true);
        builder.set_cipher_list(cipher_list).map_err(|err| {
            Error::with_source(
                ErrorKind::Transport,
                "failed to set explicit boring cipher suites",
                err,
            )
        })?;
    }
    Ok(())
}
/// Returns whether ECH GREASE should be enabled: it is opt-in via the
/// fingerprint, and an absent fingerprint means disabled.
#[cfg(feature = "btls-backend")]
fn boring_tls_ech_grease_enabled(tls_config: &TlsConfig) -> bool {
    matches!(
        tls_config.boring_tls_fingerprint(),
        Some(fingerprint) if fingerprint.enable_ech_grease
    )
}
/// Builds a fully configured BoringSSL connector for the given TLS config and
/// ALPN protocol list.
#[cfg(feature = "btls-backend")]
pub(crate) fn build_boring_tls_connector_for_protocols(
    tls_config: &TlsConfig,
    protocols: &[String],
) -> Result<btls::ssl::SslConnector> {
    use btls::ssl::{SslConnector, SslMethod};
    let mut builder = SslConnector::builder(SslMethod::tls()).map_err(|err| {
        Error::with_source(
            ErrorKind::Transport,
            "failed to create boring ssl connector builder",
            err,
        )
    })?;
    // Root store override (PEM) support: keep this minimal for now.
    // System and WebPki are both left at the builder's default store here.
    if let Some(root_store) = &tls_config.root_store {
        match root_store {
            crate::RootStore::System | crate::RootStore::WebPki => {}
            crate::RootStore::PemFile(_) | crate::RootStore::Pem(_) => {
                if let Some(pem) = root_store.pem_bytes()? {
                    // A PEM bundle may contain several certificates; add each
                    // one to the builder's certificate store.
                    let certs = btls::x509::X509::stack_from_pem(&pem).map_err(|err| {
                        Error::with_source(
                            ErrorKind::Transport,
                            "failed to parse boring PEM root certificates",
                            err,
                        )
                    })?;
                    let store = builder.cert_store_mut();
                    for cert in certs {
                        store.add_cert(cert).map_err(|err| {
                            Error::with_source(
                                ErrorKind::Transport,
                                "failed to add boring PEM root certificate",
                                err,
                            )
                        })?;
                    }
                }
            }
        }
    }
    let plan = build_boring_tls_plan(tls_config, protocols)?;
    apply_boring_tls_plan(&mut builder, &plan)?;
    Ok(builder.build())
}
/// Performs a TLS handshake over `stream` with the BoringSSL backend,
/// optionally bounded by `timeout`.
///
/// Returns a transport error when the handshake fails and a timeout error
/// when the deadline elapses first.
#[cfg(feature = "btls-backend")]
pub(crate) async fn connect_boring_tls_with_connector(
    stream: async_net::TcpStream,
    host: &str,
    connector: btls::ssl::SslConnector,
    tls_config: &TlsConfig,
    timeout: Option<std::time::Duration>,
) -> Result<BoringTlsStream> {
    let ech_grease = boring_tls_ech_grease_enabled(tls_config);
    let io = BoringIo::new(stream);
    let handshake = async move { BoringTlsStream::connect(host, connector, io, ech_grease).await };
    if let Some(duration) = timeout {
        // Race the handshake against a timer; the first to finish wins.
        futures_lite::future::or(handshake, async move {
            async_io::Timer::after(duration).await;
            Err(Error::new(ErrorKind::Timeout, "tls handshake timed out"))
        })
        .await
    } else {
        handshake.await
    }
}
/// Adapter that exposes an `async_net::TcpStream` through blocking-style
/// `Read`/`Write` (surfacing `WouldBlock`) plus readiness polling, which is
/// what BoringSSL's synchronous state machine needs.
#[cfg(feature = "btls-backend")]
#[derive(Debug)]
struct BoringIo {
    // `async_net::TcpStream` converts into this shared non-blocking handle.
    inner: std::sync::Arc<async_io::Async<std::net::TcpStream>>,
}
#[cfg(feature = "btls-backend")]
impl BoringIo {
    /// Wraps the async stream; the `From` conversion yields the shared
    /// non-blocking handle stored in `inner`.
    fn new(stream: async_net::TcpStream) -> Self {
        let inner: std::sync::Arc<async_io::Async<std::net::TcpStream>> = stream.into();
        Self { inner }
    }
    /// Polls read readiness of the underlying socket.
    fn poll_readable(
        &self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        self.inner.poll_readable(cx)
    }
    /// Polls write readiness of the underlying socket.
    fn poll_writable(
        &self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        self.inner.poll_writable(cx)
    }
}
#[cfg(feature = "btls-backend")]
impl std::io::Read for BoringIo {
    /// Non-blocking read straight from the raw socket; may fail with
    /// `WouldBlock`, which the async wrappers turn into readiness polling.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        // Fully qualified call: only `std::io::Write` is imported at the top
        // of this file, so `.read()` on the `&TcpStream` returned by
        // `get_ref()` would not resolve without the trait path.
        std::io::Read::read(&mut self.inner.get_ref(), buf)
    }
}
#[cfg(feature = "btls-backend")]
impl std::io::Write for BoringIo {
    /// Non-blocking write straight to the raw socket; may fail with
    /// `WouldBlock`, which the async wrappers turn into readiness polling.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.inner.get_ref().write(buf)
    }
    /// Flushes the raw socket (a no-op for TCP, kept for the trait contract).
    fn flush(&mut self) -> std::io::Result<()> {
        self.inner.get_ref().flush()
    }
}
/// Async TLS stream over BoringSSL: pairs the blocking-style `SslStream` with
/// readiness polling on the underlying [`BoringIo`] socket.
#[cfg(feature = "btls-backend")]
pub(crate) struct BoringTlsStream {
    inner: btls::ssl::SslStream<BoringIo>,
}
#[cfg(feature = "btls-backend")]
impl BoringTlsStream {
    /// Drives BoringSSL's synchronous handshake state machine to completion on
    /// top of the non-blocking [`BoringIo`] socket.
    ///
    /// On `WouldBlock` the socket is polled for readiness in both directions
    /// (we do not know whether the next handshake step needs to read or to
    /// write). If either direction is already ready, the handshake is retried
    /// immediately: `poll_readable`/`poll_writable` do not register a waker
    /// when they return `Ready`, so returning `Pending` in that case (as the
    /// previous version did) could park the task with no wakeup scheduled.
    async fn connect(
        host: &str,
        connector: btls::ssl::SslConnector,
        io: BoringIo,
        enable_ech_grease: bool,
    ) -> Result<Self> {
        use btls::ssl::HandshakeError;
        use futures_lite::future::poll_fn;
        let host = host.to_owned();
        let configuration = connector.configure().map_err(|err| {
            Error::with_source(
                ErrorKind::Transport,
                "failed to configure boring ssl connector",
                err,
            )
        })?;
        if enable_ech_grease {
            configuration.set_enable_ech_grease(true);
        }
        let mut state = Some(configuration.setup_connect(&host, io).map_err(|err| {
            Error::with_source(
                ErrorKind::Transport,
                "failed to initialize boring tls handshake",
                err,
            )
        })?);
        poll_fn(|cx| {
            loop {
                let mid = match state.take() {
                    Some(v) => v,
                    // Defensive: once resolved the future must not be polled
                    // again; stay pending instead of panicking.
                    None => return std::task::Poll::Pending,
                };
                match mid.handshake() {
                    Ok(stream) => return std::task::Poll::Ready(Ok(Self { inner: stream })),
                    Err(HandshakeError::WouldBlock(mid)) => {
                        // We don't know whether the next step needs read or write; poll both.
                        let readable = mid.get_ref().poll_readable(cx);
                        let writable = mid.get_ref().poll_writable(cx);
                        state = Some(mid);
                        match (readable, writable) {
                            // Already ready in some direction: no waker was
                            // registered for it, so retry the handshake now.
                            (std::task::Poll::Ready(Ok(())), _)
                            | (_, std::task::Poll::Ready(Ok(()))) => continue,
                            // Surface socket readiness errors instead of
                            // parking forever.
                            (std::task::Poll::Ready(Err(err)), _)
                            | (_, std::task::Poll::Ready(Err(err))) => {
                                return std::task::Poll::Ready(Err(Error::with_source(
                                    ErrorKind::Transport,
                                    "failed tls handshake",
                                    err,
                                )));
                            }
                            // Both directions registered wakers; safe to park.
                            _ => return std::task::Poll::Pending,
                        }
                    }
                    Err(err) => {
                        return std::task::Poll::Ready(Err(Error::with_source(
                            ErrorKind::Transport,
                            "failed tls handshake",
                            err,
                        )));
                    }
                }
            }
        })
        .await
    }
}
#[cfg(feature = "btls-backend")]
impl futures_lite::io::AsyncRead for BoringTlsStream {
    /// Reads decrypted bytes, translating the synchronous `WouldBlock` of the
    /// BoringSSL stream into readiness polling on the raw socket.
    fn poll_read(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut [u8],
    ) -> std::task::Poll<std::io::Result<usize>> {
        loop {
            // Try the TLS read first; a TLS read may need socket writes as
            // well as reads, hence both directions are polled below.
            match std::io::Read::read(&mut self.inner, buf) {
                Ok(n) => return std::task::Poll::Ready(Ok(n)),
                Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}
                Err(err) => return std::task::Poll::Ready(Err(err)),
            }
            let io = self.inner.get_ref();
            let readable = io.poll_readable(cx);
            let writable = io.poll_writable(cx);
            match (readable, writable) {
                // Some direction is ready: retry the TLS read immediately.
                (std::task::Poll::Ready(Ok(())), _) | (_, std::task::Poll::Ready(Ok(()))) => {}
                (std::task::Poll::Ready(Err(e)), _) => return std::task::Poll::Ready(Err(e)),
                (_, std::task::Poll::Ready(Err(e))) => return std::task::Poll::Ready(Err(e)),
                // Both pending: wakers are registered, safe to park the task.
                _ => return std::task::Poll::Pending,
            }
        }
    }
}
#[cfg(feature = "btls-backend")]
impl futures_lite::io::AsyncWrite for BoringTlsStream {
    /// Encrypts and writes `buf`, translating `WouldBlock` into readiness
    /// polling; a TLS write may need socket reads as well as writes.
    fn poll_write(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &[u8],
    ) -> std::task::Poll<std::io::Result<usize>> {
        loop {
            match std::io::Write::write(&mut self.inner, buf) {
                Ok(n) => return std::task::Poll::Ready(Ok(n)),
                Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}
                Err(err) => return std::task::Poll::Ready(Err(err)),
            }
            let io = self.inner.get_ref();
            let readable = io.poll_readable(cx);
            let writable = io.poll_writable(cx);
            match (readable, writable) {
                // Some direction is ready: retry the TLS write immediately.
                (std::task::Poll::Ready(Ok(())), _) | (_, std::task::Poll::Ready(Ok(()))) => {}
                (std::task::Poll::Ready(Err(e)), _) => return std::task::Poll::Ready(Err(e)),
                (_, std::task::Poll::Ready(Err(e))) => return std::task::Poll::Ready(Err(e)),
                // Both pending: wakers are registered, safe to park the task.
                _ => return std::task::Poll::Pending,
            }
        }
    }
    /// Flushes buffered TLS data using the same readiness-retry loop.
    fn poll_flush(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        loop {
            match std::io::Write::flush(&mut self.inner) {
                Ok(()) => return std::task::Poll::Ready(Ok(())),
                Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}
                Err(err) => return std::task::Poll::Ready(Err(err)),
            }
            let io = self.inner.get_ref();
            let readable = io.poll_readable(cx);
            let writable = io.poll_writable(cx);
            match (readable, writable) {
                (std::task::Poll::Ready(Ok(())), _) | (_, std::task::Poll::Ready(Ok(()))) => {}
                (std::task::Poll::Ready(Err(e)), _) => return std::task::Poll::Ready(Err(e)),
                (_, std::task::Poll::Ready(Err(e))) => return std::task::Poll::Ready(Err(e)),
                _ => return std::task::Poll::Pending,
            }
        }
    }
    /// NOTE(review): close only flushes — no TLS close_notify is sent before
    /// the transport goes away; confirm peers tolerate the abrupt shutdown.
    fn poll_close(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        self.poll_flush(cx)
    }
}
#[cfg(all(test, feature = "btls-backend"))]
mod tests {
use super::{
BoringTlsBuildPlan, apply_boring_tls_plan, boring_tls_ech_grease_enabled,
build_boring_tls_connector_for_protocols, build_boring_tls_plan,
encode_boring_alpn_protocols, parse_boring_tls_version,
};
use crate::browser_emulation::{BoringCertCompression, BoringTlsFingerprint};
use crate::tls::TlsConfig;
use crate::{Client, TlsBackend};
use btls::ssl::SslCipher;
use std::io::Read as _;
use std::sync::mpsc;
use std::time::Duration;
#[cfg(feature = "emulation")]
use crate::Emulation;
/// Minimal structured view of a captured ClientHello, parsed only as deeply
/// as the assertions in this module require.
#[derive(Debug)]
struct ParsedClientHello {
    // Two-byte legacy protocol version from the hello body.
    legacy_version: u16,
    // Cipher suite ids in the order they appeared on the wire.
    cipher_suites: Vec<u16>,
    // (extension_type, raw payload) pairs in wire order.
    extensions: Vec<(u16, Vec<u8>)>,
}
/// One browser-emulation snapshot test case: which emulation to apply and
/// what the resulting ClientHello must look like.
#[cfg(feature = "emulation")]
struct EmulationHelloFixture {
    // Label used in failure messages.
    name: &'static str,
    emulation: Emulation,
    // Expected cipher suite names after GREASE/SCSV filtering.
    cipher_names: &'static [&'static str],
    // Expected signature_algorithms extension values.
    signature_algorithms: &'static [u16],
    // Whether the ECH extension (65037) must be present.
    expect_ech_extension: bool,
    // Sort extensions before comparing (for emulations that permute them).
    canonicalize_extension_order: bool,
    // Hex dump of the expected normalized ClientHello snapshot.
    normalized_snapshot_hex: &'static str,
}
impl ParsedClientHello {
    /// Returns the payload of the first extension of the given type, if any.
    fn extension(&self, extension_type: u16) -> Option<&[u8]> {
        for (kind, payload) in &self.extensions {
            if *kind == extension_type {
                return Some(payload.as_slice());
            }
        }
        None
    }
}
/// Spawns a one-shot TCP server that captures the raw ClientHello bytes of
/// the first incoming connection and reports them over a channel.
///
/// Returns the `https://` URL to connect to and the receiving end of the
/// capture channel (ClientHello bytes or an error string).
fn spawn_client_hello_capture_server() -> (String, mpsc::Receiver<Result<Vec<u8>, String>>) {
    // Port 0: let the OS pick a free port.
    let listener = std::net::TcpListener::bind(("127.0.0.1", 0)).unwrap();
    let addr = listener.local_addr().unwrap();
    let (sender, receiver) = mpsc::channel();
    std::thread::spawn(move || {
        let result = (|| -> Result<Vec<u8>, String> {
            let (mut stream, _) = listener.accept().map_err(|err| err.to_string())?;
            // Bound the read so a stuck client cannot hang the test forever.
            stream
                .set_read_timeout(Some(Duration::from_secs(2)))
                .map_err(|err| err.to_string())?;
            read_tls_client_hello(&mut stream).map_err(|err| err.to_string())
        })();
        // The receiver may already be gone if the test panicked; ignore.
        let _ = sender.send(result);
    });
    (format!("https://127.0.0.1:{}", addr.port()), receiver)
}
/// Reads from `stream` until a complete ClientHello handshake message has
/// been captured (it may span multiple TLS records), returning its raw bytes.
fn read_tls_client_hello(stream: &mut std::net::TcpStream) -> std::io::Result<Vec<u8>> {
    let mut raw = Vec::new();
    let mut chunk = [0_u8; 4096];
    loop {
        match stream.read(&mut chunk) {
            // EOF: fall through and attempt one final parse of what we have.
            Ok(0) => break,
            Ok(read) => {
                raw.extend_from_slice(&chunk[..read]);
                if let Some(hello) = extract_client_hello_from_records(&raw)? {
                    return Ok(hello);
                }
            }
            // The capture socket has a read timeout; treat expiry like EOF so
            // the final parse below yields a clearer error message.
            Err(err)
                if matches!(
                    err.kind(),
                    std::io::ErrorKind::WouldBlock | std::io::ErrorKind::TimedOut
                ) =>
            {
                break;
            }
            Err(err) => return Err(err),
        }
    }
    extract_client_hello_from_records(&raw)?.ok_or_else(|| {
        std::io::Error::new(
            std::io::ErrorKind::UnexpectedEof,
            "did not capture a complete ClientHello",
        )
    })
}
/// Reassembles the handshake byte stream from consecutive TLS records in
/// `raw` and returns the ClientHello message (header included) once it is
/// complete.
///
/// Returns `Ok(None)` while more bytes are needed, and an `InvalidData` error
/// when a non-handshake record or a non-ClientHello message is seen.
fn extract_client_hello_from_records(raw: &[u8]) -> std::io::Result<Option<Vec<u8>>> {
    let mut handshake: Vec<u8> = Vec::new();
    let mut rest = raw;
    loop {
        // Record header: type, 2-byte version, 2-byte payload length.
        let Some(header) = rest.get(..5) else { break };
        let record_len = usize::from(u16::from_be_bytes([header[3], header[4]]));
        let Some(payload) = rest.get(5..5 + record_len) else {
            // Partial record: wait for more bytes.
            break;
        };
        if header[0] != 22 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "expected TLS handshake record",
            ));
        }
        handshake.extend_from_slice(payload);
        rest = &rest[5 + record_len..];
        // Handshake header: type (1 = ClientHello) + 24-bit body length.
        if handshake.len() >= 4 {
            if handshake[0] != 1 {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    "expected ClientHello handshake message",
                ));
            }
            let hello_len = usize::from(handshake[1]) << 16
                | usize::from(handshake[2]) << 8
                | usize::from(handshake[3]);
            if handshake.len() >= 4 + hello_len {
                return Ok(Some(handshake[..4 + hello_len].to_vec()));
            }
        }
    }
    Ok(None)
}
/// Parses a raw ClientHello handshake message into [`ParsedClientHello`].
///
/// Only the fields the test assertions need are kept; errors are reported as
/// human-readable strings.
fn parse_client_hello(handshake: &[u8]) -> Result<ParsedClientHello, String> {
    // Bounds-checked slice cursor: yields `len` bytes and advances `offset`.
    fn take<'a>(buf: &'a [u8], offset: &mut usize, len: usize) -> Result<&'a [u8], String> {
        let end = offset.saturating_add(len);
        if end > buf.len() {
            return Err("truncated ClientHello field".to_owned());
        }
        let out = &buf[*offset..end];
        *offset = end;
        Ok(out)
    }
    // Handshake header: type (1 = ClientHello) + 24-bit body length.
    if handshake.len() < 4 || handshake[0] != 1 {
        return Err("expected ClientHello handshake".to_owned());
    }
    let hello_len = usize::from(handshake[1]) << 16
        | usize::from(handshake[2]) << 8
        | usize::from(handshake[3]);
    if handshake.len() < 4 + hello_len {
        return Err("truncated ClientHello".to_owned());
    }
    let body = &handshake[4..4 + hello_len];
    let mut offset = 0usize;
    let legacy_version = {
        let bytes = take(body, &mut offset, 2)?;
        u16::from_be_bytes([bytes[0], bytes[1]])
    };
    // Random and session id are skipped; the assertions never inspect them.
    let _random = take(body, &mut offset, 32)?;
    let session_id_len = usize::from(take(body, &mut offset, 1)?[0]);
    let _session_id = take(body, &mut offset, session_id_len)?;
    let cipher_len = {
        let bytes = take(body, &mut offset, 2)?;
        usize::from(u16::from_be_bytes([bytes[0], bytes[1]]))
    };
    let cipher_bytes = take(body, &mut offset, cipher_len)?;
    let cipher_suites = cipher_bytes
        .chunks_exact(2)
        .map(|chunk| u16::from_be_bytes([chunk[0], chunk[1]]))
        .collect::<Vec<_>>();
    let compression_len = usize::from(take(body, &mut offset, 1)?[0]);
    let _compression = take(body, &mut offset, compression_len)?;
    let extension_len = {
        let bytes = take(body, &mut offset, 2)?;
        usize::from(u16::from_be_bytes([bytes[0], bytes[1]]))
    };
    let extension_bytes = take(body, &mut offset, extension_len)?;
    // Extensions: repeated (u16 type, u16 length, payload) entries.
    let mut extensions = Vec::new();
    let mut ext_offset = 0usize;
    while ext_offset < extension_bytes.len() {
        let kind = {
            let bytes = take(extension_bytes, &mut ext_offset, 2)?;
            u16::from_be_bytes([bytes[0], bytes[1]])
        };
        let len = {
            let bytes = take(extension_bytes, &mut ext_offset, 2)?;
            usize::from(u16::from_be_bytes([bytes[0], bytes[1]]))
        };
        let payload = take(extension_bytes, &mut ext_offset, len)?.to_vec();
        extensions.push((kind, payload));
    }
    Ok(ParsedClientHello {
        legacy_version,
        cipher_suites,
        extensions,
    })
}
/// Parses an ALPN extension payload (u16 list length, then one
/// length-prefixed id per protocol) into UTF-8 protocol strings.
fn parse_alpn_extension(payload: &[u8]) -> Result<Vec<String>, String> {
    if payload.len() < 2 {
        return Err("truncated ALPN extension".to_owned());
    }
    let list_len = usize::from(u16::from_be_bytes([payload[0], payload[1]]));
    if payload.len() != 2 + list_len {
        return Err("invalid ALPN extension length".to_owned());
    }
    let mut rest = &payload[2..];
    let mut protocols = Vec::new();
    // Each entry: u8 length followed by that many protocol-id bytes.
    while let Some((&len, tail)) = rest.split_first() {
        let len = usize::from(len);
        if tail.len() < len {
            return Err("truncated ALPN protocol".to_owned());
        }
        let (name, tail) = tail.split_at(len);
        let protocol = std::str::from_utf8(name).map_err(|err| err.to_string())?;
        protocols.push(protocol.to_owned());
        rest = tail;
    }
    Ok(protocols)
}
/// Parses an extension payload shaped as a u16 list length followed by
/// big-endian u16 values (e.g. supported_groups, signature_algorithms).
fn parse_u16_vector_extension(payload: &[u8]) -> Result<Vec<u16>, String> {
    let [hi, lo, values @ ..] = payload else {
        return Err("truncated vector extension".to_owned());
    };
    let list_len = usize::from(u16::from_be_bytes([*hi, *lo]));
    if values.len() != list_len || list_len % 2 != 0 {
        return Err("invalid vector extension length".to_owned());
    }
    Ok(values
        .chunks_exact(2)
        .map(|pair| u16::from_be_bytes([pair[0], pair[1]]))
        .collect())
}
/// Parses a supported_versions extension payload (u8 list length followed by
/// big-endian u16 version codes).
fn parse_supported_versions_extension(payload: &[u8]) -> Result<Vec<u16>, String> {
    let [len, versions @ ..] = payload else {
        return Err("truncated supported_versions extension".to_owned());
    };
    let list_len = usize::from(*len);
    if versions.len() != list_len || list_len % 2 != 0 {
        return Err("invalid supported_versions extension length".to_owned());
    }
    Ok(versions
        .chunks_exact(2)
        .map(|pair| u16::from_be_bytes([pair[0], pair[1]]))
        .collect())
}
/// Reports whether `value` is a GREASE code point: both bytes identical and
/// their low nibble equal to 0xa (0x0a0a, 0x1a1a, …, 0xfafa).
fn is_grease_value(value: u16) -> bool {
    let [hi, lo] = value.to_be_bytes();
    hi == lo && lo & 0x0f == 0x0a
}
/// Drops GREASE placeholders from `values` (order preserved) so comparisons
/// are stable across handshakes.
fn normalize_u16_vector(values: Vec<u16>) -> Vec<u16> {
    let mut normalized = values;
    normalized.retain(|value| !is_grease_value(*value));
    normalized
}
/// Maps cipher suite ids to OpenSSL-style names, skipping GREASE values and
/// the renegotiation SCSV (0x00ff) and falling back to hex for unknown ids.
fn normalize_cipher_suite_names(cipher_suites: &[u16]) -> Vec<String> {
    let mut names = Vec::new();
    for &suite in cipher_suites {
        if is_grease_value(suite) || suite == 0x00ff {
            continue;
        }
        let name = match SslCipher::from_value(suite) {
            Some(cipher) => cipher.name().to_owned(),
            None => format!("0x{suite:04x}"),
        };
        names.push(name);
    }
    names
}
/// Collapses every GREASE placeholder onto the canonical 0x0a0a value,
/// leaving non-GREASE values untouched.
fn normalize_grease_value(value: u16) -> u16 {
    match is_grease_value(value) {
        true => 0x0a0a,
        false => value,
    }
}
/// Normalizes a u16-list extension payload (u16 length prefix plus values)
/// by canonicalizing GREASE values; length prefix and order are preserved.
fn normalize_u16_list_payload(payload: &[u8]) -> Result<Vec<u8>, String> {
    if payload.len() < 2 {
        return Err("truncated vector extension".to_owned());
    }
    let list_len = usize::from(u16::from_be_bytes([payload[0], payload[1]]));
    if payload.len() != 2 + list_len || list_len % 2 != 0 {
        return Err("invalid vector extension length".to_owned());
    }
    let mut normalized = Vec::with_capacity(payload.len());
    // Keep the original length prefix; the list length never changes.
    normalized.extend_from_slice(&payload[..2]);
    for chunk in payload[2..].chunks_exact(2) {
        let value = normalize_grease_value(u16::from_be_bytes([chunk[0], chunk[1]]));
        normalized.extend_from_slice(&value.to_be_bytes());
    }
    Ok(normalized)
}
/// Normalizes a supported_versions payload (u8 length prefix plus u16 codes)
/// by canonicalizing GREASE values; structure and order are preserved.
fn normalize_supported_versions_payload(payload: &[u8]) -> Result<Vec<u8>, String> {
    if payload.is_empty() {
        return Err("truncated supported_versions extension".to_owned());
    }
    let list_len = usize::from(payload[0]);
    if payload.len() != 1 + list_len || list_len % 2 != 0 {
        return Err("invalid supported_versions extension length".to_owned());
    }
    let mut normalized = Vec::with_capacity(payload.len());
    // Keep the one-byte length prefix; the list length never changes.
    normalized.push(payload[0]);
    for chunk in payload[1..].chunks_exact(2) {
        let value = normalize_grease_value(u16::from_be_bytes([chunk[0], chunk[1]]));
        normalized.extend_from_slice(&value.to_be_bytes());
    }
    Ok(normalized)
}
/// Normalizes a key_share payload: GREASE group ids are canonicalized and the
/// per-handshake random key bytes are replaced by zeros of equal length, so
/// only the structure and group ids contribute to the snapshot.
fn normalize_key_share_payload(payload: &[u8]) -> Result<Vec<u8>, String> {
    if payload.len() < 2 {
        return Err("truncated key_share extension".to_owned());
    }
    let shares_len = usize::from(u16::from_be_bytes([payload[0], payload[1]]));
    if payload.len() != 2 + shares_len {
        return Err("invalid key_share extension length".to_owned());
    }
    let mut normalized = Vec::with_capacity(payload.len());
    // Keep the two-byte shares length; the overall layout never changes.
    normalized.extend_from_slice(&payload[..2]);
    let mut offset = 2usize;
    // Each entry: u16 group id, u16 key length, then the key bytes.
    while offset < payload.len() {
        if offset + 4 > payload.len() {
            return Err("truncated key_share entry".to_owned());
        }
        let group =
            normalize_grease_value(u16::from_be_bytes([payload[offset], payload[offset + 1]]));
        normalized.extend_from_slice(&group.to_be_bytes());
        let key_len = usize::from(u16::from_be_bytes([
            payload[offset + 2],
            payload[offset + 3],
        ]));
        normalized.extend_from_slice(&(key_len as u16).to_be_bytes());
        offset += 4;
        if offset + key_len > payload.len() {
            return Err("truncated key_share bytes".to_owned());
        }
        // Zero the key material: it is freshly generated per handshake.
        normalized.extend(std::iter::repeat_n(0, key_len));
        offset += key_len;
    }
    Ok(normalized)
}
/// Normalizes a single extension payload for snapshotting, dispatching on the
/// (pre-canonicalization) extension type.
fn normalize_extension_payload(kind: u16, payload: &[u8]) -> Result<Vec<u8>, String> {
    // GREASE extensions carry meaningless payloads: drop the contents.
    if is_grease_value(kind) {
        return Ok(Vec::new());
    }
    match kind {
        // supported_groups (10) / signature_algorithms (13): u16 lists that
        // may themselves contain GREASE values.
        10 | 13 => normalize_u16_list_payload(payload),
        // supported_versions (43): u8-prefixed u16 list.
        43 => normalize_supported_versions_payload(payload),
        // key_share (51): zero the random key material.
        51 => normalize_key_share_payload(payload),
        // ECH (65037): payload is randomized, erase it.
        65037 => Ok(Vec::new()),
        _ => Ok(payload.to_vec()),
    }
}
/// Produces a deterministic byte-level snapshot of a raw ClientHello: random
/// and session-id bytes are zeroed, GREASE values canonicalized, volatile
/// extensions scrubbed, and — optionally — the extension order canonicalized
/// for emulations that permute extensions.
fn normalize_client_hello_snapshot(
    handshake: &[u8],
    canonicalize_extension_order: bool,
) -> Result<Vec<u8>, String> {
    // Bounds-checked slice cursor (same shape as in `parse_client_hello`).
    fn take<'a>(buf: &'a [u8], offset: &mut usize, len: usize) -> Result<&'a [u8], String> {
        let end = offset.saturating_add(len);
        if end > buf.len() {
            return Err("truncated ClientHello field".to_owned());
        }
        let out = &buf[*offset..end];
        *offset = end;
        Ok(out)
    }
    if handshake.len() < 4 || handshake[0] != 1 {
        return Err("expected ClientHello handshake".to_owned());
    }
    let hello_len = usize::from(handshake[1]) << 16
        | usize::from(handshake[2]) << 8
        | usize::from(handshake[3]);
    if handshake.len() < 4 + hello_len {
        return Err("truncated ClientHello".to_owned());
    }
    let body = &handshake[4..4 + hello_len];
    let mut offset = 0usize;
    let mut normalized_body = Vec::with_capacity(body.len());
    normalized_body.extend_from_slice(take(body, &mut offset, 2)?);
    // The client random is fresh per handshake: zero all 32 bytes.
    let _random = take(body, &mut offset, 32)?;
    normalized_body.extend(std::iter::repeat_n(0, 32));
    // Session id is random too: keep the length, zero the contents.
    let session_id_len = usize::from(take(body, &mut offset, 1)?[0]);
    normalized_body.push(session_id_len as u8);
    let _session_id = take(body, &mut offset, session_id_len)?;
    normalized_body.extend(std::iter::repeat_n(0, session_id_len));
    let cipher_len_bytes = take(body, &mut offset, 2)?;
    let cipher_len = usize::from(u16::from_be_bytes([
        cipher_len_bytes[0],
        cipher_len_bytes[1],
    ]));
    let cipher_bytes = take(body, &mut offset, cipher_len)?;
    normalized_body.extend_from_slice(cipher_len_bytes);
    // Canonicalize GREASE cipher suites in place (count is unchanged).
    for chunk in cipher_bytes.chunks_exact(2) {
        let suite = normalize_grease_value(u16::from_be_bytes([chunk[0], chunk[1]]));
        normalized_body.extend_from_slice(&suite.to_be_bytes());
    }
    let compression_len = usize::from(take(body, &mut offset, 1)?[0]);
    normalized_body.push(compression_len as u8);
    normalized_body.extend_from_slice(take(body, &mut offset, compression_len)?);
    let extension_len_bytes = take(body, &mut offset, 2)?;
    let extension_len = usize::from(u16::from_be_bytes([
        extension_len_bytes[0],
        extension_len_bytes[1],
    ]));
    let extension_bytes = take(body, &mut offset, extension_len)?;
    let mut extensions = Vec::new();
    let mut ext_offset = 0usize;
    while ext_offset < extension_bytes.len() {
        let kind_bytes = take(extension_bytes, &mut ext_offset, 2)?;
        let kind = u16::from_be_bytes([kind_bytes[0], kind_bytes[1]]);
        let payload_len_bytes = take(extension_bytes, &mut ext_offset, 2)?;
        let payload_len = usize::from(u16::from_be_bytes([
            payload_len_bytes[0],
            payload_len_bytes[1],
        ]));
        let payload = take(extension_bytes, &mut ext_offset, payload_len)?;
        // Extension 21 (padding) varies with message size: drop it entirely.
        if kind == 21 {
            continue;
        }
        let normalized_kind = normalize_grease_value(kind);
        // Payload normalization dispatches on the *original* kind so GREASE
        // extensions are emptied before their kind is canonicalized.
        let normalized_payload = normalize_extension_payload(kind, payload)?;
        extensions.push((normalized_kind, normalized_payload));
    }
    if canonicalize_extension_order {
        extensions.sort_by_key(|(kind, _)| *kind);
    }
    // Re-serialize the normalized extensions, recomputing all lengths.
    let mut normalized_extensions = Vec::new();
    for (kind, payload) in extensions {
        normalized_extensions.extend_from_slice(&kind.to_be_bytes());
        normalized_extensions.extend_from_slice(&(payload.len() as u16).to_be_bytes());
        normalized_extensions.extend_from_slice(&payload);
    }
    normalized_body.extend_from_slice(&(normalized_extensions.len() as u16).to_be_bytes());
    normalized_body.extend_from_slice(&normalized_extensions);
    // Rebuild the 4-byte handshake header with the new 24-bit body length.
    let mut normalized = Vec::with_capacity(4 + normalized_body.len());
    normalized.push(1);
    normalized.push(((normalized_body.len() >> 16) & 0xff) as u8);
    normalized.push(((normalized_body.len() >> 8) & 0xff) as u8);
    normalized.push((normalized_body.len() & 0xff) as u8);
    normalized.extend_from_slice(&normalized_body);
    Ok(normalized)
}
/// Hex-encodes `bytes` as lowercase, two digits per byte.
///
/// Builds the string in a single pre-sized buffer instead of allocating one
/// `String` per byte and joining (the previous form was O(n) allocations).
fn hex_encode(bytes: &[u8]) -> String {
    use std::fmt::Write as _;
    let mut out = String::with_capacity(bytes.len() * 2);
    for byte in bytes {
        // Writing into a String cannot fail.
        write!(out, "{byte:02x}").expect("writing to a String cannot fail");
    }
    out
}
/// Decodes a hex fixture string into bytes; ASCII whitespace is ignored.
///
/// Panics when the digit count is odd or a pair is not valid hex — fixtures
/// are test inputs, so a hard failure is desired.
fn hex_decode(input: &str) -> Vec<u8> {
    let digits: Vec<u8> = input
        .bytes()
        .filter(|byte| !byte.is_ascii_whitespace())
        .collect();
    assert_eq!(
        digits.len() % 2,
        0,
        "fixture hex must contain an even number of digits"
    );
    let mut decoded = Vec::with_capacity(digits.len() / 2);
    for pair in digits.chunks_exact(2) {
        let pair = std::str::from_utf8(pair).unwrap();
        decoded.push(u8::from_str_radix(pair, 16).unwrap());
    }
    decoded
}
/// Drives a client built by `build_client` against the capture server and
/// returns the raw ClientHello bytes the server saw.
///
/// The HTTP request itself is expected to fail (the server never answers);
/// its error is kept only for diagnostics in the panic messages below.
fn capture_raw_client_hello<F>(build_client: F) -> Vec<u8>
where
    F: FnOnce(String) -> Client,
{
    let (url, receiver) = spawn_client_hello_capture_server();
    let client = build_client(url.clone());
    let request_error =
        futures_lite::future::block_on(async move { client.get(&url).await }).err();
    let hello = receiver
        .recv_timeout(Duration::from_secs(2))
        .unwrap_or_else(|_| {
            panic!("client hello capture server to respond, request error: {request_error:?}")
        })
        .unwrap_or_else(|err| {
            panic!("client hello bytes to be captured: {err}, request error: {request_error:?}")
        });
    hello
}
/// Like [`capture_raw_client_hello`], but returns the parsed structure.
fn capture_client_hello<F>(build_client: F) -> ParsedClientHello
where
    F: FnOnce(String) -> Client,
{
    let hello = capture_raw_client_hello(build_client);
    parse_client_hello(&hello).expect("captured bytes to parse as ClientHello")
}
/// Asserts that a client built with `fixture.emulation` produces the expected
/// ClientHello: ALPN, versions, groups, sigalgs, cipher names, mandatory
/// extensions, and finally a byte-exact normalized snapshot.
#[cfg(feature = "emulation")]
fn assert_emulation_fixture(fixture: EmulationHelloFixture) {
    let raw_hello = capture_raw_client_hello(|_| {
        Client::builder()
            .emulation(fixture.emulation)
            .build()
            .unwrap()
    });
    let hello = parse_client_hello(&raw_hello).expect("captured bytes to parse as ClientHello");
    // Pull the individual extensions the fixture asserts on.
    let alpn = parse_alpn_extension(hello.extension(16).expect("ALPN extension")).unwrap();
    let supported_groups = normalize_u16_vector(
        parse_u16_vector_extension(hello.extension(10).expect("supported_groups extension"))
            .unwrap(),
    );
    let supported_versions = normalize_u16_vector(
        parse_supported_versions_extension(
            hello.extension(43).expect("supported_versions extension"),
        )
        .unwrap(),
    );
    let signature_algorithms =
        parse_u16_vector_extension(hello.extension(13).expect("sigalgs extension")).unwrap();
    let cipher_names = normalize_cipher_suite_names(&hello.cipher_suites);
    // Invariants shared by every fixture.
    assert_eq!(hello.legacy_version, 0x0303);
    assert_eq!(alpn, vec!["h2".to_owned(), "http/1.1".to_owned()]);
    assert_eq!(supported_versions, vec![0x0304, 0x0303]);
    assert_eq!(supported_groups, vec![29, 23, 24]);
    // Fixture-specific expectations.
    assert_eq!(signature_algorithms, fixture.signature_algorithms);
    assert_eq!(
        cipher_names,
        fixture
            .cipher_names
            .iter()
            .map(|name| (*name).to_owned())
            .collect::<Vec<_>>()
    );
    assert!(
        hello.extension(5).is_some(),
        "status_request extension missing"
    );
    assert!(
        hello.extension(18).is_some(),
        "certificate timestamp extension missing"
    );
    assert!(
        hello.extension(27).is_some(),
        "certificate compression extension missing"
    );
    assert_eq!(
        hello.extension(65037).is_some(),
        fixture.expect_ech_extension
    );
    // Byte-exact regression check against the stored normalized snapshot.
    let normalized_snapshot =
        normalize_client_hello_snapshot(&raw_hello, fixture.canonicalize_extension_order)
            .expect("normalized ClientHello snapshot");
    let expected_snapshot = hex_decode(fixture.normalized_snapshot_hex);
    if normalized_snapshot != expected_snapshot {
        // Report the first differing byte to make drift easy to localize.
        let first_diff = normalized_snapshot
            .iter()
            .zip(expected_snapshot.iter())
            .position(|(actual, expected)| actual != expected)
            .unwrap_or_else(|| normalized_snapshot.len().min(expected_snapshot.len()));
        panic!(
            "{} normalized ClientHello snapshot drifted at byte {first_diff} (expected_len={}, actual_len={})\nexpected={}\nactual={}",
            fixture.name,
            expected_snapshot.len(),
            normalized_snapshot.len(),
            hex_encode(&expected_snapshot),
            hex_encode(&normalized_snapshot)
        );
    }
}
/// The ALPN wire format prefixes each protocol id with its one-byte length.
#[test]
fn encode_boring_alpn_protocols_matches_wire_format() {
    let protocols = ["h2".to_owned(), "http/1.1".to_owned()];
    let encoded = encode_boring_alpn_protocols(&protocols).unwrap();
    assert_eq!(encoded, b"\x02h2\x08http/1.1".to_vec());
}
/// Protocol ids longer than 255 bytes cannot be length-prefixed with a
/// single byte and must be rejected with a descriptive error.
#[test]
fn encode_boring_alpn_protocols_rejects_protocol_ids_longer_than_u8() {
    let oversized = vec!["a".repeat(256)];
    let message = encode_boring_alpn_protocols(&oversized)
        .unwrap_err()
        .to_string();
    assert!(message.contains("ALPN protocol id is too long"));
}
/// Golden test: building a plan from a fully-populated `TlsConfig` carrying
/// a Boring fingerprint must produce exactly these derived values.
#[test]
fn build_boring_tls_plan_matches_expected_golden_values() {
let fingerprint = BoringTlsFingerprint {
cipher_list: Some("TLS_AES_128_GCM_SHA256:ECDHE-RSA-AES128-GCM-SHA256".to_owned()),
curves_list: Some("X25519:P-256".to_owned()),
sigalgs_list: Some("ecdsa_secp256r1_sha256:rsa_pss_rsae_sha256".to_owned()),
grease_enabled: Some(true),
permute_extensions: Some(false),
enable_ocsp_stapling: true,
enable_signed_cert_timestamps: true,
enable_ech_grease: true,
certificate_compression: vec![BoringCertCompression::Brotli],
};
let tls = TlsConfig {
accept_invalid_certs: true,
alpn_protocols: None,
backend: None,
cipher_suites: Some(vec![
"TLS_AES_128_GCM_SHA256".to_owned(),
"ECDHE-RSA-AES128-GCM-SHA256".to_owned(),
]),
min_tls_version: Some("1.2".to_owned()),
max_tls_version: Some("1.3".to_owned()),
boring_tls_fingerprint: Some(fingerprint.clone()),
root_store: None,
pinned_certificates: Vec::new(),
};
let protocols = vec!["h2".to_owned(), "http/1.1".to_owned()];
let plan = build_boring_tls_plan(&tls, &protocols).unwrap();
// `alpn_wire` is the length-prefixed encoding of `protocols`;
// `explicit_cipher_list` matches the configured `cipher_suites` joined
// by ':'; version bounds map from the "1.2"/"1.3" strings.
assert_eq!(
plan,
BoringTlsBuildPlan {
accept_invalid_certs: true,
alpn_wire: b"\x02h2\x08http/1.1".to_vec(),
min_proto_version: Some(btls::ssl::SslVersion::TLS1_2),
max_proto_version: Some(btls::ssl::SslVersion::TLS1_3),
fingerprint: Some(fingerprint),
explicit_cipher_list: Some(
"TLS_AES_128_GCM_SHA256:ECDHE-RSA-AES128-GCM-SHA256".to_owned()
),
}
);
// ECH GREASE is reported from the config/fingerprint, not from the plan.
assert!(boring_tls_ech_grease_enabled(&tls));
}
/// Applying a plan must propagate the protocol version bounds and the
/// relaxed verify mode onto a real BoringSSL connector builder.
#[test]
fn apply_boring_tls_plan_sets_builder_versions_and_verify_mode() {
    use btls::ssl::{SslConnector, SslMethod, SslVerifyMode, SslVersion};
    let config = TlsConfig {
        accept_invalid_certs: true,
        alpn_protocols: None,
        backend: None,
        cipher_suites: None,
        min_tls_version: Some("1.2".to_owned()),
        max_tls_version: Some("1.3".to_owned()),
        boring_tls_fingerprint: None,
        root_store: None,
        pinned_certificates: Vec::new(),
    };
    let alpn = vec!["h2".to_owned(), "http/1.1".to_owned()];
    let plan = build_boring_tls_plan(&config, &alpn).unwrap();
    let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
    apply_boring_tls_plan(&mut builder, &plan).unwrap();
    assert_eq!(builder.min_proto_version(), Some(SslVersion::TLS1_2));
    assert_eq!(builder.max_proto_version(), Some(SslVersion::TLS1_3));
    // accept_invalid_certs = true must disable peer verification entirely.
    assert_eq!(builder.build().context().verify_mode(), SslVerifyMode::NONE);
}
/// Only TLS 1.2 and 1.3 are recognized; any other version string maps to
/// `None`.
#[test]
fn parse_boring_tls_version_supports_emulation_versions() {
    use btls::ssl::SslVersion;
    let cases = [
        ("1.2", Some(SslVersion::TLS1_2)),
        ("1.3", Some(SslVersion::TLS1_3)),
        ("1.1", None),
    ];
    for (input, expected) in cases {
        assert_eq!(parse_boring_tls_version(input), expected);
    }
}
/// `accept_invalid_certs` must translate into a connector whose context
/// skips peer verification.
#[test]
fn build_boring_tls_connector_preserves_invalid_cert_setting() {
    use btls::ssl::SslVerifyMode;
    let config = TlsConfig {
        accept_invalid_certs: true,
        ..Default::default()
    };
    let verify_mode = build_boring_tls_connector_for_protocols(&config, &[])
        .unwrap()
        .context()
        .verify_mode();
    assert_eq!(verify_mode, SslVerifyMode::NONE);
}
/// A custom Boring fingerprint restricted to HTTP/1.1 and the P-256 curve
/// must appear verbatim in the ClientHello sent on the wire.
#[test]
fn live_client_hello_reflects_http1_safe_alpn_and_supported_groups() {
    let fingerprint = BoringTlsFingerprint {
        curves_list: Some("P-256".to_owned()),
        grease_enabled: Some(false),
        permute_extensions: Some(false),
        ..Default::default()
    };
    let tls = TlsConfig {
        accept_invalid_certs: true,
        backend: Some(TlsBackend::Boring),
        alpn_protocols: Some(vec!["http/1.1".to_owned()]),
        min_tls_version: Some("1.2".to_owned()),
        max_tls_version: Some("1.3".to_owned()),
        boring_tls_fingerprint: Some(fingerprint),
        ..Default::default()
    };
    let hello = capture_client_hello(|_| Client::builder().tls_config(tls).build().unwrap());
    assert_eq!(hello.legacy_version, 0x0303);
    assert!(!hello.cipher_suites.is_empty());
    let alpn = parse_alpn_extension(hello.extension(16).expect("ALPN extension")).unwrap();
    assert_eq!(alpn, vec!["http/1.1".to_owned()]);
    let groups =
        parse_u16_vector_extension(hello.extension(10).expect("supported_groups extension"))
            .unwrap();
    // 23 is the sole group advertised, matching the "P-256" curves_list.
    assert_eq!(groups, vec![23]);
    let versions = parse_supported_versions_extension(
        hello.extension(43).expect("supported_versions extension"),
    )
    .unwrap();
    assert_eq!(versions, vec![0x0304, 0x0303]);
}
/// The Chrome and Firefox presets must advertise their own, distinct
/// signature-algorithm lists in live handshakes.
#[cfg(feature = "emulation")]
#[test]
fn live_client_hello_signature_algorithms_follow_emulation_preset() {
    let chrome_hello = capture_client_hello(|_| {
        Client::builder()
            .emulation(Emulation::Chrome136)
            .build()
            .unwrap()
    });
    let firefox_hello = capture_client_hello(|_| {
        Client::builder()
            .emulation(Emulation::Firefox128)
            .build()
            .unwrap()
    });
    let chrome_sigalgs = parse_u16_vector_extension(
        chrome_hello.extension(13).expect("chrome sigalgs extension"),
    )
    .unwrap();
    let firefox_sigalgs = parse_u16_vector_extension(
        firefox_hello
            .extension(13)
            .expect("firefox sigalgs extension"),
    )
    .unwrap();
    // Chrome's list carries two extra trailing schemes that Firefox omits.
    assert_eq!(
        chrome_sigalgs,
        vec![
            0x0403, 0x0804, 0x0401, 0x0503, 0x0805, 0x0501, 0x0806, 0x0601
        ]
    );
    assert_eq!(
        firefox_sigalgs,
        vec![0x0403, 0x0804, 0x0401, 0x0503, 0x0805, 0x0501]
    );
    assert_ne!(chrome_sigalgs, firefox_sigalgs);
}
/// Exhaustive golden coverage: one `EmulationHelloFixture` per emulation
/// preset, each pinning the cipher order, signature algorithms, ECH
/// expectation, and a full normalized ClientHello snapshot (hex).
/// Snapshots with `canonicalize_extension_order: true` are compared after
/// extension-order canonicalization; `false` compares the order as sent.
#[cfg(feature = "emulation")]
#[test]
fn live_client_hello_fixtures_cover_all_emulation_presets() {
let fixtures = [
EmulationHelloFixture {
name: "chrome136",
emulation: Emulation::Chrome136,
cipher_names: &[
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305",
],
signature_algorithms: &[
0x0403, 0x0804, 0x0401, 0x0503, 0x0805, 0x0501, 0x0806, 0x0601,
],
expect_ech_extension: true,
canonicalize_extension_order: true,
normalized_snapshot_hex: "010001060303000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000140a0a130113021303c02bc02fc02cc030cca9cca8010000a9000500050100000000000a000a00080a0a001d00170018000b00020100000d00120010040308040401050308050501080606010010000e000c02683208687474702f312e310012000000170000001b000302000200230000002b0007060a0a03040303002d000201010033002b00290a0a000100001d002000000000000000000000000000000000000000000000000000000000000000000a0a00000a0a0000fe0d0000ff01000100",
},
EmulationHelloFixture {
name: "firefox128",
emulation: Emulation::Firefox128,
// Firefox orders ChaCha20 ahead of AES-256 in the TLS 1.3 block,
// unlike the Chrome fixture above.
cipher_names: &[
"TLS_AES_128_GCM_SHA256",
"TLS_CHACHA20_POLY1305_SHA256",
"TLS_AES_256_GCM_SHA384",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305",
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
],
signature_algorithms: &[0x0403, 0x0804, 0x0401, 0x0503, 0x0805, 0x0501],
expect_ech_extension: true,
canonicalize_extension_order: true,
normalized_snapshot_hex: "010001020303000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000140a0a130113031302c02bc02fcca9cca8c02cc030010000a5000500050100000000000a000a00080a0a001d00170018000b00020100000d000e000c0403080404010503080505010010000e000c02683208687474702f312e310012000000170000001b000302000200230000002b0007060a0a03040303002d000201010033002b00290a0a000100001d002000000000000000000000000000000000000000000000000000000000000000000a0a00000a0a0000fe0d0000ff01000100",
},
EmulationHelloFixture {
name: "safari18_4",
emulation: Emulation::Safari18_4,
cipher_names: &[
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-RSA-CHACHA20-POLY1305",
],
signature_algorithms: &[0x0403, 0x0804, 0x0401, 0x0503, 0x0805, 0x0501],
// Safari's fixture expects no ECH extension and compares the
// extension order exactly as sent (no canonicalization).
expect_ech_extension: false,
canonicalize_extension_order: false,
normalized_snapshot_hex: "010000fe0303000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000140a0a130113021303c02bc02ccca9c02fc030cca8010000a10a0a000000170000ff01000100000a000a00080a0a001d00170018000b00020100002300000010000e000c02683208687474702f312e31000500050100000000000d000e000c040308040401050308050501001200000033002b00290a0a000100001d00200000000000000000000000000000000000000000000000000000000000000000002d00020101002b0007060a0a03040303001b00030200020a0a0000",
},
];
for fixture in fixtures {
assert_emulation_fixture(fixture);
}
}
}