pub struct ConfigBuilder { /* private fields */ }

Implementations§
Source§impl ConfigBuilder
impl ConfigBuilder
Sourcepub fn max_message_size(self, size: usize) -> Self
pub fn max_message_size(self, size: usize) -> Self
Set the maximum NETCONF message size. None means unlimited.
Sourcepub fn connect_timeout(self, timeout: Duration) -> Self
pub fn connect_timeout(self, timeout: Duration) -> Self
Set the timeout for the TCP/SSH connect phase.
When set, Session::connect_with_config
will fail with TransportError::Timeout
if the SSH connection is not established within this duration.
Examples found in repository?
7async fn main() -> netconf_rust::Result<()> {
8 let config = Config::builder()
9 .keepalive_interval(Duration::from_secs(10))
10 .keepalive_max(3)
11 .rpc_timeout(Duration::from_secs(30))
12 .connect_timeout(Duration::from_secs(10))
13 .nodelay(true)
14 .build();
15
16 let session = Arc::new(
17 Session::connect_with_config("localhost", 830, "netconf", "netconf", config).await?,
18 );
19
20 println!("Connected (session {})", session.session_id());
21
22 // Spawn a background task that fires when the session disconnects.
23 // This is the pattern you'd use in a long-running service to clean up
24 // sessions from a DashMap or similar structure.
25 let watcher_session = Arc::clone(&session);
26 let watcher = tokio::spawn(async move {
27 let reason = watcher_session.disconnected().await;
28 println!("Disconnect detected: {reason}");
29 // In a real service you'd remove the session from your map here:
30 // sessions.remove(&uuid);
31 });
32
33 // Normal operations — the watcher runs in the background
34 let config = session
35 .get_config(netconf_rust::Datastore::Running, None)
36 .await?;
37 println!("Got config ({} bytes)", config.len());
38
39 // You can also race an RPC against disconnect using select!
40 tokio::select! {
41 result = session.get_config(netconf_rust::Datastore::Running, None) => {
42 match result {
43 Ok(data) => println!("Got config again ({} bytes)", data.len()),
44 Err(e) => eprintln!("RPC failed: {e}"),
45 }
46 }
47 reason = session.disconnected() => {
48 eprintln!("Connection lost while waiting for RPC: {reason}");
49 }
50 }
51
52 // Graceful close — the watcher will fire with DisconnectReason::Eof
53 // after the server acknowledges the close and drops the connection.
54 session.close_session().await?;
55 println!("Session closed");
56
57 // Wait for the watcher to complete
58 let _ = watcher.await;
59
60 Ok(())
61}

Sourcepub fn hello_timeout(self, timeout: Duration) -> Self
pub fn hello_timeout(self, timeout: Duration) -> Self
Set the timeout for the NETCONF hello exchange.
Some vendors (e.g. Nokia SR OS) can silently stall during the hello exchange — for example, if the client hello contains an invalid namespace, the server waits indefinitely for a valid hello without returning an error. Without a timeout, the connection hangs forever.
When set, the hello exchange will fail with
TransportError::Timeout
if the server does not complete the hello within this duration.
Sourcepub fn rpc_timeout(self, timeout: Duration) -> Self
pub fn rpc_timeout(self, timeout: Duration) -> Self
Set the timeout for individual RPC operations (send + wait for reply).
When set, RpcFuture::response() will fail
with TransportError::Timeout
if the server does not reply within this duration.
Examples found in repository?
7async fn main() -> netconf_rust::Result<()> {
8 let config = Config::builder()
9 .keepalive_interval(Duration::from_secs(10))
10 .keepalive_max(3)
11 .rpc_timeout(Duration::from_secs(30))
12 .connect_timeout(Duration::from_secs(10))
13 .nodelay(true)
14 .build();
15
16 let session = Arc::new(
17 Session::connect_with_config("localhost", 830, "netconf", "netconf", config).await?,
18 );
19
20 println!("Connected (session {})", session.session_id());
21
22 // Spawn a background task that fires when the session disconnects.
23 // This is the pattern you'd use in a long-running service to clean up
24 // sessions from a DashMap or similar structure.
25 let watcher_session = Arc::clone(&session);
26 let watcher = tokio::spawn(async move {
27 let reason = watcher_session.disconnected().await;
28 println!("Disconnect detected: {reason}");
29 // In a real service you'd remove the session from your map here:
30 // sessions.remove(&uuid);
31 });
32
33 // Normal operations — the watcher runs in the background
34 let config = session
35 .get_config(netconf_rust::Datastore::Running, None)
36 .await?;
37 println!("Got config ({} bytes)", config.len());
38
39 // You can also race an RPC against disconnect using select!
40 tokio::select! {
41 result = session.get_config(netconf_rust::Datastore::Running, None) => {
42 match result {
43 Ok(data) => println!("Got config again ({} bytes)", data.len()),
44 Err(e) => eprintln!("RPC failed: {e}"),
45 }
46 }
47 reason = session.disconnected() => {
48 eprintln!("Connection lost while waiting for RPC: {reason}");
49 }
50 }
51
52 // Graceful close — the watcher will fire with DisconnectReason::Eof
53 // after the server acknowledges the close and drops the connection.
54 session.close_session().await?;
55 println!("Session closed");
56
57 // Wait for the watcher to complete
58 let _ = watcher.await;
59
60 Ok(())
61}

Sourcepub fn inactivity_timeout(self, timeout: Duration) -> Self
pub fn inactivity_timeout(self, timeout: Duration) -> Self
Set the SSH inactivity timeout (garbage-collect idle connections).
Sourcepub fn keepalive_interval(self, interval: Duration) -> Self
pub fn keepalive_interval(self, interval: Duration) -> Self
Set the SSH keepalive interval (detect dead peers).
Examples found in repository?
7async fn main() -> netconf_rust::Result<()> {
8 let config = Config::builder()
9 .keepalive_interval(Duration::from_secs(10))
10 .keepalive_max(3)
11 .rpc_timeout(Duration::from_secs(30))
12 .connect_timeout(Duration::from_secs(10))
13 .nodelay(true)
14 .build();
15
16 let session = Arc::new(
17 Session::connect_with_config("localhost", 830, "netconf", "netconf", config).await?,
18 );
19
20 println!("Connected (session {})", session.session_id());
21
22 // Spawn a background task that fires when the session disconnects.
23 // This is the pattern you'd use in a long-running service to clean up
24 // sessions from a DashMap or similar structure.
25 let watcher_session = Arc::clone(&session);
26 let watcher = tokio::spawn(async move {
27 let reason = watcher_session.disconnected().await;
28 println!("Disconnect detected: {reason}");
29 // In a real service you'd remove the session from your map here:
30 // sessions.remove(&uuid);
31 });
32
33 // Normal operations — the watcher runs in the background
34 let config = session
35 .get_config(netconf_rust::Datastore::Running, None)
36 .await?;
37 println!("Got config ({} bytes)", config.len());
38
39 // You can also race an RPC against disconnect using select!
40 tokio::select! {
41 result = session.get_config(netconf_rust::Datastore::Running, None) => {
42 match result {
43 Ok(data) => println!("Got config again ({} bytes)", data.len()),
44 Err(e) => eprintln!("RPC failed: {e}"),
45 }
46 }
47 reason = session.disconnected() => {
48 eprintln!("Connection lost while waiting for RPC: {reason}");
49 }
50 }
51
52 // Graceful close — the watcher will fire with DisconnectReason::Eof
53 // after the server acknowledges the close and drops the connection.
54 session.close_session().await?;
55 println!("Session closed");
56
57 // Wait for the watcher to complete
58 let _ = watcher.await;
59
60 Ok(())
61}

Sourcepub fn keepalive_max(self, max: usize) -> Self
pub fn keepalive_max(self, max: usize) -> Self
Set the maximum number of missed keepalives before disconnect.
Examples found in repository?
7async fn main() -> netconf_rust::Result<()> {
8 let config = Config::builder()
9 .keepalive_interval(Duration::from_secs(10))
10 .keepalive_max(3)
11 .rpc_timeout(Duration::from_secs(30))
12 .connect_timeout(Duration::from_secs(10))
13 .nodelay(true)
14 .build();
15
16 let session = Arc::new(
17 Session::connect_with_config("localhost", 830, "netconf", "netconf", config).await?,
18 );
19
20 println!("Connected (session {})", session.session_id());
21
22 // Spawn a background task that fires when the session disconnects.
23 // This is the pattern you'd use in a long-running service to clean up
24 // sessions from a DashMap or similar structure.
25 let watcher_session = Arc::clone(&session);
26 let watcher = tokio::spawn(async move {
27 let reason = watcher_session.disconnected().await;
28 println!("Disconnect detected: {reason}");
29 // In a real service you'd remove the session from your map here:
30 // sessions.remove(&uuid);
31 });
32
33 // Normal operations — the watcher runs in the background
34 let config = session
35 .get_config(netconf_rust::Datastore::Running, None)
36 .await?;
37 println!("Got config ({} bytes)", config.len());
38
39 // You can also race an RPC against disconnect using select!
40 tokio::select! {
41 result = session.get_config(netconf_rust::Datastore::Running, None) => {
42 match result {
43 Ok(data) => println!("Got config again ({} bytes)", data.len()),
44 Err(e) => eprintln!("RPC failed: {e}"),
45 }
46 }
47 reason = session.disconnected() => {
48 eprintln!("Connection lost while waiting for RPC: {reason}");
49 }
50 }
51
52 // Graceful close — the watcher will fire with DisconnectReason::Eof
53 // after the server acknowledges the close and drops the connection.
54 session.close_session().await?;
55 println!("Session closed");
56
57 // Wait for the watcher to complete
58 let _ = watcher.await;
59
60 Ok(())
61}

Sourcepub fn nodelay(self, nodelay: bool) -> Self
pub fn nodelay(self, nodelay: bool) -> Self
Enable or disable TCP_NODELAY on the SSH socket.
Examples found in repository?
7async fn main() -> netconf_rust::Result<()> {
8 let config = Config::builder()
9 .keepalive_interval(Duration::from_secs(10))
10 .keepalive_max(3)
11 .rpc_timeout(Duration::from_secs(30))
12 .connect_timeout(Duration::from_secs(10))
13 .nodelay(true)
14 .build();
15
16 let session = Arc::new(
17 Session::connect_with_config("localhost", 830, "netconf", "netconf", config).await?,
18 );
19
20 println!("Connected (session {})", session.session_id());
21
22 // Spawn a background task that fires when the session disconnects.
23 // This is the pattern you'd use in a long-running service to clean up
24 // sessions from a DashMap or similar structure.
25 let watcher_session = Arc::clone(&session);
26 let watcher = tokio::spawn(async move {
27 let reason = watcher_session.disconnected().await;
28 println!("Disconnect detected: {reason}");
29 // In a real service you'd remove the session from your map here:
30 // sessions.remove(&uuid);
31 });
32
33 // Normal operations — the watcher runs in the background
34 let config = session
35 .get_config(netconf_rust::Datastore::Running, None)
36 .await?;
37 println!("Got config ({} bytes)", config.len());
38
39 // You can also race an RPC against disconnect using select!
40 tokio::select! {
41 result = session.get_config(netconf_rust::Datastore::Running, None) => {
42 match result {
43 Ok(data) => println!("Got config again ({} bytes)", data.len()),
44 Err(e) => eprintln!("RPC failed: {e}"),
45 }
46 }
47 reason = session.disconnected() => {
48 eprintln!("Connection lost while waiting for RPC: {reason}");
49 }
50 }
51
52 // Graceful close — the watcher will fire with DisconnectReason::Eof
53 // after the server acknowledges the close and drops the connection.
54 session.close_session().await?;
55 println!("Session closed");
56
57 // Wait for the watcher to complete
58 let _ = watcher.await;
59
60 Ok(())
61}

Sourcepub fn window_size(self, size: u32) -> Self
pub fn window_size(self, size: u32) -> Self
Set the SSH flow-control window size.
Controls how much data the remote side can send before waiting for a window adjustment. Larger values improve throughput for big NETCONF responses at the cost of higher memory usage.
Default: DEFAULT_WINDOW_SIZE (2 MB).
Sourcepub fn maximum_packet_size(self, size: u32) -> Self
pub fn maximum_packet_size(self, size: u32) -> Self
Set the maximum SSH packet size.
Each SSH data packet is at most this many bytes. Larger values reduce per-packet overhead but increase the minimum buffer allocation.
Default: DEFAULT_MAXIMUM_PACKET_SIZE (32 KB).
Sourcepub fn stream_buffer_capacity(self, capacity: usize) -> Self
pub fn stream_buffer_capacity(self, capacity: usize) -> Self
Set the capacity of the internal channel used by
Session::rpc_stream().
This controls how many chunks the background reader can buffer before applying backpressure. Higher values smooth out bursty reads at the cost of memory.
Default: DEFAULT_STREAM_BUFFER_CAPACITY (32).
§Panics
Panics if capacity is 0.
Sourcepub fn lenient_chunked_framing(self, mode: LenientChunkedFraming) -> Self
pub fn lenient_chunked_framing(self, mode: LenientChunkedFraming) -> Self
Enable lenient chunked framing recovery.
When set to a value other than LenientChunkedFraming::Off, the
decoder tolerates incorrect chunk sizes from routers that mis-report
the byte count in RFC 6242 chunk headers.
Sourcepub fn host_key_verification(self, mode: HostKeyVerification) -> Self
pub fn host_key_verification(self, mode: HostKeyVerification) -> Self
Set the host key verification mode.
Sourcepub fn known_hosts_path(self, path: impl Into<PathBuf>) -> Self
pub fn known_hosts_path(self, path: impl Into<PathBuf>) -> Self
Set a custom path to the known_hosts file.
Sourcepub fn danger_disable_host_key_verification(self) -> Self
pub fn danger_disable_host_key_verification(self) -> Self
Disable host key verification entirely (insecure — use only for testing).
Sourcepub fn jumphost(
self,
host: impl Into<String>,
port: u16,
username: impl Into<String>,
password: impl Into<String>,
) -> Self
pub fn jumphost( self, host: impl Into<String>, port: u16, username: impl Into<String>, password: impl Into<String>, ) -> Self
Route the connection through an SSH jumphost (bastion host).
The library will first SSH into the jumphost, open a direct-tcpip
channel to the target device, then establish the NETCONF SSH session
over that tunnel. The jumphost inherits all SSH settings
(host key verification, known_hosts, keepalive, etc.) from this config.
Sourcepub fn build(self) -> Config
pub fn build(self) -> Config
Examples found in repository?
7async fn main() -> netconf_rust::Result<()> {
8 let config = Config::builder()
9 .keepalive_interval(Duration::from_secs(10))
10 .keepalive_max(3)
11 .rpc_timeout(Duration::from_secs(30))
12 .connect_timeout(Duration::from_secs(10))
13 .nodelay(true)
14 .build();
15
16 let session = Arc::new(
17 Session::connect_with_config("localhost", 830, "netconf", "netconf", config).await?,
18 );
19
20 println!("Connected (session {})", session.session_id());
21
22 // Spawn a background task that fires when the session disconnects.
23 // This is the pattern you'd use in a long-running service to clean up
24 // sessions from a DashMap or similar structure.
25 let watcher_session = Arc::clone(&session);
26 let watcher = tokio::spawn(async move {
27 let reason = watcher_session.disconnected().await;
28 println!("Disconnect detected: {reason}");
29 // In a real service you'd remove the session from your map here:
30 // sessions.remove(&uuid);
31 });
32
33 // Normal operations — the watcher runs in the background
34 let config = session
35 .get_config(netconf_rust::Datastore::Running, None)
36 .await?;
37 println!("Got config ({} bytes)", config.len());
38
39 // You can also race an RPC against disconnect using select!
40 tokio::select! {
41 result = session.get_config(netconf_rust::Datastore::Running, None) => {
42 match result {
43 Ok(data) => println!("Got config again ({} bytes)", data.len()),
44 Err(e) => eprintln!("RPC failed: {e}"),
45 }
46 }
47 reason = session.disconnected() => {
48 eprintln!("Connection lost while waiting for RPC: {reason}");
49 }
50 }
51
52 // Graceful close — the watcher will fire with DisconnectReason::Eof
53 // after the server acknowledges the close and drops the connection.
54 session.close_session().await?;
55 println!("Session closed");
56
57 // Wait for the watcher to complete
58 let _ = watcher.await;
59
60 Ok(())
61}

Trait Implementations§
Source§impl Clone for ConfigBuilder
impl Clone for ConfigBuilder
Source§fn clone(&self) -> ConfigBuilder
fn clone(&self) -> ConfigBuilder
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more