tun_rs/platform/linux/device.rs
1use crate::platform::linux::offload::{
2 gso_none_checksum, gso_split, handle_gro, VirtioNetHdr, VIRTIO_NET_HDR_F_NEEDS_CSUM,
3 VIRTIO_NET_HDR_GSO_NONE, VIRTIO_NET_HDR_GSO_TCPV4, VIRTIO_NET_HDR_GSO_TCPV6,
4 VIRTIO_NET_HDR_GSO_UDP_L4, VIRTIO_NET_HDR_LEN,
5};
6use crate::platform::unix::device::{ctl, ctl_v6};
7use crate::platform::{ExpandBuffer, GROTable};
8use crate::{
9 builder::{DeviceConfig, Layer},
10 platform::linux::sys::*,
11 platform::{
12 unix::{ipaddr_to_sockaddr, sockaddr_union, Fd, Tun},
13 ETHER_ADDR_LEN,
14 },
15 ToIpv4Address, ToIpv4Netmask, ToIpv6Address, ToIpv6Netmask,
16};
17use ipnet::IpNet;
18use libc::{
19 self, c_char, c_short, ifreq, in6_ifreq, ARPHRD_ETHER, IFF_MULTI_QUEUE, IFF_NO_PI, IFF_RUNNING,
20 IFF_TAP, IFF_TUN, IFF_UP, IFNAMSIZ, O_RDWR,
21};
22use std::net::Ipv6Addr;
23use std::sync::{Arc, Mutex};
24use std::{
25 ffi::CString,
26 io, mem,
27 net::{IpAddr, Ipv4Addr},
28 os::unix::io::{AsRawFd, RawFd},
29 ptr,
30};
31
/// Size of the `ifreq.ifr_ifru` union. Passed to `ipaddr_to_sockaddr` so the
/// entire union is overwritten when a sockaddr is written into it, leaving no
/// stale bytes from a previous request.
const OVERWRITE_SIZE: usize = mem::size_of::<libc::__c_anonymous_ifr_ifru>();
33
/// A TUN device using the TUN/TAP Linux driver.
pub struct DeviceImpl {
    /// Wrapper around the `/dev/net/tun` file descriptor for this queue.
    pub(crate) tun: Tun,
    /// True when the queue was opened with `IFF_VNET_HDR`, i.e. every read and
    /// write carries a virtio-net header and TCP GSO offloads are active.
    pub(crate) vnet_hdr: bool,
    /// True when the kernel additionally accepted the UDP (USO) offload bits
    /// (added in Linux v6.2); only meaningful when `vnet_hdr` is true.
    pub(crate) udp_gso: bool,
    /// Flags handed to TUNSETIFF at creation; reused by `try_clone` to open
    /// additional multi-queue fds with identical settings.
    flags: c_short,
    /// Serializes configuration ioctls; shared between all clones of a device.
    pub(crate) op_lock: Arc<Mutex<()>>,
}
42
43impl DeviceImpl {
    /// Create a new `Device` for the given `Configuration`.
    ///
    /// Opens `/dev/net/tun`, issues TUNSETIFF with flags derived from the
    /// configuration (layer, packet information, multi-queue, offload) and,
    /// when offload is requested, attempts to enable TCP and then UDP
    /// segmentation offloads via TUNSETOFFLOAD.
    pub(crate) fn new(config: DeviceConfig) -> std::io::Result<Self> {
        // Validate the requested device name: must be NUL-terminatable and fit
        // in the kernel's IFNAMSIZ-byte name buffer (terminator included).
        let dev_name = match config.dev_name.as_ref() {
            Some(tun_name) => {
                let tun_name = CString::new(tun_name.clone())?;

                if tun_name.as_bytes_with_nul().len() > IFNAMSIZ {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidInput,
                        "device name too long",
                    ));
                }

                Some(tun_name)
            }

            None => None,
        };
        unsafe {
            let mut req: ifreq = mem::zeroed();

            if let Some(dev_name) = dev_name.as_ref() {
                // Copy the name including its NUL terminator into ifr_name.
                ptr::copy_nonoverlapping(
                    dev_name.as_ptr() as *const c_char,
                    req.ifr_name.as_mut_ptr(),
                    dev_name.as_bytes_with_nul().len(),
                );
            }
            let multi_queue = config.multi_queue.unwrap_or(false);
            let device_type: c_short = config.layer.unwrap_or(Layer::L3).into();
            let iff_no_pi = IFF_NO_PI as c_short;
            let iff_vnet_hdr = libc::IFF_VNET_HDR as c_short;
            let iff_multi_queue = IFF_MULTI_QUEUE as c_short;
            let packet_information = config.packet_information.unwrap_or(false);
            let offload = config.offload.unwrap_or(false);
            // IFF_TUN/IFF_TAP plus the optional feature flags for TUNSETIFF.
            req.ifr_ifru.ifru_flags = device_type
                | if packet_information { 0 } else { iff_no_pi }
                | if multi_queue { iff_multi_queue } else { 0 }
                | if offload { iff_vnet_hdr } else { 0 };

            let fd = libc::open(
                c"/dev/net/tun".as_ptr() as *const _,
                O_RDWR | libc::O_CLOEXEC,
                0,
            );
            // Fd::new rejects a negative fd, turning an open(2) failure into Err.
            let tun_fd = Fd::new(fd)?;
            if let Err(err) = tunsetiff(tun_fd.inner, &mut req as *mut _ as *mut _) {
                return Err(io::Error::from(err));
            }
            let (vnet_hdr, udp_gso) = if offload && libc::IFF_VNET_HDR != 0 {
                // tunTCPOffloads were added in Linux v2.6. We require their support if IFF_VNET_HDR is set.
                let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
                let tun_udp_offloads = libc::TUN_F_USO4 | libc::TUN_F_USO6;
                if let Err(err) = tunsetoffload(tun_fd.inner, tun_tcp_offloads as _) {
                    log::warn!("unsupported offload: {err:?}");
                    (false, false)
                } else {
                    // tunUDPOffloads were added in Linux v6.2. We do not return an
                    // error if they are unsupported at runtime.
                    let rs =
                        tunsetoffload(tun_fd.inner, (tun_tcp_offloads | tun_udp_offloads) as _);
                    (true, rs.is_ok())
                }
            } else {
                (false, false)
            };

            let device = DeviceImpl {
                tun: Tun::new(tun_fd),
                vnet_hdr,
                udp_gso,
                flags: req.ifr_ifru.ifru_flags,
                op_lock: Arc::new(Mutex::new(())),
            };
            Ok(device)
        }
    }
121 unsafe fn set_tcp_offloads(&self) -> io::Result<()> {
122 let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
123 tunsetoffload(self.as_raw_fd(), tun_tcp_offloads as _)
124 .map(|_| ())
125 .map_err(|e| e.into())
126 }
127 unsafe fn set_tcp_udp_offloads(&self) -> io::Result<()> {
128 let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
129 let tun_udp_offloads = libc::TUN_F_USO4 | libc::TUN_F_USO6;
130 tunsetoffload(self.as_raw_fd(), (tun_tcp_offloads | tun_udp_offloads) as _)
131 .map(|_| ())
132 .map_err(|e| e.into())
133 }
134 pub(crate) fn from_tun(tun: Tun) -> io::Result<Self> {
135 Ok(Self {
136 tun,
137 vnet_hdr: false,
138 udp_gso: false,
139 flags: 0,
140 op_lock: Arc::new(Mutex::new(())),
141 })
142 }
143
144 /// # Prerequisites
145 /// - The `IFF_MULTI_QUEUE` flag must be enabled.
146 /// - The system must support network interface multi-queue functionality.
147 ///
148 /// # Description
149 /// When multi-queue is enabled, create a new queue by duplicating an existing one.
150 pub(crate) fn try_clone(&self) -> io::Result<DeviceImpl> {
151 let flags = self.flags;
152 if flags & (IFF_MULTI_QUEUE as c_short) != IFF_MULTI_QUEUE as c_short {
153 return Err(io::Error::new(
154 io::ErrorKind::Unsupported,
155 "iff_multi_queue not enabled",
156 ));
157 }
158 unsafe {
159 let mut req = self.request()?;
160 req.ifr_ifru.ifru_flags = flags;
161 let fd = libc::open(
162 c"/dev/net/tun".as_ptr() as *const _,
163 O_RDWR | libc::O_CLOEXEC,
164 );
165 let tun_fd = Fd::new(fd)?;
166 if let Err(err) = tunsetiff(tun_fd.inner, &mut req as *mut _ as *mut _) {
167 return Err(io::Error::from(err));
168 }
169 let dev = DeviceImpl {
170 tun: Tun::new(tun_fd),
171 vnet_hdr: self.vnet_hdr,
172 udp_gso: self.udp_gso,
173 flags,
174 op_lock: self.op_lock.clone(),
175 };
176 if dev.vnet_hdr {
177 if dev.udp_gso {
178 dev.set_tcp_udp_offloads()?
179 } else {
180 dev.set_tcp_offloads()?;
181 }
182 }
183
184 Ok(dev)
185 }
186 }
187 /// Returns whether UDP Generic Segmentation Offload (GSO) is enabled.
188 ///
189 /// This is determined by the `udp_gso` flag in the device.
190 pub fn udp_gso(&self) -> bool {
191 let _guard = self.op_lock.lock().unwrap();
192 self.udp_gso
193 }
194 /// Returns whether TCP Generic Segmentation Offload (GSO) is enabled.
195 ///
196 /// In this implementation, this is represented by the `vnet_hdr` flag.
197 pub fn tcp_gso(&self) -> bool {
198 let _guard = self.op_lock.lock().unwrap();
199 self.vnet_hdr
200 }
201 /// Sets the transmit queue length for the network interface.
202 ///
203 /// This method constructs an interface request (`ifreq`) structure,
204 /// assigns the desired transmit queue length to the `ifru_metric` field,
205 /// and calls the `change_tx_queue_len` function using the control file descriptor.
206 /// If the underlying operation fails, an I/O error is returned.
207 pub fn set_tx_queue_len(&self, tx_queue_len: u32) -> io::Result<()> {
208 let _guard = self.op_lock.lock().unwrap();
209 unsafe {
210 let mut ifreq = self.request()?;
211 ifreq.ifr_ifru.ifru_metric = tx_queue_len as _;
212 if let Err(err) = change_tx_queue_len(ctl()?.as_raw_fd(), &ifreq) {
213 return Err(io::Error::from(err));
214 }
215 }
216 Ok(())
217 }
218 /// Retrieves the current transmit queue length for the network interface.
219 ///
220 /// This function constructs an interface request structure and calls `tx_queue_len`
221 /// to populate it with the current transmit queue length. The value is then returned.
222 pub fn tx_queue_len(&self) -> io::Result<u32> {
223 let _guard = self.op_lock.lock().unwrap();
224 unsafe {
225 let mut ifreq = self.request()?;
226 if let Err(err) = tx_queue_len(ctl()?.as_raw_fd(), &mut ifreq) {
227 return Err(io::Error::from(err));
228 }
229 Ok(ifreq.ifr_ifru.ifru_metric as _)
230 }
231 }
232 /// Make the device persistent.
233 ///
234 /// By default, TUN/TAP devices are destroyed when the process exits.
235 /// Calling this method makes the device persist after the program terminates,
236 /// allowing it to be reused by other processes.
237 ///
238 /// # Example
239 ///
240 /// ```no_run
241 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
242 /// # {
243 /// use tun_rs::DeviceBuilder;
244 ///
245 /// let dev = DeviceBuilder::new()
246 /// .name("persistent-tun")
247 /// .ipv4("10.0.0.1", 24, None)
248 /// .build_sync()?;
249 ///
250 /// // Make the device persistent so it survives after program exit
251 /// dev.persist()?;
252 /// println!("Device will persist after program exits");
253 /// # }
254 /// # Ok::<(), std::io::Error>(())
255 /// ```
256 pub fn persist(&self) -> io::Result<()> {
257 let _guard = self.op_lock.lock().unwrap();
258 unsafe {
259 if let Err(err) = tunsetpersist(self.as_raw_fd(), &1) {
260 Err(io::Error::from(err))
261 } else {
262 Ok(())
263 }
264 }
265 }
266
267 /// Set the owner (UID) of the device.
268 ///
269 /// This allows non-root users to access the TUN/TAP device.
270 ///
271 /// # Example
272 ///
273 /// ```no_run
274 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
275 /// # {
276 /// use tun_rs::DeviceBuilder;
277 ///
278 /// let dev = DeviceBuilder::new()
279 /// .ipv4("10.0.0.1", 24, None)
280 /// .build_sync()?;
281 ///
282 /// // Set ownership to UID 1000 (typical first user on Linux)
283 /// dev.user(1000)?;
284 /// println!("Device ownership set to UID 1000");
285 /// # }
286 /// # Ok::<(), std::io::Error>(())
287 /// ```
288 pub fn user(&self, value: i32) -> io::Result<()> {
289 let _guard = self.op_lock.lock().unwrap();
290 unsafe {
291 if let Err(err) = tunsetowner(self.as_raw_fd(), &value) {
292 Err(io::Error::from(err))
293 } else {
294 Ok(())
295 }
296 }
297 }
298
299 /// Set the group (GID) of the device.
300 ///
301 /// This allows members of a specific group to access the TUN/TAP device.
302 ///
303 /// # Example
304 ///
305 /// ```no_run
306 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
307 /// # {
308 /// use tun_rs::DeviceBuilder;
309 ///
310 /// let dev = DeviceBuilder::new()
311 /// .ipv4("10.0.0.1", 24, None)
312 /// .build_sync()?;
313 ///
314 /// // Set group ownership to GID 1000
315 /// dev.group(1000)?;
316 /// println!("Device group ownership set to GID 1000");
317 /// # }
318 /// # Ok::<(), std::io::Error>(())
319 /// ```
320 pub fn group(&self, value: i32) -> io::Result<()> {
321 let _guard = self.op_lock.lock().unwrap();
322 unsafe {
323 if let Err(err) = tunsetgroup(self.as_raw_fd(), &value) {
324 Err(io::Error::from(err))
325 } else {
326 Ok(())
327 }
328 }
329 }
330 /// Sends multiple packets in a batch with GRO (Generic Receive Offload) coalescing.
331 ///
332 /// This method allows efficient transmission of multiple packets by batching them together
333 /// and applying GRO optimizations. When offload is enabled, packets may be coalesced
334 /// to reduce system call overhead and improve throughput.
335 ///
336 /// # Arguments
337 ///
338 /// * `gro_table` - A mutable reference to a [`GROTable`] that manages packet coalescing state.
339 /// This table can be reused across multiple calls to amortize allocation overhead.
340 /// * `bufs` - A mutable slice of buffers containing the packets to send. Each buffer must
341 /// implement the [`ExpandBuffer`] trait.
342 /// * `offset` - The byte offset within each buffer where the packet data begins.
343 /// Must be >= `VIRTIO_NET_HDR_LEN` to accommodate the virtio network header when offload is enabled.
344 ///
345 /// # Returns
346 ///
347 /// Returns the total number of bytes successfully sent, or an I/O error.
348 ///
349 /// # Example
350 ///
351 /// ```no_run
352 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
353 /// # {
354 /// use tun_rs::{DeviceBuilder, GROTable, VIRTIO_NET_HDR_LEN};
355 ///
356 /// let dev = DeviceBuilder::new()
357 /// .ipv4("10.0.0.1", 24, None)
358 /// .with(|builder| {
359 /// builder.offload(true) // Enable offload for GRO
360 /// })
361 /// .build_sync()?;
362 ///
363 /// let mut gro_table = GROTable::default();
364 /// let offset = VIRTIO_NET_HDR_LEN;
365 ///
366 /// // Prepare packets to send
367 /// let mut packet1 = vec![0u8; offset + 100]; // VIRTIO_NET_HDR + packet data
368 /// let mut packet2 = vec![0u8; offset + 200];
369 /// // Fill in packet data at offset...
370 ///
371 /// let mut bufs = vec![packet1, packet2];
372 ///
373 /// // Send all packets in one batch
374 /// let bytes_sent = dev.send_multiple(&mut gro_table, &mut bufs, offset)?;
375 /// println!("Sent {} bytes across {} packets", bytes_sent, bufs.len());
376 /// # }
377 /// # Ok::<(), std::io::Error>(())
378 /// ```
379 ///
380 /// # Platform
381 ///
382 /// This method is only available on Linux.
383 ///
384 /// # Performance Notes
385 ///
386 /// - Use `IDEAL_BATCH_SIZE` for optimal batch size (typically 128 packets)
387 /// - Reuse the same `GROTable` instance across calls to avoid allocations
388 /// - Enable offload via `.offload(true)` in `DeviceBuilder` for best performance
389 pub fn send_multiple<B: ExpandBuffer>(
390 &self,
391 gro_table: &mut GROTable,
392 bufs: &mut [B],
393 offset: usize,
394 ) -> io::Result<usize> {
395 self.send_multiple0(gro_table, bufs, offset, |tun, buf| tun.send(buf))
396 }
    /// Shared implementation of [`DeviceImpl::send_multiple`]: coalesces (GRO)
    /// the batch when offload is enabled, then writes each resulting packet
    /// through `write_f`.
    ///
    /// `offset` points at the packet data in each buffer; with offload enabled
    /// it must be >= `VIRTIO_NET_HDR_LEN` so the virtio header fits before it.
    pub(crate) fn send_multiple0<B: ExpandBuffer, W: FnMut(&Tun, &[u8]) -> io::Result<usize>>(
        &self,
        gro_table: &mut GROTable,
        bufs: &mut [B],
        mut offset: usize,
        mut write_f: W,
    ) -> io::Result<usize> {
        gro_table.reset();
        if self.vnet_hdr {
            // Coalesce the batch; indices of buffers that still need to be
            // written are collected into `gro_table.to_write`.
            handle_gro(
                bufs,
                offset,
                &mut gro_table.tcp_gro_table,
                &mut gro_table.udp_gro_table,
                self.udp_gso,
                &mut gro_table.to_write,
            )?;
            // The virtio header sits immediately before the packet data, so
            // back the write offset up to include it in the write.
            offset -= VIRTIO_NET_HDR_LEN;
        } else {
            // No offload: every buffer is written as-is, in order.
            for i in 0..bufs.len() {
                gro_table.to_write.push(i);
            }
        }

        let mut total = 0;
        let mut err = Ok(());
        for buf_idx in &gro_table.to_write {
            match write_f(&self.tun, &bufs[*buf_idx].as_ref()[offset..]) {
                Ok(n) => {
                    total += n;
                }
                Err(e) => {
                    // EBADFD means the fd itself is unusable: abort the batch
                    // immediately. Other errors are remembered but the rest of
                    // the batch is still attempted.
                    if let Some(code) = e.raw_os_error() {
                        if libc::EBADFD == code {
                            return Err(e);
                        }
                    }
                    err = Err(e)
                }
            }
        }
        // Report the last non-fatal error (if any), otherwise the byte total.
        err?;
        Ok(total)
    }
441 /// Receives multiple packets in a batch with GSO (Generic Segmentation Offload) splitting.
442 ///
443 /// When offload is enabled, this method can receive large GSO packets from the TUN device
444 /// and automatically split them into MTU-sized segments, significantly improving receive
445 /// performance for high-bandwidth traffic.
446 ///
447 /// # Arguments
448 ///
449 /// * `original_buffer` - A mutable buffer to store the raw received data, including the
450 /// virtio network header and the potentially large GSO packet. Recommended size is
451 /// `VIRTIO_NET_HDR_LEN + 65535` bytes.
452 /// * `bufs` - A mutable slice of buffers to store the segmented packets. Each buffer will
453 /// receive one MTU-sized packet after GSO splitting.
454 /// * `sizes` - A mutable slice to store the actual size of each packet in `bufs`.
455 /// Must have the same length as `bufs`.
456 /// * `offset` - The byte offset within each output buffer where packet data should be written.
457 /// This allows for pre-allocated header space.
458 ///
459 /// # Returns
460 ///
461 /// Returns the number of packets successfully received and split, or an I/O error.
462 ///
463 /// # Example
464 ///
465 /// ```no_run
466 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
467 /// # {
468 /// use tun_rs::{DeviceBuilder, VIRTIO_NET_HDR_LEN, IDEAL_BATCH_SIZE};
469 ///
470 /// let dev = DeviceBuilder::new()
471 /// .ipv4("10.0.0.1", 24, None)
472 /// .with(|builder| {
473 /// builder.offload(true) // Enable offload for GSO
474 /// })
475 /// .build_sync()?;
476 ///
477 /// // Buffer for the raw received packet (with virtio header)
478 /// let mut original_buffer = vec![0u8; VIRTIO_NET_HDR_LEN + 65535];
479 ///
480 /// // Output buffers for segmented packets
481 /// let mut bufs = vec![vec![0u8; 1500]; IDEAL_BATCH_SIZE];
482 /// let mut sizes = vec![0usize; IDEAL_BATCH_SIZE];
483 /// let offset = 0;
484 ///
485 /// loop {
486 /// // Receive and segment packets
487 /// let num_packets = dev.recv_multiple(
488 /// &mut original_buffer,
489 /// &mut bufs,
490 /// &mut sizes,
491 /// offset
492 /// )?;
493 ///
494 /// // Process each segmented packet
495 /// for i in 0..num_packets {
496 /// let packet = &bufs[i][offset..offset + sizes[i]];
497 /// println!("Received packet {}: {} bytes", i, sizes[i]);
498 /// // Process packet...
499 /// }
500 /// }
501 /// # }
502 /// # Ok::<(), std::io::Error>(())
503 /// ```
504 ///
505 /// # Platform
506 ///
507 /// This method is only available on Linux.
508 ///
509 /// # Performance Notes
510 ///
511 /// - Use `IDEAL_BATCH_SIZE` (128) for the number of output buffers
512 /// - A single `recv_multiple` call may return multiple MTU-sized packets from one large GSO packet
513 /// - The performance benefit is most noticeable with TCP traffic using large send/receive windows
514 pub fn recv_multiple<B: AsRef<[u8]> + AsMut<[u8]>>(
515 &self,
516 original_buffer: &mut [u8],
517 bufs: &mut [B],
518 sizes: &mut [usize],
519 offset: usize,
520 ) -> io::Result<usize> {
521 self.recv_multiple0(original_buffer, bufs, sizes, offset, |tun, buf| {
522 tun.recv(buf)
523 })
524 }
    /// Shared implementation of [`DeviceImpl::recv_multiple`]: performs one
    /// read via `read_f` and, when offload is enabled, splits the (possibly
    /// larger-than-MTU) GSO payload into `bufs`/`sizes`.
    pub(crate) fn recv_multiple0<
        B: AsRef<[u8]> + AsMut<[u8]>,
        R: Fn(&Tun, &mut [u8]) -> io::Result<usize>,
    >(
        &self,
        original_buffer: &mut [u8],
        bufs: &mut [B],
        sizes: &mut [usize],
        offset: usize,
        read_f: R,
    ) -> io::Result<usize> {
        // `sizes[i]` must exist for every `bufs[i]` the split may fill.
        if bufs.is_empty() || bufs.len() != sizes.len() {
            return Err(io::Error::other("bufs error"));
        }
        if self.vnet_hdr {
            // Offload path: the kernel prepends a virtio-net header that
            // describes how the payload must be segmented.
            let len = read_f(&self.tun, original_buffer)?;
            if len <= VIRTIO_NET_HDR_LEN {
                Err(io::Error::other(format!(
                    "length of packet ({len}) <= VIRTIO_NET_HDR_LEN ({VIRTIO_NET_HDR_LEN})",
                )))?
            }
            let hdr = VirtioNetHdr::decode(&original_buffer[..VIRTIO_NET_HDR_LEN])?;
            self.handle_virtio_read(
                hdr,
                &mut original_buffer[VIRTIO_NET_HDR_LEN..len],
                bufs,
                sizes,
                offset,
            )
        } else {
            // No offload: one read yields exactly one packet into bufs[0].
            let len = read_f(&self.tun, &mut bufs[0].as_mut()[offset..])?;
            sizes[0] = len;
            Ok(1)
        }
    }
    /// https://github.com/WireGuard/wireguard-go/blob/12269c2761734b15625017d8565745096325392f/tun/tun_linux.go#L375
    /// handleVirtioRead splits in into bufs, leaving offset bytes at the front of
    /// each buffer. It mutates sizes to reflect the size of each element of bufs,
    /// and returns the number of packets read.
    ///
    /// Ported from wireguard-go's `handleVirtioRead` (link above); validation
    /// order and error conditions intentionally mirror that implementation.
    pub(crate) fn handle_virtio_read<B: AsRef<[u8]> + AsMut<[u8]>>(
        &self,
        mut hdr: VirtioNetHdr,
        input: &mut [u8],
        bufs: &mut [B],
        sizes: &mut [usize],
        offset: usize,
    ) -> io::Result<usize> {
        let len = input.len();
        if hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE {
            if hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM != 0 {
                // This means CHECKSUM_PARTIAL in skb context. We are responsible
                // for computing the checksum starting at hdr.csumStart and placing
                // at hdr.csumOffset.
                gso_none_checksum(input, hdr.csum_start, hdr.csum_offset);
            }
            // Not a GSO packet: copy it through unchanged as a single packet.
            if bufs[0].as_ref()[offset..].len() < len {
                Err(io::Error::other(format!(
                    "read len {len} overflows bufs element len {}",
                    bufs[0].as_ref().len()
                )))?
            }
            sizes[0] = len;
            bufs[0].as_mut()[offset..offset + len].copy_from_slice(input);
            return Ok(1);
        }
        // Only TCPv4/TCPv6/UDP GSO types are supported beyond this point.
        if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV4
            && hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV6
            && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
        {
            Err(io::Error::other(format!(
                "unsupported virtio GSO type: {}",
                hdr.gso_type
            )))?
        }
        // Cross-check the IP version nibble against the declared GSO type.
        let ip_version = input[0] >> 4;
        match ip_version {
            4 => {
                if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV4
                    && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
                {
                    Err(io::Error::other(format!(
                        "ip header version: 4, GSO type: {}",
                        hdr.gso_type
                    )))?
                }
            }
            6 => {
                if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV6
                    && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
                {
                    Err(io::Error::other(format!(
                        "ip header version: 6, GSO type: {}",
                        hdr.gso_type
                    )))?
                }
            }
            ip_version => Err(io::Error::other(format!(
                "invalid ip header version: {ip_version}"
            )))?,
        }
        // Don't trust hdr.hdrLen from the kernel as it can be equal to the length
        // of the entire first packet when the kernel is handling it as part of a
        // FORWARD path. Instead, parse the transport header length and add it onto
        // csumStart, which is synonymous for IP header length.
        if hdr.gso_type == VIRTIO_NET_HDR_GSO_UDP_L4 {
            // UDP header is always 8 bytes.
            hdr.hdr_len = hdr.csum_start + 8
        } else {
            // TCP: data-offset field lives 12 bytes into the TCP header.
            if len <= hdr.csum_start as usize + 12 {
                Err(io::Error::other("packet is too short"))?
            }

            let tcp_h_len = ((input[hdr.csum_start as usize + 12] as u16) >> 4) * 4;
            if !(20..=60).contains(&tcp_h_len) {
                // A TCP header must be between 20 and 60 bytes in length.
                Err(io::Error::other(format!(
                    "tcp header len is invalid: {tcp_h_len}"
                )))?
            }
            hdr.hdr_len = hdr.csum_start + tcp_h_len
        }
        // Sanity-check the reconstructed header length against the payload.
        if len < hdr.hdr_len as usize {
            Err(io::Error::other(format!(
                "length of packet ({len}) < virtioNetHdr.hdr_len ({})",
                hdr.hdr_len
            )))?
        }
        if hdr.hdr_len < hdr.csum_start {
            Err(io::Error::other(format!(
                "virtioNetHdr.hdrLen ({}) < virtioNetHdr.csumStart ({})",
                hdr.hdr_len, hdr.csum_start
            )))?
        }
        // The checksum field (2 bytes) must lie entirely inside the packet.
        let c_sum_at = (hdr.csum_start + hdr.csum_offset) as usize;
        if c_sum_at + 1 >= len {
            Err(io::Error::other(format!(
                "end of checksum offset ({}) exceeds packet length ({len})",
                c_sum_at + 1,
            )))?
        }
        gso_split(input, hdr, bufs, sizes, offset, ip_version == 6)
    }
666 pub fn remove_address_v6_impl(&self, addr: Ipv6Addr, prefix: u8) -> io::Result<()> {
667 unsafe {
668 let if_index = self.if_index_impl()?;
669 let ctl = ctl_v6()?;
670 let mut ifrv6: in6_ifreq = mem::zeroed();
671 ifrv6.ifr6_ifindex = if_index as i32;
672 ifrv6.ifr6_prefixlen = prefix as _;
673 ifrv6.ifr6_addr = sockaddr_union::from(std::net::SocketAddr::new(addr.into(), 0))
674 .addr6
675 .sin6_addr;
676 if let Err(err) = siocdifaddr_in6(ctl.as_raw_fd(), &ifrv6) {
677 return Err(io::Error::from(err));
678 }
679 }
680 Ok(())
681 }
682}
683
684impl DeviceImpl {
    /// Prepare a new request.
    ///
    /// Builds an `ifreq` keyed by this interface's current name; the caller
    /// fills in the `ifr_ifru` union before issuing an ioctl.
    unsafe fn request(&self) -> io::Result<ifreq> {
        request(&self.name_impl()?)
    }
689 fn set_address_v4(&self, addr: Ipv4Addr) -> io::Result<()> {
690 unsafe {
691 let mut req = self.request()?;
692 ipaddr_to_sockaddr(addr, 0, &mut req.ifr_ifru.ifru_addr, OVERWRITE_SIZE);
693 if let Err(err) = siocsifaddr(ctl()?.as_raw_fd(), &req) {
694 return Err(io::Error::from(err));
695 }
696 }
697 Ok(())
698 }
699 fn set_netmask(&self, value: Ipv4Addr) -> io::Result<()> {
700 unsafe {
701 let mut req = self.request()?;
702 ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_netmask, OVERWRITE_SIZE);
703 if let Err(err) = siocsifnetmask(ctl()?.as_raw_fd(), &req) {
704 return Err(io::Error::from(err));
705 }
706 Ok(())
707 }
708 }
709
710 fn set_destination(&self, value: Ipv4Addr) -> io::Result<()> {
711 unsafe {
712 let mut req = self.request()?;
713 ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_dstaddr, OVERWRITE_SIZE);
714 if let Err(err) = siocsifdstaddr(ctl()?.as_raw_fd(), &req) {
715 return Err(io::Error::from(err));
716 }
717 Ok(())
718 }
719 }
720
    /// Retrieves the name of the network interface.
    ///
    /// NOTE(review): delegates to the `name` helper from `sys`, which
    /// presumably queries the kernel via the tun fd — confirm in the sys module.
    pub(crate) fn name_impl(&self) -> io::Result<String> {
        unsafe { name(self.as_raw_fd()) }
    }
725
726 fn ifru_flags(&self) -> io::Result<i16> {
727 unsafe {
728 let ctl = ctl()?;
729 let mut req = self.request()?;
730
731 if let Err(err) = siocgifflags(ctl.as_raw_fd(), &mut req) {
732 return Err(io::Error::from(err));
733 }
734 Ok(req.ifr_ifru.ifru_flags)
735 }
736 }
737
738 fn remove_all_address_v4(&self) -> io::Result<()> {
739 let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
740 .map_err(io::Error::from)?;
741 let list = interface.addresses().map_err(io::Error::from)?;
742 for x in list {
743 if x.addr().is_ipv4() {
744 interface.remove_address(x).map_err(io::Error::from)?;
745 }
746 }
747 Ok(())
748 }
749}
750
// Public user-facing interface
752impl DeviceImpl {
753 /// Retrieves the name of the network interface.
754 pub fn name(&self) -> io::Result<String> {
755 let _guard = self.op_lock.lock().unwrap();
756 self.name_impl()
757 }
758 pub fn remove_address_v6(&self, addr: Ipv6Addr, prefix: u8) -> io::Result<()> {
759 let _guard = self.op_lock.lock().unwrap();
760 self.remove_address_v6_impl(addr, prefix)
761 }
762 /// Sets a new name for the network interface.
763 ///
764 /// This function converts the provided name into a C-compatible string,
765 /// checks that its length does not exceed the maximum allowed (IFNAMSIZ),
766 /// and then copies it into an interface request structure. It then uses a system call
767 /// (via `siocsifname`) to apply the new name.
768 ///
769 /// # Example
770 ///
771 /// ```no_run
772 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
773 /// # {
774 /// use tun_rs::DeviceBuilder;
775 ///
776 /// let dev = DeviceBuilder::new()
777 /// .name("tun0")
778 /// .ipv4("10.0.0.1", 24, None)
779 /// .build_sync()?;
780 ///
781 /// // Rename the device
782 /// dev.set_name("vpn-tun")?;
783 /// println!("Device renamed to vpn-tun");
784 /// # }
785 /// # Ok::<(), std::io::Error>(())
786 /// ```
787 pub fn set_name(&self, value: &str) -> io::Result<()> {
788 let _guard = self.op_lock.lock().unwrap();
789 unsafe {
790 let tun_name = CString::new(value)?;
791
792 if tun_name.as_bytes_with_nul().len() > IFNAMSIZ {
793 return Err(io::Error::new(io::ErrorKind::InvalidInput, "name too long"));
794 }
795
796 let mut req = self.request()?;
797 ptr::copy_nonoverlapping(
798 tun_name.as_ptr() as *const c_char,
799 req.ifr_ifru.ifru_newname.as_mut_ptr(),
800 value.len(),
801 );
802
803 if let Err(err) = siocsifname(ctl()?.as_raw_fd(), &req) {
804 return Err(io::Error::from(err));
805 }
806
807 Ok(())
808 }
809 }
810 /// Checks whether the network interface is currently running.
811 ///
812 /// The interface is considered running if both the IFF_UP and IFF_RUNNING flags are set.
813 pub fn is_running(&self) -> io::Result<bool> {
814 let _guard = self.op_lock.lock().unwrap();
815 let flags = self.ifru_flags()?;
816 Ok(flags & (IFF_UP | IFF_RUNNING) as c_short == (IFF_UP | IFF_RUNNING) as c_short)
817 }
818 /// Enables or disables the network interface.
819 ///
820 /// If `value` is true, the interface is enabled by setting the IFF_UP and IFF_RUNNING flags.
821 /// If false, the IFF_UP flag is cleared. The change is applied using a system call.
822 pub fn enabled(&self, value: bool) -> io::Result<()> {
823 let _guard = self.op_lock.lock().unwrap();
824 unsafe {
825 let ctl = ctl()?;
826 let mut req = self.request()?;
827
828 if let Err(err) = siocgifflags(ctl.as_raw_fd(), &mut req) {
829 return Err(io::Error::from(err));
830 }
831
832 if value {
833 req.ifr_ifru.ifru_flags |= (IFF_UP | IFF_RUNNING) as c_short;
834 } else {
835 req.ifr_ifru.ifru_flags &= !(IFF_UP as c_short);
836 }
837
838 if let Err(err) = siocsifflags(ctl.as_raw_fd(), &req) {
839 return Err(io::Error::from(err));
840 }
841
842 Ok(())
843 }
844 }
845 /// Retrieves the broadcast address of the network interface.
846 ///
847 /// This function populates an interface request with the broadcast address via a system call,
848 /// converts it into a sockaddr structure, and then extracts the IP address.
849 ///
850 /// # Example
851 ///
852 /// ```no_run
853 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
854 /// # {
855 /// use tun_rs::DeviceBuilder;
856 ///
857 /// let dev = DeviceBuilder::new()
858 /// .ipv4("10.0.0.1", 24, None)
859 /// .build_sync()?;
860 ///
861 /// // Get the broadcast address
862 /// let broadcast = dev.broadcast()?;
863 /// println!("Broadcast address: {}", broadcast);
864 /// # }
865 /// # Ok::<(), std::io::Error>(())
866 /// ```
867 pub fn broadcast(&self) -> io::Result<IpAddr> {
868 let _guard = self.op_lock.lock().unwrap();
869 unsafe {
870 let mut req = self.request()?;
871 if let Err(err) = siocgifbrdaddr(ctl()?.as_raw_fd(), &mut req) {
872 return Err(io::Error::from(err));
873 }
874 let sa = sockaddr_union::from(req.ifr_ifru.ifru_broadaddr);
875 Ok(std::net::SocketAddr::try_from(sa)?.ip())
876 }
877 }
878 /// Sets the broadcast address of the network interface.
879 ///
880 /// This function converts the given IP address into a sockaddr structure (with a specified overwrite size)
881 /// and then applies it to the interface via a system call.
882 pub fn set_broadcast(&self, value: IpAddr) -> io::Result<()> {
883 let _guard = self.op_lock.lock().unwrap();
884 unsafe {
885 let mut req = self.request()?;
886 ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_broadaddr, OVERWRITE_SIZE);
887 if let Err(err) = siocsifbrdaddr(ctl()?.as_raw_fd(), &req) {
888 return Err(io::Error::from(err));
889 }
890 Ok(())
891 }
892 }
893 /// Sets the IPv4 network address, netmask, and an optional destination address.
894 /// Remove all previous set IPv4 addresses and set the specified address.
895 ///
896 /// # Example
897 ///
898 /// ```no_run
899 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
900 /// # {
901 /// use tun_rs::DeviceBuilder;
902 ///
903 /// let dev = DeviceBuilder::new()
904 /// .ipv4("10.0.0.1", 24, None)
905 /// .build_sync()?;
906 ///
907 /// // Change the primary IPv4 address
908 /// dev.set_network_address("10.1.0.1", 24, None)?;
909 /// println!("Updated device address to 10.1.0.1/24");
910 /// # }
911 /// # Ok::<(), std::io::Error>(())
912 /// ```
913 pub fn set_network_address<IPv4: ToIpv4Address, Netmask: ToIpv4Netmask>(
914 &self,
915 address: IPv4,
916 netmask: Netmask,
917 destination: Option<IPv4>,
918 ) -> io::Result<()> {
919 let _guard = self.op_lock.lock().unwrap();
920 self.remove_all_address_v4()?;
921 self.set_address_v4(address.ipv4()?)?;
922 self.set_netmask(netmask.netmask()?)?;
923 if let Some(destination) = destination {
924 self.set_destination(destination.ipv4()?)?;
925 }
926 Ok(())
927 }
928 /// Add IPv4 network address and netmask to the interface.
929 ///
930 /// This allows multiple IPv4 addresses on a single TUN/TAP device.
931 ///
932 /// # Example
933 ///
934 /// ```no_run
935 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
936 /// # {
937 /// use tun_rs::DeviceBuilder;
938 ///
939 /// let dev = DeviceBuilder::new()
940 /// .ipv4("10.0.0.1", 24, None)
941 /// .build_sync()?;
942 ///
943 /// // Add additional IPv4 addresses
944 /// dev.add_address_v4("10.0.1.1", 24)?;
945 /// dev.add_address_v4("10.0.2.1", 24)?;
946 /// println!("Added multiple IPv4 addresses");
947 /// # }
948 /// # Ok::<(), std::io::Error>(())
949 /// ```
950 pub fn add_address_v4<IPv4: ToIpv4Address, Netmask: ToIpv4Netmask>(
951 &self,
952 address: IPv4,
953 netmask: Netmask,
954 ) -> io::Result<()> {
955 let _guard = self.op_lock.lock().unwrap();
956 let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
957 .map_err(io::Error::from)?;
958 interface
959 .add_address(IpNet::new_assert(address.ipv4()?.into(), netmask.prefix()?))
960 .map_err(io::Error::from)
961 }
    /// Removes an IP address from the interface.
    ///
    /// For IPv4 addresses, it iterates over the interface's current addresses
    /// and removes every entry that exactly matches `addr`.
    /// For IPv6 addresses, it retrieves the interface addresses by name and removes the matching address,
    /// taking into account its prefix length.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
    /// # {
    /// use std::net::IpAddr;
    /// use tun_rs::DeviceBuilder;
    ///
    /// let dev = DeviceBuilder::new()
    ///     .ipv4("10.0.0.1", 24, None)
    ///     .build_sync()?;
    ///
    /// // Add an additional address
    /// dev.add_address_v4("10.0.1.1", 24)?;
    ///
    /// // Later, remove it
    /// dev.remove_address("10.0.1.1".parse::<IpAddr>().unwrap())?;
    /// println!("Removed address 10.0.1.1");
    /// # }
    /// # Ok::<(), std::io::Error>(())
    /// ```
    pub fn remove_address(&self, addr: IpAddr) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        match addr {
            IpAddr::V4(_) => {
                // IPv4 path: delegate to netconfig and drop matching records.
                let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
                    .map_err(io::Error::from)?;
                let list = interface.addresses().map_err(io::Error::from)?;
                for x in list {
                    if x.addr() == addr {
                        interface.remove_address(x).map_err(io::Error::from)?;
                    }
                }
            }
            IpAddr::V6(addr_v6) => {
                // IPv6 path: recover the prefix length from the reported
                // netmask, then remove via the SIOCDIFADDR ioctl.
                let addrs = crate::platform::get_if_addrs_by_name(self.name_impl()?)?;
                for x in addrs {
                    if let Some(ip_addr) = x.address.ip_addr() {
                        if ip_addr == addr {
                            if let Some(netmask) = x.address.netmask() {
                                // NOTE(review): a non-contiguous mask falls
                                // back to prefix 0 — confirm this is intended.
                                let prefix = ipnet::ip_mask_to_prefix(netmask).unwrap_or(0);
                                self.remove_address_v6_impl(addr_v6, prefix)?
                            }
                        }
                    }
                }
            }
        }
        Ok(())
    }
1019 /// Adds an IPv6 address to the interface.
1020 ///
1021 /// This function creates an `in6_ifreq` structure, fills in the interface index,
1022 /// prefix length, and IPv6 address (converted into a sockaddr structure),
1023 /// and then applies it using a system call.
1024 ///
1025 /// # Example
1026 ///
1027 /// ```no_run
1028 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
1029 /// # {
1030 /// use tun_rs::DeviceBuilder;
1031 ///
1032 /// let dev = DeviceBuilder::new()
1033 /// .ipv4("10.0.0.1", 24, None)
1034 /// .build_sync()?;
1035 ///
1036 /// // Add IPv6 addresses
1037 /// dev.add_address_v6("fd00::1", 64)?;
1038 /// dev.add_address_v6("fd00::2", 64)?;
1039 /// println!("Added IPv6 addresses");
1040 /// # }
1041 /// # Ok::<(), std::io::Error>(())
1042 /// ```
1043 pub fn add_address_v6<IPv6: ToIpv6Address, Netmask: ToIpv6Netmask>(
1044 &self,
1045 addr: IPv6,
1046 netmask: Netmask,
1047 ) -> io::Result<()> {
1048 let _guard = self.op_lock.lock().unwrap();
1049 unsafe {
1050 let if_index = self.if_index_impl()?;
1051 let ctl = ctl_v6()?;
1052 let mut ifrv6: in6_ifreq = mem::zeroed();
1053 ifrv6.ifr6_ifindex = if_index as i32;
1054 ifrv6.ifr6_prefixlen = netmask.prefix()? as u32;
1055 ifrv6.ifr6_addr =
1056 sockaddr_union::from(std::net::SocketAddr::new(addr.ipv6()?.into(), 0))
1057 .addr6
1058 .sin6_addr;
1059 if let Err(err) = siocsifaddr_in6(ctl.as_raw_fd(), &ifrv6) {
1060 return Err(io::Error::from(err));
1061 }
1062 }
1063 Ok(())
1064 }
1065 /// Retrieves the current MTU (Maximum Transmission Unit) for the interface.
1066 ///
1067 /// This function constructs an interface request and uses a system call (via `siocgifmtu`)
1068 /// to obtain the MTU. The result is then converted to a u16.
1069 pub fn mtu(&self) -> io::Result<u16> {
1070 let _guard = self.op_lock.lock().unwrap();
1071 unsafe {
1072 let mut req = self.request()?;
1073
1074 if let Err(err) = siocgifmtu(ctl()?.as_raw_fd(), &mut req) {
1075 return Err(io::Error::from(err));
1076 }
1077
1078 req.ifr_ifru
1079 .ifru_mtu
1080 .try_into()
1081 .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("{e:?}")))
1082 }
1083 }
1084 /// Sets the MTU (Maximum Transmission Unit) for the interface.
1085 ///
1086 /// This function creates an interface request, sets the `ifru_mtu` field to the new value,
1087 /// and then applies it via a system call.
1088 ///
1089 /// # Example
1090 ///
1091 /// ```no_run
1092 /// # #[cfg(all(target_os = "linux", not(target_env = "ohos")))]
1093 /// # {
1094 /// use tun_rs::DeviceBuilder;
1095 ///
1096 /// let dev = DeviceBuilder::new()
1097 /// .ipv4("10.0.0.1", 24, None)
1098 /// .mtu(1400)
1099 /// .build_sync()?;
1100 ///
1101 /// // Change MTU to accommodate larger packets
1102 /// dev.set_mtu(9000)?; // Jumbo frames
1103 /// println!("MTU set to 9000 bytes");
1104 /// # }
1105 /// # Ok::<(), std::io::Error>(())
1106 /// ```
1107 pub fn set_mtu(&self, value: u16) -> io::Result<()> {
1108 let _guard = self.op_lock.lock().unwrap();
1109 unsafe {
1110 let mut req = self.request()?;
1111 req.ifr_ifru.ifru_mtu = value as i32;
1112
1113 if let Err(err) = siocsifmtu(ctl()?.as_raw_fd(), &req) {
1114 return Err(io::Error::from(err));
1115 }
1116 Ok(())
1117 }
1118 }
1119 /// Sets the MAC (hardware) address for the interface.
1120 ///
1121 /// This function constructs an interface request and copies the provided MAC address
1122 /// into the hardware address field. It then applies the change via a system call.
1123 /// This operation is typically supported only for TAP devices.
1124 pub fn set_mac_address(&self, eth_addr: [u8; ETHER_ADDR_LEN as usize]) -> io::Result<()> {
1125 let _guard = self.op_lock.lock().unwrap();
1126 unsafe {
1127 let mut req = self.request()?;
1128 req.ifr_ifru.ifru_hwaddr.sa_family = ARPHRD_ETHER;
1129 req.ifr_ifru.ifru_hwaddr.sa_data[0..ETHER_ADDR_LEN as usize]
1130 .copy_from_slice(eth_addr.map(|c| c as _).as_slice());
1131 if let Err(err) = siocsifhwaddr(ctl()?.as_raw_fd(), &req) {
1132 return Err(io::Error::from(err));
1133 }
1134 Ok(())
1135 }
1136 }
1137 /// Retrieves the MAC (hardware) address of the interface.
1138 ///
1139 /// This function queries the MAC address by the interface name using a helper function.
1140 /// An error is returned if the MAC address cannot be found.
1141 pub fn mac_address(&self) -> io::Result<[u8; ETHER_ADDR_LEN as usize]> {
1142 let _guard = self.op_lock.lock().unwrap();
1143 unsafe {
1144 let mut req = self.request()?;
1145
1146 siocgifhwaddr(ctl()?.as_raw_fd(), &mut req).map_err(io::Error::from)?;
1147
1148 let hw = &req.ifr_ifru.ifru_hwaddr.sa_data;
1149
1150 let mut mac = [0u8; ETHER_ADDR_LEN as usize];
1151 for (i, b) in hw.iter().take(6).enumerate() {
1152 mac[i] = *b as u8;
1153 }
1154
1155 Ok(mac)
1156 }
1157 }
1158}
1159
1160unsafe fn name(fd: RawFd) -> io::Result<String> {
1161 let mut req: ifreq = mem::zeroed();
1162 if let Err(err) = tungetiff(fd, &mut req as *mut _ as *mut _) {
1163 return Err(io::Error::from(err));
1164 }
1165 let c_str = std::ffi::CStr::from_ptr(req.ifr_name.as_ptr() as *const c_char);
1166 let tun_name = c_str.to_string_lossy().into_owned();
1167 Ok(tun_name)
1168}
1169
1170unsafe fn request(name: &str) -> io::Result<ifreq> {
1171 let mut req: ifreq = mem::zeroed();
1172 ptr::copy_nonoverlapping(
1173 name.as_ptr() as *const c_char,
1174 req.ifr_name.as_mut_ptr(),
1175 name.len(),
1176 );
1177 Ok(req)
1178}
1179
1180impl From<Layer> for c_short {
1181 fn from(layer: Layer) -> Self {
1182 match layer {
1183 Layer::L2 => IFF_TAP as c_short,
1184 Layer::L3 => IFF_TUN as c_short,
1185 }
1186 }
1187}