1use crate::platform::linux::offload::{
2 gso_none_checksum, gso_split, handle_gro, VirtioNetHdr, VIRTIO_NET_HDR_F_NEEDS_CSUM,
3 VIRTIO_NET_HDR_GSO_NONE, VIRTIO_NET_HDR_GSO_TCPV4, VIRTIO_NET_HDR_GSO_TCPV6,
4 VIRTIO_NET_HDR_GSO_UDP_L4, VIRTIO_NET_HDR_LEN,
5};
6use crate::platform::unix::device::{ctl, ctl_v6};
7use crate::platform::{ExpandBuffer, GROTable};
8use crate::{
9 builder::{DeviceConfig, Layer},
10 platform::linux::sys::*,
11 platform::{
12 unix::{ipaddr_to_sockaddr, sockaddr_union, Fd, Tun},
13 ETHER_ADDR_LEN,
14 },
15 ToIpv4Address, ToIpv4Netmask, ToIpv6Address, ToIpv6Netmask,
16};
17use ipnet::IpNet;
18use libc::{
19 self, c_char, c_short, ifreq, in6_ifreq, ARPHRD_ETHER, IFF_MULTI_QUEUE, IFF_NO_PI, IFF_RUNNING,
20 IFF_TAP, IFF_TUN, IFF_UP, IFNAMSIZ, O_RDWR,
21};
22use std::net::Ipv6Addr;
23use std::sync::{Arc, Mutex};
24use std::{
25 ffi::CString,
26 io, mem,
27 net::{IpAddr, Ipv4Addr},
28 os::unix::io::{AsRawFd, RawFd},
29 ptr,
30};
31
/// Size of the whole `ifreq.ifr_ifru` union; passed to `ipaddr_to_sockaddr`
/// so the entire union is overwritten when a sockaddr is written into it.
const OVERWRITE_SIZE: usize = mem::size_of::<libc::__c_anonymous_ifr_ifru>();
33
/// A Linux TUN/TAP device backed by a file descriptor opened on `/dev/net/tun`.
pub struct DeviceImpl {
    pub(crate) tun: Tun,
    // True when the fd was configured with IFF_VNET_HDR, i.e. every packet is
    // prefixed with a virtio-net header and GSO/GRO handling is active.
    pub(crate) vnet_hdr: bool,
    // True when the kernel accepted UDP segmentation offload (TUN_F_USO4/6).
    pub(crate) udp_gso: bool,
    // The ifr_flags used at creation time; consulted by `try_clone`.
    flags: c_short,
    // Serializes ioctl-based configuration operations across cloned queues.
    pub(crate) op_lock: Arc<Mutex<()>>,
}
42
43impl DeviceImpl {
    /// Creates a TUN/TAP device from `config` by opening `/dev/net/tun` and
    /// issuing TUNSETIFF (plus TUNSETOFFLOAD probing when offload is enabled).
    ///
    /// Returns `InvalidInput` when the requested name does not fit IFNAMSIZ.
    pub(crate) fn new(config: DeviceConfig) -> std::io::Result<Self> {
        // Validate the requested device name: including the trailing NUL it
        // must fit in the kernel's fixed-size ifr_name buffer.
        let dev_name = match config.dev_name.as_ref() {
            Some(tun_name) => {
                let tun_name = CString::new(tun_name.clone())?;

                if tun_name.as_bytes_with_nul().len() > IFNAMSIZ {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidInput,
                        "device name too long",
                    ));
                }

                Some(tun_name)
            }

            None => None,
        };
        unsafe {
            let mut req: ifreq = mem::zeroed();

            if let Some(dev_name) = dev_name.as_ref() {
                // Copy the name including its NUL; req is zeroed so the rest
                // of ifr_name stays NUL-terminated.
                ptr::copy_nonoverlapping(
                    dev_name.as_ptr() as *const c_char,
                    req.ifr_name.as_mut_ptr(),
                    dev_name.as_bytes_with_nul().len(),
                );
            }
            let multi_queue = config.multi_queue.unwrap_or(false);
            let device_type: c_short = config.layer.unwrap_or(Layer::L3).into();
            let iff_no_pi = IFF_NO_PI as c_short;
            let iff_vnet_hdr = libc::IFF_VNET_HDR as c_short;
            let iff_multi_queue = IFF_MULTI_QUEUE as c_short;
            let packet_information = config.packet_information.unwrap_or(false);
            let offload = config.offload.unwrap_or(false);
            // IFF_NO_PI suppresses the 4-byte packet-info prefix;
            // IFF_VNET_HDR prepends a virtio-net header used for GSO/GRO.
            req.ifr_ifru.ifru_flags = device_type
                | if packet_information { 0 } else { iff_no_pi }
                | if multi_queue { iff_multi_queue } else { 0 }
                | if offload { iff_vnet_hdr } else { 0 };

            let fd = libc::open(
                c"/dev/net/tun".as_ptr() as *const _,
                O_RDWR | libc::O_CLOEXEC,
                0,
            );
            // Fd::new rejects a negative fd and owns the descriptor from here.
            let tun_fd = Fd::new(fd)?;
            if let Err(err) = tunsetiff(tun_fd.inner, &mut req as *mut _ as *mut _) {
                return Err(io::Error::from(err));
            }
            // Probe offload support: TCP offloads first (required for using
            // vnet_hdr at all), then UDP GSO on top as best-effort.
            let (vnet_hdr, udp_gso) = if offload && libc::IFF_VNET_HDR != 0 {
                let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
                let tun_udp_offloads = libc::TUN_F_USO4 | libc::TUN_F_USO6;
                if let Err(err) = tunsetoffload(tun_fd.inner, tun_tcp_offloads as _) {
                    log::warn!("unsupported offload: {err:?}");
                    (false, false)
                } else {
                    // USO may be missing on older kernels; if this second call
                    // fails, TCP offload from the first call remains in effect.
                    let rs =
                        tunsetoffload(tun_fd.inner, (tun_tcp_offloads | tun_udp_offloads) as _);
                    (true, rs.is_ok())
                }
            } else {
                (false, false)
            };

            let device = DeviceImpl {
                tun: Tun::new(tun_fd),
                vnet_hdr,
                udp_gso,
                flags: req.ifr_ifru.ifru_flags,
                op_lock: Arc::new(Mutex::new(())),
            };
            Ok(device)
        }
    }
121 unsafe fn set_tcp_offloads(&self) -> io::Result<()> {
122 let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
123 tunsetoffload(self.as_raw_fd(), tun_tcp_offloads as _)
124 .map(|_| ())
125 .map_err(|e| e.into())
126 }
127 unsafe fn set_tcp_udp_offloads(&self) -> io::Result<()> {
128 let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
129 let tun_udp_offloads = libc::TUN_F_USO4 | libc::TUN_F_USO6;
130 tunsetoffload(self.as_raw_fd(), (tun_tcp_offloads | tun_udp_offloads) as _)
131 .map(|_| ())
132 .map_err(|e| e.into())
133 }
134 pub(crate) fn from_tun(tun: Tun) -> io::Result<Self> {
135 Ok(Self {
136 tun,
137 vnet_hdr: false,
138 udp_gso: false,
139 flags: 0,
140 op_lock: Arc::new(Mutex::new(())),
141 })
142 }
143
144 pub(crate) fn try_clone(&self) -> io::Result<DeviceImpl> {
151 let flags = self.flags;
152 if flags & (IFF_MULTI_QUEUE as c_short) != IFF_MULTI_QUEUE as c_short {
153 return Err(io::Error::new(
154 io::ErrorKind::Unsupported,
155 "iff_multi_queue not enabled",
156 ));
157 }
158 unsafe {
159 let mut req = self.request()?;
160 req.ifr_ifru.ifru_flags = flags;
161 let fd = libc::open(
162 c"/dev/net/tun".as_ptr() as *const _,
163 O_RDWR | libc::O_CLOEXEC,
164 );
165 let tun_fd = Fd::new(fd)?;
166 if let Err(err) = tunsetiff(tun_fd.inner, &mut req as *mut _ as *mut _) {
167 return Err(io::Error::from(err));
168 }
169 let dev = DeviceImpl {
170 tun: Tun::new(tun_fd),
171 vnet_hdr: self.vnet_hdr,
172 udp_gso: self.udp_gso,
173 flags,
174 op_lock: self.op_lock.clone(),
175 };
176 if dev.vnet_hdr {
177 if dev.udp_gso {
178 dev.set_tcp_udp_offloads()?
179 } else {
180 dev.set_tcp_offloads()?;
181 }
182 }
183
184 Ok(dev)
185 }
186 }
187 pub fn udp_gso(&self) -> bool {
191 let _guard = self.op_lock.lock().unwrap();
192 self.udp_gso
193 }
194 pub fn tcp_gso(&self) -> bool {
198 let _guard = self.op_lock.lock().unwrap();
199 self.vnet_hdr
200 }
201 pub fn set_tx_queue_len(&self, tx_queue_len: u32) -> io::Result<()> {
208 let _guard = self.op_lock.lock().unwrap();
209 unsafe {
210 let mut ifreq = self.request()?;
211 ifreq.ifr_ifru.ifru_metric = tx_queue_len as _;
212 if let Err(err) = change_tx_queue_len(ctl()?.as_raw_fd(), &ifreq) {
213 return Err(io::Error::from(err));
214 }
215 }
216 Ok(())
217 }
218 pub fn tx_queue_len(&self) -> io::Result<u32> {
223 let _guard = self.op_lock.lock().unwrap();
224 unsafe {
225 let mut ifreq = self.request()?;
226 if let Err(err) = tx_queue_len(ctl()?.as_raw_fd(), &mut ifreq) {
227 return Err(io::Error::from(err));
228 }
229 Ok(ifreq.ifr_ifru.ifru_metric as _)
230 }
231 }
232 pub fn persist(&self) -> io::Result<()> {
257 let _guard = self.op_lock.lock().unwrap();
258 unsafe {
259 if let Err(err) = tunsetpersist(self.as_raw_fd(), &1) {
260 Err(io::Error::from(err))
261 } else {
262 Ok(())
263 }
264 }
265 }
266
267 pub fn user(&self, value: i32) -> io::Result<()> {
289 let _guard = self.op_lock.lock().unwrap();
290 unsafe {
291 if let Err(err) = tunsetowner(self.as_raw_fd(), &value) {
292 Err(io::Error::from(err))
293 } else {
294 Ok(())
295 }
296 }
297 }
298
299 pub fn group(&self, value: i32) -> io::Result<()> {
321 let _guard = self.op_lock.lock().unwrap();
322 unsafe {
323 if let Err(err) = tunsetgroup(self.as_raw_fd(), &value) {
324 Err(io::Error::from(err))
325 } else {
326 Ok(())
327 }
328 }
329 }
330 pub fn send_multiple<B: ExpandBuffer>(
334 &self,
335 gro_table: &mut GROTable,
336 bufs: &mut [B],
337 offset: usize,
338 ) -> io::Result<usize> {
339 self.send_multiple0(gro_table, bufs, offset, |tun, buf| tun.send(buf))
340 }
    /// Shared implementation for batched sends.
    ///
    /// When vnet_hdr is active, packets are first coalesced via GRO and each
    /// buffer must have VIRTIO_NET_HDR_LEN bytes of headroom before `offset`
    /// for the virtio header; otherwise every buffer index is queued verbatim.
    /// Returns the total byte count reported by `write_f`.
    pub(crate) fn send_multiple0<B: ExpandBuffer, W: FnMut(&Tun, &[u8]) -> io::Result<usize>>(
        &self,
        gro_table: &mut GROTable,
        bufs: &mut [B],
        mut offset: usize,
        mut write_f: W,
    ) -> io::Result<usize> {
        gro_table.reset();
        if self.vnet_hdr {
            handle_gro(
                bufs,
                offset,
                &mut gro_table.tcp_gro_table,
                &mut gro_table.udp_gro_table,
                self.udp_gso,
                &mut gro_table.to_write,
            )?;
            // The virtio header now sits in front of the payload, so move the
            // write offset back to include it in each write.
            offset -= VIRTIO_NET_HDR_LEN;
        } else {
            for i in 0..bufs.len() {
                gro_table.to_write.push(i);
            }
        }

        let mut total = 0;
        let mut err = Ok(());
        for buf_idx in &gro_table.to_write {
            match write_f(&self.tun, &bufs[*buf_idx].as_ref()[offset..]) {
                Ok(n) => {
                    total += n;
                }
                Err(e) => {
                    // EBADFD means the fd itself is broken: abort right away.
                    // Other errors are remembered (last one wins) while the
                    // remaining packets are still attempted.
                    if let Some(code) = e.raw_os_error() {
                        if libc::EBADFD == code {
                            return Err(e);
                        }
                    }
                    err = Err(e)
                }
            }
        }
        err?;
        Ok(total)
    }
385 pub fn recv_multiple<B: AsRef<[u8]> + AsMut<[u8]>>(
392 &self,
393 original_buffer: &mut [u8],
394 bufs: &mut [B],
395 sizes: &mut [usize],
396 offset: usize,
397 ) -> io::Result<usize> {
398 self.recv_multiple0(original_buffer, bufs, sizes, offset, |tun, buf| {
399 tun.recv(buf)
400 })
401 }
    /// Shared implementation for batched receives.
    ///
    /// With vnet_hdr enabled, a single read may carry a GSO "super packet":
    /// it is read into `original_buffer`, the virtio header decoded, and the
    /// payload split across `bufs` (lengths stored in `sizes`). Without
    /// vnet_hdr, exactly one packet is read into `bufs[0]` at `offset`.
    /// Returns how many entries of `bufs` were filled.
    pub(crate) fn recv_multiple0<
        B: AsRef<[u8]> + AsMut<[u8]>,
        R: Fn(&Tun, &mut [u8]) -> io::Result<usize>,
    >(
        &self,
        original_buffer: &mut [u8],
        bufs: &mut [B],
        sizes: &mut [usize],
        offset: usize,
        read_f: R,
    ) -> io::Result<usize> {
        if bufs.is_empty() || bufs.len() != sizes.len() {
            return Err(io::Error::other("bufs error"));
        }
        if self.vnet_hdr {
            let len = read_f(&self.tun, original_buffer)?;
            // A valid read must contain the virtio header plus some payload.
            if len <= VIRTIO_NET_HDR_LEN {
                Err(io::Error::other(format!(
                    "length of packet ({len}) <= VIRTIO_NET_HDR_LEN ({VIRTIO_NET_HDR_LEN})",
                )))?
            }
            let hdr = VirtioNetHdr::decode(&original_buffer[..VIRTIO_NET_HDR_LEN])?;
            self.handle_virtio_read(
                hdr,
                &mut original_buffer[VIRTIO_NET_HDR_LEN..len],
                bufs,
                sizes,
                offset,
            )
        } else {
            let len = read_f(&self.tun, &mut bufs[0].as_mut()[offset..])?;
            sizes[0] = len;
            Ok(1)
        }
    }
    /// Validates a decoded virtio-net header against the payload and splits
    /// the payload into individual packets written to `bufs` (lengths in
    /// `sizes`).
    ///
    /// `input` is the read payload with the virtio header already stripped.
    /// Returns the number of packets produced, or an error when the header is
    /// inconsistent with the packet bytes.
    pub(crate) fn handle_virtio_read<B: AsRef<[u8]> + AsMut<[u8]>>(
        &self,
        mut hdr: VirtioNetHdr,
        input: &mut [u8],
        bufs: &mut [B],
        sizes: &mut [usize],
        offset: usize,
    ) -> io::Result<usize> {
        let len = input.len();
        if hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE {
            if hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM != 0 {
                // The kernel delegated checksum finalization to us; patch the
                // checksum field in place before handing the packet up.
                gso_none_checksum(input, hdr.csum_start, hdr.csum_offset);
            }
            // NOTE(review): the check uses the capacity after `offset`, while
            // the message reports the full element length — slightly
            // misleading but kept as-is.
            if bufs[0].as_ref()[offset..].len() < len {
                Err(io::Error::other(format!(
                    "read len {len} overflows bufs element len {}",
                    bufs[0].as_ref().len()
                )))?
            }
            sizes[0] = len;
            bufs[0].as_mut()[offset..offset + len].copy_from_slice(input);
            return Ok(1);
        }
        // Beyond this point only TCPv4/TCPv6/UDP GSO types are supported.
        if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV4
            && hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV6
            && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
        {
            Err(io::Error::other(format!(
                "unsupported virtio GSO type: {}",
                hdr.gso_type
            )))?
        }
        // The IP version is the top nibble of the first payload byte and must
        // agree with the GSO type claimed by the header.
        let ip_version = input[0] >> 4;
        match ip_version {
            4 => {
                if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV4
                    && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
                {
                    Err(io::Error::other(format!(
                        "ip header version: 4, GSO type: {}",
                        hdr.gso_type
                    )))?
                }
            }
            6 => {
                if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV6
                    && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
                {
                    Err(io::Error::other(format!(
                        "ip header version: 6, GSO type: {}",
                        hdr.gso_type
                    )))?
                }
            }
            ip_version => Err(io::Error::other(format!(
                "invalid ip header version: {ip_version}"
            )))?,
        }
        // Recompute the full header length (L3 + L4) from the packet bytes
        // rather than trusting hdr.hdr_len as supplied.
        if hdr.gso_type == VIRTIO_NET_HDR_GSO_UDP_L4 {
            // UDP header is a fixed 8 bytes past the transport offset.
            hdr.hdr_len = hdr.csum_start + 8
        } else {
            // TCP data-offset (header length in 32-bit words) lives in the
            // upper nibble of byte 12 of the TCP header.
            if len <= hdr.csum_start as usize + 12 {
                Err(io::Error::other("packet is too short"))?
            }

            let tcp_h_len = ((input[hdr.csum_start as usize + 12] as u16) >> 4) * 4;
            // Valid TCP header lengths are 20..=60 bytes.
            if !(20..=60).contains(&tcp_h_len) {
                Err(io::Error::other(format!(
                    "tcp header len is invalid: {tcp_h_len}"
                )))?
            }
            hdr.hdr_len = hdr.csum_start + tcp_h_len
        }
        if len < hdr.hdr_len as usize {
            Err(io::Error::other(format!(
                "length of packet ({len}) < virtioNetHdr.hdr_len ({})",
                hdr.hdr_len
            )))?
        }
        if hdr.hdr_len < hdr.csum_start {
            Err(io::Error::other(format!(
                "virtioNetHdr.hdrLen ({}) < virtioNetHdr.csumStart ({})",
                hdr.hdr_len, hdr.csum_start
            )))?
        }
        // The 16-bit checksum field must lie entirely inside the packet.
        let c_sum_at = (hdr.csum_start + hdr.csum_offset) as usize;
        if c_sum_at + 1 >= len {
            Err(io::Error::other(format!(
                "end of checksum offset ({}) exceeds packet length ({len})",
                c_sum_at + 1,
            )))?
        }
        gso_split(input, hdr, bufs, sizes, offset, ip_version == 6)
    }
    /// Removes the IPv6 address `addr`/`prefix` from this interface via the
    /// SIOCDIFADDR ioctl on an AF_INET6 control socket.
    pub fn remove_address_v6_impl(&self, addr: Ipv6Addr, prefix: u8) -> io::Result<()> {
        unsafe {
            let if_index = self.if_index_impl()?;
            let ctl = ctl_v6()?;
            let mut ifrv6: in6_ifreq = mem::zeroed();
            ifrv6.ifr6_ifindex = if_index as i32;
            ifrv6.ifr6_prefixlen = prefix as _;
            // Route the address through sockaddr_union to obtain the raw
            // in6_addr the ioctl expects.
            ifrv6.ifr6_addr = sockaddr_union::from(std::net::SocketAddr::new(addr.into(), 0))
                .addr6
                .sin6_addr;
            if let Err(err) = siocdifaddr_in6(ctl.as_raw_fd(), &ifrv6) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
559}
560
561impl DeviceImpl {
    /// Builds an `ifreq` pre-filled with this device's current interface name.
    unsafe fn request(&self) -> io::Result<ifreq> {
        request(&self.name_impl()?)
    }
566 fn set_address_v4(&self, addr: Ipv4Addr) -> io::Result<()> {
567 unsafe {
568 let mut req = self.request()?;
569 ipaddr_to_sockaddr(addr, 0, &mut req.ifr_ifru.ifru_addr, OVERWRITE_SIZE);
570 if let Err(err) = siocsifaddr(ctl()?.as_raw_fd(), &req) {
571 return Err(io::Error::from(err));
572 }
573 }
574 Ok(())
575 }
576 fn set_netmask(&self, value: Ipv4Addr) -> io::Result<()> {
577 unsafe {
578 let mut req = self.request()?;
579 ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_netmask, OVERWRITE_SIZE);
580 if let Err(err) = siocsifnetmask(ctl()?.as_raw_fd(), &req) {
581 return Err(io::Error::from(err));
582 }
583 Ok(())
584 }
585 }
586
587 fn set_destination(&self, value: Ipv4Addr) -> io::Result<()> {
588 unsafe {
589 let mut req = self.request()?;
590 ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_dstaddr, OVERWRITE_SIZE);
591 if let Err(err) = siocsifdstaddr(ctl()?.as_raw_fd(), &req) {
592 return Err(io::Error::from(err));
593 }
594 Ok(())
595 }
596 }
597
    /// Asks the kernel (TUNGETIFF) for the interface name of this fd.
    pub(crate) fn name_impl(&self) -> io::Result<String> {
        unsafe { name(self.as_raw_fd()) }
    }
602
603 fn ifru_flags(&self) -> io::Result<i16> {
604 unsafe {
605 let ctl = ctl()?;
606 let mut req = self.request()?;
607
608 if let Err(err) = siocgifflags(ctl.as_raw_fd(), &mut req) {
609 return Err(io::Error::from(err));
610 }
611 Ok(req.ifr_ifru.ifru_flags)
612 }
613 }
614
615 fn remove_all_address_v4(&self) -> io::Result<()> {
616 let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
617 .map_err(io::Error::from)?;
618 let list = interface.addresses().map_err(io::Error::from)?;
619 for x in list {
620 if x.addr().is_ipv4() {
621 interface.remove_address(x).map_err(io::Error::from)?;
622 }
623 }
624 Ok(())
625 }
626}
627
628impl DeviceImpl {
630 pub fn name(&self) -> io::Result<String> {
632 let _guard = self.op_lock.lock().unwrap();
633 self.name_impl()
634 }
635 pub fn remove_address_v6(&self, addr: Ipv6Addr, prefix: u8) -> io::Result<()> {
636 let _guard = self.op_lock.lock().unwrap();
637 self.remove_address_v6_impl(addr, prefix)
638 }
    /// Renames the interface via SIOCSIFNAME.
    ///
    /// Fails with `InvalidInput` when `value` (plus its NUL terminator) does
    /// not fit in IFNAMSIZ.
    pub fn set_name(&self, value: &str) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let tun_name = CString::new(value)?;

            if tun_name.as_bytes_with_nul().len() > IFNAMSIZ {
                return Err(io::Error::new(io::ErrorKind::InvalidInput, "name too long"));
            }

            let mut req = self.request()?;
            // Copy only value.len() bytes; the ifr_ifru union (and thus
            // ifru_newname) was zeroed by request(), so the new name remains
            // NUL-terminated.
            ptr::copy_nonoverlapping(
                tun_name.as_ptr() as *const c_char,
                req.ifr_ifru.ifru_newname.as_mut_ptr(),
                value.len(),
            );

            if let Err(err) = siocsifname(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }

            Ok(())
        }
    }
687 pub fn is_running(&self) -> io::Result<bool> {
691 let _guard = self.op_lock.lock().unwrap();
692 let flags = self.ifru_flags()?;
693 Ok(flags & (IFF_UP | IFF_RUNNING) as c_short == (IFF_UP | IFF_RUNNING) as c_short)
694 }
    /// Brings the interface up (sets `IFF_UP | IFF_RUNNING`) or down.
    ///
    /// Note: disabling clears only IFF_UP; IFF_RUNNING reflects operational
    /// state maintained by the kernel and is left untouched.
    pub fn enabled(&self, value: bool) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let ctl = ctl()?;
            let mut req = self.request()?;

            // Read-modify-write: fetch the current flags before changing them.
            if let Err(err) = siocgifflags(ctl.as_raw_fd(), &mut req) {
                return Err(io::Error::from(err));
            }

            if value {
                req.ifr_ifru.ifru_flags |= (IFF_UP | IFF_RUNNING) as c_short;
            } else {
                req.ifr_ifru.ifru_flags &= !(IFF_UP as c_short);
            }

            if let Err(err) = siocsifflags(ctl.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }

            Ok(())
        }
    }
722 pub fn broadcast(&self) -> io::Result<IpAddr> {
745 let _guard = self.op_lock.lock().unwrap();
746 unsafe {
747 let mut req = self.request()?;
748 if let Err(err) = siocgifbrdaddr(ctl()?.as_raw_fd(), &mut req) {
749 return Err(io::Error::from(err));
750 }
751 let sa = sockaddr_union::from(req.ifr_ifru.ifru_broadaddr);
752 Ok(std::net::SocketAddr::try_from(sa)?.ip())
753 }
754 }
755 pub fn set_broadcast(&self, value: IpAddr) -> io::Result<()> {
760 let _guard = self.op_lock.lock().unwrap();
761 unsafe {
762 let mut req = self.request()?;
763 ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_broadaddr, OVERWRITE_SIZE);
764 if let Err(err) = siocsifbrdaddr(ctl()?.as_raw_fd(), &req) {
765 return Err(io::Error::from(err));
766 }
767 Ok(())
768 }
769 }
770 pub fn set_network_address<IPv4: ToIpv4Address, Netmask: ToIpv4Netmask>(
791 &self,
792 address: IPv4,
793 netmask: Netmask,
794 destination: Option<IPv4>,
795 ) -> io::Result<()> {
796 let _guard = self.op_lock.lock().unwrap();
797 self.remove_all_address_v4()?;
798 self.set_address_v4(address.ipv4()?)?;
799 self.set_netmask(netmask.netmask()?)?;
800 if let Some(destination) = destination {
801 self.set_destination(destination.ipv4()?)?;
802 }
803 Ok(())
804 }
805 pub fn add_address_v4<IPv4: ToIpv4Address, Netmask: ToIpv4Netmask>(
828 &self,
829 address: IPv4,
830 netmask: Netmask,
831 ) -> io::Result<()> {
832 let _guard = self.op_lock.lock().unwrap();
833 let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
834 .map_err(io::Error::from)?;
835 interface
836 .add_address(IpNet::new_assert(address.ipv4()?.into(), netmask.prefix()?))
837 .map_err(io::Error::from)
838 }
    /// Removes `addr` from the interface.
    ///
    /// IPv4 addresses are removed through netconfig; IPv6 addresses are
    /// matched against the system's interface address list (to recover their
    /// prefix length) and removed via the in6_ifreq ioctl.
    pub fn remove_address(&self, addr: IpAddr) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        match addr {
            IpAddr::V4(_) => {
                let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
                    .map_err(io::Error::from)?;
                let list = interface.addresses().map_err(io::Error::from)?;
                for x in list {
                    if x.addr() == addr {
                        interface.remove_address(x).map_err(io::Error::from)?;
                    }
                }
            }
            IpAddr::V6(addr_v6) => {
                let addrs = crate::platform::get_if_addrs_by_name(self.name_impl()?)?;
                for x in addrs {
                    if let Some(ip_addr) = x.address.ip_addr() {
                        if ip_addr == addr {
                            if let Some(netmask) = x.address.netmask() {
                                // Fall back to prefix 0 if the netmask is not
                                // a contiguous mask.
                                let prefix = ipnet::ip_mask_to_prefix(netmask).unwrap_or(0);
                                self.remove_address_v6_impl(addr_v6, prefix)?
                            }
                        }
                    }
                }
            }
        }
        Ok(())
    }
    /// Adds an IPv6 address with the given prefix via the SIOCSIFADDR ioctl
    /// on an AF_INET6 control socket.
    pub fn add_address_v6<IPv6: ToIpv6Address, Netmask: ToIpv6Netmask>(
        &self,
        addr: IPv6,
        netmask: Netmask,
    ) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let if_index = self.if_index_impl()?;
            let ctl = ctl_v6()?;
            let mut ifrv6: in6_ifreq = mem::zeroed();
            ifrv6.ifr6_ifindex = if_index as i32;
            ifrv6.ifr6_prefixlen = netmask.prefix()? as u32;
            // Route the address through sockaddr_union to obtain the raw
            // in6_addr the ioctl expects.
            ifrv6.ifr6_addr =
                sockaddr_union::from(std::net::SocketAddr::new(addr.ipv6()?.into(), 0))
                    .addr6
                    .sin6_addr;
            if let Err(err) = siocsifaddr_in6(ctl.as_raw_fd(), &ifrv6) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
942 pub fn mtu(&self) -> io::Result<u16> {
947 let _guard = self.op_lock.lock().unwrap();
948 unsafe {
949 let mut req = self.request()?;
950
951 if let Err(err) = siocgifmtu(ctl()?.as_raw_fd(), &mut req) {
952 return Err(io::Error::from(err));
953 }
954
955 req.ifr_ifru
956 .ifru_mtu
957 .try_into()
958 .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("{e:?}")))
959 }
960 }
961 pub fn set_mtu(&self, value: u16) -> io::Result<()> {
985 let _guard = self.op_lock.lock().unwrap();
986 unsafe {
987 let mut req = self.request()?;
988 req.ifr_ifru.ifru_mtu = value as i32;
989
990 if let Err(err) = siocsifmtu(ctl()?.as_raw_fd(), &req) {
991 return Err(io::Error::from(err));
992 }
993 Ok(())
994 }
995 }
    /// Sets the link-layer (MAC) address via SIOCSIFHWADDR.
    ///
    /// Only meaningful for TAP (L2) devices.
    pub fn set_mac_address(&self, eth_addr: [u8; ETHER_ADDR_LEN as usize]) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;
            req.ifr_ifru.ifru_hwaddr.sa_family = ARPHRD_ETHER;
            // sa_data is an array of c_char, so each byte is cast on the way in.
            req.ifr_ifru.ifru_hwaddr.sa_data[0..ETHER_ADDR_LEN as usize]
                .copy_from_slice(eth_addr.map(|c| c as _).as_slice());
            if let Err(err) = siocsifhwaddr(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }
1014 pub fn mac_address(&self) -> io::Result<[u8; ETHER_ADDR_LEN as usize]> {
1019 let _guard = self.op_lock.lock().unwrap();
1020 unsafe {
1021 let mut req = self.request()?;
1022
1023 siocgifhwaddr(ctl()?.as_raw_fd(), &mut req).map_err(io::Error::from)?;
1024
1025 let hw = &req.ifr_ifru.ifru_hwaddr.sa_data;
1026
1027 let mut mac = [0u8; ETHER_ADDR_LEN as usize];
1028 for (i, b) in hw.iter().take(6).enumerate() {
1029 mac[i] = *b as u8;
1030 }
1031
1032 Ok(mac)
1033 }
1034 }
1035}
1036
/// Queries the interface name bound to tun fd `fd` via TUNGETIFF.
unsafe fn name(fd: RawFd) -> io::Result<String> {
    let mut req: ifreq = mem::zeroed();
    if let Err(err) = tungetiff(fd, &mut req as *mut _ as *mut _) {
        return Err(io::Error::from(err));
    }
    // ifr_name is NUL-terminated by the kernel; lossy-decode any non-UTF-8.
    let c_str = std::ffi::CStr::from_ptr(req.ifr_name.as_ptr() as *const c_char);
    let tun_name = c_str.to_string_lossy().into_owned();
    Ok(tun_name)
}
1046
1047unsafe fn request(name: &str) -> io::Result<ifreq> {
1048 let mut req: ifreq = mem::zeroed();
1049 ptr::copy_nonoverlapping(
1050 name.as_ptr() as *const c_char,
1051 req.ifr_name.as_mut_ptr(),
1052 name.len(),
1053 );
1054 Ok(req)
1055}
1056
/// Maps the abstract device layer onto the kernel's TUN/TAP interface flag.
impl From<Layer> for c_short {
    fn from(layer: Layer) -> Self {
        match layer {
            // L2 (ethernet frames) => TAP; L3 (raw IP packets) => TUN.
            Layer::L2 => IFF_TAP as c_short,
            Layer::L3 => IFF_TUN as c_short,
        }
    }
}