1use crate::platform::linux::offload::{
2 gso_none_checksum, gso_split, handle_gro, VirtioNetHdr, VIRTIO_NET_HDR_F_NEEDS_CSUM,
3 VIRTIO_NET_HDR_GSO_NONE, VIRTIO_NET_HDR_GSO_TCPV4, VIRTIO_NET_HDR_GSO_TCPV6,
4 VIRTIO_NET_HDR_GSO_UDP_L4, VIRTIO_NET_HDR_LEN,
5};
6use crate::platform::unix::device::{ctl, ctl_v6};
7use crate::platform::{ExpandBuffer, GROTable};
8use crate::{
9 builder::{DeviceConfig, Layer},
10 platform::linux::sys::*,
11 platform::{
12 unix::{ipaddr_to_sockaddr, sockaddr_union, Fd, Tun},
13 ETHER_ADDR_LEN,
14 },
15 ToIpv4Address, ToIpv4Netmask, ToIpv6Address, ToIpv6Netmask,
16};
17use ipnet::IpNet;
18use libc::{
19 self, c_char, c_short, ifreq, in6_ifreq, ARPHRD_ETHER, IFF_MULTI_QUEUE, IFF_NO_PI, IFF_RUNNING,
20 IFF_TAP, IFF_TUN, IFF_UP, IFNAMSIZ, O_RDWR,
21};
22use std::net::Ipv6Addr;
23use std::sync::{Arc, Mutex};
24use std::{
25 ffi::CString,
26 io, mem,
27 net::{IpAddr, Ipv4Addr},
28 os::unix::io::{AsRawFd, RawFd},
29 ptr,
30};
31
// Byte size of the `ifreq` union payload; passed to `ipaddr_to_sockaddr` so the
// whole union is overwritten and no stale bytes remain behind the sockaddr.
const OVERWRITE_SIZE: usize = mem::size_of::<libc::__c_anonymous_ifr_ifru>();
33
/// Linux TUN/TAP device handle plus the offload state negotiated at creation.
pub struct DeviceImpl {
    /// Underlying file-descriptor wrapper used for all packet I/O.
    pub(crate) tun: Tun,
    /// True when the device was opened with IFF_VNET_HDR (a virtio-net header
    /// prefixes every packet, enabling the GSO/GRO paths).
    pub(crate) vnet_hdr: bool,
    /// True when the kernel also accepted UDP segmentation offload (USO4/USO6).
    pub(crate) udp_gso: bool,
    /// Flags passed to TUNSETIFF; consulted by `try_clone` for IFF_MULTI_QUEUE.
    flags: c_short,
    /// Serializes configuration ioctls across clones of this device.
    pub(crate) op_lock: Arc<Mutex<()>>,
}
42
43impl DeviceImpl {
    /// Opens `/dev/net/tun` and configures a new TUN/TAP interface from `config`.
    ///
    /// When `config.offload` is set, negotiates vnet-hdr offloads with the
    /// kernel: TCP offloads (checksum + TSO4/TSO6) are mandatory for vnet-hdr
    /// mode, while UDP GSO (USO4/USO6) is probed on top and recorded separately.
    pub(crate) fn new(config: DeviceConfig) -> std::io::Result<Self> {
        // Validate the requested interface name up front: it must fit IFNAMSIZ
        // including the trailing NUL the kernel expects.
        let dev_name = match config.dev_name.as_ref() {
            Some(tun_name) => {
                let tun_name = CString::new(tun_name.clone())?;

                if tun_name.as_bytes_with_nul().len() > IFNAMSIZ {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidInput,
                        "device name too long",
                    ));
                }

                Some(tun_name)
            }

            None => None,
        };
        unsafe {
            let mut req: ifreq = mem::zeroed();

            if let Some(dev_name) = dev_name.as_ref() {
                // Copy the NUL-terminated name into ifr_name (length checked above).
                ptr::copy_nonoverlapping(
                    dev_name.as_ptr() as *const c_char,
                    req.ifr_name.as_mut_ptr(),
                    dev_name.as_bytes_with_nul().len(),
                );
            }
            let multi_queue = config.multi_queue.unwrap_or(false);
            let device_type: c_short = config.layer.unwrap_or(Layer::L3).into();
            let iff_no_pi = IFF_NO_PI as c_short;
            let iff_vnet_hdr = libc::IFF_VNET_HDR as c_short;
            let iff_multi_queue = IFF_MULTI_QUEUE as c_short;
            let packet_information = config.packet_information.unwrap_or(false);
            let offload = config.offload.unwrap_or(false);
            // IFF_NO_PI is set unless the caller explicitly asked for the
            // packet-information prefix.
            req.ifr_ifru.ifru_flags = device_type
                | if packet_information { 0 } else { iff_no_pi }
                | if multi_queue { iff_multi_queue } else { 0 }
                | if offload { iff_vnet_hdr } else { 0 };

            let fd = libc::open(
                c"/dev/net/tun".as_ptr() as *const _,
                O_RDWR | libc::O_CLOEXEC,
                0,
            );
            let tun_fd = Fd::new(fd)?;
            if let Err(err) = tunsetiff(tun_fd.inner, &mut req as *mut _ as *mut _) {
                return Err(io::Error::from(err));
            }
            let (vnet_hdr, udp_gso) = if offload && libc::IFF_VNET_HDR != 0 {
                let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
                let tun_udp_offloads = libc::TUN_F_USO4 | libc::TUN_F_USO6;
                // TCP offloads are required for vnet-hdr operation; if the
                // kernel rejects them we fall back to plain mode entirely.
                if let Err(err) = tunsetoffload(tun_fd.inner, tun_tcp_offloads as _) {
                    log::warn!("unsupported offload: {err:?}");
                    (false, false)
                } else {
                    // UDP GSO is best-effort: probe the combined flag set and
                    // remember whether the kernel accepted it.
                    let rs =
                        tunsetoffload(tun_fd.inner, (tun_tcp_offloads | tun_udp_offloads) as _);
                    (true, rs.is_ok())
                }
            } else {
                (false, false)
            };

            let device = DeviceImpl {
                tun: Tun::new(tun_fd),
                vnet_hdr,
                udp_gso,
                flags: req.ifr_ifru.ifru_flags,
                op_lock: Arc::new(Mutex::new(())),
            };
            Ok(device)
        }
    }
121 unsafe fn set_tcp_offloads(&self) -> io::Result<()> {
122 let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
123 tunsetoffload(self.as_raw_fd(), tun_tcp_offloads as _)
124 .map(|_| ())
125 .map_err(|e| e.into())
126 }
127 unsafe fn set_tcp_udp_offloads(&self) -> io::Result<()> {
128 let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
129 let tun_udp_offloads = libc::TUN_F_USO4 | libc::TUN_F_USO6;
130 tunsetoffload(self.as_raw_fd(), (tun_tcp_offloads | tun_udp_offloads) as _)
131 .map(|_| ())
132 .map_err(|e| e.into())
133 }
134 pub(crate) fn from_tun(tun: Tun) -> io::Result<Self> {
135 Ok(Self {
136 tun,
137 vnet_hdr: false,
138 udp_gso: false,
139 flags: 0,
140 op_lock: Arc::new(Mutex::new(())),
141 })
142 }
143
144 pub(crate) fn try_clone(&self) -> io::Result<DeviceImpl> {
151 let flags = self.flags;
152 if flags & (IFF_MULTI_QUEUE as c_short) != IFF_MULTI_QUEUE as c_short {
153 return Err(io::Error::new(
154 io::ErrorKind::Unsupported,
155 "iff_multi_queue not enabled",
156 ));
157 }
158 unsafe {
159 let mut req = self.request()?;
160 req.ifr_ifru.ifru_flags = flags;
161 let fd = libc::open(
162 c"/dev/net/tun".as_ptr() as *const _,
163 O_RDWR | libc::O_CLOEXEC,
164 );
165 let tun_fd = Fd::new(fd)?;
166 if let Err(err) = tunsetiff(tun_fd.inner, &mut req as *mut _ as *mut _) {
167 return Err(io::Error::from(err));
168 }
169 let dev = DeviceImpl {
170 tun: Tun::new(tun_fd),
171 vnet_hdr: self.vnet_hdr,
172 udp_gso: self.udp_gso,
173 flags,
174 op_lock: self.op_lock.clone(),
175 };
176 if dev.vnet_hdr {
177 if dev.udp_gso {
178 dev.set_tcp_udp_offloads()?
179 } else {
180 dev.set_tcp_offloads()?;
181 }
182 }
183
184 Ok(dev)
185 }
186 }
187 pub fn udp_gso(&self) -> bool {
191 let _guard = self.op_lock.lock().unwrap();
192 self.udp_gso
193 }
194 pub fn tcp_gso(&self) -> bool {
198 let _guard = self.op_lock.lock().unwrap();
199 self.vnet_hdr
200 }
    /// Sets the interface transmit queue length (carried in `ifru_metric`
    /// through the tx-queue-len ioctl wrapper).
    pub fn set_tx_queue_len(&self, tx_queue_len: u32) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut ifreq = self.request()?;
            ifreq.ifr_ifru.ifru_metric = tx_queue_len as _;
            if let Err(err) = change_tx_queue_len(ctl()?.as_raw_fd(), &ifreq) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
    /// Reads the interface's current transmit queue length.
    pub fn tx_queue_len(&self) -> io::Result<u32> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut ifreq = self.request()?;
            if let Err(err) = tx_queue_len(ctl()?.as_raw_fd(), &mut ifreq) {
                return Err(io::Error::from(err));
            }
            // The kernel reports the queue length in ifru_metric.
            Ok(ifreq.ifr_ifru.ifru_metric as _)
        }
    }
232 pub fn persist(&self) -> io::Result<()> {
234 let _guard = self.op_lock.lock().unwrap();
235 unsafe {
236 if let Err(err) = tunsetpersist(self.as_raw_fd(), &1) {
237 Err(io::Error::from(err))
238 } else {
239 Ok(())
240 }
241 }
242 }
243
244 pub fn user(&self, value: i32) -> io::Result<()> {
246 let _guard = self.op_lock.lock().unwrap();
247 unsafe {
248 if let Err(err) = tunsetowner(self.as_raw_fd(), &value) {
249 Err(io::Error::from(err))
250 } else {
251 Ok(())
252 }
253 }
254 }
255
256 pub fn group(&self, value: i32) -> io::Result<()> {
258 let _guard = self.op_lock.lock().unwrap();
259 unsafe {
260 if let Err(err) = tunsetgroup(self.as_raw_fd(), &value) {
261 Err(io::Error::from(err))
262 } else {
263 Ok(())
264 }
265 }
266 }
267 pub fn send_multiple<B: ExpandBuffer>(
271 &self,
272 gro_table: &mut GROTable,
273 bufs: &mut [B],
274 offset: usize,
275 ) -> io::Result<usize> {
276 self.send_multiple0(gro_table, bufs, offset, |tun, buf| tun.send(buf))
277 }
    /// Core batched-send path: coalesces packets (GRO) when vnet-hdr is
    /// active, then writes each resulting packet through `write_f`.
    ///
    /// Returns the total bytes written. A write failure is remembered and
    /// surfaced only after the remaining packets have been attempted, except
    /// `EBADFD`, which aborts immediately because the descriptor is unusable.
    pub(crate) fn send_multiple0<B: ExpandBuffer, W: FnMut(&Tun, &[u8]) -> io::Result<usize>>(
        &self,
        gro_table: &mut GROTable,
        bufs: &mut [B],
        mut offset: usize,
        mut write_f: W,
    ) -> io::Result<usize> {
        gro_table.reset();
        if self.vnet_hdr {
            handle_gro(
                bufs,
                offset,
                &mut gro_table.tcp_gro_table,
                &mut gro_table.udp_gro_table,
                self.udp_gso,
                &mut gro_table.to_write,
            )?;
            // handle_gro placed a virtio-net header in front of each packet,
            // so the data to send now starts VIRTIO_NET_HDR_LEN bytes earlier.
            offset -= VIRTIO_NET_HDR_LEN;
        } else {
            // No offloads: every buffer is written out unchanged.
            for i in 0..bufs.len() {
                gro_table.to_write.push(i);
            }
        }

        let mut total = 0;
        let mut err = Ok(());
        for buf_idx in &gro_table.to_write {
            match write_f(&self.tun, &bufs[*buf_idx].as_ref()[offset..]) {
                Ok(n) => {
                    total += n;
                }
                Err(e) => {
                    // EBADFD means the fd itself is gone; stop immediately.
                    if let Some(code) = e.raw_os_error() {
                        if libc::EBADFD == code {
                            return Err(e);
                        }
                    }
                    err = Err(e)
                }
            }
        }
        err?;
        Ok(total)
    }
322 pub fn recv_multiple<B: AsRef<[u8]> + AsMut<[u8]>>(
329 &self,
330 original_buffer: &mut [u8],
331 bufs: &mut [B],
332 sizes: &mut [usize],
333 offset: usize,
334 ) -> io::Result<usize> {
335 self.recv_multiple0(original_buffer, bufs, sizes, offset, |tun, buf| {
336 tun.recv(buf)
337 })
338 }
    /// Core batched-receive path.
    ///
    /// With vnet-hdr enabled, a single read may deliver a coalesced GSO
    /// super-packet: it lands in `original_buffer`, the virtio-net header is
    /// decoded, and the payload is split into `bufs` with per-packet lengths
    /// in `sizes`. Without vnet-hdr a single packet is read straight into
    /// `bufs[0]`. Returns the number of `bufs` entries filled.
    pub(crate) fn recv_multiple0<
        B: AsRef<[u8]> + AsMut<[u8]>,
        R: Fn(&Tun, &mut [u8]) -> io::Result<usize>,
    >(
        &self,
        original_buffer: &mut [u8],
        bufs: &mut [B],
        sizes: &mut [usize],
        offset: usize,
        read_f: R,
    ) -> io::Result<usize> {
        if bufs.is_empty() || bufs.len() != sizes.len() {
            return Err(io::Error::other("bufs error"));
        }
        if self.vnet_hdr {
            let len = read_f(&self.tun, original_buffer)?;
            // A valid read must carry at least one payload byte after the header.
            if len <= VIRTIO_NET_HDR_LEN {
                Err(io::Error::other(format!(
                    "length of packet ({len}) <= VIRTIO_NET_HDR_LEN ({VIRTIO_NET_HDR_LEN})",
                )))?
            }
            let hdr = VirtioNetHdr::decode(&original_buffer[..VIRTIO_NET_HDR_LEN])?;
            self.handle_virtio_read(
                hdr,
                &mut original_buffer[VIRTIO_NET_HDR_LEN..len],
                bufs,
                sizes,
                offset,
            )
        } else {
            let len = read_f(&self.tun, &mut bufs[0].as_mut()[offset..])?;
            sizes[0] = len;
            Ok(1)
        }
    }
374 pub(crate) fn handle_virtio_read<B: AsRef<[u8]> + AsMut<[u8]>>(
379 &self,
380 mut hdr: VirtioNetHdr,
381 input: &mut [u8],
382 bufs: &mut [B],
383 sizes: &mut [usize],
384 offset: usize,
385 ) -> io::Result<usize> {
386 let len = input.len();
387 if hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE {
388 if hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM != 0 {
389 gso_none_checksum(input, hdr.csum_start, hdr.csum_offset);
393 }
394 if bufs[0].as_ref()[offset..].len() < len {
395 Err(io::Error::other(format!(
396 "read len {len} overflows bufs element len {}",
397 bufs[0].as_ref().len()
398 )))?
399 }
400 sizes[0] = len;
401 bufs[0].as_mut()[offset..offset + len].copy_from_slice(input);
402 return Ok(1);
403 }
404 if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV4
405 && hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV6
406 && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
407 {
408 Err(io::Error::other(format!(
409 "unsupported virtio GSO type: {}",
410 hdr.gso_type
411 )))?
412 }
413 let ip_version = input[0] >> 4;
414 match ip_version {
415 4 => {
416 if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV4
417 && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
418 {
419 Err(io::Error::other(format!(
420 "ip header version: 4, GSO type: {}",
421 hdr.gso_type
422 )))?
423 }
424 }
425 6 => {
426 if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV6
427 && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
428 {
429 Err(io::Error::other(format!(
430 "ip header version: 6, GSO type: {}",
431 hdr.gso_type
432 )))?
433 }
434 }
435 ip_version => Err(io::Error::other(format!(
436 "invalid ip header version: {ip_version}"
437 )))?,
438 }
439 if hdr.gso_type == VIRTIO_NET_HDR_GSO_UDP_L4 {
444 hdr.hdr_len = hdr.csum_start + 8
445 } else {
446 if len <= hdr.csum_start as usize + 12 {
447 Err(io::Error::other("packet is too short"))?
448 }
449
450 let tcp_h_len = ((input[hdr.csum_start as usize + 12] as u16) >> 4) * 4;
451 if !(20..=60).contains(&tcp_h_len) {
452 Err(io::Error::other(format!(
454 "tcp header len is invalid: {tcp_h_len}"
455 )))?
456 }
457 hdr.hdr_len = hdr.csum_start + tcp_h_len
458 }
459 if len < hdr.hdr_len as usize {
460 Err(io::Error::other(format!(
461 "length of packet ({len}) < virtioNetHdr.hdr_len ({})",
462 hdr.hdr_len
463 )))?
464 }
465 if hdr.hdr_len < hdr.csum_start {
466 Err(io::Error::other(format!(
467 "virtioNetHdr.hdrLen ({}) < virtioNetHdr.csumStart ({})",
468 hdr.hdr_len, hdr.csum_start
469 )))?
470 }
471 let c_sum_at = (hdr.csum_start + hdr.csum_offset) as usize;
472 if c_sum_at + 1 >= len {
473 Err(io::Error::other(format!(
474 "end of checksum offset ({}) exceeds packet length ({len})",
475 c_sum_at + 1,
476 )))?
477 }
478 gso_split(input, hdr, bufs, sizes, offset, ip_version == 6)
479 }
    /// Removes an IPv6 address/prefix from the interface via the IPv6 control
    /// socket (SIOCDIFADDR on an `in6_ifreq`).
    ///
    /// Does not take `op_lock` itself; public wrappers (`remove_address_v6`,
    /// `remove_address`) hold the lock when calling this.
    pub fn remove_address_v6_impl(&self, addr: Ipv6Addr, prefix: u8) -> io::Result<()> {
        unsafe {
            let if_index = self.if_index_impl()?;
            let ctl = ctl_v6()?;
            let mut ifrv6: in6_ifreq = mem::zeroed();
            ifrv6.ifr6_ifindex = if_index as i32;
            ifrv6.ifr6_prefixlen = prefix as _;
            // in6_ifreq wants a raw in6_addr; go through sockaddr_union
            // (port 0 is a placeholder, only the address bytes are used).
            ifrv6.ifr6_addr = sockaddr_union::from(std::net::SocketAddr::new(addr.into(), 0))
                .addr6
                .sin6_addr;
            if let Err(err) = siocdifaddr_in6(ctl.as_raw_fd(), &ifrv6) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
496}
497
498impl DeviceImpl {
    /// Builds a zeroed `ifreq` pre-filled with this interface's current name,
    /// ready for a follow-up ioctl.
    unsafe fn request(&self) -> io::Result<ifreq> {
        request(&self.name_impl()?)
    }
    /// Sets the primary IPv4 address (SIOCSIFADDR).
    fn set_address_v4(&self, addr: Ipv4Addr) -> io::Result<()> {
        unsafe {
            let mut req = self.request()?;
            // Overwrite the full union so no stale bytes follow the sockaddr.
            ipaddr_to_sockaddr(addr, 0, &mut req.ifr_ifru.ifru_addr, OVERWRITE_SIZE);
            if let Err(err) = siocsifaddr(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
    /// Sets the IPv4 netmask (SIOCSIFNETMASK).
    fn set_netmask(&self, value: Ipv4Addr) -> io::Result<()> {
        unsafe {
            let mut req = self.request()?;
            ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_netmask, OVERWRITE_SIZE);
            if let Err(err) = siocsifnetmask(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }
523
    /// Sets the point-to-point peer address (SIOCSIFDSTADDR).
    fn set_destination(&self, value: Ipv4Addr) -> io::Result<()> {
        unsafe {
            let mut req = self.request()?;
            ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_dstaddr, OVERWRITE_SIZE);
            if let Err(err) = siocsifdstaddr(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }
534
    /// Interface name looked up from the fd itself (TUNGETIFF); no lock taken,
    /// so it is safe to call from methods already holding `op_lock`.
    pub(crate) fn name_impl(&self) -> io::Result<String> {
        unsafe { name(self.as_raw_fd()) }
    }
539
    /// Reads the current interface flags (SIOCGIFFLAGS).
    fn ifru_flags(&self) -> io::Result<i16> {
        unsafe {
            let ctl = ctl()?;
            let mut req = self.request()?;

            if let Err(err) = siocgifflags(ctl.as_raw_fd(), &mut req) {
                return Err(io::Error::from(err));
            }
            Ok(req.ifr_ifru.ifru_flags)
        }
    }
551
552 fn remove_all_address_v4(&self) -> io::Result<()> {
553 let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
554 .map_err(io::Error::from)?;
555 let list = interface.addresses().map_err(io::Error::from)?;
556 for x in list {
557 if x.addr().is_ipv4() {
558 interface.remove_address(x).map_err(io::Error::from)?;
559 }
560 }
561 Ok(())
562 }
563}
564
565impl DeviceImpl {
567 pub fn name(&self) -> io::Result<String> {
569 let _guard = self.op_lock.lock().unwrap();
570 self.name_impl()
571 }
572 pub fn remove_address_v6(&self, addr: Ipv6Addr, prefix: u8) -> io::Result<()> {
573 let _guard = self.op_lock.lock().unwrap();
574 self.remove_address_v6_impl(addr, prefix)
575 }
    /// Renames the interface (SIOCSIFNAME).
    ///
    /// The new name (plus NUL terminator) must fit in `IFNAMSIZ`.
    pub fn set_name(&self, value: &str) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let tun_name = CString::new(value)?;

            if tun_name.as_bytes_with_nul().len() > IFNAMSIZ {
                return Err(io::Error::new(io::ErrorKind::InvalidInput, "name too long"));
            }

            let mut req = self.request()?;
            // `request()` zero-fills the union, so copying value.len() bytes
            // leaves ifru_newname NUL-terminated.
            ptr::copy_nonoverlapping(
                tun_name.as_ptr() as *const c_char,
                req.ifr_ifru.ifru_newname.as_mut_ptr(),
                value.len(),
            );

            if let Err(err) = siocsifname(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }

            Ok(())
        }
    }
605 pub fn is_running(&self) -> io::Result<bool> {
609 let _guard = self.op_lock.lock().unwrap();
610 let flags = self.ifru_flags()?;
611 Ok(flags & (IFF_UP | IFF_RUNNING) as c_short == (IFF_UP | IFF_RUNNING) as c_short)
612 }
    /// Brings the interface up or down via a SIOCGIFFLAGS/SIOCSIFFLAGS
    /// read-modify-write.
    ///
    /// Up sets `IFF_UP | IFF_RUNNING`; down clears only `IFF_UP` (the kernel
    /// manages IFF_RUNNING itself).
    pub fn enabled(&self, value: bool) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let ctl = ctl()?;
            let mut req = self.request()?;

            if let Err(err) = siocgifflags(ctl.as_raw_fd(), &mut req) {
                return Err(io::Error::from(err));
            }

            if value {
                req.ifr_ifru.ifru_flags |= (IFF_UP | IFF_RUNNING) as c_short;
            } else {
                req.ifr_ifru.ifru_flags &= !(IFF_UP as c_short);
            }

            if let Err(err) = siocsifflags(ctl.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }

            Ok(())
        }
    }
    /// Reads the interface broadcast address (SIOCGIFBRDADDR).
    pub fn broadcast(&self) -> io::Result<IpAddr> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;
            if let Err(err) = siocgifbrdaddr(ctl()?.as_raw_fd(), &mut req) {
                return Err(io::Error::from(err));
            }
            let sa = sockaddr_union::from(req.ifr_ifru.ifru_broadaddr);
            Ok(std::net::SocketAddr::try_from(sa)?.ip())
        }
    }
    /// Sets the interface broadcast address (SIOCSIFBRDADDR).
    pub fn set_broadcast(&self, value: IpAddr) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;
            ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_broadaddr, OVERWRITE_SIZE);
            if let Err(err) = siocsifbrdaddr(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }
    /// Replaces the interface's IPv4 configuration: removes all existing IPv4
    /// addresses, then applies the address, netmask, and (optionally) the
    /// point-to-point destination.
    pub fn set_network_address<IPv4: ToIpv4Address, Netmask: ToIpv4Netmask>(
        &self,
        address: IPv4,
        netmask: Netmask,
        destination: Option<IPv4>,
    ) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        self.remove_all_address_v4()?;
        self.set_address_v4(address.ipv4()?)?;
        self.set_netmask(netmask.netmask()?)?;
        if let Some(destination) = destination {
            self.set_destination(destination.ipv4()?)?;
        }
        Ok(())
    }
687 pub fn add_address_v4<IPv4: ToIpv4Address, Netmask: ToIpv4Netmask>(
689 &self,
690 address: IPv4,
691 netmask: Netmask,
692 ) -> io::Result<()> {
693 let _guard = self.op_lock.lock().unwrap();
694 let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
695 .map_err(io::Error::from)?;
696 interface
697 .add_address(IpNet::new_assert(address.ipv4()?.into(), netmask.prefix()?))
698 .map_err(io::Error::from)
699 }
    /// Removes one address from the interface.
    ///
    /// IPv4 removal goes through netconfig by matching the exact address.
    /// IPv6 removal looks the address up in the system address list to
    /// recover its prefix length, then issues the SIOCDIFADDR ioctl.
    pub fn remove_address(&self, addr: IpAddr) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        match addr {
            IpAddr::V4(_) => {
                let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
                    .map_err(io::Error::from)?;
                let list = interface.addresses().map_err(io::Error::from)?;
                for x in list {
                    if x.addr() == addr {
                        interface.remove_address(x).map_err(io::Error::from)?;
                    }
                }
            }
            IpAddr::V6(addr_v6) => {
                let addrs = crate::platform::get_if_addrs_by_name(self.name_impl()?)?;
                for x in addrs {
                    if x.address == addr {
                        if let Some(netmask) = x.netmask {
                            // Entries without a parsable netmask fall back to
                            // prefix 0 rather than being skipped.
                            let prefix = ipnet::ip_mask_to_prefix(netmask).unwrap_or(0);
                            self.remove_address_v6_impl(addr_v6, prefix)?
                        }
                    }
                }
            }
        }
        Ok(())
    }
    /// Adds an IPv6 address/prefix via the IPv6 control socket
    /// (SIOCSIFADDR on an `in6_ifreq`).
    pub fn add_address_v6<IPv6: ToIpv6Address, Netmask: ToIpv6Netmask>(
        &self,
        addr: IPv6,
        netmask: Netmask,
    ) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let if_index = self.if_index_impl()?;
            let ctl = ctl_v6()?;
            let mut ifrv6: in6_ifreq = mem::zeroed();
            ifrv6.ifr6_ifindex = if_index as i32;
            ifrv6.ifr6_prefixlen = netmask.prefix()? as u32;
            // in6_ifreq wants a raw in6_addr; convert through sockaddr_union
            // (port 0 is a placeholder, only the address bytes are used).
            ifrv6.ifr6_addr =
                sockaddr_union::from(std::net::SocketAddr::new(addr.ipv6()?.into(), 0))
                    .addr6
                    .sin6_addr;
            if let Err(err) = siocsifaddr_in6(ctl.as_raw_fd(), &ifrv6) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
    /// Reads the interface MTU (SIOCGIFMTU).
    ///
    /// Fails with `InvalidData` if the kernel reports a value that does not
    /// fit in `u16`.
    pub fn mtu(&self) -> io::Result<u16> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;

            if let Err(err) = siocgifmtu(ctl()?.as_raw_fd(), &mut req) {
                return Err(io::Error::from(err));
            }

            req.ifr_ifru
                .ifru_mtu
                .try_into()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("{e:?}")))
        }
    }
    /// Sets the interface MTU (SIOCSIFMTU).
    pub fn set_mtu(&self, value: u16) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;
            req.ifr_ifru.ifru_mtu = value as i32;

            if let Err(err) = siocsifmtu(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }
    /// Sets the L2 hardware (MAC) address via SIOCSIFHWADDR; meaningful for
    /// TAP (L2) devices.
    pub fn set_mac_address(&self, eth_addr: [u8; ETHER_ADDR_LEN as usize]) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;
            req.ifr_ifru.ifru_hwaddr.sa_family = ARPHRD_ETHER;
            // sa_data is c_char; widen each byte through the map cast.
            req.ifr_ifru.ifru_hwaddr.sa_data[0..ETHER_ADDR_LEN as usize]
                .copy_from_slice(eth_addr.map(|c| c as _).as_slice());
            if let Err(err) = siocsifhwaddr(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }
813 pub fn mac_address(&self) -> io::Result<[u8; ETHER_ADDR_LEN as usize]> {
818 let _guard = self.op_lock.lock().unwrap();
819 unsafe {
820 let mut req = self.request()?;
821
822 siocgifhwaddr(ctl()?.as_raw_fd(), &mut req).map_err(io::Error::from)?;
823
824 let hw = &req.ifr_ifru.ifru_hwaddr.sa_data;
825
826 let mut mac = [0u8; ETHER_ADDR_LEN as usize];
827 for (i, b) in hw.iter().take(6).enumerate() {
828 mac[i] = *b as u8;
829 }
830
831 Ok(mac)
832 }
833 }
834}
835
/// Queries the kernel (TUNGETIFF) for the interface name bound to `fd`.
///
/// # Safety
/// `fd` must be a valid, open TUN/TAP file descriptor.
unsafe fn name(fd: RawFd) -> io::Result<String> {
    let mut req: ifreq = mem::zeroed();
    if let Err(err) = tungetiff(fd, &mut req as *mut _ as *mut _) {
        return Err(io::Error::from(err));
    }
    // ifr_name is NUL-terminated: the struct was zeroed and kernel names fit
    // inside IFNAMSIZ.
    let c_str = std::ffi::CStr::from_ptr(req.ifr_name.as_ptr() as *const c_char);
    let tun_name = c_str.to_string_lossy().into_owned();
    Ok(tun_name)
}
845
846unsafe fn request(name: &str) -> io::Result<ifreq> {
847 let mut req: ifreq = mem::zeroed();
848 ptr::copy_nonoverlapping(
849 name.as_ptr() as *const c_char,
850 req.ifr_name.as_mut_ptr(),
851 name.len(),
852 );
853 Ok(req)
854}
855
856impl From<Layer> for c_short {
857 fn from(layer: Layer) -> Self {
858 match layer {
859 Layer::L2 => IFF_TAP as c_short,
860 Layer::L3 => IFF_TUN as c_short,
861 }
862 }
863}