use crate::platform::linux::offload::{
    gso_none_checksum, gso_split, handle_gro, VirtioNetHdr, VIRTIO_NET_HDR_F_NEEDS_CSUM,
    VIRTIO_NET_HDR_GSO_NONE, VIRTIO_NET_HDR_GSO_TCPV4, VIRTIO_NET_HDR_GSO_TCPV6,
    VIRTIO_NET_HDR_GSO_UDP_L4, VIRTIO_NET_HDR_LEN,
};
use crate::platform::unix::device::{ctl, ctl_v6};
use crate::platform::{ExpandBuffer, GROTable};
use crate::{
    builder::{DeviceConfig, Layer},
    platform::linux::sys::*,
    platform::{
        unix::{ipaddr_to_sockaddr, sockaddr_union, Fd, Tun},
        ETHER_ADDR_LEN,
    },
    ToIpv4Address, ToIpv4Netmask, ToIpv6Address, ToIpv6Netmask,
};
use ipnet::IpNet;
use libc::{
    self, c_char, c_short, ifreq, in6_ifreq, ARPHRD_ETHER, IFF_MULTI_QUEUE, IFF_NO_PI, IFF_RUNNING,
    IFF_TAP, IFF_TUN, IFF_UP, IFNAMSIZ, O_RDWR,
};
use mac_address::mac_address_by_name;
use std::net::Ipv6Addr;
use std::sync::{Arc, Mutex};
use std::{
    ffi::CString,
    io, mem,
    net::{IpAddr, Ipv4Addr},
    os::unix::io::{AsRawFd, RawFd},
    ptr,
};

const OVERWRITE_SIZE: usize = mem::size_of::<libc::__c_anonymous_ifr_ifru>();

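/// Linux TUN/TAP device handle.
///
/// Wraps the underlying queue fd (`tun`) together with the negotiated offload
/// state (`vnet_hdr`, `udp_gso`), the `ifreq` flags the queue was created with,
/// and a lock that serializes configuration operations.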
pub struct DeviceImpl {
    pub(crate) tun: Tun,
    pub(crate) vnet_hdr: bool,
    pub(crate) udp_gso: bool,
    flags: c_short,
    pub(crate) op_lock: Arc<Mutex<()>>,
}

impl DeviceImpl {
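    /// Creates a new TUN/TAP queue by opening `/dev/net/tun` and issuing `TUNSETIFF`
    /// with flags derived from `config` (layer, packet information, multi-queue,
    /// offload). When offload is requested, TCP offloads and, if available, UDP GSO
    /// are negotiated via `TUNSETOFFLOAD`.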
    pub(crate) fn new(config: DeviceConfig) -> std::io::Result<Self> {
        let dev_name = match config.dev_name.as_ref() {
            Some(tun_name) => {
                let tun_name = CString::new(tun_name.clone())?;

                if tun_name.as_bytes_with_nul().len() > IFNAMSIZ {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidInput,
                        "device name too long",
                    ));
                }

                Some(tun_name)
            }

            None => None,
        };
        unsafe {
            let mut req: ifreq = mem::zeroed();

            if let Some(dev_name) = dev_name.as_ref() {
                ptr::copy_nonoverlapping(
                    dev_name.as_ptr() as *const c_char,
                    req.ifr_name.as_mut_ptr(),
                    dev_name.as_bytes_with_nul().len(),
                );
            }
            let multi_queue = config.multi_queue.unwrap_or(false);
            let device_type: c_short = config.layer.unwrap_or(Layer::L3).into();
            let iff_no_pi = IFF_NO_PI as c_short;
            let iff_vnet_hdr = libc::IFF_VNET_HDR as c_short;
            let iff_multi_queue = IFF_MULTI_QUEUE as c_short;
            let packet_information = config.packet_information.unwrap_or(false);
            let offload = config.offload.unwrap_or(false);
            req.ifr_ifru.ifru_flags = device_type
                | if packet_information { 0 } else { iff_no_pi }
                | if multi_queue { iff_multi_queue } else { 0 }
                | if offload { iff_vnet_hdr } else { 0 };

            let fd = libc::open(
                c"/dev/net/tun".as_ptr() as *const _,
                O_RDWR | libc::O_CLOEXEC,
                0,
            );
            let tun_fd = Fd::new(fd)?;
            if let Err(err) = tunsetiff(tun_fd.inner, &mut req as *mut _ as *mut _) {
                return Err(io::Error::from(err));
            }
            let (vnet_hdr, udp_gso) = if offload && libc::IFF_VNET_HDR != 0 {
                let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
                let tun_udp_offloads = libc::TUN_F_USO4 | libc::TUN_F_USO6;
                if let Err(err) = tunsetoffload(tun_fd.inner, tun_tcp_offloads as _) {
                    log::warn!("unsupported offload: {err:?}");
                    (false, false)
                } else {
                    let rs =
                        tunsetoffload(tun_fd.inner, (tun_tcp_offloads | tun_udp_offloads) as _);
                    (true, rs.is_ok())
                }
            } else {
                (false, false)
            };

            let device = DeviceImpl {
                tun: Tun::new(tun_fd),
                vnet_hdr,
                udp_gso,
                flags: req.ifr_ifru.ifru_flags,
                op_lock: Arc::new(Mutex::new(())),
            };
            Ok(device)
        }
    }
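    // The two helpers below re-issue `TUNSETOFFLOAD` on this queue's fd: TCP
    // offloads only (checksum + TSOv4/TSOv6), or TCP plus UDP GSO (USO4/USO6).
    // They are used when cloning a multi-queue device (see `try_clone`).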
    unsafe fn set_tcp_offloads(&self) -> io::Result<()> {
        let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
        tunsetoffload(self.as_raw_fd(), tun_tcp_offloads as _)
            .map(|_| ())
            .map_err(|e| e.into())
    }
    unsafe fn set_tcp_udp_offloads(&self) -> io::Result<()> {
        let tun_tcp_offloads = libc::TUN_F_CSUM | libc::TUN_F_TSO4 | libc::TUN_F_TSO6;
        let tun_udp_offloads = libc::TUN_F_USO4 | libc::TUN_F_USO6;
        tunsetoffload(self.as_raw_fd(), (tun_tcp_offloads | tun_udp_offloads) as _)
            .map(|_| ())
            .map_err(|e| e.into())
    }
    pub(crate) fn from_tun(tun: Tun) -> io::Result<Self> {
        Ok(Self {
            tun,
            vnet_hdr: false,
            udp_gso: false,
            flags: 0,
            op_lock: Arc::new(Mutex::new(())),
        })
    }

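    /// Opens an additional queue on the same interface. This only works when the
    /// device was created with `IFF_MULTI_QUEUE`; otherwise `ErrorKind::Unsupported`
    /// is returned. The negotiated offload settings are re-applied to the new
    /// queue's file descriptor.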
    pub(crate) fn try_clone(&self) -> io::Result<DeviceImpl> {
        let flags = self.flags;
        if flags & (IFF_MULTI_QUEUE as c_short) != IFF_MULTI_QUEUE as c_short {
            return Err(io::Error::new(
                io::ErrorKind::Unsupported,
                "iff_multi_queue not enabled",
            ));
        }
        unsafe {
            let mut req = self.request()?;
            req.ifr_ifru.ifru_flags = flags;
            let fd = libc::open(
                c"/dev/net/tun".as_ptr() as *const _,
                O_RDWR | libc::O_CLOEXEC,
            );
            let tun_fd = Fd::new(fd)?;
            if let Err(err) = tunsetiff(tun_fd.inner, &mut req as *mut _ as *mut _) {
                return Err(io::Error::from(err));
            }
            let dev = DeviceImpl {
                tun: Tun::new(tun_fd),
                vnet_hdr: self.vnet_hdr,
                udp_gso: self.udp_gso,
                flags,
                op_lock: self.op_lock.clone(),
            };
            if dev.vnet_hdr {
                if dev.udp_gso {
                    dev.set_tcp_udp_offloads()?
                } else {
                    dev.set_tcp_offloads()?;
                }
            }

            Ok(dev)
        }
    }
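    /// Returns whether UDP segmentation offload (USO) was successfully negotiated
    /// with the kernel when the device was created.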
    pub fn udp_gso(&self) -> bool {
        let _guard = self.op_lock.lock().unwrap();
        self.udp_gso
    }
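    /// Returns whether the device was created with the virtio-net header enabled,
    /// i.e. whether TCP offloads (GSO/GRO handling) are in use.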
    pub fn tcp_gso(&self) -> bool {
        let _guard = self.op_lock.lock().unwrap();
        self.vnet_hdr
    }
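    /// Sets the interface's transmit queue length through the `change_tx_queue_len`
    /// ioctl wrapper, using `ifru_metric` to carry the value.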
    pub fn set_tx_queue_len(&self, tx_queue_len: u32) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut ifreq = self.request()?;
            ifreq.ifr_ifru.ifru_metric = tx_queue_len as _;
            if let Err(err) = change_tx_queue_len(ctl()?.as_raw_fd(), &ifreq) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
    pub fn tx_queue_len(&self) -> io::Result<u32> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut ifreq = self.request()?;
            if let Err(err) = tx_queue_len(ctl()?.as_raw_fd(), &mut ifreq) {
                return Err(io::Error::from(err));
            }
            Ok(ifreq.ifr_ifru.ifru_metric as _)
        }
    }
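    /// Makes the device persistent (`TUNSETPERSIST`), so the interface survives
    /// after the owning process closes its file descriptor.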
    pub fn persist(&self) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            if let Err(err) = tunsetpersist(self.as_raw_fd(), &1) {
                Err(io::Error::from(err))
            } else {
                Ok(())
            }
        }
    }

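    /// Sets the owning user id of the device (`TUNSETOWNER`).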
    pub fn user(&self, value: i32) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            if let Err(err) = tunsetowner(self.as_raw_fd(), &value) {
                Err(io::Error::from(err))
            } else {
                Ok(())
            }
        }
    }

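    /// Sets the owning group id of the device (`TUNSETGROUP`).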
    pub fn group(&self, value: i32) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            if let Err(err) = tunsetgroup(self.as_raw_fd(), &value) {
                Err(io::Error::from(err))
            } else {
                Ok(())
            }
        }
    }
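    /// Sends a batch of packets. When the virtio-net header is enabled, the buffers
    /// are first coalesced with GRO via `handle_gro` and `offset` is moved back by
    /// `VIRTIO_NET_HDR_LEN` so each write includes the virtio-net header; otherwise
    /// every buffer is written as-is from `offset`. Returns the total number of
    /// bytes written.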
    pub fn send_multiple<B: ExpandBuffer>(
        &self,
        gro_table: &mut GROTable,
        bufs: &mut [B],
        offset: usize,
    ) -> io::Result<usize> {
        self.send_multiple0(gro_table, bufs, offset, |tun, buf| tun.send(buf))
    }
    pub(crate) fn send_multiple0<B: ExpandBuffer, W: FnMut(&Tun, &[u8]) -> io::Result<usize>>(
        &self,
        gro_table: &mut GROTable,
        bufs: &mut [B],
        mut offset: usize,
        mut write_f: W,
    ) -> io::Result<usize> {
        gro_table.reset();
        if self.vnet_hdr {
            handle_gro(
                bufs,
                offset,
                &mut gro_table.tcp_gro_table,
                &mut gro_table.udp_gro_table,
                self.udp_gso,
                &mut gro_table.to_write,
            )?;
            offset -= VIRTIO_NET_HDR_LEN;
        } else {
            for i in 0..bufs.len() {
                gro_table.to_write.push(i);
            }
        }

        let mut total = 0;
        let mut err = Ok(());
        for buf_idx in &gro_table.to_write {
            match write_f(&self.tun, &bufs[*buf_idx].as_ref()[offset..]) {
                Ok(n) => {
                    total += n;
                }
                Err(e) => {
                    if let Some(code) = e.raw_os_error() {
                        if libc::EBADFD == code {
                            return Err(e);
                        }
                    }
                    err = Err(e)
                }
            }
        }
        err?;
        Ok(total)
    }
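    /// Receives one read from the device and expands it into `bufs`. With the
    /// virtio-net header enabled, a single read may carry a GSO "super-packet":
    /// the header is decoded from `original_buffer` and the payload is split into
    /// individual packets via `handle_virtio_read`. Without the header, a single
    /// packet is read directly into `bufs[0]`. Returns the number of packets
    /// produced, with their lengths stored in `sizes`.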
    pub fn recv_multiple<B: AsRef<[u8]> + AsMut<[u8]>>(
        &self,
        original_buffer: &mut [u8],
        bufs: &mut [B],
        sizes: &mut [usize],
        offset: usize,
    ) -> io::Result<usize> {
        self.recv_multiple0(original_buffer, bufs, sizes, offset, |tun, buf| {
            tun.recv(buf)
        })
    }
    pub(crate) fn recv_multiple0<
        B: AsRef<[u8]> + AsMut<[u8]>,
        R: Fn(&Tun, &mut [u8]) -> io::Result<usize>,
    >(
        &self,
        original_buffer: &mut [u8],
        bufs: &mut [B],
        sizes: &mut [usize],
        offset: usize,
        read_f: R,
    ) -> io::Result<usize> {
        if bufs.is_empty() || bufs.len() != sizes.len() {
            return Err(io::Error::other("bufs error"));
        }
        if self.vnet_hdr {
            let len = read_f(&self.tun, original_buffer)?;
            if len <= VIRTIO_NET_HDR_LEN {
                Err(io::Error::other(format!(
                    "length of packet ({len}) <= VIRTIO_NET_HDR_LEN ({VIRTIO_NET_HDR_LEN})",
                )))?
            }
            let hdr = VirtioNetHdr::decode(&original_buffer[..VIRTIO_NET_HDR_LEN])?;
            self.handle_virtio_read(
                hdr,
                &mut original_buffer[VIRTIO_NET_HDR_LEN..len],
                bufs,
                sizes,
                offset,
            )
        } else {
            let len = read_f(&self.tun, &mut bufs[0].as_mut()[offset..])?;
            sizes[0] = len;
            Ok(1)
        }
    }
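    /// Validates a decoded virtio-net header against the packet in `input` and
    /// splits GSO payloads into `bufs`. For `VIRTIO_NET_HDR_GSO_NONE` the packet is
    /// copied through (finishing the checksum when `VIRTIO_NET_HDR_F_NEEDS_CSUM` is
    /// set); for TCPv4/TCPv6/UDP_L4 GSO the header fields are sanity-checked and
    /// `gso_split` produces the individual segments. Returns the number of packets
    /// written into `bufs`.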
    pub(crate) fn handle_virtio_read<B: AsRef<[u8]> + AsMut<[u8]>>(
        &self,
        mut hdr: VirtioNetHdr,
        input: &mut [u8],
        bufs: &mut [B],
        sizes: &mut [usize],
        offset: usize,
    ) -> io::Result<usize> {
        let len = input.len();
        if hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE {
            if hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM != 0 {
                gso_none_checksum(input, hdr.csum_start, hdr.csum_offset);
            }
            if bufs[0].as_ref()[offset..].len() < len {
                Err(io::Error::other(format!(
                    "read len {len} overflows remaining capacity {} of bufs[0]",
                    bufs[0].as_ref()[offset..].len()
                )))?
            }
            sizes[0] = len;
            bufs[0].as_mut()[offset..offset + len].copy_from_slice(input);
            return Ok(1);
        }
        if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV4
            && hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV6
            && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
        {
            Err(io::Error::other(format!(
                "unsupported virtio GSO type: {}",
                hdr.gso_type
            )))?
        }
        let ip_version = input[0] >> 4;
        match ip_version {
            4 => {
                if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV4
                    && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
                {
                    Err(io::Error::other(format!(
                        "ip header version: 4, GSO type: {}",
                        hdr.gso_type
                    )))?
                }
            }
            6 => {
                if hdr.gso_type != VIRTIO_NET_HDR_GSO_TCPV6
                    && hdr.gso_type != VIRTIO_NET_HDR_GSO_UDP_L4
                {
                    Err(io::Error::other(format!(
                        "ip header version: 6, GSO type: {}",
                        hdr.gso_type
                    )))?
                }
            }
            ip_version => Err(io::Error::other(format!(
                "invalid ip header version: {ip_version}"
            )))?,
        }
        if hdr.gso_type == VIRTIO_NET_HDR_GSO_UDP_L4 {
            hdr.hdr_len = hdr.csum_start + 8
        } else {
            if len <= hdr.csum_start as usize + 12 {
                Err(io::Error::other("packet is too short"))?
            }

            let tcp_h_len = ((input[hdr.csum_start as usize + 12] as u16) >> 4) * 4;
            if !(20..=60).contains(&tcp_h_len) {
                Err(io::Error::other(format!(
                    "tcp header len is invalid: {tcp_h_len}"
                )))?
            }
            hdr.hdr_len = hdr.csum_start + tcp_h_len
        }
        if len < hdr.hdr_len as usize {
            Err(io::Error::other(format!(
                "length of packet ({len}) < virtioNetHdr.hdr_len ({})",
                hdr.hdr_len
            )))?
        }
        if hdr.hdr_len < hdr.csum_start {
            Err(io::Error::other(format!(
                "virtioNetHdr.hdrLen ({}) < virtioNetHdr.csumStart ({})",
                hdr.hdr_len, hdr.csum_start
            )))?
        }
        let c_sum_at = (hdr.csum_start + hdr.csum_offset) as usize;
        if c_sum_at + 1 >= len {
            Err(io::Error::other(format!(
                "end of checksum offset ({}) exceeds packet length ({len})",
                c_sum_at + 1,
            )))?
        }
        gso_split(input, hdr, bufs, sizes, offset, ip_version == 6)
    }
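    /// Removes an IPv6 address from the interface, addressed by interface index,
    /// via `siocdifaddr_in6` on the IPv6 control socket.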
    pub fn remove_address_v6_impl(&self, addr: Ipv6Addr, prefix: u8) -> io::Result<()> {
        unsafe {
            let if_index = self.if_index_impl()?;
            let ctl = ctl_v6()?;
            let mut ifrv6: in6_ifreq = mem::zeroed();
            ifrv6.ifr6_ifindex = if_index as i32;
            ifrv6.ifr6_prefixlen = prefix as _;
            ifrv6.ifr6_addr = sockaddr_union::from(std::net::SocketAddr::new(addr.into(), 0))
                .addr6
                .sin6_addr;
            if let Err(err) = siocdifaddr_in6(ctl.as_raw_fd(), &ifrv6) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
}

impl DeviceImpl {
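    // Private helpers: build an `ifreq` for this interface by name and perform the
    // raw `SIOC*` ioctls / netconfig operations used by the public configuration
    // methods further below.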
    unsafe fn request(&self) -> io::Result<ifreq> {
        request(&self.name_impl()?)
    }
    fn set_address_v4(&self, addr: Ipv4Addr) -> io::Result<()> {
        unsafe {
            let mut req = self.request()?;
            ipaddr_to_sockaddr(addr, 0, &mut req.ifr_ifru.ifru_addr, OVERWRITE_SIZE);
            if let Err(err) = siocsifaddr(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
    fn set_netmask(&self, value: Ipv4Addr) -> io::Result<()> {
        unsafe {
            let mut req = self.request()?;
            ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_netmask, OVERWRITE_SIZE);
            if let Err(err) = siocsifnetmask(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }

    fn set_destination(&self, value: Ipv4Addr) -> io::Result<()> {
        unsafe {
            let mut req = self.request()?;
            ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_dstaddr, OVERWRITE_SIZE);
            if let Err(err) = siocsifdstaddr(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }

    pub(crate) fn name_impl(&self) -> io::Result<String> {
        unsafe { name(self.as_raw_fd()) }
    }

    fn ifru_flags(&self) -> io::Result<i16> {
        unsafe {
            let ctl = ctl()?;
            let mut req = self.request()?;

            if let Err(err) = siocgifflags(ctl.as_raw_fd(), &mut req) {
                return Err(io::Error::from(err));
            }
            Ok(req.ifr_ifru.ifru_flags)
        }
    }

    fn remove_all_address_v4(&self) -> io::Result<()> {
        let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
            .map_err(io::Error::from)?;
        let list = interface.addresses().map_err(io::Error::from)?;
        for x in list {
            if x.addr().is_ipv4() {
                interface.remove_address(x).map_err(io::Error::from)?;
            }
        }
        Ok(())
    }
}

impl DeviceImpl {
    pub fn name(&self) -> io::Result<String> {
        let _guard = self.op_lock.lock().unwrap();
        self.name_impl()
    }
    pub fn remove_address_v6(&self, addr: Ipv6Addr, prefix: u8) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        self.remove_address_v6_impl(addr, prefix)
    }
    pub fn set_name(&self, value: &str) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let tun_name = CString::new(value)?;

            if tun_name.as_bytes_with_nul().len() > IFNAMSIZ {
                return Err(io::Error::new(io::ErrorKind::InvalidInput, "name too long"));
            }

            let mut req = self.request()?;
            ptr::copy_nonoverlapping(
                tun_name.as_ptr() as *const c_char,
                req.ifr_ifru.ifru_newname.as_mut_ptr(),
                value.len(),
            );

            if let Err(err) = siocsifname(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }

            Ok(())
        }
    }
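    /// Returns whether both `IFF_UP` and `IFF_RUNNING` are set on the interface.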
    pub fn is_running(&self) -> io::Result<bool> {
        let _guard = self.op_lock.lock().unwrap();
        let flags = self.ifru_flags()?;
        Ok(flags & (IFF_UP | IFF_RUNNING) as c_short == (IFF_UP | IFF_RUNNING) as c_short)
    }
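    /// Brings the interface up (setting `IFF_UP | IFF_RUNNING`) or down (clearing
    /// `IFF_UP`) by reading and rewriting the interface flags.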
    pub fn enabled(&self, value: bool) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let ctl = ctl()?;
            let mut req = self.request()?;

            if let Err(err) = siocgifflags(ctl.as_raw_fd(), &mut req) {
                return Err(io::Error::from(err));
            }

            if value {
                req.ifr_ifru.ifru_flags |= (IFF_UP | IFF_RUNNING) as c_short;
            } else {
                req.ifr_ifru.ifru_flags &= !(IFF_UP as c_short);
            }

            if let Err(err) = siocsifflags(ctl.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }

            Ok(())
        }
    }
    pub fn broadcast(&self) -> io::Result<IpAddr> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;
            if let Err(err) = siocgifbrdaddr(ctl()?.as_raw_fd(), &mut req) {
                return Err(io::Error::from(err));
            }
            let sa = sockaddr_union::from(req.ifr_ifru.ifru_broadaddr);
            Ok(std::net::SocketAddr::try_from(sa)?.ip())
        }
    }
    pub fn set_broadcast(&self, value: IpAddr) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;
            ipaddr_to_sockaddr(value, 0, &mut req.ifr_ifru.ifru_broadaddr, OVERWRITE_SIZE);
            if let Err(err) = siocsifbrdaddr(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }
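    /// Replaces the interface's IPv4 configuration: removes all existing IPv4
    /// addresses, then sets the given address, netmask, and (optionally) the
    /// point-to-point destination address.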
    pub fn set_network_address<IPv4: ToIpv4Address, Netmask: ToIpv4Netmask>(
        &self,
        address: IPv4,
        netmask: Netmask,
        destination: Option<IPv4>,
    ) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        self.remove_all_address_v4()?;
        self.set_address_v4(address.ipv4()?)?;
        self.set_netmask(netmask.netmask()?)?;
        if let Some(destination) = destination {
            self.set_destination(destination.ipv4()?)?;
        }
        Ok(())
    }
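    /// Adds an IPv4 address with the given prefix through `netconfig_rs`, without
    /// removing any existing addresses.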
    pub fn add_address_v4<IPv4: ToIpv4Address, Netmask: ToIpv4Netmask>(
        &self,
        address: IPv4,
        netmask: Netmask,
    ) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
            .map_err(io::Error::from)?;
        interface
            .add_address(IpNet::new_assert(address.ipv4()?.into(), netmask.prefix()?))
            .map_err(io::Error::from)
    }
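    /// Removes a specific IP address from the interface. IPv4 addresses are
    /// removed through `netconfig_rs`; for IPv6 the configured prefix length is
    /// looked up from the interface's current addresses and the address is
    /// removed via `remove_address_v6_impl`.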
    pub fn remove_address(&self, addr: IpAddr) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        match addr {
            IpAddr::V4(_) => {
                let interface = netconfig_rs::Interface::try_from_index(self.if_index_impl()?)
                    .map_err(io::Error::from)?;
                let list = interface.addresses().map_err(io::Error::from)?;
                for x in list {
                    if x.addr() == addr {
                        interface.remove_address(x).map_err(io::Error::from)?;
                    }
                }
            }
            IpAddr::V6(addr_v6) => {
                let addrs = crate::platform::get_if_addrs_by_name(self.name_impl()?)?;
                for x in addrs {
                    if x.address == addr {
                        if let Some(netmask) = x.netmask {
                            let prefix = ipnet::ip_mask_to_prefix(netmask).unwrap_or(0);
                            self.remove_address_v6_impl(addr_v6, prefix)?
                        }
                    }
                }
            }
        }
        Ok(())
    }
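    /// Adds an IPv6 address with the given prefix length via `siocsifaddr_in6`
    /// on the IPv6 control socket, using the interface index.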
    pub fn add_address_v6<IPv6: ToIpv6Address, Netmask: ToIpv6Netmask>(
        &self,
        addr: IPv6,
        netmask: Netmask,
    ) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let if_index = self.if_index_impl()?;
            let ctl = ctl_v6()?;
            let mut ifrv6: in6_ifreq = mem::zeroed();
            ifrv6.ifr6_ifindex = if_index as i32;
            ifrv6.ifr6_prefixlen = netmask.prefix()? as u32;
            ifrv6.ifr6_addr =
                sockaddr_union::from(std::net::SocketAddr::new(addr.ipv6()?.into(), 0))
                    .addr6
                    .sin6_addr;
            if let Err(err) = siocsifaddr_in6(ctl.as_raw_fd(), &ifrv6) {
                return Err(io::Error::from(err));
            }
        }
        Ok(())
    }
    pub fn mtu(&self) -> io::Result<u16> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;

            if let Err(err) = siocgifmtu(ctl()?.as_raw_fd(), &mut req) {
                return Err(io::Error::from(err));
            }

            req.ifr_ifru
                .ifru_mtu
                .try_into()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("{e:?}")))
        }
    }
    pub fn set_mtu(&self, value: u16) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;
            req.ifr_ifru.ifru_mtu = value as i32;

            if let Err(err) = siocsifmtu(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }
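    /// Sets the interface's hardware (MAC) address via `siocsifhwaddr`. This is
    /// typically only meaningful for TAP (layer 2) devices.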
    pub fn set_mac_address(&self, eth_addr: [u8; ETHER_ADDR_LEN as usize]) -> io::Result<()> {
        let _guard = self.op_lock.lock().unwrap();
        unsafe {
            let mut req = self.request()?;
            req.ifr_ifru.ifru_hwaddr.sa_family = ARPHRD_ETHER;
            req.ifr_ifru.ifru_hwaddr.sa_data[0..ETHER_ADDR_LEN as usize]
                .copy_from_slice(eth_addr.map(|c| c as _).as_slice());
            if let Err(err) = siocsifhwaddr(ctl()?.as_raw_fd(), &req) {
                return Err(io::Error::from(err));
            }
            Ok(())
        }
    }
    pub fn mac_address(&self) -> io::Result<[u8; ETHER_ADDR_LEN as usize]> {
        let _guard = self.op_lock.lock().unwrap();
        let mac = mac_address_by_name(&self.name_impl()?)
            .map_err(|e| io::Error::other(e.to_string()))?
            .ok_or(io::Error::from(io::ErrorKind::NotFound))?;
        Ok(mac.bytes())
    }
}

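/// Looks up the interface name bound to a TUN file descriptor via `tungetiff`
/// and returns it as an owned `String`.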
unsafe fn name(fd: RawFd) -> io::Result<String> {
    let mut req: ifreq = mem::zeroed();
    if let Err(err) = tungetiff(fd, &mut req as *mut _ as *mut _) {
        return Err(io::Error::from(err));
    }
    let c_str = std::ffi::CStr::from_ptr(req.ifr_name.as_ptr() as *const c_char);
    let tun_name = c_str.to_string_lossy().into_owned();
    Ok(tun_name)
}

unsafe fn request(name: &str) -> io::Result<ifreq> {
    let mut req: ifreq = mem::zeroed();
    ptr::copy_nonoverlapping(
        name.as_ptr() as *const c_char,
        req.ifr_name.as_mut_ptr(),
        name.len(),
    );
    Ok(req)
}

impl From<Layer> for c_short {
    fn from(layer: Layer) -> Self {
        match layer {
            Layer::L2 => IFF_TAP as c_short,
            Layer::L3 => IFF_TUN as c_short,
        }
    }
}