tun_rs/async_device/unix/mod.rs
#[cfg(all(target_os = "linux", not(target_env = "ohos")))]
use crate::platform::offload::{handle_gro, VirtioNetHdr, VIRTIO_NET_HDR_LEN};
use crate::platform::DeviceImpl;
#[cfg(all(target_os = "linux", not(target_env = "ohos")))]
use crate::platform::GROTable;
use crate::SyncDevice;
use std::io;
use std::io::{IoSlice, IoSliceMut};
use std::ops::Deref;
use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, RawFd};

#[cfg(feature = "async_tokio")]
mod tokio;
#[cfg(feature = "async_tokio")]
pub use self::tokio::AsyncDevice;

#[cfg(all(feature = "async_io", not(feature = "async_tokio")))]
mod async_io;
#[cfg(all(feature = "async_io", not(feature = "async_tokio")))]
pub use self::async_io::AsyncDevice;

impl FromRawFd for AsyncDevice {
    unsafe fn from_raw_fd(fd: RawFd) -> Self {
        AsyncDevice::from_fd(fd).unwrap()
    }
}
impl IntoRawFd for AsyncDevice {
    fn into_raw_fd(self) -> RawFd {
        self.into_fd().unwrap()
    }
}
impl AsRawFd for AsyncDevice {
    fn as_raw_fd(&self) -> RawFd {
        self.get_ref().as_raw_fd()
    }
}

impl Deref for AsyncDevice {
    type Target = DeviceImpl;

    fn deref(&self) -> &Self::Target {
        self.get_ref()
    }
}

impl AsyncDevice {
    #[allow(dead_code)]
    pub fn new(device: SyncDevice) -> io::Result<AsyncDevice> {
        AsyncDevice::new_dev(device.0)
    }

    /// Constructs an `AsyncDevice` from an existing file descriptor.
    ///
    /// # Safety
    /// This method is safe if the provided fd is valid.
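    ///
    /// A minimal usage sketch (the origin of `raw_fd` is hypothetical; it must be a valid
    /// descriptor whose ownership the caller is handing over):
    ///
    /// ```ignore
    /// use std::os::fd::RawFd;
    ///
    /// // `raw_fd` obtained elsewhere, e.g. passed in from another component.
    /// let raw_fd: RawFd = obtain_tun_fd();
    /// // SAFETY: `raw_fd` is valid, open, and not used elsewhere afterwards.
    /// let dev = unsafe { tun_rs::AsyncDevice::from_fd(raw_fd)? };
    /// ```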
    pub unsafe fn from_fd(fd: RawFd) -> io::Result<AsyncDevice> {
        AsyncDevice::new_dev(DeviceImpl::from_fd(fd)?)
    }

    /// # Safety
    /// The fd passed in must be a valid, open file descriptor.
    /// Unlike [`from_fd`], this function does **not** take ownership of `fd`,
    /// and therefore will not close it when dropped.
    /// The caller is responsible for ensuring the lifetime and eventual closure of `fd`.
    #[allow(dead_code)]
    pub(crate) unsafe fn borrow_raw(fd: RawFd) -> io::Result<Self> {
        AsyncDevice::new_dev(DeviceImpl::borrow_raw(fd)?)
    }

    pub fn into_fd(self) -> io::Result<RawFd> {
        Ok(self.into_device()?.into_raw_fd())
    }
    /// Waits for the device to become readable.
    ///
    /// This function is usually paired with `try_recv()`.
    ///
    /// The function may complete without the device being readable. This is a
    /// false-positive and attempting a `try_recv()` will return with
    /// `io::ErrorKind::WouldBlock`.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe. Once a readiness event occurs, the method
    /// will continue to return immediately until the readiness event is
    /// consumed by an attempt to read that fails with `WouldBlock` or
    /// `Poll::Pending`.
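    ///
    /// A minimal readiness-loop sketch (assumes `dev` is an already constructed
    /// `AsyncDevice`; the buffer size is illustrative only):
    ///
    /// ```ignore
    /// let mut buf = [0u8; 65536];
    /// let n = loop {
    ///     // Wait until the TUN fd reports readable.
    ///     dev.readable().await?;
    ///     match dev.try_recv(&mut buf) {
    ///         // One IP packet of `n` bytes is now in `buf[..n]`.
    ///         Ok(n) => break n,
    ///         // Spurious wakeup: go back to waiting.
    ///         Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
    ///         Err(e) => return Err(e),
    ///     }
    /// };
    /// ```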
    pub async fn readable(&self) -> io::Result<()> {
        self.0.readable().await.map(|_| ())
    }
    /// Waits for the device to become writable.
    ///
    /// This function is usually paired with `try_send()`.
    ///
    /// The function may complete without the device being writable. This is a
    /// false-positive and attempting a `try_send()` will return with
    /// `io::ErrorKind::WouldBlock`.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe. Once a readiness event occurs, the method
    /// will continue to return immediately until the readiness event is
    /// consumed by an attempt to write that fails with `WouldBlock` or
    /// `Poll::Pending`.
    pub async fn writable(&self) -> io::Result<()> {
        self.0.writable().await.map(|_| ())
    }
    /// Receives a single packet from the device.
    /// On success, returns the number of bytes read.
    ///
    /// The function must be called with a valid byte array `buf` of sufficient
    /// size to hold the message bytes. If a message is too long to fit in the
    /// supplied buffer, excess bytes may be discarded.
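    ///
    /// A minimal sketch (assumes `dev` is an already constructed `AsyncDevice`;
    /// the 65536-byte buffer is just a size that comfortably holds one packet):
    ///
    /// ```ignore
    /// let mut buf = [0u8; 65536];
    /// // Await one IP packet from the TUN interface.
    /// let n = dev.recv(&mut buf).await?;
    /// println!("got {n} bytes");
    /// ```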
    pub async fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
        self.read_with(|device| device.recv(buf)).await
    }
    /// Tries to receive a single packet from the device.
    /// On success, returns the number of bytes read.
    ///
    /// This method must be called with a valid byte array `buf` of sufficient size
    /// to hold the message bytes. If a message is too long to fit in the
    /// supplied buffer, excess bytes may be discarded.
    ///
    /// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is
    /// returned. This function is usually paired with `readable()`.
    pub fn try_recv(&self, buf: &mut [u8]) -> io::Result<usize> {
        self.try_read_io(|device| device.recv(buf))
    }

    /// Sends a packet to the device.
    ///
    /// # Returns
    /// On success, the number of bytes sent is returned; otherwise, the encountered error is returned.
    pub async fn send(&self, buf: &[u8]) -> io::Result<usize> {
        self.write_with(|device| device.send(buf)).await
    }
    /// Tries to send a packet to the device.
    ///
    /// When the device buffer is full, `Err(io::ErrorKind::WouldBlock)` is
    /// returned. This function is usually paired with `writable()`.
    ///
    /// # Returns
    ///
    /// If successful, `Ok(n)` is returned, where `n` is the number of bytes
    /// sent. If the device is not ready to send data,
    /// `Err(ErrorKind::WouldBlock)` is returned.
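    ///
    /// A minimal readiness-loop sketch (assumes `dev` is an already constructed
    /// `AsyncDevice` and `packet` holds a complete IP packet):
    ///
    /// ```ignore
    /// loop {
    ///     // Wait until the TUN fd reports writable.
    ///     dev.writable().await?;
    ///     match dev.try_send(&packet) {
    ///         Ok(_) => break,
    ///         // Spurious wakeup: go back to waiting.
    ///         Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
    ///         Err(e) => return Err(e),
    ///     }
    /// }
    /// ```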
    pub fn try_send(&self, buf: &[u8]) -> io::Result<usize> {
        self.try_write_io(|device| device.send(buf))
    }
    /// Receives a packet into multiple buffers (scatter read).
    /// **Processes a single packet per call.**
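    ///
    /// A minimal sketch (assumes `dev` is an already constructed `AsyncDevice`;
    /// the 20-byte split point is arbitrary and only illustrates scattering):
    ///
    /// ```ignore
    /// use std::io::IoSliceMut;
    ///
    /// let mut head = [0u8; 20];
    /// let mut rest = [0u8; 65516];
    /// // The `n` bytes of one packet are filled into `head`, then `rest`, in order.
    /// let n = dev
    ///     .recv_vectored(&mut [IoSliceMut::new(&mut head), IoSliceMut::new(&mut rest)])
    ///     .await?;
    /// ```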
    pub async fn recv_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        self.read_with(|device| device.recv_vectored(bufs)).await
    }
    /// Non-blocking version of `recv_vectored`.
    pub fn try_recv_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        self.try_read_io(|device| device.recv_vectored(bufs))
    }
    /// Sends multiple buffers as a single packet (gather write).
    pub async fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        self.write_with(|device| device.send_vectored(bufs)).await
    }
    /// Non-blocking version of `send_vectored`.
    pub fn try_send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        self.try_write_io(|device| device.send_vectored(bufs))
    }
}

#[cfg(all(target_os = "linux", not(target_env = "ohos")))]
impl AsyncDevice {
    /// # Prerequisites
    /// - The `IFF_MULTI_QUEUE` flag must be enabled.
    /// - The system must support network interface multi-queue functionality.
    ///
    /// # Description
    /// When multi-queue is enabled, creates a new queue by duplicating an existing one.
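    ///
    /// A minimal sketch (assumes `dev` was created with multi-queue enabled; the spawned
    /// task is illustrative and `tokio::spawn` assumes the `async_tokio` feature):
    ///
    /// ```ignore
    /// // Each clone is an independent queue on the same interface.
    /// let queue = dev.try_clone()?;
    /// tokio::spawn(async move {
    ///     let mut buf = [0u8; 65536];
    ///     while let Ok(n) = queue.recv(&mut buf).await {
    ///         // Handle the packet in `buf[..n]` arriving on this queue.
    ///     }
    /// });
    /// ```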
    pub fn try_clone(&self) -> io::Result<Self> {
        AsyncDevice::new_dev(self.get_ref().try_clone()?)
    }
    /// Receives a packet from the device.
    /// If offload is enabled, this method can be used to obtain the processed data.
    ///
    /// `original_buffer` is used to store the raw data, including the `VirtioNetHdr` and the
    /// unsplit IP packet. The recommended size is 10 + 65535.
    /// `bufs` and `sizes` are used to store the segmented IP packets; `bufs.len()` must equal
    /// `sizes.len()` and should be greater than `65535 / MTU`.
    /// `offset` is the starting position at which packet data is written within each buffer in `bufs`.
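    ///
    /// A minimal sketch (assumes `dev` is an `AsyncDevice` created with offload enabled;
    /// the buffer count and sizes are illustrative, following the recommendations above):
    ///
    /// ```ignore
    /// let mut original_buffer = vec![0u8; 10 + 65535];
    /// // Enough segment buffers to hold one 65535-byte packet split at a 1500-byte MTU.
    /// let mut bufs = vec![vec![0u8; 1510]; 64];
    /// let mut sizes = vec![0usize; bufs.len()];
    /// let num = dev
    ///     .recv_multiple(&mut original_buffer, &mut bufs, &mut sizes, 0)
    ///     .await?;
    /// for i in 0..num {
    ///     let packet = &bufs[i][..sizes[i]];
    ///     // Process each segmented IP packet.
    /// }
    /// ```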
    #[cfg(target_os = "linux")]
    pub async fn recv_multiple<B: AsRef<[u8]> + AsMut<[u8]>>(
        &self,
        original_buffer: &mut [u8],
        bufs: &mut [B],
        sizes: &mut [usize],
        offset: usize,
    ) -> io::Result<usize> {
        if bufs.is_empty() || bufs.len() != sizes.len() {
            return Err(io::Error::other("bufs error"));
        }
        let tun = self.get_ref();
        if tun.vnet_hdr {
            let len = self.recv(original_buffer).await?;
            if len <= VIRTIO_NET_HDR_LEN {
                Err(io::Error::other(format!(
                    "length of packet ({len}) <= VIRTIO_NET_HDR_LEN ({VIRTIO_NET_HDR_LEN})",
                )))?
            }
            let hdr = VirtioNetHdr::decode(&original_buffer[..VIRTIO_NET_HDR_LEN])?;
            tun.handle_virtio_read(
                hdr,
                &mut original_buffer[VIRTIO_NET_HDR_LEN..len],
                bufs,
                sizes,
                offset,
            )
        } else {
            let len = self.recv(&mut bufs[0].as_mut()[offset..]).await?;
            sizes[0] = len;
            Ok(1)
        }
    }
    /// Sends multiple fragmented data packets.
    /// `GROTable` can be reused, as it is used to assist in data merging.
    /// `offset` is the starting position of the data within each buffer; it must be at least
    /// `VIRTIO_NET_HDR_LEN` (10 bytes).
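    ///
    /// A minimal sketch (assumes `dev` is an `AsyncDevice` with offload enabled, that
    /// `GROTable::default()` and an `ExpandBuffer` impl for `Vec<u8>` are available as
    /// sketched, and that `packets` is a hypothetical slice of IP packets):
    ///
    /// ```ignore
    /// let mut gro_table = tun_rs::GROTable::default();
    /// const OFFSET: usize = 10; // room reserved for the virtio-net header
    /// let mut bufs: Vec<Vec<u8>> = packets
    ///     .iter()
    ///     .map(|p| {
    ///         let mut b = vec![0u8; OFFSET];
    ///         b.extend_from_slice(p); // IP packet payload after the reserved header space
    ///         b
    ///     })
    ///     .collect();
    /// // `gro_table` can be reused across calls.
    /// dev.send_multiple(&mut gro_table, &mut bufs, OFFSET).await?;
    /// ```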
    #[cfg(target_os = "linux")]
    pub async fn send_multiple<B: crate::platform::ExpandBuffer>(
        &self,
        gro_table: &mut GROTable,
        bufs: &mut [B],
        mut offset: usize,
    ) -> io::Result<usize> {
        gro_table.reset();
        let tun = self.get_ref();
        if tun.vnet_hdr {
            handle_gro(
                bufs,
                offset,
                &mut gro_table.tcp_gro_table,
                &mut gro_table.udp_gro_table,
                tun.udp_gso,
                &mut gro_table.to_write,
            )?;
            offset -= VIRTIO_NET_HDR_LEN;
        } else {
            for i in 0..bufs.len() {
                gro_table.to_write.push(i);
            }
        }

        let mut total = 0;
        let mut err = Ok(());
        for buf_idx in &gro_table.to_write {
            match self.send(&bufs[*buf_idx].as_ref()[offset..]).await {
                Ok(n) => {
                    total += n;
                }
                Err(e) => {
                    if let Some(code) = e.raw_os_error() {
                        if libc::EBADFD == code {
                            return Err(e);
                        }
                    }
                    err = Err(e)
                }
            }
        }
        err?;
        Ok(total)
    }
}