// tun_sync/platform/linux/device.rs
use std::ffi::{CStr, CString};
16use std::io::{self, Read, Write};
17use std::mem;
18use std::net::Ipv4Addr;
19use std::os::unix::io::{AsRawFd, IntoRawFd, RawFd};
20use std::ptr;
21use std::vec::Vec;
22
23use libc;
24use libc::{c_char, c_short};
25use libc::{AF_INET, O_RDWR, SOCK_DGRAM};
26
27use crate::configuration::{Configuration, Layer};
28use crate::device::Device as D;
29use crate::error::*;
30use crate::platform::linux::sys::*;
31use crate::platform::posix::{Fd, SockAddr};
32
/// A Linux TUN/TAP device, possibly backed by multiple queues
/// (`IFF_MULTI_QUEUE`).
pub struct Device {
    // Interface name as reported back by the kernel after `TUNSETIFF`.
    name: String,
    // One open fd per queue; always holds at least one entry.
    queues: Vec<Queue>,
    // AF_INET datagram socket used only as a target for `SIOC*` ioctls.
    ctl: Fd,
}
39
impl Device {
    /// Creates a new TUN/TAP device from `config`.
    ///
    /// Opens `/dev/net/tun` once per requested queue, attaches each fd to the
    /// interface with the `TUNSETIFF` ioctl, then opens an `AF_INET` datagram
    /// socket used as the control fd for later `SIOC*` ioctls. Finally applies
    /// the rest of the configuration (addresses, mtu, ...) via `configure`.
    ///
    /// # Errors
    ///
    /// * `Error::NameTooLong` if the configured name (plus NUL) exceeds `IFNAMSIZ`.
    /// * `Error::InvalidQueuesNumber` if fewer than one queue is requested.
    /// * OS errors from `open`, `TUNSETIFF`, or `socket`.
    pub fn new(config: &Configuration) -> Result<Self> {
        let mut device = unsafe {
            // Validate the requested interface name, if any; the trailing NUL
            // must also fit inside the kernel's IFNAMSIZ buffer.
            let dev = match config.name.as_ref() {
                Some(name) => {
                    let name = CString::new(name.clone())?;

                    if name.as_bytes_with_nul().len() > IFNAMSIZ {
                        return Err(Error::NameTooLong);
                    }

                    Some(name)
                }

                None => None,
            };

            let mut queues = Vec::new();

            // `ifreq` is a plain C struct/union, so zeroing it is valid; the
            // same request is reused for every queue's TUNSETIFF below.
            let mut req: ifreq = mem::zeroed();

            if let Some(dev) = dev.as_ref() {
                // Copy the name without its NUL terminator — the buffer is
                // already zeroed, so it stays NUL-terminated.
                ptr::copy_nonoverlapping(
                    dev.as_ptr() as *const c_char,
                    req.ifrn.name.as_mut_ptr(),
                    dev.as_bytes().len(),
                );
            }

            // IFF_TUN (layer 3) or IFF_TAP (layer 2).
            let device_type: c_short = config.layer.unwrap_or(Layer::L3).into();

            let queues_num = config.queues.unwrap_or(1);
            if queues_num < 1 {
                return Err(Error::InvalidQueuesNumber);
            }

            // IFF_NO_PI strips the 4-byte packet-information header when the
            // caller did not ask for it; IFF_MULTI_QUEUE is required to attach
            // more than one fd to the same interface.
            req.ifru.flags = device_type
                | if config.platform.packet_information {
                    0
                } else {
                    IFF_NO_PI
                }
                | if queues_num > 1 { IFF_MULTI_QUEUE } else { 0 };

            // Each queue is its own open of the clone device, attached to the
            // same interface via TUNSETIFF with identical flags.
            for _ in 0..queues_num {
                let tun = Fd::new(libc::open(b"/dev/net/tun\0".as_ptr() as *const _, O_RDWR))
                    .map_err(|_| io::Error::last_os_error())?;

                if tunsetiff(tun.0, &mut req as *mut _ as *mut _) < 0 {
                    return Err(io::Error::last_os_error().into());
                }

                queues.push(Queue {
                    tun,
                    pi_enabled: config.platform.packet_information,
                });
            }

            // Dummy UDP socket: interface ioctls need any AF_INET socket fd.
            let ctl = Fd::new(libc::socket(AF_INET, SOCK_DGRAM, 0))
                .map_err(|_| io::Error::last_os_error())?;

            Device {
                // TUNSETIFF writes the final (possibly kernel-chosen) name
                // back into the request, so read it from there.
                name: CStr::from_ptr(req.ifrn.name.as_ptr())
                    .to_string_lossy()
                    .into(),
                queues,
                ctl,
            }
        };

        device.configure(config)?;

        Ok(device)
    }

    /// Builds a zeroed `ifreq` pre-filled with this device's interface name,
    /// ready to be passed to the `SIOC*` ioctls.
    unsafe fn request(&self) -> ifreq {
        let mut req: ifreq = mem::zeroed();
        // Name fits by construction (checked against IFNAMSIZ on creation and
        // rename); buffer is zeroed, so NUL termination is preserved.
        ptr::copy_nonoverlapping(
            self.name.as_ptr() as *const c_char,
            req.ifrn.name.as_mut_ptr(),
            self.name.len(),
        );

        req
    }

    /// Makes the device persist after the fd is closed (`TUNSETPERSIST 1`).
    pub fn persist(&mut self) -> Result<()> {
        unsafe {
            if tunsetpersist(self.as_raw_fd(), &1) < 0 {
                Err(io::Error::last_os_error().into())
            } else {
                Ok(())
            }
        }
    }

    /// Sets the uid allowed to use the device (`TUNSETOWNER`).
    pub fn user(&mut self, value: i32) -> Result<()> {
        unsafe {
            if tunsetowner(self.as_raw_fd(), &value) < 0 {
                Err(io::Error::last_os_error().into())
            } else {
                Ok(())
            }
        }
    }

    /// Sets the gid allowed to use the device (`TUNSETGROUP`).
    pub fn group(&mut self, value: i32) -> Result<()> {
        unsafe {
            if tunsetgroup(self.as_raw_fd(), &value) < 0 {
                Err(io::Error::last_os_error().into())
            } else {
                Ok(())
            }
        }
    }

    /// Whether reads/writes carry the 4-byte packet-information header.
    /// Delegates to the first queue; all queues share this setting.
    pub fn has_packet_information(&mut self) -> bool {
        self.queues[0].has_packet_information()
    }

    /// Puts the first queue's fd into non-blocking mode.
    pub fn set_nonblock(&self) -> io::Result<()> {
        self.queues[0].set_nonblock()
    }
}
171
172impl Read for Device {
173 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
174 self.queues[0].read(buf)
175 }
176
177 fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
178 self.queues[0].read_vectored(bufs)
179 }
180}
181
182impl Write for Device {
183 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
184 self.queues[0].write(buf)
185 }
186
187 fn flush(&mut self) -> io::Result<()> {
188 self.queues[0].flush()
189 }
190
191 fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
192 self.queues[0].write_vectored(bufs)
193 }
194}
195
impl D for Device {
    type Queue = Queue;

    /// Returns the interface name.
    fn name(&self) -> &str {
        &self.name
    }

    /// Renames the interface via `SIOCSIFNAME`.
    ///
    /// # Errors
    ///
    /// `Error::NameTooLong` if `value` (plus NUL) exceeds `IFNAMSIZ`;
    /// OS errors from the ioctl.
    fn set_name(&mut self, value: &str) -> Result<()> {
        unsafe {
            let name = CString::new(value)?;

            if name.as_bytes_with_nul().len() > IFNAMSIZ {
                return Err(Error::NameTooLong);
            }

            // `request()` carries the current name; the new name goes into
            // the union's `newname` field. The buffer is zeroed, so copying
            // `value.len()` bytes leaves it NUL-terminated.
            let mut req = self.request();
            ptr::copy_nonoverlapping(
                name.as_ptr() as *const c_char,
                req.ifru.newname.as_mut_ptr(),
                value.len(),
            );

            if siocsifname(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            self.name = value.into();

            Ok(())
        }
    }

    /// Brings the interface up or down via a read-modify-write of the
    /// interface flags (`SIOCGIFFLAGS` then `SIOCSIFFLAGS`).
    fn enabled(&mut self, value: bool) -> Result<()> {
        unsafe {
            let mut req = self.request();

            if siocgifflags(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            if value {
                req.ifru.flags |= IFF_UP | IFF_RUNNING;
            } else {
                // Only IFF_UP is cleared; IFF_RUNNING is managed by the kernel.
                req.ifru.flags &= !IFF_UP;
            }

            if siocsifflags(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    /// Returns the interface's IPv4 address (`SIOCGIFADDR`).
    fn address(&self) -> Result<Ipv4Addr> {
        unsafe {
            let mut req = self.request();

            if siocgifaddr(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            SockAddr::new(&req.ifru.addr).map(Into::into)
        }
    }

    /// Sets the interface's IPv4 address (`SIOCSIFADDR`).
    fn set_address(&mut self, value: Ipv4Addr) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.addr = SockAddr::from(value).into();

            if siocsifaddr(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    /// Returns the point-to-point destination address (`SIOCGIFDSTADDR`).
    fn destination(&self) -> Result<Ipv4Addr> {
        unsafe {
            let mut req = self.request();

            if siocgifdstaddr(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            SockAddr::new(&req.ifru.dstaddr).map(Into::into)
        }
    }

    /// Sets the point-to-point destination address (`SIOCSIFDSTADDR`).
    fn set_destination(&mut self, value: Ipv4Addr) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.dstaddr = SockAddr::from(value).into();

            if siocsifdstaddr(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    /// Returns the broadcast address (`SIOCGIFBRDADDR`).
    fn broadcast(&self) -> Result<Ipv4Addr> {
        unsafe {
            let mut req = self.request();

            if siocgifbrdaddr(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            SockAddr::new(&req.ifru.broadaddr).map(Into::into)
        }
    }

    /// Sets the broadcast address (`SIOCSIFBRDADDR`).
    fn set_broadcast(&mut self, value: Ipv4Addr) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.broadaddr = SockAddr::from(value).into();

            if siocsifbrdaddr(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    /// Returns the netmask (`SIOCGIFNETMASK`).
    fn netmask(&self) -> Result<Ipv4Addr> {
        unsafe {
            let mut req = self.request();

            if siocgifnetmask(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            SockAddr::new(&req.ifru.netmask).map(Into::into)
        }
    }

    /// Sets the netmask (`SIOCSIFNETMASK`).
    fn set_netmask(&mut self, value: Ipv4Addr) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.netmask = SockAddr::from(value).into();

            if siocsifnetmask(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    /// Returns the MTU (`SIOCGIFMTU`).
    fn mtu(&self) -> Result<i32> {
        unsafe {
            let mut req = self.request();

            if siocgifmtu(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(req.ifru.mtu)
        }
    }

    /// Sets the MTU (`SIOCSIFMTU`).
    fn set_mtu(&mut self, value: i32) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.mtu = value;

            if siocsifmtu(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    /// Returns a mutable handle to queue `index`, or `None` if out of range.
    fn queue(&mut self, index: usize) -> Option<&mut Self::Queue> {
        self.queues.get_mut(index)
    }
}
379
380impl AsRawFd for Device {
381 fn as_raw_fd(&self) -> RawFd {
382 self.queues[0].as_raw_fd()
383 }
384}
385
386impl IntoRawFd for Device {
387 fn into_raw_fd(mut self) -> RawFd {
388 let queue = self.queues.swap_remove(0);
390 queue.into_raw_fd()
391 }
392}
393
/// A single queue of a TUN/TAP device: one fd attached via `TUNSETIFF`.
pub struct Queue {
    // The open `/dev/net/tun` fd for this queue.
    tun: Fd,
    // True when packets carry the 4-byte packet-information header
    // (IFF_NO_PI was not set at creation).
    pi_enabled: bool,
}
398
399impl Queue {
400 pub fn has_packet_information(&mut self) -> bool {
401 self.pi_enabled
402 }
403
404 pub fn set_nonblock(&self) -> io::Result<()> {
405 self.tun.set_nonblock()
406 }
407}
408
409impl Read for Queue {
410 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
411 self.tun.read(buf)
412 }
413
414 fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
415 self.tun.read_vectored(bufs)
416 }
417}
418
419impl Write for Queue {
420 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
421 self.tun.write(buf)
422 }
423
424 fn flush(&mut self) -> io::Result<()> {
425 self.tun.flush()
426 }
427
428 fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
429 self.tun.write_vectored(bufs)
430 }
431}
432
433impl AsRawFd for Queue {
434 fn as_raw_fd(&self) -> RawFd {
435 self.tun.as_raw_fd()
436 }
437}
438
439impl IntoRawFd for Queue {
440 fn into_raw_fd(self) -> RawFd {
441 self.tun.into_raw_fd()
442 }
443}
444
445impl From<Layer> for c_short {
446 fn from(layer: Layer) -> Self {
447 match layer {
448 Layer::L2 => IFF_TAP,
449 Layer::L3 => IFF_TUN,
450 }
451 }
452}