legomushroom_tun/platform/linux/
device.rs1use std::ffi::{CStr, CString};
16use std::io::{self, Read, Write};
17use std::mem;
18use std::net::Ipv4Addr;
19use std::os::unix::io::{AsRawFd, IntoRawFd, RawFd};
20use std::ptr;
21use std::sync::Arc;
22
23use libc;
24use libc::{c_char, c_short};
25use libc::{AF_INET, O_RDWR, SOCK_DGRAM};
26
27use crate::configuration::{Configuration, Layer};
28use crate::device::Device as D;
29use crate::error::*;
30use crate::platform::linux::sys::*;
31use crate::platform::posix::{self, Fd, SockAddr};
32
/// A TUN/TAP device on Linux, created through `/dev/net/tun`.
pub struct Device {
    // Interface name as read back from the kernel after TUNSETIFF
    // (the kernel may expand patterns like "tun%d").
    name: String,
    // The I/O queue: the fd obtained from `/dev/net/tun`.
    queue: Queue,
    // AF_INET datagram socket used only for SIOCGIF*/SIOCSIF* ioctls.
    ctl: Fd,
}
39
40impl Device {
41 pub fn split(self) -> (posix::Reader, posix::Writer) {
43 let fd = Arc::new(self.queue.tun);
44 return (posix::Reader(fd.clone()), posix::Writer(fd.clone()));
45 }
46
47 pub fn new(config: &Configuration) -> Result<Self> {
49 let mut device = unsafe {
50 let dev = match config.name.as_ref() {
51 Some(name) => {
52 let name = CString::new(name.clone())?;
53
54 if name.as_bytes_with_nul().len() > IFNAMSIZ {
55 return Err(Error::NameTooLong);
56 }
57
58 Some(name)
59 }
60
61 None => None,
62 };
63
64 let mut req: ifreq = mem::zeroed();
65
66 if let Some(dev) = dev.as_ref() {
67 ptr::copy_nonoverlapping(
68 dev.as_ptr() as *const c_char,
69 req.ifrn.name.as_mut_ptr(),
70 dev.as_bytes().len(),
71 );
72 }
73
74 let device_type: c_short = config.layer.unwrap_or(Layer::L3).into();
75
76 let queues_num = config.queues.unwrap_or(1);
77 if queues_num < 1 {
78 return Err(Error::InvalidQueuesNumber);
79 }
80
81 req.ifru.flags = device_type
82 | if config.platform.packet_information {
83 0
84 } else {
85 IFF_NO_PI
86 }
87 | if queues_num > 1 { IFF_MULTI_QUEUE } else { 0 };
88
89 let tun = Fd::new(libc::open(b"/dev/net/tun\0".as_ptr() as *const _, O_RDWR))
90 .map_err(|_| io::Error::last_os_error())?;
91
92 if tunsetiff(tun.0, &mut req as *mut _ as *mut _) < 0 {
93 return Err(io::Error::last_os_error().into());
94 }
95
96 let queue = Queue {
97 tun,
98 pi_enabled: config.platform.packet_information,
99 };
100
101 let ctl = Fd::new(libc::socket(AF_INET, SOCK_DGRAM, 0))
102 .map_err(|_| io::Error::last_os_error())?;
103
104 Device {
105 name: CStr::from_ptr(req.ifrn.name.as_ptr())
106 .to_string_lossy()
107 .into(),
108 queue,
109 ctl: ctl,
110 }
111 };
112
113 device.configure(&config)?;
114
115 Ok(device)
116 }
117
118 unsafe fn request(&self) -> ifreq {
120 let mut req: ifreq = mem::zeroed();
121 ptr::copy_nonoverlapping(
122 self.name.as_ptr() as *const c_char,
123 req.ifrn.name.as_mut_ptr(),
124 self.name.len(),
125 );
126
127 req
128 }
129
130 pub fn persist(&mut self) -> Result<()> {
132 unsafe {
133 if tunsetpersist(self.as_raw_fd(), &1) < 0 {
134 Err(io::Error::last_os_error().into())
135 } else {
136 Ok(())
137 }
138 }
139 }
140
141 pub fn user(&mut self, value: i32) -> Result<()> {
143 unsafe {
144 if tunsetowner(self.as_raw_fd(), &value) < 0 {
145 Err(io::Error::last_os_error().into())
146 } else {
147 Ok(())
148 }
149 }
150 }
151
152 pub fn group(&mut self, value: i32) -> Result<()> {
154 unsafe {
155 if tunsetgroup(self.as_raw_fd(), &value) < 0 {
156 Err(io::Error::last_os_error().into())
157 } else {
158 Ok(())
159 }
160 }
161 }
162
163 pub fn has_packet_information(&mut self) -> bool {
165 self.queue.has_packet_information()
166 }
167
168 pub fn set_nonblock(&self) -> io::Result<()> {
170 self.queue.set_nonblock()
171 }
172}
173
174impl Read for Device {
175 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
176 self.queue.read(buf)
177 }
178
179 fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
180 self.queue.read_vectored(bufs)
181 }
182}
183
184impl Write for Device {
185 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
186 self.queue.write(buf)
187 }
188
189 fn flush(&mut self) -> io::Result<()> {
190 self.queue.flush()
191 }
192
193 fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
194 self.queue.write_vectored(bufs)
195 }
196}
197
// Implementation of the cross-platform `Device` trait. Every getter/setter
// below drives the interface through an ioctl on the `ctl` socket, using an
// `ifreq` pre-filled with the interface name by `self.request()`.
impl D for Device {
    type Queue = Queue;

    // The interface name, as last read from / written to the kernel.
    fn name(&self) -> &str {
        &self.name
    }

    // Rename the interface via SIOCSIFNAME, then record the new name locally.
    fn set_name(&mut self, value: &str) -> Result<()> {
        unsafe {
            let name = CString::new(value)?;

            // IFNAMSIZ includes the trailing NUL byte.
            if name.as_bytes_with_nul().len() > IFNAMSIZ {
                return Err(Error::NameTooLong);
            }

            let mut req = self.request();
            // `req` is zeroed beyond the copied bytes, so the new name
            // stays NUL-terminated.
            ptr::copy_nonoverlapping(
                name.as_ptr() as *const c_char,
                req.ifru.newname.as_mut_ptr(),
                value.len(),
            );

            if siocsifname(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            self.name = value.into();

            Ok(())
        }
    }

    // Bring the interface up or down via a read-modify-write of its flags
    // (SIOCGIFFLAGS followed by SIOCSIFFLAGS).
    fn enabled(&mut self, value: bool) -> Result<()> {
        unsafe {
            let mut req = self.request();

            if siocgifflags(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            if value {
                req.ifru.flags |= IFF_UP | IFF_RUNNING;
            } else {
                // NOTE(review): only IFF_UP is cleared here while enabling
                // sets both IFF_UP and IFF_RUNNING — confirm the asymmetry
                // is intentional.
                req.ifru.flags &= !IFF_UP;
            }

            if siocsifflags(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    // Read the interface's IPv4 address (SIOCGIFADDR).
    fn address(&self) -> Result<Ipv4Addr> {
        unsafe {
            let mut req = self.request();

            if siocgifaddr(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            SockAddr::new(&req.ifru.addr).map(Into::into)
        }
    }

    // Set the interface's IPv4 address (SIOCSIFADDR).
    fn set_address(&mut self, value: Ipv4Addr) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.addr = SockAddr::from(value).into();

            if siocsifaddr(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    // Read the point-to-point destination address (SIOCGIFDSTADDR).
    fn destination(&self) -> Result<Ipv4Addr> {
        unsafe {
            let mut req = self.request();

            if siocgifdstaddr(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            SockAddr::new(&req.ifru.dstaddr).map(Into::into)
        }
    }

    // Set the point-to-point destination address (SIOCSIFDSTADDR).
    fn set_destination(&mut self, value: Ipv4Addr) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.dstaddr = SockAddr::from(value).into();

            if siocsifdstaddr(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    // Read the broadcast address (SIOCGIFBRDADDR).
    fn broadcast(&self) -> Result<Ipv4Addr> {
        unsafe {
            let mut req = self.request();

            if siocgifbrdaddr(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            SockAddr::new(&req.ifru.broadaddr).map(Into::into)
        }
    }

    // Set the broadcast address (SIOCSIFBRDADDR).
    fn set_broadcast(&mut self, value: Ipv4Addr) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.broadaddr = SockAddr::from(value).into();

            if siocsifbrdaddr(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    // Read the netmask (SIOCGIFNETMASK).
    fn netmask(&self) -> Result<Ipv4Addr> {
        unsafe {
            let mut req = self.request();

            if siocgifnetmask(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            SockAddr::new(&req.ifru.netmask).map(Into::into)
        }
    }

    // Set the netmask (SIOCSIFNETMASK).
    fn set_netmask(&mut self, value: Ipv4Addr) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.netmask = SockAddr::from(value).into();

            if siocsifnetmask(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    // Read the MTU (SIOCGIFMTU).
    fn mtu(&self) -> Result<i32> {
        unsafe {
            let mut req = self.request();

            if siocgifmtu(self.ctl.as_raw_fd(), &mut req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(req.ifru.mtu)
        }
    }

    // Set the MTU (SIOCSIFMTU).
    fn set_mtu(&mut self, value: i32) -> Result<()> {
        unsafe {
            let mut req = self.request();
            req.ifru.mtu = value;

            if siocsifmtu(self.ctl.as_raw_fd(), &req) < 0 {
                return Err(io::Error::last_os_error().into());
            }

            Ok(())
        }
    }

    // There is only one queue; any index returns it.
    // NOTE(review): `_index` is ignored even when multi-queue was requested
    // at creation — confirm this is intended.
    fn queue(&mut self, _index: usize) -> Option<&mut Self::Queue> {
        return Some(&mut self.queue);
    }
}
381
382impl AsRawFd for Device {
383 fn as_raw_fd(&self) -> RawFd {
384 self.queue.as_raw_fd()
385 }
386}
387
388impl IntoRawFd for Device {
389 fn into_raw_fd(self) -> RawFd {
390 return self.queue.into_raw_fd();
391 }
392}
393
/// A single TUN I/O queue.
pub struct Queue {
    // File descriptor obtained from `/dev/net/tun` after TUNSETIFF.
    tun: Fd,
    // True when IFF_NO_PI was NOT set, i.e. packets carry the
    // packet-information header.
    pi_enabled: bool,
}
398
399impl Queue {
400 pub fn has_packet_information(&mut self) -> bool {
401 self.pi_enabled
402 }
403
404 pub fn set_nonblock(&self) -> io::Result<()> {
405 self.tun.set_nonblock()
406 }
407}
408
409impl Read for Queue {
410 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
411 self.tun.read(buf)
412 }
413
414 fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
415 self.tun.read_vectored(bufs)
416 }
417}
418
419impl Write for Queue {
420 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
421 self.tun.write(buf)
422 }
423
424 fn flush(&mut self) -> io::Result<()> {
425 self.tun.flush()
426 }
427
428 fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
429 self.tun.write_vectored(bufs)
430 }
431}
432
433impl AsRawFd for Queue {
434 fn as_raw_fd(&self) -> RawFd {
435 self.tun.as_raw_fd()
436 }
437}
438
439impl IntoRawFd for Queue {
440 fn into_raw_fd(self) -> RawFd {
441 self.tun.as_raw_fd()
442 }
443}
444
445impl Into<c_short> for Layer {
446 fn into(self) -> c_short {
447 match self {
448 Layer::L2 => IFF_TAP,
449 Layer::L3 => IFF_TUN,
450 }
451 }
452}