1use crate::overlayfs::AtomicU64;
2use crate::overlayfs::HandleData;
3use crate::overlayfs::RealHandle;
4use futures::stream::Iter;
5use rfuse3::raw::prelude::*;
6use rfuse3::*;
7use std::ffi::OsStr;
8use std::io::Error;
9use std::io::ErrorKind;
10use std::num::NonZeroU32;
11use std::sync::Arc;
12use std::sync::atomic::Ordering;
13use std::vec::IntoIter;
14
15use super::Inode;
16use super::OverlayFs;
17use super::utils;
18impl Filesystem for OverlayFs {
19 async fn init(&self, _req: Request) -> Result<ReplyInit> {
21 if self.config.do_import {
22 self.import().await?;
23 }
24 if !self.config.do_import || self.config.writeback {
25 self.writeback.store(true, Ordering::Relaxed);
26 }
27 if !self.config.do_import || self.config.no_open {
28 self.no_open.store(true, Ordering::Relaxed);
29 }
30 if !self.config.do_import || self.config.no_opendir {
31 self.no_opendir.store(true, Ordering::Relaxed);
32 }
33 if !self.config.do_import || self.config.killpriv_v2 {
34 self.killpriv_v2.store(true, Ordering::Relaxed);
35 }
36 if self.config.perfile_dax {
37 self.perfile_dax.store(true, Ordering::Relaxed);
38 }
39
40 Ok(ReplyInit {
41 max_write: NonZeroU32::new(128 * 1024).unwrap(),
42 })
43 }
44
45 async fn destroy(&self, _req: Request) {}
50
51 async fn lookup(&self, req: Request, parent: Inode, name: &OsStr) -> Result<ReplyEntry> {
53 let tmp = name.to_string_lossy().to_string();
54 let result = self.do_lookup(req, parent, tmp.as_str()).await;
55 match result {
56 Ok(e) => Ok(e),
57 Err(err) => Err(err.into()),
58 }
59 }
60
61 async fn forget(&self, _req: Request, inode: Inode, nlookup: u64) {
71 self.forget_one(inode, nlookup).await;
72 }
73
74 async fn getattr(
76 &self,
77 req: Request,
78 inode: Inode,
79 fh: Option<u64>,
80 flags: u32,
81 ) -> Result<ReplyAttr> {
82 if !self.no_open.load(Ordering::Relaxed) {
83 if let Some(h) = fh {
84 let handles = self.handles.lock().await;
85 if let Some(hd) = handles.get(&h) {
86 if let Some(ref rh) = hd.real_handle {
87 let mut rep: ReplyAttr = rh
88 .layer
89 .getattr(req, rh.inode, Some(rh.handle.load(Ordering::Relaxed)), 0)
90 .await?;
91 rep.attr.ino = inode;
92 return Ok(rep);
93 }
94 }
95 }
96 }
97
98 let node: Arc<super::OverlayInode> = self.lookup_node(req, inode, "").await?;
99 let (layer, _, lower_inode) = node.first_layer_inode().await;
100 let mut re = layer.getattr(req, lower_inode, None, flags).await?;
101 re.attr.ino = inode;
102 Ok(re)
103 }
104
105 async fn setattr(
107 &self,
108 req: Request,
109 inode: Inode,
110 fh: Option<u64>,
111 set_attr: SetAttr,
112 ) -> Result<ReplyAttr> {
113 self.upper_layer
115 .as_ref()
116 .cloned()
117 .ok_or_else(|| Error::from_raw_os_error(libc::EROFS))?;
118
119 if !self.no_open.load(Ordering::Relaxed) {
121 if let Some(h) = fh {
122 let handles = self.handles.lock().await;
123 if let Some(hd) = handles.get(&h) {
124 if let Some(ref rhd) = hd.real_handle {
125 if rhd.in_upper_layer {
127 let rep = rhd
128 .layer
129 .setattr(
130 req,
131 rhd.inode,
132 Some(rhd.handle.load(Ordering::Relaxed)),
133 set_attr,
134 )
135 .await?;
136 return Ok(rep);
137 }
138 }
139 }
140 }
141 }
142
143 let mut node = self.lookup_node(req, inode, "").await?;
144
145 if !node.in_upper_layer().await {
146 node = self.copy_node_up(req, node.clone()).await?
147 }
148
149 let (layer, _, real_inode) = node.first_layer_inode().await;
150 layer.setattr(req, real_inode, None, set_attr).await
151 }
152
153 async fn readlink(&self, req: Request, inode: Inode) -> Result<ReplyData> {
155 trace!("READLINK: inode: {}\n", inode);
156
157 let node = self.lookup_node(req, inode, "").await?;
158
159 if node.whiteout.load(Ordering::Relaxed) {
160 return Err(Error::from_raw_os_error(libc::ENOENT).into());
161 }
162
163 let (layer, _, inode) = node.first_layer_inode().await;
164 layer.readlink(req, inode).await
165 }
166
167 async fn symlink(
169 &self,
170 req: Request,
171 parent: Inode,
172 name: &OsStr,
173 link: &OsStr,
174 ) -> Result<ReplyEntry> {
175 let sname = name.to_string_lossy().into_owned().to_owned();
177 let slinkname = link.to_string_lossy().into_owned().to_owned();
178
179 let pnode = self.lookup_node(req, parent, "").await?;
180 self.do_symlink(req, slinkname.as_str(), &pnode, sname.as_str())
181 .await?;
182
183 self.do_lookup(req, parent, sname.as_str())
184 .await
185 .map_err(|e| e.into())
186 }
187
188 async fn mknod(
192 &self,
193 req: Request,
194 parent: Inode,
195 name: &OsStr,
196 mode: u32,
197 rdev: u32,
198 ) -> Result<ReplyEntry> {
199 let sname = name.to_string_lossy().to_string();
200
201 let pnode = self.lookup_node(req, parent, "").await?;
203 if pnode.whiteout.load(Ordering::Relaxed) {
204 return Err(Error::from_raw_os_error(libc::ENOENT).into());
205 }
206
207 self.do_mknod(req, &pnode, sname.as_str(), mode, rdev, 0)
208 .await?;
209 self.do_lookup(req, parent, sname.as_str())
210 .await
211 .map_err(|e| e.into())
212 }
213
214 async fn mkdir(
216 &self,
217 req: Request,
218 parent: Inode,
219 name: &OsStr,
220 mode: u32,
221 umask: u32,
222 ) -> Result<ReplyEntry> {
223 let sname = name.to_string_lossy().to_string();
224
225 let pnode = self.lookup_node(req, parent, "").await?;
227 if pnode.whiteout.load(Ordering::Relaxed) {
228 return Err(Error::from_raw_os_error(libc::ENOENT).into());
229 }
230
231 self.do_mkdir(req, pnode, sname.as_str(), mode, umask)
232 .await?;
233 self.do_lookup(req, parent, sname.as_str())
234 .await
235 .map_err(|e| e.into())
236 }
237
238 async fn unlink(&self, req: Request, parent: Inode, name: &OsStr) -> Result<()> {
240 self.do_rm(req, parent, name, false)
241 .await
242 .map_err(|e| e.into())
243 }
244
245 async fn rmdir(&self, req: Request, parent: Inode, name: &OsStr) -> Result<()> {
247 self.do_rm(req, parent, name, true)
248 .await
249 .map_err(|e| e.into())
250 }
251
252 async fn rename(
254 &self,
255 req: Request,
256 parent: Inode,
257 name: &OsStr,
258 new_parent: Inode,
259 new_name: &OsStr,
260 ) -> Result<()> {
261 self.do_rename(req, parent, name, new_parent, new_name)
262 .await
263 .map_err(|e| e.into())
264 }
265
266 async fn link(
268 &self,
269 req: Request,
270 inode: Inode,
271 new_parent: Inode,
272 new_name: &OsStr,
273 ) -> Result<ReplyEntry> {
274 let node = self.lookup_node(req, inode, "").await?;
275 if node.whiteout.load(Ordering::Relaxed) {
276 return Err(Error::from_raw_os_error(libc::ENOENT).into());
277 }
278
279 let newpnode = self.lookup_node(req, new_parent, "").await?;
280 if newpnode.whiteout.load(Ordering::Relaxed) {
281 return Err(Error::from_raw_os_error(libc::ENOENT).into());
282 }
283 let name = new_name.to_str().unwrap();
284 self.do_link(req, &node, &newpnode, name).await?;
285 self.do_lookup(req, new_parent, name)
286 .await
287 .map_err(|e| e.into())
288 }
289
290 async fn open(&self, req: Request, inode: Inode, flags: u32) -> Result<ReplyOpen> {
305 if self.no_open.load(Ordering::Relaxed) {
306 info!("fuse: open is not supported.");
307 return Err(Error::from_raw_os_error(libc::ENOSYS).into());
308 }
309
310 let readonly: bool = flags
311 & (libc::O_APPEND | libc::O_CREAT | libc::O_TRUNC | libc::O_RDWR | libc::O_WRONLY)
312 as u32
313 == 0;
314 let mut flags: i32 = flags as i32;
316
317 flags |= libc::O_NOFOLLOW;
318
319 if self.config.writeback {
320 if flags & libc::O_ACCMODE == libc::O_WRONLY {
321 flags &= !libc::O_ACCMODE;
322 flags |= libc::O_RDWR;
323 }
324
325 if flags & libc::O_APPEND != 0 {
326 flags &= !libc::O_APPEND;
327 }
328 }
329 let node = self.lookup_node(req, inode, "").await?;
331
332 if node.whiteout.load(Ordering::Relaxed) {
334 return Err(Error::from_raw_os_error(libc::ENOENT).into());
335 }
336
337 if !readonly {
338 self.copy_node_up(req, node.clone()).await?;
340 }
341
342 let (_l, h) = node.open(req, flags as u32, 0).await?;
344
345 let hd = self.next_handle.fetch_add(1, Ordering::Relaxed);
346 let (layer, in_upper_layer, inode) = node.first_layer_inode().await;
347 let handle_data = HandleData {
348 node: node.clone(),
349 real_handle: Some(RealHandle {
350 layer,
351 in_upper_layer,
352 inode,
353 handle: AtomicU64::new(h.fh),
354 }),
355 };
356
357 self.handles.lock().await.insert(hd, Arc::new(handle_data));
358
359 trace!("OPEN: returning handle: {}", hd);
360
361 Ok(ReplyOpen {
362 fh: hd,
363 flags: flags as u32,
364 })
365 }
366
367 async fn read(
373 &self,
374 req: Request,
375 inode: Inode,
376 fh: u64,
377 offset: u64,
378 size: u32,
379 ) -> Result<ReplyData> {
380 let data = self.get_data(req, Some(fh), inode, 0).await?;
381
382 match data.real_handle {
383 None => Err(Error::from_raw_os_error(libc::ENOENT).into()),
384 Some(ref hd) => {
385 hd.layer
386 .read(
387 req,
388 hd.inode,
389 hd.handle.load(Ordering::Relaxed),
390 offset,
391 size,
392 )
393 .await
394 }
395 }
396 }
397
398 #[allow(clippy::too_many_arguments)]
406 async fn write(
407 &self,
408 req: Request,
409 inode: Inode,
410 fh: u64,
411 offset: u64,
412 data: &[u8],
413 write_flags: u32,
414 flags: u32,
415 ) -> Result<ReplyWrite> {
416 let handle_data: Arc<HandleData> = self.get_data(req, Some(fh), inode, flags).await?;
417
418 match handle_data.real_handle {
419 None => Err(Error::from_raw_os_error(libc::ENOENT).into()),
420 Some(ref hd) => {
421 hd.layer
422 .write(
423 req,
424 hd.inode,
425 hd.handle.load(Ordering::Relaxed),
426 offset,
427 data,
428 write_flags,
429 flags,
430 )
431 .await
432 }
433 }
434 }
435
436 async fn statfs(&self, req: Request, inode: Inode) -> Result<ReplyStatFs> {
438 self.do_statvfs(req, inode).await.map_err(|e| e.into())
439 }
440
441 async fn release(
449 &self,
450 req: Request,
451 _inode: Inode,
452 fh: u64,
453 flags: u32,
454 lock_owner: u64,
455 flush: bool,
456 ) -> Result<()> {
457 if self.no_open.load(Ordering::Relaxed) {
458 info!("fuse: release is not supported.");
459 return Err(Error::from_raw_os_error(libc::ENOSYS).into());
460 }
461
462 if let Some(hd) = self.handles.lock().await.get(&fh) {
463 let rh = if let Some(ref h) = hd.real_handle {
464 h
465 } else {
466 return Err(Error::other("no handle").into());
467 };
468 let real_handle = rh.handle.load(Ordering::Relaxed);
469 let real_inode = rh.inode;
470 rh.layer
471 .release(req, real_inode, real_handle, flags, lock_owner, flush)
472 .await?;
473 }
474
475 self.handles.lock().await.remove(&fh);
476
477 Ok(())
478 }
479
480 async fn fsync(&self, req: Request, inode: Inode, fh: u64, datasync: bool) -> Result<()> {
483 self.do_fsync(req, inode, datasync, fh, false)
484 .await
485 .map_err(|e| e.into())
486 }
487
488 async fn setxattr(
490 &self,
491 req: Request,
492 inode: Inode,
493 name: &OsStr,
494 value: &[u8],
495 flags: u32,
496 position: u32,
497 ) -> Result<()> {
498 let node = self.lookup_node(req, inode, "").await?;
499
500 if node.whiteout.load(Ordering::Relaxed) {
501 return Err(Error::from_raw_os_error(libc::ENOENT).into());
502 }
503
504 if !node.in_upper_layer().await {
505 self.copy_node_up(req, node.clone()).await?;
507 }
508
509 let (layer, _, real_inode) = node.first_layer_inode().await;
510
511 layer
512 .setxattr(req, real_inode, name, value, flags, position)
513 .await
514 }
515
516 async fn getxattr(
520 &self,
521 req: Request,
522 inode: Inode,
523 name: &OsStr,
524 size: u32,
525 ) -> Result<ReplyXAttr> {
526 let node = self.lookup_node(req, inode, "").await?;
527
528 if node.whiteout.load(Ordering::Relaxed) {
529 return Err(Error::from_raw_os_error(libc::ENOENT).into());
530 }
531
532 let (layer, real_inode) = self.find_real_inode(inode).await?;
533
534 layer.getxattr(req, real_inode, name, size).await
535 }
536
537 async fn listxattr(&self, req: Request, inode: Inode, size: u32) -> Result<ReplyXAttr> {
542 let node = self.lookup_node(req, inode, "").await?;
543 if node.whiteout.load(Ordering::Relaxed) {
544 return Err(Error::from_raw_os_error(libc::ENOENT).into());
545 }
546 let (layer, real_inode) = self.find_real_inode(inode).await?;
547 layer.listxattr(req, real_inode, size).await
548 }
549
550 async fn removexattr(&self, req: Request, inode: Inode, name: &OsStr) -> Result<()> {
552 let node = self.lookup_node(req, inode, "").await?;
553
554 if node.whiteout.load(Ordering::Relaxed) {
555 return Err(Error::from_raw_os_error(libc::ENOENT).into());
556 }
557
558 if !node.in_upper_layer().await {
559 self.copy_node_up(req, node.clone()).await?;
561 }
562
563 let (layer, _, ino) = node.first_layer_inode().await;
564 layer.removexattr(req, ino, name).await
565
566 }
568
569 async fn flush(&self, req: Request, inode: Inode, fh: u64, lock_owner: u64) -> Result<()> {
582 if self.no_open.load(Ordering::Relaxed) {
583 return Err(Error::from_raw_os_error(libc::ENOSYS).into());
584 }
585
586 let node = self.lookup_node(req, inode, "").await?;
587
588 if node.whiteout.load(Ordering::Relaxed) {
589 return Err(Error::from_raw_os_error(libc::ENOENT).into());
590 }
591
592 let (layer, real_inode, real_handle) = self.find_real_info_from_handle(fh).await?;
593
594 layer.flush(req, real_inode, real_handle, lock_owner).await
597 }
598
599 async fn opendir(&self, req: Request, inode: Inode, flags: u32) -> Result<ReplyOpen> {
607 if self.no_opendir.load(Ordering::Relaxed) {
608 info!("fuse: opendir is not supported.");
609 return Err(Error::from_raw_os_error(libc::ENOSYS).into());
610 }
611
612 let node = self.lookup_node(req, inode, ".").await?;
614
615 if node.whiteout.load(Ordering::Relaxed) {
616 return Err(Error::from_raw_os_error(libc::ENOENT).into());
617 }
618
619 let st = node.stat64(req).await?;
620 if !utils::is_dir(&st.attr.kind) {
621 return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
622 }
623
624 let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
625
626 self.handles.lock().await.insert(
627 handle,
628 Arc::new(HandleData {
629 node: Arc::clone(&node),
630 real_handle: None,
631 }),
632 );
633
634 Ok(ReplyOpen { fh: handle, flags })
635 }
636
637 type DirEntryStream<'a>
639 = Iter<IntoIter<Result<DirectoryEntry>>>
640 where
641 Self: 'a;
642 type DirEntryPlusStream<'a>
644 = Iter<IntoIter<Result<DirectoryEntryPlus>>>
645 where
646 Self: 'a;
647
648 async fn readdir(
652 &self,
653 req: Request,
654 parent: Inode,
655 fh: u64,
656 offset: i64,
657 ) -> Result<ReplyDirectory<Self::DirEntryStream<'_>>> {
658 if self.config.no_readdir {
659 info!("fuse: readdir is not supported.");
660 return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
661 }
662 let entries = self
663 .do_readdir(req, parent, fh, offset.try_into().unwrap())
664 .await?;
665 Ok(ReplyDirectory { entries })
666 }
667
668 async fn readdirplus(
671 &self,
672 req: Request,
673 parent: Inode,
674 fh: u64,
675 offset: u64,
676 _lock_owner: u64,
677 ) -> Result<ReplyDirectoryPlus<Self::DirEntryPlusStream<'_>>> {
678 if self.config.no_readdir {
679 info!("fuse: readdir is not supported.");
680 return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
681 }
682 let entries = self.do_readdirplus(req, parent, fh, offset).await?;
683 Ok(ReplyDirectoryPlus { entries })
684 }
685 async fn releasedir(&self, _req: Request, _inode: Inode, fh: u64, _flags: u32) -> Result<()> {
690 if self.no_opendir.load(Ordering::Relaxed) {
691 info!("fuse: releasedir is not supported.");
692 return Err(Error::from_raw_os_error(libc::ENOSYS).into());
693 }
694
695 self.handles.lock().await.remove(&fh);
696 Ok(())
697 }
698
699 async fn fsyncdir(&self, req: Request, inode: Inode, fh: u64, datasync: bool) -> Result<()> {
704 self.do_fsync(req, inode, datasync, fh, true)
705 .await
706 .map_err(|e| e.into())
707 }
708 async fn access(&self, req: Request, inode: Inode, mask: u32) -> Result<()> {
712 let node = self.lookup_node(req, inode, "").await?;
713
714 if node.whiteout.load(Ordering::Relaxed) {
715 return Err(Error::from_raw_os_error(libc::ENOENT).into());
716 }
717
718 let (layer, real_inode) = self.find_real_inode(inode).await?;
719 layer.access(req, real_inode, mask).await
720 }
721
722 async fn create(
739 &self,
740 req: Request,
741 parent: Inode,
742 name: &OsStr,
743 mode: u32,
744 flags: u32,
745 ) -> Result<ReplyCreated> {
746 let pnode = self.lookup_node(req, parent, "").await?;
748 if pnode.whiteout.load(Ordering::Relaxed) {
749 return Err(Error::from_raw_os_error(libc::ENOENT).into());
750 }
751
752 let mut flags: i32 = flags as i32;
753 flags |= libc::O_NOFOLLOW;
754 flags &= !libc::O_DIRECT;
755 if self.config.writeback {
756 if flags & libc::O_ACCMODE == libc::O_WRONLY {
757 flags &= !libc::O_ACCMODE;
758 flags |= libc::O_RDWR;
759 }
760
761 if flags & libc::O_APPEND != 0 {
762 flags &= !libc::O_APPEND;
763 }
764 }
765
766 let final_handle = self
767 .do_create(req, &pnode, name, mode, flags.try_into().unwrap())
768 .await?;
769 let entry = self.do_lookup(req, parent, name.to_str().unwrap()).await?;
770 let fh = final_handle
771 .ok_or_else(|| std::io::Error::new(ErrorKind::NotFound, "Handle not found"))?;
772 Ok(ReplyCreated {
773 ttl: entry.ttl,
774 attr: entry.attr,
775 generation: entry.generation,
776 fh,
777 flags: flags.try_into().unwrap(),
778 })
779 }
780
781 async fn batch_forget(&self, _req: Request, inodes: &[(Inode, u64)]) {
783 for inode in inodes {
784 self.forget_one(inode.0, inode.1).await;
785 }
786 }
787
788 async fn fallocate(
795 &self,
796 req: Request,
797 inode: Inode,
798 fh: u64,
799 offset: u64,
800 length: u64,
801 mode: u32,
802 ) -> Result<()> {
803 let data = self
805 .get_data(req, Some(fh), inode, libc::O_RDONLY as u32)
806 .await?;
807
808 match data.real_handle {
809 None => Err(Error::from_raw_os_error(libc::ENOENT).into()),
810 Some(ref rhd) => {
811 if !rhd.in_upper_layer {
812 return Err(Error::from_raw_os_error(libc::EROFS).into());
814 }
815 rhd.layer
816 .fallocate(
817 req,
818 rhd.inode,
819 rhd.handle.load(Ordering::Relaxed),
820 offset,
821 length,
822 mode,
823 )
824 .await
825 }
826 }
827 }
828
829 async fn lseek(
831 &self,
832 req: Request,
833 inode: Inode,
834 fh: u64,
835 offset: u64,
836 whence: u32,
837 ) -> Result<ReplyLSeek> {
838 let node = self.lookup_node(req, inode, "").await?;
841
842 if node.whiteout.load(Ordering::Relaxed) {
843 return Err(Error::from_raw_os_error(libc::ENOENT).into());
844 }
845
846 let st = node.stat64(req).await?;
847 if utils::is_dir(&st.attr.kind) {
848 error!("lseek on directory");
849 return Err(Error::from_raw_os_error(libc::EINVAL).into());
850 }
851
852 let (layer, real_inode, real_handle) = self.find_real_info_from_handle(fh).await?;
853 layer
854 .lseek(req, real_inode, real_handle, offset, whence)
855 .await
856 }
857
858 async fn interrupt(&self, _req: Request, _unique: u64) -> Result<()> {
859 Ok(())
860 }
861}
#[cfg(test)]
mod tests {
    use std::{ffi::OsString, sync::Arc};

    use rfuse3::{MountOptions, raw::Session};
    use tokio::signal;

    use crate::{
        overlayfs::{OverlayFs, config::Config},
        passthrough::{new_passthroughfs_layer, newlogfs::LoggingFileSystem},
    };

    /// Manual smoke test: mounts a one-lower-layer overlayfs (wrapped in a
    /// logging filesystem) at a hard-coded local path and blocks until the
    /// mount task finishes or ctrl-c is pressed. Ignored by default because
    /// it requires the author's local directories and FUSE privileges.
    #[tokio::test]
    #[ignore]
    async fn test_a_ovlfs() {
        env_logger::Builder::new()
            .filter_level(log::LevelFilter::Trace)
            .init();

        // Hard-coded local paths for the manual run.
        let mountpoint = "/home/luxian/megatest/true_temp".to_string();
        let lowerdir = vec!["/home/luxian/github/buck2-rust-third-party".to_string()];
        let upperdir = "/home/luxian/upper".to_string();

        // Build one passthrough layer per lower directory.
        let mut lower_layers = Vec::new();
        for dir in &lowerdir {
            let fs = new_passthroughfs_layer(dir).await.unwrap();
            lower_layers.push(Arc::new(fs));
        }
        let upper_layer = Arc::new(new_passthroughfs_layer(&upperdir).await.unwrap());

        let config = Config {
            mountpoint: mountpoint.clone(),
            do_import: true,
            ..Default::default()
        };

        let overlayfs = OverlayFs::new(Some(upper_layer), lower_layers, config, 1).unwrap();
        let logfs = LoggingFileSystem::new(overlayfs);
        let mount_path: OsString = OsString::from(mountpoint);

        let uid = unsafe { libc::getuid() };
        let gid = unsafe { libc::getgid() };

        let mut mount_options = MountOptions::default();
        mount_options.force_readdir_plus(true).uid(uid).gid(gid);

        // Constant flag kept from the original: always take the
        // unprivileged mount path.
        let not_unprivileged = false;

        let session = Session::new(mount_options);
        let mut mount_handle: rfuse3::raw::MountHandle = if not_unprivileged {
            session.mount(logfs, mount_path).await.unwrap()
        } else {
            session
                .mount_with_unprivileged(logfs, mount_path)
                .await
                .unwrap()
        };

        let handle = &mut mount_handle;

        // Run until the mount task ends on its own or the user interrupts.
        tokio::select! {
            res = handle => res.unwrap(),
            _ = signal::ctrl_c() => {
                mount_handle.unmount().await.unwrap()
            }
        }
    }
}