use crate::overlayfs::AtomicU64;
use crate::overlayfs::HandleData;
use crate::overlayfs::RealHandle;
use futures::stream::Iter;
use log::{error, info, trace};
use rfuse3::raw::prelude::*;
use rfuse3::*;
use std::ffi::OsStr;
use std::io::Error;
use std::io::ErrorKind;
use std::num::NonZeroU32;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::vec::IntoIter;

use super::Inode;
use super::OverlayFs;
use super::utils;

impl Filesystem for OverlayFs {
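    // Overlay initialisation: optionally import the layer tree, then record the
    // feature switches (writeback, no_open, no_opendir, killpriv_v2, per-file DAX).
    // Each switch is enabled when requested in the config, or unconditionally
    // when do_import is disabled.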
    async fn init(&self, _req: Request) -> Result<ReplyInit> {
        if self.config.do_import {
            self.import().await?;
        }
        if !self.config.do_import || self.config.writeback {
            self.writeback.store(true, Ordering::Relaxed);
        }
        if !self.config.do_import || self.config.no_open {
            self.no_open.store(true, Ordering::Relaxed);
        }
        if !self.config.do_import || self.config.no_opendir {
            self.no_opendir.store(true, Ordering::Relaxed);
        }
        if !self.config.do_import || self.config.killpriv_v2 {
            self.killpriv_v2.store(true, Ordering::Relaxed);
        }
        if self.config.perfile_dax {
            self.perfile_dax.store(true, Ordering::Relaxed);
        }

        Ok(ReplyInit {
            max_write: NonZeroU32::new(128 * 1024).unwrap(),
        })
    }

    async fn destroy(&self, _req: Request) {}

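    // Name resolution is delegated to do_lookup, which resolves the entry against
    // the merged overlay view.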
    async fn lookup(&self, req: Request, parent: Inode, name: &OsStr) -> Result<ReplyEntry> {
        let tmp = name.to_string_lossy().to_string();
        let result = self.do_lookup(req, parent, tmp.as_str()).await;
        match result {
            Ok(e) => Ok(e),
            Err(err) => Err(err.into()),
        }
    }

    async fn forget(&self, _req: Request, inode: Inode, nlookup: u64) {
        self.forget_one(inode, nlookup).await;
    }

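    // getattr prefers the real handle recorded for `fh` when open passthrough is
    // enabled; otherwise it resolves the overlay node and asks its first layer.
    // Either way the overlay inode number is patched back into the reply.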
    async fn getattr(
        &self,
        req: Request,
        inode: Inode,
        fh: Option<u64>,
        flags: u32,
    ) -> Result<ReplyAttr> {
        if !self.no_open.load(Ordering::Relaxed) {
            if let Some(h) = fh {
                let handles = self.handles.lock().await;
                if let Some(hd) = handles.get(&h) {
                    if let Some(ref rh) = hd.real_handle {
                        let mut rep: ReplyAttr = rh
                            .layer
                            .getattr(req, rh.inode, Some(rh.handle.load(Ordering::Relaxed)), 0)
                            .await?;
                        rep.attr.ino = inode;
                        return Ok(rep);
                    }
                }
            }
        }

        let node: Arc<super::OverlayInode> = self.lookup_node(req, inode, "").await?;
        let (layer, _, lower_inode) = node.first_layer_inode().await;
        let mut re = layer.getattr(req, lower_inode, None, flags).await?;
        re.attr.ino = inode;
        Ok(re)
    }

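    // setattr requires a writable upper layer (EROFS otherwise). An upper-layer
    // file handle is used directly when available; otherwise the node is copied
    // up first and the change is applied on its first layer.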
    async fn setattr(
        &self,
        req: Request,
        inode: Inode,
        fh: Option<u64>,
        set_attr: SetAttr,
    ) -> Result<ReplyAttr> {
        self.upper_layer
            .as_ref()
            .cloned()
            .ok_or_else(|| Error::from_raw_os_error(libc::EROFS))?;

        if !self.no_open.load(Ordering::Relaxed) {
            if let Some(h) = fh {
                let handles = self.handles.lock().await;
                if let Some(hd) = handles.get(&h) {
                    if let Some(ref rhd) = hd.real_handle {
                        if rhd.in_upper_layer {
                            let mut rep = rhd
                                .layer
                                .setattr(
                                    req,
                                    rhd.inode,
                                    Some(rhd.handle.load(Ordering::Relaxed)),
                                    set_attr,
                                )
                                .await?;
                            rep.attr.ino = inode;
                            return Ok(rep);
                        }
                    }
                }
            }
        }

        let mut node = self.lookup_node(req, inode, "").await?;

        if !node.in_upper_layer().await {
            node = self.copy_node_up(req, node.clone()).await?
        }

        let (layer, _, real_inode) = node.first_layer_inode().await;
        let mut rep = layer.setattr(req, real_inode, None, set_attr).await?;
        rep.attr.ino = inode;
        Ok(rep)
    }

    async fn readlink(&self, req: Request, inode: Inode) -> Result<ReplyData> {
        trace!("READLINK: inode: {inode}\n");

        let node = self.lookup_node(req, inode, "").await?;

        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        let (layer, _, inode) = node.first_layer_inode().await;
        layer.readlink(req, inode).await
    }

    async fn symlink(
        &self,
        req: Request,
        parent: Inode,
        name: &OsStr,
        link: &OsStr,
    ) -> Result<ReplyEntry> {
        let sname = name.to_string_lossy().into_owned();
        let slinkname = link.to_string_lossy().into_owned();

        let pnode = self.lookup_node(req, parent, "").await?;
        self.do_symlink(req, slinkname.as_str(), &pnode, sname.as_str())
            .await?;

        self.do_lookup(req, parent, sname.as_str())
            .await
            .map_err(|e| e.into())
    }

    async fn mknod(
        &self,
        req: Request,
        parent: Inode,
        name: &OsStr,
        mode: u32,
        rdev: u32,
    ) -> Result<ReplyEntry> {
        let sname = name.to_string_lossy().to_string();

        let pnode = self.lookup_node(req, parent, "").await?;
        if pnode.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        self.do_mknod(req, &pnode, sname.as_str(), mode, rdev, 0)
            .await?;
        self.do_lookup(req, parent, sname.as_str())
            .await
            .map_err(|e| e.into())
    }

    async fn mkdir(
        &self,
        req: Request,
        parent: Inode,
        name: &OsStr,
        mode: u32,
        umask: u32,
    ) -> Result<ReplyEntry> {
        let sname = name.to_string_lossy().to_string();

        let pnode = self.lookup_node(req, parent, "").await?;
        if pnode.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        self.do_mkdir(req, pnode, sname.as_str(), mode, umask)
            .await?;
        self.do_lookup(req, parent, sname.as_str())
            .await
            .map_err(|e| e.into())
    }

    async fn unlink(&self, req: Request, parent: Inode, name: &OsStr) -> Result<()> {
        self.do_rm(req, parent, name, false)
            .await
            .map_err(|e| e.into())
    }

    async fn rmdir(&self, req: Request, parent: Inode, name: &OsStr) -> Result<()> {
        self.do_rm(req, parent, name, true)
            .await
            .map_err(|e| e.into())
    }

    async fn rename(
        &self,
        req: Request,
        parent: Inode,
        name: &OsStr,
        new_parent: Inode,
        new_name: &OsStr,
    ) -> Result<()> {
        self.do_rename(req, parent, name, new_parent, new_name)
            .await
            .map_err(|e| e.into())
    }

    async fn link(
        &self,
        req: Request,
        inode: Inode,
        new_parent: Inode,
        new_name: &OsStr,
    ) -> Result<ReplyEntry> {
        let node = self.lookup_node(req, inode, "").await?;
        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        let newpnode = self.lookup_node(req, new_parent, "").await?;
        if newpnode.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        // Convert lossily rather than unwrapping so a non-UTF-8 name cannot panic.
        let new_name = new_name.to_string_lossy().to_string();
        self.do_link(req, &node, &newpnode, new_name.as_str()).await?;
        self.do_lookup(req, new_parent, new_name.as_str())
            .await
            .map_err(|e| e.into())
    }

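    // open resolves the node, copies it up when the flags imply a write, opens it
    // on its first layer, and records the resulting real handle in the handle
    // table under a fresh overlay handle id. With writeback caching, O_WRONLY is
    // widened to O_RDWR and O_APPEND is cleared so the kernel cache can read back
    // pages and position appends itself.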
    async fn open(&self, req: Request, inode: Inode, flags: u32) -> Result<ReplyOpen> {
        if self.no_open.load(Ordering::Relaxed) {
            info!("fuse: open is not supported.");
            return Err(Error::from_raw_os_error(libc::ENOSYS).into());
        }

        let readonly: bool = flags
            & (libc::O_APPEND | libc::O_CREAT | libc::O_TRUNC | libc::O_RDWR | libc::O_WRONLY)
                as u32
            == 0;
        let mut flags: i32 = flags as i32;

        flags |= libc::O_NOFOLLOW;

        if self.config.writeback {
            if flags & libc::O_ACCMODE == libc::O_WRONLY {
                flags &= !libc::O_ACCMODE;
                flags |= libc::O_RDWR;
            }

            if flags & libc::O_APPEND != 0 {
                flags &= !libc::O_APPEND;
            }
        }

        let node = self.lookup_node(req, inode, "").await?;

        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        if !readonly {
            self.copy_node_up(req, node.clone()).await?;
        }

        let (_l, h) = node.open(req, flags as u32, 0).await?;

        let hd = self.next_handle.fetch_add(1, Ordering::Relaxed);
        let (layer, in_upper_layer, inode) = node.first_layer_inode().await;
        let handle_data = HandleData {
            node: node.clone(),
            real_handle: Some(RealHandle {
                layer,
                in_upper_layer,
                inode,
                handle: AtomicU64::new(h.fh),
            }),
        };

        self.handles.lock().await.insert(hd, Arc::new(handle_data));

        trace!("OPEN: returning handle: {hd}");

        Ok(ReplyOpen {
            fh: hd,
            flags: flags as u32,
        })
    }

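    // read forwards to the real handle recorded when the file was opened.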
    async fn read(
        &self,
        req: Request,
        inode: Inode,
        fh: u64,
        offset: u64,
        size: u32,
    ) -> Result<ReplyData> {
        let data = self.get_data(req, Some(fh), inode, 0).await?;

        match data.real_handle {
            None => Err(Error::from_raw_os_error(libc::ENOENT).into()),
            Some(ref hd) => {
                hd.layer
                    .read(
                        req,
                        hd.inode,
                        hd.handle.load(Ordering::Relaxed),
                        offset,
                        size,
                    )
                    .await
            }
        }
    }

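    // write forwards to the real handle as well; copy-up already happened in
    // open/create for writable opens.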
    #[allow(clippy::too_many_arguments)]
    async fn write(
        &self,
        req: Request,
        inode: Inode,
        fh: u64,
        offset: u64,
        data: &[u8],
        write_flags: u32,
        flags: u32,
    ) -> Result<ReplyWrite> {
        let handle_data: Arc<HandleData> = self.get_data(req, Some(fh), inode, flags).await?;

        match handle_data.real_handle {
            None => Err(Error::from_raw_os_error(libc::ENOENT).into()),
            Some(ref hd) => {
                hd.layer
                    .write(
                        req,
                        hd.inode,
                        hd.handle.load(Ordering::Relaxed),
                        offset,
                        data,
                        write_flags,
                        flags,
                    )
                    .await
            }
        }
    }

    async fn statfs(&self, req: Request, inode: Inode) -> Result<ReplyStatFs> {
        self.do_statvfs(req, inode).await.map_err(|e| e.into())
    }

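    // release closes the real handle on its layer (when present) and then drops
    // the overlay handle table entry.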
    async fn release(
        &self,
        req: Request,
        _inode: Inode,
        fh: u64,
        flags: u32,
        lock_owner: u64,
        flush: bool,
    ) -> Result<()> {
        if self.no_open.load(Ordering::Relaxed) {
            info!("fuse: release is not supported.");
            return Err(Error::from_raw_os_error(libc::ENOSYS).into());
        }

        if let Some(hd) = self.handles.lock().await.get(&fh) {
            let rh = if let Some(ref h) = hd.real_handle {
                h
            } else {
                return Err(Error::other("no handle").into());
            };
            let real_handle = rh.handle.load(Ordering::Relaxed);
            let real_inode = rh.inode;
            rh.layer
                .release(req, real_inode, real_handle, flags, lock_owner, flush)
                .await?;
        }

        self.handles.lock().await.remove(&fh);

        Ok(())
    }

    async fn fsync(&self, req: Request, inode: Inode, fh: u64, datasync: bool) -> Result<()> {
        self.do_fsync(req, inode, datasync, fh, false)
            .await
            .map_err(|e| e.into())
    }

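    // Extended-attribute writes trigger a copy-up so the change lands in the
    // upper layer; reads (getxattr/listxattr) go to whichever layer currently
    // backs the inode.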
    async fn setxattr(
        &self,
        req: Request,
        inode: Inode,
        name: &OsStr,
        value: &[u8],
        flags: u32,
        position: u32,
    ) -> Result<()> {
        let node = self.lookup_node(req, inode, "").await?;

        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        if !node.in_upper_layer().await {
            self.copy_node_up(req, node.clone()).await?;
        }

        let (layer, _, real_inode) = node.first_layer_inode().await;

        layer
            .setxattr(req, real_inode, name, value, flags, position)
            .await
    }

    async fn getxattr(
        &self,
        req: Request,
        inode: Inode,
        name: &OsStr,
        size: u32,
    ) -> Result<ReplyXAttr> {
        let node = self.lookup_node(req, inode, "").await?;

        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        let (layer, real_inode) = self.find_real_inode(inode).await?;

        layer.getxattr(req, real_inode, name, size).await
    }

    async fn listxattr(&self, req: Request, inode: Inode, size: u32) -> Result<ReplyXAttr> {
        let node = self.lookup_node(req, inode, "").await?;
        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }
        let (layer, real_inode) = self.find_real_inode(inode).await?;
        layer.listxattr(req, real_inode, size).await
    }

    async fn removexattr(&self, req: Request, inode: Inode, name: &OsStr) -> Result<()> {
        let node = self.lookup_node(req, inode, "").await?;

        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        if !node.in_upper_layer().await {
            self.copy_node_up(req, node.clone()).await?;
        }

        let (layer, _, ino) = node.first_layer_inode().await;
        layer.removexattr(req, ino, name).await
    }

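    // flush tolerates a stale inode lookup (ENOENT) but insists that the inode
    // still matches the node recorded for the handle before forwarding.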
    async fn flush(&self, req: Request, inode: Inode, fh: u64, lock_owner: u64) -> Result<()> {
        if self.no_open.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOSYS).into());
        }

        let node = self.lookup_node(req, inode, "").await;
        match node {
            Ok(n) => {
                if n.whiteout.load(Ordering::Relaxed) {
                    return Err(Error::from_raw_os_error(libc::ENOENT).into());
                }
            }
            Err(e) => {
                if e.raw_os_error() == Some(libc::ENOENT) {
                    trace!("flush: inode {inode} is stale");
                } else {
                    return Err(e.into());
                }
            }
        }

        let (layer, real_inode, real_handle) = self.find_real_info_from_handle(fh).await?;

        if inode
            != self
                .handles
                .lock()
                .await
                .get(&fh)
                .map(|h| h.node.inode)
                .unwrap_or(0)
        {
            return Err(Error::other("inode does not match handle").into());
        }

        trace!("flushing, real_inode: {real_inode}, real_handle: {real_handle}");
        layer.flush(req, real_inode, real_handle, lock_owner).await
    }

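    // opendir mirrors open for directories: the first layer's directory handle is
    // wrapped in a HandleData entry keyed by a fresh overlay handle id.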
    async fn opendir(&self, req: Request, inode: Inode, flags: u32) -> Result<ReplyOpen> {
        if self.no_opendir.load(Ordering::Relaxed) {
            info!("fuse: opendir is not supported.");
            return Err(Error::from_raw_os_error(libc::ENOSYS).into());
        }

        let node = self.lookup_node(req, inode, ".").await?;

        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        let st = node.stat64(req).await?;
        if !utils::is_dir(&st.attr.kind) {
            return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
        }

        let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
        let (layer, in_upper_layer, real_inode) = node.first_layer_inode().await;
        let reply = layer.opendir(req, real_inode, flags).await?;

        self.handles.lock().await.insert(
            handle,
            Arc::new(HandleData {
                node: Arc::clone(&node),
                real_handle: Some(RealHandle {
                    layer,
                    in_upper_layer,
                    inode: real_inode,
                    handle: AtomicU64::new(reply.fh),
                }),
            }),
        );

        Ok(ReplyOpen { fh: handle, flags })
    }

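    // Directory streams are plain in-memory iterators: readdir/readdirplus collect
    // the merged entries into a Vec and expose it as a futures stream.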
    type DirEntryStream<'a>
        = Iter<IntoIter<Result<DirectoryEntry>>>
    where
        Self: 'a;
    type DirEntryPlusStream<'a>
        = Iter<IntoIter<Result<DirectoryEntryPlus>>>
    where
        Self: 'a;

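    // readdir returns the merged directory listing produced by do_readdir.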
    async fn readdir(
        &self,
        req: Request,
        parent: Inode,
        fh: u64,
        offset: i64,
    ) -> Result<ReplyDirectory<Self::DirEntryStream<'_>>> {
        if self.config.no_readdir {
            info!("fuse: readdir is not supported.");
            return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
        }
        let entries = self
            .do_readdir(req, parent, fh, offset.try_into().unwrap())
            .await?;
        Ok(ReplyDirectory { entries })
    }

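    // readdirplus additionally returns entry attributes; the handle lookup after
    // do_readdirplus is purely trace-level diagnostics.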
    async fn readdirplus(
        &self,
        req: Request,
        parent: Inode,
        fh: u64,
        offset: u64,
        _lock_owner: u64,
    ) -> Result<ReplyDirectoryPlus<Self::DirEntryPlusStream<'_>>> {
        if self.config.no_readdir {
            info!("fuse: readdir is not supported.");
            return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
        }
        trace!("readdirplus: parent: {parent}, fh: {fh}, offset: {offset}");
        let entries = self.do_readdirplus(req, parent, fh, offset).await?;
        match self.handles.lock().await.get(&fh) {
            Some(h) => {
                trace!(
                    "after readdirplus: found handle, seeing real_handle: {}",
                    h.real_handle.is_some()
                );
            }
            None => trace!("after readdirplus: no handle found: {fh}"),
        }
        Ok(ReplyDirectoryPlus { entries })
    }

    async fn releasedir(&self, _req: Request, _inode: Inode, fh: u64, _flags: u32) -> Result<()> {
        if self.no_opendir.load(Ordering::Relaxed) {
            info!("fuse: releasedir is not supported.");
            return Err(Error::from_raw_os_error(libc::ENOSYS).into());
        }

        self.handles.lock().await.remove(&fh);
        Ok(())
    }

    async fn fsyncdir(&self, req: Request, inode: Inode, fh: u64, datasync: bool) -> Result<()> {
        self.do_fsync(req, inode, datasync, fh, true)
            .await
            .map_err(|e| e.into())
    }

    async fn access(&self, req: Request, inode: Inode, mask: u32) -> Result<()> {
        let node = self.lookup_node(req, inode, "").await?;

        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        let (layer, real_inode) = self.find_real_inode(inode).await?;
        layer.access(req, real_inode, mask).await
    }

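    // create makes the file in the upper layer through do_create (with the same
    // flag adjustments as open), then looks the new entry up to fill ReplyCreated.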
    async fn create(
        &self,
        req: Request,
        parent: Inode,
        name: &OsStr,
        mode: u32,
        flags: u32,
    ) -> Result<ReplyCreated> {
        let pnode = self.lookup_node(req, parent, "").await?;
        if pnode.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        let mut flags: i32 = flags as i32;
        flags |= libc::O_NOFOLLOW;
        flags &= !libc::O_DIRECT;
        if self.config.writeback {
            if flags & libc::O_ACCMODE == libc::O_WRONLY {
                flags &= !libc::O_ACCMODE;
                flags |= libc::O_RDWR;
            }

            if flags & libc::O_APPEND != 0 {
                flags &= !libc::O_APPEND;
            }
        }

        let final_handle = self
            .do_create(req, &pnode, name, mode, flags.try_into().unwrap())
            .await?;
        // Convert lossily rather than unwrapping so a non-UTF-8 name cannot panic.
        let sname = name.to_string_lossy().to_string();
        let entry = self.do_lookup(req, parent, sname.as_str()).await?;
        let fh = final_handle
            .ok_or_else(|| std::io::Error::new(ErrorKind::NotFound, "Handle not found"))?;
        Ok(ReplyCreated {
            ttl: entry.ttl,
            attr: entry.attr,
            generation: entry.generation,
            fh,
            flags: flags.try_into().unwrap(),
        })
    }

    async fn batch_forget(&self, _req: Request, inodes: &[(Inode, u64)]) {
        for inode in inodes {
            self.forget_one(inode.0, inode.1).await;
        }
    }

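    // fallocate is only honoured for handles that already live in the upper
    // layer; lower-layer handles get EROFS.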
    async fn fallocate(
        &self,
        req: Request,
        inode: Inode,
        fh: u64,
        offset: u64,
        length: u64,
        mode: u32,
    ) -> Result<()> {
        let data = self
            .get_data(req, Some(fh), inode, libc::O_RDONLY as u32)
            .await?;

        match data.real_handle {
            None => Err(Error::from_raw_os_error(libc::ENOENT).into()),
            Some(ref rhd) => {
                if !rhd.in_upper_layer {
                    return Err(Error::from_raw_os_error(libc::EROFS).into());
                }
                rhd.layer
                    .fallocate(
                        req,
                        rhd.inode,
                        rhd.handle.load(Ordering::Relaxed),
                        offset,
                        length,
                        mode,
                    )
                    .await
            }
        }
    }

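    // lseek rejects directories and forwards to the layer that owns the handle.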
    async fn lseek(
        &self,
        req: Request,
        inode: Inode,
        fh: u64,
        offset: u64,
        whence: u32,
    ) -> Result<ReplyLSeek> {
        let node = self.lookup_node(req, inode, "").await?;

        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        let st = node.stat64(req).await?;
        if utils::is_dir(&st.attr.kind) {
            error!("lseek on directory");
            return Err(Error::from_raw_os_error(libc::EINVAL).into());
        }

        let (layer, real_inode, real_handle) = self.find_real_info_from_handle(fh).await?;
        layer
            .lseek(req, real_inode, real_handle, offset, whence)
            .await
    }

    async fn interrupt(&self, _req: Request, _unique: u64) -> Result<()> {
        Ok(())
    }
}

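// Manual smoke test: mounts an overlay built from hard-coded local paths and
// blocks until Ctrl-C, which is why it is #[ignore]d by default.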
#[cfg(test)]
mod tests {
    use std::{ffi::OsString, sync::Arc};

    use rfuse3::{MountOptions, raw::Session};
    use tokio::signal;

    use crate::{
        overlayfs::{OverlayFs, config::Config},
        passthrough::{new_passthroughfs_layer, newlogfs::LoggingFileSystem},
    };

    #[tokio::test]
    #[ignore]
    async fn test_a_ovlfs() {
        env_logger::Builder::new()
            .filter_level(log::LevelFilter::Trace)
            .init();

        let mountpoint = "/home/luxian/megatest/true_temp".to_string();
        let lowerdir = vec!["/home/luxian/github/buck2-rust-third-party".to_string()];
        let upperdir = "/home/luxian/upper".to_string();

        let mut lower_layers = Vec::new();
        for lower in &lowerdir {
            let layer = new_passthroughfs_layer(lower).await.unwrap();
            lower_layers.push(Arc::new(layer));
        }
        let upper_layer = Arc::new(new_passthroughfs_layer(&upperdir).await.unwrap());

        let config = Config {
            mountpoint: mountpoint.clone(),
            do_import: true,
            ..Default::default()
        };

        let overlayfs = OverlayFs::new(Some(upper_layer), lower_layers, config, 1).unwrap();

        let logfs = LoggingFileSystem::new(overlayfs);

        let mount_path: OsString = OsString::from(mountpoint);

        let uid = unsafe { libc::getuid() };
        let gid = unsafe { libc::getgid() };

        let not_unprivileged = false;

        let mut mount_options = MountOptions::default();
        mount_options.force_readdir_plus(true).uid(uid).gid(gid);

        let mut mount_handle: rfuse3::raw::MountHandle = if !not_unprivileged {
            Session::new(mount_options)
                .mount_with_unprivileged(logfs, mount_path)
                .await
                .unwrap()
        } else {
            Session::new(mount_options)
                .mount(logfs, mount_path)
                .await
                .unwrap()
        };

        let handle = &mut mount_handle;

        tokio::select! {
            res = handle => res.unwrap(),
            _ = signal::ctrl_c() => {
                mount_handle.unmount().await.unwrap()
            }
        }
    }
}