1use super::utils;
2use super::{CachePolicy, HandleData, Inode, OverlayFs, RealHandle};
3use crate::util::open_options::OpenOptions;
4use rfuse3::raw::prelude::*;
5use rfuse3::*;
6use std::ffi::OsStr;
7use std::io::Error;
8use std::io::ErrorKind;
9use std::num::NonZeroU32;
10use std::sync::Arc;
11use std::sync::atomic::{AtomicU64, Ordering};
12use tokio::sync::Mutex;
13use tracing::info;
14use tracing::trace;
15
16impl Filesystem for OverlayFs {
17 async fn init(&self, req: Request) -> Result<ReplyInit> {
19 for layer in self.lower_layers.iter() {
20 layer.init(req).await?;
21 }
22 if let Some(upper) = &self.upper_layer {
23 upper.init(req).await?;
24 }
25 if self.config.do_import {
26 self.import().await?;
27 }
28 if !self.config.do_import || self.config.writeback {
29 self.writeback.store(true, Ordering::Relaxed);
30 }
31 if !self.config.do_import || self.config.no_open {
32 self.no_open.store(true, Ordering::Relaxed);
33 }
34 if !self.config.do_import || self.config.no_opendir {
35 self.no_opendir.store(true, Ordering::Relaxed);
36 }
37 if !self.config.do_import || self.config.killpriv_v2 {
38 self.killpriv_v2.store(true, Ordering::Relaxed);
39 }
40 if self.config.perfile_dax {
41 self.perfile_dax.store(true, Ordering::Relaxed);
42 }
43
44 Ok(ReplyInit {
45 max_write: NonZeroU32::new(128 * 1024).unwrap(),
46 })
47 }
48
49 async fn destroy(&self, _req: Request) {}
54
55 async fn lookup(&self, req: Request, parent: Inode, name: &OsStr) -> Result<ReplyEntry> {
57 let tmp = name.to_string_lossy().to_string();
58 let result = self.do_lookup(req, parent, tmp.as_str()).await;
59 match result {
60 Ok(e) => Ok(e),
61 Err(err) => Err(err.into()),
62 }
63 }
64
    /// Drop `nlookup` references from the kernel's lookup count for `inode`.
    async fn forget(&self, _req: Request, inode: Inode, nlookup: u64) {
        self.forget_one(inode, nlookup).await;
    }
77
78 async fn getattr(
80 &self,
81 req: Request,
82 inode: Inode,
83 fh: Option<u64>,
84 flags: u32,
85 ) -> Result<ReplyAttr> {
86 if !self.no_open.load(Ordering::Relaxed)
87 && let Some(h) = fh
88 {
89 let handles = self.handles.lock().await;
90 if let Some(hd) = handles.get(&h)
91 && let Some(ref rh) = hd.real_handle
92 {
93 let mut rep: ReplyAttr = rh
94 .layer
95 .getattr(req, rh.inode, Some(rh.handle.load(Ordering::Relaxed)), 0)
96 .await?;
97 rep.attr.ino = inode;
98 return Ok(rep);
99 }
100 }
101
102 let node: Arc<super::OverlayInode> = self.lookup_node(req, inode, "").await?;
103 let (layer, _, lower_inode) = node.first_layer_inode().await;
104 let mut re = layer.getattr(req, lower_inode, None, flags).await?;
105 re.attr.ino = inode;
106 Ok(re)
107 }
108
109 async fn setattr(
111 &self,
112 req: Request,
113 inode: Inode,
114 fh: Option<u64>,
115 set_attr: SetAttr,
116 ) -> Result<ReplyAttr> {
117 self.upper_layer
119 .as_ref()
120 .cloned()
121 .ok_or_else(|| Error::from_raw_os_error(libc::EROFS))?;
122
123 if !self.no_open.load(Ordering::Relaxed)
125 && let Some(h) = fh
126 {
127 let handles = self.handles.lock().await;
128 if let Some(hd) = handles.get(&h)
129 && let Some(ref rhd) = hd.real_handle
130 {
131 if rhd.in_upper_layer {
133 let mut rep = rhd
134 .layer
135 .setattr(
136 req,
137 rhd.inode,
138 Some(rhd.handle.load(Ordering::Relaxed)),
139 set_attr,
140 )
141 .await?;
142 rep.attr.ino = inode;
143 return Ok(rep);
144 }
145 }
146 }
147
148 let mut node = self.lookup_node(req, inode, "").await?;
149
150 if !node.in_upper_layer().await {
151 node = self.copy_node_up(req, node.clone()).await?
152 }
153
154 let (layer, _, real_inode) = node.first_layer_inode().await;
155 let mut rep = layer.setattr(req, real_inode, None, set_attr).await?;
157 rep.attr.ino = inode;
158 Ok(rep)
159 }
160
161 async fn readlink(&self, req: Request, inode: Inode) -> Result<ReplyData> {
163 trace!("READLINK: inode: {inode}\n");
164
165 let node = self.lookup_node(req, inode, "").await?;
166
167 if node.whiteout.load(Ordering::Relaxed) {
168 return Err(Error::from_raw_os_error(libc::ENOENT).into());
169 }
170
171 let (layer, _, inode) = node.first_layer_inode().await;
172 layer.readlink(req, inode).await
173 }
174
175 async fn symlink(
177 &self,
178 req: Request,
179 parent: Inode,
180 name: &OsStr,
181 link: &OsStr,
182 ) -> Result<ReplyEntry> {
183 let sname = name.to_string_lossy().into_owned().to_owned();
185 let slinkname = link.to_string_lossy().into_owned().to_owned();
186
187 let pnode = self.lookup_node(req, parent, "").await?;
188 self.do_symlink(req, slinkname.as_str(), &pnode, sname.as_str())
189 .await?;
190
191 self.do_lookup(req, parent, sname.as_str())
192 .await
193 .map_err(|e| e.into())
194 }
195
196 async fn mknod(
200 &self,
201 req: Request,
202 parent: Inode,
203 name: &OsStr,
204 mode: u32,
205 rdev: u32,
206 ) -> Result<ReplyEntry> {
207 let sname = name.to_string_lossy().to_string();
208
209 let pnode = self.lookup_node(req, parent, "").await?;
211 if pnode.whiteout.load(Ordering::Relaxed) {
212 return Err(Error::from_raw_os_error(libc::ENOENT).into());
213 }
214
215 self.do_mknod(req, &pnode, sname.as_str(), mode, rdev, 0)
216 .await?;
217 self.do_lookup(req, parent, sname.as_str())
218 .await
219 .map_err(|e| e.into())
220 }
221
222 async fn mkdir(
224 &self,
225 req: Request,
226 parent: Inode,
227 name: &OsStr,
228 mode: u32,
229 umask: u32,
230 ) -> Result<ReplyEntry> {
231 let sname = name.to_string_lossy().to_string();
232
233 let pnode = self.lookup_node(req, parent, "").await?;
235 if pnode.whiteout.load(Ordering::Relaxed) {
236 return Err(Error::from_raw_os_error(libc::ENOENT).into());
237 }
238
239 self.do_mkdir(req, pnode, sname.as_str(), mode, umask)
240 .await?;
241 self.do_lookup(req, parent, sname.as_str())
242 .await
243 .map_err(|e| e.into())
244 }
245
246 async fn unlink(&self, req: Request, parent: Inode, name: &OsStr) -> Result<()> {
248 self.do_rm(req, parent, name, false)
249 .await
250 .map_err(|e| e.into())
251 }
252
253 async fn rmdir(&self, req: Request, parent: Inode, name: &OsStr) -> Result<()> {
255 self.do_rm(req, parent, name, true)
256 .await
257 .map_err(|e| e.into())
258 }
259
260 async fn rename(
262 &self,
263 req: Request,
264 parent: Inode,
265 name: &OsStr,
266 new_parent: Inode,
267 new_name: &OsStr,
268 ) -> Result<()> {
269 self.do_rename(req, parent, name, new_parent, new_name)
270 .await
271 .map_err(|e| e.into())
272 }
273
274 async fn link(
276 &self,
277 req: Request,
278 inode: Inode,
279 new_parent: Inode,
280 new_name: &OsStr,
281 ) -> Result<ReplyEntry> {
282 let node = self.lookup_node(req, inode, "").await?;
283 if node.whiteout.load(Ordering::Relaxed) {
284 return Err(Error::from_raw_os_error(libc::ENOENT).into());
285 }
286
287 let newpnode = self.lookup_node(req, new_parent, "").await?;
288 if newpnode.whiteout.load(Ordering::Relaxed) {
289 return Err(Error::from_raw_os_error(libc::ENOENT).into());
290 }
291 let new_name = new_name.to_str().unwrap();
292 self.do_link(req, &node, &newpnode, new_name).await?;
297 self.do_lookup(req, new_parent, new_name)
299 .await
300 .map_err(|e| e.into())
301 }
302
303 async fn open(&self, req: Request, inode: Inode, flags: u32) -> Result<ReplyOpen> {
318 if self.no_open.load(Ordering::Relaxed) {
319 info!("fuse: open is not supported.");
320 return Err(Error::from_raw_os_error(libc::ENOSYS).into());
321 }
322
323 let readonly: bool = flags
324 & (libc::O_APPEND | libc::O_CREAT | libc::O_TRUNC | libc::O_RDWR | libc::O_WRONLY)
325 as u32
326 == 0;
327 let mut flags: i32 = flags as i32;
329
330 flags |= libc::O_NOFOLLOW;
331
332 if self.config.writeback {
333 if flags & libc::O_ACCMODE == libc::O_WRONLY {
334 flags &= !libc::O_ACCMODE;
335 flags |= libc::O_RDWR;
336 }
337
338 if flags & libc::O_APPEND != 0 {
339 flags &= !libc::O_APPEND;
340 }
341 }
342 let node = self.lookup_node(req, inode, "").await?;
344
345 if node.whiteout.load(Ordering::Relaxed) {
347 return Err(Error::from_raw_os_error(libc::ENOENT).into());
348 }
349
350 if !readonly {
351 self.copy_node_up(req, node.clone()).await?;
353 }
354
355 let (_l, h) = node.open(req, flags as u32, 0).await?;
357
358 let hd = self.next_handle.fetch_add(1, Ordering::Relaxed);
359 let (layer, in_upper_layer, inode) = node.first_layer_inode().await;
360 let handle_data = HandleData {
361 node: node.clone(),
362 real_handle: Some(RealHandle {
363 layer,
364 in_upper_layer,
365 inode,
366 handle: AtomicU64::new(h.fh),
367 }),
368 dir_snapshot: Mutex::new(None),
369 };
370
371 self.handles.lock().await.insert(hd, Arc::new(handle_data));
372
373 let mut opts = OpenOptions::empty();
374 match self.config.cache_policy {
375 CachePolicy::Never => opts |= OpenOptions::DIRECT_IO,
376 CachePolicy::Always => opts |= OpenOptions::KEEP_CACHE,
377 _ => {}
378 }
379 Ok(ReplyOpen {
382 fh: hd,
383 flags: opts.bits(),
384 })
385 }
386
387 async fn read(
393 &self,
394 req: Request,
395 inode: Inode,
396 fh: u64,
397 offset: u64,
398 size: u32,
399 ) -> Result<ReplyData> {
400 let data = self.get_data(req, Some(fh), inode, 0).await?;
401
402 match data.real_handle {
403 None => Err(Error::from_raw_os_error(libc::ENOENT).into()),
404 Some(ref hd) => {
405 hd.layer
406 .read(
407 req,
408 hd.inode,
409 hd.handle.load(Ordering::Relaxed),
410 offset,
411 size,
412 )
413 .await
414 }
415 }
416 }
417
418 #[allow(clippy::too_many_arguments)]
426 async fn write(
427 &self,
428 req: Request,
429 inode: Inode,
430 fh: u64,
431 offset: u64,
432 data: &[u8],
433 write_flags: u32,
434 flags: u32,
435 ) -> Result<ReplyWrite> {
436 let handle_data: Arc<HandleData> = self.get_data(req, Some(fh), inode, flags).await?;
437
438 match handle_data.real_handle {
439 None => Err(Error::from_raw_os_error(libc::ENOENT).into()),
440 Some(ref hd) => {
441 hd.layer
442 .write(
443 req,
444 hd.inode,
445 hd.handle.load(Ordering::Relaxed),
446 offset,
447 data,
448 write_flags,
449 flags,
450 )
451 .await
452 }
453 }
454 }
455
456 #[allow(clippy::too_many_arguments)]
461 async fn copy_file_range(
462 &self,
463 req: Request,
464 inode_in: Inode,
465 fh_in: u64,
466 offset_in: u64,
467 inode_out: Inode,
468 fh_out: u64,
469 offset_out: u64,
470 length: u64,
471 flags: u64,
472 ) -> Result<ReplyCopyFileRange> {
473 let data_in = self.get_data(req, Some(fh_in), inode_in, 0).await?;
475 let handle_in = match data_in.real_handle {
476 None => return Err(Error::from_raw_os_error(libc::ENOENT).into()),
477 Some(ref hd) => hd,
478 };
479
480 let data_out = self.get_data(req, Some(fh_out), inode_out, 0).await?;
482 let handle_out = match data_out.real_handle {
483 None => return Err(Error::from_raw_os_error(libc::ENOENT).into()),
484 Some(ref hd) => hd,
485 };
486
487 if !Arc::ptr_eq(&handle_in.layer, &handle_out.layer) {
489 return Err(Error::from_raw_os_error(libc::EXDEV).into());
491 }
492
493 handle_in
495 .layer
496 .copy_file_range(
497 req,
498 handle_in.inode,
499 handle_in.handle.load(Ordering::Relaxed),
500 offset_in,
501 handle_out.inode,
502 handle_out.handle.load(Ordering::Relaxed),
503 offset_out,
504 length,
505 flags,
506 )
507 .await
508 }
509
510 async fn statfs(&self, req: Request, inode: Inode) -> Result<ReplyStatFs> {
512 self.do_statvfs(req, inode).await.map_err(|e| e.into())
513 }
514
515 async fn release(
523 &self,
524 req: Request,
525 _inode: Inode,
526 fh: u64,
527 flags: u32,
528 lock_owner: u64,
529 flush: bool,
530 ) -> Result<()> {
531 if self.no_open.load(Ordering::Relaxed) {
532 info!("fuse: release is not supported.");
533 return Err(Error::from_raw_os_error(libc::ENOSYS).into());
534 }
535
536 if let Some(hd) = self.handles.lock().await.get(&fh) {
537 let rh = if let Some(ref h) = hd.real_handle {
538 h
539 } else {
540 return Err(
541 Error::other(format!("no real handle found for file handle {fh}")).into(),
542 );
543 };
544 let real_handle = rh.handle.load(Ordering::Relaxed);
545 let real_inode = rh.inode;
546 rh.layer
547 .release(req, real_inode, real_handle, flags, lock_owner, flush)
548 .await?;
549 }
550
551 self.handles.lock().await.remove(&fh);
552
553 Ok(())
554 }
555
556 async fn fsync(&self, req: Request, inode: Inode, fh: u64, datasync: bool) -> Result<()> {
559 self.do_fsync(req, inode, datasync, fh, false)
560 .await
561 .map_err(|e| e.into())
562 }
563
564 async fn setxattr(
566 &self,
567 req: Request,
568 inode: Inode,
569 name: &OsStr,
570 value: &[u8],
571 flags: u32,
572 position: u32,
573 ) -> Result<()> {
574 let node = self.lookup_node(req, inode, "").await?;
575
576 if node.whiteout.load(Ordering::Relaxed) {
577 return Err(Error::from_raw_os_error(libc::ENOENT).into());
578 }
579
580 if !node.in_upper_layer().await {
581 self.copy_node_up(req, node.clone()).await?;
583 }
584
585 let (layer, _, real_inode) = node.first_layer_inode().await;
586
587 layer
588 .setxattr(req, real_inode, name, value, flags, position)
589 .await
590 }
591
592 async fn getxattr(
596 &self,
597 req: Request,
598 inode: Inode,
599 name: &OsStr,
600 size: u32,
601 ) -> Result<ReplyXAttr> {
602 let node = self.lookup_node(req, inode, "").await?;
603
604 if node.whiteout.load(Ordering::Relaxed) {
605 return Err(Error::from_raw_os_error(libc::ENOENT).into());
606 }
607
608 let (layer, real_inode) = self.find_real_inode(inode).await?;
609
610 layer.getxattr(req, real_inode, name, size).await
611 }
612
613 async fn listxattr(&self, req: Request, inode: Inode, size: u32) -> Result<ReplyXAttr> {
618 let node = self.lookup_node(req, inode, "").await?;
619 if node.whiteout.load(Ordering::Relaxed) {
620 return Err(Error::from_raw_os_error(libc::ENOENT).into());
621 }
622 let (layer, real_inode) = self.find_real_inode(inode).await?;
623 layer.listxattr(req, real_inode, size).await
624 }
625
626 async fn removexattr(&self, req: Request, inode: Inode, name: &OsStr) -> Result<()> {
628 let node = self.lookup_node(req, inode, "").await?;
629
630 if node.whiteout.load(Ordering::Relaxed) {
631 return Err(Error::from_raw_os_error(libc::ENOENT).into());
632 }
633
634 if !node.in_upper_layer().await {
635 self.copy_node_up(req, node.clone()).await?;
637 }
638
639 let (layer, _, ino) = node.first_layer_inode().await;
640 layer.removexattr(req, ino, name).await
641
642 }
644
    /// Flush an open file. A stale inode (lookup fails with ENOENT) is
    /// tolerated as long as the real handle is still valid, but a mismatch
    /// between `inode` and the inode recorded on the handle is rejected.
    async fn flush(&self, req: Request, inode: Inode, fh: u64, lock_owner: u64) -> Result<()> {
        if self.no_open.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOSYS).into());
        }

        // A whiteout node means the file was deleted in the overlay.
        let node = self.lookup_node(req, inode, "").await;
        match node {
            Ok(n) => {
                if n.whiteout.load(Ordering::Relaxed) {
                    return Err(Error::from_raw_os_error(libc::ENOENT).into());
                }
            }
            Err(e) => {
                // ENOENT here means the inode went stale after the open;
                // proceed and flush through the still-open real handle.
                if e.raw_os_error() == Some(libc::ENOENT) {
                    trace!("flush: inode {inode} is stale");
                } else {
                    return Err(e.into());
                }
            }
        }

        let (layer, real_inode, real_handle) = self.find_real_info_from_handle(fh).await?;

        // Reject the request when `fh` is not associated with `inode`
        // (a missing handle maps to 0 and fails the comparison too).
        if inode
            != self
                .handles
                .lock()
                .await
                .get(&fh)
                .map(|h| h.node.inode)
                .unwrap_or(0)
        {
            return Err(Error::other("inode does not match handle").into());
        }

        trace!("flushing, real_inode: {real_inode}, real_handle: {real_handle}");
        layer.flush(req, real_inode, real_handle, lock_owner).await
    }
696
697 async fn opendir(&self, req: Request, inode: Inode, flags: u32) -> Result<ReplyOpen> {
705 if self.no_opendir.load(Ordering::Relaxed) {
706 info!("fuse: opendir is not supported.");
707 return Err(Error::from_raw_os_error(libc::ENOSYS).into());
708 }
709
710 let node = self.lookup_node(req, inode, ".").await?;
712
713 if node.whiteout.load(Ordering::Relaxed) {
714 return Err(Error::from_raw_os_error(libc::ENOENT).into());
715 }
716
717 let st = node.stat64(req).await?;
718 if !utils::is_dir(&st.attr.kind) {
719 return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
720 }
721
722 let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
723 let (layer, in_upper_layer, real_inode) = node.first_layer_inode().await;
725 let reply = layer.opendir(req, real_inode, flags).await?;
726
727 self.handles.lock().await.insert(
728 handle,
729 Arc::new(HandleData {
730 node: Arc::clone(&node),
731 real_handle: Some(RealHandle {
732 layer,
733 in_upper_layer,
734 inode: real_inode,
735 handle: AtomicU64::new(reply.fh),
736 }),
737 dir_snapshot: Mutex::new(None),
738 }),
739 );
740
741 Ok(ReplyOpen { fh: handle, flags })
742 }
743
744 async fn readdir<'a>(
748 &'a self,
749 req: Request,
750 parent: Inode,
751 fh: u64,
752 offset: i64,
753 ) -> Result<
754 ReplyDirectory<
755 impl futures_util::stream::Stream<Item = Result<DirectoryEntry>> + Send + 'a,
756 >,
757 > {
758 if self.config.no_readdir {
759 info!("fuse: readdir is not supported.");
760 return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
761 }
762 let entries = self
763 .do_readdir(req, parent, fh, offset.try_into().unwrap())
764 .await?;
765 Ok(ReplyDirectory { entries })
766 }
767
768 async fn readdirplus<'a>(
771 &'a self,
772 req: Request,
773 parent: Inode,
774 fh: u64,
775 offset: u64,
776 _lock_owner: u64,
777 ) -> Result<
778 ReplyDirectoryPlus<
779 impl futures_util::stream::Stream<Item = Result<DirectoryEntryPlus>> + Send + 'a,
780 >,
781 > {
782 if self.config.no_readdir {
783 info!("fuse: readdir is not supported.");
784 return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
785 }
786 trace!("readdirplus: parent: {parent}, fh: {fh}, offset: {offset}");
787 let entries = self.do_readdirplus(req, parent, fh, offset).await?;
788 match self.handles.lock().await.get(&fh) {
789 Some(h) => {
790 trace!(
791 "after readdirplus: found handle, seeing real_handle: {}",
792 h.real_handle.is_some()
793 );
794 }
795 None => trace!("after readdirplus: no handle found: {fh}"),
796 }
797 Ok(ReplyDirectoryPlus { entries })
798 }
799 async fn releasedir(&self, req: Request, _inode: Inode, fh: u64, flags: u32) -> Result<()> {
804 if self.no_opendir.load(Ordering::Relaxed) {
805 info!("fuse: releasedir is not supported.");
806 return Err(Error::from_raw_os_error(libc::ENOSYS).into());
807 }
808
809 if let Some(hd) = self.handles.lock().await.get(&fh) {
810 let rh = if let Some(ref h) = hd.real_handle {
811 h
812 } else {
813 return Err(
814 Error::other(format!("no real handle found for file handle {fh}")).into(),
815 );
816 };
817 let real_handle = rh.handle.load(Ordering::Relaxed);
818 let real_inode = rh.inode;
819 rh.layer
820 .releasedir(req, real_inode, real_handle, flags)
821 .await?;
822 }
823
824 self.handles.lock().await.remove(&fh);
825 Ok(())
826 }
827
828 async fn fsyncdir(&self, req: Request, inode: Inode, fh: u64, datasync: bool) -> Result<()> {
833 self.do_fsync(req, inode, datasync, fh, true)
834 .await
835 .map_err(|e| e.into())
836 }
837 async fn access(&self, req: Request, inode: Inode, mask: u32) -> Result<()> {
841 let node = self.lookup_node(req, inode, "").await?;
842
843 if node.whiteout.load(Ordering::Relaxed) {
844 return Err(Error::from_raw_os_error(libc::ENOENT).into());
845 }
846
847 let (layer, real_inode) = self.find_real_inode(inode).await?;
848 layer.access(req, real_inode, mask).await
849 }
850
851 async fn create(
868 &self,
869 req: Request,
870 parent: Inode,
871 name: &OsStr,
872 mode: u32,
873 flags: u32,
874 ) -> Result<ReplyCreated> {
875 let pnode = self.lookup_node(req, parent, "").await?;
877 if pnode.whiteout.load(Ordering::Relaxed) {
878 return Err(Error::from_raw_os_error(libc::ENOENT).into());
879 }
880
881 let mut flags: i32 = flags as i32;
882 flags |= libc::O_NOFOLLOW;
883 flags &= !libc::O_DIRECT;
884 if self.config.writeback {
885 if flags & libc::O_ACCMODE == libc::O_WRONLY {
886 flags &= !libc::O_ACCMODE;
887 flags |= libc::O_RDWR;
888 }
889
890 if flags & libc::O_APPEND != 0 {
891 flags &= !libc::O_APPEND;
892 }
893 }
894
895 let final_handle = self
896 .do_create(req, &pnode, name, mode, flags.try_into().unwrap())
897 .await?;
898 let entry = self.do_lookup(req, parent, name.to_str().unwrap()).await?;
899 let fh = final_handle
900 .ok_or_else(|| std::io::Error::new(ErrorKind::NotFound, "Handle not found"))?;
901
902 let mut opts = OpenOptions::empty();
903 match self.config.cache_policy {
904 CachePolicy::Never => opts |= OpenOptions::DIRECT_IO,
905 CachePolicy::Always => opts |= OpenOptions::KEEP_CACHE,
906 _ => {}
907 }
908
909 Ok(ReplyCreated {
910 ttl: entry.ttl,
911 attr: entry.attr,
912 generation: entry.generation,
913 fh,
914 flags: opts.bits(),
915 })
916 }
917
918 async fn batch_forget(&self, _req: Request, inodes: &[(Inode, u64)]) {
920 for inode in inodes {
921 self.forget_one(inode.0, inode.1).await;
922 }
923 }
924
925 async fn fallocate(
932 &self,
933 req: Request,
934 inode: Inode,
935 fh: u64,
936 offset: u64,
937 length: u64,
938 mode: u32,
939 ) -> Result<()> {
940 let data = self
942 .get_data(req, Some(fh), inode, libc::O_RDONLY as u32)
943 .await?;
944
945 match data.real_handle {
946 None => Err(Error::from_raw_os_error(libc::ENOENT).into()),
947 Some(ref rhd) => {
948 if !rhd.in_upper_layer {
949 return Err(Error::from_raw_os_error(libc::EROFS).into());
951 }
952 rhd.layer
953 .fallocate(
954 req,
955 rhd.inode,
956 rhd.handle.load(Ordering::Relaxed),
957 offset,
958 length,
959 mode,
960 )
961 .await
962 }
963 }
964 }
965
    /// Seek within an open file or directory.
    ///
    /// Directories are validated (the handle must still be a directory) and
    /// SEEK_CUR is emulated as a position read followed by an absolute
    /// SEEK_SET with overflow checks, keeping offsets within `i64` range.
    /// Regular files forward the seek directly to the backing layer.
    async fn lseek(
        &self,
        req: Request,
        inode: Inode,
        fh: u64,
        offset: u64,
        whence: u32,
    ) -> Result<ReplyLSeek> {
        let node = self.lookup_node(req, inode, "").await?;

        if node.whiteout.load(Ordering::Relaxed) {
            return Err(Error::from_raw_os_error(libc::ENOENT).into());
        }

        let st = node.stat64(req).await?;
        if utils::is_dir(&st.attr.kind) {
            let (layer, real_inode, real_handle) = self.find_real_info_from_handle(fh).await?;

            // A failed getattr means the real handle is no longer usable.
            let handle_stat = match layer.getattr(req, real_inode, Some(real_handle), 0).await {
                Ok(s) => s,
                Err(_) => return Err(Error::from_raw_os_error(libc::EBADF).into()),
            };

            if !utils::is_dir(&handle_stat.attr.kind) {
                return Err(Error::from_raw_os_error(libc::ENOTDIR).into());
            }

            match whence {
                x if x == libc::SEEK_SET as u32 => {
                    // Offsets beyond i64::MAX cannot be represented by the
                    // underlying lseek.
                    if offset > i64::MAX as u64 {
                        return Err(Error::from_raw_os_error(libc::EINVAL).into());
                    }

                    layer
                        .lseek(req, real_inode, real_handle, offset, whence)
                        .await
                }
                x if x == libc::SEEK_CUR as u32 => {
                    // Read the current position first, then emulate SEEK_CUR
                    // with an absolute SEEK_SET after checked addition.
                    let current = match layer
                        .lseek(req, real_inode, real_handle, 0, libc::SEEK_CUR as u32)
                        .await
                    {
                        Ok(r) => r.offset,
                        Err(_) => return Err(Error::from_raw_os_error(libc::EINVAL).into()),
                    };

                    if let Some(new_offset) = current.checked_add(offset) {
                        if new_offset > i64::MAX as u64 {
                            return Err(Error::from_raw_os_error(libc::EINVAL).into());
                        }

                        match layer
                            .lseek(
                                req,
                                real_inode,
                                real_handle,
                                new_offset,
                                libc::SEEK_SET as u32,
                            )
                            .await
                        {
                            Ok(_) => Ok(ReplyLSeek { offset: new_offset }),
                            Err(_) => Err(Error::from_raw_os_error(libc::EINVAL).into()),
                        }
                    } else {
                        // current + offset overflowed u64.
                        Err(Error::from_raw_os_error(libc::EINVAL).into())
                    }
                }
                // Any other whence (e.g. SEEK_END) is unsupported for dirs.
                _ => Err(Error::from_raw_os_error(libc::EINVAL).into()),
            }
        } else {
            // Regular file: forward unchanged to the backing layer.
            let (layer, real_inode, real_handle) = self.find_real_info_from_handle(fh).await?;
            layer
                .lseek(req, real_inode, real_handle, offset, whence)
                .await
        }
    }
1065
    /// Interrupt requests are acknowledged but otherwise a no-op.
    async fn interrupt(&self, _req: Request, _unique: u64) -> Result<()> {
        Ok(())
    }
1069}
1070
#[cfg(test)]
mod tests {
    use std::{ffi::OsString, path::PathBuf, sync::Arc};

    use rfuse3::{MountOptions, raw::Session};
    use tokio::signal;
    use tracing_subscriber::EnvFilter;

    use crate::unionfs::BoxedLayer;
    use crate::{
        passthrough::{PassthroughArgs, new_passthroughfs_layer, newlogfs::LoggingFileSystem},
        unionfs::{OverlayFs, config::Config},
    };

    /// Manual integration test: mounts a real overlay (passthrough lower +
    /// upper layers wrapped in a logging fs) and blocks until Ctrl-C.
    ///
    /// `#[ignore]`d because it needs the hard-coded local paths below and a
    /// FUSE-capable environment — run explicitly with `cargo test -- --ignored`.
    #[tokio::test]
    #[ignore]
    async fn test_a_ovlfs() {
        // Best-effort tracing setup; ignore failure if a subscriber exists.
        let _ = tracing_subscriber::fmt()
            .with_env_filter(EnvFilter::from_default_env().add_directive("trace".parse().unwrap()))
            .try_init();

        // NOTE(review): machine-specific paths; adjust before running.
        let mountpoint = PathBuf::from("/home/luxian/megatest/true_temp");
        let lowerdir = vec![PathBuf::from("/home/luxian/github/buck2-rust-third-party")];
        let upperdir = PathBuf::from("/home/luxian/upper");

        // One passthrough layer per lower directory.
        let mut lower_layers: Vec<Arc<BoxedLayer>> = Vec::new();
        for lower in &lowerdir {
            let layer = new_passthroughfs_layer(PassthroughArgs {
                root_dir: lower.clone(),
                mapping: None::<&str>,
            })
            .await
            .unwrap();
            lower_layers.push(Arc::new(layer) as Arc<BoxedLayer>);
        }
        // Writable upper layer.
        let upper_layer: Arc<BoxedLayer> = Arc::new(
            new_passthroughfs_layer(PassthroughArgs {
                root_dir: upperdir,
                mapping: None::<&str>,
            })
            .await
            .unwrap(),
        );
        let config = Config {
            mountpoint: mountpoint.clone(),
            do_import: true,
            ..Default::default()
        };

        let overlayfs = OverlayFs::new(Some(upper_layer), lower_layers, config, 1).unwrap();

        // Wrap in the logging filesystem so every FUSE op is traced.
        let logfs = LoggingFileSystem::new(overlayfs);

        let mount_path: OsString = OsString::from(mountpoint);

        let uid = unsafe { libc::getuid() };
        let gid = unsafe { libc::getgid() };

        // Toggle for testing the privileged mount path.
        let not_unprivileged = false;

        let mut mount_options = MountOptions::default();
        mount_options.force_readdir_plus(true).uid(uid).gid(gid);

        let mut mount_handle: rfuse3::raw::MountHandle = if !not_unprivileged {
            Session::new(mount_options)
                .mount_with_unprivileged(logfs, mount_path)
                .await
                .unwrap()
        } else {
            Session::new(mount_options)
                .mount(logfs, mount_path)
                .await
                .unwrap()
        };

        let handle = &mut mount_handle;

        // Serve until the session ends on its own or Ctrl-C unmounts it.
        tokio::select! {
            res = handle => res.unwrap(),
            _ = signal::ctrl_c() => {
                mount_handle.unmount().await.unwrap()
            }
        }
    }
}