// fuse_backend_rs/passthrough/async_io.rs
// Copyright (C) 2021-2022 Alibaba Cloud. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

#![allow(unused_variables)]
#![allow(dead_code)]
#![allow(unused_imports)]

use std::io;
use std::mem::ManuallyDrop;

use async_trait::async_trait;

use super::*;
use crate::abi::fuse_abi::{
    CreateIn, Opcode, OpenOptions, SetattrValid, FOPEN_IN_KILL_SUIDGID, WRITE_KILL_PRIV,
};
use crate::api::filesystem::{
    AsyncFileSystem, AsyncZeroCopyReader, AsyncZeroCopyWriter, Context, FileSystem,
};

21impl<S: BitmapSlice + Send + Sync + 'static> BackendFileSystem for PassthroughFs<S> {
22 fn mount(&self) -> io::Result<(Entry, u64)> {
23 let entry = self.do_lookup(fuse::ROOT_ID, &CString::new(".").unwrap())?;
24 Ok((entry, VFS_MAX_INO))
25 }
26
27 fn as_any(&self) -> &dyn Any {
28 self
29 }
30}
31
32impl<'a> InodeData {
33 async fn async_get_file(&self) -> io::Result<InodeFile<'_>> {
34 // The io_uring doesn't support open_by_handle_at yet, so use sync io.
35 self.get_file()
36 }
37}
38
impl<S: BitmapSlice + Send + Sync> PassthroughFs<S> {
    // NOTE(review): the large `/* ... */` regions below appear to be a
    // synchronous implementation in the middle of being ported to async; they
    // are not compiled. Some identifiers inside them (e.g. `drive`) would not
    // resolve as written — verify before re-enabling.
    /*
    async fn async_open_file(
        &self,
        ctx: &Context,
        dir_fd: i32,
        pathname: &'_ CStr,
        flags: i32,
        mode: u32,
    ) -> io::Result<File> {
        AsyncUtil::open_at(drive, dir_fd, pathname, flags, mode)
            .await
            .map(|fd| unsafe { File::from_raw_fd(fd as i32) })
    }

    async fn async_open_proc_file(
        &self,
        ctx: &Context,
        fd: RawFd,
        flags: i32,
        mode: u32,
    ) -> io::Result<File> {
        if !is_safe_inode(mode) {
            return Err(ebadf());
        }

        let pathname = CString::new(format!("{}", fd))
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        // We don't really check `flags` because if the kernel can't handle poorly specified flags
        // then we have much bigger problems. Also, clear the `O_NOFOLLOW` flag if it is set since
        // we need to follow the `/proc/self/fd` symlink to get the file.
        self.async_open_file(
            ctx,
            self.proc_self_fd.as_raw_fd(),
            pathname.as_c_str(),
            (flags | libc::O_CLOEXEC) & (!libc::O_NOFOLLOW),
            0,
        )
        .await
    }

    /// Create a File or File Handle for `name` under directory `dir_fd` to support `lookup()`.
    async fn async_open_file_or_handle<F>(
        &self,
        ctx: &Context,
        dir_fd: RawFd,
        name: &CStr,
        reopen_dir: F,
    ) -> io::Result<(FileOrHandle, InodeStat, InodeAltKey, Option<InodeAltKey>)>
    where
        F: FnOnce(RawFd, libc::c_int, u32) -> io::Result<File>,
    {
        let handle = if self.cfg.inode_file_handles {
            FileHandle::from_name_at_with_mount_fds(dir_fd, name, &self.mount_fds, reopen_dir)
        } else {
            Err(io::Error::from_raw_os_error(libc::ENOTSUP))
        };

        // Ignore errors, because having a handle is optional
        let file_or_handle = if let Ok(h) = handle {
            FileOrHandle::Handle(h)
        } else {
            let f = self
                .async_open_file(
                    ctx,
                    dir_fd,
                    name,
                    libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
                    0,
                )
                .await?;

            FileOrHandle::File(f)
        };

        let st = match &file_or_handle {
            FileOrHandle::File(f) => {
                // Count mount ID as part of alt key if use_mntid is true. Note that using
                // name_to_handle_at() to get mntid is kind of expensive in Lookup intensive
                // workloads, e.g. when cache is none and accessing lots of files.
                //
                // Some filesystems don't support file handle, for example overlayfs mounted
                // without index feature, if so just use mntid 0 in that case.
                //
                // TODO: use statx(2) to query mntid when 5.8 kernel or later are widely used.
                let mnt_id = if self.cfg.enable_mntid {
                    match FileHandle::from_name_at(dir_fd, name) {
                        Ok(h) => h.mnt_id,
                        Err(_) => 0,
                    }
                } else {
                    0
                };
                InodeStat {
                    stat: self.async_stat(ctx, f, None).await?,
                    mnt_id,
                }
            }
            FileOrHandle::Handle(h) => InodeStat {
                stat: self.async_stat_fd(ctx, dir_fd, Some(name)).await?,
                mnt_id: h.mnt_id,
            },
        };
        let ids_altkey = InodeAltKey::ids_from_stat(&st);

        // Note that this will always be `None` if `cfg.inode_file_handles` is false, but we only
        // really need this alt key when we do not have an `O_PATH` fd open for every inode. So if
        // `cfg.inode_file_handles` is false, we do not need this key anyway.
        let handle_altkey = file_or_handle.handle().map(|h| InodeAltKey::Handle(*h));

        Ok((file_or_handle, st, ids_altkey, handle_altkey))
    }

    async fn async_open_inode(
        &self,
        ctx: &Context,
        inode: Inode,
        mut flags: i32,
    ) -> io::Result<File> {
        let new_flags = self.get_writeback_open_flags(flags);

        let data = self.inode_map.get(inode)?;
        let file = data.async_get_file(&self.mount_fds).await?;

        self.async_open_proc_file(ctx, file.as_raw_fd(), new_flags, data.mode)
            .await
    }

    async fn async_do_open(
        &self,
        ctx: &Context,
        inode: Inode,
        flags: u32,
        fuse_flags: u32,
    ) -> io::Result<(Option<Handle>, OpenOptions)> {
        let killpriv = if self.killpriv_v2.load(Ordering::Relaxed)
            && (fuse_flags & FOPEN_IN_KILL_SUIDGID != 0)
        {
            self::drop_cap_fsetid()?
        } else {
            None
        };
        let file = self.async_open_inode(ctx, inode, flags as i32).await?;
        drop(killpriv);

        let data = HandleData::new(inode, file);
        let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
        let mut opts = OpenOptions::empty();

        self.handle_map.insert(handle, data);
        match self.cfg.cache_policy {
            // We only set the direct I/O option on files.
            CachePolicy::Never => opts.set(
                OpenOptions::DIRECT_IO,
                flags & (libc::O_DIRECTORY as u32) == 0,
            ),
            CachePolicy::Always => opts |= OpenOptions::KEEP_CACHE,
            _ => {}
        };

        Ok((Some(handle), opts))
    }
    */

    /// Fetch `stat64` attributes for `inode`, together with the configured
    /// attribute timeout.
    ///
    /// Currently a stub (`unimplemented!()`); the intended implementation —
    /// preferring an already-open handle fd and falling back to the inode's
    /// file or file handle — is kept in the comment below for reference.
    async fn async_do_getattr(
        &self,
        ctx: &Context,
        inode: Inode,
        handle: Option<<Self as FileSystem>::Handle>,
    ) -> io::Result<(libc::stat64, Duration)> {
        unimplemented!()
        /*
        let st;
        let fd;
        let data = self.inode_map.get(inode).map_err(|e| {
            error!("fuse: do_getattr ino {} Not find err {:?}", inode, e);
            e
        })?;

        // kernel sends 0 as handle in case of no_open, and it depends on fuse server to handle
        // this case correctly.
        if !self.no_open.load(Ordering::Relaxed) && handle.is_some() {
            // Safe as we just checked handle
            let hd = self.handle_map.get(handle.unwrap(), inode)?;
            fd = hd.get_handle_raw_fd();
            st = self.async_stat_fd(ctx, fd, None).await;
        } else {
            match &data.file_or_handle {
                FileOrHandle::File(f) => {
                    fd = f.as_raw_fd();
                    st = self.async_stat_fd(ctx, fd, None).await;
                }
                FileOrHandle::Handle(_h) => {
                    let file = data.async_get_file(&self.mount_fds).await?;
                    fd = file.as_raw_fd();
                    st = self.async_stat_fd(ctx, fd, None).await;
                }
            }
        }

        let st = st.map_err(|e| {
            error!(
                "fuse: do_getattr stat failed ino {} fd: {:?} err {:?}",
                inode, fd, e
            );
            e
        })?;

        Ok((st, self.cfg.attr_timeout))
        */
    }

    /*
    async fn async_stat(
        &self,
        ctx: &Context,
        dir: &impl AsRawFd,
        path: Option<&CStr>,
    ) -> io::Result<libc::stat64> {
        self.async_stat_fd(ctx, dir.as_raw_fd(), path).await
    }

    async fn async_stat_fd(
        &self,
        _ctx: &Context,
        dir_fd: RawFd,
        path: Option<&CStr>,
    ) -> io::Result<libc::stat64> {
        // Safe because this is a constant value and a valid C string.
        let pathname =
            path.unwrap_or_else(|| unsafe { CStr::from_bytes_with_nul_unchecked(EMPTY_CSTR) });
        let mut st = MaybeUninit::<libc::stat64>::zeroed();

        // Safe because the kernel will only write data in `st` and we check the return value.
        let res = unsafe {
            libc::fstatat64(
                dir_fd,
                pathname.as_ptr(),
                st.as_mut_ptr(),
                libc::AT_EMPTY_PATH | libc::AT_SYMLINK_NOFOLLOW,
            )
        };
        if res >= 0 {
            // Safe because the kernel guarantees that the struct is now fully initialized.
            Ok(unsafe { st.assume_init() })
        } else {
            Err(io::Error::last_os_error())
        }
    }

    async fn async_get_data(
        &self,
        ctx: &Context,
        handle: Handle,
        inode: Inode,
        flags: libc::c_int,
    ) -> io::Result<Arc<HandleData>> {
        let no_open = self.no_open.load(Ordering::Relaxed);
        if !no_open {
            self.handle_map.get(handle, inode)
        } else {
            let file = self.async_open_inode(ctx, inode, flags as i32).await?;
            Ok(Arc::new(HandleData::new(inode, file)))
        }
    }
    */
}
307
#[async_trait]
impl<S: BitmapSlice + Send + Sync> AsyncFileSystem for PassthroughFs<S> {
    /// Look up `name` under the directory `parent` and return its `Entry`.
    ///
    /// Currently a stub (`unimplemented!()`); the intended async port —
    /// resolving via fd/file-handle alt keys with a lock-free refcount fast
    /// path — is kept in the comment below for reference.
    async fn async_lookup(
        &self,
        ctx: &Context,
        parent: <Self as FileSystem>::Inode,
        name: &CStr,
    ) -> io::Result<Entry> {
        unimplemented!()
        /*
        // Don't use is_safe_path_component(), allow "." and ".." for NFS export support
        if name.to_bytes_with_nul().contains(&SLASH_ASCII) {
            return Err(io::Error::from_raw_os_error(libc::EINVAL));
        }

        let dir = self.inode_map.get(parent)?;
        let dir_file = dir.async_get_file(&self.mount_fds).await?;
        let (file_or_handle, st, ids_altkey, handle_altkey) = self
            .async_open_file_or_handle(ctx, dir_file.as_raw_fd(), name, |fd, flags, mode| {
                Self::open_proc_file(&self.proc_self_fd, fd, flags, mode)
            })
            .await?;

        let mut attr_flags: u32 = 0;
        if let Some(dax_file_size) = self.cfg.dax_file_size {
            // st.stat.st_size is i64
            if self.perfile_dax.load(Ordering::Relaxed)
                && st.stat.st_size >= 0x0
                && st.stat.st_size as u64 >= dax_file_size
            {
                attr_flags |= fuse::FUSE_ATTR_DAX;
            }
        }

        let mut found = None;
        'search: loop {
            match self.inode_map.get_alt(&ids_altkey, handle_altkey.as_ref()) {
                // No existing entry found
                None => break 'search,
                Some(data) => {
                    let curr = data.refcount.load(Ordering::Acquire);
                    // forgot_one() has just destroyed the entry, retry...
                    if curr == 0 {
                        continue 'search;
                    }

                    // Saturating add to avoid integer overflow, it's not realistic to saturate u64.
                    let new = curr.saturating_add(1);

                    // Synchronizes with the forgot_one()
                    if data
                        .refcount
                        .compare_exchange(curr, new, Ordering::AcqRel, Ordering::Acquire)
                        .is_ok()
                    {
                        found = Some(data.inode);
                        break;
                    }
                }
            }
        }

        let inode = if let Some(v) = found {
            v
        } else {
            // Write guard get_alt_locked() and insert_lock() to avoid race conditions.
            let mut inodes = self.inode_map.get_map_mut();

            // Lookup inode_map again after acquiring the inode_map lock, as there might be another
            // racing thread already added an inode with the same altkey while we're not holding
            // the lock. If so just use the newly added inode, otherwise the inode will be replaced
            // and results in EBADF.
            match InodeMap::get_alt_locked(inodes.deref(), &ids_altkey, handle_altkey.as_ref()) {
                Some(data) => {
                    data.refcount.fetch_add(1, Ordering::Relaxed);
                    data.inode
                }
                None => {
                    let inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
                    if inode > VFS_MAX_INO {
                        error!("fuse: max inode number reached: {}", VFS_MAX_INO);
                        return Err(io::Error::new(
                            io::ErrorKind::Other,
                            format!("max inode number reached: {}", VFS_MAX_INO),
                        ));
                    }

                    InodeMap::insert_locked(
                        inodes.deref_mut(),
                        inode,
                        InodeData::new(inode, file_or_handle, 1, ids_altkey, st.get_stat().st_mode),
                        ids_altkey,
                        handle_altkey,
                    );
                    inode
                }
            }
        };

        Ok(Entry {
            inode,
            generation: 0,
            attr: st.get_stat(),
            attr_flags,
            attr_timeout: self.cfg.attr_timeout,
            entry_timeout: self.cfg.entry_timeout,
        })
        */
    }

    /// Async getattr: delegates to `async_do_getattr()` (itself currently a
    /// stub that panics with `unimplemented!()`).
    async fn async_getattr(
        &self,
        ctx: &Context,
        inode: <Self as FileSystem>::Inode,
        handle: Option<<Self as FileSystem>::Handle>,
    ) -> io::Result<(libc::stat64, Duration)> {
        self.async_do_getattr(ctx, inode, handle).await
    }

    /// Apply the attribute changes selected by `valid` to `inode`.
    ///
    /// Currently a stub (`unimplemented!()`); the intended implementation is
    /// kept in the comment below for reference.
    async fn async_setattr(
        &self,
        ctx: &Context,
        inode: <Self as FileSystem>::Inode,
        attr: libc::stat64,
        handle: Option<<Self as FileSystem>::Handle>,
        valid: SetattrValid,
    ) -> io::Result<(libc::stat64, Duration)> {
        unimplemented!()
        /*
        enum Data {
            Handle(Arc<HandleData>, RawFd),
            ProcPath(CString),
        }

        let inode_data = self.inode_map.get(inode)?;
        let file = inode_data.async_get_file(&self.mount_fds).await?;
        let data = if self.no_open.load(Ordering::Relaxed) {
            let pathname = CString::new(format!("self/fd/{}", file.as_raw_fd()))
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
            Data::ProcPath(pathname)
        } else {
            // If we have a handle then use it otherwise get a new fd from the inode.
            if let Some(handle) = handle {
                let hd = self.handle_map.get(handle, inode)?;
                let fd = hd.get_handle_raw_fd();
                Data::Handle(hd, fd)
            } else {
                let pathname = CString::new(format!("self/fd/{}", file.as_raw_fd()))
                    .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
                Data::ProcPath(pathname)
            }
        };

        if valid.contains(SetattrValid::SIZE) && self.seal_size.load(Ordering::Relaxed) {
            return Err(io::Error::from_raw_os_error(libc::EPERM));
        }

        if valid.contains(SetattrValid::MODE) {
            // Safe because this doesn't modify any memory and we check the return value.
            let res = unsafe {
                match data {
                    Data::Handle(_, fd) => libc::fchmod(fd, attr.st_mode),
                    Data::ProcPath(ref p) => {
                        libc::fchmodat(self.proc_self_fd.as_raw_fd(), p.as_ptr(), attr.st_mode, 0)
                    }
                }
            };
            if res < 0 {
                return Err(io::Error::last_os_error());
            }
        }

        if valid.intersects(SetattrValid::UID | SetattrValid::GID) {
            let uid = if valid.contains(SetattrValid::UID) {
                attr.st_uid
            } else {
                // Cannot use -1 here because these are unsigned values.
                ::std::u32::MAX
            };
            let gid = if valid.contains(SetattrValid::GID) {
                attr.st_gid
            } else {
                // Cannot use -1 here because these are unsigned values.
                ::std::u32::MAX
            };

            // Safe because this is a constant value and a valid C string.
            let empty = unsafe { CStr::from_bytes_with_nul_unchecked(EMPTY_CSTR) };

            // Safe because this doesn't modify any memory and we check the return value.
            let res = unsafe {
                libc::fchownat(
                    file.as_raw_fd(),
                    empty.as_ptr(),
                    uid,
                    gid,
                    libc::AT_EMPTY_PATH | libc::AT_SYMLINK_NOFOLLOW,
                )
            };
            if res < 0 {
                return Err(io::Error::last_os_error());
            }
        }

        if valid.contains(SetattrValid::SIZE) {
            // Cap restored when _killpriv is dropped
            let _killpriv = if self.killpriv_v2.load(Ordering::Relaxed)
                && valid.contains(SetattrValid::KILL_SUIDGID)
            {
                self::drop_cap_fsetid()?
            } else {
                None
            };

            // Safe because this doesn't modify any memory and we check the return value.
            let res = match data {
                Data::Handle(_, fd) => unsafe { libc::ftruncate(fd, attr.st_size) },
                Data::ProcPath(_) => {
                    // There is no `ftruncateat` so we need to get a new fd and truncate it.
                    let f = self
                        .async_open_inode(ctx, inode, libc::O_NONBLOCK | libc::O_RDWR)
                        .await?;
                    unsafe { libc::ftruncate(f.as_raw_fd(), attr.st_size) }
                }
            };
            if res < 0 {
                return Err(io::Error::last_os_error());
            }
        }

        if valid.intersects(SetattrValid::ATIME | SetattrValid::MTIME) {
            let mut tvs = [
                libc::timespec {
                    tv_sec: 0,
                    tv_nsec: libc::UTIME_OMIT,
                },
                libc::timespec {
                    tv_sec: 0,
                    tv_nsec: libc::UTIME_OMIT,
                },
            ];

            if valid.contains(SetattrValid::ATIME_NOW) {
                tvs[0].tv_nsec = libc::UTIME_NOW;
            } else if valid.contains(SetattrValid::ATIME) {
                tvs[0].tv_sec = attr.st_atime;
                tvs[0].tv_nsec = attr.st_atime_nsec;
            }

            if valid.contains(SetattrValid::MTIME_NOW) {
                tvs[1].tv_nsec = libc::UTIME_NOW;
            } else if valid.contains(SetattrValid::MTIME) {
                tvs[1].tv_sec = attr.st_mtime;
                tvs[1].tv_nsec = attr.st_mtime_nsec;
            }

            // Safe because this doesn't modify any memory and we check the return value.
            let res = match data {
                Data::Handle(_, fd) => unsafe { libc::futimens(fd, tvs.as_ptr()) },
                Data::ProcPath(ref p) => unsafe {
                    libc::utimensat(self.proc_self_fd.as_raw_fd(), p.as_ptr(), tvs.as_ptr(), 0)
                },
            };
            if res < 0 {
                return Err(io::Error::last_os_error());
            }
        }

        self.async_do_getattr(ctx, inode, handle).await
        */
    }

    /// Open `inode`. Currently a stub (`unimplemented!()`); intended
    /// implementation kept in the comment below.
    async fn async_open(
        &self,
        ctx: &Context,
        inode: <Self as FileSystem>::Inode,
        flags: u32,
        fuse_flags: u32,
    ) -> io::Result<(Option<<Self as FileSystem>::Handle>, OpenOptions)> {
        unimplemented!()
        /*
        if self.no_open.load(Ordering::Relaxed) {
            info!("fuse: open is not supported.");
            Err(io::Error::from_raw_os_error(libc::ENOSYS))
        } else {
            self.async_do_open(ctx, inode, flags, fuse_flags).await
        }
        */
    }

    /// Create and open `name` under `parent`. Currently a stub
    /// (`unimplemented!()`); intended implementation kept in the comment below.
    async fn async_create(
        &self,
        ctx: &Context,
        parent: <Self as FileSystem>::Inode,
        name: &CStr,
        args: CreateIn,
    ) -> io::Result<(Entry, Option<<Self as FileSystem>::Handle>, OpenOptions)> {
        unimplemented!()
        /*
        self.validate_path_component(name)?;

        let dir = self.inode_map.get(parent)?;
        let dir_file = dir.async_get_file(&self.mount_fds).await?;

        let new_file = {
            let (_uid, _gid) = set_creds(ctx.uid, ctx.gid)?;

            let flags = self.get_writeback_open_flags(args.flags as i32);
            Self::create_file_excl(
                dir_file.as_raw_fd(),
                name,
                flags,
                args.mode & !(args.umask & 0o777),
            )?
        };

        let entry = self.async_lookup(ctx, parent, name).await?;
        let file = match new_file {
            // File didn't exist, now created by create_file_excl()
            Some(f) => f,
            // File exists, and args.flags doesn't contain O_EXCL. Now let's open it with
            // open_inode().
            None => {
                // Cap restored when _killpriv is dropped
                let _killpriv = if self.killpriv_v2.load(Ordering::Relaxed)
                    && (args.fuse_flags & FOPEN_IN_KILL_SUIDGID != 0)
                {
                    self::drop_cap_fsetid()?
                } else {
                    None
                };

                let (_uid, _gid) = set_creds(ctx.uid, ctx.gid)?;
                self.async_open_inode(ctx, entry.inode, args.flags as i32)
                    .await?
            }
        };

        let ret_handle = if !self.no_open.load(Ordering::Relaxed) {
            let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
            let data = HandleData::new(entry.inode, file);

            self.handle_map.insert(handle, data);
            Some(handle)
        } else {
            None
        };

        let mut opts = OpenOptions::empty();
        match self.cfg.cache_policy {
            CachePolicy::Never => opts |= OpenOptions::DIRECT_IO,
            CachePolicy::Always => opts |= OpenOptions::KEEP_CACHE,
            _ => {}
        };

        Ok((entry, ret_handle, opts))
        */
    }

    /// Read up to `size` bytes at `offset` into `w`. Currently a stub
    /// (`unimplemented!()`); intended implementation kept in the comment below.
    #[allow(clippy::too_many_arguments)]
    async fn async_read(
        &self,
        ctx: &Context,
        inode: <Self as FileSystem>::Inode,
        handle: <Self as FileSystem>::Handle,
        w: &mut (dyn AsyncZeroCopyWriter + Send),
        size: u32,
        offset: u64,
        _lock_owner: Option<u64>,
        _flags: u32,
    ) -> io::Result<usize> {
        unimplemented!()
        /*
        let data = self
            .async_get_data(ctx, handle, inode, libc::O_RDONLY)
            .await?;
        let drive = ctx
            .get_drive::<D>()
            .ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;

        w.async_write_from(drive, data.get_handle_raw_fd(), size as usize, offset)
            .await
        */
    }

    /// Write up to `size` bytes at `offset` from `r`. Currently a stub
    /// (`unimplemented!()`); intended implementation kept in the comment below.
    //
    // NOTE(review): the commented reference body passes `cxt` to
    // `async_stat_fd()` — that looks like a typo for `ctx`; fix before
    // re-enabling.
    #[allow(clippy::too_many_arguments)]
    async fn async_write(
        &self,
        ctx: &Context,
        inode: <Self as FileSystem>::Inode,
        handle: <Self as FileSystem>::Handle,
        r: &mut (dyn AsyncZeroCopyReader + Send),
        size: u32,
        offset: u64,
        _lock_owner: Option<u64>,
        _delayed_write: bool,
        _flags: u32,
        fuse_flags: u32,
    ) -> io::Result<usize> {
        unimplemented!()
        /*
        let data = self
            .async_get_data(ctx, handle, inode, libc::O_RDWR)
            .await?;

        if self.seal_size.load(Ordering::Relaxed) {
            let st = self
                .async_stat_fd(cxt, data.get_handle_raw_fd(), None)
                .await?;
            self.seal_size_check(Opcode::Write, st.st_size as u64, offset, size as u64, 0)?;
        }

        // Fallback to sync io if KILLPRIV_V2 is enabled to work around a limitation of io_uring.
        if self.killpriv_v2.load(Ordering::Relaxed) && (fuse_flags & WRITE_KILL_PRIV != 0) {
            // Manually implement File::try_clone() by borrowing fd of data.file instead of dup().
            // It's safe because the `data` variable's lifetime spans the whole function,
            // so data.file won't be closed.
            let f = unsafe { File::from_raw_fd(data.get_handle_raw_fd()) };
            let mut f = ManuallyDrop::new(f);
            // Cap restored when _killpriv is dropped
            let _killpriv = self::drop_cap_fsetid()?;

            r.read_to(&mut *f, size as usize, offset)
        } else {
            let drive = ctx
                .get_drive::<D>()
                .ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;

            r.async_read_to(drive, data.get_handle_raw_fd(), size as usize, offset)
                .await
        }
        */
    }

    /// Flush file data (and metadata unless `datasync`) to storage. Currently
    /// a stub (`unimplemented!()`); intended implementation kept below.
    async fn async_fsync(
        &self,
        ctx: &Context,
        inode: <Self as FileSystem>::Inode,
        datasync: bool,
        handle: <Self as FileSystem>::Handle,
    ) -> io::Result<()> {
        unimplemented!()
        /*
        let data = self
            .async_get_data(ctx, handle, inode, libc::O_RDONLY)
            .await?;
        let drive = ctx
            .get_drive::<D>()
            .ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;

        AsyncUtil::fsync(drive, data.get_handle_raw_fd(), datasync).await
        */
    }

    /// Preallocate or punch file space. Currently a stub (`unimplemented!()`);
    /// intended implementation kept in the comment below.
    //
    // NOTE(review): the commented reference body passes `cxt` to
    // `async_stat_fd()` — that looks like a typo for `ctx`; fix before
    // re-enabling.
    async fn async_fallocate(
        &self,
        ctx: &Context,
        inode: <Self as FileSystem>::Inode,
        handle: <Self as FileSystem>::Handle,
        mode: u32,
        offset: u64,
        length: u64,
    ) -> io::Result<()> {
        unimplemented!()
        /*
        // Let the Arc<HandleData> in scope, otherwise fd may get invalid.
        let data = self
            .async_get_data(ctx, handle, inode, libc::O_RDWR)
            .await?;
        let drive = ctx
            .get_drive::<D>()
            .ok_or_else(|| io::Error::from_raw_os_error(libc::EINVAL))?;

        if self.seal_size.load(Ordering::Relaxed) {
            let st = self
                .async_stat_fd(cxt, data.get_handle_raw_fd(), None)
                .await?;
            self.seal_size_check(
                Opcode::Fallocate,
                st.st_size as u64,
                offset,
                length,
                mode as i32,
            )?;
        }

        AsyncUtil::fallocate(drive, data.get_handle_raw_fd(), offset, length, mode).await
        */
    }

    /// Sync a directory: delegates to `async_fsync()` (itself currently a
    /// stub that panics with `unimplemented!()`).
    async fn async_fsyncdir(
        &self,
        ctx: &Context,
        inode: <Self as FileSystem>::Inode,
        datasync: bool,
        handle: <Self as FileSystem>::Handle,
    ) -> io::Result<()> {
        self.async_fsync(ctx, inode, datasync, handle).await
    }
}