// btrfs_cli/inspect/map_swapfile.rs
1use crate::{RunContext, Runnable};
2use anyhow::{Context, Result, bail};
3use btrfs_disk::items::{ChunkItem, FileExtentBody, FileExtentType};
4use btrfs_uapi::{
5    raw::{
6        BTRFS_BLOCK_GROUP_PROFILE_MASK, BTRFS_CHUNK_ITEM_KEY,
7        BTRFS_CHUNK_TREE_OBJECTID, BTRFS_EXTENT_DATA_KEY,
8        BTRFS_FIRST_CHUNK_TREE_OBJECTID,
9    },
10    tree_search::{Key, SearchFilter, tree_search},
11};
12use clap::Parser;
13use std::{
14    fs::File,
15    os::unix::io::{AsFd, AsRawFd},
16    path::PathBuf,
17};
18
/// Print physical offset of first block and resume offset if file is
/// suitable as swapfile.
///
/// All conditions of swapfile extents are verified if they could pass
/// kernel tests. Use the value of resume offset for
/// /sys/power/resume_offset, this depends on the page size that is
/// detected on this system.
#[derive(Parser, Debug)]
#[allow(clippy::doc_markdown)]
pub struct MapSwapfileCommand {
    /// Print only the value of resume_offset
    // resume_offset is the physical start divided by the page size
    // (i.e. a page-granular offset, not a byte offset).
    #[arg(short = 'r', long)]
    resume_offset: bool,

    /// Path to a file on the btrfs filesystem
    path: PathBuf,
}
36
37impl Runnable for MapSwapfileCommand {
38    fn run(&self, _ctx: &RunContext) -> Result<()> {
39        let file = File::open(&self.path).with_context(|| {
40            format!("cannot open '{}'", self.path.display())
41        })?;
42
43        validate_file(&file, &self.path)?;
44
45        let fd = file.as_fd();
46        let chunks = read_chunk_tree(fd)?;
47
48        let tree_id = btrfs_uapi::inode::lookup_path_rootid(fd)
49            .context("cannot lookup parent subvolume")?;
50
51        let stat = nix::sys::stat::fstat(&file).context("cannot fstat file")?;
52
53        let physical_start =
54            map_physical_start(fd, tree_id, stat.st_ino, &chunks)?;
55
56        #[allow(clippy::cast_sign_loss)] // sysconf returns positive page size
57        let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as u64;
58        if self.resume_offset {
59            println!("{}", physical_start / page_size);
60        } else {
61            println!("Physical start: {physical_start:12}");
62            println!("Resume offset:  {:12}", physical_start / page_size);
63        }
64
65        Ok(())
66    }
67}
68
69const FS_NOCOW_FL: libc::c_long = 0x0080_0000;
70const FS_COMPR_FL: libc::c_long = 0x0000_0004;
71
72/// Validate that the file is on btrfs, is a regular file, is NOCOW,
73/// and is not compressed.
74fn validate_file(file: &File, path: &std::path::Path) -> Result<()> {
75    let stfs = nix::sys::statfs::fstatfs(file)
76        .with_context(|| format!("cannot statfs '{}'", path.display()))?;
77    if stfs.filesystem_type() != nix::sys::statfs::BTRFS_SUPER_MAGIC {
78        bail!("not a file on btrfs");
79    }
80
81    let stat = nix::sys::stat::fstat(file)
82        .with_context(|| format!("cannot fstat '{}'", path.display()))?;
83    if stat.st_mode & libc::S_IFMT != libc::S_IFREG {
84        bail!("not a regular file");
85    }
86
87    let mut flags: libc::c_long = 0;
88    let ret = unsafe {
89        libc::ioctl(file.as_raw_fd(), libc::FS_IOC_GETFLAGS, &mut flags)
90    };
91    if ret == -1 {
92        bail!(
93            "cannot verify file flags: {}",
94            std::io::Error::last_os_error()
95        );
96    }
97    if flags & FS_NOCOW_FL == 0 {
98        bail!("file is not NOCOW");
99    }
100    if flags & FS_COMPR_FL != 0 {
101        bail!("file has COMPR attribute");
102    }
103
104    Ok(())
105}
106
/// A parsed chunk from the chunk tree with stripe info.
struct Chunk {
    // Logical address where the chunk starts (the chunk item's key offset).
    offset: u64,
    // Length of the chunk in bytes.
    length: u64,
    // Stripe length in bytes.
    stripe_len: u64,
    // Chunk type flags, including the block group profile bits.
    type_flags: u64,
    // Number of stripes in this chunk.
    num_stripes: usize,
    // Per-stripe (device id, physical offset on that device) pairs.
    stripes: Vec<(u64, u64)>,
}
116
117/// Read all chunks from the chunk tree via tree search.
118fn read_chunk_tree(fd: std::os::unix::io::BorrowedFd) -> Result<Vec<Chunk>> {
119    let mut chunks = Vec::new();
120
121    tree_search(
122        fd,
123        SearchFilter::for_objectid_range(
124            u64::from(BTRFS_CHUNK_TREE_OBJECTID),
125            BTRFS_CHUNK_ITEM_KEY,
126            u64::from(BTRFS_FIRST_CHUNK_TREE_OBJECTID),
127            u64::from(BTRFS_FIRST_CHUNK_TREE_OBJECTID),
128        ),
129        |hdr, data| {
130            let Some(ci) = ChunkItem::parse(data) else {
131                return Ok(());
132            };
133            chunks.push(Chunk {
134                offset: hdr.offset,
135                length: ci.length,
136                stripe_len: ci.stripe_len,
137                type_flags: ci.chunk_type.bits(),
138                num_stripes: ci.num_stripes as usize,
139                stripes: ci
140                    .stripes
141                    .iter()
142                    .map(|s| (s.devid, s.offset))
143                    .collect(),
144            });
145            Ok(())
146        },
147    )
148    .context("failed to read chunk tree")?;
149
150    Ok(chunks)
151}
152
153/// Find the chunk containing `logical` via binary search.
154fn find_chunk(chunks: &[Chunk], logical: u64) -> Option<&Chunk> {
155    chunks
156        .binary_search_by(|c| {
157            if logical < c.offset {
158                std::cmp::Ordering::Greater
159            } else if logical >= c.offset + c.length {
160                std::cmp::Ordering::Less
161            } else {
162                std::cmp::Ordering::Equal
163            }
164        })
165        .ok()
166        .map(|i| &chunks[i])
167}
168
/// A file extent parsed from the extent data tree search.
struct FileExtent {
    // Logical (btrfs address space) start of the extent (disk_bytenr).
    logical_offset: u64,
    // Stripe geometry copied from the containing chunk.
    num_stripes: usize,
    stripe_len: u64,
    // Device id and physical offset of the chunk's first stripe.
    stripe_devid: u64,
    stripe_physical: u64,
    // Logical start of the containing chunk.
    chunk_offset: u64,
}
178
/// Walk the extent data for a file and compute the physical start offset.
///
/// Searches the EXTENT_DATA items of inode `ino` in subvolume tree
/// `tree_id`, rejects anything a swapfile cannot contain (inline,
/// unknown or compressed extents, holes, non-"single" block group
/// profiles, extents spread across multiple devices), and returns the
/// physical byte offset of the file's first extent on its device.
fn map_physical_start(
    fd: std::os::unix::io::BorrowedFd,
    tree_id: u64,
    ino: u64,
    chunks: &[Chunk],
) -> Result<u64> {
    // Collect extents first, then validate (tree_search callback is nix::Result).
    let mut extents: Vec<FileExtent> = Vec::new();
    // First validation failure seen inside the callback; the callback
    // cannot return anyhow errors, so the message is stashed here and
    // turned into a bail!() after the search completes.
    let mut error: Option<String> = None;

    tree_search(
        fd,
        SearchFilter {
            tree_id,
            // All EXTENT_DATA items of this inode, at any file offset.
            start: Key {
                objectid: ino,
                item_type: BTRFS_EXTENT_DATA_KEY,
                offset: 0,
            },
            end: Key {
                objectid: ino,
                item_type: BTRFS_EXTENT_DATA_KEY,
                offset: u64::MAX,
            },
            min_transid: 0,
            max_transid: u64::MAX,
        },
        |_hdr, data| {
            // Once a failure is recorded, skip all remaining items.
            if error.is_some() {
                return Ok(());
            }
            let Some(fe) = btrfs_disk::items::FileExtentItem::parse(data)
            else {
                return Ok(());
            };

            // Only regular and preallocated extents may back a swapfile.
            if fe.extent_type != FileExtentType::Regular
                && fe.extent_type != FileExtentType::Prealloc
            {
                error = Some(if fe.extent_type == FileExtentType::Inline {
                    "file with inline extent".to_string()
                } else {
                    "unknown extent type".to_string()
                });
                return Ok(());
            }

            let FileExtentBody::Regular { disk_bytenr, .. } = &fe.body else {
                return Ok(());
            };
            let logical_offset = *disk_bytenr;
            // disk_bytenr == 0 marks a hole; swapfiles must have none.
            if logical_offset == 0 {
                error = Some("file with holes".to_string());
                return Ok(());
            }

            if !matches!(
                fe.compression,
                btrfs_disk::items::CompressionType::None
            ) {
                error = Some("compressed extent".to_string());
                return Ok(());
            }

            // Map the logical address to its chunk for stripe geometry.
            let Some(chunk) = find_chunk(chunks, logical_offset) else {
                error = Some(format!(
                    "cannot find chunk containing {logical_offset}"
                ));
                return Ok(());
            };

            // Any profile bit set means RAID/DUP; only the "single"
            // profile (no profile bits) is supported for swapfiles.
            if chunk.type_flags & u64::from(BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0
            {
                error = Some(format!(
                    "unsupported block group profile: {:#x}",
                    chunk.type_flags
                        & u64::from(BTRFS_BLOCK_GROUP_PROFILE_MASK)
                ));
                return Ok(());
            }

            extents.push(FileExtent {
                logical_offset,
                num_stripes: chunk.num_stripes,
                stripe_len: chunk.stripe_len,
                // Single profile: the chunk has exactly one stripe.
                stripe_devid: chunk.stripes[0].0,
                stripe_physical: chunk.stripes[0].1,
                chunk_offset: chunk.offset,
            });

            Ok(())
        },
    )
    .context("failed to search extent data")?;

    if let Some(err) = error {
        bail!("{err}");
    }
    if extents.is_empty() {
        bail!("file has no extents");
    }

    // Validate all extents are on the same device.
    let first_devid = extents[0].stripe_devid;
    for ext in &extents[1..] {
        if ext.stripe_devid != first_devid {
            bail!("file stored on multiple devices");
        }
    }

    // Compute physical offset from the first extent.
    let ext = &extents[0];
    // For single profile (validated above), num_stripes == 1 and stripe_index
    // is always 0. The general formula from the C reference simplifies to:
    let offset = ext.logical_offset - ext.chunk_offset;
    let stripe_nr = offset / ext.stripe_len;
    let stripe_offset = offset - stripe_nr * ext.stripe_len;
    let physical_start = ext.stripe_physical
        + (stripe_nr / ext.num_stripes as u64) * ext.stripe_len
        + stripe_offset;

    Ok(physical_start)
}