//! btrfs_cli/inspect/map_swapfile.rs
1use crate::{Format, Runnable};
2use anyhow::{Context, Result, bail};
3use btrfs_disk::items::{ChunkItem, FileExtentBody, FileExtentType};
4use btrfs_uapi::{
5    raw::{
6        BTRFS_BLOCK_GROUP_PROFILE_MASK, BTRFS_CHUNK_ITEM_KEY,
7        BTRFS_CHUNK_TREE_OBJECTID, BTRFS_EXTENT_DATA_KEY,
8        BTRFS_FIRST_CHUNK_TREE_OBJECTID,
9    },
10    tree_search::{SearchKey, tree_search},
11};
12use clap::Parser;
13use std::{
14    fs::File,
15    os::unix::io::{AsFd, AsRawFd},
16    path::PathBuf,
17};
18
/// Print physical offset of first block and resume offset if file is
/// suitable as swapfile.
///
/// All conditions of swapfile extents are verified if they could pass
/// kernel tests. Use the value of resume offset for
/// /sys/power/resume_offset, this depends on the page size that is
/// detected on this system.
#[derive(Parser, Debug)]
// "resume_offset" / "/sys/power/resume_offset" in the doc text are literal
// sysfs names, not markdown identifiers — silence the doc_markdown lint.
#[allow(clippy::doc_markdown)]
pub struct MapSwapfileCommand {
    /// Print only the value of resume_offset
    #[arg(short = 'r', long)]
    resume_offset: bool,

    /// Path to a file on the btrfs filesystem
    path: PathBuf,
}
36
37impl Runnable for MapSwapfileCommand {
38    fn run(&self, _format: Format, _dry_run: bool) -> Result<()> {
39        let file = File::open(&self.path).with_context(|| {
40            format!("cannot open '{}'", self.path.display())
41        })?;
42
43        validate_file(&file, &self.path)?;
44
45        let fd = file.as_fd();
46        let chunks = read_chunk_tree(fd)?;
47
48        let tree_id = btrfs_uapi::inode::lookup_path_rootid(fd)
49            .context("cannot lookup parent subvolume")?;
50
51        let stat = nix::sys::stat::fstat(&file).context("cannot fstat file")?;
52
53        let physical_start =
54            map_physical_start(fd, tree_id, stat.st_ino, &chunks)?;
55
56        #[allow(clippy::cast_sign_loss)] // sysconf returns positive page size
57        let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as u64;
58        if self.resume_offset {
59            println!("{}", physical_start / page_size);
60        } else {
61            println!("Physical start: {physical_start:12}");
62            println!("Resume offset:  {:12}", physical_start / page_size);
63        }
64
65        Ok(())
66    }
67}
68
69const FS_NOCOW_FL: libc::c_long = 0x0080_0000;
70const FS_COMPR_FL: libc::c_long = 0x0000_0004;
71
72/// Validate that the file is on btrfs, is a regular file, is NOCOW,
73/// and is not compressed.
74fn validate_file(file: &File, path: &std::path::Path) -> Result<()> {
75    let stfs = nix::sys::statfs::fstatfs(file)
76        .with_context(|| format!("cannot statfs '{}'", path.display()))?;
77    if stfs.filesystem_type() != nix::sys::statfs::BTRFS_SUPER_MAGIC {
78        bail!("not a file on btrfs");
79    }
80
81    let stat = nix::sys::stat::fstat(file)
82        .with_context(|| format!("cannot fstat '{}'", path.display()))?;
83    if stat.st_mode & libc::S_IFMT != libc::S_IFREG {
84        bail!("not a regular file");
85    }
86
87    let mut flags: libc::c_long = 0;
88    let ret = unsafe {
89        libc::ioctl(file.as_raw_fd(), libc::FS_IOC_GETFLAGS, &mut flags)
90    };
91    if ret == -1 {
92        bail!(
93            "cannot verify file flags: {}",
94            std::io::Error::last_os_error()
95        );
96    }
97    if flags & FS_NOCOW_FL == 0 {
98        bail!("file is not NOCOW");
99    }
100    if flags & FS_COMPR_FL != 0 {
101        bail!("file has COMPR attribute");
102    }
103
104    Ok(())
105}
106
/// A parsed chunk from the chunk tree with stripe info.
struct Chunk {
    offset: u64,              // logical start address of the chunk
    length: u64,              // chunk size in bytes
    stripe_len: u64,          // stripe length from the chunk item
    type_flags: u64,          // block group type/profile flag bits
    num_stripes: usize,       // stripe count recorded in the chunk item
    stripes: Vec<(u64, u64)>, // (devid, physical offset) per stripe
}
116
117/// Read all chunks from the chunk tree via tree search.
118fn read_chunk_tree(fd: std::os::unix::io::BorrowedFd) -> Result<Vec<Chunk>> {
119    let mut chunks = Vec::new();
120
121    tree_search(
122        fd,
123        SearchKey::for_objectid_range(
124            u64::from(BTRFS_CHUNK_TREE_OBJECTID),
125            BTRFS_CHUNK_ITEM_KEY,
126            u64::from(BTRFS_FIRST_CHUNK_TREE_OBJECTID),
127            u64::from(BTRFS_FIRST_CHUNK_TREE_OBJECTID),
128        ),
129        |hdr, data| {
130            let Some(ci) = ChunkItem::parse(data) else {
131                return Ok(());
132            };
133            chunks.push(Chunk {
134                offset: hdr.offset,
135                length: ci.length,
136                stripe_len: ci.stripe_len,
137                type_flags: ci.chunk_type.bits(),
138                num_stripes: ci.num_stripes as usize,
139                stripes: ci
140                    .stripes
141                    .iter()
142                    .map(|s| (s.devid, s.offset))
143                    .collect(),
144            });
145            Ok(())
146        },
147    )
148    .context("failed to read chunk tree")?;
149
150    Ok(chunks)
151}
152
153/// Find the chunk containing `logical` via binary search.
154fn find_chunk(chunks: &[Chunk], logical: u64) -> Option<&Chunk> {
155    chunks
156        .binary_search_by(|c| {
157            if logical < c.offset {
158                std::cmp::Ordering::Greater
159            } else if logical >= c.offset + c.length {
160                std::cmp::Ordering::Less
161            } else {
162                std::cmp::Ordering::Equal
163            }
164        })
165        .ok()
166        .map(|i| &chunks[i])
167}
168
/// A file extent parsed from the extent data tree search.
struct FileExtent {
    logical_offset: u64,  // extent's disk_bytenr (logical address)
    num_stripes: usize,   // stripe count of the containing chunk
    stripe_len: u64,      // stripe length of the containing chunk
    stripe_devid: u64,    // device id of the chunk's first stripe
    stripe_physical: u64, // physical start of the chunk's first stripe
    chunk_offset: u64,    // logical start of the containing chunk
}
178
179/// Walk the extent data for a file and compute the physical start offset.
180fn map_physical_start(
181    fd: std::os::unix::io::BorrowedFd,
182    tree_id: u64,
183    ino: u64,
184    chunks: &[Chunk],
185) -> Result<u64> {
186    // Collect extents first, then validate (tree_search callback is nix::Result).
187    let mut extents: Vec<FileExtent> = Vec::new();
188    let mut error: Option<String> = None;
189
190    tree_search(
191        fd,
192        SearchKey {
193            tree_id,
194            min_objectid: ino,
195            max_objectid: ino,
196            min_type: BTRFS_EXTENT_DATA_KEY,
197            max_type: BTRFS_EXTENT_DATA_KEY,
198            min_offset: 0,
199            max_offset: u64::MAX,
200            min_transid: 0,
201            max_transid: u64::MAX,
202        },
203        |_hdr, data| {
204            if error.is_some() {
205                return Ok(());
206            }
207            let Some(fe) = btrfs_disk::items::FileExtentItem::parse(data)
208            else {
209                return Ok(());
210            };
211
212            if fe.extent_type != FileExtentType::Regular
213                && fe.extent_type != FileExtentType::Prealloc
214            {
215                error = Some(if fe.extent_type == FileExtentType::Inline {
216                    "file with inline extent".to_string()
217                } else {
218                    "unknown extent type".to_string()
219                });
220                return Ok(());
221            }
222
223            let FileExtentBody::Regular { disk_bytenr, .. } = &fe.body else {
224                return Ok(());
225            };
226            let logical_offset = *disk_bytenr;
227            if logical_offset == 0 {
228                error = Some("file with holes".to_string());
229                return Ok(());
230            }
231
232            if !matches!(
233                fe.compression,
234                btrfs_disk::items::CompressionType::None
235            ) {
236                error = Some("compressed extent".to_string());
237                return Ok(());
238            }
239
240            let Some(chunk) = find_chunk(chunks, logical_offset) else {
241                error = Some(format!(
242                    "cannot find chunk containing {logical_offset}"
243                ));
244                return Ok(());
245            };
246
247            if chunk.type_flags & u64::from(BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0
248            {
249                error = Some(format!(
250                    "unsupported block group profile: {:#x}",
251                    chunk.type_flags
252                        & u64::from(BTRFS_BLOCK_GROUP_PROFILE_MASK)
253                ));
254                return Ok(());
255            }
256
257            extents.push(FileExtent {
258                logical_offset,
259                num_stripes: chunk.num_stripes,
260                stripe_len: chunk.stripe_len,
261                stripe_devid: chunk.stripes[0].0,
262                stripe_physical: chunk.stripes[0].1,
263                chunk_offset: chunk.offset,
264            });
265
266            Ok(())
267        },
268    )
269    .context("failed to search extent data")?;
270
271    if let Some(err) = error {
272        bail!("{err}");
273    }
274    if extents.is_empty() {
275        bail!("file has no extents");
276    }
277
278    // Validate all extents are on the same device.
279    let first_devid = extents[0].stripe_devid;
280    for ext in &extents[1..] {
281        if ext.stripe_devid != first_devid {
282            bail!("file stored on multiple devices");
283        }
284    }
285
286    // Compute physical offset from the first extent.
287    let ext = &extents[0];
288    // For single profile (validated above), num_stripes == 1 and stripe_index
289    // is always 0. The general formula from the C reference simplifies to:
290    let offset = ext.logical_offset - ext.chunk_offset;
291    let stripe_nr = offset / ext.stripe_len;
292    let stripe_offset = offset - stripe_nr * ext.stripe_len;
293    let physical_start = ext.stripe_physical
294        + (stripe_nr / ext.num_stripes as u64) * ext.stripe_len
295        + stripe_offset;
296
297    Ok(physical_start)
298}