devicemapper/thinpooldev.rs

// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

use std::{collections::hash_set::HashSet, fmt, path::PathBuf, str::FromStr};

use crate::{
    core::{DevId, Device, DeviceInfo, DmFlags, DmName, DmOptions, DmUuid, DM},
    lineardev::{LinearDev, LinearDevTargetParams},
    result::{DmError, DmResult, ErrorEnum},
    shared::{
        device_create, device_exists, device_match, get_status, get_status_line_fields,
        make_unexpected_value_error, parse_device, parse_value, DmDevice, TargetLine, TargetParams,
        TargetTable, TargetTypeBuf,
    },
    units::{DataBlocks, MetaBlocks, Sectors},
};

#[cfg(test)]
use std::path::Path;

#[cfg(test)]
use crate::core::devnode_to_devno;

const THINPOOL_TARGET_NAME: &str = "thin-pool";

/// Struct representing params for a thin pool target
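///
/// These fields are rendered into the params string passed to the kernel by
/// `param_str`. As an illustrative (hypothetical) example, a pool on
/// metadata device 254:3 and data device 254:4 with a 2048-sector data block
/// size, a low water mark of 512 data blocks, and one feature argument
/// renders as:
/// ```plain
/// 254:3 254:4 2048 512 1 skip_block_zeroing
/// ```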
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ThinPoolTargetParams {
    /// Thin pool metadata device
    pub metadata_dev: Device,
    /// Thin pool data device
    pub data_dev: Device,
    /// Block size for allocations within the thin pool
    pub data_block_size: Sectors,
    /// Amount of free space left at which to trigger the low water mark
    pub low_water_mark: DataBlocks,
    /// Feature arguments
    pub feature_args: HashSet<String>,
}

impl ThinPoolTargetParams {
    /// Create a new ThinPoolTargetParams struct
    pub fn new(
        metadata_dev: Device,
        data_dev: Device,
        data_block_size: Sectors,
        low_water_mark: DataBlocks,
        feature_args: Vec<String>,
    ) -> ThinPoolTargetParams {
        ThinPoolTargetParams {
            metadata_dev,
            data_dev,
            data_block_size,
            low_water_mark,
            feature_args: feature_args.into_iter().collect::<HashSet<_>>(),
        }
    }
}

impl fmt::Display for ThinPoolTargetParams {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} {}", THINPOOL_TARGET_NAME, self.param_str())
    }
}

impl FromStr for ThinPoolTargetParams {
    type Err = DmError;

    fn from_str(s: &str) -> DmResult<ThinPoolTargetParams> {
        let vals = s.split(' ').collect::<Vec<_>>();

        if vals.len() < 5 {
            let err_msg = format!(
                "expected at least 5 values in params string \"{}\", found {}",
                s,
                vals.len()
            );
            return Err(DmError::Dm(ErrorEnum::Invalid, err_msg));
        }

        if vals[0] != THINPOOL_TARGET_NAME {
            let err_msg = format!(
                "Expected a thin-pool target entry but found target type {}",
                vals[0]
            );
            return Err(DmError::Dm(ErrorEnum::Invalid, err_msg));
        }

        let metadata_dev = parse_device(vals[1], "metadata device for thinpool target")?;
        let data_dev = parse_device(vals[2], "data device for thinpool target")?;

        let data_block_size = Sectors(parse_value(vals[3], "data block size")?);
        let low_water_mark = DataBlocks(parse_value(vals[4], "low water mark")?);

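        // Feature args are encoded as a count followed by that many
        // whitespace-separated arguments; either a count of 0 or the field
        // being absent entirely yields an empty set.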
        let feature_args = if vals.len() == 5 {
            vec![]
        } else {
            vals[6..6 + parse_value::<usize>(vals[5], "number of feature args")?]
                .iter()
                .map(|x| (*x).to_string())
                .collect()
        };

        Ok(ThinPoolTargetParams::new(
            metadata_dev,
            data_dev,
            data_block_size,
            low_water_mark,
            feature_args,
        ))
    }
}

impl TargetParams for ThinPoolTargetParams {
    fn param_str(&self) -> String {
        let feature_args = if self.feature_args.is_empty() {
            "0".to_owned()
        } else {
            format!(
                "{} {}",
                self.feature_args.len(),
                self.feature_args
                    .iter()
                    .cloned()
                    .collect::<Vec<_>>()
                    .join(" ")
            )
        };

        format!(
            "{} {} {} {} {}",
            self.metadata_dev,
            self.data_dev,
            *self.data_block_size,
            *self.low_water_mark,
            feature_args
        )
    }

    fn target_type(&self) -> TargetTypeBuf {
        TargetTypeBuf::new(THINPOOL_TARGET_NAME.into()).expect("THINPOOL_TARGET_NAME is valid")
    }
}

/// A target table for a thin pool device.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ThinPoolDevTargetTable {
    /// The device's table
    pub table: TargetLine<ThinPoolTargetParams>,
}

impl ThinPoolDevTargetTable {
    /// Make a new ThinPoolDevTargetTable from the given start, length, and params
    pub fn new(
        start: Sectors,
        length: Sectors,
        params: ThinPoolTargetParams,
    ) -> ThinPoolDevTargetTable {
        ThinPoolDevTargetTable {
            table: TargetLine::new(start, length, params),
        }
    }
}

impl fmt::Display for ThinPoolDevTargetTable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let table = &self.table;
        writeln!(f, "{} {} {}", *table.start, *table.length, table.params)
    }
}

impl TargetTable for ThinPoolDevTargetTable {
    fn from_raw_table(table: &[(u64, u64, String, String)]) -> DmResult<ThinPoolDevTargetTable> {
        if table.len() != 1 {
            let err_msg = format!(
                "ThinPoolDev table should have exactly one line, has {} lines",
                table.len()
            );
            return Err(DmError::Dm(ErrorEnum::Invalid, err_msg));
        }
        let line = table.first().expect("table.len() == 1");
        Ok(ThinPoolDevTargetTable::new(
            Sectors(line.0),
            Sectors(line.1),
            format!("{} {}", line.2, line.3).parse::<ThinPoolTargetParams>()?,
        ))
    }

    fn to_raw_table(&self) -> Vec<(u64, u64, String, String)> {
        to_raw_table_unique!(self)
    }
}

/// DM construct to contain thin provisioned devices
#[derive(Debug)]
pub struct ThinPoolDev {
    dev_info: Box<DeviceInfo>,
    meta_dev: LinearDev,
    data_dev: LinearDev,
    table: ThinPoolDevTargetTable,
}

impl DmDevice<ThinPoolDevTargetTable> for ThinPoolDev {
    fn device(&self) -> Device {
        device!(self)
    }

    fn devnode(&self) -> PathBuf {
        devnode!(self)
    }

    // This method is incomplete. It is expected that it will be refined so
    // that it will return true in more cases, i.e., to be less stringent.
    // In particular, two devices are equivalent even if their low water
    // marks are different.
    fn equivalent_tables(
        left: &ThinPoolDevTargetTable,
        right: &ThinPoolDevTargetTable,
    ) -> DmResult<bool> {
        let left = &left.table;
        let right = &right.table;

        Ok(left.start == right.start
            && left.length == right.length
            && left.params.metadata_dev == right.params.metadata_dev
            && left.params.data_dev == right.params.data_dev
            && left.params.data_block_size == right.params.data_block_size)
    }

    fn name(&self) -> &DmName {
        name!(self)
    }

    fn size(&self) -> Sectors {
        self.data_dev.size()
    }

    fn table(&self) -> &ThinPoolDevTargetTable {
        table!(self)
    }

    fn teardown(&mut self, dm: &DM) -> DmResult<()> {
        dm.device_remove(&DevId::Name(self.name()), DmOptions::default())?;
        self.data_dev.teardown(dm)?;
        self.meta_dev.teardown(dm)?;
        Ok(())
    }

    fn uuid(&self) -> Option<&DmUuid> {
        uuid!(self)
    }
}

#[derive(Debug, Clone)]
/// Contains values indicating the thinpool's used vs total
/// allocations for metadata and data blocks.
pub struct ThinPoolUsage {
    /// The number of metadata blocks that are in use.
    pub used_meta: MetaBlocks,
    /// The total number of metadata blocks available to the thinpool.
    pub total_meta: MetaBlocks,
    /// The number of data blocks that are in use.
    pub used_data: DataBlocks,
    /// The total number of data blocks available to the thinpool.
    pub total_data: DataBlocks,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Indicates whether a working thinpool is operating optimally or is
/// experiencing a non-fatal error condition.
pub enum ThinPoolStatusSummary {
    /// The pool is working normally.
    Good,
    /// The pool has been forced to transition to read-only mode.
    ReadOnly,
    /// The pool is out of space.
    OutOfSpace,
}

/// Policy if no space on device
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ThinPoolNoSpacePolicy {
    /// error the IO if no space on device
    Error,
    /// queue the IO if no space on device
    Queue,
}

/// Status of a working thin pool, i.e., one that does not have status Fail
#[derive(Debug, Clone)]
pub struct ThinPoolWorkingStatus {
    /// The transaction id.
    pub transaction_id: u64,
    /// A struct recording block usage for meta and data devices.
    pub usage: ThinPoolUsage,
    /// A single block value indicating the held metadata root
    pub held_metadata_root: Option<MetaBlocks>,
    /// discard_passdown/no_discard_passdown
    pub discard_passdown: bool,
    /// no space policy
    pub no_space_policy: ThinPoolNoSpacePolicy,
    /// A summary of some other status information.
    pub summary: ThinPoolStatusSummary,
    /// needs_check flag has been set in metadata superblock
    pub needs_check: bool,
    /// The lowater value for the metadata device in metablocks. This value
    /// is set by the kernel. Available in kernel version 4.19 and later.
    pub meta_low_water: Option<u64>,
}

impl ThinPoolWorkingStatus {
    /// Make a new ThinPoolWorkingStatus struct
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        transaction_id: u64,
        usage: ThinPoolUsage,
        held_metadata_root: Option<MetaBlocks>,
        discard_passdown: bool,
        no_space_policy: ThinPoolNoSpacePolicy,
        summary: ThinPoolStatusSummary,
        needs_check: bool,
        meta_low_water: Option<u64>,
    ) -> ThinPoolWorkingStatus {
        ThinPoolWorkingStatus {
            transaction_id,
            usage,
            held_metadata_root,
            discard_passdown,
            no_space_policy,
            summary,
            needs_check,
            meta_low_water,
        }
    }
}

#[derive(Debug, Clone)]
/// Top-level thinpool status that indicates if it is working or failed.
pub enum ThinPoolStatus {
    /// The thinpool has not failed utterly.
    Working(Box<ThinPoolWorkingStatus>),
    /// Devicemapper has reported that it could not obtain the status
    Error,
    /// The thinpool is in a failed condition.
    Fail,
}

impl FromStr for ThinPoolStatus {
    type Err = DmError;

    fn from_str(status_line: &str) -> DmResult<ThinPoolStatus> {
        if status_line.starts_with("Error") {
            return Ok(ThinPoolStatus::Error);
        }

        if status_line.starts_with("Fail") {
            return Ok(ThinPoolStatus::Fail);
        }

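        // A working status line, as parsed below, has the form:
        //
        //   <transaction id> <used meta>/<total meta> <used data>/<total data>
        //   <held metadata root or -> rw|ro|out_of_data_space
        //   discard_passdown|no_discard_passdown
        //   error_if_no_space|queue_if_no_space needs_check|- [<meta lowater>]
        //
        // The trailing metadata low water field is only reported by kernels
        // 4.19 and later, so it is parsed opportunistically.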
        let status_vals = get_status_line_fields(status_line, 8)?;

        let transaction_id = parse_value(status_vals[0], "transaction id")?;

        let usage = {
            let meta_vals = status_vals[1].split('/').collect::<Vec<_>>();
            let data_vals = status_vals[2].split('/').collect::<Vec<_>>();
            ThinPoolUsage {
                used_meta: MetaBlocks(parse_value(meta_vals[0], "used meta")?),
                total_meta: MetaBlocks(parse_value(meta_vals[1], "total meta")?),
                used_data: DataBlocks(parse_value(data_vals[0], "used data")?),
                total_data: DataBlocks(parse_value(data_vals[1], "total data")?),
            }
        };

        let held_metadata_root = match status_vals[3] {
            "-" => None,
            val => Some(MetaBlocks(parse_value(val, "held metadata root")?)),
        };

        let summary = match status_vals[4] {
            "rw" => ThinPoolStatusSummary::Good,
            "ro" => ThinPoolStatusSummary::ReadOnly,
            "out_of_data_space" => ThinPoolStatusSummary::OutOfSpace,
            val => {
                return Err(make_unexpected_value_error(5, val, "summary"));
            }
        };

        let discard_passdown = match status_vals[5] {
            "discard_passdown" => true,
            "no_discard_passdown" => false,
            val => {
                return Err(make_unexpected_value_error(6, val, "discard passdown"));
            }
        };

        let no_space_policy = match status_vals[6] {
            "error_if_no_space" => ThinPoolNoSpacePolicy::Error,
            "queue_if_no_space" => ThinPoolNoSpacePolicy::Queue,
            val => {
                return Err(make_unexpected_value_error(7, val, "no space policy"));
            }
        };

        let needs_check = match status_vals[7] {
            "-" => false,
            "needs_check" => true,
            val => {
                return Err(make_unexpected_value_error(8, val, "needs check"));
            }
        };

        let meta_low_water = status_vals
            .get(8)
            .and_then(|v| parse_value(v, "meta low water").ok());

        Ok(ThinPoolStatus::Working(Box::new(
            ThinPoolWorkingStatus::new(
                transaction_id,
                usage,
                held_metadata_root,
                discard_passdown,
                no_space_policy,
                summary,
                needs_check,
                meta_low_water,
            ),
        )))
    }
}

/// Use DM to create a "thin-pool".  A "thin-pool" is shared space for
/// other thin provisioned devices to use.
///
/// See section ["Setting up a fresh pool device"](https://docs.kernel.org/admin-guide/device-mapper/thin-provisioning.html#setting-up-a-fresh-pool-device)
impl ThinPoolDev {
    /// Construct a new `ThinPoolDev` with the given data and meta devs.
    /// Returns an error if the device is already known to the kernel.
    /// Returns an error if `data_block_size` is not within required range.
    /// Precondition: the metadata device does not contain any pool metadata.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        dm: &DM,
        name: &DmName,
        uuid: Option<&DmUuid>,
        meta: LinearDev,
        data: LinearDev,
        data_block_size: Sectors,
        low_water_mark: DataBlocks,
        feature_args: Vec<String>,
    ) -> DmResult<ThinPoolDev> {
        if device_exists(dm, name)? {
            let err_msg = format!("thinpooldev {name} already exists");
            return Err(DmError::Dm(ErrorEnum::Invalid, err_msg));
        }

        let table =
            ThinPoolDev::gen_table(&meta, &data, data_block_size, low_water_mark, feature_args);
        let dev_info = device_create(dm, name, uuid, &table, DmOptions::private())?;

        Ok(ThinPoolDev {
            dev_info: Box::new(dev_info),
            meta_dev: meta,
            data_dev: data,
            table,
        })
    }

    /// Obtain the meta device that backs this thin pool device.
    pub fn meta_dev(&self) -> &LinearDev {
        &self.meta_dev
    }

    /// Obtain the data device that backs this thin pool device.
    pub fn data_dev(&self) -> &LinearDev {
        &self.data_dev
    }

    /// Obtain the data block size for this thin pool device.
    pub fn data_block_size(&self) -> Sectors {
        self.table.table.params.data_block_size
    }

    /// Set up a thin pool from the given metadata and data device.
    /// Returns an error if data_block_size is not within required range.
    /// Precondition: There is existing metadata for this thinpool device
    /// on the metadata device. If the metadata is corrupted, subsequent
    /// errors will result, so it is expected that the metadata is
    /// well-formed and consistent with the data on the data device.
    #[allow(clippy::too_many_arguments)]
    pub fn setup(
        dm: &DM,
        name: &DmName,
        uuid: Option<&DmUuid>,
        meta: LinearDev,
        data: LinearDev,
        data_block_size: Sectors,
        low_water_mark: DataBlocks,
        feature_args: Vec<String>,
    ) -> DmResult<ThinPoolDev> {
        let table =
            ThinPoolDev::gen_table(&meta, &data, data_block_size, low_water_mark, feature_args);
        let dev = if device_exists(dm, name)? {
            let dev_info = dm.device_info(&DevId::Name(name))?;
            let dev = ThinPoolDev {
                dev_info: Box::new(dev_info),
                meta_dev: meta,
                data_dev: data,
                table,
            };
            device_match(dm, &dev, uuid)?;
            dev
        } else {
            let dev_info = device_create(dm, name, uuid, &table, DmOptions::private())?;
            ThinPoolDev {
                dev_info: Box::new(dev_info),
                meta_dev: meta,
                data_dev: data,
                table,
            }
        };
        Ok(dev)
    }

    /// Generate a table to be passed to DM. The format of the table
    /// entries is:
    /// ```plain
    /// <start sec (0)> <length> "thin-pool" <thin-pool-specific string>
    /// ```
    /// where the thin-pool-specific string has the format:
    /// ```plain
    /// <meta maj:min> <data maj:min> <block size> <low water mark> <#feature args> [<feature arg>]*
    /// ```
    /// There is exactly one entry in the table.
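    ///
    /// As an illustrative (hypothetical) example of such a line, for a pool
    /// whose data device is 262144 sectors long, built on metadata device
    /// 254:3 and data device 254:4 with a 2048-sector block size, a low
    /// water mark of 512 data blocks, and one feature argument:
    /// ```plain
    /// 0 262144 thin-pool 254:3 254:4 2048 512 1 skip_block_zeroing
    /// ```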
    fn gen_table(
        meta: &LinearDev,
        data: &LinearDev,
        data_block_size: Sectors,
        low_water_mark: DataBlocks,
        feature_args: Vec<String>,
    ) -> ThinPoolDevTargetTable {
        ThinPoolDevTargetTable::new(
            Sectors::default(),
            data.size(),
            ThinPoolTargetParams::new(
                meta.device(),
                data.device(),
                data_block_size,
                low_water_mark,
                feature_args,
            ),
        )
    }

    /// Set the low water mark.
    /// This action puts the device in a state where it is ready to be resumed.
    pub fn set_low_water_mark(&mut self, dm: &DM, low_water_mark: DataBlocks) -> DmResult<()> {
        let mut new_table = self.table.clone();
        new_table.table.params.low_water_mark = low_water_mark;

        self.suspend(dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))?;
        self.table_load(dm, &new_table, DmOptions::default())?;

        self.table = new_table;
        Ok(())
    }

    /// Get the current status of the thinpool.
    /// Returns an error if there was an error getting the status value.
    pub fn status(&self, dm: &DM, options: DmOptions) -> DmResult<ThinPoolStatus> {
        status!(self, dm, options)
    }

    /// Set the table for the existing metadata device.
    /// This action puts the device in a state where it is ready to be resumed.
    /// Warning: It is the client's responsibility to make sure the designated
    /// table is compatible with the device's existing table.
    /// If it is not, this function will still succeed, but some kind of
    /// data corruption will be the inevitable result.
    pub fn set_meta_table(
        &mut self,
        dm: &DM,
        table: Vec<TargetLine<LinearDevTargetParams>>,
    ) -> DmResult<()> {
        self.suspend(dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))?;
        self.meta_dev.set_table(dm, table)?;
        self.meta_dev.resume(dm)?;

        // Reload the table even though it is unchanged.
        // See comment on CacheDev::set_cache_table for reason.
        self.table_load(dm, self.table(), DmOptions::default())?;

        Ok(())
    }

    /// Set the table for the existing data device.
    /// This action puts the device in a state where it is ready to be resumed.
    /// Warning: It is the client's responsibility to make sure the designated
    /// table is compatible with the device's existing table.
    /// If it is not, this function will still succeed, but some kind of
    /// data corruption will be the inevitable result.
    pub fn set_data_table(
        &mut self,
        dm: &DM,
        table: Vec<TargetLine<LinearDevTargetParams>>,
    ) -> DmResult<()> {
        self.suspend(dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))?;

        self.data_dev.set_table(dm, table)?;
        self.data_dev.resume(dm)?;

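        // The pool's table length tracks the data device, so regenerate the
        // pool table with the new data device size before loading it.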
        let mut table = self.table.clone();
        table.table.length = self.data_dev.size();
        self.table_load(dm, &table, DmOptions::default())?;

        self.table = table;

        Ok(())
    }

    fn set_feature_arg(&mut self, feature_arg: &str, dm: &DM) -> DmResult<()> {
        let mut table = self.table().clone();
        if !table.table.params.feature_args.contains(feature_arg) {
            table
                .table
                .params
                .feature_args
                .insert(feature_arg.to_string());

            self.suspend(dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))?;
            self.table_load(dm, &table, DmOptions::default())?;
            self.table = table;

            self.resume(dm)?;
        }

        Ok(())
    }

    fn unset_feature_arg(&mut self, feature_arg: &str, dm: &DM) -> DmResult<()> {
        let mut table = self.table().clone();
        if table.table.params.feature_args.contains(feature_arg) {
            table.table.params.feature_args.remove(feature_arg);

            self.suspend(dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))?;
            self.table_load(dm, &table, DmOptions::default())?;
            self.table = table;

            self.resume(dm)?;
        }

        Ok(())
    }

    /// Default behavior for devicemapper thin pools is to queue requests if
    /// the thin pool is out of space to allow time for the thin pool to extend.
    /// This behavior can be changed by adding the feature argument
    /// `error_if_no_space` to the devicemapper table.
    ///
    /// This method will add `error_if_no_space` to the devicemapper table
    /// if it is not present.
    pub fn error_if_no_space(&mut self, dm: &DM) -> DmResult<()> {
        self.set_feature_arg("error_if_no_space", dm)
    }

    /// Default behavior for devicemapper thin pools is to queue requests if
    /// the thin pool is out of space to allow time for the thin pool to extend.
    /// This behavior can be changed by adding the feature argument
    /// `error_if_no_space` to the devicemapper table.
    ///
    /// This method will remove `error_if_no_space` from the devicemapper table
    /// if it is present.
    pub fn queue_if_no_space(&mut self, dm: &DM) -> DmResult<()> {
        self.unset_feature_arg("error_if_no_space", dm)
    }

    /// Default behavior for devicemapper thin pools is to zero newly allocated
    /// data blocks. This behavior can be changed by adding the feature argument
    /// `skip_block_zeroing` to the devicemapper table.
    ///
    /// This method will add `skip_block_zeroing` to the devicemapper table
    /// if it is not present.
    pub fn skip_block_zeroing(&mut self, dm: &DM) -> DmResult<()> {
        self.set_feature_arg("skip_block_zeroing", dm)
    }

    /// Default behavior for devicemapper thin pools is to zero newly allocated
    /// data blocks. This behavior can be changed by adding the feature argument
    /// `skip_block_zeroing` to the devicemapper table.
    ///
    /// This method will remove `skip_block_zeroing` from the devicemapper table
    /// if it is present.
    pub fn require_block_zeroing(&mut self, dm: &DM) -> DmResult<()> {
        self.unset_feature_arg("skip_block_zeroing", dm)
    }

    /// Default behavior for devicemapper thin pools is to pass down discards.
    /// This behavior can be changed by adding the feature argument
    /// `no_discard_passdown` to the devicemapper table.
    ///
    /// This method will add `no_discard_passdown` to the devicemapper table
    /// if it is not present.
    pub fn no_discard_passdown(&mut self, dm: &DM) -> DmResult<()> {
        self.set_feature_arg("no_discard_passdown", dm)
    }

    /// Default behavior for devicemapper thin pools is to pass down discards.
    /// This behavior can be changed by adding the feature argument
    /// `no_discard_passdown` to the devicemapper table.
    ///
    /// This method will remove `no_discard_passdown` from the devicemapper
    /// table if it is present.
    pub fn discard_passdown(&mut self, dm: &DM) -> DmResult<()> {
        self.unset_feature_arg("no_discard_passdown", dm)
    }
}

#[cfg(test)]
use std::fs::OpenOptions;

#[cfg(test)]
use crate::{
    consts::IEC,
    lineardev::LinearTargetParams,
    testing::{blkdev_size, test_name},
};

/// Values are explicitly stated in the device-mapper kernel documentation.
#[cfg(test)]
const MIN_DATA_BLOCK_SIZE: Sectors = Sectors(128); // 64 KiB
#[cfg(test)]
#[allow(dead_code)]
const MAX_DATA_BLOCK_SIZE: Sectors = Sectors(2 * IEC::Mi); // 1 GiB
#[cfg(test)]
const MIN_RECOMMENDED_METADATA_SIZE: Sectors = Sectors(4 * IEC::Ki); // 2 MiB
#[cfg(test)]
#[allow(dead_code)]
// Note that this value is stated in the kernel docs to be 16 GiB, but the
// devicemapper source gives a different value for THIN_METADATA_MAX_SECTORS,
// which is the actual maximum size.
const MAX_METADATA_SIZE: MetaBlocks = MetaBlocks(255 * ((1 << 14) - 64));

#[cfg(test)]
/// Generate a minimal thinpool dev. Use all the space available not consumed
/// by the metadata device for the data device.
pub fn minimal_thinpool(dm: &DM, path: &Path) -> ThinPoolDev {
    let dev_size = blkdev_size(&OpenOptions::new().read(true).open(path).unwrap()).sectors();
    let dev = Device::from(devnode_to_devno(path).unwrap().unwrap());
    let meta_params = LinearTargetParams::new(dev, Sectors(0));
    let meta_table = vec![TargetLine::new(
        Sectors(0),
        MIN_RECOMMENDED_METADATA_SIZE,
        LinearDevTargetParams::Linear(meta_params),
    )];
    let meta = LinearDev::setup(
        dm,
        &test_name("meta").expect("valid format"),
        None,
        meta_table,
    )
    .unwrap();

    let data_params = LinearTargetParams::new(dev, MIN_RECOMMENDED_METADATA_SIZE);
    let data_table = vec![TargetLine::new(
        Sectors(0),
        dev_size - MIN_RECOMMENDED_METADATA_SIZE,
        LinearDevTargetParams::Linear(data_params),
    )];
    let data = LinearDev::setup(
        dm,
        &test_name("data").expect("valid format"),
        None,
        data_table,
    )
    .unwrap();

    ThinPoolDev::new(
        dm,
        &test_name("pool").expect("valid format"),
        None,
        meta,
        data,
        MIN_DATA_BLOCK_SIZE,
        DataBlocks(1),
        vec![
            "no_discard_passdown".to_owned(),
            "skip_block_zeroing".to_owned(),
        ],
    )
    .unwrap()
}

#[cfg(test)]
mod tests {
    use std::path::Path;

    use crate::{
        core::{errors::Error, DmFlags},
        testing::{test_name, test_with_spec},
    };

    use super::*;

    /// Verify success when constructing a new ThinPoolDev with minimum values
    /// for data block size and metadata device. Check that the status of the
    /// device is as expected.
    fn test_minimum_values(paths: &[&Path]) {
        assert!(!paths.is_empty());

        let dm = DM::new().unwrap();
        let mut tp = minimal_thinpool(&dm, paths[0]);
        match tp.status(&dm, DmOptions::default()).unwrap() {
            ThinPoolStatus::Working(ref status)
                if status.summary == ThinPoolStatusSummary::Good =>
            {
                assert!(!status.discard_passdown);
                assert_eq!(status.held_metadata_root, None);

                let usage = &status.usage;
                // Even an empty thinpool requires some metadata.
                assert!(usage.used_meta > MetaBlocks(0));
                assert_eq!(usage.total_meta, tp.meta_dev().size().metablocks());
                assert_eq!(usage.used_data, DataBlocks(0));
                assert_eq!(
                    usage.total_data,
                    DataBlocks(tp.data_dev().size() / tp.data_block_size())
                );
            }
            status => panic!("unexpected thinpool status: {status:?}"),
        }

        let table = ThinPoolDev::read_kernel_table(&dm, &DevId::Name(tp.name()))
            .unwrap()
            .table;
        let params = &table.params;
        assert_eq!(params.metadata_dev, tp.meta_dev().device());
        assert_eq!(params.data_dev, tp.data_dev().device());

        tp.teardown(&dm).unwrap();
    }

    #[test]
    fn loop_test_basic() {
        test_with_spec(1, test_minimum_values);
    }

    /// Verify that data block size less than minimum results in a failure.
    fn test_low_data_block_size(paths: &[&Path]) {
        assert!(!paths.is_empty());
        let dev = Device::from(devnode_to_devno(paths[0]).unwrap().unwrap());

        let dm = DM::new().unwrap();

        let meta_name = test_name("meta").expect("valid format");
        let meta_params = LinearTargetParams::new(dev, Sectors(0));
        let meta_table = vec![TargetLine::new(
            Sectors(0),
            MIN_RECOMMENDED_METADATA_SIZE,
            LinearDevTargetParams::Linear(meta_params),
        )];
        let meta = LinearDev::setup(&dm, &meta_name, None, meta_table).unwrap();

        let data_name = test_name("data").expect("valid format");
        let data_params = LinearTargetParams::new(dev, MIN_RECOMMENDED_METADATA_SIZE);
        let data_table = vec![TargetLine::new(
            Sectors(0),
            512u64 * MIN_DATA_BLOCK_SIZE,
            LinearDevTargetParams::Linear(data_params),
        )];
        let data = LinearDev::setup(&dm, &data_name, None, data_table).unwrap();

        assert_matches!(
            ThinPoolDev::new(
                &dm,
                &test_name("pool").expect("valid format"),
                None,
                meta,
                data,
                MIN_DATA_BLOCK_SIZE / 2u64,
                DataBlocks(1),
                vec![
                    "no_discard_passdown".to_owned(),
                    "skip_block_zeroing".to_owned()
                ],
            ),
            Err(DmError::Core(Error::Ioctl(_, _, _, _)))
        );
        dm.device_remove(&DevId::Name(&meta_name), DmOptions::default())
            .unwrap();
        dm.device_remove(&DevId::Name(&data_name), DmOptions::default())
            .unwrap();
    }

    #[test]
    fn loop_test_low_data_block_size() {
        test_with_spec(1, test_low_data_block_size);
    }

    /// Verify that setting the data table does not fail and results in
    /// the correct size data device.
    fn test_set_data(paths: &[&Path]) {
        assert!(paths.len() > 1);

        let dm = DM::new().unwrap();
        let mut tp = minimal_thinpool(&dm, paths[0]);

        let mut data_table = tp.data_dev.table().table.clone();
        let data_size = tp.data_dev.size();

        let dev2 = Device::from(devnode_to_devno(paths[1]).unwrap().unwrap());
        let data_params = LinearTargetParams::new(dev2, Sectors(0));
        data_table.push(TargetLine::new(
            data_size,
            data_size,
            LinearDevTargetParams::Linear(data_params),
        ));
        tp.set_data_table(&dm, data_table).unwrap();
        tp.resume(&dm).unwrap();

        match tp.status(&dm, DmOptions::default()).unwrap() {
            ThinPoolStatus::Working(ref status) => {
                let usage = &status.usage;
                assert_eq!(
                    *usage.total_data * tp.table().table.params.data_block_size,
                    2u8 * data_size
                );
            }
            ThinPoolStatus::Error => panic!("devicemapper could not obtain thin pool status"),
            ThinPoolStatus::Fail => panic!("thin pool should not have failed"),
        }

        tp.teardown(&dm).unwrap();
    }

    #[test]
    fn loop_test_set_data() {
        test_with_spec(2, test_set_data);
    }

    /// Verify that setting the meta table does not fail and results in
    /// the correct size meta device.
    fn test_set_meta(paths: &[&Path]) {
        assert!(paths.len() > 1);

        let dm = DM::new().unwrap();
        let mut tp = minimal_thinpool(&dm, paths[0]);

        let mut meta_table = tp.meta_dev.table().table.clone();
        let meta_size = tp.meta_dev.size();

        let dev2 = Device::from(devnode_to_devno(paths[1]).unwrap().unwrap());
        let meta_params = LinearTargetParams::new(dev2, Sectors(0));
        meta_table.push(TargetLine::new(
            meta_size,
            meta_size,
            LinearDevTargetParams::Linear(meta_params),
        ));
        tp.set_meta_table(&dm, meta_table).unwrap();
        tp.resume(&dm).unwrap();

        match tp.status(&dm, DmOptions::default()).unwrap() {
            ThinPoolStatus::Working(ref status) => {
                let usage = &status.usage;
                assert_eq!(usage.total_meta.sectors(), 2u8 * meta_size);
            }
            ThinPoolStatus::Error => panic!("devicemapper could not obtain thin pool status"),
            ThinPoolStatus::Fail => panic!("thin pool should not have failed"),
        }

        tp.teardown(&dm).unwrap();
    }

    #[test]
    fn loop_test_set_meta() {
        test_with_spec(2, test_set_meta);
    }

    /// Just test that suspending and resuming a thinpool has no errors.
    fn test_suspend(paths: &[&Path]) {
        assert!(!paths.is_empty());

        let dm = DM::new().unwrap();
        let mut tp = minimal_thinpool(&dm, paths[0]);
        tp.suspend(&dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))
            .unwrap();
        tp.suspend(&dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))
            .unwrap();
        tp.resume(&dm).unwrap();
        tp.resume(&dm).unwrap();
        tp.teardown(&dm).unwrap();
    }

    #[test]
    fn loop_test_suspend() {
        test_with_spec(1, test_suspend);
    }

    fn test_status_noflush(paths: &[&Path]) {
        assert!(!paths.is_empty());

        let dm = DM::new().unwrap();
        let tp = minimal_thinpool(&dm, paths[0]);

        tp.status(&dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))
            .unwrap();
    }

    #[test]
    fn loop_test_status_noflush() {
        test_with_spec(1, test_status_noflush);
    }

    #[test]
    fn test_thinpool_target_params_zero() {
        let result = "thin-pool 42:42 42:43 16 2 0"
            .parse::<ThinPoolTargetParams>()
            .unwrap();
        assert_eq!(result.feature_args, HashSet::new());
    }
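
    /// A minimal sketch: a params string with one feature argument parses,
    /// and round-trips through Display back to an equal value. The device
    /// numbers and sizes here are arbitrary illustrative values.
    #[test]
    fn test_thinpool_target_params_roundtrip() {
        let params = "thin-pool 42:42 42:43 2048 2 1 skip_block_zeroing"
            .parse::<ThinPoolTargetParams>()
            .unwrap();
        assert_eq!(*params.data_block_size, 2048);
        assert_eq!(*params.low_water_mark, 2);
        assert_eq!(
            params.feature_args,
            vec!["skip_block_zeroing".to_owned()]
                .into_iter()
                .collect::<HashSet<_>>()
        );
        // Display prepends the target name, so the rendered string parses
        // back to an equal value.
        assert_eq!(
            params
                .to_string()
                .parse::<ThinPoolTargetParams>()
                .unwrap(),
            params
        );
    }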

    #[test]
    fn test_thinpool_target_params_none() {
        let result = "thin-pool 42:42 42:43 16 2"
            .parse::<ThinPoolTargetParams>()
            .unwrap();
        assert_eq!(result.feature_args, HashSet::new());
    }
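
    /// A minimal sketch: parse a plausible working status line of the form
    /// described in the FromStr impl for ThinPoolStatus. All field values
    /// here are made up for illustration.
    #[test]
    fn test_thinpool_status_parse_working() {
        let status = "1 24/4096 0/32768 - rw no_discard_passdown queue_if_no_space - 1024"
            .parse::<ThinPoolStatus>()
            .unwrap();
        match status {
            ThinPoolStatus::Working(status) => {
                assert_eq!(status.transaction_id, 1);
                assert_eq!(status.usage.used_meta, MetaBlocks(24));
                assert_eq!(status.usage.total_data, DataBlocks(32768));
                assert_eq!(status.held_metadata_root, None);
                assert_eq!(status.summary, ThinPoolStatusSummary::Good);
                assert!(!status.discard_passdown);
                assert_eq!(status.no_space_policy, ThinPoolNoSpacePolicy::Queue);
                assert!(!status.needs_check);
                assert_eq!(status.meta_low_water, Some(1024));
            }
            status => panic!("unexpected thinpool status: {status:?}"),
        }
    }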
}