devicemapper/
cachedev.rs

1// This Source Code Form is subject to the terms of the Mozilla Public
2// License, v. 2.0. If a copy of the MPL was not distributed with this
3// file, You can obtain one at http://mozilla.org/MPL/2.0/.
4
5use std::{
6    collections::{HashMap, HashSet},
7    fmt,
8    path::PathBuf,
9    str::FromStr,
10};
11
12use crate::{
13    consts::IEC,
14    core::{DevId, Device, DeviceInfo, DmName, DmOptions, DmUuid, DM},
15    lineardev::{LinearDev, LinearDevTargetParams},
16    result::{DmError, DmResult, ErrorEnum},
17    shared::{
18        device_create, device_exists, device_match, get_status, get_status_line_fields,
19        make_unexpected_value_error, parse_device, parse_value, DmDevice, TargetLine, TargetParams,
20        TargetTable, TargetTypeBuf,
21    },
22    units::{DataBlocks, MetaBlocks, Sectors},
23};
24
25// Specified in kernel docs
26/// The minimum size recommended in the docs for a cache block.
27pub const MIN_CACHE_BLOCK_SIZE: Sectors = Sectors(64); // 32 KiB
28/// The maximum size recommended in the docs for a cache block.
29pub const MAX_CACHE_BLOCK_SIZE: Sectors = Sectors(2 * IEC::Mi); // 1 GiB
30
31const CACHE_TARGET_NAME: &str = "cache";
32
/// Struct representing params for a cache target
///
/// These fields correspond to the constructor arguments of a cache target
/// table line: the three sub-devices, the cache block size, and the
/// variable-length feature and policy argument lists.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CacheTargetParams {
    /// Cache metadata device
    pub meta: Device,
    /// Cache device
    pub cache: Device,
    /// Origin device with data to be cached
    pub origin: Device,
    /// Cache block size
    pub cache_block_size: Sectors,
    /// Feature arguments; held as a set, so duplicate args collapse and
    /// ordering is not preserved
    pub feature_args: HashSet<String>,
    /// IO policy
    pub policy: String,
    /// IO policy arguments, keyed by argument name
    pub policy_args: HashMap<String, String>,
}
51
52impl CacheTargetParams {
53    /// Create a new CacheTargetParams struct
54    pub fn new(
55        meta: Device,
56        cache: Device,
57        origin: Device,
58        cache_block_size: Sectors,
59        feature_args: Vec<String>,
60        policy: String,
61        policy_args: Vec<(String, String)>,
62    ) -> CacheTargetParams {
63        CacheTargetParams {
64            meta,
65            cache,
66            origin,
67            cache_block_size,
68            feature_args: feature_args.into_iter().collect::<HashSet<_>>(),
69            policy,
70            policy_args: policy_args.into_iter().collect::<HashMap<_, _>>(),
71        }
72    }
73}
74
75impl fmt::Display for CacheTargetParams {
76    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
77        write!(f, "{} {}", CACHE_TARGET_NAME, self.param_str())
78    }
79}
80
81impl FromStr for CacheTargetParams {
82    type Err = DmError;
83
84    fn from_str(s: &str) -> DmResult<CacheTargetParams> {
85        let vals = s.split(' ').collect::<Vec<_>>();
86
87        if vals.len() < 8 {
88            let err_msg = format!(
89                "expected at least 8 values in params string \"{}\", found {}",
90                s,
91                vals.len()
92            );
93            return Err(DmError::Dm(ErrorEnum::Invalid, err_msg));
94        }
95
96        if vals[0] != CACHE_TARGET_NAME {
97            let err_msg = format!(
98                "Expected a cache target entry but found target type {}",
99                vals[0]
100            );
101            return Err(DmError::Dm(ErrorEnum::Invalid, err_msg));
102        }
103
104        let metadata_dev = parse_device(vals[1], "metadata sub-device for cache target")?;
105        let cache_dev = parse_device(vals[2], "cache sub-device for cache target")?;
106        let origin_dev = parse_device(vals[3], "origin sub-device for cache target")?;
107
108        let block_size = Sectors(parse_value(vals[4], "data block size")?);
109        let num_feature_args: usize = parse_value(vals[5], "number of feature args")?;
110
111        let end_feature_args_index = 6 + num_feature_args;
112        let feature_args: Vec<String> = vals[6..end_feature_args_index]
113            .iter()
114            .map(|x| (*x).to_string())
115            .collect();
116
117        let policy = vals[end_feature_args_index].to_owned();
118
119        let num_policy_args: usize =
120            parse_value(vals[end_feature_args_index + 1], "number of policy args")?;
121
122        let start_policy_args_index = end_feature_args_index + 2;
123        let end_policy_args_index = start_policy_args_index + num_policy_args;
124        let policy_args: Vec<(String, String)> = vals
125            [start_policy_args_index..end_policy_args_index]
126            .chunks(2)
127            .map(|x| (x[0].to_string(), x[1].to_string()))
128            .collect();
129
130        Ok(CacheTargetParams::new(
131            metadata_dev,
132            cache_dev,
133            origin_dev,
134            block_size,
135            feature_args,
136            policy,
137            policy_args,
138        ))
139    }
140}
141
142impl TargetParams for CacheTargetParams {
143    fn param_str(&self) -> String {
144        let feature_args = if self.feature_args.is_empty() {
145            "0".to_owned()
146        } else {
147            format!(
148                "{} {}",
149                self.feature_args.len(),
150                self.feature_args
151                    .iter()
152                    .cloned()
153                    .collect::<Vec<_>>()
154                    .join(" ")
155            )
156        };
157
158        let policy_args = if self.policy_args.is_empty() {
159            "0".to_owned()
160        } else {
161            format!(
162                "{} {}",
163                self.policy_args.len(),
164                self.policy_args
165                    .iter()
166                    .map(|(k, v)| format!("{k} {v}"))
167                    .collect::<Vec<String>>()
168                    .join(" ")
169            )
170        };
171
172        format!(
173            "{} {} {} {} {} {} {}",
174            self.meta,
175            self.cache,
176            self.origin,
177            *self.cache_block_size,
178            feature_args,
179            self.policy,
180            policy_args
181        )
182    }
183
184    fn target_type(&self) -> TargetTypeBuf {
185        TargetTypeBuf::new(CACHE_TARGET_NAME.into()).expect("CACHE_TARGET_NAME is valid")
186    }
187}
188
/// A target table for a cache device.
///
/// A cache device always has exactly one target line (enforced by
/// `from_raw_table`), so the table is a single `TargetLine` rather than a
/// `Vec` of them.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CacheDevTargetTable {
    /// The device's table
    pub table: TargetLine<CacheTargetParams>,
}
195
196impl CacheDevTargetTable {
197    /// Make a new CacheDevTargetTable from the required input
198    pub fn new(start: Sectors, length: Sectors, params: CacheTargetParams) -> CacheDevTargetTable {
199        CacheDevTargetTable {
200            table: TargetLine::new(start, length, params),
201        }
202    }
203}
204
205impl fmt::Display for CacheDevTargetTable {
206    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
207        let table = &self.table;
208        writeln!(f, "{} {} {}", *table.start, *table.length, table.params)
209    }
210}
211
212impl TargetTable for CacheDevTargetTable {
213    fn from_raw_table(table: &[(u64, u64, String, String)]) -> DmResult<CacheDevTargetTable> {
214        if table.len() != 1 {
215            let err_msg = format!(
216                "CacheDev table should have exactly one line, has {} lines",
217                table.len()
218            );
219            return Err(DmError::Dm(ErrorEnum::Invalid, err_msg));
220        }
221        let line = table.first().expect("table.len() == 1");
222        Ok(CacheDevTargetTable::new(
223            Sectors(line.0),
224            Sectors(line.1),
225            format!("{} {}", line.2, line.3).parse::<CacheTargetParams>()?,
226        ))
227    }
228
229    fn to_raw_table(&self) -> Vec<(u64, u64, String, String)> {
230        to_raw_table_unique!(self)
231    }
232}
233
/// Cache usage
///
/// Block-usage values parsed from a cache device's status line.
#[derive(Debug)]
pub struct CacheDevUsage {
    /// The metadata block size, should always be equal to META_BLOCK_SIZE.
    /// At time of writing, all metadata blocks have the same size.
    pub meta_block_size: Sectors,
    /// The number of metadata blocks in use
    pub used_meta: MetaBlocks,
    /// The number of metadata blocks available
    pub total_meta: MetaBlocks,
    /// The cache block size
    pub cache_block_size: Sectors,
    /// Used cache blocks
    pub used_cache: DataBlocks,
    /// Total cache blocks
    pub total_cache: DataBlocks,
}
251
252impl CacheDevUsage {
253    /// Make a new CacheDevUsage struct
254    pub fn new(
255        meta_block_size: Sectors,
256        used_meta: MetaBlocks,
257        total_meta: MetaBlocks,
258        cache_block_size: Sectors,
259        used_cache: DataBlocks,
260        total_cache: DataBlocks,
261    ) -> CacheDevUsage {
262        // This is defined at the kernel level and should not change.
263        assert_eq!(meta_block_size, Sectors(8));
264        CacheDevUsage {
265            meta_block_size,
266            used_meta,
267            total_meta,
268            cache_block_size,
269            used_cache,
270            total_cache,
271        }
272    }
273}
274
/// Cache dev performance data
///
/// Hit/miss and migration counters parsed from a cache device's status
/// line.
#[derive(Debug)]
pub struct CacheDevPerformance {
    /// Number of read hits
    pub read_hits: u64,
    /// Number of read misses
    pub read_misses: u64,
    /// Number of write hits
    pub write_hits: u64,
    /// Number of write misses
    pub write_misses: u64,
    /// Number of demotions
    pub demotions: u64,
    /// Number of promotions
    pub promotions: u64,
    /// Number of dirty blocks
    pub dirty: u64,
}
293
294impl CacheDevPerformance {
295    /// Construct a new CacheDevPerformance struct
296    pub fn new(
297        read_hits: u64,
298        read_misses: u64,
299        write_hits: u64,
300        write_misses: u64,
301        demotions: u64,
302        promotions: u64,
303        dirty: u64,
304    ) -> CacheDevPerformance {
305        CacheDevPerformance {
306            read_hits,
307            read_misses,
308            write_hits,
309            write_misses,
310            demotions,
311            promotions,
312            dirty,
313        }
314    }
315}
316
/// The cache metadata mode
///
/// Parsed from the "rw"/"ro" field of the status line (see the `FromStr`
/// implementation for `CacheDevStatus`).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum CacheDevMetadataMode {
    /// The cache is working normally.
    Good,
    /// The cache has been forced to transition to read-only mode.
    ReadOnly,
}
325
/// Status values of a cache device when it is working
#[derive(Debug)]
pub struct CacheDevWorkingStatus {
    /// A struct recording block usage for all devices
    pub usage: CacheDevUsage,
    /// A struct recording cache dev performance
    pub performance: CacheDevPerformance,
    /// The feature args
    pub feature_args: Vec<String>,
    /// The core args, as key/value pairs
    pub core_args: Vec<(String, String)>,
    /// The name of the replacement policy to use
    /// User-defined policies are permitted.
    pub policy: String,
    /// Arguments for the designated policy, as key/value pairs
    pub policy_args: Vec<(String, String)>,
    /// cache metadata mode
    pub metadata_mode: CacheDevMetadataMode,
    /// needs_check flag has been set in metadata superblock
    pub needs_check: bool,
}
347
348impl CacheDevWorkingStatus {
349    /// Make a new CacheDevWorkingStatus struct
350    #[allow(clippy::too_many_arguments)]
351    pub fn new(
352        usage: CacheDevUsage,
353        performance: CacheDevPerformance,
354        feature_args: Vec<String>,
355        core_args: Vec<(String, String)>,
356        policy: String,
357        policy_args: Vec<(String, String)>,
358        metadata_mode: CacheDevMetadataMode,
359        needs_check: bool,
360    ) -> CacheDevWorkingStatus {
361        CacheDevWorkingStatus {
362            usage,
363            performance,
364            feature_args,
365            core_args,
366            policy,
367            policy_args,
368            metadata_mode,
369            needs_check,
370        }
371    }
372}
373
/// Return type of CacheDev::status()
#[derive(Debug)]
pub enum CacheDevStatus {
    /// The cache has not failed utterly; carries the parsed working
    /// status (boxed, as it is much larger than the other variants)
    Working(Box<CacheDevWorkingStatus>),
    /// Devicemapper has reported that it could not obtain the status
    Error,
    /// The cache is in a failed condition
    Fail,
}
384
385impl FromStr for CacheDevStatus {
386    type Err = DmError;
387
388    // Note: This method is not entirely complete. In particular, *_args values
389    // may require more or better checking or processing.
390    fn from_str(status_line: &str) -> DmResult<CacheDevStatus> {
391        if status_line.starts_with("Error") {
392            return Ok(CacheDevStatus::Error);
393        }
394
395        if status_line.starts_with("Fail") {
396            return Ok(CacheDevStatus::Fail);
397        }
398
399        let status_vals = get_status_line_fields(status_line, 17)?;
400
401        let usage = {
402            let meta_block_size = status_vals[0];
403            let meta_usage = status_vals[1].split('/').collect::<Vec<_>>();
404            let cache_block_size = status_vals[2];
405            let cache_usage = status_vals[3].split('/').collect::<Vec<_>>();
406            CacheDevUsage::new(
407                Sectors(parse_value(meta_block_size, "meta block size")?),
408                MetaBlocks(parse_value(meta_usage[0], "used meta")?),
409                MetaBlocks(parse_value(meta_usage[1], "total meta")?),
410                Sectors(parse_value(cache_block_size, "cache block size")?),
411                DataBlocks(parse_value(cache_usage[0], "used cache")?),
412                DataBlocks(parse_value(cache_usage[1], "total cache")?),
413            )
414        };
415
416        let performance = CacheDevPerformance::new(
417            parse_value(status_vals[4], "read hits")?,
418            parse_value(status_vals[5], "read misses")?,
419            parse_value(status_vals[6], "write hits")?,
420            parse_value(status_vals[7], "write misses")?,
421            parse_value(status_vals[8], "demotions")?,
422            parse_value(status_vals[9], "promotions")?,
423            parse_value(status_vals[10], "dirty")?,
424        );
425
426        let num_feature_args: usize = parse_value(status_vals[11], "number of feature args")?;
427        let core_args_start_index = 12usize + num_feature_args;
428        let feature_args: Vec<String> = status_vals[12..core_args_start_index]
429            .iter()
430            .map(|x| (*x).to_string())
431            .collect();
432
433        let (policy_start_index, core_args) =
434            CacheDev::parse_pairs(core_args_start_index, &status_vals)?;
435
436        let policy = status_vals[policy_start_index].to_string();
437        let (rest_start_index, policy_args) =
438            CacheDev::parse_pairs(policy_start_index + 1, &status_vals)?;
439
440        let cache_metadata_mode = match status_vals[rest_start_index] {
441            "rw" => CacheDevMetadataMode::Good,
442            "ro" => CacheDevMetadataMode::ReadOnly,
443            val => {
444                return Err(make_unexpected_value_error(
445                    rest_start_index + 1,
446                    val,
447                    "cache metadata mode",
448                ));
449            }
450        };
451
452        let needs_check = match status_vals[rest_start_index + 1] {
453            "-" => false,
454            "needs_check" => true,
455            val => {
456                return Err(make_unexpected_value_error(
457                    rest_start_index + 1,
458                    val,
459                    "needs check",
460                ));
461            }
462        };
463
464        Ok(CacheDevStatus::Working(Box::new(
465            CacheDevWorkingStatus::new(
466                usage,
467                performance,
468                feature_args,
469                core_args,
470                policy,
471                policy_args,
472                cache_metadata_mode,
473                needs_check,
474            ),
475        )))
476    }
477}
478
/// DM Cache device
#[derive(Debug)]
pub struct CacheDev {
    // Device information returned by devicemapper on creation.
    dev_info: Box<DeviceInfo>,
    // Linear sub-device holding the cache metadata.
    meta_dev: LinearDev,
    // Linear sub-device holding the cached blocks.
    cache_dev: LinearDev,
    // Linear sub-device holding the data being cached.
    origin_dev: LinearDev,
    // The cache device's own (single-line) target table.
    table: CacheDevTargetTable,
}
488
489impl DmDevice<CacheDevTargetTable> for CacheDev {
490    fn device(&self) -> Device {
491        device!(self)
492    }
493
494    fn devnode(&self) -> PathBuf {
495        devnode!(self)
496    }
497
498    // Omit replacement policy field from equality test when checking that
499    // two devices are the same. Equality of replacement policies is not a
500    // necessary requirement for equality of devices as the replacement
501    // policy can be changed dynamically by a reload of of the device's table.
502    // It is convenient that this is the case, because checking equality of
503    // replacement policies is somewhat hard. "default", which is a valid
504    // policy string, is not a particular policy, but an alias for the default
505    // policy for this version of devicemapper. Therefore, using string
506    // equality to check equivalence can result in false negatives, as
507    // "default" != "smq", the current default policy in the recent kernel.
508    // Note: There is the possibility of implementing the following somewhat
509    // complicated check. Without loss of generality, let
510    // left[0].params.policy = "default" and
511    // right[0].params.policy = X, where X != "default". Then, if X is the
512    // default policy, return true, otherwise return false. Unfortunately,
513    // there is no straightforward programmatic way of determining the default
514    // policy for a given kernel, and we are assured that the default policy
515    // can vary between kernels, and may of course, change in future.
516    fn equivalent_tables(
517        left: &CacheDevTargetTable,
518        right: &CacheDevTargetTable,
519    ) -> DmResult<bool> {
520        let left = &left.table;
521        let right = &right.table;
522
523        Ok(left.start == right.start
524            && left.length == right.length
525            && left.params.meta == right.params.meta
526            && left.params.origin == right.params.origin
527            && left.params.cache_block_size == right.params.cache_block_size
528            && left.params.feature_args == right.params.feature_args
529            && left.params.policy_args == right.params.policy_args)
530    }
531
532    fn name(&self) -> &DmName {
533        name!(self)
534    }
535
536    fn size(&self) -> Sectors {
537        self.origin_dev.size()
538    }
539
540    fn table(&self) -> &CacheDevTargetTable {
541        table!(self)
542    }
543
544    fn teardown(&mut self, dm: &DM) -> DmResult<()> {
545        dm.device_remove(&DevId::Name(self.name()), DmOptions::default())?;
546        self.cache_dev.teardown(dm)?;
547        self.origin_dev.teardown(dm)?;
548        self.meta_dev.teardown(dm)?;
549        Ok(())
550    }
551
552    fn uuid(&self) -> Option<&DmUuid> {
553        uuid!(self)
554    }
555}
556
557/// Cache device implementation.
558impl CacheDev {
559    /// Construct a new CacheDev with the given data and meta devs.
560    /// Returns an error if the device is already known to the kernel.
561    pub fn new(
562        dm: &DM,
563        name: &DmName,
564        uuid: Option<&DmUuid>,
565        meta: LinearDev,
566        cache: LinearDev,
567        origin: LinearDev,
568        cache_block_size: Sectors,
569    ) -> DmResult<CacheDev> {
570        if device_exists(dm, name)? {
571            let err_msg = format!("cachedev {name} already exists");
572            return Err(DmError::Dm(ErrorEnum::Invalid, err_msg));
573        }
574
575        let table = CacheDev::gen_default_table(&meta, &cache, &origin, cache_block_size);
576        let dev_info = device_create(dm, name, uuid, &table, DmOptions::private())?;
577
578        Ok(CacheDev {
579            dev_info: Box::new(dev_info),
580            meta_dev: meta,
581            cache_dev: cache,
582            origin_dev: origin,
583            table,
584        })
585    }
586
587    /// Set up a cache device from the given metadata and data devices.
588    pub fn setup(
589        dm: &DM,
590        name: &DmName,
591        uuid: Option<&DmUuid>,
592        meta: LinearDev,
593        cache: LinearDev,
594        origin: LinearDev,
595        cache_block_size: Sectors,
596    ) -> DmResult<CacheDev> {
597        let table = CacheDev::gen_default_table(&meta, &cache, &origin, cache_block_size);
598        let dev = if device_exists(dm, name)? {
599            let dev_info = dm.device_info(&DevId::Name(name))?;
600            let dev = CacheDev {
601                dev_info: Box::new(dev_info),
602                meta_dev: meta,
603                cache_dev: cache,
604                origin_dev: origin,
605                table,
606            };
607            device_match(dm, &dev, uuid)?;
608            dev
609        } else {
610            let dev_info = device_create(dm, name, uuid, &table, DmOptions::private())?;
611            CacheDev {
612                dev_info: Box::new(dev_info),
613                meta_dev: meta,
614                cache_dev: cache,
615                origin_dev: origin,
616                table,
617            }
618        };
619
620        Ok(dev)
621    }
622
623    /// Set the table for the existing origin device.
624    /// This action puts the device in a state where it is ready to be resumed.
625    /// Warning: It is the client's responsibility to make sure the designated
626    /// table is compatible with the device's existing table.
627    /// If not, this function will still succeed, but some kind of
628    /// data corruption will be the inevitable result.
629    pub fn set_origin_table(
630        &mut self,
631        dm: &DM,
632        table: Vec<TargetLine<LinearDevTargetParams>>,
633    ) -> DmResult<()> {
634        self.origin_dev.set_table(dm, table)?;
635        self.origin_dev.resume(dm)?;
636
637        let mut table = self.table.clone();
638        table.table.length = self.origin_dev.size();
639        self.table_load(dm, &table, DmOptions::default())?;
640
641        self.table = table;
642
643        Ok(())
644    }
645
646    /// Set the table for the existing cache sub-device.
647    /// This action puts the device in a state where it is ready to be resumed.
648    /// Warning: It is the client's responsibility to make sure the designated
649    /// table is compatible with the device's existing table.
650    /// If not, this function will still succeed, but some kind of
651    /// data corruption will be the inevitable result.
652    pub fn set_cache_table(
653        &mut self,
654        dm: &DM,
655        table: Vec<TargetLine<LinearDevTargetParams>>,
656    ) -> DmResult<()> {
657        self.cache_dev.set_table(dm, table)?;
658        self.cache_dev.resume(dm)?;
659
660        // Reload the table, even though it is unchanged. Otherwise, we
661        // suffer from whacky smq bug documented in the following PR:
662        // https://github.com/stratis-storage/devicemapper-rs/pull/279.
663        self.table_load(dm, self.table(), DmOptions::default())?;
664
665        Ok(())
666    }
667
668    /// Set the table for the existing meta sub-device.
669    /// This action puts the device in a state where it is ready to be resumed.
670    /// Warning: It is the client's responsibility to make sure the designated
671    /// table is compatible with the device's existing table.
672    /// If not, this function will still succeed, but some kind of
673    /// data corruption will be the inevitable result.
674    pub fn set_meta_table(
675        &mut self,
676        dm: &DM,
677        table: Vec<TargetLine<LinearDevTargetParams>>,
678    ) -> DmResult<()> {
679        self.meta_dev.set_table(dm, table)?;
680        self.meta_dev.resume(dm)?;
681
682        // Reload the table, even though it is unchanged. Otherwise, we
683        // suffer from whacky smq bug documented in the following PR:
684        // https://github.com/stratis-storage/devicemapper-rs/pull/279.
685        self.table_load(dm, self.table(), DmOptions::default())?;
686
687        Ok(())
688    }
689
690    /// Generate a table to be passed to DM. The format of the table
691    /// entries is:
692    /// ```plain
693    /// <start sec (0)> <length> "cache" <cache-specific string>
694    /// ```
695    /// where the cache-specific string has the format:
696    /// ```plain
697    /// <meta maj:min> <cache maj:min> <origin maj:min> <block size>
698    /// <#num feature args (1)> writethrough <replacement policy (default)>
699    /// <#num policy args (0)>
700    /// ```
701    /// There is exactly one entry in the table.
702    /// Various defaults are hard coded in the method.
703    fn gen_default_table(
704        meta: &LinearDev,
705        cache: &LinearDev,
706        origin: &LinearDev,
707        cache_block_size: Sectors,
708    ) -> CacheDevTargetTable {
709        CacheDevTargetTable::new(
710            Sectors::default(),
711            origin.size(),
712            CacheTargetParams::new(
713                meta.device(),
714                cache.device(),
715                origin.device(),
716                cache_block_size,
717                vec!["writethrough".into()],
718                "default".to_owned(),
719                vec![],
720            ),
721        )
722    }
723
724    /// Parse pairs of arguments from a slice
725    fn parse_pairs(start_index: usize, vals: &[&str]) -> DmResult<(usize, Vec<(String, String)>)> {
726        let num_pairs: usize = parse_value(vals[start_index], "number of pairs")?;
727        if num_pairs % 2 != 0 {
728            let err_msg = format!("Number of args \"{num_pairs}\" is not even");
729            return Err(DmError::Dm(ErrorEnum::Invalid, err_msg));
730        }
731        let next_start_index = start_index + num_pairs + 1;
732        Ok((
733            next_start_index,
734            vals[start_index + 1..next_start_index]
735                .chunks(2)
736                .map(|p| (p[0].to_string(), p[1].to_string()))
737                .collect(),
738        ))
739    }
740
    /// Get the current status of the cache device.
    ///
    /// Delegates to the shared `status!` macro; the returned value is
    /// parsed via `CacheDevStatus`'s `FromStr` implementation.
    pub fn status(&self, dm: &DM, options: DmOptions) -> DmResult<CacheDevStatus> {
        status!(self, dm, options)
    }
745}
746
747#[cfg(test)]
748use std::fs::OpenOptions;
749#[cfg(test)]
750use std::path::Path;
751
752#[cfg(test)]
753use crate::core::devnode_to_devno;
754#[cfg(test)]
755use crate::lineardev::LinearTargetParams;
756#[cfg(test)]
757use crate::testing::{blkdev_size, test_name};
758
#[cfg(test)]
// Make a minimal cachedev. Put the meta and cache on one device, and put
// the origin on a separate device. paths.len() must be at least 2 or the
// method will fail.
pub fn minimal_cachedev(dm: &DM, paths: &[&Path]) -> CacheDev {
    assert!(paths.len() >= 2);

    let backing_dev = Device::from(devnode_to_devno(paths[0]).unwrap().unwrap());

    // The meta sub-device occupies the start of the first backing device;
    // 4 KiSectors is the minimum recommended metadata size.
    let meta_length = Sectors(4 * IEC::Ki);
    let meta = LinearDev::setup(
        dm,
        &test_name("cache-meta").expect("valid format"),
        None,
        vec![TargetLine::new(
            Sectors(0),
            meta_length,
            LinearDevTargetParams::Linear(LinearTargetParams::new(backing_dev, Sectors(0))),
        )],
    )
    .unwrap();

    // The cache sub-device immediately follows the meta sub-device on the
    // same backing device.
    let cache = LinearDev::setup(
        dm,
        &test_name("cache-cache").expect("valid format"),
        None,
        vec![TargetLine::new(
            Sectors(0),
            MIN_CACHE_BLOCK_SIZE,
            LinearDevTargetParams::Linear(LinearTargetParams::new(backing_dev, meta_length)),
        )],
    )
    .unwrap();

    // The origin sub-device takes up the whole of the second device.
    let origin_size =
        blkdev_size(&OpenOptions::new().read(true).open(paths[1]).unwrap()).sectors();
    let origin_backing_dev = Device::from(devnode_to_devno(paths[1]).unwrap().unwrap());
    let origin = LinearDev::setup(
        dm,
        &test_name("cache-origin").expect("valid format"),
        None,
        vec![TargetLine::new(
            Sectors(0),
            origin_size,
            LinearDevTargetParams::Linear(LinearTargetParams::new(origin_backing_dev, Sectors(0))),
        )],
    )
    .unwrap();

    CacheDev::new(
        dm,
        &test_name("cache").expect("valid format"),
        None,
        meta,
        cache,
        origin,
        MIN_CACHE_BLOCK_SIZE,
    )
    .unwrap()
}
813
814#[cfg(test)]
815mod tests {
816    use std::path::Path;
817
818    use crate::{core::DmFlags, testing::test_with_spec};
819
820    use super::*;
821
    // Test creating a minimal cache dev.
    // Verify that status method executes and gives reasonable values.
    fn test_minimal_cache_dev(paths: &[&Path]) {
        assert!(paths.len() >= 2);
        let dm = DM::new().unwrap();
        let mut cache = minimal_cachedev(&dm, paths);

        // A freshly created cache should report Working status with
        // default configuration values.
        match cache.status(&dm, DmOptions::default()).unwrap() {
            CacheDevStatus::Working(ref status) => {
                let usage = &status.usage;

                // Kernel-defined metadata block size (see CacheDevUsage::new).
                assert_eq!(usage.meta_block_size, Sectors(8));

                // Even an empty cache dev takes up some metadata space.
                assert!(usage.used_meta > MetaBlocks(0));

                assert_eq!(usage.cache_block_size, MIN_CACHE_BLOCK_SIZE);
                assert_eq!(
                    usage.cache_block_size,
                    cache.table.table.params.cache_block_size
                );

                let performance = &status.performance;

                // No write activity should mean all write performance data is 0
                assert_eq!(performance.write_hits, 0);
                assert_eq!(performance.write_misses, 0);
                assert_eq!(performance.demotions, 0);
                assert_eq!(performance.dirty, 0);

                // The current defaults for configuration values
                assert_eq!(status.feature_args, vec!["writethrough"]);
                assert_eq!(
                    status.core_args,
                    vec![("migration_threshold".to_string(), "2048".to_string())]
                );
                // Status reports the actual policy ("smq"), not the
                // "default" alias that was written in the table.
                assert_eq!(status.policy, "smq");

                assert_eq!(status.policy_args, vec![] as Vec<(String, String)>);

                assert_eq!(status.metadata_mode, CacheDevMetadataMode::Good);

                assert!(!status.needs_check);
            }
            status => panic!("unexpected thinpool status: {status:?}"),
        }

        // The table as read back from the kernel should reflect the values
        // loaded by gen_default_table.
        let table = CacheDev::read_kernel_table(&dm, &DevId::Name(cache.name()))
            .unwrap()
            .table;

        let params = &table.params;
        assert_eq!(params.cache_block_size, MIN_CACHE_BLOCK_SIZE);
        assert_eq!(
            params.feature_args,
            vec!["writethrough".into()]
                .into_iter()
                .collect::<HashSet<_>>()
        );
        assert_eq!(params.policy, "default");

        cache.teardown(&dm).unwrap();
    }
885
    #[test]
    fn loop_test_minimal_cache_dev() {
        // test_with_spec supplies the device paths; test_minimal_cache_dev
        // itself asserts that at least 2 are provided.
        test_with_spec(2, test_minimal_cache_dev);
    }
890
891    /// Basic test of meta size change.
892    /// This executes the code paths, but is not enough to ensure correctness.
893    /// * Construct a minimal cache
894    /// * Expand the meta device by one block
895    fn test_meta_size_change(paths: &[&Path]) {
896        assert!(paths.len() >= 3);
897
898        let dm = DM::new().unwrap();
899        let mut cache = minimal_cachedev(&dm, paths);
900
901        let mut table = cache.meta_dev.table().table.clone();
902        let dev3 = Device::from(devnode_to_devno(paths[2]).unwrap().unwrap());
903
904        let extra_length = MIN_CACHE_BLOCK_SIZE;
905        let cache_params = LinearTargetParams::new(dev3, Sectors(0));
906        let current_length = cache.meta_dev.size();
907
908        match cache.status(&dm, DmOptions::default()).unwrap() {
909            CacheDevStatus::Working(ref status) => {
910                let usage = &status.usage;
911                assert_eq!(*usage.total_meta * usage.meta_block_size, current_length);
912            }
913            CacheDevStatus::Error => panic!("devicemapper could not obtain cache status"),
914            CacheDevStatus::Fail => panic!("cache should not have failed"),
915        }
916
917        table.push(TargetLine::new(
918            current_length,
919            extra_length,
920            LinearDevTargetParams::Linear(cache_params),
921        ));
922        assert_matches!(cache.set_meta_table(&dm, table), Ok(_));
923        cache.resume(&dm).unwrap();
924
925        match cache.status(&dm, DmOptions::default()).unwrap() {
926            CacheDevStatus::Working(ref status) => {
927                let usage = &status.usage;
928                let assigned_length = current_length + extra_length;
929                assert!(*usage.total_meta * usage.meta_block_size <= assigned_length);
930                assert_eq!(assigned_length, cache.meta_dev.size());
931            }
932            CacheDevStatus::Error => panic!("devicemapper could not obtain cache status"),
933            CacheDevStatus::Fail => panic!("cache should not have failed"),
934        }
935
936        cache.teardown(&dm).unwrap();
937    }
938
    /// Run `test_meta_size_change` through the loopbacked-device harness,
    /// requesting 3 backing device paths (the callee asserts `paths.len() >= 3`).
    #[test]
    fn loop_test_meta_size_change() {
        test_with_spec(3, test_meta_size_change);
    }
943
944    /// Basic test of cache size change
945    /// This executes the code paths, but is not enough to ensure correctness.
946    /// * Construct a minimal cache
947    /// * Expand the cache by one more block
948    /// * Decrease the cache to its original size
949    fn test_cache_size_change(paths: &[&Path]) {
950        assert!(paths.len() >= 3);
951
952        let dm = DM::new().unwrap();
953        let mut cache = minimal_cachedev(&dm, paths);
954
955        let mut cache_table = cache.cache_dev.table().table.clone();
956        let dev3 = Device::from(devnode_to_devno(paths[2]).unwrap().unwrap());
957
958        let extra_length = MIN_CACHE_BLOCK_SIZE;
959        let cache_params = LinearTargetParams::new(dev3, Sectors(0));
960        let current_length = cache.cache_dev.size();
961
962        match cache.status(&dm, DmOptions::default()).unwrap() {
963            CacheDevStatus::Working(ref status) => {
964                let usage = &status.usage;
965                assert_eq!(*usage.total_cache * usage.cache_block_size, current_length);
966            }
967            CacheDevStatus::Error => panic!("devicemapper could not obtain cache status"),
968            CacheDevStatus::Fail => panic!("cache should not have failed"),
969        }
970
971        cache_table.push(TargetLine::new(
972            current_length,
973            extra_length,
974            LinearDevTargetParams::Linear(cache_params),
975        ));
976        assert_matches!(cache.set_cache_table(&dm, cache_table.clone()), Ok(_));
977        cache.resume(&dm).unwrap();
978
979        match cache.status(&dm, DmOptions::default()).unwrap() {
980            CacheDevStatus::Working(ref status) => {
981                let usage = &status.usage;
982                assert_eq!(
983                    *usage.total_cache * usage.cache_block_size,
984                    current_length + extra_length
985                );
986            }
987            CacheDevStatus::Error => panic!("devicemapper could not obtain cache status"),
988            CacheDevStatus::Fail => panic!("cache should not have failed"),
989        }
990
991        cache_table.pop();
992
993        assert_matches!(cache.set_cache_table(&dm, cache_table), Ok(_));
994        cache.resume(&dm).unwrap();
995
996        match cache.status(&dm, DmOptions::default()).unwrap() {
997            CacheDevStatus::Working(ref status) => {
998                let usage = &status.usage;
999                assert_eq!(*usage.total_cache * usage.cache_block_size, current_length);
1000            }
1001            CacheDevStatus::Error => panic!("devicemapper could not obtain cache status"),
1002            CacheDevStatus::Fail => panic!("cache should not have failed"),
1003        }
1004
1005        cache.teardown(&dm).unwrap();
1006    }
1007
    /// Run `test_cache_size_change` through the loopbacked-device harness,
    /// requesting 3 backing device paths (the callee asserts `paths.len() >= 3`).
    #[test]
    fn loop_test_cache_size_change() {
        test_with_spec(3, test_cache_size_change);
    }
1012
1013    /// Test changing the size of the origin device.
1014    /// Verify that once changed, the new size is reflected in origin device
1015    /// and cache device.
1016    fn test_origin_size_change(paths: &[&Path]) {
1017        assert!(paths.len() >= 3);
1018
1019        let dm = DM::new().unwrap();
1020        let mut cache = minimal_cachedev(&dm, paths);
1021
1022        let mut origin_table = cache.origin_dev.table().table.clone();
1023        let origin_size = cache.origin_dev.size();
1024
1025        let dev3_size =
1026            blkdev_size(&OpenOptions::new().read(true).open(paths[2]).unwrap()).sectors();
1027        let dev3 = Device::from(devnode_to_devno(paths[2]).unwrap().unwrap());
1028        let origin_params = LinearTargetParams::new(dev3, Sectors(0));
1029
1030        origin_table.push(TargetLine::new(
1031            origin_size,
1032            dev3_size,
1033            LinearDevTargetParams::Linear(origin_params),
1034        ));
1035
1036        cache.set_origin_table(&dm, origin_table).unwrap();
1037        cache.resume(&dm).unwrap();
1038
1039        let origin_size = origin_size + dev3_size;
1040        assert_eq!(cache.origin_dev.size(), origin_size);
1041        assert_eq!(cache.size(), origin_size);
1042
1043        cache.teardown(&dm).unwrap();
1044    }
1045
    /// Run `test_origin_size_change` through the loopbacked-device harness,
    /// requesting 3 backing device paths (the callee asserts `paths.len() >= 3`).
    #[test]
    fn loop_test_origin_size_change() {
        test_with_spec(3, test_origin_size_change);
    }
1050
1051    /// Verify that suspending and resuming the cache doesn't fail.
1052    fn test_suspend(paths: &[&Path]) {
1053        assert!(paths.len() >= 2);
1054
1055        let dm = DM::new().unwrap();
1056        let mut cache = minimal_cachedev(&dm, paths);
1057        cache
1058            .suspend(&dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))
1059            .unwrap();
1060        cache
1061            .suspend(&dm, DmOptions::default().set_flags(DmFlags::DM_NOFLUSH))
1062            .unwrap();
1063        cache.resume(&dm).unwrap();
1064        cache.resume(&dm).unwrap();
1065        cache.teardown(&dm).unwrap();
1066    }
1067
    /// Run `test_suspend` through the loopbacked-device harness, requesting
    /// 2 backing device paths (the callee asserts `paths.len() >= 2`).
    #[test]
    fn loop_test_suspend() {
        test_with_spec(2, test_suspend);
    }
1072}