qemu_command_builder/
machine.rs

use bon::Builder;

use crate::common::*;
use crate::machine_type::MachineX86_64;
use crate::to_command::{ToArg, ToCommand};

#[derive(Default)]
pub enum Granularity {
    #[default]
    G256,
    G512,
    G1k,
    G2k,
    G4k,
    G8k,
    G16k,
}

impl ToArg for Granularity {
    fn to_arg(&self) -> &str {
        match self {
            Granularity::G256 => "256",
            Granularity::G512 => "512",
            Granularity::G1k => "1k",
            Granularity::G2k => "2k",
            Granularity::G4k => "4k",
            Granularity::G8k => "8k",
            Granularity::G16k => "16k",
        }
    }
}

#[derive(Default, Builder)]
pub struct CxlFmw {
    targets: Vec<String>,
    size: String,
    interleave_granularity: Option<Granularity>,
}

#[derive(Default, Builder)]
pub struct SmpCache {
    cache: String,
    topology: String,
}

/// Select the emulated machine by name. Use ``-machine help`` to list
/// available machines.
///
/// For architectures which aim to support live migration compatibility
/// across releases, each release will introduce a new versioned machine
/// type. For example, the 2.8.0 release introduced machine types
/// "pc-i440fx-2.8" and "pc-q35-2.8" for the x86\_64/i686 architectures.
///
/// To allow live migration of guests from QEMU version 2.8.0 to QEMU
/// version 2.9.0, the 2.9.0 version must support the "pc-i440fx-2.8"
/// and "pc-q35-2.8" machines too. To allow users who are live migrating
/// VMs to skip multiple intermediate releases when upgrading, new
/// releases of QEMU will support machine types from many previous
/// versions.
#[derive(Builder)]
pub struct MachineForX86 {
    machine_type: MachineX86_64,

    /// This is used to enable an accelerator. Depending on the target
    /// architecture, kvm, xen, hvf, nvmm, whpx or tcg can be available.
    /// By default, tcg is used. If there is more than one accelerator
    /// specified, the next one is used if the previous one fails to
    /// initialize.
    accel: Option<Vec<AccelType>>,

    /// Enables emulation of the VMware IO port, for vmmouse etc. ``auto``
    /// selects the value based on accel and i8042: for accel=xen or
    /// i8042=off the default is off, otherwise the default is on.
    vmport: Option<OnOffAuto>,

    /// Include guest memory in a core dump. The default is on.
    dump_guest_core: Option<OnOffDefaultOn>,

    /// Enables or disables memory merge support. This feature, when
    /// supported by the host, de-duplicates identical memory pages
    /// among VM instances (enabled by default).
    mem_merge: Option<OnOffDefaultOn>,

    /// Enables or disables AES key wrapping support on s390-ccw hosts.
    /// This feature controls whether AES wrapping keys will be created
    /// to allow execution of AES cryptographic functions. The default
    /// is on.
    aes_key_wrap: Option<OnOffDefaultOn>,

    /// Enables or disables DEA key wrapping support on s390-ccw hosts.
    /// This feature controls whether DEA wrapping keys will be created
    /// to allow execution of DEA cryptographic functions. The default
    /// is on.
    dea_key_wrap: Option<OnOffDefaultOn>,

    /// Enables or disables NVDIMM support. The default is off.
    nvdimm: Option<OnOffDefaultOff>,

    /// Memory encryption object to use. The default is none.
    memory_encryption: Option<String>, // TODO find out actual values

    /// Enables or disables ACPI Heterogeneous Memory Attribute Table
    /// (HMAT) support. The default is off.
    hmat: Option<OnOffDefaultOff>,

    /// Allocate auxiliary guest RAM as an anonymous file that is
    /// shareable with an external process. This option applies to
    /// memory allocated as a side effect of creating various devices.
    /// It does not apply to memory-backend-objects, whether explicitly
    /// specified on the command line, or implicitly created by the -m
    /// command line option. The default is off.
    aux_ram_share: Option<OnOffDefaultOff>,

    /// An alternative to the legacy ``-mem-path`` and ``mem-prealloc`` options.
    /// Allows using a memory backend as main RAM.
    memory_backend: Option<String>, // TODO find out actual values

    /// Define a CXL Fixed Memory Window (CFMW).
    ///
    /// Described in the CXL 2.0 ECN: CEDT CFMWS & QTG _DSM.
    ///
    /// They are regions of Host Physical Addresses (HPA) on a system which
    /// may be interleaved across one or more CXL host bridges. The system
    /// software will assign particular devices into these windows and
    /// configure the downstream Host-managed Device Memory (HDM) decoders
    /// in root ports, switch ports and devices appropriately to meet the
    /// interleave requirements before enabling the memory devices.
    ///
    /// ``targets.X=target`` provides the mapping to CXL host bridges,
    /// which may be identified by the id provided in the -device entry.
    /// Multiple entries are needed to specify all the targets when
    /// the fixed memory window represents interleaved memory. X is the
    /// target index, counting from 0.
    ///
    /// ``size=size`` sets the size of the CFMW. This must be a multiple of
    /// 256MiB. The region will be aligned to 256MiB but the location is
    /// platform and configuration dependent.
    ///
    /// ``interleave-granularity=granularity`` sets the granularity of
    /// interleave. The default is 256 (bytes). Only the 256, 512, 1k, 2k,
    /// 4k, 8k and 16k granularities are supported.
    cxl_fmw: Option<CxlFmw>,

    /// Define cache properties for the SMP system.
    ///
    /// ``cache=cachename`` specifies the cache that the properties will be
    /// applied on. This field is the combination of cache level and cache
    /// type. It supports ``l1d`` (L1 data cache), ``l1i`` (L1 instruction
    /// cache), ``l2`` (L2 unified cache) and ``l3`` (L3 unified cache).
    ///
    /// ``topology=topologylevel`` sets the cache topology level. It accepts
    /// CPU topology levels including ``core``, ``module``, ``cluster``, ``die``,
    /// ``socket``, ``book``, ``drawer`` and a special value ``default``. If
    /// ``default`` is set, then the cache topology will follow the architecture's
    /// default cache topology model. If another topology level is set, the cache
    /// will be shared at the corresponding CPU topology level. For example,
    /// ``topology=core`` makes the cache shared by all threads within a core.
    /// Caches that are omitted default to the ``default`` level.
    ///
    /// The default cache topology model for an i386 PC machine is as follows:
    /// ``l1d``, ``l1i``, and ``l2`` caches are per ``core``, while the ``l3``
    /// cache is per ``die``.
    smp_cache: Option<Vec<SmpCache>>,
}

impl ToCommand for MachineForX86 {
    fn to_command(&self) -> Vec<String> {
        let mut cmd = vec![];
        cmd.push("-machine".to_string());

        // -machine takes a comma-separated list: the machine name first,
        // followed by any options that were set.
        let mut args = vec![self.machine_type.to_arg().to_string()];

        if let Some(accels) = &self.accel {
            let accel_strs: Vec<&str> = accels.iter().map(|a| a.to_arg()).collect();
            args.push(format!("accel={}", accel_strs.join(":")));
        }
        if let Some(vmport) = &self.vmport {
            args.push(format!("vmport={}", vmport.to_arg()));
        }
        if let Some(dump_guest_core) = &self.dump_guest_core {
            args.push(format!("dump-guest-core={}", dump_guest_core.to_arg()));
        }
        if let Some(mem_merge) = &self.mem_merge {
            args.push(format!("mem-merge={}", mem_merge.to_arg()));
        }
        if let Some(aes_key_wrap) = &self.aes_key_wrap {
            args.push(format!("aes-key-wrap={}", aes_key_wrap.to_arg()));
        }
        if let Some(dea_key_wrap) = &self.dea_key_wrap {
            args.push(format!("dea-key-wrap={}", dea_key_wrap.to_arg()));
        }
        if let Some(nvdimm) = &self.nvdimm {
            args.push(format!("nvdimm={}", nvdimm.to_arg()));
        }
        if let Some(memory_encryption) = &self.memory_encryption {
            args.push(format!("memory-encryption={}", memory_encryption));
        }
        if let Some(hmat) = &self.hmat {
            args.push(format!("hmat={}", hmat.to_arg()));
        }
        if let Some(aux_ram_share) = &self.aux_ram_share {
            args.push(format!("aux-ram-share={}", aux_ram_share.to_arg()));
        }
        if let Some(memory_backend) = &self.memory_backend {
            args.push(format!("memory-backend={}", memory_backend));
        }
        if let Some(cxl_fmw) = &self.cxl_fmw {
            // Only a single fixed memory window is modeled, hence the
            // hard-coded window index 0; targets are enumerated from 0.
            for (idx, target) in cxl_fmw.targets.iter().enumerate() {
                args.push(format!("cxl-fmw.0.targets.{}={}", idx, target));
            }
            args.push(format!("cxl-fmw.0.size={}", cxl_fmw.size));
            if let Some(granularity) = &cxl_fmw.interleave_granularity {
                args.push(format!(
                    "cxl-fmw.0.interleave-granularity={}",
                    granularity.to_arg()
                ));
            }
        }
        if let Some(smp_caches) = &self.smp_cache {
            for (idx, smp_cache) in smp_caches.iter().enumerate() {
                args.push(format!("smp-cache.{}.cache={}", idx, smp_cache.cache));
                args.push(format!("smp-cache.{}.topology={}", idx, smp_cache.topology));
            }
        }
        cmd.push(args.join(","));

        cmd
    }
}
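
// A minimal usage sketch, written as tests. Everything below that touches
// `MachineX86_64` or `AccelType` rests on assumptions about the sibling
// `machine_type` and `common` modules, which are not defined in this file:
// a `MachineX86_64::Q35` variant whose `to_arg()` returns "q35", and
// `AccelType::Kvm`/`AccelType::Tcg` variants returning "kvm"/"tcg". Adjust
// those names to whatever the enums actually expose.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn accelerators_are_colon_joined_for_fallback() {
        // Hypothetical enum variants; see the note above.
        let machine = MachineForX86::builder()
            .machine_type(MachineX86_64::Q35)
            .accel(vec![AccelType::Kvm, AccelType::Tcg])
            .build();
        // QEMU tries accelerators left to right, falling back on failure.
        assert_eq!(
            machine.to_command(),
            vec!["-machine".to_string(), "q35,accel=kvm:tcg".to_string()]
        );
    }

    #[test]
    fn cxl_fmw_targets_are_indexed_from_zero() {
        let machine = MachineForX86::builder()
            .machine_type(MachineX86_64::Q35)
            .cxl_fmw(
                CxlFmw::builder()
                    .targets(vec!["cxl.0".to_string(), "cxl.1".to_string()])
                    .size("4G".to_string())
                    .interleave_granularity(Granularity::G8k)
                    .build(),
            )
            .build();
        // Each target gets its own cxl-fmw.0.targets.X entry, X counting from 0.
        assert_eq!(
            machine.to_command().join(" "),
            "-machine q35,cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,\
             cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k"
        );
    }

    #[test]
    fn smp_cache_entries_are_enumerated() {
        // Cache names and topology levels come from the smp_cache doc comment.
        let machine = MachineForX86::builder()
            .machine_type(MachineX86_64::Q35)
            .smp_cache(vec![
                SmpCache::builder()
                    .cache("l2".to_string())
                    .topology("core".to_string())
                    .build(),
                SmpCache::builder()
                    .cache("l3".to_string())
                    .topology("die".to_string())
                    .build(),
            ])
            .build();
        assert_eq!(
            machine.to_command().join(" "),
            "-machine q35,smp-cache.0.cache=l2,smp-cache.0.topology=core,\
             smp-cache.1.cache=l3,smp-cache.1.topology=die"
        );
    }
}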