/// Global, computed CPU-scheduling configuration applied to the kernel via `/proc` and `/sys`.
///
/// All fields default (`#[serde(default)]`) so any subset may be specified;
/// unknown keys are rejected on deserialization (`deny_unknown_fields`).
#[derive(Default, Debug, Clone, Eq, PartialEq)]
#[derive(Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct GlobalComputedSchedulingConfiguration
{
/// If present, forces the kernel's software-and-hardware watchdog to run only on these hyper threads (written via procfs).
pub software_and_hardware_watchdog_runs_on_which_kernel_cpus: Option<HyperThreads>,
/// If present, restricts kernel work queues to these hyper threads (written via sysfs).
pub work_queue_runs_on_which_kernel_cpus: Option<HyperThreads>,
/// If present, sets the default SMP affinity used for interrupt requests that have no explicit entry in `interrupt_request_affinity`.
pub default_interrupt_request_affinity: Option<HyperThreads>,
/// Explicit per-interrupt-request SMP affinities; always applied (an empty map applies nothing).
pub interrupt_request_affinity: HashMap<InterruptRequest, HyperThreads>,
/// If present, selects which hyper threads have receive packet steering (RPS) flow limit tables enabled (written via procfs).
pub receive_packet_steering_flow_limit_tables: Option<HyperThreads>,
}
impl GlobalComputedSchedulingConfiguration
{
	/// Applies this scheduling configuration to the running kernel.
	///
	/// Optional settings are written only when present; the per-interrupt
	/// affinities in `interrupt_request_affinity` are always iterated.
	///
	/// Settings are applied in declaration order; the first failure is
	/// returned and any remaining settings are left unapplied.
	#[inline(always)]
	pub fn configure(&self, sys_path: &SysPath, proc_path: &ProcPath) -> Result<(), GlobalComputedSchedulingConfigurationError>
	{
		use self::GlobalComputedSchedulingConfigurationError::*;
		
		// Restrict the software-and-hardware watchdog to the configured hyper threads.
		set_value(proc_path, |proc_path, hyper_threads| hyper_threads.force_watchdog_to_just_these_hyper_threads(proc_path), self.software_and_hardware_watchdog_runs_on_which_kernel_cpus.as_ref(), CouldNotChangeSoftwareAndHardwareWatchdogCpus)?;
		
		// Restrict kernel work queues; note this writes under sysfs, not procfs.
		set_value(proc_path, |_proc_path, hyper_threads| hyper_threads.set_work_queue_hyper_thread_affinity(sys_path), self.work_queue_runs_on_which_kernel_cpus.as_ref(), CouldNotChangeWorkQueueCpus)?;
		
		// Default SMP affinity for interrupt requests lacking an explicit entry below.
		set_value(proc_path, |proc_path, hyper_threads| InterruptRequest::set_default_smp_affinity(proc_path, hyper_threads), self.default_interrupt_request_affinity.as_ref(), CouldNotChangeInterruptRequestDefaultAffinity)?;
		
		// Explicit per-interrupt affinities; the failing interrupt request is recorded in the error.
		for (&interrupt_request, hyper_threads) in &self.interrupt_request_affinity
		{
			if let Err(cause) = interrupt_request.set_smp_affinity(proc_path, hyper_threads)
			{
				return Err(CouldNotChangeInterruptRequestAffinity(cause, interrupt_request))
			}
		}
		
		// Select which hyper threads have receive packet steering flow limit tables enabled.
		set_value(proc_path, |proc_path, hyper_threads| hyper_threads.set_receive_packet_steering_flow_limit_tables_affinity(proc_path), self.receive_packet_steering_flow_limit_tables.as_ref(), CouldNotChangeWhichHyperThreadsHaveReceivePacketSteeringFlowLimitTablesEnabled)?;
		
		Ok(())
	}
}