use crate::bitmasks::device::FbcFlags;
use crate::enum_wrappers::device::{
BridgeChip, Clock, EncoderType, FbcSessionType, PerformanceState, SampleValueType,
};
use crate::enums::device::{FirmwareVersion, SampleValue, UsedGpuMemory};
use crate::error::{nvml_try, Bits, NvmlError};
use crate::ffi::bindings::*;
use crate::structs::device::FieldId;
#[cfg(feature = "serde")]
use serde_derive::{Deserialize, Serialize};
use std::{
cmp::Ordering,
ffi::{CStr, CString},
};
use std::{
convert::{TryFrom, TryInto},
os::raw::c_char,
};
/// PCI information about a GPU device.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PciInfo {
    /// The bus on which the device resides.
    pub bus: u32,
    /// The PCI bus id string as reported by NVML.
    pub bus_id: String,
    /// The device id on the bus.
    pub device: u32,
    /// The PCI domain.
    pub domain: u32,
    /// The combined PCI device id.
    pub pci_device_id: u32,
    /// The PCI subsystem id; `None` when the caller indicated the raw
    /// struct's field was not populated.
    pub pci_sub_system_id: Option<u32>,
}
impl PciInfo {
    /// Construct a `PciInfo` from the raw NVML struct.
    ///
    /// `sub_sys_id_present` states whether the raw struct's `pciSubSystemId`
    /// field is meaningful; when `false` it is reported as `None`.
    ///
    /// # Errors
    ///
    /// Returns an error if the bus id buffer is not valid UTF-8.
    pub fn try_from(struct_: nvmlPciInfo_t, sub_sys_id_present: bool) -> Result<Self, NvmlError> {
        // SAFETY: `busId` is a fixed-size buffer filled in by NVML and is
        // expected to contain a NUL-terminated C string; `from_ptr` only
        // reads up to that NUL. The borrow ends before `struct_` is dropped
        // because `to_str()?.into()` copies the bytes into an owned `String`.
        let bus_id_raw = unsafe { CStr::from_ptr(struct_.busId.as_ptr()) };
        Ok(Self {
            bus: struct_.bus,
            bus_id: bus_id_raw.to_str()?.into(),
            device: struct_.device,
            domain: struct_.domain,
            pci_device_id: struct_.pciDeviceId,
            pci_sub_system_id: if sub_sys_id_present {
                Some(struct_.pciSubSystemId)
            } else {
                None
            },
        })
    }
}
impl TryInto<nvmlPciInfo_t> for PciInfo {
    type Error = NvmlError;

    /// Convert this `PciInfo` back into the raw NVML struct.
    ///
    /// # Errors
    ///
    /// * `NulError` if `bus_id` contains an interior NUL byte.
    /// * `StringTooLong` if `bus_id` (including its trailing NUL) does not
    ///   fit in NVML's fixed-size bus id buffer.
    fn try_into(self) -> Result<nvmlPciInfo_t, Self::Error> {
        const BUF_SIZE: usize = NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE as usize;

        let mut bus_id = CString::new(self.bus_id)?.into_bytes_with_nul();

        if bus_id.len() > BUF_SIZE {
            return Err(NvmlError::StringTooLong {
                max_len: BUF_SIZE,
                // `actual_len` counts the trailing NUL, matching the check above.
                actual_len: bus_id.len(),
            });
        }
        // Pad with NUL bytes up to the fixed buffer length (no-op when the
        // string exactly fills the buffer).
        bus_id.resize(BUF_SIZE, 0);

        // Copy into the `c_char` array directly; no intermediate allocation.
        let mut bus_id_c: [c_char; BUF_SIZE] = [0; BUF_SIZE];
        for (dst, &src) in bus_id_c.iter_mut().zip(bus_id.iter()) {
            *dst = src as c_char;
        }

        Ok(nvmlPciInfo_t {
            busIdLegacy: [0; NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE as usize],
            domain: self.domain,
            bus: self.bus,
            device: self.device,
            pciDeviceId: self.pci_device_id,
            // `None` becomes 0, mirroring "not present".
            pciSubSystemId: self.pci_sub_system_id.unwrap_or(0),
            busId: bus_id_c,
        })
    }
}
/// BAR1 memory allocation information for a device, in bytes.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BAR1MemoryInfo {
    /// Unallocated BAR1 memory.
    pub free: u64,
    /// Total BAR1 memory.
    pub total: u64,
    /// Allocated BAR1 memory.
    pub used: u64,
}
impl From<nvmlBAR1Memory_t> for BAR1MemoryInfo {
    fn from(raw: nvmlBAR1Memory_t) -> Self {
        let free = raw.bar1Free;
        let total = raw.bar1Total;
        let used = raw.bar1Used;
        Self { free, total, used }
    }
}
/// Information about a bridge chip: its type and firmware version.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BridgeChipInfo {
    /// Firmware version; interpretation is delegated to `FirmwareVersion`.
    pub fw_version: FirmwareVersion,
    /// The kind of bridge chip.
    pub chip_type: BridgeChip,
}
impl TryFrom<nvmlBridgeChipInfo_t> for BridgeChipInfo {
    type Error = NvmlError;

    /// Fails if the raw `type_` value is not a known `BridgeChip` variant.
    fn try_from(value: nvmlBridgeChipInfo_t) -> Result<Self, Self::Error> {
        Ok(Self {
            fw_version: FirmwareVersion::from(value.fwVersion),
            chip_type: BridgeChip::try_from(value.type_)?,
        })
    }
}
/// The hierarchy of bridge chips on a board.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BridgeChipHierarchy {
    /// Converted entries from the raw fixed-size array.
    pub chips_hierarchy: Vec<BridgeChipInfo>,
    /// Number of bridge chips reported by NVML.
    pub chip_count: u8,
}
impl TryFrom<nvmlBridgeChipHierarchy_t> for BridgeChipHierarchy {
    type Error = NvmlError;

    /// Fails on the first raw entry whose chip type cannot be decoded.
    fn try_from(value: nvmlBridgeChipHierarchy_t) -> Result<Self, Self::Error> {
        let mut chips_hierarchy = Vec::with_capacity(value.bridgeChipInfo.len());
        for raw in value.bridgeChipInfo.iter() {
            chips_hierarchy.push(BridgeChipInfo::try_from(*raw)?);
        }
        Ok(Self {
            chips_hierarchy,
            chip_count: value.bridgeCount,
        })
    }
}
/// Information about a process running on a device.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ProcessInfo {
    /// The process id.
    pub pid: u32,
    /// GPU memory used by this process.
    pub used_gpu_memory: UsedGpuMemory,
    /// `None` when NVML reports the all-ones sentinel value.
    pub gpu_instance_id: Option<u32>,
    /// `None` when NVML reports the all-ones sentinel value.
    pub compute_instance_id: Option<u32>,
}
impl From<nvmlProcessInfo_t> for ProcessInfo {
    fn from(raw: nvmlProcessInfo_t) -> Self {
        // NVML uses an all-ones value as the "no value" sentinel here.
        const NO_VALUE: u32 = u32::MAX;

        let gpu_instance_id = match raw.gpuInstanceId {
            NO_VALUE => None,
            id => Some(id),
        };
        let compute_instance_id = match raw.computeInstanceId {
            NO_VALUE => None,
            id => Some(id),
        };

        Self {
            pid: raw.pid,
            used_gpu_memory: UsedGpuMemory::from(raw.usedGpuMemory),
            gpu_instance_id,
            compute_instance_id,
        }
    }
}
/// ECC error counts, broken down by memory location.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct EccErrorCounts {
    /// Errors in device memory.
    pub device_memory: u64,
    /// Errors in the L1 cache.
    pub l1_cache: u64,
    /// Errors in the L2 cache.
    pub l2_cache: u64,
    /// Errors in the register file.
    pub register_file: u64,
}
impl From<nvmlEccErrorCounts_t> for EccErrorCounts {
    fn from(raw: nvmlEccErrorCounts_t) -> Self {
        Self {
            device_memory: raw.deviceMemory,
            l1_cache: raw.l1Cache,
            l2_cache: raw.l2Cache,
            register_file: raw.registerFile,
        }
    }
}
/// Device memory allocation information (v2 struct).
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MemoryInfo {
    /// Unallocated device memory.
    pub free: u64,
    /// Memory reserved by the driver/system.
    pub reserved: u64,
    /// Total installed memory.
    pub total: u64,
    /// Allocated device memory.
    pub used: u64,
    /// The API version of the raw struct this was converted from.
    pub version: u32,
}
impl From<nvmlMemory_v2_t> for MemoryInfo {
    fn from(raw: nvmlMemory_v2_t) -> Self {
        Self {
            free: raw.free,
            reserved: raw.reserved,
            total: raw.total,
            used: raw.used,
            version: raw.version,
        }
    }
}
/// GPU and memory utilization rates.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Utilization {
    /// GPU utilization rate.
    pub gpu: u32,
    /// Memory utilization rate.
    pub memory: u32,
}
impl From<nvmlUtilization_t> for Utilization {
    fn from(raw: nvmlUtilization_t) -> Self {
        let (gpu, memory) = (raw.gpu, raw.memory);
        Self { gpu, memory }
    }
}
/// A reference timestamp together with the accumulated violation time.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ViolationTime {
    /// The reference time for this sample.
    pub reference_time: u64,
    /// Accumulated violation time.
    pub violation_time: u64,
}
impl From<nvmlViolationTime_t> for ViolationTime {
    fn from(raw: nvmlViolationTime_t) -> Self {
        let reference_time = raw.referenceTime;
        let violation_time = raw.violationTime;
        Self {
            reference_time,
            violation_time,
        }
    }
}
/// Accounting statistics for a process.
///
/// Fields that NVML reports as `NVML_VALUE_NOT_AVAILABLE` are mapped
/// to `None`.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct AccountingStats {
    /// GPU utilization; `None` if not available.
    pub gpu_utilization: Option<u32>,
    /// Whether the process is still running.
    pub is_running: bool,
    /// Peak memory usage; `None` if not available.
    pub max_memory_usage: Option<u64>,
    /// Memory utilization; `None` if not available.
    pub memory_utilization: Option<u32>,
    /// When the process started.
    pub start_time: u64,
    /// Amount of time the process ran for.
    pub time: u64,
}
impl From<nvmlAccountingStats_t> for AccountingStats {
    fn from(struct_: nvmlAccountingStats_t) -> Self {
        // The same sentinel constant, cast to each field's width.
        let not_avail_u64 = NVML_VALUE_NOT_AVAILABLE as u64;
        let not_avail_u32 = NVML_VALUE_NOT_AVAILABLE as u32;

        Self {
            // `filter` turns the sentinel into `None` and keeps any real value.
            gpu_utilization: Some(struct_.gpuUtilization).filter(|&v| v != not_avail_u32),
            is_running: struct_.isRunning != 0,
            max_memory_usage: Some(struct_.maxMemoryUsage).filter(|&v| v != not_avail_u64),
            memory_utilization: Some(struct_.memoryUtilization).filter(|&v| v != not_avail_u32),
            start_time: struct_.startTime,
            time: struct_.time,
        }
    }
}
/// Information about an active encoder session.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct EncoderSessionInfo {
    /// The session's id.
    pub session_id: u32,
    /// The owning process id.
    pub pid: u32,
    /// `None` when the raw value is 0.
    pub vgpu_instance: Option<u32>,
    /// The codec in use.
    pub codec_type: EncoderType,
    /// Horizontal resolution.
    pub hres: u32,
    /// Vertical resolution.
    pub vres: u32,
    /// Moving average of the encode frames-per-second.
    pub average_fps: u32,
    /// Moving average of the encode latency.
    pub average_latency: u32,
}
impl TryFrom<nvmlEncoderSessionInfo_t> for EncoderSessionInfo {
    type Error = NvmlError;

    /// Fails if the raw codec type is not a known `EncoderType` variant.
    fn try_from(value: nvmlEncoderSessionInfo_t) -> Result<Self, Self::Error> {
        // A raw vGPU instance of 0 is mapped to `None`.
        let vgpu_instance = Some(value.vgpuInstance).filter(|&v| v != 0);
        let codec_type = EncoderType::try_from(value.codecType)?;

        Ok(Self {
            session_id: value.sessionId,
            pid: value.pid,
            vgpu_instance,
            codec_type,
            hres: value.hResolution,
            vres: value.vResolution,
            average_fps: value.averageFps,
            average_latency: value.averageLatency,
        })
    }
}
/// A timestamped sample value.
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Sample {
    /// CPU timestamp of this sample.
    pub timestamp: u64,
    /// The sampled value, decoded from the raw union via its type tag.
    pub value: SampleValue,
}
impl Sample {
    /// Build a `Sample` by decoding the raw union in `struct_` using `tag`.
    pub fn from_tag_and_struct(tag: &SampleValueType, struct_: nvmlSample_t) -> Self {
        let value = SampleValue::from_tag_and_union(tag, struct_.sampleValue);
        let timestamp = struct_.timeStamp;
        Self { timestamp, value }
    }
}
/// A utilization sample for a single process.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ProcessUtilizationSample {
    /// The process id.
    pub pid: u32,
    /// CPU timestamp of this sample.
    pub timestamp: u64,
    /// SM utilization.
    pub sm_util: u32,
    /// Memory utilization.
    pub mem_util: u32,
    /// Encoder utilization.
    pub enc_util: u32,
    /// Decoder utilization.
    pub dec_util: u32,
}
impl From<nvmlProcessUtilizationSample_t> for ProcessUtilizationSample {
    fn from(raw: nvmlProcessUtilizationSample_t) -> Self {
        Self {
            pid: raw.pid,
            timestamp: raw.timeStamp,
            sm_util: raw.smUtil,
            mem_util: raw.memUtil,
            enc_util: raw.encUtil,
            dec_util: raw.decUtil,
        }
    }
}
/// A sampled field value, carrying the per-field NVML status as a `Result`.
#[derive(Debug)]
pub struct FieldValueSample {
    /// Which field this sample is for.
    pub field: FieldId,
    /// CPU timestamp of this sample.
    pub timestamp: i64,
    /// Latency, in microseconds.
    pub latency: i64,
    /// `Err` when NVML reported a per-field error for this value.
    pub value: Result<SampleValue, NvmlError>,
}
impl TryFrom<nvmlFieldValue_t> for FieldValueSample {
    type Error = NvmlError;

    /// Fails if `valueType` cannot be decoded; a per-field NVML error is
    /// stored inside `value` rather than failing the whole conversion.
    fn try_from(value: nvmlFieldValue_t) -> Result<Self, Self::Error> {
        let sample_value = match nvml_try(value.nvmlReturn) {
            Ok(_) => {
                // Note: an unknown `valueType` propagates to the *outer*
                // Result via `?`, not into `value`.
                let tag = SampleValueType::try_from(value.valueType)?;
                Ok(SampleValue::from_tag_and_union(&tag, value.value))
            }
            Err(e) => Err(e),
        };

        Ok(Self {
            field: FieldId(value.fieldId),
            timestamp: value.timestamp,
            latency: value.latencyUsec,
            value: sample_value,
        })
    }
}
/// Frame-buffer-capture statistics for a device.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FbcStats {
    /// Number of active FBC sessions.
    pub sessions_count: u32,
    /// Moving average of frames captured per second.
    pub average_fps: u32,
    /// Moving average of capture latency.
    pub average_latency: u32,
}
impl From<nvmlFBCStats_t> for FbcStats {
    fn from(raw: nvmlFBCStats_t) -> Self {
        let sessions_count = raw.sessionsCount;
        let average_fps = raw.averageFPS;
        let average_latency = raw.averageLatency;
        Self {
            sessions_count,
            average_fps,
            average_latency,
        }
    }
}
/// Information about an active frame-buffer-capture session.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FbcSessionInfo {
    /// The session's id.
    pub session_id: u32,
    /// The owning process id.
    pub pid: u32,
    /// `None` when the raw value is 0.
    pub vgpu_instance: Option<u32>,
    /// The display this session captures.
    pub display_ordinal: u32,
    /// The kind of FBC session.
    pub session_type: FbcSessionType,
    /// Session flag bits.
    pub session_flags: FbcFlags,
    /// Maximum horizontal resolution supported by the session.
    pub hres_max: u32,
    /// Maximum vertical resolution supported by the session.
    pub vres_max: u32,
    /// Current horizontal resolution.
    pub hres: u32,
    /// Current vertical resolution.
    pub vres: u32,
    /// Moving average of frames captured per second.
    pub average_fps: u32,
    /// Moving average of capture latency.
    pub average_latency: u32,
}
impl TryFrom<nvmlFBCSessionInfo_t> for FbcSessionInfo {
    type Error = NvmlError;

    /// Fails on an unknown session type or on flag bits outside `FbcFlags`.
    fn try_from(value: nvmlFBCSessionInfo_t) -> Result<Self, Self::Error> {
        // A raw vGPU instance of 0 is mapped to `None`.
        let vgpu_instance = if value.vgpuInstance == 0 {
            None
        } else {
            Some(value.vgpuInstance)
        };
        let session_type = FbcSessionType::try_from(value.sessionType)?;
        let session_flags = FbcFlags::from_bits(value.sessionFlags)
            .ok_or(NvmlError::IncorrectBits(Bits::U32(value.sessionFlags)))?;

        Ok(Self {
            session_id: value.sessionId,
            pid: value.pid,
            vgpu_instance,
            display_ordinal: value.displayOrdinal,
            session_type,
            session_flags,
            hres_max: value.hMaxResolution,
            vres_max: value.vMaxResolution,
            hres: value.hResolution,
            vres: value.vResolution,
            average_fps: value.averageFPS,
            average_latency: value.averageLatency,
        })
    }
}
/// Attributes of a device (counts of engines and slices, memory size).
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct DeviceAttributes {
    /// Streaming multiprocessor count.
    pub multiprocessor_count: u32,
    /// Shared copy engine count.
    pub shared_copy_engine_count: u32,
    /// Shared decoder count.
    pub shared_decoder_count: u32,
    /// Shared encoder count.
    pub shared_encoder_count: u32,
    /// Shared JPEG engine count.
    pub shared_jpeg_count: u32,
    /// Shared OFA engine count.
    pub shared_ofa_count: u32,
    /// GPU instance slice count.
    pub gpu_instance_slice_count: u32,
    /// Compute instance slice count.
    pub compute_instance_slice_count: u32,
    /// Memory size in MiB.
    pub memory_size_mb: u64,
}
impl From<nvmlDeviceAttributes_t> for DeviceAttributes {
    fn from(raw: nvmlDeviceAttributes_t) -> Self {
        Self {
            multiprocessor_count: raw.multiprocessorCount,
            shared_copy_engine_count: raw.sharedCopyEngineCount,
            shared_decoder_count: raw.sharedDecoderCount,
            shared_encoder_count: raw.sharedEncoderCount,
            shared_jpeg_count: raw.sharedJpegCount,
            shared_ofa_count: raw.sharedOfaCount,
            gpu_instance_slice_count: raw.gpuInstanceSliceCount,
            compute_instance_slice_count: raw.computeInstanceSliceCount,
            memory_size_mb: raw.memorySizeMB,
        }
    }
}
/// Speed information for a single fan.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FanSpeedInfo {
    /// The API version of the raw struct this was converted from.
    pub version: u32,
    /// The fan's index.
    pub fan: u32,
    /// The fan's speed.
    pub speed: u32,
}
impl From<nvmlFanSpeedInfo_t> for FanSpeedInfo {
    fn from(raw: nvmlFanSpeedInfo_t) -> Self {
        let (version, fan, speed) = (raw.version, raw.fan, raw.speed);
        Self { version, fan, speed }
    }
}
/// Clock offset information for a clock type at a performance state.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ClockOffset {
    /// The API version of the raw struct this was converted from.
    pub version: u32,
    /// Which clock this offset applies to.
    pub clock_type: Clock,
    /// Which performance state this offset applies to.
    pub state: PerformanceState,
    /// Current clock offset, in MHz.
    pub clock_offset_mhz: i32,
    /// Minimum allowed offset, in MHz.
    pub min_clock_offset_mhz: i32,
    /// Maximum allowed offset, in MHz.
    pub max_clock_offset_mhz: i32,
}
impl TryFrom<nvmlClockOffset_v1_t> for ClockOffset {
    type Error = NvmlError;

    /// Fails if either the clock type or pstate cannot be decoded.
    fn try_from(value: nvmlClockOffset_v1_t) -> Result<Self, Self::Error> {
        let clock_type = Clock::try_from(value.type_)?;
        let state = PerformanceState::try_from(value.pstate)?;

        Ok(Self {
            version: value.version,
            clock_type,
            state,
            clock_offset_mhz: value.clockOffsetMHz,
            min_clock_offset_mhz: value.minClockOffsetMHz,
            max_clock_offset_mhz: value.maxClockOffsetMHz,
        })
    }
}
/// Static info for a GPU instance profile.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ProfileInfo {
    /// Copy engine count.
    pub copy_engine_count: u32,
    /// Decoder count.
    pub decoder_count: u32,
    /// Encoder count.
    pub encoder_count: u32,
    /// The profile's id.
    pub id: u32,
    /// Instance count.
    pub instance_count: u32,
    /// Whether peer-to-peer is supported (raw nonzero value).
    pub is_p2p_supported: bool,
    /// JPEG engine count.
    pub jpeg_count: u32,
    /// Memory size in MiB.
    pub memory_size_mb: u64,
    /// Streaming multiprocessor count.
    pub multiprocessor_count: u32,
    /// OFA engine count.
    pub ofa_count: u32,
    /// Slice count.
    pub slice_count: u32,
}
impl From<nvmlGpuInstanceProfileInfo_t> for ProfileInfo {
    fn from(raw: nvmlGpuInstanceProfileInfo_t) -> Self {
        Self {
            copy_engine_count: raw.copyEngineCount,
            decoder_count: raw.decoderCount,
            encoder_count: raw.encoderCount,
            id: raw.id,
            instance_count: raw.instanceCount,
            is_p2p_supported: raw.isP2pSupported > 0,
            jpeg_count: raw.jpegCount,
            memory_size_mb: raw.memorySizeMB,
            multiprocessor_count: raw.multiprocessorCount,
            ofa_count: raw.ofaCount,
            slice_count: raw.sliceCount,
        }
    }
}
/// Placement (start slice and size) of a GPU instance.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GpuInstancePlacement {
    /// Number of slices occupied.
    pub size: u32,
    /// Index of the first slice.
    pub start: u32,
}
impl From<nvmlGpuInstancePlacement_t> for GpuInstancePlacement {
    fn from(raw: nvmlGpuInstancePlacement_t) -> Self {
        let (size, start) = (raw.size, raw.start);
        Self { size, start }
    }
}
/// Capabilities of the vGPU scheduler.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct VgpuSchedulerCapabilities {
    /// Whether adaptive-round-robin (ARR) mode is supported (raw nonzero value).
    pub is_arr_mode_supported: bool,
    /// Maximum averaging factor for ARR.
    pub max_avg_factor_for_arr: u32,
    /// Maximum frequency for ARR.
    pub max_freq_for_arr: u32,
    /// Maximum time slice.
    pub max_time_slice: u32,
    /// Minimum averaging factor for ARR.
    pub min_avg_factor_for_arr: u32,
    /// Minimum frequency for ARR.
    pub min_freq_for_arr: u32,
    /// Minimum time slice.
    pub min_time_slice: u32,
    /// The raw list of supported scheduler ids.
    pub supported_schedulers: Vec<u32>,
}
impl From<nvmlVgpuSchedulerCapabilities_t> for VgpuSchedulerCapabilities {
    fn from(raw: nvmlVgpuSchedulerCapabilities_t) -> Self {
        Self {
            is_arr_mode_supported: raw.isArrModeSupported > 0,
            max_avg_factor_for_arr: raw.maxAvgFactorForARR,
            max_freq_for_arr: raw.maxFrequencyForARR,
            max_time_slice: raw.maxTimeslice,
            min_avg_factor_for_arr: raw.minAvgFactorForARR,
            min_freq_for_arr: raw.minFrequencyForARR,
            min_time_slice: raw.minTimeslice,
            // Copy the fixed-size array into an owned Vec.
            supported_schedulers: raw.supportedSchedulers.to_vec(),
        }
    }
}
/// A range of supported vGPU versions.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct VgpuVersion {
    /// Minimum supported version.
    pub min: u32,
    /// Maximum supported version.
    pub max: u32,
}
impl From<nvmlVgpuVersion_t> for VgpuVersion {
    fn from(raw: nvmlVgpuVersion_t) -> Self {
        let (min, max) = (raw.minVersion, raw.maxVersion);
        Self { min, max }
    }
}
impl VgpuVersion {
    /// Convert back to the raw NVML struct.
    pub fn as_c(&self) -> nvmlVgpuVersion_t {
        nvmlVgpuVersion_t {
            minVersion: self.min,
            maxVersion: self.max,
        }
    }
}
/// Scheduler parameters extracted from the raw scheduler-log union.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct VgpuSchedulerParams {
    // Averaging factor; populated only for the ARR union variant
    // (see the `From<nvmlVgpuSchedulerLog_t>` conversion below).
    pub avg_factor: Option<u32>,
    // Time slice value, present in both union variants.
    pub timeslice: u32,
}
/// A single entry in the vGPU scheduler log.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct VgpuSchedulerLogEntry {
    /// Timestamp of this entry.
    pub timestamp: u64,
    /// Total time run.
    pub time_run_total: u64,
    /// Time run in this slice.
    pub time_run: u64,
    /// Software runlist id.
    pub sw_runlist_id: u32,
    /// Target time slice.
    pub target_time_slice: u64,
    /// Cumulative preemption time.
    pub cumulative_preemption_time: u64,
}
impl From<nvmlVgpuSchedulerLogEntry_t> for VgpuSchedulerLogEntry {
    fn from(raw: nvmlVgpuSchedulerLogEntry_t) -> Self {
        Self {
            timestamp: raw.timestamp,
            time_run_total: raw.timeRunTotal,
            time_run: raw.timeRun,
            sw_runlist_id: raw.swRunlistId,
            target_time_slice: raw.targetTimeSlice,
            cumulative_preemption_time: raw.cumulativePreemptionTime,
        }
    }
}
/// The vGPU scheduler log: header fields plus decoded entries.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct VgpuSchedulerLog {
    // Engine whose scheduler this log describes.
    pub engine_id: u32,
    // Raw scheduler policy value.
    pub scheduler_policy: u32,
    // Raw ARR-mode value; selects which union variant `scheduler_params`
    // was read from (see the conversion below).
    pub arr_mode: u32,
    // Parameters decoded from the raw union.
    pub scheduler_params: VgpuSchedulerParams,
    // Number of entries in `entries` (always `entries.len()` here).
    pub entries_count: u32,
    // All log entries, converted from the raw fixed-size array.
    pub entries: Vec<VgpuSchedulerLogEntry>,
}
impl From<nvmlVgpuSchedulerLog_t> for VgpuSchedulerLog {
    fn from(value: nvmlVgpuSchedulerLog_t) -> Self {
        // Convert every slot of the raw fixed-size entry array.
        let entries = value
            .logEntries
            .iter()
            .map(|e| VgpuSchedulerLogEntry::from(*e))
            .collect::<Vec<_>>();
        // `schedulerParams` is a C union; `arrMode` decides which variant
        // is meaningful. A value of 2 selects the ARR variant here —
        // NOTE(review): confirm against NVML docs that 2 is the only
        // ARR-enabled mode value.
        let params = match value.arrMode {
            2 => {
                // SAFETY: union read; the ARR variant is assumed valid
                // when arrMode == 2 (both variants are plain-old-data,
                // so a mismatched read cannot cause UB, only bogus values).
                let data = unsafe { value.schedulerParams.vgpuSchedDataWithARR };
                VgpuSchedulerParams {
                    avg_factor: Some(data.avgFactor),
                    timeslice: data.timeslice,
                }
            }
            _ => {
                // SAFETY: union read of the non-ARR variant; plain-old-data,
                // see the note above.
                let data = unsafe { value.schedulerParams.vgpuSchedData };
                VgpuSchedulerParams {
                    avg_factor: None,
                    timeslice: data.timeslice,
                }
            }
        };
        Self {
            engine_id: value.engineId,
            scheduler_policy: value.schedulerPolicy,
            arr_mode: value.arrMode,
            scheduler_params: params,
            // Derived from the converted Vec, not from any raw count field.
            entries_count: entries.len() as u32,
            entries,
        }
    }
}
/// The current vGPU scheduler state.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct VgpuSchedulerGetState {
    /// Raw ARR-mode value.
    pub arr_mode: u32,
    /// Raw scheduler policy value.
    pub scheduler_policy: u32,
}
impl From<nvmlVgpuSchedulerGetState_t> for VgpuSchedulerGetState {
    fn from(raw: nvmlVgpuSchedulerGetState_t) -> Self {
        let (arr_mode, scheduler_policy) = (raw.arrMode, raw.schedulerPolicy);
        Self {
            arr_mode,
            scheduler_policy,
        }
    }
}
/// Parameters to set on the vGPU scheduler.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct VgpuSchedulerSetParams {
    /// `Some` selects the ARR union variant; `None` the plain variant.
    pub avg_factor: Option<u32>,
    /// Written as `frequency` in the ARR variant, `timeslice` otherwise.
    pub frequency_or_timeslice: u32,
}
impl VgpuSchedulerSetParams {
    /// Convert to the raw NVML union, choosing the variant by `avg_factor`.
    pub fn as_c(&self) -> nvmlVgpuSchedulerSetParams_t {
        if let Some(avg_factor) = self.avg_factor {
            nvmlVgpuSchedulerSetParams_t {
                vgpuSchedDataWithARR: nvmlVgpuSchedulerSetParams_t__bindgen_ty_1 {
                    avgFactor: avg_factor,
                    frequency: self.frequency_or_timeslice,
                },
            }
        } else {
            nvmlVgpuSchedulerSetParams_t {
                vgpuSchedData: nvmlVgpuSchedulerSetParams_t__bindgen_ty_2 {
                    timeslice: self.frequency_or_timeslice,
                },
            }
        }
    }
}
/// A full scheduler state to apply: policy, ARR flag, and parameters.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct VgpuSchedulerSetState {
    /// Raw scheduler policy value.
    pub scheduler_policy: u32,
    /// Raw enable-ARR-mode value.
    pub enable_arr_mode: u32,
    /// Parameters to apply.
    pub scheduler_params: VgpuSchedulerSetParams,
}
impl VgpuSchedulerSetState {
    /// Convert to the raw NVML struct.
    pub fn as_c(&self) -> nvmlVgpuSchedulerSetState_t {
        let scheduler_params = self.scheduler_params.as_c();
        nvmlVgpuSchedulerSetState_t {
            enableARRMode: self.enable_arr_mode,
            schedulerPolicy: self.scheduler_policy,
            schedulerParams: scheduler_params,
        }
    }
}
#[cfg(test)]
#[allow(unused_variables, unused_imports)]
mod tests {
    use crate::error::*;
    use crate::ffi::bindings::*;
    use crate::test_utils::*;
    use std::convert::TryInto;
    use std::mem;

    // Round-trip check: converting a device's wrapped `PciInfo` back into
    // the raw `nvmlPciInfo_t` must match what NVML itself reports.
    // Requires a live NVML install with at least the device at index 3
    // (via `test_with_device`).
    #[test]
    fn pci_info_from_to_c() {
        let nvml = nvml();
        test_with_device(3, &nvml, |device| {
            // Wrapped -> raw via the `TryInto` impl under test.
            let converted: nvmlPciInfo_t = device
                .pci_info()
                .expect("wrapped pci info")
                .try_into()
                .expect("converted c pci info");
            let sym = nvml_sym(nvml.lib.nvmlDeviceGetPciInfo_v3.as_ref())?;
            // Fetch the raw struct directly from NVML for comparison.
            let raw = unsafe {
                let mut pci_info: nvmlPciInfo_t = mem::zeroed();
                nvml_try(sym(device.handle(), &mut pci_info)).expect("raw pci info");
                pci_info
            };
            // Field-by-field comparison; `busIdLegacy` is intentionally
            // not compared (the conversion zeroes it).
            assert_eq!(converted.busId, raw.busId);
            assert_eq!(converted.domain, raw.domain);
            assert_eq!(converted.bus, raw.bus);
            assert_eq!(converted.device, raw.device);
            assert_eq!(converted.pciDeviceId, raw.pciDeviceId);
            assert_eq!(converted.pciSubSystemId, raw.pciSubSystemId);
            Ok(())
        })
    }
}