use crate::ml;
use crate::common;
use serde::{Deserialize, Serialize};
/// Profiling information for a deployed ML model: per-action stats,
/// deployment state, node placement, and memory-size estimates.
///
/// All fields are optional; unset fields are omitted from the serialized form.
/// NOTE(review): the previous `#[serde(rename = "...")]` attributes all renamed
/// each field to its own name, which is serde's default — they were no-ops and
/// have been removed. Field order is unchanged, so serialization order is
/// preserved.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModelProfile {
    /// Identifier of the predictor associated with this model.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub predictor: Option<String>,
    /// Stats for `train` actions.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub train: Option<ml::ModelStats>,
    /// Current model state (e.g. deployed/undeployed); raw string from the service.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_state: Option<String>,
    /// Stats for `undeploy` actions.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub undeploy: Option<ml::ModelStats>,
    /// Stats for `register` actions.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub register: Option<ml::ModelStats>,
    /// Stats for `predict` actions.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub predict: Option<ml::ModelStats>,
    /// Estimated memory footprint on CPU; units not specified here — presumably bytes, confirm against service docs.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub memory_size_estimation_cpu: Option<u32>,
    /// Stats for `deploy` actions.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub deploy: Option<ml::ModelStats>,
    /// Nodes the model is targeted to run on.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target_worker_nodes: Option<Vec<common::NodeIds>>,
    /// Stats for combined `train_predict` actions.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub train_predict: Option<ml::ModelStats>,
    /// Aggregated statistics over predict requests.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub predict_request_stats: Option<ml::PredictRequestStats>,
    /// Stats for `execute` actions.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub execute: Option<ml::ModelStats>,
    /// Nodes the model is currently running on.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub worker_nodes: Option<Vec<common::NodeIds>>,
    /// Estimated memory footprint on GPU; units not specified here — presumably bytes, confirm against service docs.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub memory_size_estimation_gpu: Option<u32>,
}
impl ModelProfile {
pub fn new() -> ModelProfile {
ModelProfile {
predictor: None,
train: None,
model_state: None,
undeploy: None,
register: None,
predict: None,
memory_size_estimation_cpu: None,
deploy: None,
target_worker_nodes: None,
train_predict: None,
predict_request_stats: None,
execute: None,
worker_nodes: None,
memory_size_estimation_gpu: None,
}
}
}