#[non_exhaustive]
pub struct DescribeTrainingJobOutput {
pub training_job_name: Option<String>,
pub training_job_arn: Option<String>,
pub tuning_job_arn: Option<String>,
pub labeling_job_arn: Option<String>,
pub auto_ml_job_arn: Option<String>,
pub model_artifacts: Option<ModelArtifacts>,
pub training_job_status: Option<TrainingJobStatus>,
pub secondary_status: Option<SecondaryStatus>,
pub failure_reason: Option<String>,
pub hyper_parameters: Option<HashMap<String, String>>,
pub algorithm_specification: Option<AlgorithmSpecification>,
pub role_arn: Option<String>,
pub input_data_config: Option<Vec<Channel>>,
pub output_data_config: Option<OutputDataConfig>,
pub resource_config: Option<ResourceConfig>,
pub vpc_config: Option<VpcConfig>,
pub stopping_condition: Option<StoppingCondition>,
pub creation_time: Option<DateTime>,
pub training_start_time: Option<DateTime>,
pub training_end_time: Option<DateTime>,
pub last_modified_time: Option<DateTime>,
pub secondary_status_transitions: Option<Vec<SecondaryStatusTransition>>,
pub final_metric_data_list: Option<Vec<MetricData>>,
pub enable_network_isolation: bool,
pub enable_inter_container_traffic_encryption: bool,
pub enable_managed_spot_training: bool,
pub checkpoint_config: Option<CheckpointConfig>,
pub training_time_in_seconds: Option<i32>,
pub billable_time_in_seconds: Option<i32>,
pub debug_hook_config: Option<DebugHookConfig>,
pub experiment_config: Option<ExperimentConfig>,
pub debug_rule_configurations: Option<Vec<DebugRuleConfiguration>>,
pub tensor_board_output_config: Option<TensorBoardOutputConfig>,
pub debug_rule_evaluation_statuses: Option<Vec<DebugRuleEvaluationStatus>>,
pub profiler_config: Option<ProfilerConfig>,
pub profiler_rule_configurations: Option<Vec<ProfilerRuleConfiguration>>,
pub profiler_rule_evaluation_statuses: Option<Vec<ProfilerRuleEvaluationStatus>>,
pub profiling_status: Option<ProfilingStatus>,
pub retry_strategy: Option<RetryStrategy>,
pub environment: Option<HashMap<String, String>>,
}
Fields (Non-exhaustive)
This struct is marked as non-exhaustive. Non-exhaustive structs could have additional fields added in future versions. This means that this struct cannot be constructed with Struct { .. } syntax, cannot be matched against without a wildcard .., and struct update syntax will not work.
training_job_name: Option<String>
Name of the model training job.
training_job_arn: Option<String>
The Amazon Resource Name (ARN) of the training job.
tuning_job_arn: Option<String>
The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the training job was launched by a hyperparameter tuning job.
labeling_job_arn: Option<String>
The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the transform or training job.
auto_ml_job_arn: Option<String>
The Amazon Resource Name (ARN) of an AutoML job.
model_artifacts: Option<ModelArtifacts>
Information about the Amazon S3 location that is configured for storing model artifacts.
training_job_status: Option<TrainingJobStatus>
The status of the training job.
Amazon SageMaker provides the following training job statuses:
- InProgress - The training is in progress.
- Completed - The training job has completed.
- Failed - The training job has failed. To see the reason for the failure, see the FailureReason field in the response to a DescribeTrainingJobResponse call.
- Stopping - The training job is stopping.
- Stopped - The training job has stopped.
For more detailed information, see SecondaryStatus.
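These statuses are typically read back in a polling loop. The sketch below is illustrative only and not part of the generated documentation: it assumes the aws-sdk-sagemaker fluent client, the TrainingJobStatus variants named above, the model module path, and a tokio runtime for the sleep; exact paths and signatures may differ between SDK versions.

// Illustrative polling loop: wait until the training job leaves a non-terminal
// status. TrainingJobStatus is non-exhaustive, so the match keeps a wildcard arm.
use aws_sdk_sagemaker::{model::TrainingJobStatus, Client};
use std::time::Duration;

async fn wait_for_terminal_status(client: &Client, job_name: &str) -> Result<(), aws_sdk_sagemaker::Error> {
    loop {
        let output = client
            .describe_training_job()
            .training_job_name(job_name)
            .send()
            .await?;
        match output.training_job_status() {
            Some(TrainingJobStatus::Completed)
            | Some(TrainingJobStatus::Failed)
            | Some(TrainingJobStatus::Stopped) => return Ok(()),
            // InProgress, Stopping, a missing status, or a newly added variant: keep polling.
            _ => tokio::time::sleep(Duration::from_secs(30)).await,
        }
    }
}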
secondary_status: Option<SecondaryStatus>
Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition.
Amazon SageMaker provides primary statuses and secondary statuses that apply to each of them:
InProgress
- Starting - Starting the training job.
- Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes.
- Training - Training is in progress.
- Interrupted - The job stopped because the managed spot training instances were interrupted.
- Uploading - Training is complete and the model artifacts are being uploaded to the S3 location.
Completed
- Completed - The training job has completed.
Failed
- Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse.
Stopped
- MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime.
- MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed wait time.
- Stopped - The training job has stopped.
Stopping
- Stopping - Stopping the training job.
Valid values for SecondaryStatus are subject to change.
We no longer support the following secondary statuses:
- LaunchingMLInstances
- PreparingTraining
- DownloadingTrainingImage
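An illustrative sketch (not from this page) of branching on the secondary status: the variant names and the model module path are assumed from this crate's SecondaryStatus enum and may differ between SDK versions, and a wildcard arm is needed because the enum can gain new variants.

// Illustrative only: map a SecondaryStatus to a short human-readable label.
use aws_sdk_sagemaker::model::SecondaryStatus;

fn secondary_label(status: &SecondaryStatus) -> &'static str {
    match status {
        SecondaryStatus::Starting => "starting the training job",
        SecondaryStatus::Downloading => "downloading input data",
        SecondaryStatus::Training => "training in progress",
        SecondaryStatus::Uploading => "uploading model artifacts",
        SecondaryStatus::Failed => "training job failed",
        SecondaryStatus::Completed => "training job completed",
        // Covers Interrupted, MaxRuntimeExceeded, deprecated statuses, and future variants.
        _ => "other",
    }
}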
failure_reason: Option<String>
If the training job failed, the reason it failed.
hyper_parameters: Option<HashMap<String, String>>
Algorithm-specific parameters.
algorithm_specification: Option<AlgorithmSpecification>
Information about the algorithm used for training, and algorithm metadata.
role_arn: Option<String>
The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
input_data_config: Option<Vec<Channel>>
An array of Channel objects that describes each data input channel.
output_data_config: Option<OutputDataConfig>
The S3 path where model artifacts that you configured when creating the job are stored. Amazon SageMaker creates subfolders for model artifacts.
resource_config: Option<ResourceConfig>
Resources, including ML compute instances and ML storage volumes, that are configured for model training.
vpc_config: Option<VpcConfig>
A VpcConfig object that specifies the VPC that this training job has access to. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud.
stopping_condition: Option<StoppingCondition>
Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.
To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
creation_time: Option<DateTime>
A timestamp that indicates when the training job was created.
training_start_time: Option<DateTime>
Indicates the time when the training job starts on training instances. You are billed for the time interval between this time and the value of TrainingEndTime. The start time in CloudWatch Logs might be later than this time. The difference is due to the time it takes to download the training data and to the size of the training container.
training_end_time: Option<DateTime>
Indicates the time when the training job ends on training instances. You are billed for the time interval between the value of TrainingStartTime and this time. For successful jobs and stopped jobs, this is the time after model artifacts are uploaded. For failed jobs, this is the time when Amazon SageMaker detects a job failure.
last_modified_time: Option<DateTime>
A timestamp that indicates when the status of the training job was last modified.
secondary_status_transitions: Option<Vec<SecondaryStatusTransition>>
A history of all of the secondary statuses that the training job has transitioned through.
final_metric_data_list: Option<Vec<MetricData>>
A collection of MetricData objects that specify the names, values, and dates and times that the training algorithm emitted to Amazon CloudWatch.
enable_network_isolation: bool
If you want to allow inbound or outbound network calls, except for calls between peers within a training cluster for distributed training, choose True. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.
enable_inter_container_traffic_encryption: bool
To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use deep learning algorithms in distributed training.
enable_managed_spot_training: bool
A Boolean indicating whether managed spot training is enabled (True) or not (False).
checkpoint_config: Option<CheckpointConfig>
Contains information about the output location for managed spot training checkpoint data.
training_time_in_seconds: Option<i32>
The training time in seconds.
billable_time_in_seconds: Option<i32>
The billable time in seconds. Billable time refers to the absolute wall-clock time.
Multiply BillableTimeInSeconds by the number of instances (InstanceCount) in your training cluster to get the total compute time SageMaker will bill you if you run distributed training. The formula is as follows: BillableTimeInSeconds * InstanceCount.
You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings is 80%.
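As an illustrative sketch only (the helper name is hypothetical, not part of this crate), the savings formula can be applied directly to the billable_time_in_seconds and training_time_in_seconds accessors documented on this page.

// Hypothetical helper: percent saved by managed spot training, using
// (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100.
// Returns None when either duration is absent or training time is zero.
fn spot_savings_percent(output: &DescribeTrainingJobOutput) -> Option<f64> {
    let billable = output.billable_time_in_seconds()? as f64;
    let training = output.training_time_in_seconds()? as f64;
    if training == 0.0 {
        return None;
    }
    Some((1.0 - billable / training) * 100.0)
}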
debug_hook_config: Option<DebugHookConfig>
Configuration information for the Debugger hook parameters, metric and tensor collections, and storage paths. To learn more about how to configure the DebugHookConfig parameter, see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job.
experiment_config: Option<ExperimentConfig>
Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs:
- CreateProcessingJob
- CreateTrainingJob
- CreateTransformJob
debug_rule_configurations: Option<Vec<DebugRuleConfiguration>>
Configuration information for Debugger rules for debugging output tensors.
tensor_board_output_config: Option<TensorBoardOutputConfig>
Configuration of storage locations for the Debugger TensorBoard output data.
debug_rule_evaluation_statuses: Option<Vec<DebugRuleEvaluationStatus>>
Evaluation status of Debugger rules for debugging on a training job.
profiler_config: Option<ProfilerConfig>
Configuration information for Debugger system monitoring, framework profiling, and storage paths.
profiler_rule_configurations: Option<Vec<ProfilerRuleConfiguration>>
Configuration information for Debugger rules for profiling system and framework metrics.
profiler_rule_evaluation_statuses: Option<Vec<ProfilerRuleEvaluationStatus>>
Evaluation status of Debugger rules for profiling on a training job.
profiling_status: Option<ProfilingStatus>
Profiling status of a training job.
retry_strategy: Option<RetryStrategy>
The number of times to retry the job when the job fails due to an InternalServerError.
environment: Option<HashMap<String, String>>
The environment variables to set in the Docker container.
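Because the struct is non-exhaustive and every member is optional or defaulted, values are normally obtained from a DescribeTrainingJob call and read through the accessor methods documented below. The following sketch is illustrative only; the field choices are arbitrary and the output module path is assumed, as it differs between SDK versions.

// Illustrative only: summarising a DescribeTrainingJobOutput returned by the
// service. Accessors return Option<&T> (or plain bool / Option<i32>), so each
// field is checked before use.
use aws_sdk_sagemaker::output::DescribeTrainingJobOutput;

fn summarize(output: &DescribeTrainingJobOutput) {
    if let Some(name) = output.training_job_name() {
        println!("job: {name}");
    }
    if let Some(status) = output.training_job_status() {
        println!("status: {status:?}");
    }
    if let Some(reason) = output.failure_reason() {
        println!("failure reason: {reason}");
    }
    if let Some(artifacts) = output.model_artifacts() {
        println!("model artifacts: {artifacts:?}");
    }
    println!("spot training enabled: {}", output.enable_managed_spot_training());
}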
Implementations
impl DescribeTrainingJobOutput
pub fn training_job_name(&self) -> Option<&str>
Name of the model training job.
pub fn training_job_arn(&self) -> Option<&str>
The Amazon Resource Name (ARN) of the training job.
pub fn tuning_job_arn(&self) -> Option<&str>
The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the training job was launched by a hyperparameter tuning job.
pub fn labeling_job_arn(&self) -> Option<&str>
The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the transform or training job.
pub fn auto_ml_job_arn(&self) -> Option<&str>
The Amazon Resource Name (ARN) of an AutoML job.
pub fn model_artifacts(&self) -> Option<&ModelArtifacts>
Information about the Amazon S3 location that is configured for storing model artifacts.
pub fn training_job_status(&self) -> Option<&TrainingJobStatus>
The status of the training job.
Amazon SageMaker provides the following training job statuses:
- InProgress - The training is in progress.
- Completed - The training job has completed.
- Failed - The training job has failed. To see the reason for the failure, see the FailureReason field in the response to a DescribeTrainingJobResponse call.
- Stopping - The training job is stopping.
- Stopped - The training job has stopped.
For more detailed information, see SecondaryStatus.
pub fn secondary_status(&self) -> Option<&SecondaryStatus>
Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition.
Amazon SageMaker provides primary statuses and secondary statuses that apply to each of them:
InProgress
- Starting - Starting the training job.
- Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes.
- Training - Training is in progress.
- Interrupted - The job stopped because the managed spot training instances were interrupted.
- Uploading - Training is complete and the model artifacts are being uploaded to the S3 location.
Completed
- Completed - The training job has completed.
Failed
- Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse.
Stopped
- MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime.
- MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed wait time.
- Stopped - The training job has stopped.
Stopping
- Stopping - Stopping the training job.
Valid values for SecondaryStatus are subject to change.
We no longer support the following secondary statuses:
- LaunchingMLInstances
- PreparingTraining
- DownloadingTrainingImage
pub fn failure_reason(&self) -> Option<&str>
If the training job failed, the reason it failed.
pub fn hyper_parameters(&self) -> Option<&HashMap<String, String>>
Algorithm-specific parameters.
pub fn algorithm_specification(&self) -> Option<&AlgorithmSpecification>
Information about the algorithm used for training, and algorithm metadata.
pub fn role_arn(&self) -> Option<&str>
The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
pub fn input_data_config(&self) -> Option<&[Channel]>
An array of Channel objects that describes each data input channel.
pub fn output_data_config(&self) -> Option<&OutputDataConfig>
The S3 path where model artifacts that you configured when creating the job are stored. Amazon SageMaker creates subfolders for model artifacts.
pub fn resource_config(&self) -> Option<&ResourceConfig>
Resources, including ML compute instances and ML storage volumes, that are configured for model training.
pub fn vpc_config(&self) -> Option<&VpcConfig>
A VpcConfig object that specifies the VPC that this training job has access to. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud.
pub fn stopping_condition(&self) -> Option<&StoppingCondition>
Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.
To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
pub fn creation_time(&self) -> Option<&DateTime>
A timestamp that indicates when the training job was created.
pub fn training_start_time(&self) -> Option<&DateTime>
Indicates the time when the training job starts on training instances. You are billed for the time interval between this time and the value of TrainingEndTime. The start time in CloudWatch Logs might be later than this time. The difference is due to the time it takes to download the training data and to the size of the training container.
pub fn training_end_time(&self) -> Option<&DateTime>
Indicates the time when the training job ends on training instances. You are billed for the time interval between the value of TrainingStartTime and this time. For successful jobs and stopped jobs, this is the time after model artifacts are uploaded. For failed jobs, this is the time when Amazon SageMaker detects a job failure.
pub fn last_modified_time(&self) -> Option<&DateTime>
A timestamp that indicates when the status of the training job was last modified.
pub fn secondary_status_transitions(&self) -> Option<&[SecondaryStatusTransition]>
A history of all of the secondary statuses that the training job has transitioned through.
pub fn final_metric_data_list(&self) -> Option<&[MetricData]>
A collection of MetricData objects that specify the names, values, and dates and times that the training algorithm emitted to Amazon CloudWatch.
pub fn enable_network_isolation(&self) -> bool
If you want to allow inbound or outbound network calls, except for calls between peers within a training cluster for distributed training, choose True. If you enable network isolation for training jobs that are configured to use a VPC, Amazon SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.
pub fn enable_inter_container_traffic_encryption(&self) -> bool
To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use deep learning algorithms in distributed training.
pub fn enable_managed_spot_training(&self) -> bool
A Boolean indicating whether managed spot training is enabled (True) or not (False).
pub fn checkpoint_config(&self) -> Option<&CheckpointConfig>
Contains information about the output location for managed spot training checkpoint data.
pub fn training_time_in_seconds(&self) -> Option<i32>
The training time in seconds.
pub fn billable_time_in_seconds(&self) -> Option<i32>
The billable time in seconds. Billable time refers to the absolute wall-clock time.
Multiply BillableTimeInSeconds by the number of instances (InstanceCount) in your training cluster to get the total compute time SageMaker will bill you if you run distributed training. The formula is as follows: BillableTimeInSeconds * InstanceCount.
You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings is 80%.
pub fn debug_hook_config(&self) -> Option<&DebugHookConfig>
Configuration information for the Debugger hook parameters, metric and tensor collections, and storage paths. To learn more about how to configure the DebugHookConfig parameter, see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job.
pub fn experiment_config(&self) -> Option<&ExperimentConfig>
Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs:
- CreateProcessingJob
- CreateTrainingJob
- CreateTransformJob
pub fn debug_rule_configurations(&self) -> Option<&[DebugRuleConfiguration]>
Configuration information for Debugger rules for debugging output tensors.
pub fn tensor_board_output_config(&self) -> Option<&TensorBoardOutputConfig>
Configuration of storage locations for the Debugger TensorBoard output data.
pub fn debug_rule_evaluation_statuses(&self) -> Option<&[DebugRuleEvaluationStatus]>
Evaluation status of Debugger rules for debugging on a training job.
pub fn profiler_config(&self) -> Option<&ProfilerConfig>
Configuration information for Debugger system monitoring, framework profiling, and storage paths.
pub fn profiler_rule_configurations(&self) -> Option<&[ProfilerRuleConfiguration]>
Configuration information for Debugger rules for profiling system and framework metrics.
pub fn profiler_rule_evaluation_statuses(&self) -> Option<&[ProfilerRuleEvaluationStatus]>
Evaluation status of Debugger rules for profiling on a training job.
pub fn profiling_status(&self) -> Option<&ProfilingStatus>
Profiling status of a training job.
pub fn retry_strategy(&self) -> Option<&RetryStrategy>
The number of times to retry the job when the job fails due to an InternalServerError.
impl DescribeTrainingJobOutput
pub fn builder() -> Builder
Creates a new builder-style object to manufacture DescribeTrainingJobOutput
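A brief, illustrative sketch of builder usage, e.g. for constructing a value in tests; the output module path is assumed here, and whether build() is fallible can differ between SDK versions.

// Illustrative only: hand-building a DescribeTrainingJobOutput, useful for
// unit tests or mocks. Fields left unset stay None (or false for the bool flags).
use aws_sdk_sagemaker::output::DescribeTrainingJobOutput;

fn sample_output() -> DescribeTrainingJobOutput {
    DescribeTrainingJobOutput::builder()
        .training_job_name("my-training-job")
        .enable_managed_spot_training(true)
        .build()
}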
Trait Implementations
impl Clone for DescribeTrainingJobOutput
fn clone(&self) -> DescribeTrainingJobOutput
Returns a copy of the value.
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.
impl Debug for DescribeTrainingJobOutput
impl PartialEq<DescribeTrainingJobOutput> for DescribeTrainingJobOutput
fn eq(&self, other: &DescribeTrainingJobOutput) -> bool
This method tests for self and other values to be equal, and is used by ==.
fn ne(&self, other: &DescribeTrainingJobOutput) -> bool
This method tests for !=.
impl StructuralPartialEq for DescribeTrainingJobOutput
Auto Trait Implementations
impl RefUnwindSafe for DescribeTrainingJobOutput
impl Send for DescribeTrainingJobOutput
impl Sync for DescribeTrainingJobOutput
impl Unpin for DescribeTrainingJobOutput
impl UnwindSafe for DescribeTrainingJobOutput
Blanket Implementations
impl<T> BorrowMut<T> for T where T: ?Sized
pub fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> Instrument for T
fn instrument(self, span: Span) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
impl<T> ToOwned for T where T: Clone
type Owned = T
The resulting type after obtaining ownership.
pub fn to_owned(&self) -> T
Creates owned data from borrowed data, usually by cloning.
pub fn clone_into(&self, target: &mut T)
(toowned_clone_into) Uses borrowed data to replace owned data, usually by cloning.
impl<T> WithSubscriber for T
fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self> where S: Into<Dispatch>
Attaches the provided Subscriber to this type, returning a WithDispatch wrapper.
fn with_current_subscriber(self) -> WithDispatch<Self>
Attaches the current default Subscriber to this type, returning a WithDispatch wrapper.