pub struct CreateRunRequest {
pub additional_instructions: Option<String>,
pub additional_messages: Option<Vec<CreateMessageRequest>>,
pub assistant_id: String,
pub instructions: Option<String>,
pub max_completion_tokens: Option<i32>,
pub max_prompt_tokens: Option<i32>,
pub metadata: Option<Metadata>,
pub model: Option<Value>,
pub parallel_tool_calls: Option<ParallelToolCalls>,
pub reasoning_effort: Option<ReasoningEffort>,
pub response_format: Option<AssistantsApiResponseFormatOption>,
pub stream: Option<bool>,
pub temperature: Option<f32>,
pub tool_choice: Option<CreateRunRequestToolChoice>,
pub tools: Option<Vec<Value>>,
pub top_p: Option<f32>,
pub truncation_strategy: Option<TruncationObject>,
}
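A minimal construction sketch, assuming CreateRunRequest derives Default (a common derive for request structs where nearly every field is optional; if the crate instead exposes a builder, prefer that). Only assistant_id is required:

// Hypothetical usage; assumes the struct is in scope and implements Default.
let request = CreateRunRequest {
    assistant_id: "asst_abc123".to_string(),
    instructions: Some("Answer concisely.".to_string()),
    temperature: Some(0.2),
    ..Default::default()
};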
Fields
additional_instructions: Option<String>
Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions.
additional_messages: Option<Vec<CreateMessageRequest>>
Adds additional messages to the thread before creating the run.
assistant_id: String
The ID of the assistant to use to execute this run.
instructions: Option<String>
Overrides the instructions of the assistant. This is useful for modifying the behavior on a per-run basis.
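A sketch contrasting the two instruction fields (the assistant ID and strings are illustrative, and Default is assumed as in the sketch above): instructions replaces the assistant's configured instructions for this run, while additional_instructions keeps them and appends to the end.

// Replace the assistant's instructions entirely for this run.
let replaced = CreateRunRequest {
    assistant_id: "asst_abc123".to_string(),
    instructions: Some("Respond only with valid JSON.".to_string()),
    ..Default::default()
};

// Keep the assistant's instructions and append a per-run addendum.
let appended = CreateRunRequest {
    assistant_id: "asst_abc123".to_string(),
    additional_instructions: Some("Address the user as Jane Doe.".to_string()),
    ..Default::default()
};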
max_completion_tokens: Option<i32>
The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status incomplete. See incomplete_details for more info.
max_prompt_tokens: Option<i32>
The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status incomplete. See incomplete_details for more info.
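A sketch of budgeting a run's token usage (the limits are illustrative). If either cap is hit mid-run, the run ends with status incomplete rather than failing:

let budgeted = CreateRunRequest {
    assistant_id: "asst_abc123".to_string(),
    // Cap on input tokens across all turns of the run.
    max_prompt_tokens: Some(4096),
    // Cap on output tokens across all turns of the run.
    max_completion_tokens: Some(1024),
    ..Default::default()
};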
metadata: Option<Metadata>
Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
model: Option<Value>
The ID of the Model to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
parallel_tool_calls: Option<ParallelToolCalls>
Whether to enable parallel function calling during tool use.
reasoning_effort: Option<ReasoningEffort>
Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.
response_format: Option<AssistantsApiResponseFormatOption>
Specifies the format that the model must output.
stream: Option<bool>
If true, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a data: [DONE] message.
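A minimal sketch of draining such a stream from any buffered reader (transport setup omitted; only the SSE data: line framing and the terminal [DONE] sentinel are assumed):

use std::io::BufRead;

// Collects the data payload of each server-sent event until the
// terminal "data: [DONE]" message signals the run is finished.
fn drain_run_events<R: BufRead>(reader: R) -> std::io::Result<Vec<String>> {
    let mut events = Vec::new();
    for line in reader.lines() {
        let line = line?;
        if let Some(payload) = line.strip_prefix("data: ") {
            if payload == "[DONE]" {
                break; // Run entered a terminal state.
            }
            events.push(payload.to_string());
        }
    }
    Ok(events)
}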
temperature: Option<f32>
What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
tool_choice: Option<CreateRunRequestToolChoice>
Controls which (if any) tool is called by the model.
tools: Option<Vec<Value>>
Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
top_p: Option<f32>
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
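A short sketch of that recommendation, tuning one sampling knob per run (values illustrative, Default assumed as above):

// Focused, near-deterministic output: lower temperature, default top_p.
let focused = CreateRunRequest {
    assistant_id: "asst_abc123".to_string(),
    temperature: Some(0.2),
    ..Default::default()
};

// Nucleus sampling instead: consider only the top 10% probability mass.
let nucleus = CreateRunRequest {
    assistant_id: "asst_abc123".to_string(),
    top_p: Some(0.1),
    ..Default::default()
};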
truncation_strategy: Option<TruncationObject>
Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.