pub struct VsCodeCopilotProvider { /* private fields */ }
Expand description
VSCode Copilot LLM provider (proxy-based).
Connects to copilot-api proxy for GitHub Copilot access.
Implementations§
Source§impl VsCodeCopilotProvider
impl VsCodeCopilotProvider
Source§pub fn new() -> VsCodeCopilotProviderBuilder
pub fn new() -> VsCodeCopilotProviderBuilder
Create a new provider builder with default settings.
Source§pub fn with_proxy(proxy_url: impl Into<String>) -> VsCodeCopilotProviderBuilder
pub fn with_proxy(proxy_url: impl Into<String>) -> VsCodeCopilotProviderBuilder
Create a provider builder with custom proxy URL.
Source§pub fn get_client(&self) -> &VsCodeCopilotClient
pub fn get_client(&self) -> &VsCodeCopilotClient
Get a reference to the HTTP client for advanced operations.
Source§pub async fn list_models(&self) -> Result<ModelsResponse>
pub async fn list_models(&self) -> Result<ModelsResponse>
List available models from the Copilot API.
§OODA-79: Dynamic Model Discovery
Delegates to the underlying client to fetch available models. Returns models that are available for the authenticated user.
Trait Implementations§
Source§impl Clone for VsCodeCopilotProvider
impl Clone for VsCodeCopilotProvider
Source§fn clone(&self) -> VsCodeCopilotProvider
fn clone(&self) -> VsCodeCopilotProvider
Returns a duplicate of the value. Read more
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Source§impl Default for VsCodeCopilotProvider
impl Default for VsCodeCopilotProvider
Source§impl EmbeddingProvider for VsCodeCopilotProvider
impl EmbeddingProvider for VsCodeCopilotProvider
Source§fn max_tokens(&self) -> usize
fn max_tokens(&self) -> usize
Get the maximum number of tokens per input.
Source§impl LLMProvider for VsCodeCopilotProvider
impl LLMProvider for VsCodeCopilotProvider
Source§fn supports_tool_streaming(&self) -> bool
fn supports_tool_streaming(&self) -> bool
OODA-05: Enable streaming with tool calls for real-time token display.
Source§fn chat_with_tools_stream<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
messages: &'life1 [ChatMessage],
tools: &'life2 [ToolDefinition],
tool_choice: Option<ToolChoice>,
options: Option<&'life3 CompletionOptions>,
) -> Pin<Box<dyn Future<Output = LlmResult<BoxStream<'static, LlmResult<StreamChunk>>>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
fn chat_with_tools_stream<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
messages: &'life1 [ChatMessage],
tools: &'life2 [ToolDefinition],
tool_choice: Option<ToolChoice>,
options: Option<&'life3 CompletionOptions>,
) -> Pin<Box<dyn Future<Output = LlmResult<BoxStream<'static, LlmResult<StreamChunk>>>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
Stream LLM response with tool calls (OODA-05).
Returns a stream of StreamChunk events for real-time:
- Content display
- Tool call progress
- Token counting and rate display
This enables the React agent to use StreamingProgress instead of
SpinnerGuard, providing ⚡ N tokens (M t/s) display.
Source§fn max_context_length(&self) -> usize
fn max_context_length(&self) -> usize
Get the maximum context length for the model.
Source§fn complete<'life0, 'life1, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
) -> Pin<Box<dyn Future<Output = LlmResult<LLMResponse>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn complete<'life0, 'life1, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
) -> Pin<Box<dyn Future<Output = LlmResult<LLMResponse>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
Generate a completion for the given prompt.
Source§fn complete_with_options<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
options: &'life2 CompletionOptions,
) -> Pin<Box<dyn Future<Output = LlmResult<LLMResponse>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn complete_with_options<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
options: &'life2 CompletionOptions,
) -> Pin<Box<dyn Future<Output = LlmResult<LLMResponse>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
Generate a completion with custom options.
Source§fn chat<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
messages: &'life1 [ChatMessage],
options: Option<&'life2 CompletionOptions>,
) -> Pin<Box<dyn Future<Output = LlmResult<LLMResponse>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn chat<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
messages: &'life1 [ChatMessage],
options: Option<&'life2 CompletionOptions>,
) -> Pin<Box<dyn Future<Output = LlmResult<LLMResponse>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
Generate a chat completion with messages.
Source§fn chat_with_tools<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
messages: &'life1 [ChatMessage],
tools: &'life2 [ToolDefinition],
tool_choice: Option<ToolChoice>,
options: Option<&'life3 CompletionOptions>,
) -> Pin<Box<dyn Future<Output = LlmResult<LLMResponse>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
fn chat_with_tools<'life0, 'life1, 'life2, 'life3, 'async_trait>(
&'life0 self,
messages: &'life1 [ChatMessage],
tools: &'life2 [ToolDefinition],
tool_choice: Option<ToolChoice>,
options: Option<&'life3 CompletionOptions>,
) -> Pin<Box<dyn Future<Output = LlmResult<LLMResponse>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
'life3: 'async_trait,
Generate a chat completion with tool/function calling support. Read more
Source§fn stream<'life0, 'life1, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
) -> Pin<Box<dyn Future<Output = LlmResult<BoxStream<'static, LlmResult<String>>>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn stream<'life0, 'life1, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
) -> Pin<Box<dyn Future<Output = LlmResult<BoxStream<'static, LlmResult<String>>>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
Generate a streaming completion.
Source§fn supports_streaming(&self) -> bool
fn supports_streaming(&self) -> bool
Check if the model supports streaming.
Source§fn supports_json_mode(&self) -> bool
fn supports_json_mode(&self) -> bool
Check if the model supports JSON mode.
Source§fn supports_function_calling(&self) -> bool
fn supports_function_calling(&self) -> bool
Check if the model supports function/tool calling.
Auto Trait Implementations§
impl Freeze for VsCodeCopilotProvider
impl !RefUnwindSafe for VsCodeCopilotProvider
impl Send for VsCodeCopilotProvider
impl Sync for VsCodeCopilotProvider
impl Unpin for VsCodeCopilotProvider
impl UnsafeUnpin for VsCodeCopilotProvider
impl !UnwindSafe for VsCodeCopilotProvider
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
T: ?Sized,
impl<T> BorrowMut<T> for T
where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more