1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
use crate::{ModelIden, ServiceTarget};
/// Specifies how to identify and resolve a model for API calls.
///
/// `ModelSpec` provides three levels of control over model resolution:
///
/// - [`ModelSpec::Name`]: Just a model name string. The adapter kind is inferred
///   from the name, and auth/endpoint are resolved via the client's configured resolvers.
///
/// - [`ModelSpec::Iden`]: An explicit [`ModelIden`] with the adapter kind specified.
///   Skips adapter inference but still resolves auth/endpoint via config.
///
/// - [`ModelSpec::Target`]: A complete [`ServiceTarget`] with endpoint, auth, and model.
///   Used directly; only the service target resolver runs.
///
/// # Examples
///
/// ```rust
/// use genai::adapter::AdapterKind;
/// use genai::resolver::{AuthData, Endpoint};
/// use genai::{ModelIden, ModelSpec, ServiceTarget};
///
/// // Using a string name (full inference)
/// let spec: ModelSpec = "gpt-4".into();
///
/// // Using an explicit ModelIden (skip adapter inference)
/// let spec: ModelSpec = ModelIden::new(AdapterKind::OpenAI, "gpt-4").into();
///
/// // Using a complete ServiceTarget (bypass all resolution)
/// let target = ServiceTarget {
///     endpoint: Endpoint::from_static("https://custom.api/v1/"),
///     auth: AuthData::from_env("CUSTOM_API_KEY"),
///     model: ModelIden::new(AdapterKind::OpenAI, "custom-model"),
/// };
/// let spec: ModelSpec = target.into();
/// ```
// region: --- Constructors
// endregion: --- Constructors
// region: --- From Implementations
// endregion: --- From Implementations