1use crate::client::Anthropic;
2use crate::types::{
3 AnthropicError, Result,
4 ModelObject, ModelListParams, ModelList, ModelCapabilities, ModelCapability,
5 ModelPricing, PricingTier, ModelComparison, ModelPerformance, ComparisonSummary,
6 ModelRequirements, ModelUsageRecommendations, CostEstimation, CostBreakdown,
7};
8use std::collections::HashMap;
9use chrono::Utc;
10
/// Resource for the Anthropic Models API (`/v1/models`).
///
/// Borrows a configured [`Anthropic`] client; construct via
/// `client.models()` (see `ModelsResource::new`).
pub struct ModelsResource<'a> {
    // Shared client providing HTTP transport and base-URL configuration.
    client: &'a Anthropic,
}
15
16impl<'a> ModelsResource<'a> {
    /// Creates a models resource borrowing the given client.
    pub(crate) fn new(client: &'a Anthropic) -> Self {
        Self { client }
    }
20
21 pub async fn list(&self, params: Option<ModelListParams>) -> Result<ModelList> {
46 let mut query_params = Vec::new();
47
48 if let Some(params) = params {
49 if let Some(before_id) = params.before_id {
50 query_params.push(("before_id", before_id));
51 }
52 if let Some(after_id) = params.after_id {
53 query_params.push(("after_id", after_id));
54 }
55 if let Some(limit) = params.limit {
56 query_params.push(("limit", limit.to_string()));
57 }
58 }
59
60 let url = format!("{}/v1/models", self.client.config().base_url);
61 let request = self.client.http_client()
62 .get(&url)
63 .query(&query_params)
64 .build()
65 .map_err(|e| AnthropicError::Connection { message: e.to_string() })?;
66 let response = self.client.http_client().send(request).await?;
67
68 if response.status().is_success() {
69 let model_list: ModelList = response.json().await?;
70 Ok(model_list)
71 } else {
72 let status = response.status().as_u16();
73 let error_text = response.text().await?;
74 Err(AnthropicError::from_status(status, error_text))
75 }
76 }
77
78 pub async fn get(&self, model_id: &str) -> Result<ModelObject> {
99 let url = format!("{}/v1/models/{}", self.client.config().base_url, model_id);
100 let request = self.client.http_client()
101 .get(&url)
102 .build()
103 .map_err(|e| AnthropicError::Connection { message: e.to_string() })?;
104 let response = self.client.http_client().send(request).await?;
105
106 if response.status().is_success() {
107 let model: ModelObject = response.json().await?;
108 Ok(model)
109 } else {
110 let status = response.status().as_u16();
111 let error_text = response.text().await?;
112 Err(AnthropicError::from_status(status, error_text))
113 }
114 }
115
116 pub async fn list_by_family(&self, family: &str) -> Result<Vec<ModelObject>> {
136 let all_models = self.list(None).await?;
137 let filtered_models = all_models.data
138 .into_iter()
139 .filter(|model| model.is_family(family))
140 .collect();
141
142 Ok(filtered_models)
143 }
144
145 pub async fn get_capabilities(&self, model_id: &str) -> Result<ModelCapabilities> {
169 let model = self.get(model_id).await?;
171
172 let capabilities = self.derive_capabilities(&model);
174 Ok(capabilities)
175 }
176
177 pub async fn get_pricing(&self, model_id: &str) -> Result<ModelPricing> {
201 let model = self.get(model_id).await?;
202 let pricing = self.derive_pricing(&model);
203 Ok(pricing)
204 }
205
206 pub async fn find_best_model(&self, requirements: &ModelRequirements) -> Result<ModelObject> {
231 let all_models = self.list(None).await?;
232 let mut scored_models = Vec::new();
233
234 for model in all_models.data {
235 let capabilities = self.derive_capabilities(&model);
236 let pricing = self.derive_pricing(&model);
237 let performance = self.derive_performance(&model);
238
239 if let Some(score) = self.score_model(&model, &capabilities, &pricing, &performance, requirements) {
240 scored_models.push((score, model));
241 }
242 }
243
244 scored_models.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal));
245
246 scored_models.into_iter()
247 .next()
248 .map(|(_, model)| model)
249 .ok_or_else(|| AnthropicError::Other("No models match the requirements".to_string()))
250 }
251
252 pub async fn compare_models(&self, model_ids: &[&str]) -> Result<ModelComparison> {
277 let mut models = Vec::new();
278 let mut capabilities = Vec::new();
279 let mut pricing = Vec::new();
280 let mut performance = Vec::new();
281
282 for model_id in model_ids {
283 let model = self.get(model_id).await?;
284 let caps = self.derive_capabilities(&model);
285 let price = self.derive_pricing(&model);
286 let perf = self.derive_performance(&model);
287
288 models.push(model);
289 capabilities.push(caps);
290 pricing.push(price);
291 performance.push(perf);
292 }
293
294 let summary = self.create_comparison_summary(&models, &performance);
295
296 Ok(ModelComparison {
297 models,
298 capabilities,
299 pricing,
300 performance,
301 summary,
302 })
303 }
304
305 pub async fn estimate_cost(&self, model_id: &str, input_tokens: u64, output_tokens: u64) -> Result<CostEstimation> {
328 let pricing = self.get_pricing(model_id).await?;
329
330 let input_cost_usd = (input_tokens as f64 / 1_000_000.0) * pricing.input_price_per_million;
331 let output_cost_usd = (output_tokens as f64 / 1_000_000.0) * pricing.output_price_per_million;
332 let total_cost_usd = input_cost_usd + output_cost_usd;
333
334 let batch_discount_usd = if input_tokens + output_tokens > 100_000 {
336 Some(total_cost_usd * 0.1) } else {
338 None
339 };
340
341 let final_cost_usd = total_cost_usd - batch_discount_usd.unwrap_or(0.0);
342
343 let breakdown = CostBreakdown {
344 cost_per_input_token_usd: pricing.input_price_per_million / 1_000_000.0,
345 cost_per_output_token_usd: pricing.output_price_per_million / 1_000_000.0,
346 effective_cost_per_token_usd: final_cost_usd / (input_tokens + output_tokens) as f64,
347 cost_vs_alternatives: HashMap::new(), };
349
350 Ok(CostEstimation {
351 model_id: model_id.to_string(),
352 input_tokens,
353 output_tokens,
354 input_cost_usd,
355 output_cost_usd,
356 total_cost_usd,
357 batch_discount_usd,
358 cache_savings_usd: None,
359 final_cost_usd,
360 breakdown,
361 })
362 }
363
364 pub async fn get_recommendations(&self, use_case: &str) -> Result<ModelUsageRecommendations> {
386 let all_models = self.list(None).await?;
387 let recommendations = self.create_use_case_recommendations(use_case, &all_models.data);
388 Ok(recommendations)
389 }
390
    /// Derives a static capability profile for `model` from its id string.
    ///
    /// The API exposes no capabilities endpoint, so limits and feature flags
    /// are inferred by substring-matching the model id. Branch order matters:
    /// "claude-3-7" ids are NOT matched by this limits chain and fall through
    /// to the "claude-3" + "sonnet" branch (4_096 output tokens), even though
    /// the `generation` string below does distinguish "3.7".
    fn derive_capabilities(&self, model: &ModelObject) -> ModelCapabilities {
        let model_id = &model.id;

        // (context window, max output tokens, vision?, tools?) per family.
        // NOTE(review): among claude-3-5 ids only "sonnet" is granted vision
        // here — confirm this is intentional for 3.5 haiku.
        let (max_context_length, max_output_tokens, supports_vision, supports_tools) =
            if model_id.contains("claude-4") {
                (200_000, 8_192, true, true)
            } else if model_id.contains("claude-3-5") {
                (200_000, 8_192, model_id.contains("sonnet"), true)
            } else if model_id.contains("claude-3") && model_id.contains("opus") {
                (200_000, 4_096, true, true)
            } else if model_id.contains("claude-3") && model_id.contains("sonnet") {
                (200_000, 4_096, true, true)
            } else if model_id.contains("claude-3") && model_id.contains("haiku") {
                (200_000, 4_096, true, true)
            } else {
                // Unknown/legacy ids: conservative limits, no vision or tools.
                (100_000, 4_096, false, false)
            };

        // The capability list widens with feature support; all tiers include
        // basic text generation, creative, summarization, and translation.
        let capabilities = if supports_vision && supports_tools {
            vec![
                ModelCapability::TextGeneration,
                ModelCapability::Vision,
                ModelCapability::ToolUse,
                ModelCapability::CodeGeneration,
                ModelCapability::Mathematical,
                ModelCapability::Analysis,
                ModelCapability::Creative,
                ModelCapability::Summarization,
                ModelCapability::Translation,
                ModelCapability::LongContext,
            ]
        } else if supports_tools {
            vec![
                ModelCapability::TextGeneration,
                ModelCapability::ToolUse,
                ModelCapability::CodeGeneration,
                ModelCapability::Mathematical,
                ModelCapability::Analysis,
                ModelCapability::Creative,
                ModelCapability::Summarization,
                ModelCapability::Translation,
            ]
        } else {
            vec![
                ModelCapability::TextGeneration,
                ModelCapability::Creative,
                ModelCapability::Summarization,
                ModelCapability::Translation,
            ]
        };

        ModelCapabilities {
            max_context_length,
            max_output_tokens,
            capabilities,
            family: model.family(),
            // Human-readable generation label; unlike the limits chain above,
            // this one recognises "claude-3-7".
            generation: if model_id.contains("claude-4") {
                "4".to_string()
            } else if model_id.contains("claude-3-7") {
                "3.7".to_string()
            } else if model_id.contains("claude-3-5") {
                "3.5".to_string()
            } else if model_id.contains("claude-3") {
                "3".to_string()
            } else {
                "unknown".to_string()
            },
            supports_vision,
            supports_tools,
            supports_system_messages: true,
            supports_streaming: true,
            // Hard-coded language list shared by all families.
            supported_languages: vec![
                "en".to_string(), "es".to_string(), "fr".to_string(), "de".to_string(),
                "it".to_string(), "pt".to_string(), "ru".to_string(), "ja".to_string(),
                "ko".to_string(), "zh".to_string(), "ar".to_string(), "hi".to_string(),
            ],
            // NOTE(review): `created_at` is the model's release timestamp, not
            // its training-data cutoff — confirm before relying on this field.
            training_cutoff: Some(model.created_at),
        }
    }
473
474 fn derive_pricing(&self, model: &ModelObject) -> ModelPricing {
475 let model_id = &model.id;
476
477 let (input_price, output_price, tier) = if model_id.contains("claude-4") {
479 if model_id.contains("opus") {
480 (15.0, 75.0, PricingTier::Premium)
481 } else {
482 (3.0, 15.0, PricingTier::Standard)
483 }
484 } else if model_id.contains("claude-3-7") {
485 (3.0, 15.0, PricingTier::Standard)
486 } else if model_id.contains("claude-3-5") {
487 if model_id.contains("sonnet") {
488 (3.0, 15.0, PricingTier::Standard)
489 } else if model_id.contains("haiku") {
490 (0.25, 1.25, PricingTier::Fast)
491 } else {
492 (3.0, 15.0, PricingTier::Standard)
493 }
494 } else if model_id.contains("claude-3") {
495 if model_id.contains("opus") {
496 (15.0, 75.0, PricingTier::Premium)
497 } else if model_id.contains("sonnet") {
498 (3.0, 15.0, PricingTier::Standard)
499 } else if model_id.contains("haiku") {
500 (0.25, 1.25, PricingTier::Fast)
501 } else {
502 (3.0, 15.0, PricingTier::Standard)
503 }
504 } else {
505 (3.0, 15.0, PricingTier::Standard)
506 };
507
508 ModelPricing {
509 model_id: model_id.clone(),
510 input_price_per_million: input_price,
511 output_price_per_million: output_price,
512 batch_input_price_per_million: Some(input_price * 0.5), batch_output_price_per_million: Some(output_price * 0.5),
514 cache_write_price_per_million: Some(input_price * 1.25), cache_read_price_per_million: Some(input_price * 0.1), tier,
517 currency: "USD".to_string(),
518 updated_at: Utc::now(),
519 }
520 }
521
522 fn derive_performance(&self, model: &ModelObject) -> ModelPerformance {
523 let model_id = &model.id;
524
525 let (speed_score, quality_score, cost_efficiency_score) =
527 if model_id.contains("claude-4") {
528 if model_id.contains("opus") {
529 (6, 10, 5) } else {
531 (8, 9, 8) }
533 } else if model_id.contains("claude-3-7") {
534 (8, 9, 8)
535 } else if model_id.contains("claude-3-5") {
536 if model_id.contains("sonnet") {
537 (8, 9, 8)
538 } else if model_id.contains("haiku") {
539 (10, 7, 10) } else {
541 (8, 9, 8)
542 }
543 } else if model_id.contains("claude-3") {
544 if model_id.contains("opus") {
545 (5, 10, 4)
546 } else if model_id.contains("sonnet") {
547 (7, 8, 7)
548 } else if model_id.contains("haiku") {
549 (10, 6, 9)
550 } else {
551 (7, 8, 7)
552 }
553 } else {
554 (7, 8, 7)
555 };
556
557 ModelPerformance {
558 model_id: model_id.clone(),
559 speed_score,
560 quality_score,
561 avg_response_time_ms: Some(match speed_score {
562 10 => 500,
563 9 => 750,
564 8 => 1000,
565 7 => 1500,
566 6 => 2000,
567 _ => 3000,
568 }),
569 tokens_per_second: Some(match speed_score {
570 10 => 100.0,
571 9 => 80.0,
572 8 => 60.0,
573 7 => 40.0,
574 6 => 25.0,
575 _ => 15.0,
576 }),
577 cost_efficiency_score,
578 }
579 }
580
    /// Scores `model` against `requirements`; higher is better.
    ///
    /// Hard requirements (cost caps, minimum context, required capabilities,
    /// vision/tool support) eliminate the model outright by returning `None`.
    /// Soft requirements (minimum speed/quality scores) only accrue a penalty
    /// when unmet. Every candidate also receives its raw performance scores
    /// as a baseline, so the final value is `score - penalty`.
    fn score_model(
        &self,
        model: &ModelObject,
        capabilities: &ModelCapabilities,
        pricing: &ModelPricing,
        performance: &ModelPerformance,
        requirements: &ModelRequirements,
    ) -> Option<f64> {
        let mut score = 0.0;
        let mut penalty = 0.0;

        // Hard cap on input cost: eliminate if exceeded, else reward headroom.
        if let Some(max_input_cost) = requirements.max_input_cost_per_token {
            let input_cost_per_token = pricing.input_price_per_million / 1_000_000.0;
            if input_cost_per_token > max_input_cost {
                return None;
            }
            score += (max_input_cost - input_cost_per_token) * 1000.0;
        }

        // Hard cap on output cost, same shape as above.
        if let Some(max_output_cost) = requirements.max_output_cost_per_token {
            let output_cost_per_token = pricing.output_price_per_million / 1_000_000.0;
            if output_cost_per_token > max_output_cost {
                return None;
            }
            score += (max_output_cost - output_cost_per_token) * 1000.0;
        }

        // Hard minimum on context window; extra context earns a small bonus.
        if let Some(min_context) = requirements.min_context_length {
            if capabilities.max_context_length < min_context {
                return None;
            }
            score += (capabilities.max_context_length - min_context) as f64 / 1000.0;
        }

        // Every required capability must be present; each adds a flat bonus.
        for required_cap in &requirements.required_capabilities {
            if !capabilities.capabilities.contains(required_cap) {
                return None;
            }
            score += 10.0;
        }

        // Vision: required-but-missing eliminates; required-and-present adds.
        if let Some(requires_vision) = requirements.requires_vision {
            if requires_vision && !capabilities.supports_vision {
                return None;
            }
            if requires_vision && capabilities.supports_vision {
                score += 20.0;
            }
        }

        // Tool use: same elimination/bonus shape as vision.
        if let Some(requires_tools) = requirements.requires_tools {
            if requires_tools && !capabilities.supports_tools {
                return None;
            }
            if requires_tools && capabilities.supports_tools {
                score += 20.0;
            }
        }

        // Soft preference: matching family earns a bonus, no elimination.
        if let Some(family) = &requirements.preferred_family {
            if model.is_family(family) {
                score += 15.0;
            }
        }

        // Soft minimum speed: shortfall penalizes, surplus rewards.
        if let Some(min_speed) = requirements.min_speed_score {
            if performance.speed_score < min_speed {
                penalty += 10.0;
            } else {
                score += (performance.speed_score - min_speed) as f64;
            }
        }

        // Soft minimum quality, same shape as speed.
        if let Some(min_quality) = requirements.min_quality_score {
            if performance.quality_score < min_quality {
                penalty += 10.0;
            } else {
                score += (performance.quality_score - min_quality) as f64;
            }
        }

        // Baseline: raw performance scores always contribute.
        score += performance.speed_score as f64;
        score += performance.quality_score as f64;
        score += performance.cost_efficiency_score as f64;

        Some(score - penalty)
    }
672
673 fn create_comparison_summary(&self, _models: &[ModelObject], performance: &[ModelPerformance]) -> ComparisonSummary {
674 let fastest_model = performance
675 .iter()
676 .max_by_key(|p| p.speed_score)
677 .map(|p| p.model_id.clone())
678 .unwrap_or_default();
679
680 let highest_quality_model = performance
681 .iter()
682 .max_by_key(|p| p.quality_score)
683 .map(|p| p.model_id.clone())
684 .unwrap_or_default();
685
686 let most_cost_effective_model = performance
687 .iter()
688 .max_by_key(|p| p.cost_efficiency_score)
689 .map(|p| p.model_id.clone())
690 .unwrap_or_default();
691
692 let best_overall_model = performance
694 .iter()
695 .max_by_key(|p| p.speed_score + p.quality_score + p.cost_efficiency_score)
696 .map(|p| p.model_id.clone())
697 .unwrap_or_default();
698
699 let key_differences = vec![
700 "Performance varies significantly across models".to_string(),
701 "Cost-effectiveness inversely correlated with quality".to_string(),
702 "Vision support available in selected models".to_string(),
703 ];
704
705 let mut use_case_recommendations = HashMap::new();
706 use_case_recommendations.insert("speed".to_string(), fastest_model.clone());
707 use_case_recommendations.insert("quality".to_string(), highest_quality_model.clone());
708 use_case_recommendations.insert("cost".to_string(), most_cost_effective_model.clone());
709 use_case_recommendations.insert("balanced".to_string(), best_overall_model.clone());
710
711 ComparisonSummary {
712 fastest_model,
713 highest_quality_model,
714 most_cost_effective_model,
715 best_overall_model,
716 key_differences,
717 use_case_recommendations,
718 }
719 }
720
    /// Builds hard-coded recommendations for a named use case.
    ///
    /// Recognized use cases: "code-generation" and "creative-writing"; any
    /// other string falls through to a general-purpose default. Model data
    /// (`_models`) is currently unused — the recommendations are static.
    fn create_use_case_recommendations(&self, use_case: &str, _models: &[ModelObject]) -> ModelUsageRecommendations {
        use crate::types::{
            ModelRecommendation, RecommendedParameters, PerformanceExpectations,
            CostRange, QualityLevel,
        };

        let (recommended_models, guidelines, recommended_params, expected_perf) = match use_case {
            // Coding: low-temperature, sonnet for depth / haiku for bulk.
            "code-generation" => {
                let models = vec![
                    ModelRecommendation {
                        model_id: "claude-3-5-sonnet-latest".to_string(),
                        reason: "Excellent code understanding and generation capabilities".to_string(),
                        confidence_score: 9,
                        cost_range: CostRange {
                            min_cost_usd: 0.003,
                            max_cost_usd: 0.015,
                            typical_cost_usd: 0.008,
                        },
                        strengths: vec![
                            "Strong programming language support".to_string(),
                            "Good debugging assistance".to_string(),
                            "Comprehensive code explanations".to_string(),
                        ],
                        limitations: vec![
                            "May generate verbose explanations".to_string(),
                        ],
                    },
                    ModelRecommendation {
                        model_id: "claude-3-5-haiku-latest".to_string(),
                        reason: "Fast and cost-effective for simple code tasks".to_string(),
                        confidence_score: 7,
                        cost_range: CostRange {
                            min_cost_usd: 0.0003,
                            max_cost_usd: 0.0015,
                            typical_cost_usd: 0.0008,
                        },
                        strengths: vec![
                            "Very fast response times".to_string(),
                            "Cost-effective for bulk operations".to_string(),
                        ],
                        limitations: vec![
                            "Less sophisticated for complex problems".to_string(),
                        ],
                    },
                ];

                let guidelines = vec![
                    "Provide clear specifications and examples".to_string(),
                    "Request code comments for maintainability".to_string(),
                    "Ask for error handling and edge cases".to_string(),
                ];

                // Low temperature/top_p for deterministic code output.
                let params = RecommendedParameters {
                    temperature_range: (0.0, 0.3),
                    max_tokens_range: (1024, 4096),
                    top_p_range: Some((0.1, 0.5)),
                    use_streaming: Some(true),
                    system_message_patterns: vec![
                        "You are an expert programmer. Provide clean, well-documented code.".to_string(),
                    ],
                };

                let perf = PerformanceExpectations {
                    response_time_range_ms: (1000, 5000),
                    cost_range: CostRange {
                        min_cost_usd: 0.0003,
                        max_cost_usd: 0.015,
                        typical_cost_usd: 0.008,
                    },
                    quality_level: QualityLevel::Excellent,
                    success_rate_percentage: 90.0,
                };

                (models, guidelines, params, perf)
            },

            // Creative writing: premium model, high temperature.
            "creative-writing" => {
                let models = vec![
                    ModelRecommendation {
                        model_id: "claude-3-opus-latest".to_string(),
                        reason: "Highest quality creative output with nuanced understanding".to_string(),
                        confidence_score: 10,
                        cost_range: CostRange {
                            min_cost_usd: 0.015,
                            max_cost_usd: 0.075,
                            typical_cost_usd: 0.035,
                        },
                        strengths: vec![
                            "Exceptional creativity and originality".to_string(),
                            "Strong narrative structure".to_string(),
                            "Rich character development".to_string(),
                        ],
                        limitations: vec![
                            "Higher cost per token".to_string(),
                            "Slower response times".to_string(),
                        ],
                    },
                ];

                let guidelines = vec![
                    "Use higher temperature for more creativity".to_string(),
                    "Provide detailed prompts for better context".to_string(),
                    "Consider iterative refinement".to_string(),
                ];

                // High temperature/top_p encourages varied, creative output.
                let params = RecommendedParameters {
                    temperature_range: (0.7, 1.0),
                    max_tokens_range: (2048, 8192),
                    top_p_range: Some((0.8, 0.95)),
                    use_streaming: Some(true),
                    system_message_patterns: vec![
                        "You are a creative writer with expertise in storytelling and narrative structure.".to_string(),
                    ],
                };

                let perf = PerformanceExpectations {
                    response_time_range_ms: (2000, 8000),
                    cost_range: CostRange {
                        min_cost_usd: 0.015,
                        max_cost_usd: 0.075,
                        typical_cost_usd: 0.035,
                    },
                    quality_level: QualityLevel::Excellent,
                    success_rate_percentage: 95.0,
                };

                (models, guidelines, params, perf)
            },

            // Any other use case: balanced, general-purpose defaults.
            _ => {
                let models = vec![
                    ModelRecommendation {
                        model_id: "claude-3-5-sonnet-latest".to_string(),
                        reason: "Well-balanced model suitable for most tasks".to_string(),
                        confidence_score: 8,
                        cost_range: CostRange {
                            min_cost_usd: 0.003,
                            max_cost_usd: 0.015,
                            typical_cost_usd: 0.008,
                        },
                        strengths: vec![
                            "Good balance of speed, quality, and cost".to_string(),
                            "Wide range of capabilities".to_string(),
                        ],
                        limitations: vec![
                            "May not be optimal for specialized tasks".to_string(),
                        ],
                    },
                ];

                let guidelines = vec![
                    "Start with moderate temperature settings".to_string(),
                    "Adjust parameters based on specific needs".to_string(),
                ];

                let params = RecommendedParameters {
                    temperature_range: (0.3, 0.7),
                    max_tokens_range: (1024, 4096),
                    top_p_range: Some((0.5, 0.9)),
                    use_streaming: Some(false),
                    system_message_patterns: vec![
                        "You are a helpful AI assistant.".to_string(),
                    ],
                };

                let perf = PerformanceExpectations {
                    response_time_range_ms: (1000, 4000),
                    cost_range: CostRange {
                        min_cost_usd: 0.003,
                        max_cost_usd: 0.015,
                        typical_cost_usd: 0.008,
                    },
                    quality_level: QualityLevel::Good,
                    success_rate_percentage: 85.0,
                };

                (models, guidelines, params, perf)
            }
        };

        // Pitfalls are shared across all use cases.
        let pitfalls = vec![
            "Using inappropriate temperature settings".to_string(),
            "Not providing sufficient context".to_string(),
            "Ignoring token limits and costs".to_string(),
        ];

        ModelUsageRecommendations {
            use_case: use_case.to_string(),
            recommended_models,
            guidelines,
            recommended_parameters: recommended_params,
            pitfalls,
            expected_performance: expected_perf,
        }
    }
918}
919
#[cfg(test)]
mod tests {
    use super::*;

    // A 3.5 Sonnet id should derive the large context window plus vision and
    // tool support, including the corresponding capability entries.
    #[test]
    fn test_derive_capabilities() {
        let client = Anthropic::new("test_key".to_string()).unwrap();
        let models_resource = ModelsResource::new(&client);

        let model = ModelObject {
            id: "claude-3-5-sonnet-latest".to_string(),
            display_name: "Claude 3.5 Sonnet".to_string(),
            created_at: Utc::now(),
            object_type: "model".to_string(),
        };

        let capabilities = models_resource.derive_capabilities(&model);

        assert_eq!(capabilities.max_context_length, 200_000);
        assert!(capabilities.supports_vision);
        assert!(capabilities.supports_tools);
        assert!(capabilities.capabilities.contains(&ModelCapability::Vision));
        assert!(capabilities.capabilities.contains(&ModelCapability::ToolUse));
    }

    // A 3.5 Haiku id should map to the fast pricing tier's rates.
    #[test]
    fn test_derive_pricing() {
        let client = Anthropic::new("test_key".to_string()).unwrap();
        let models_resource = ModelsResource::new(&client);

        let model = ModelObject {
            id: "claude-3-5-haiku-latest".to_string(),
            display_name: "Claude 3.5 Haiku".to_string(),
            created_at: Utc::now(),
            object_type: "model".to_string(),
        };

        let pricing = models_resource.derive_pricing(&model);

        assert_eq!(pricing.input_price_per_million, 0.25);
        assert_eq!(pricing.output_price_per_million, 1.25);
        assert_eq!(pricing.tier, PricingTier::Fast);
    }

    // A 3.5 Haiku id should score maximum speed and cost efficiency, with a
    // correspondingly high throughput estimate.
    #[test]
    fn test_derive_performance() {
        let client = Anthropic::new("test_key".to_string()).unwrap();
        let models_resource = ModelsResource::new(&client);

        let model = ModelObject {
            id: "claude-3-5-haiku-latest".to_string(),
            display_name: "Claude 3.5 Haiku".to_string(),
            created_at: Utc::now(),
            object_type: "model".to_string(),
        };

        let performance = models_resource.derive_performance(&model);

        assert_eq!(performance.speed_score, 10);
        assert_eq!(performance.cost_efficiency_score, 10);
        assert!(performance.tokens_per_second.unwrap() > 50.0);
    }

    // A model satisfying vision + context requirements gets a positive score.
    #[test]
    fn test_score_model_with_requirements() {
        let client = Anthropic::new("test_key".to_string()).unwrap();
        let models_resource = ModelsResource::new(&client);

        let model = ModelObject {
            id: "claude-3-5-sonnet-latest".to_string(),
            display_name: "Claude 3.5 Sonnet".to_string(),
            created_at: Utc::now(),
            object_type: "model".to_string(),
        };

        let capabilities = models_resource.derive_capabilities(&model);
        let pricing = models_resource.derive_pricing(&model);
        let performance = models_resource.derive_performance(&model);

        let requirements = ModelRequirements::new()
            .require_vision()
            .min_context_length(50000);

        let score = models_resource.score_model(&model, &capabilities, &pricing, &performance, &requirements);

        assert!(score.is_some());
        assert!(score.unwrap() > 0.0);
    }

    // An unmeetable input-cost cap must eliminate the model (score None).
    #[test]
    fn test_score_model_elimination() {
        let client = Anthropic::new("test_key".to_string()).unwrap();
        let models_resource = ModelsResource::new(&client);

        let model = ModelObject {
            id: "claude-3-haiku-20240307".to_string(),
            display_name: "Claude 3 Haiku".to_string(),
            created_at: Utc::now(),
            object_type: "model".to_string(),
        };

        let capabilities = models_resource.derive_capabilities(&model);
        let pricing = models_resource.derive_pricing(&model);
        let performance = models_resource.derive_performance(&model);

        let requirements = ModelRequirements::new()
            .max_input_cost_per_token(0.0000001);
        let score = models_resource.score_model(&model, &capabilities, &pricing, &performance, &requirements);

        assert!(score.is_none());
    }
}