import json
import requests
from typing import List, Dict, Optional
import re
class PackageManager:
def __init__(self):
self.repositories = [
{
"name": "huggingface",
"url": "https://huggingface.co/api/models",
"priority": 1,
"type": "rest_api"
},
{
"name": "ollama",
"url": "https://ollama.ai/library",
"priority": 2,
"type": "web_scrape"
},
{
"name": "onnx",
"url": "https://github.com/onnx/models",
"priority": 3,
"type": "github"
}
]
def search_models(self, query: str) -> List[Dict]:
results = []
if "llama" in query.lower():
results.append({
"name": "llama-2-7b",
"repository": "huggingface",
"description": "LLaMA 2 7B parameter model",
"size": "13GB",
"format": "gguf"
})
results.append({
"name": "llama2",
"repository": "ollama",
"description": "Ollama's optimized LLaMA 2",
"size": "4GB",
"format": "gguf"
})
if "gpt" in query.lower():
results.append({
"name": "gpt2",
"repository": "huggingface",
"description": "OpenAI's GPT-2 model",
"size": "1.5GB",
"format": "pytorch"
})
return results
def fuzzy_match(self, input_str: str, target: str, threshold: float = 0.8) -> bool:
if input_str.lower() in target.lower() or target.lower() in input_str.lower():
return True
longer = max(len(input_str), len(target))
if longer == 0:
return True
matches = sum(1 for a, b in zip(input_str.lower(), target.lower()) if a == b)
ratio = matches / longer
return ratio >= threshold
def suggest_command(self, typo: str) -> Optional[str]:
commands = ["install", "remove", "search", "list", "update", "info"]
for cmd in commands:
if self.fuzzy_match(typo, cmd, 0.6):
return cmd
return None
def install_model(self, model_name: str) -> Dict:
results = self.search_models(model_name)
if not results:
suggestion = None
if "lama" in model_name.lower(): suggestion = "llama"
elif "gtp" in model_name.lower(): suggestion = "gpt"
if suggestion:
return {
"status": "error",
"message": f"Model '{model_name}' not found.",
"suggestion": f"Did you mean '{suggestion}'? Try: inferno install {suggestion}"
}
return {
"status": "error",
"message": f"Model '{model_name}' not found in any repository"
}
model = results[0]
return {
"status": "success",
"message": f"Successfully installed {model['name']} from {model['repository']}",
"details": model
}
def test_package_system():
    """Demo driver: exercises search, fuzzy command matching, install with
    typo correction, repository prioritization, and a best-effort live
    connectivity probe against the HuggingFace API.

    Prints results to stdout; returns nothing.
    """
    pm = PackageManager()
    print("🔥 Testing Inferno Package Management System")
    print("=" * 50)

    print("\n1️⃣ Testing search functionality...")
    results = pm.search_models("llama")
    print(f" ✅ Found {len(results)} models for 'llama'")
    for r in results:
        print(f" - {r['name']} ({r['repository']}): {r['size']}")

    print("\n2️⃣ Testing fuzzy command matching...")
    typos = ["instal", "isntall", "intall", "serch", "lst"]
    for typo in typos:
        suggestion = pm.suggest_command(typo)
        if suggestion:
            print(f" ✅ '{typo}' → suggested: '{suggestion}'")
        else:
            print(f" ❌ No suggestion for '{typo}'")

    print("\n3️⃣ Testing installation with typo correction...")
    # BUG FIX: the assignment and the following `if` were fused onto one
    # line (`result = pm.install_model("lama-2") if result[...] == ...:`),
    # which is a SyntaxError and read `result` before assignment.
    result = pm.install_model("lama-2")
    if result["status"] == "error" and "suggestion" in result:
        print(f" ✅ Typo detected: {result['message']}")
        print(f" {result['suggestion']}")

    print("\n4️⃣ Testing successful installation...")
    result = pm.install_model("llama")
    if result["status"] == "success":
        print(f" ✅ {result['message']}")
        print(f" Model: {result['details']['name']}")
        print(f" Size: {result['details']['size']}")

    print("\n5️⃣ Testing repository prioritization...")
    print(" Repository priority order:")
    for repo in sorted(pm.repositories, key=lambda x: x["priority"]):
        print(f" {repo['priority']}. {repo['name']} ({repo['url']})")

    print("\n" + "=" * 50)
    print("✅ All package management tests completed!")

    print("\n6️⃣ Testing repository connectivity...")
    try:
        response = requests.get("https://huggingface.co/api/models", params={"limit": 1}, timeout=5)
        if response.status_code == 200:
            print(" ✅ HuggingFace API is accessible")
        else:
            print(f" ⚠️ HuggingFace API returned status {response.status_code}")
    except Exception as e:
        # Deliberate best-effort probe: any network/HTTP failure is
        # reported but must not crash the demo.
        print(f" ❌ Could not connect to HuggingFace: {e}")


if __name__ == "__main__":
    test_package_system()