codebench/models.py

import ollama
import subprocess
import json
import requests
import re
from pydantic import BaseModel

server_url = "http://localhost:11434"

# ANSI color codes
GREEN = '\033[92m'
BLUE = '\033[94m'
YELLOW = '\033[93m'
WHITE = '\033[97m'
RED = '\033[91m'
ENDC = '\033[0m'


def get_available_models(server_url):
    """Get list of available models from the specified Ollama server."""
    try:
        response = requests.get(f"{server_url}/api/tags").json()
        return [model['name'] for model in response['models']]
    except Exception as e:
        print(f"{RED}Error getting model list from {server_url}: {e}{ENDC}")
        return []


def get_model_details(model_name):
    """Get detailed information about a model via `ollama show <model>`."""
    try:
        # Use subprocess to call `ollama show <model>` for detailed information
        result = subprocess.run(
            ["ollama", "show", model_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True
        )
        # Check if the command was successful
        if result.returncode != 0:
            print(f"{RED}Error: {result.stderr.strip()}{ENDC}")
            return None
        # `ollama show` prints human-readable text, not JSON, so return the
        # raw output rather than trying to parse it with json.loads()
        return result.stdout
    except Exception as e:
        print(f"{RED}An error occurred: {e}{ENDC}")
        return None
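

# A minimal sketch (not part of the original module): if structured details
# are needed, Ollama's HTTP API exposes a /api/show endpoint that returns
# JSON, unlike the plain-text output of `ollama show`. Assumes the server at
# `server_url` is a standard Ollama instance; the function name is illustrative.
def get_model_details_api(model_name, url=server_url):
    """Fetch model details as a dict from the Ollama HTTP API (/api/show)."""
    try:
        response = requests.post(f"{url}/api/show", json={"model": model_name})
        response.raise_for_status()
        return response.json()
    except Exception as e:
        print(f"{RED}Error getting details for {model_name} from {url}: {e}{ENDC}")
        return None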


# This code is commented out to prevent automatic execution when imported:
# models = get_available_models(server_url)
# print("Available Models:")
# for model_name in models:
#     print(model_name)
#     details = get_model_details(model_name)
#
#     # Display detailed information about the model
#     if details:
#         print("\nModel Details:")
#         print(details)
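

# A minimal sketch of an idiomatic alternative to commenting the driver code
# out: guard it behind `__main__` so it runs when the file is executed
# directly but not when the module is imported.
if __name__ == "__main__":
    models = get_available_models(server_url)
    print("Available Models:")
    for model_name in models:
        print(model_name)
        details = get_model_details(model_name)
        # Display detailed information about the model
        if details:
            print("\nModel Details:")
            print(details)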