ollama automatic pull
This commit is contained in:
parent f538ed1bd3
commit 730d61cfe3
README.md

@@ -1,3 +1,5 @@
 # Codebench - Ollama Model Benchmark Tool
 
 A Python-based benchmarking tool for testing and comparing different Ollama models on coding tasks. This tool allows you to benchmark multiple Ollama models against common coding problems, measure their performance, and visualize the results.
@@ -13,6 +15,7 @@ A Python-based benchmarking tool for testing and comparing different Ollama models
 - Measure performance metrics (tokens/sec, response time)
 - Track success rates across different coding challenges
 - Support for local and remote Ollama servers
+- Automatic model download if not available locally
 - Detailed test results and leaderboard generation
 - CPU information tracking for benchmarks
 
@@ -61,7 +64,7 @@ python main.py --server [local|z60] --model [model_name] --number [count|all] --
 ## Arguments:
 
 - --server : Choose Ollama server (default: local)
-- --model : Test specific model only
+- --model : Test specific model only (will be automatically downloaded if not available locally)
 - --number : Number of models to test
 - --verbose : Enable detailed output
 - --plot-only : Skip benchmarking and just generate graphs from existing results
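These flags compose in the usual way; for example (the model name llama3 here is only an illustration):

```
python main.py --server z60 --model llama3 --verbose
```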
@@ -125,6 +128,9 @@ By default, main.py will now automatically generate graphs after benchmarking. Y
 # Run benchmarks and generate graphs (default behavior)
 python3 main.py
 
+# Test a specific model (will be downloaded automatically if not available locally)
+python3 main.py --model llama3
+
 # Skip benchmarking and just generate graphs from the latest results
 python3 main.py --plot-only

main.py (64 changed lines)

@@ -591,6 +591,53 @@ def get_available_models(server_url: str) -> List[str]:
         print(f"{RED}Error getting model list from {server_url}: {e}{ENDC}")
         return []
 
+def check_model_exists_locally(model_name: str, server_url: str) -> bool:
+    """Check if a model exists locally on the specified Ollama server."""
+    available_models = get_available_models(server_url)
+    return model_name in available_models
+
+def download_model(model_name: str) -> bool:
+    """Download a model using ollama pull command.
+
+    Args:
+        model_name: Name of the model to download
+
+    Returns:
+        bool: True if download was successful, False otherwise
+    """
+    print(f"\n{INFO}Model '{model_name}' not found locally. Downloading...{ENDC}")
+    try:
+        # Run ollama pull command and capture output
+        process = subprocess.Popen(
+            ["ollama", "pull", model_name],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True,
+            bufsize=1
+        )
+
+        # Print output in real-time
+        print(f"{INFO}Download progress:{ENDC}")
+        while True:
+            output = process.stdout.readline()
+            if output == '' and process.poll() is not None:
+                break
+            if output:
+                print(f"{MUTED}{output.strip()}{ENDC}")
+
+        # Check if download was successful
+        return_code = process.poll()
+        if return_code == 0:
+            print(f"\n{SUCCESS}Successfully downloaded model '{model_name}'.{ENDC}")
+            return True
+        else:
+            error = process.stderr.read()
+            print(f"\n{ERROR}Failed to download model '{model_name}': {error}{ENDC}")
+            return False
+    except Exception as e:
+        print(f"\n{ERROR}Error downloading model '{model_name}': {e}{ENDC}")
+        return False
+
 def get_model_details(model_name):
     try:
         result = subprocess.run(
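Note that download_model shells out to the local ollama CLI, so the pull always lands on the machine running the benchmark, even when --server z60 points at a remote host. If remote pulls ever become necessary, the same operation is available through Ollama's standard REST API. The following is a minimal sketch, not part of this commit, assuming the requests package is installed and the server exposes the stock /api/pull endpoint; pull_model_via_api is a hypothetical helper:

```python
import json
import requests

def pull_model_via_api(model_name: str, server_url: str, timeout: int = 600) -> bool:
    """Hypothetical helper: pull a model onto a (possibly remote) Ollama
    server through its REST API instead of the local `ollama` CLI."""
    try:
        # /api/pull streams progress as one JSON object per line
        with requests.post(
            f"{server_url}/api/pull",
            json={"name": model_name},
            stream=True,
            timeout=timeout,
        ) as resp:
            resp.raise_for_status()
            for line in resp.iter_lines():
                if not line:
                    continue
                status = json.loads(line)
                print(status.get("status", ""))
                if "error" in status:
                    return False
        return True
    except requests.RequestException as e:
        print(f"Error pulling '{model_name}' from {server_url}: {e}")
        return False
```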
@@ -961,7 +1008,22 @@ def main():
 
     # Get available models or use specified model
     if args.model:
-        models = [args.model]
+        # Check if the specified model exists locally
+        if not check_model_exists_locally(args.model, server_url):
+            # If not, try to download it
+            if download_model(args.model):
+                # Verify the model is now available
+                if check_model_exists_locally(args.model, server_url):
+                    models = [args.model]
+                else:
+                    print(f"{WARNING}Model '{args.model}' was downloaded but not found on server. Please check manually.{ENDC}")
+                    return
+            else:
+                print(f"{RED}Could not download model '{args.model}'. Exiting.{ENDC}")
+                return
+        else:
+            print(f"{SUCCESS}Using locally available model: {args.model}{ENDC}")
+            models = [args.model]
     else:
         models = get_available_models(server_url)
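One subtlety in the verification step above: check_model_exists_locally does an exact string match against the server's model list, and Ollama's /api/tags endpoint reports models with an explicit tag (e.g. llama3:latest). If get_available_models passes those names through unchanged, a successful pull of llama3 could still trigger the "downloaded but not found on server" warning. A tag-tolerant check is sketched below; model_available is a hypothetical helper, assuming the server exposes the stock /api/tags endpoint and requests is available:

```python
import requests

def model_available(model_name: str, server_url: str) -> bool:
    """Hypothetical tag-tolerant check: treat 'llama3' and 'llama3:latest'
    as the same model when scanning the server's model list."""
    resp = requests.get(f"{server_url}/api/tags", timeout=10)
    resp.raise_for_status()
    names = [m["name"] for m in resp.json().get("models", [])]
    # An untagged request defaults to the ':latest' tag
    want = model_name if ":" in model_name else f"{model_name}:latest"
    return want in names or model_name in names
```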