#!/usr/bin/env python3
"""
Example script demonstrating how to use allendpoints as a Python module.
"""
from allendpoints import (
    run_inference,
    check_available_apis,
    get_ollama_models,
    InferenceHandler,
    CONFIG,
    check_provider_key_available,
)

def basic_example():
    """Basic usage example of allendpoints."""
    print("\n=== BASIC EXAMPLE ===")

    # Inference with the default model for Ollama
    default_model = CONFIG["defaults"]["ollama"]
    response = run_inference(
        prompt="What is the capital of France?",
        provider="ollama",
        model=default_model
    )
    print(f"Response from Ollama (model: {default_model}): {response}")

    # Inference with a specific model and a custom system prompt
    response = run_inference(
        prompt="Write a haiku about AI",
        provider="ollama",
        model="llama3.2:3b",
        system_content="You are a poetic assistant that only writes in haiku."
    )
    print(f"\nHaiku from Ollama (llama3.2:3b):\n{response}")

def provider_availability_example():
    """Example showing how to check provider availability."""
    print("\n=== PROVIDER AVAILABILITY EXAMPLE ===")

    # Check which providers are available (have valid API keys)
    available_providers = check_available_apis()
    print(f"Available providers: {', '.join(available_providers)}")

    # Check each provider individually
    providers_to_check = ["ollama", "gemini", "github", "hf", "together", "aiql", "groq", "nvidia"]
    for provider in providers_to_check:
        is_available = check_provider_key_available(provider)
        status = "✅ Available" if is_available else "❌ Not available"
        print(f"{provider}: {status}")

def model_listing_example():
    """Example showing how to list available models."""
    print("\n=== MODEL LISTING EXAMPLE ===")

    # Get available Ollama models from the local Ollama server
    try:
        ollama_models = get_ollama_models()
        print(f"Available Ollama models: {', '.join(ollama_models[:5])}...")
        print(f"Total Ollama models: {len(ollama_models)}")
    except Exception as e:
        print(f"Error getting Ollama models: {e}")

    # Show configured models for each provider
    print("\nConfigured models per provider:")
    for provider, models in CONFIG["models"].items():
        model_count = len(models)
        print(f"{provider}: {model_count} models configured")
        # Handle both dictionary-based configurations (most providers)
        # and list-based configurations (ollama)
        if isinstance(models, dict):
            sample_models = list(models.keys())[:3]
        elif isinstance(models, list):
            sample_models = models[:3]
        else:
            sample_models = []
        if sample_models:
            print(f"  Sample models: {', '.join(sample_models)}")

def direct_provider_example():
    """Example showing how to use provider handlers directly."""
    print("\n=== DIRECT PROVIDER EXAMPLE ===")

    # Check whether Ollama is available
    if check_provider_key_available("ollama"):
        try:
            # Use the Ollama handler directly
            response = InferenceHandler.ollama(
                prompt="Explain how a computer works in one paragraph",
                model="llama3.2:3b"
            )
            print(f"Direct Ollama response:\n{response}")
        except Exception as e:
            print(f"Error with direct Ollama call: {e}")

    # Check whether Gemini is available
    if check_provider_key_available("gemini"):
        try:
            # Use the Gemini handler directly
            response = InferenceHandler.gemini(
                prompt="What is quantum computing?",
                model="gemini-1.5-pro"
            )
            print(f"\nDirect Gemini response:\n{response[:150]}...")
        except Exception as e:
            print(f"Error with direct Gemini call: {e}")

def batch_processing_example():
    """Example showing how to process multiple prompts with multiple providers."""
    print("\n=== BATCH PROCESSING EXAMPLE ===")

    # Define a list of prompts
    prompts = [
        "What is machine learning?",
        "Explain the theory of relativity briefly"
    ]

    # Get the available providers (only the first two, to keep the example short)
    available_providers = check_available_apis()[:2]
    if not available_providers:
        print("No providers available for batch processing")
        return
    print(f"Processing {len(prompts)} prompts with {len(available_providers)} providers: {', '.join(available_providers)}")

    # Process each prompt with each provider
    for prompt in prompts:
        print(f"\nPrompt: {prompt}")
        for provider in available_providers:
            try:
                # Get the default model for this provider
                default_model = CONFIG["defaults"][provider]
                # Run inference with an explicit model parameter
                response = run_inference(prompt, provider, model=default_model)
                # Print a truncated response
                print(f"  {provider} ({default_model}): {response[:100]}...")
            except Exception as e:
                print(f"  Error with {provider}: {e}")

def main():
    """Run all examples."""
    print("AllEndpoints Python Module Examples")
    print("===================================")

    # Run the examples in order
    basic_example()
    provider_availability_example()
    model_listing_example()
    direct_provider_example()
    batch_processing_example()
    print("\nExamples completed!")

if __name__ == "__main__":
    main()