Quick Fix
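Most "model not found" errors come down to a misspelled, wrongly cased, or retired model name. The sketch below is a minimal first-aid check, not a complete solution: it lowercases the name and substitutes known deprecated models (the mapping mirrors the migration table later in this section; keep it in sync with your providers).

# Minimal sketch: normalize a model name and swap known deprecated names.
# The mapping is illustrative; extend it for the providers you use.
DEPRECATED = {
    "text-davinci-003": "gpt-3.5-turbo",
    "code-davinci-002": "gpt-3.5-turbo",
    "claude-v1": "claude-3-sonnet-20240229",
    "claude-instant-v1": "claude-3-haiku-20240307",
}

def quick_fix(model_name: str) -> str:
    """Lowercase the name (current model IDs are lowercase) and replace retired models."""
    name = model_name.strip().lower()  # "GPT-4" -> "gpt-4"
    return DEPRECATED.get(name, name)

print(quick_fix("GPT-4"))             # gpt-4
print(quick_fix("text-davinci-003"))  # gpt-3.5-turbo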
Common Causes of Model Not Found Errors
According to OpenAI's error documentation, model errors typically fall into these categories:
- Wrong casing: model names are case-sensitive and must match exactly. ❌ "GPT-4" → ✅ "gpt-4"
- Deprecated model: old models are retired and require migration. ❌ "text-davinci-003" → ✅ "gpt-3.5-turbo"
- Insufficient account tier: premium models require specific tiers ("GPT-4 requires paid account").
- Regional restriction: some models are region-specific ("Not available in your region").
Correct Model Names by Provider
Always check the latest model lists: OpenAI Models, Anthropic Models, and Google Vertex AI Models.
| Provider | Current Models | Deprecated | Migration Path |
|---|---|---|---|
| OpenAI | gpt-4-turbo-preview, gpt-4, gpt-3.5-turbo | text-davinci-003, code-davinci-002 | Use chat models |
| Anthropic | claude-3-opus-20240229, claude-3-sonnet-20240229, claude-3-haiku-20240307 | claude-v1, claude-instant-v1 | Use Claude 3 family |
| Google | gemini-1.5-pro, gemini-pro, gemini-pro-vision | palm-2, bison | Use Gemini models |
| AWS Bedrock | anthropic.claude-3-*, amazon.titan-*, meta.llama2-* | Version-specific | Check regional availability |
How to Check Available Models
Use these methods to discover which models you have access to, as shown in AWS Bedrock's guide and Azure OpenAI documentation.
OpenAI: List Available Models
from openai import OpenAI

client = OpenAI(api_key="your-api-key")

def check_available_models():
    """List all available models for your account"""
    try:
        models = client.models.list()
        # Group by model family
        gpt4_models = []
        gpt35_models = []
        other_models = []
        for model in models.data:
            if model.id.startswith("gpt-4"):
                gpt4_models.append(model.id)
            elif model.id.startswith("gpt-3.5"):
                gpt35_models.append(model.id)
            else:
                other_models.append(model.id)
        print("🤖 GPT-4 Models:")
        for m in sorted(gpt4_models):
            print(f"  - {m}")
        print("\n💬 GPT-3.5 Models:")
        for m in sorted(gpt35_models):
            print(f"  - {m}")
        print("\n📦 Other Models:")
        for m in sorted(other_models):
            print(f"  - {m}")
        return models.data
    except Exception as e:
        print(f"Error listing models: {e}")
        return []

# Check if a specific model exists
def verify_model_access(model_name: str) -> bool:
    """Check if you have access to a specific model"""
    models = check_available_models()
    model_ids = [m.id for m in models]
    if model_name in model_ids:
        print(f"✅ You have access to {model_name}")
        return True
    else:
        print(f"❌ No access to {model_name}")
        # Suggest alternatives
        if "gpt-4" in model_name:
            available_gpt4 = [m for m in model_ids if "gpt-4" in m]
            if available_gpt4:
                print(f"   Try: {available_gpt4[0]}")
        return False

# Usage
check_available_models()
verify_model_access("gpt-4")
Anthropic: Check Model Access
import anthropic

client = anthropic.Anthropic(api_key="your-api-key")

def test_anthropic_models():
    """Test which Claude models you can access"""
    models_to_test = [
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
        "claude-3-haiku-20240307",
        "claude-2.1",
        "claude-2.0",
        "claude-instant-1.2"
    ]
    available_models = []
    for model in models_to_test:
        try:
            # Test with a minimal request
            response = client.messages.create(
                model=model,
                max_tokens=10,
                messages=[{"role": "user", "content": "Hi"}]
            )
            available_models.append(model)
            print(f"✅ {model} - Available")
        except anthropic.NotFoundError:
            print(f"❌ {model} - Not found or no access")
        except anthropic.PermissionDeniedError:
            print(f"🔒 {model} - Permission denied")
        except Exception as e:
            print(f"⚠️ {model} - Error: {str(e)[:50]}...")
    return available_models

# Usage
available = test_anthropic_models()
print(f"\nYou have access to {len(available)} Claude models")
AWS Bedrock: List Foundation Models
import boto3
from botocore.exceptions import ClientError

def list_bedrock_models(region='us-east-1'):
    """List all available models in AWS Bedrock"""
    bedrock = boto3.client(
        service_name='bedrock',
        region_name=region
    )
    try:
        response = bedrock.list_foundation_models()
        # Group by provider
        models_by_provider = {}
        for model in response['modelSummaries']:
            provider = model['providerName']
            model_id = model['modelId']
            if provider not in models_by_provider:
                models_by_provider[provider] = []
            models_by_provider[provider].append({
                'id': model_id,
                'name': model['modelName'],
                'input_modalities': model.get('inputModalities', []),
                'output_modalities': model.get('outputModalities', [])
            })
        # Display results
        for provider, models in sorted(models_by_provider.items()):
            print(f"\n🏢 {provider}:")
            for model in models:
                print(f"  - {model['id']}")
                print(f"    Name: {model['name']}")
                print(f"    Input: {', '.join(model['input_modalities'])}")
        return models_by_provider
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code == 'AccessDeniedException':
            print("❌ Access denied. Check AWS permissions.")
        else:
            print(f"❌ Error: {error_code}")
        return {}

# Check specific model availability
def check_bedrock_model(model_id: str, region='us-east-1'):
    """Check if a specific model is available"""
    bedrock_runtime = boto3.client(
        service_name='bedrock-runtime',
        region_name=region
    )
    try:
        # Test with a minimal request. Note: the body format is
        # provider-specific; this simple prompt body does not fit
        # every model family (see the Claude 3 example below).
        response = bedrock_runtime.invoke_model(
            modelId=model_id,
            body='{"prompt": "Hello", "max_tokens": 10}'
        )
        print(f"✅ Model {model_id} is available in {region}")
        return True
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code == 'ResourceNotFoundException':
            print(f"❌ Model {model_id} not found in {region}")
        elif error_code == 'AccessDeniedException':
            print(f"🔒 No access to {model_id}")
        elif error_code == 'ValidationException':
            # The model exists but rejected the request body format
            print(f"⚠️ {model_id} exists, but the request body format is wrong")
        else:
            print(f"⚠️ Error with {model_id}: {error_code}")
        return False

# Usage
models = list_bedrock_models('us-east-1')
check_bedrock_model('anthropic.claude-3-opus-20240229-v1:0')
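The invoke body must match the target model's native request schema. For Claude 3 models on Bedrock that is the Anthropic Messages format; a minimal sketch:

import json
import boto3

bedrock_runtime = boto3.client('bedrock-runtime', region_name='us-east-1')

# Claude 3 models on Bedrock expect the Anthropic Messages request schema
body = json.dumps({
    "anthropic_version": "bedrock-2023-05-31",
    "max_tokens": 10,
    "messages": [{"role": "user", "content": "Hi"}]
})
response = bedrock_runtime.invoke_model(
    modelId='anthropic.claude-3-haiku-20240307-v1:0',
    body=body
)
print(json.loads(response['body'].read())['content'][0]['text'])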
Google Vertex AI: Model Discovery
from google.cloud import aiplatform
from google.api_core import exceptions

def list_vertex_models(project_id: str, location: str = 'us-central1'):
    """List available models in Vertex AI"""
    aiplatform.init(project=project_id, location=location)
    try:
        # Note: Model.list() returns models in *your* Model Registry
        # (custom/uploaded models); foundation models such as Gemini
        # are addressed directly by name instead.
        models = aiplatform.Model.list()
        print(f"📦 Available models in {location}:")
        for model in models:
            print(f"  - {model.display_name}")
            print(f"    Resource: {model.resource_name}")
        # Check Gemini availability
        gemini_models = [
            "gemini-1.5-pro",
            "gemini-pro",
            "gemini-pro-vision"
        ]
        print("\n🔷 Gemini Models:")
        for model_name in gemini_models:
            # Actually send a request to verify access (helper below)
            test_gemini_model(model_name)
    except exceptions.GoogleAPIError as e:
        print(f"❌ Error: {e}")
    except Exception as e:
        print(f"⚠️ Unexpected error: {e}")

# Test specific model
def test_gemini_model(model_name: str = "gemini-pro"):
    """Test if a Gemini model is accessible"""
    from vertexai.generative_models import GenerativeModel
    try:
        model = GenerativeModel(model_name)
        response = model.generate_content("Hello")
        print(f"✅ {model_name} is working")
        return True
    except Exception as e:
        print(f"❌ {model_name} error: {e}")
        return False
Model Migration Guides
When models are deprecated, follow these migration paths. See OpenAI's deprecation timeline for details.
OpenAI: Migrate from Completions to Chat
# ❌ OLD: Using deprecated text-davinci-003
import openai

response = openai.Completion.create(
    model="text-davinci-003",
    prompt="Translate to French: Hello world",
    max_tokens=100
)

# ✅ NEW: Using chat models
from openai import OpenAI

client = OpenAI()
response = client.chat.completions.create(
    model="gpt-3.5-turbo",  # or "gpt-4"
    messages=[
        {"role": "system", "content": "You are a translator."},
        {"role": "user", "content": "Translate to French: Hello world"}
    ],
    max_tokens=100
)

# ✅ For code generation (replacing code-davinci-002)
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a code assistant."},
        {"role": "user", "content": "Write a Python function to calculate fibonacci"}
    ],
    temperature=0  # For more deterministic code generation
)
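The response shape changes as well: the legacy Completions API returned the text in choices[0].text, while Chat Completions nests it under a message.

# Legacy Completions API:
# text = response.choices[0].text

# Chat Completions API:
text = response.choices[0].message.content
print(text)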
Anthropic: Migrate to Claude 3
# ❌ OLD: Using deprecated Claude v1 with the Text Completions API
import anthropic

client = anthropic.Client()
response = client.completion(
    prompt=f"{anthropic.HUMAN_PROMPT} Hello {anthropic.AI_PROMPT}",
    model="claude-v1",
    max_tokens_to_sample=100
)

# ✅ NEW: Using Claude 3 with the Messages API
import anthropic

client = anthropic.Anthropic()
response = client.messages.create(
    model="claude-3-sonnet-20240229",  # or claude-3-opus, claude-3-haiku
    messages=[
        {"role": "user", "content": "Hello"}
    ],
    max_tokens=100
)

# Model selection guide:
# - claude-3-opus: Best quality, highest cost
# - claude-3-sonnet: Balanced performance/cost
# - claude-3-haiku: Fastest, most affordable
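The response shape changes here too: the legacy API exposed response.completion, while the Messages API returns a list of content blocks.

# Legacy Text Completions API:
# text = response.completion

# Messages API:
text = response.content[0].text
print(text)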
Handling Model Not Found Errors
import logging

class ModelManager:
    """Intelligent model selection with fallbacks"""

    def __init__(self):
        # Define model preferences and fallbacks
        self.model_fallbacks = {
            "gpt-4": ["gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-3.5-turbo"],
            "gpt-4-turbo": ["gpt-4-turbo-preview", "gpt-4", "gpt-3.5-turbo"],
            "claude-3-opus": ["claude-3-opus-20240229", "claude-3-sonnet-20240229"],
            "gemini-pro": ["gemini-1.5-pro", "gemini-pro"]
        }
        self.deprecated_models = {
            "text-davinci-003": "gpt-3.5-turbo",
            "code-davinci-002": "gpt-3.5-turbo",
            "claude-v1": "claude-3-sonnet-20240229",
            "claude-instant-v1": "claude-3-haiku-20240307"
        }

    def get_model_with_fallback(self, requested_model: str) -> str:
        """Get working model with automatic fallback"""
        # Check if model is deprecated
        if requested_model in self.deprecated_models:
            replacement = self.deprecated_models[requested_model]
            logging.warning(
                f"Model '{requested_model}' is deprecated. "
                f"Using '{replacement}' instead."
            )
            return replacement
        # Get fallback chain
        fallback_chain = self.model_fallbacks.get(
            requested_model,
            [requested_model]
        )
        # Try each model in the chain
        for model in fallback_chain:
            if self.test_model_availability(model):
                if model != requested_model:
                    logging.info(
                        f"Using '{model}' as fallback for '{requested_model}'"
                    )
                return model
        raise ValueError(
            f"No available models found for '{requested_model}'. "
            f"Tried: {fallback_chain}"
        )

    def get_alternatives(self, model_name: str) -> str:
        """Format the fallback chain for error messages"""
        return ", ".join(self.model_fallbacks.get(model_name, ["none known"]))

    def test_model_availability(self, model_name: str) -> bool:
        """Test if model is available"""
        # Provider-specific testing logic; this is a simplified example
        try:
            if model_name.startswith("gpt"):
                return self._test_openai_model(model_name)
            elif model_name.startswith("claude"):
                return self._test_anthropic_model(model_name)
            elif model_name.startswith("gemini"):
                return self._test_google_model(model_name)
            else:
                return False
        except Exception as e:
            logging.error(f"Error testing {model_name}: {e}")
            return False

    def _test_openai_model(self, model: str) -> bool:
        """Test OpenAI model availability"""
        from openai import OpenAI
        try:
            client = OpenAI()
            client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": "test"}],
                max_tokens=1
            )
            return True
        except Exception:
            return False

    def _test_anthropic_model(self, model: str) -> bool:
        """Test Anthropic model availability"""
        import anthropic
        try:
            anthropic.Anthropic().messages.create(
                model=model,
                max_tokens=1,
                messages=[{"role": "user", "content": "test"}]
            )
            return True
        except Exception:
            return False

    def _test_google_model(self, model: str) -> bool:
        """Test Gemini model availability"""
        try:
            from vertexai.generative_models import GenerativeModel
            GenerativeModel(model).generate_content("test")
            return True
        except Exception:
            return False

    def handle_model_error(self, error: Exception, requested_model: str):
        """Provide helpful error messages and solutions"""
        error_msg = str(error).lower()
        if ("does not exist" in error_msg or "not found" in error_msg
                or "no available models" in error_msg):
            print(f"""
❌ Model Not Found: '{requested_model}'
Possible solutions:
1. Check spelling (model names are case-sensitive)
2. Verify you have access (GPT-4 requires paid account)
3. Check if model is deprecated
4. Try alternative models: {self.get_alternatives(requested_model)}
Run manager.test_model_availability('{requested_model}') to probe access.
""")
        elif "permission" in error_msg or "access" in error_msg:
            print(f"""
🔒 Access Denied: '{requested_model}'
This model requires:
- Paid account or higher tier
- API access approval
- Specific organization permissions
Contact support or upgrade your account.
""")

# Usage
manager = ModelManager()
try:
    # Automatically handle deprecated models
    model = manager.get_model_with_fallback("text-davinci-003")
    print(f"Using model: {model}")
except ValueError as e:
    manager.handle_model_error(e, "text-davinci-003")
Regional Availability Issues
Region-Specific Solutions
Azure OpenAI
Models vary by region. Check Azure's region availability table.
# Check models available on your Azure OpenAI resource
from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint="https://YOUR_RESOURCE.openai.azure.com/",
    api_key="your-api-key",
    api_version="2024-02-15-preview"
)

# List models the resource can serve; deployments themselves are
# managed in the Azure portal or via the management API
for m in client.models.list():
    print(m.id)
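A common Azure-specific pitfall: requests take your *deployment name*, not the base model name, as the `model` argument. A sketch (the deployment name below is hypothetical):

# Pass the deployment name you created in Azure, not "gpt-4" itself
response = client.chat.completions.create(
    model="my-gpt4-deployment",  # hypothetical deployment name
    messages=[{"role": "user", "content": "Hello"}]
)
print(response.choices[0].message.content)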
AWS Bedrock
Model availability varies by AWS region:
# Check models in different regions
aws bedrock list-foundation-models --region us-east-1
aws bedrock list-foundation-models --region eu-west-1
Best Practices
Do's
- Always check model availability first
- Implement fallback models
- Handle deprecation gracefully
- Use exact model names (case-sensitive)
- Monitor deprecation announcements
- Test models in your specific region
- Cache model availability checks (see the sketch after this list)
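Availability probes each cost a real API call, so cache the results. A minimal sketch (the one-hour TTL is an arbitrary choice; tune it to your deployment cadence):

import time

_availability_cache = {}  # model name -> (result, timestamp)
CACHE_TTL = 3600  # seconds; arbitrary, adjust to taste

def cached_availability(model: str, probe) -> bool:
    """Probe a model at most once per TTL window; reuse the cached answer."""
    now = time.time()
    if model in _availability_cache:
        result, checked_at = _availability_cache[model]
        if now - checked_at < CACHE_TTL:
            return result
    result = probe(model)  # e.g. ModelManager().test_model_availability
    _availability_cache[model] = (result, now)
    return result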
Don'ts
- Don't hardcode model names (see the sketch after this list)
- Don't ignore deprecation warnings
- Don't assume model availability
- Don't mix model name formats
- Don't skip error handling
- Don't use outdated model lists
- Don't ignore regional restrictions
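To avoid hardcoding, pull the model name from configuration so a deprecation becomes a config change rather than a code change. A minimal sketch (the LLM_MODEL variable name is a made-up convention):

import os
from openai import OpenAI

# LLM_MODEL is a hypothetical variable name; any config source works
MODEL = os.environ.get("LLM_MODEL", "gpt-3.5-turbo")

def chat(prompt: str):
    client = OpenAI()
    return client.chat.completions.create(
        model=MODEL,  # swapped by changing config, not editing code
        messages=[{"role": "user", "content": prompt}]
    )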
References
- [1] OpenAI. "Error Codes Reference" (2024)
- [2] Anthropic. "API Errors" (2024)
- [3] Stack Overflow. "OpenAI API Questions" (2024)