OpenAI Integration Guide
Integrate Cortex with OpenAI's APIs to enhance your GPT applications with real-time web knowledge and source verification.
🚀 Quick Start​
Installation​
pip install openai cortex-ai
Basic Setup​
import os
import time

import openai
from cortex import CortexClient
# Initialize clients
# Keys are read from the environment so credentials never live in source.
openai.api_key = os.getenv("OPENAI_API_KEY")
cortex_client = CortexClient(api_key=os.getenv("CORTEX_API_KEY"))
def enhanced_gpt_chat(user_message, enable_web_search=True):
    """Answer *user_message* with GPT-4, optionally grounded in live web data.

    When the message contains time-sensitive wording, a Cortex search runs
    first and its summary plus numbered sources are injected as an extra
    system message before the chat completion call.
    """
    recency_markers = ("latest", "recent", "current", "today", "news", "update")
    messages = [
        {"role": "system", "content": "You are a helpful assistant with access to real-time web information."}
    ]

    lowered = user_message.lower()
    if enable_web_search and any(marker in lowered for marker in recency_markers):
        search_result = cortex_client.search(user_message, max_results=5)
        if search_result.success:
            # Assemble grounding context: summary first, then numbered sources.
            pieces = [f"Current web information:\n{search_result.summary}\n\n", "Sources:\n"]
            for idx, src in enumerate(search_result.sources, 1):
                pieces.append(f"{idx}. {src.title} ({src.url})\n")
            context = "".join(pieces)
            messages.append({
                "role": "system",
                "content": f"Here's current information to help answer the user's question:\n\n{context}",
            })

    messages.append({"role": "user", "content": user_message})

    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=messages,
        temperature=0.7,
        max_tokens=1000,
    )
    return response.choices[0].message['content']
# Usage
# Time-sensitive phrasing ("latest") triggers the Cortex web-search path.
response = enhanced_gpt_chat("What are the latest developments in quantum computing?")
print(response)
🔧 Function Calling Integration​
Cortex Functions for GPT​
import json
# Define Cortex functions for OpenAI
# Each entry follows the OpenAI function-calling schema (JSON Schema in
# "parameters") and maps one-to-one onto a CortexClient method; dispatch is
# done by execute_cortex_function below.
cortex_functions = [
    {
        "name": "search_web",
        "description": "Search the web for current information on any topic",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query"
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of results to return",
                    "default": 5
                },
                "recency": {
                    "type": "string",
                    "description": "Time filter for results",
                    "enum": ["day", "week", "month", "year", "auto"],
                    "default": "auto"
                }
            },
            "required": ["query"]
        }
    },
    {
        "name": "extract_content",
        "description": "Extract clean text content from a specific URL",
        "parameters": {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The URL to extract content from"
                },
                "format": {
                    "type": "string",
                    "description": "Output format",
                    "enum": ["text", "markdown"],
                    "default": "text"
                }
            },
            "required": ["url"]
        }
    },
    {
        "name": "validate_claim",
        "description": "Validate a factual claim against multiple sources",
        "parameters": {
            "type": "object",
            "properties": {
                "claim": {
                    "type": "string",
                    "description": "The claim to validate"
                },
                "domain": {
                    "type": "string",
                    "description": "Subject domain (technology, science, finance, etc.)"
                }
            },
            "required": ["claim"]
        }
    }
]
def execute_cortex_function(function_name, arguments):
    """Dispatch a GPT function call to the matching CortexClient method.

    Returns a JSON-serializable dict in every case; failures and unknown
    function names produce an error payload instead of raising.
    """
    if function_name == "search_web":
        outcome = cortex_client.search(
            query=arguments["query"],
            max_results=arguments.get("max_results", 5),
            recency=arguments.get("recency", "auto"),
        )
        if not outcome.success:
            return {"error": "Search failed", "message": "No results found"}
        source_rows = [
            {
                "title": hit.title,
                "url": hit.url,
                "confidence": hit.confidence,
                "snippet": hit.snippet,
            }
            for hit in outcome.sources
        ]
        return {
            "summary": outcome.summary,
            "sources": source_rows,
            "query_time": outcome.metadata.processing_time,
        }

    if function_name == "extract_content":
        outcome = cortex_client.extract(
            url=arguments["url"],
            format=arguments.get("format", "text"),
        )
        if not outcome.success:
            return {"error": "Extraction failed", "url": arguments["url"]}
        return {
            "content": outcome.text[:2000],  # Limit for token usage
            "title": outcome.metadata.title,
            "word_count": outcome.metadata.word_count,
            "quality_score": outcome.quality_score,
        }

    if function_name == "validate_claim":
        outcome = cortex_client.validate(
            claim=arguments["claim"],
            context={"domain": arguments.get("domain", "general")},
        )
        return {
            "claim": arguments["claim"],
            "validation_result": outcome.validation_result,
            "confidence_score": outcome.confidence_score,
            "consensus_level": outcome.consensus_level,
            "supporting_sources": len(outcome.evidence.supporting),
            "contradicting_sources": len(outcome.evidence.contradicting),
        }

    return {"error": "Unknown function", "function": function_name}
def gpt_with_cortex_functions(user_message, conversation_history=None):
    """GPT chat with Cortex function calling.

    Makes up to two ChatCompletion calls: the first lets GPT-4 decide whether
    to call one of the Cortex tools; if it does, the tool result is appended
    to the transcript and a second call produces the final answer.

    Args:
        user_message: The user's question.
        conversation_history: Optional list of prior message dicts to prepend.

    Returns:
        The assistant's reply text.
    """
    messages = [
        {
            "role": "system",
            "content": """You are a helpful assistant with access to real-time web information.
You can search the web, extract content from URLs, and validate factual claims.
Always cite your sources when using web information."""
        }
    ]
    # Add conversation history
    if conversation_history:
        messages.extend(conversation_history)
    # Add user message
    messages.append({"role": "user", "content": user_message})
    # First API call
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=messages,
        functions=cortex_functions,
        function_call="auto",
        temperature=0.7
    )
    message = response.choices[0].message
    # Check if GPT wants to call a function
    # NOTE(review): only a single function call per turn is handled; chained
    # tool calls would need a loop here.
    if message.get("function_call"):
        function_name = message["function_call"]["name"]
        function_args = json.loads(message["function_call"]["arguments"])
        # Execute the Cortex function
        function_result = execute_cortex_function(function_name, function_args)
        # Add function call and result to conversation
        messages.append({
            "role": "assistant",
            "content": None,
            "function_call": {
                "name": function_name,
                "arguments": message["function_call"]["arguments"]
            }
        })
        messages.append({
            "role": "function",
            "name": function_name,
            "content": json.dumps(function_result)
        })
        # Second API call to get final response
        final_response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
            temperature=0.7
        )
        return final_response.choices[0].message['content']
    else:
        return message['content']
# Usage
# GPT decides on its own whether to invoke search/extract/validate here.
response = gpt_with_cortex_functions("What are the latest AI safety research developments? Please validate any key claims.")
print(response)
🤖 Advanced Chat Applications​
Multi-Turn Conversation with Context​
class EnhancedChatBot:
    """Multi-turn GPT chat bot augmented with Cortex web search.

    Keeps the full message transcript in memory and caches search results per
    normalized query string so repeated questions do not re-hit Cortex.
    """

    def __init__(self, openai_api_key, cortex_api_key, model="gpt-4"):
        self.openai_api_key = openai_api_key
        self.cortex_client = CortexClient(api_key=cortex_api_key)
        self.model = model
        self.conversation_history = []  # ordered {"role", "content"} message dicts
        self.search_cache = {}  # lowercased/stripped query -> search_data dict

    def add_system_message(self, content):
        """Add a system message to the conversation"""
        self.conversation_history.append({
            "role": "system",
            "content": content
        })

    def search_and_cache(self, query, force_refresh=False):
        """Search with caching to avoid duplicate API calls"""
        cache_key = query.lower().strip()
        if not force_refresh and cache_key in self.search_cache:
            return self.search_cache[cache_key]
        result = self.cortex_client.search(query, max_results=8, recency="week")
        if result.success:
            search_data = {
                "summary": result.summary,
                "sources": result.sources,
                "confidence": result.confidence_score,
                "timestamp": result.metadata.timestamp
            }
            self.search_cache[cache_key] = search_data
            return search_data
        # Failed searches are not cached so the next call can retry.
        return None

    def needs_web_search(self, message):
        """Determine if a message requires web search"""
        # Heuristic: any of these substrings marks the message as time-sensitive.
        search_indicators = [
            "latest", "recent", "current", "today", "now", "update",
            "news", "trending", "what's happening", "new", "breaking"
        ]
        return any(indicator in message.lower() for indicator in search_indicators)

    def extract_topics_for_search(self, message):
        """Extract key topics from user message for targeted search"""
        # Simple keyword extraction (you can use more sophisticated NLP)
        stop_words = {
            "what", "is", "are", "the", "latest", "recent", "current",
            "about", "on", "in", "with", "for", "to", "and", "or"
        }
        words = message.lower().split()
        keywords = [word for word in words if word not in stop_words and len(word) > 3]
        # Combine important keywords
        if len(keywords) >= 2:
            return " ".join(keywords[:4])  # Take first 4 keywords
        else:
            # Too few keywords survived filtering; search the raw message.
            return message

    def chat(self, user_message, enable_web_search=True):
        """Enhanced chat with web search integration.

        NOTE(review): injected search-context system messages accumulate in
        conversation_history and are never pruned — long sessions will grow
        the prompt until the model's context limit; confirm intended.
        """
        # Check if web search is needed
        if enable_web_search and self.needs_web_search(user_message):
            search_query = self.extract_topics_for_search(user_message)
            search_data = self.search_and_cache(search_query)
            if search_data:
                # Add search context
                context_message = f"""
Current web information about "{search_query}":
Summary: {search_data['summary']}
Key Sources:
"""
                for i, source in enumerate(search_data['sources'][:5], 1):
                    context_message += f"\n{i}. {source.title}"
                    context_message += f"\n URL: {source.url}"
                    context_message += f"\n Confidence: {source.confidence:.2f}"
                    context_message += f"\n Snippet: {source.snippet[:150]}..."
                    context_message += "\n"
                context_message += f"\nSearch confidence: {search_data['confidence']:.2f}"
                context_message += f"\nInformation retrieved at: {search_data['timestamp']}"
                self.conversation_history.append({
                    "role": "system",
                    "content": context_message
                })
        # Add user message
        self.conversation_history.append({
            "role": "user",
            "content": user_message
        })
        # Generate response
        response = openai.ChatCompletion.create(
            model=self.model,
            messages=self.conversation_history,
            temperature=0.7,
            max_tokens=1000
        )
        assistant_message = response.choices[0].message['content']
        # Add assistant response to history
        self.conversation_history.append({
            "role": "assistant",
            "content": assistant_message
        })
        return assistant_message

    def validate_response_claims(self, response_text):
        """Validate key claims in the assistant's response"""
        # Extract potential factual claims (simplified)
        sentences = response_text.split('.')
        # Keep non-trivial sentences that do not hedge ("maybe", "might", ...).
        factual_sentences = [
            s.strip() for s in sentences
            if len(s.strip()) > 20 and not any(word in s.lower() for word in
            ['i think', 'maybe', 'possibly', 'might', 'could'])
        ]
        validations = []
        for sentence in factual_sentences[:3]:  # Validate first 3 factual sentences
            validation = self.cortex_client.validate(sentence)
            validations.append({
                "claim": sentence,
                "result": validation.validation_result,
                "confidence": validation.confidence_score
            })
        return validations

    def clear_history(self):
        """Clear conversation history"""
        self.conversation_history = []
        self.search_cache = {}
# Usage
bot = EnhancedChatBot(
    openai_api_key="your_openai_key",
    cortex_api_key="your_cortex_key"
)
# Set system context
bot.add_system_message("""
You are an AI assistant with access to real-time web information.
Always cite your sources when providing current information.
Be accurate and acknowledge when information might be uncertain.
""")
# Chat loop
# Type 'quit', 'exit', or 'bye' to leave the loop.
while True:
    user_input = input("\nYou: ")
    if user_input.lower() in ['quit', 'exit', 'bye']:
        break
    response = bot.chat(user_input)
    print(f"\nAssistant: {response}")
    # Optional: Validate key claims
    validations = bot.validate_response_claims(response)
    if validations:
        print("\nFact-check results:")
        for val in validations:
            status = "✓" if val["result"] in ["VERIFIED", "LIKELY_TRUE"] else "?"
            print(f"{status} {val['claim'][:100]}... ({val['result']})")
Specialized Assistants​
class NewsAssistant:
    """Specialized assistant for news and current events.

    Combines Cortex search/validation with GPT-4 analysis to summarize
    breaking news and fact-check individual claims.
    """

    def __init__(self, openai_api_key, cortex_api_key):
        self.openai_api_key = openai_api_key
        self.cortex_client = CortexClient(api_key=cortex_api_key)

    def get_breaking_news(self, topic=None, region="global"):
        """Get breaking news with GPT analysis"""
        if topic:
            search_query = f"breaking news {topic}"
        else:
            search_query = "breaking news today"
        # Search for breaking news
        # Restricted to a fixed allow-list of major outlets for reliability.
        news_result = self.cortex_client.search(
            query=search_query,
            max_results=10,
            recency="day",
            # NOTE(review): this conditional always evaluates to `region`;
            # it likely intended to omit the country filter when "global".
            country=region if region != "global" else "global",
            domain_filter={
                "include": ["reuters.com", "bbc.com", "cnn.com", "bloomberg.com", "npr.org"]
            }
        )
        if not news_result.success:
            return "Unable to retrieve breaking news at this time."
        # Format news for GPT analysis
        news_context = f"Breaking News Summary:\n{news_result.summary}\n\nNews Articles:\n"
        for i, source in enumerate(news_result.sources, 1):
            news_context += f"\n{i}. {source.title}"
            news_context += f"\n Source: {source.domain}"
            news_context += f"\n Published: {source.published_date}"
            news_context += f"\n Summary: {source.snippet}"
            news_context += f"\n Confidence: {source.confidence:.2f}\n"
        # Use GPT to analyze and summarize
        messages = [
            {
                "role": "system",
                "content": """You are a news analyst. Analyze the breaking news provided and create a comprehensive summary.
Include:
1. Key developments
2. Significance and impact
3. Different perspectives if available
4. What to watch for next
Always cite specific sources and be objective."""
            },
            {
                "role": "user",
                "content": f"Please analyze this breaking news:\n\n{news_context}"
            }
        ]
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
            temperature=0.3,  # Lower temperature for factual content
            max_tokens=1500
        )
        return response.choices[0].message['content']

    def fact_check_news(self, news_claim):
        """Fact-check a news claim.

        Runs Cortex validation plus a contextual search, then asks GPT-4 to
        write the final fact-check report from both evidence sets.
        """
        # Validate the claim
        validation = self.cortex_client.validate(
            claim=news_claim,
            context={"domain": "news", "claim_type": "event"}
        )
        # Search for additional context
        search_result = self.cortex_client.search(
            query=news_claim,
            max_results=8,
            recency="week"
        )
        # Create comprehensive fact-check report with GPT
        fact_check_context = f"""
Claim to fact-check: {news_claim}
Validation Result: {validation.validation_result}
Confidence Score: {validation.confidence_score:.2f}
Consensus Level: {validation.consensus_level}
Supporting Evidence:
"""
        for evidence in validation.evidence.supporting:
            fact_check_context += f"\n- {evidence.source} (Authority: {evidence.authority_score:.2f})"
            fact_check_context += f"\n Snippet: {evidence.snippet}"
        if validation.evidence.contradicting:
            fact_check_context += "\n\nContradicting Evidence:\n"
            for evidence in validation.evidence.contradicting:
                fact_check_context += f"\n- {evidence.source}"
                fact_check_context += f"\n Snippet: {evidence.snippet}"
        if search_result.success:
            fact_check_context += f"\n\nAdditional Context:\n{search_result.summary}"
        messages = [
            {
                "role": "system",
                "content": """You are a fact-checker. Analyze the evidence provided and create a comprehensive fact-check report.
Include:
1. Overall assessment (True/False/Mixed/Unverified)
2. Evidence analysis
3. Source reliability assessment
4. Caveats and limitations
5. Conclusion with confidence level"""
            },
            {
                "role": "user",
                "content": f"Please fact-check this claim:\n\n{fact_check_context}"
            }
        ]
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
            temperature=0.2,
            max_tokens=1000
        )
        return response.choices[0].message['content']
# Usage
# Replace the placeholder strings with real API keys before running.
news_assistant = NewsAssistant("your_openai_key", "your_cortex_key")
# Get breaking news analysis
breaking_news = news_assistant.get_breaking_news("artificial intelligence")
print("Breaking News Analysis:")
print(breaking_news)
# Fact-check a claim
fact_check = news_assistant.fact_check_news("Tesla stock reached an all-time high this week")
print("\nFact-Check Report:")
print(fact_check)
Research Assistant​
class ResearchAssistant:
    """Specialized assistant for research and analysis.

    Fans one topic out into multiple Cortex search angles, deduplicates the
    combined sources, and has GPT-4 write the report / literature review.
    """

    def __init__(self, openai_api_key, cortex_api_key):
        self.openai_api_key = openai_api_key
        self.cortex_client = CortexClient(api_key=cortex_api_key)

    def conduct_research(self, topic, research_depth="standard"):
        """Conduct comprehensive research on a topic.

        Args:
            topic: Subject to research.
            research_depth: "deep" uses five search angles with 8 results
                each; any other value uses three angles with 5 results each.
        """
        # Define search strategies based on depth
        if research_depth == "deep":
            search_queries = [
                f"{topic} latest research",
                f"{topic} academic papers 2024",
                f"{topic} expert analysis",
                f"{topic} industry trends",
                f"{topic} future outlook"
            ]
            max_results_per_query = 8
        else:
            search_queries = [
                f"{topic} overview",
                f"{topic} latest developments",
                f"{topic} expert opinions"
            ]
            max_results_per_query = 5
        # Collect research data
        research_data = []
        all_sources = []
        for query in search_queries:
            result = self.cortex_client.search(
                query=query,
                max_results=max_results_per_query,
                # Academic queries get a wider (year) window than the rest.
                recency="year" if "academic" in query else "month"
            )
            if result.success:
                research_data.append({
                    "query": query,
                    "summary": result.summary,
                    "sources": result.sources
                })
                all_sources.extend(result.sources)
        # Filter and deduplicate sources by URL, keeping the higher-confidence copy
        unique_sources = {}
        for source in all_sources:
            if source.url not in unique_sources:
                unique_sources[source.url] = source
            elif source.confidence > unique_sources[source.url].confidence:
                unique_sources[source.url] = source
        top_sources = sorted(unique_sources.values(), key=lambda x: x.confidence, reverse=True)[:15]
        # Create comprehensive research context
        research_context = f"Research Topic: {topic}\n\n"
        for data in research_data:
            research_context += f"Research Angle: {data['query']}\n"
            research_context += f"Summary: {data['summary']}\n\n"
        research_context += "Top Sources:\n"
        for i, source in enumerate(top_sources, 1):
            research_context += f"{i}. {source.title}\n"
            research_context += f" URL: {source.url}\n"
            research_context += f" Confidence: {source.confidence:.2f}\n"
            research_context += f" Snippet: {source.snippet[:200]}...\n\n"
        # Generate comprehensive research report with GPT
        messages = [
            {
                "role": "system",
                "content": """You are a research analyst. Create a comprehensive research report based on the provided information.
Structure your report as follows:
1. Executive Summary
2. Current State of the Field
3. Key Developments and Trends
4. Expert Perspectives
5. Future Outlook
6. Conclusion and Recommendations
7. Sources and References
Be thorough, objective, and cite specific sources throughout."""
            },
            {
                "role": "user",
                "content": f"Create a comprehensive research report on:\n\n{research_context}"
            }
        ]
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
            temperature=0.3,
            max_tokens=2000
        )
        return response.choices[0].message['content']

    def generate_literature_review(self, topic):
        """Generate a literature review section.

        Searches an academic-domain allow-list, extracts page content from
        the top hits, and asks GPT-4 to synthesize the review.
        """
        # Search for academic papers and research
        academic_result = self.cortex_client.search(
            query=f"{topic} research papers academic studies",
            max_results=12,
            domain_filter={
                "include": ["arxiv.org", "nature.com", "science.org", "pubmed.ncbi.nlm.nih.gov"]
            }
        )
        if not academic_result.success:
            return "Unable to find sufficient academic sources for literature review."
        # Extract content from top papers
        paper_contents = []
        for source in academic_result.sources[:8]:
            content = self.cortex_client.extract(source.url)
            if content.success:
                paper_contents.append({
                    "title": source.title,
                    "url": source.url,
                    "content": content.text[:1500],  # First 1500 chars
                    "confidence": source.confidence
                })
        # Create literature review context
        lit_review_context = f"Literature Review Topic: {topic}\n\n"
        lit_review_context += f"Academic Search Summary: {academic_result.summary}\n\n"
        lit_review_context += "Academic Papers:\n\n"
        for i, paper in enumerate(paper_contents, 1):
            lit_review_context += f"{i}. {paper['title']}\n"
            lit_review_context += f" URL: {paper['url']}\n"
            lit_review_context += f" Key Content: {paper['content'][:500]}...\n"
            lit_review_context += f" Confidence: {paper['confidence']:.2f}\n\n"
        # Generate literature review with GPT
        messages = [
            {
                "role": "system",
                "content": """You are an academic researcher writing a literature review.
Create a structured literature review that:
1. Synthesizes the current state of research
2. Identifies key themes and findings
3. Notes areas of consensus and disagreement
4. Highlights gaps in the literature
5. Suggests directions for future research
Use proper academic tone and cite sources appropriately."""
            },
            {
                "role": "user",
                "content": f"Write a literature review based on:\n\n{lit_review_context}"
            }
        ]
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
            temperature=0.2,
            max_tokens=1500
        )
        return response.choices[0].message['content']
# Usage
# Replace the placeholder strings with real API keys before running.
research_assistant = ResearchAssistant("your_openai_key", "your_cortex_key")
# Conduct comprehensive research
research_report = research_assistant.conduct_research("quantum computing applications", research_depth="deep")
print("Research Report:")
print(research_report)
# Generate literature review
lit_review = research_assistant.generate_literature_review("machine learning interpretability")
print("\nLiterature Review:")
print(lit_review)
📊 Content Generation Workflows​
Blog Post Generator​
class BlogPostGenerator:
    """Generate data-driven blog posts with current information.

    Gathers research and expert-opinion searches from Cortex, validates the
    strongest claims, and has GPT-4 draft the outline or full post.
    """

    def __init__(self, openai_api_key, cortex_api_key):
        self.openai_api_key = openai_api_key
        self.cortex_client = CortexClient(api_key=cortex_api_key)

    def generate_blog_post(self, topic, target_audience="general", tone="informative"):
        """Generate a comprehensive blog post with current information.

        Args:
            topic: Subject of the post.
            target_audience: Audience descriptor interpolated into the prompt.
            tone: Writing tone interpolated into the prompt.
        """
        # Research current information
        research_result = self.cortex_client.search(
            query=f"{topic} latest trends insights 2024",
            max_results=10,
            recency="month"
        )
        if not research_result.success:
            return "Unable to gather current information for blog post."
        # Get expert opinions
        expert_result = self.cortex_client.search(
            query=f"{topic} expert opinion analysis",
            max_results=6,
            domain_filter={
                "include": ["medium.com", "forbes.com", "harvard.edu", "mit.edu"]
            }
        )
        # Validate key claims
        # Only the first five sentences of the summary are checked, and only
        # those that validate with confidence above 0.7 are kept.
        key_claims = []
        sentences = research_result.summary.split('.')
        for sentence in sentences[:5]:
            if len(sentence.strip()) > 30:
                validation = self.cortex_client.validate(sentence.strip())
                if validation.confidence_score > 0.7:
                    key_claims.append({
                        "claim": sentence.strip(),
                        "confidence": validation.confidence_score
                    })
        # Create blog post context
        blog_context = f"""
Blog Post Topic: {topic}
Target Audience: {target_audience}
Tone: {tone}
Current Research Summary: {research_result.summary}
Key Verified Claims:
"""
        for claim in key_claims:
            blog_context += f"- {claim['claim']} (Confidence: {claim['confidence']:.2f})\n"
        blog_context += "\nCurrent Sources:\n"
        for i, source in enumerate(research_result.sources[:8], 1):
            blog_context += f"{i}. {source.title} ({source.url})\n"
        if expert_result.success:
            blog_context += f"\nExpert Insights: {expert_result.summary}\n"
        # Generate blog post with GPT
        messages = [
            {
                "role": "system",
                "content": f"""You are a skilled content writer creating a blog post for {target_audience} audience with a {tone} tone.
Create an engaging blog post with:
1. Compelling headline
2. Introduction that hooks the reader
3. Well-structured main content with subheadings
4. Current data and insights
5. Expert quotes or perspectives
6. Actionable takeaways
7. Conclusion that reinforces key points
8. Sources and references
Make it informative, engaging, and backed by current data."""
            },
            {
                "role": "user",
                "content": f"Write a comprehensive blog post based on:\n\n{blog_context}"
            }
        ]
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
            temperature=0.7,
            max_tokens=2500
        )
        return response.choices[0].message['content']

    def generate_outline(self, topic):
        """Generate a detailed blog post outline"""
        # Research for outline
        result = self.cortex_client.search(
            query=f"{topic} comprehensive guide overview",
            max_results=8
        )
        if not result.success:
            return "Unable to generate outline - research failed."
        outline_context = f"""
Topic: {topic}
Research Summary: {result.summary}
Key Sources:
"""
        for source in result.sources:
            outline_context += f"- {source.title}\n"
        messages = [
            {
                "role": "system",
                "content": """Create a detailed blog post outline with:
1. Working title
2. Target audience
3. Key objectives
4. Detailed section breakdown with subsections
5. Key points to cover in each section
6. Suggested word count for each section
7. Call-to-action ideas
Make the outline comprehensive and logical."""
            },
            {
                "role": "user",
                "content": f"Create a blog post outline for:\n\n{outline_context}"
            }
        ]
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
            temperature=0.5,
            max_tokens=1000
        )
        return response.choices[0].message['content']
# Usage
# Replace the placeholder strings with real API keys before running.
blog_generator = BlogPostGenerator("your_openai_key", "your_cortex_key")
# Generate outline first
outline = blog_generator.generate_outline("artificial intelligence in healthcare")
print("Blog Post Outline:")
print(outline)
# Generate full blog post
blog_post = blog_generator.generate_blog_post(
    topic="artificial intelligence in healthcare",
    target_audience="healthcare professionals",
    tone="professional but accessible"
)
print("\nBlog Post:")
print(blog_post)
💡 Best Practices​
1. Token Management​
def optimize_context_for_tokens(content, max_tokens=3000):
    """Trim *content* so it fits an estimated token budget.

    Uses the rough 1-token-per-4-characters heuristic. Content that already
    fits is returned untouched; otherwise it is cut at sentence ('.')
    boundaries and a truncation marker is appended.
    """
    budget_chars = max_tokens * 4  # Rough estimation: 1 token ≈ 4 characters
    if len(content) <= budget_chars:
        return content

    # Keep whole sentences until the next one would blow the budget.
    kept = []
    used = 0
    for piece in content.split('.'):
        cost = len(piece) + 1  # +1 accounts for the '.' separator
        if used + cost > budget_chars:
            break
        kept.append(piece)
        used += cost
    return '.'.join(kept) + '...[truncated for token limit]'
def count_tokens_estimate(text):
    """Estimate token count with the ~4-characters-per-token heuristic."""
    whole_tokens, _remainder = divmod(len(text), 4)
    return whole_tokens
2. Cost Optimization​
class CostOptimizedCortexGPT:
    """Cost-conscious wrapper around CortexClient and the OpenAI chat API.

    Caches successful search results per exact query string and accumulates
    daily token/call counters so an approximate spend can be reported.
    """

    def __init__(self, openai_api_key, cortex_api_key):
        self.openai_api_key = openai_api_key
        self.cortex_client = CortexClient(api_key=cortex_api_key)
        self.cache = {}  # query string -> successful search result
        self.daily_usage = {"gpt_tokens": 0, "cortex_calls": 0}

    def smart_search(self, query, use_cache=True):
        """Search with cost-conscious caching.

        Only successful results are cached; a failed search is returned as-is
        and will be retried on the next call.
        """
        if use_cache and query in self.cache:
            return self.cache[query]
        result = self.cortex_client.search(query, max_results=5)
        self.daily_usage["cortex_calls"] += 1
        if result.success:
            self.cache[query] = result
        return result

    def generate_response(self, messages, model="gpt-3.5-turbo"):
        """Generate a chat response while tracking token usage.

        The completion budget is derived from a rough estimate of the input
        size. Fix vs. original: the budget is clamped to at least 1 so an
        oversized prompt cannot produce a non-positive max_tokens value
        (which the API rejects), and messages with content=None (e.g.
        function-call turns) no longer crash the join.
        """
        # Estimate input tokens from the textual content of all messages.
        input_text = ' '.join(msg['content'] for msg in messages if msg.get('content'))
        estimated_input_tokens = count_tokens_estimate(input_text)
        # Leave room for the response, but never go below 1 token.
        response_budget = max(1, min(1000, 4000 - estimated_input_tokens))
        response = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            max_tokens=response_budget,
            temperature=0.7
        )
        # Track usage
        self.daily_usage["gpt_tokens"] += response.usage.total_tokens
        return response.choices[0].message['content']

    def get_usage_report(self):
        """Return today's usage counters plus a rough USD cost estimate."""
        estimated_cost = (
            (self.daily_usage["gpt_tokens"] / 1000) * 0.002 +  # GPT-3.5 pricing
            self.daily_usage["cortex_calls"] * 0.01  # Estimated Cortex cost
        )
        return {
            "gpt_tokens_used": self.daily_usage["gpt_tokens"],
            "cortex_calls_made": self.daily_usage["cortex_calls"],
            "estimated_cost_usd": estimated_cost
        }
3. Error Handling and Fallbacks​
class RobustCortexGPT:
    """Cortex + GPT wrapper with retries, exponential backoff, and fallbacks.

    NOTE(review): both retry paths call time.sleep, so the module must
    `import time`; it is absent from the imports shown at the top of this
    guide — confirm before running.
    """

    def __init__(self, openai_api_key, cortex_api_key, fallback_search=None):
        self.openai_api_key = openai_api_key
        self.cortex_client = CortexClient(api_key=cortex_api_key)
        self.fallback_search = fallback_search  # optional callable(query) -> result
        self.max_retries = 3

    def safe_search(self, query):
        """Search with error handling and fallbacks.

        Retries raised errors with exponential backoff, then tries the
        configured fallback; returns None when every path fails.
        """
        for attempt in range(self.max_retries):
            try:
                result = self.cortex_client.search(query)
                if result.success:
                    return result
            except Exception as e:
                print(f"Search attempt {attempt + 1} failed: {e}")
                if attempt < self.max_retries - 1:
                    time.sleep(2 ** attempt)  # Exponential backoff
                    continue
        # Use fallback if available
        if self.fallback_search:
            try:
                return self.fallback_search(query)
            except Exception as e:
                print(f"Fallback search failed: {e}")
        # Return empty result
        return None

    def safe_gpt_call(self, messages, model="gpt-4"):
        """GPT call with error handling; returns an apology string on failure."""
        for attempt in range(self.max_retries):
            try:
                response = openai.ChatCompletion.create(
                    model=model,
                    messages=messages,
                    temperature=0.7,
                    max_tokens=1000
                )
                return response.choices[0].message['content']
            except Exception as e:
                print(f"GPT call attempt {attempt + 1} failed: {e}")
                if attempt < self.max_retries - 1:
                    time.sleep(2 ** attempt)
                    continue
        return "I apologize, but I'm experiencing technical difficulties. Please try again later."
Next: Best Practices → optimization tips and production deployment guidelines