Skip to main content

Chatbots & Conversational AI

Transform your chatbots from static, knowledge-limited assistants into dynamic, web-aware conversational partners. Cortex enables your chatbots to access real-time information, verify facts, and provide users with current, accurate responses backed by credible sources.

The Chatbot Knowledge Gap

Traditional Chatbot Limitations

  • Outdated Information: Responses based on training data that may be months or years old
  • Hallucination Problems: Making up facts when knowledge is incomplete
  • No Source Attribution: Unable to cite where information comes from
  • Static Responses: Same answers regardless of current events or changes

Cortex-Powered Chatbots

  • Real-Time Knowledge: Access to current information from across the web
  • Source Citations: Every response backed by verifiable sources
  • Fact Verification: Cross-reference information for accuracy
  • Dynamic Responses: Answers that adapt to current events and trends

Implementation Examples

Customer Support Chatbot

import cortex
from typing import Dict, List

class SupportChatbot:
    """Customer-support chatbot that backs its answers with live web search.

    NOTE(review): ``classify_intent``, ``handle_company_news`` and
    ``handle_general_inquiry`` are referenced by :meth:`respond` but are not
    defined in this excerpt -- they must be supplied elsewhere.
    """

    def __init__(self, api_key: str, company_context: str):
        self.cortex = cortex.Client(api_key=api_key)
        # Company/product name used to scope product searches.
        self.company_context = company_context
        self.conversation_history = []

    def respond(self, user_message: str) -> Dict:
        """Generate response with real-time context"""
        # Route the message to the matching intent handler.
        intent = self.classify_intent(user_message)

        if intent == "product_inquiry":
            return self.handle_product_inquiry(user_message)
        elif intent == "technical_support":
            return self.handle_technical_support(user_message)
        elif intent == "company_news":
            return self.handle_company_news(user_message)
        else:
            return self.handle_general_inquiry(user_message)

    def handle_product_inquiry(self, message: str) -> Dict:
        """Handle product-related questions with current info"""
        # Scope the web search to this company's products.
        search_query = f"{self.company_context} {message} latest features pricing"

        result = self.cortex.search(
            query=search_query,
            max_results=5,
            profile="business_info"
        )

        # Format response for conversational context
        response = f"""
Based on the latest information I found:

{result.summary}

Here are my sources for this information:
"""

        # Cite at most three sources inline.
        for i, source in enumerate(result.sources[:3], 1):
            response += f"\n{i}. {source.title} - {source.url}"

        return {
            "response": response,
            "confidence": result.confidence,
            "sources": result.sources,
            "follow_up_suggestions": [
                "Would you like more details about any specific feature?",
                "Do you need help with pricing or plans?",
                "Would you like to speak with a sales representative?"
            ]
        }

    def handle_technical_support(self, message: str) -> Dict:
        """Handle technical issues with current solutions"""
        # Search for current solutions and known issues
        tech_query = f"{message} solution troubleshooting 2025"

        result = self.cortex.search(
            query=tech_query,
            max_results=7,
            profile="technical_support"
        )

        # Validate the solution against official documentation
        validation = self.cortex.validate(
            claim=f"The solution for {message} is {result.summary}",
            sources=3
        )

        # Only present the answer as verified above the 0.8 threshold;
        # otherwise hedge and suggest escalation.
        if validation.confidence > 0.8:
            response = f"""
I found a solution for your issue:

{result.summary}

This solution has been verified across multiple sources (confidence: {validation.confidence:.1%}).

Would you like me to walk you through the steps?
"""
        else:
            response = f"""
I found some information about your issue, but I want to make sure it's accurate:

{result.summary}

However, I recommend contacting our technical support team for personalized assistance, as the available information may not be complete.
"""

        return {
            "response": response,
            "confidence": validation.confidence,
            "verified": validation.confidence > 0.8,
            "escalate": validation.confidence < 0.8
        }

E-commerce Shopping Assistant

class ShoppingAssistant:
    """E-commerce assistant that recommends and compares products with live data.

    NOTE(review): ``generate_recommendation`` is referenced by
    :meth:`compare_products` but not defined in this excerpt -- it must be
    supplied elsewhere.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        # Reserved for personalization; not used in this excerpt.
        self.user_preferences = {}
        self.cart = []

    def product_recommendation(self, query: str, budget: float = None) -> Dict:
        """Recommend products based on current market data"""
        # Build search query with context
        search_parts = [query, "2025", "best", "reviews"]
        if budget:
            search_parts.append(f"under ${budget}")

        search_query = " ".join(search_parts)

        # Get current product information
        products = self.cortex.search(
            query=search_query,
            max_results=10,
            profile="product_reviews"
        )

        # Get price comparisons
        price_comparison = self.cortex.search(
            query=f"{query} price comparison deals discounts",
            max_results=5,
            profile="price_tracking"
        )

        # Combine information
        response = f"""
Here are the current top recommendations for {query}:

**Product Overview:**
{products.summary}

**Current Pricing & Deals:**
{price_comparison.summary}

These recommendations are based on the latest reviews and current market prices.
"""

        return {
            "response": response,
            "products": products,
            "pricing": price_comparison,
            "last_updated": "real-time",
            "follow_up": [
                "Would you like specific details about any of these products?",
                "Do you want me to check for current discounts or coupon codes?",
                "Should I set up price alerts for items you're interested in?"
            ]
        }

    def compare_products(self, product1: str, product2: str) -> Dict:
        """Compare two products with current information"""
        comparison_query = f"{product1} vs {product2} comparison 2025"

        comparison = self.cortex.search(
            query=comparison_query,
            max_results=8,
            profile="product_comparison"
        )

        # Get individual product details
        product1_details = self.cortex.search(
            query=f"{product1} specs features reviews",
            max_results=5
        )

        product2_details = self.cortex.search(
            query=f"{product2} specs features reviews",
            max_results=5
        )

        return {
            "comparison_summary": comparison.summary,
            "product1_details": product1_details.summary,
            "product2_details": product2_details.summary,
            "sources": comparison.sources + product1_details.sources + product2_details.sources,
            "recommendation": self.generate_recommendation(comparison, product1_details, product2_details)
        }

News & Information Chatbot

class NewsBot:
    """News chatbot producing daily briefings and breaking-news alerts.

    NOTE(review): ``assess_urgency`` is referenced by
    :meth:`breaking_news_alert` but not defined in this excerpt -- it must be
    supplied elsewhere.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        self.user_interests = []
        self.briefing_history = []

    def daily_briefing(self, topics: List[str] = None) -> Dict:
        """Generate personalized daily news briefing"""
        # Fall back to stored interests, then to sensible defaults.
        if not topics:
            topics = self.user_interests or ["technology", "business", "science"]

        briefing = {}

        for topic in topics:
            # Get today's top stories
            news = self.cortex.search(
                query=f"{topic} news today latest developments",
                max_results=5,
                time_filter="today",
                profile="news"
            )

            briefing[topic] = {
                "summary": news.summary,
                "top_stories": news.sources[:3],
                "confidence": news.confidence
            }

        # Generate conversational briefing
        response = "Here's your personalized briefing for today:\n\n"

        for topic, content in briefing.items():
            response += f"**{topic.upper()}**\n{content['summary']}\n\n"

        response += "Would you like me to dive deeper into any of these topics?"

        return {
            "response": response,
            "briefing": briefing,
            "follow_up_options": list(briefing.keys())
        }

    def breaking_news_alert(self, keywords: List[str]) -> Dict:
        """Check for breaking news on specific topics"""
        alerts = []

        for keyword in keywords:
            breaking = self.cortex.search(
                query=f"{keyword} breaking news urgent latest",
                max_results=3,
                time_filter="last_hour",
                sort_by="recency"
            )

            if breaking.sources:  # If there are recent results
                alerts.append({
                    "keyword": keyword,
                    "alert": breaking.summary,
                    "sources": breaking.sources,
                    "urgency": self.assess_urgency(breaking.summary)
                })

        if alerts:
            response = "🚨 Breaking News Alerts:\n\n"
            for alert in alerts:
                # Red for high urgency, yellow otherwise.
                urgency_emoji = "🔴" if alert["urgency"] == "high" else "🟡"
                response += f"{urgency_emoji} **{alert['keyword'].upper()}**\n{alert['alert']}\n\n"
        else:
            response = "No breaking news found for your monitored topics."

        return {
            "response": response,
            "alerts": alerts,
            "has_breaking_news": len(alerts) > 0
        }

Educational Tutor Bot

class TutorBot:
    """Educational tutor that explains topics with verified, current information.

    NOTE(review): ``extract_key_facts``, ``generate_quiz_questions`` and
    ``identify_learning_opportunities`` are referenced below but not defined
    in this excerpt -- they must be supplied elsewhere.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        self.student_level = "intermediate"
        self.learning_history = []

    def explain_topic(self, topic: str, depth: str = "medium") -> Dict:
        """Explain a topic with current information and examples"""
        # Get comprehensive information
        explanation = self.cortex.search(
            query=f"{topic} explanation tutorial {depth} level",
            max_results=8,
            profile="educational"
        )

        # Get current examples and case studies
        examples = self.cortex.search(
            query=f"{topic} examples case studies real world applications 2025",
            max_results=5
        )

        # Verify key facts; keep only those confirmed above 0.8 confidence.
        key_facts = self.extract_key_facts(explanation.summary)
        verified_facts = []

        for fact in key_facts:
            verification = self.cortex.validate(claim=fact, sources=3)
            if verification.confidence > 0.8:
                verified_facts.append(fact)

        response = f"""
Let me explain {topic} for you:

**Core Explanation:**
{explanation.summary}

**Current Real-World Examples:**
{examples.summary}

**Key Verified Facts:**
"""

        for i, fact in enumerate(verified_facts, 1):
            response += f"\n{i}. {fact}"

        response += f"\n\nSources: {', '.join([s.title for s in explanation.sources[:3]])}"

        return {
            "response": response,
            "topic": topic,
            "depth": depth,
            "verified_facts": verified_facts,
            "examples": examples.sources,
            "quiz_questions": self.generate_quiz_questions(topic, verified_facts)
        }

    def current_developments(self, subject: str) -> Dict:
        """Get latest developments in a subject area"""
        developments = self.cortex.search(
            query=f"latest developments {subject} 2025 research breakthroughs",
            max_results=7,
            time_filter="last_month"
        )

        response = f"""
Here are the latest developments in {subject}:

{developments.summary}

These are recent developments that might not be in traditional textbooks yet!

Would you like me to explain how these developments relate to the fundamental concepts?
"""

        return {
            "response": response,
            "developments": developments.sources,
            "learning_opportunities": self.identify_learning_opportunities(developments.summary)
        }

Chatbot Frameworks Integration

Microsoft Bot Framework

from botbuilder.core import ActivityHandler, MessageFactory, TurnContext
from botbuilder.schema import ChannelAccount
import cortex

class CortexBot(ActivityHandler):
    """Bot Framework handler that answers each message via a Cortex web search."""

    def __init__(self, cortex_api_key: str):
        self.cortex = cortex.Client(api_key=cortex_api_key)

    async def on_message_activity(self, turn_context: TurnContext):
        """Reply to a message with a summary plus up to three cited sources."""
        user_message = turn_context.activity.text

        # Get web-aware response.
        # NOTE(review): this looks like a blocking call inside an async
        # handler -- confirm whether the client offers an async variant.
        result = self.cortex.search(
            query=user_message,
            max_results=5,
            include_sources=True
        )

        # Format response with sources
        response_text = f"{result.summary}\n\n"
        response_text += "Sources:\n"
        for i, source in enumerate(result.sources[:3], 1):
            response_text += f"{i}. {source.title}\n"

        response_activity = MessageFactory.text(response_text)
        await turn_context.send_activity(response_activity)

Rasa Integration

from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
import cortex

class ActionWebSearch(Action):
    """Rasa custom action that answers the user's latest message via Cortex.

    NOTE(review): the API key is hard-coded for illustration; load it from
    configuration/secrets in production.
    """

    def __init__(self):
        self.cortex = cortex.Client(api_key="your_api_key")

    def name(self) -> str:
        # Action name referenced from the Rasa domain file.
        return "action_web_search"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: dict) -> list:

        # Get user's query
        query = tracker.latest_message.get('text')

        # Search with Cortex
        try:
            result = self.cortex.search(
                query=query,
                max_results=5
            )

            # Send response: plain text plus a JSON payload with citations.
            dispatcher.utter_message(
                text=result.summary,
                json_message={
                    "sources": [s.dict() for s in result.sources],
                    "confidence": result.confidence
                }
            )

        except Exception:
            # Best-effort fallback: apologize instead of crashing the
            # conversation. (Dropped the unused `as e` binding.)
            dispatcher.utter_message(
                text="I'm sorry, I couldn't find current information about that. Please try rephrasing your question."
            )

        return []

Dialogflow Integration

from google.cloud import dialogflow
import cortex
import json

class DialogflowWebhook:
    """Dialogflow fulfillment webhook that answers `search.web` intents via Cortex."""

    def __init__(self, cortex_api_key: str):
        self.cortex = cortex.Client(api_key=cortex_api_key)

    def handle_webhook(self, request):
        """Handle Dialogflow webhook with Cortex integration"""
        req = request.get_json()

        # NOTE(review): raw key access raises KeyError on malformed payloads;
        # consider .get() chains with validation in production.
        intent_name = req['queryResult']['intent']['displayName']
        query_text = req['queryResult']['queryText']

        if intent_name == "search.web":
            # Use Cortex for web search
            result = self.cortex.search(
                query=query_text,
                max_results=5
            )

            response = {
                "fulfillmentText": result.summary,
                "fulfillmentMessages": [
                    {
                        "text": {
                            "text": [result.summary]
                        }
                    },
                    {
                        "payload": {
                            "sources": [s.dict() for s in result.sources],
                            "confidence": result.confidence
                        }
                    }
                ]
            }

            return response

        # Default response for other intents
        return {"fulfillmentText": "I understand, but I need more specific information to help you."}

Advanced Chatbot Features

Context-Aware Conversations

class ContextAwareChatbot:
    """Chatbot that threads recent conversation history into each search.

    NOTE(review): this snippet relies on ``datetime`` being imported at module
    level, and ``conversation_context`` grows without bound -- consider
    trimming it in production.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        # Shared log of all users' turns; filtered per user in build_context.
        self.conversation_context = []
        self.user_profile = {}

    def respond_with_context(self, message: str, user_id: str) -> Dict:
        """Generate response considering conversation history"""
        # Build context from conversation history
        context = self.build_context(user_id)

        # Enhanced query with context
        enhanced_query = f"{message} context: {context}"

        result = self.cortex.search(
            query=enhanced_query,
            max_results=5,
            user_context=self.user_profile.get(user_id, {})
        )

        # Update conversation context
        self.conversation_context.append({
            "user_id": user_id,
            "message": message,
            "response": result.summary,
            "timestamp": datetime.now(),
            "sources": result.sources
        })

        return {
            "response": result.summary,
            "context_aware": True,
            "personalized": True,
            "sources": result.sources
        }

    def build_context(self, user_id: str) -> str:
        """Build conversation context for better responses"""
        # Only this user's turns from the last 10 entries overall.
        recent_messages = [
            ctx for ctx in self.conversation_context[-10:]  # Last 10 messages
            if ctx["user_id"] == user_id
        ]

        if not recent_messages:
            return ""

        context_parts = []
        for msg in recent_messages:
            # Truncate stored responses to keep the query compact.
            context_parts.append(f"Previous: {msg['message']} -> {msg['response'][:100]}...")

        return " | ".join(context_parts)

Multi-Language Support

class MultilingualChatbot:
    """Chatbot that searches in English and replies in the user's language.

    NOTE(review): ``translate_to_english`` and ``translate_from_english`` are
    referenced below but not defined in this excerpt -- they must be supplied
    elsewhere.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        self.supported_languages = ['en', 'es', 'fr', 'de', 'it', 'pt', 'zh', 'ja']

    def respond_multilingual(self, message: str, language: str = 'en') -> Dict:
        """Respond in user's preferred language"""
        # Translate query to English for better search results
        if language != 'en':
            english_query = self.translate_to_english(message, language)
        else:
            english_query = message

        # Search with English query
        result = self.cortex.search(
            query=english_query,
            max_results=5,
            language_preference=language
        )

        # Translate response back to user's language
        if language != 'en':
            translated_response = self.translate_from_english(result.summary, language)
        else:
            translated_response = result.summary

        return {
            "response": translated_response,
            "original_language": language,
            "sources": result.sources,
            "confidence": result.confidence
        }

Sentiment-Aware Responses

class SentimentAwareChatbot:
    """Chatbot that tunes its search strategy and tone to the user's sentiment.

    NOTE(review): ``analyze_sentiment`` and ``adapt_response_tone`` are
    referenced below but not defined in this excerpt -- they must be supplied
    elsewhere.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client

    def respond_with_sentiment(self, message: str) -> Dict:
        """Adjust response based on user sentiment"""
        # Analyze sentiment
        sentiment = self.analyze_sentiment(message)

        # Adjust search strategy based on sentiment
        if sentiment == "frustrated":
            search_query = f"{message} solution help quick fix"
            tone = "empathetic"
        elif sentiment == "curious":
            search_query = f"{message} detailed explanation learn more"
            tone = "educational"
        elif sentiment == "urgent":
            search_query = f"{message} immediate solution emergency"
            tone = "direct"
        else:
            search_query = message
            tone = "neutral"

        result = self.cortex.search(
            query=search_query,
            max_results=5,
            response_tone=tone
        )

        # Adapt response based on sentiment
        adapted_response = self.adapt_response_tone(result.summary, sentiment)

        return {
            "response": adapted_response,
            "detected_sentiment": sentiment,
            "tone_used": tone,
            "sources": result.sources
        }

Performance Optimization

Response Caching

import hashlib
import json
from datetime import datetime, timedelta

class CachedChatbot:
    """Chatbot wrapper that caches search responses for a short TTL."""

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        self.cache = {}
        self.cache_duration = 300  # 5 minutes

    def get_cached_response(self, query: str) -> Dict:
        """Get response with intelligent caching"""
        # Case-insensitive key; md5 is fine here (cache key, not security).
        cache_key = hashlib.md5(query.lower().encode()).hexdigest()

        # Check cache
        entry = self.cache.get(cache_key)
        if entry is not None:
            if datetime.now() - entry["timestamp"] < timedelta(seconds=self.cache_duration):
                # Bug fix: flag the *returned* response as cached. The original
                # set from_cache on the cache wrapper dict, so callers always
                # saw from_cache=False on hits. Return a copy so the cached
                # payload itself is never mutated.
                return {**entry["response"], "from_cache": True}

        # Get fresh response
        result = self.cortex.search(query=query, max_results=5)

        response = {
            "response": result.summary,
            "sources": result.sources,
            "confidence": result.confidence,
            "from_cache": False
        }

        # Cache the response
        self.cache[cache_key] = {
            "response": response,
            "timestamp": datetime.now()
        }

        return response

Streaming Responses

import asyncio

class StreamingChatbot:
    """Chatbot that streams its answer to a callback in small chunks.

    NOTE(review): ``chunk_response`` is referenced below but not defined in
    this excerpt -- it must be supplied elsewhere.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client

    async def stream_response(self, query: str, callback):
        """Stream response as it's generated"""
        # Start the search concurrently so we can acknowledge right away.
        search_task = asyncio.create_task(
            self.cortex.search_async(query=query, max_results=5)
        )

        # Send immediate acknowledgment
        await callback({"type": "acknowledgment", "message": "Searching for information..."})

        # Wait for result
        result = await search_task

        # Stream response in chunks
        response_chunks = self.chunk_response(result.summary)

        for chunk in response_chunks:
            await callback({"type": "text_chunk", "content": chunk})
            await asyncio.sleep(0.1)  # Small delay for natural feel

        # Send sources
        await callback({
            "type": "sources",
            "sources": [s.dict() for s in result.sources],
            "confidence": result.confidence
        })

        # Send completion
        await callback({"type": "complete"})

Analytics & Insights

Conversation Analytics

class ChatbotAnalytics:
    """In-memory logger and report generator for chatbot conversations.

    NOTE(review): ``analyze_topics`` and ``generate_actionable_insights`` are
    referenced by :meth:`generate_insights` but not defined in this excerpt;
    the snippet also relies on module-level ``datetime``/``timedelta`` imports.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        self.conversation_logs = []

    def log_conversation(self, user_id: str, message: str, response: Dict):
        """Log conversation for analytics"""
        self.conversation_logs.append({
            "timestamp": datetime.now(),
            "user_id": user_id,
            "message": message,
            "response": response,
            "sources_used": len(response.get("sources", [])),
            "confidence": response.get("confidence", 0),
            "response_time": response.get("response_time_ms", 0)
        })

    def generate_insights(self, time_period: str = "week") -> Dict:
        """Generate insights from conversation data"""
        # Filter logs by time period ("week" = 7 days, anything else = 30).
        cutoff_date = datetime.now() - timedelta(days=7 if time_period == "week" else 30)
        recent_logs = [log for log in self.conversation_logs if log["timestamp"] > cutoff_date]

        if not recent_logs:
            return {"error": "No data available for the specified period"}

        # Calculate metrics (guarded above, so the divisions are safe).
        total_conversations = len(recent_logs)
        avg_confidence = sum(log["confidence"] for log in recent_logs) / total_conversations
        avg_response_time = sum(log["response_time"] for log in recent_logs) / total_conversations

        # Topic analysis
        common_topics = self.analyze_topics([log["message"] for log in recent_logs])

        # User satisfaction (based on confidence scores)
        high_confidence_responses = len([log for log in recent_logs if log["confidence"] > 0.8])
        satisfaction_rate = high_confidence_responses / total_conversations

        return {
            "period": time_period,
            "total_conversations": total_conversations,
            "average_confidence": avg_confidence,
            "average_response_time_ms": avg_response_time,
            "satisfaction_rate": satisfaction_rate,
            "common_topics": common_topics,
            "insights": self.generate_actionable_insights(recent_logs)
        }

Next: Research Tools →