Research Tools
Cortex transforms traditional research workflows by providing AI-powered access to real-time web information, source verification, and comprehensive analysis capabilities. Build research tools that help users discover, validate, and synthesize information from across the web with confidence.
Research Challenges Solved
Traditional Research Limitations
- Time-Intensive: Manual searching across multiple sources
- Source Quality Uncertainty: Difficulty assessing credibility
- Information Overload: Too much data, not enough insight
- Outdated Information: Static databases and cached results
- No Cross-Validation: Limited ability to verify claims
Cortex-Powered Research
- Automated Discovery: AI-driven information gathering
- Source Verification: Automated credibility assessment
- Intelligent Synthesis: Comprehensive analysis and summarization
- Real-Time Data: Always current information
- Multi-Source Validation: Cross-reference facts across sources
Research Tool Examples
Academic Research Assistant
import cortex
from typing import List, Dict
from datetime import datetime, timedelta
class AcademicResearcher:
    """Academic research assistant built on a Cortex client.

    Provides literature reviews, batch fact-checking, and comparative topic
    analysis. Helper methods such as ``extract_key_findings`` and
    ``format_citations`` are assumed to be provided elsewhere — TODO confirm.
    """

    def __init__(self, api_key: str):
        self.cortex = cortex.Client(api_key=api_key)
        self.research_cache = {}      # reserved for memoizing repeated queries
        self.citation_style = "APA"   # default citation format

    def literature_review(self, topic: str, years: int = 5) -> Dict:
        """Conduct comprehensive literature review over the last `years` years."""
        year_now = datetime.now().year
        period = f"{year_now - years}-{year_now}"

        # Primary pass over academic sources within the requested window.
        academic_results = self.cortex.search(
            query=f"{topic} research studies academic papers {period}",
            max_results=20,
            profile="academic",
            time_filter=f"last_{years}_years",
        )

        # Secondary pass focused on the newest developments.
        recent_developments = self.cortex.search(
            query=f"{topic} latest research breakthrough 2025",
            max_results=10,
            profile="academic",
        )

        # Keep only findings that cross-validate with high confidence.
        validated_findings = []
        for finding in self.extract_key_findings(academic_results.summary):
            check = self.cortex.validate(
                claim=finding,
                sources=3,
                academic_sources_only=True,
            )
            if check.confidence <= 0.8:
                continue
            validated_findings.append({
                "finding": finding,
                "confidence": check.confidence,
                "supporting_sources": check.sources,
            })

        return {
            "topic": topic,
            "search_period": period,
            "total_sources": len(academic_results.sources),
            "overview": academic_results.summary,
            "recent_developments": recent_developments.summary,
            "validated_findings": validated_findings,
            "research_gaps": self.identify_research_gaps(academic_results),
            "citations": self.format_citations(academic_results.sources),
            "methodology": self.generate_methodology_notes(),
        }

    def fact_checking_research(self, claims: List[str]) -> Dict:
        """Verify each claim against academic sources and tally verdicts."""
        results = []
        for claim in claims:
            # Gather raw evidence, then run structured cross-validation.
            evidence = self.cortex.search(
                query=f"evidence research {claim}",
                max_results=10,
                profile="fact_checking",
            )
            check = self.cortex.validate(
                claim=claim,
                sources=5,
                require_peer_review=True,
            )
            results.append({
                "claim": claim,
                # verdict is one of "supported", "contradicted", "inconclusive"
                "verdict": check.verdict,
                "confidence": check.confidence,
                "evidence_summary": evidence.summary,
                "supporting_sources": check.supporting_sources,
                "contradicting_sources": check.contradicting_sources,
                "quality_score": self.assess_source_quality(check.sources),
            })

        def tally(verdict: str) -> int:
            # Count how many claims landed on a given verdict.
            return sum(1 for r in results if r["verdict"] == verdict)

        return {
            "total_claims": len(claims),
            "verified_claims": tally("supported"),
            "contradicted_claims": tally("contradicted"),
            "inconclusive_claims": tally("inconclusive"),
            "results": results,
            "overall_credibility": self.calculate_overall_credibility(results),
        }

    def comparative_analysis(self, topics: List[str], aspect: str) -> Dict:
        """Compare several topics along one aspect and derive insights."""
        analyses = {}
        for topic in topics:
            found = self.cortex.search(
                query=f"{topic} {aspect} analysis comparison",
                max_results=8,
                profile="comparative_research",
            )
            analyses[topic] = {
                "summary": found.summary,
                "sources": found.sources,
                "key_points": self.extract_key_points(found.summary),
                "confidence": found.confidence,
            }

        insights = self.generate_comparative_insights(analyses, aspect)
        return {
            "topics": topics,
            "aspect": aspect,
            "individual_analyses": analyses,
            "comparative_insights": insights,
            "recommendations": self.generate_research_recommendations(insights),
            "visualization_data": self.prepare_visualization_data(analyses),
        }
Market Research Platform
class MarketResearcher:
    """Market research workflows: market analysis, competitor intelligence,
    and trend monitoring on top of a Cortex client.

    Fix: ``market_analysis`` previously computed the expanded industry
    profile keywords (``self.industry_profiles.get(...)``) but never used
    them — the variable was dead. The queries now search with the expanded
    profile so known industries get richer keyword coverage; unknown
    industries fall back to the raw industry string, preserving prior
    behavior for them.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        # Maps short industry keys to keyword-rich query terms.
        self.industry_profiles = {
            "tech": "technology startup venture capital",
            "finance": "financial markets banking investment",
            "healthcare": "medical healthcare pharmaceutical",
            "retail": "retail ecommerce consumer goods"
        }

    def market_analysis(self, industry: str, region: str = "global") -> Dict:
        """Comprehensive market analysis for an industry/region pair."""
        # Expand known industry keys into richer query keywords; unknown
        # industries are used verbatim.
        profile = self.industry_profiles.get(industry, industry)

        # Market size and trends
        market_size = self.cortex.search(
            query=f"{profile} market size {region} 2025 growth trends",
            max_results=10,
            profile="market_research"
        )
        # Competitive landscape
        competitors = self.cortex.search(
            query=f"{profile} leading companies competitors market share {region}",
            max_results=15,
            profile="competitive_analysis"
        )
        # Investment and funding trends
        funding = self.cortex.search(
            query=f"{profile} investment funding venture capital {region} 2025",
            max_results=8,
            profile="investment_research"
        )
        # Regulatory environment
        regulations = self.cortex.search(
            query=f"{profile} regulations compliance legal changes {region}",
            max_results=6,
            profile="regulatory_research"
        )
        # Future projections
        projections = self.cortex.search(
            query=f"{profile} future outlook predictions {region} 2025-2030",
            max_results=7,
            profile="trend_analysis"
        )

        return {
            "industry": industry,
            "region": region,
            "analysis_date": datetime.now().isoformat(),
            "market_overview": {
                "size_and_trends": market_size.summary,
                "growth_rate": self.extract_growth_metrics(market_size.summary),
                "key_drivers": self.extract_key_drivers(market_size.summary)
            },
            "competitive_landscape": {
                "summary": competitors.summary,
                "key_players": self.extract_key_players(competitors.summary),
                "market_concentration": self.analyze_market_concentration(competitors.summary)
            },
            "investment_climate": {
                "funding_trends": funding.summary,
                "investment_volume": self.extract_investment_metrics(funding.summary),
                "investor_sentiment": self.assess_investor_sentiment(funding.summary)
            },
            "regulatory_environment": {
                "current_regulations": regulations.summary,
                "upcoming_changes": self.identify_regulatory_changes(regulations.summary)
            },
            "future_outlook": {
                "projections": projections.summary,
                "opportunities": self.identify_opportunities(projections.summary),
                "risks": self.identify_risks(projections.summary)
            },
            "sources": self.consolidate_sources([
                market_size.sources, competitors.sources,
                funding.sources, regulations.sources, projections.sources
            ]),
            "confidence_score": self.calculate_analysis_confidence([
                market_size.confidence, competitors.confidence,
                funding.confidence, regulations.confidence, projections.confidence
            ])
        }

    def competitor_intelligence(self, company: str, competitors: List[str]) -> Dict:
        """Detailed competitor analysis of `company` versus each rival."""
        intelligence = {"target_company": company, "competitors": {}}

        # Analyze target company first so rivals can be compared against it.
        intelligence["target_company_analysis"] = self.analyze_company(company)

        for competitor in competitors:
            competitor_analysis = self.analyze_company(competitor)
            # Direct head-to-head comparison search.
            comparison = self.cortex.search(
                query=f"{company} vs {competitor} comparison analysis",
                max_results=5,
                profile="competitive_analysis"
            )
            intelligence["competitors"][competitor] = {
                "analysis": competitor_analysis,
                "comparison": comparison.summary,
                "strengths": self.extract_strengths(competitor_analysis),
                "weaknesses": self.extract_weaknesses(competitor_analysis),
                "competitive_advantage": self.identify_competitive_advantage(comparison.summary)
            }

        strategic_insights = self.generate_strategic_insights(intelligence)
        return {
            **intelligence,
            "strategic_insights": strategic_insights,
            "competitive_positioning": self.analyze_competitive_positioning(intelligence),
            "recommendations": self.generate_competitive_recommendations(strategic_insights)
        }

    def trend_monitoring(self, keywords: List[str], alert_threshold: float = 0.3) -> Dict:
        """Monitor trends per keyword; flag alerts above `alert_threshold`."""
        trend_data = {}
        for keyword in keywords:
            # Current trend analysis
            current_trend = self.cortex.search(
                query=f"{keyword} trend analysis 2025",
                max_results=10,
                profile="trend_analysis"
            )
            # Historical comparison
            historical = self.cortex.search(
                query=f"{keyword} trend historical comparison past years",
                max_results=5,
                profile="historical_analysis"
            )
            # Future predictions
            predictions = self.cortex.search(
                query=f"{keyword} future predictions outlook trends",
                max_results=5,
                profile="predictive_analysis"
            )
            # Sentiment is derived from the current-trend summary; its
            # "momentum_score" drives alerting — presumably in [0, 1], TODO confirm.
            sentiment = self.analyze_trend_sentiment(current_trend.summary)
            trend_data[keyword] = {
                "current_status": current_trend.summary,
                "historical_context": historical.summary,
                "future_outlook": predictions.summary,
                "sentiment": sentiment,
                "momentum": self.calculate_trend_momentum(current_trend, historical),
                "confidence": current_trend.confidence,
                "alert_triggered": sentiment["momentum_score"] > alert_threshold
            }

        return {
            "monitoring_date": datetime.now().isoformat(),
            "keywords": keywords,
            "trend_data": trend_data,
            "alerts": [k for k, v in trend_data.items() if v["alert_triggered"]],
            "summary": self.generate_trend_summary(trend_data)
        }
Investigative Journalism Tool
class InvestigativeResearcher:
    """Investigative journalism toolkit: story research, thorough claim
    verification, and source-network analysis via a Cortex client.

    Helpers such as ``find_connections`` and ``construct_timeline`` are
    assumed to be defined elsewhere — TODO confirm.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        self.fact_check_threshold = 0.8    # minimum confidence for a verified claim
        self.source_diversity_target = 5   # distinct sources requested per validation

    def investigate_story(self, story_topic: str, key_claims: List[str]) -> Dict:
        """Comprehensive investigative research on a story and its key claims."""
        report = {
            "topic": story_topic,
            "investigation_date": datetime.now().isoformat(),
            "key_claims": key_claims,
        }

        # Broad background pass over the story topic.
        background = self.cortex.search(
            query=f"{story_topic} background history context",
            max_results=15,
            profile="investigative",
        )

        # Verify every claim independently.
        claim_verification = [self.verify_claim_thoroughly(c) for c in key_claims]

        connections = self.find_connections(story_topic, key_claims)
        timeline = self.construct_timeline(story_topic)
        source_analysis = self.analyze_source_credibility(background.sources)

        report.update({
            "background_research": {
                "summary": background.summary,
                "key_facts": self.extract_verifiable_facts(background.summary),
                "sources": background.sources,
            },
            "claim_verification": claim_verification,
            "connections_found": connections,
            "timeline": timeline,
            "source_credibility": source_analysis,
            "red_flags": self.identify_red_flags(claim_verification, connections),
            "confidence_score": self.calculate_investigation_confidence(claim_verification),
            "recommended_actions": self.generate_investigation_recommendations(claim_verification),
        })
        return report

    def verify_claim_thoroughly(self, claim: str) -> Dict:
        """Verify a single claim from supporting, contradicting, and expert angles."""
        # Evidence for the claim...
        pro = self.cortex.search(
            query=f"evidence supports {claim}",
            max_results=10,
            profile="fact_checking",
        )
        # ...and against it.
        con = self.cortex.search(
            query=f"contradicts disputes {claim}",
            max_results=10,
            profile="fact_checking",
        )
        # Structured cross-validation across a diverse source set.
        check = self.cortex.validate(
            claim=claim,
            sources=self.source_diversity_target,
            require_diverse_sources=True,
        )
        # Expert commentary on the claim.
        experts = self.cortex.search(
            query=f"expert opinion analysis {claim}",
            max_results=5,
            profile="expert_analysis",
        )

        return {
            "claim": claim,
            "supporting_evidence": pro.summary,
            "contradicting_evidence": con.summary,
            "expert_opinions": experts.summary,
            "validation_result": check.verdict,
            "confidence_score": check.confidence,
            # Distinct domains among validating sources.
            "source_diversity": len({s.domain for s in check.sources}),
            "credibility_assessment": self.assess_claim_credibility(check),
            "verification_status": self.determine_verification_status(check.confidence),
        }

    def source_network_analysis(self, topic: str) -> Dict:
        """Analyze the network of sources reporting on a topic."""
        # Wide sweep with metadata so relationships can be analyzed.
        sweep = self.cortex.search(
            query=topic,
            max_results=50,
            include_source_metadata=True,
        )

        # Per-source network data keyed by URL.
        network = {src.url: self.analyze_source_network(src) for src in sweep.sources}

        clusters = self.identify_source_clusters(network)
        bias_analysis = self.detect_bias_patterns(network)

        return {
            "topic": topic,
            "total_sources": len(sweep.sources),
            "source_network": network,
            "source_clusters": clusters,
            "bias_analysis": bias_analysis,
            "diversity_score": self.calculate_source_diversity(network),
            "reliability_assessment": self.assess_network_reliability(network),
        }
Scientific Research Assistant
class ScientificResearcher:
    """Scientific research assistant: systematic reviews and hypothesis
    validation against current literature via a Cortex client."""

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        # Databases these examples assume Cortex can reach — TODO confirm.
        self.scientific_databases = ["pubmed", "arxiv", "scholar", "researchgate"]

    def systematic_review(self, research_question: str, inclusion_criteria: List[str]) -> Dict:
        """Conduct a systematic literature review for a research question."""
        # Peer-reviewed search for relevant studies.
        found = self.cortex.search(
            query=f"{research_question} systematic review meta-analysis",
            max_results=30,
            profile="scientific_research",
            filters={"peer_reviewed": True},
        )

        # Screen hits against the inclusion criteria, then grade survivors.
        eligible = self.apply_inclusion_criteria(found.sources, inclusion_criteria)
        quality_assessment = [self.assess_study_quality(study) for study in eligible]

        extracted_data = self.extract_study_data(eligible)
        meta_analysis = self.perform_meta_analysis(extracted_data)

        return {
            "research_question": research_question,
            "search_strategy": self.document_search_strategy(),
            "total_studies_found": len(found.sources),
            "studies_after_filtering": len(eligible),
            "inclusion_criteria": inclusion_criteria,
            "quality_assessment": quality_assessment,
            "extracted_data": extracted_data,
            "meta_analysis": meta_analysis,
            "conclusions": self.draw_systematic_review_conclusions(meta_analysis),
            "limitations": self.identify_review_limitations(quality_assessment),
            "prisma_flow_chart": self.generate_prisma_data(found, eligible),
        }

    def hypothesis_validation(self, hypothesis: str, field: str) -> Dict:
        """Validate a scientific hypothesis against the current literature."""
        # Evidence for the hypothesis...
        pro = self.cortex.search(
            query=f"{hypothesis} supporting evidence {field}",
            max_results=20,
            profile="scientific_validation",
        )
        # ...and against it.
        con = self.cortex.search(
            query=f"{hypothesis} contradicting evidence criticism {field}",
            max_results=15,
            profile="scientific_validation",
        )
        # Methodological perspectives on how the hypothesis is studied.
        methods = self.cortex.search(
            query=f"{hypothesis} methodology research methods {field}",
            max_results=10,
            profile="methodology_analysis",
        )

        consensus = self.analyze_expert_consensus(hypothesis, field)
        score = self.calculate_hypothesis_validation_score(pro, con, consensus)

        def evidence_block(result):
            # Shared shape for the supporting/contradicting summaries.
            return {
                "summary": result.summary,
                "strength": self.assess_evidence_strength(result),
                "study_count": len(result.sources),
            }

        return {
            "hypothesis": hypothesis,
            "field": field,
            "supporting_evidence": evidence_block(pro),
            "contradicting_evidence": evidence_block(con),
            "methodological_considerations": methods.summary,
            "expert_consensus": consensus,
            "validation_score": score,
            "confidence_level": self.determine_confidence_level(score),
            "research_recommendations": self.generate_research_recommendations(score),
            "future_research_directions": self.identify_research_gaps(hypothesis, field),
        }
Research Workflow Automation
Automated Research Pipeline
class ResearchPipeline:
    """Automated multi-step research pipeline.

    Fixes over the original:
    - ``create_pipeline`` now raises ``ValueError`` for an unknown ``depth``
      instead of silently keeping whatever steps were configured before.
    - ``schedule_recurring_research`` called a nonexistent
      ``self.execute_pipeline(...)``; it now builds and runs the pipeline via
      ``create_pipeline(...).execute()``.

    Step implementations (``get_topic_overview`` etc.) are assumed to be
    provided elsewhere — TODO confirm.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        self.pipeline_steps = []   # list of (step_name, step_callable)
        self.results = {}          # reserved for cached step results

    def create_pipeline(self, research_topic: str, depth: str = "comprehensive") -> 'ResearchPipeline':
        """Configure pipeline steps for `research_topic`.

        Args:
            research_topic: topic every step receives.
            depth: "quick" (3 steps) or "comprehensive" (8 steps).

        Returns:
            self, so calls can be chained with ``execute()``.

        Raises:
            ValueError: if `depth` is not a recognized level.
        """
        # Validate depth before touching any state so an invalid call
        # cannot leave a half-configured pipeline behind.
        if depth not in ("quick", "comprehensive"):
            raise ValueError(f"Unknown research depth: {depth!r}")

        if depth == "quick":
            self.pipeline_steps = [
                ("overview", self.get_topic_overview),
                ("key_facts", self.extract_key_facts),
                ("recent_developments", self.get_recent_developments)
            ]
        else:  # comprehensive
            self.pipeline_steps = [
                ("overview", self.get_topic_overview),
                ("background", self.get_background_research),
                ("key_facts", self.extract_key_facts),
                ("expert_opinions", self.gather_expert_opinions),
                ("recent_developments", self.get_recent_developments),
                ("future_trends", self.analyze_future_trends),
                ("validation", self.validate_findings),
                ("synthesis", self.synthesize_research)
            ]
        self.research_topic = research_topic
        return self

    def execute(self) -> Dict:
        """Run every configured step; failed steps are recorded, not fatal."""
        pipeline_results = {}
        for step_name, step_function in self.pipeline_steps:
            try:
                pipeline_results[step_name] = step_function(self.research_topic)
                # Log progress
                print(f"Completed: {step_name}")
            except Exception as e:
                # Record the failure and keep going so one bad step does
                # not abort the whole pipeline.
                pipeline_results[step_name] = {
                    "error": str(e),
                    "status": "failed"
                }

        final_report = self.generate_final_report(pipeline_results)
        return {
            "research_topic": self.research_topic,
            "pipeline_results": pipeline_results,
            "final_report": final_report,
            "execution_time": self.calculate_execution_time(),
            "quality_score": self.assess_research_quality(pipeline_results)
        }

    def schedule_recurring_research(self, topic: str, frequency: str, alert_threshold: float):
        """Schedule recurring research with change detection.

        Returns a monitoring config whose baseline is a full comprehensive
        run of the pipeline for `topic`.
        """
        # Fixed: previously called the nonexistent self.execute_pipeline().
        baseline = self.create_pipeline(topic, "comprehensive").execute()
        return {
            "topic": topic,
            "baseline": baseline,
            "frequency": frequency,  # "daily", "weekly", "monthly"
            "alert_threshold": alert_threshold,
            "last_update": datetime.now(),
            "change_detection": True
        }
Research Report Generator
class ResearchReportGenerator:
    """Formats research data into typed reports with metadata, citations,
    and an executive summary."""

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        # Dispatch table from report type to its generator method.
        # NOTE(review): only the academic generator is defined in this
        # example; the other three are assumed to exist elsewhere — confirm.
        self.report_templates = {
            "academic": self.generate_academic_report,
            "business": self.generate_business_report,
            "policy": self.generate_policy_report,
            "journalism": self.generate_journalism_report
        }

    def generate_report(self, research_data: Dict, report_type: str = "academic") -> Dict:
        """Generate a formatted research report of the requested type.

        Raises:
            ValueError: if `report_type` has no registered template.
        """
        if report_type not in self.report_templates:
            raise ValueError(f"Unknown report type: {report_type}")

        report = self.report_templates[report_type](research_data)

        # Attach generation metadata for traceability.
        report["metadata"] = {
            "generated_at": datetime.now().isoformat(),
            "report_type": report_type,
            "source_count": self.count_unique_sources(research_data),
            "confidence_score": self.calculate_overall_confidence(research_data),
            "version": "1.0"
        }
        report["bibliography"] = self.generate_bibliography(research_data)
        report["executive_summary"] = self.generate_executive_summary(research_data)
        return report

    def generate_academic_report(self, research_data: Dict) -> Dict:
        """Generate an academic-style research report skeleton."""
        topic = research_data.get('topic', 'Unknown Topic')
        return {
            "title": f"Research Report: {topic}",
            "abstract": self.generate_abstract(research_data),
            "introduction": self.generate_introduction(research_data),
            "literature_review": self.generate_literature_review(research_data),
            "methodology": self.generate_methodology_section(research_data),
            "findings": self.generate_findings_section(research_data),
            "discussion": self.generate_discussion(research_data),
            "conclusions": self.generate_conclusions(research_data),
            "limitations": self.generate_limitations(research_data),
            "future_research": self.generate_future_research(research_data),
            "appendices": self.generate_appendices(research_data)
        }

    def export_formats(self, report: Dict) -> Dict:
        """Export the report in every supported output format."""
        converters = [
            ("markdown", self.to_markdown),
            ("html", self.to_html),
            ("pdf", self.to_pdf),
            ("docx", self.to_docx),
            ("json", self.to_json),
        ]
        return {fmt: convert(report) for fmt, convert in converters}
Analytics and Insights
Research Performance Analytics
class ResearchAnalytics:
    """Aggregates research-session history into effectiveness metrics."""

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        self.research_history = []  # session records, appended by callers

    def analyze_research_effectiveness(self, time_period: str = "month") -> Dict:
        """Compute effectiveness metrics over the recent `time_period`."""
        sessions = self.filter_by_time_period(self.research_history, time_period)
        if not sessions:
            # Nothing to average — bail out before any division.
            return {"error": "No research data available for analysis"}

        n = len(sessions)

        def mean_of(key):
            # Average of a numeric field across all sessions.
            return sum(s[key] for s in sessions) / n

        metrics = {
            "total_research_sessions": n,
            "average_sources_per_session": mean_of("source_count"),
            "average_confidence_score": mean_of("confidence"),
            "most_researched_topics": self.get_top_topics(sessions),
            # Share of sessions whose confidence cleared 0.8.
            "research_success_rate": sum(1 for s in sessions if s["confidence"] > 0.8) / n,
            "time_to_insight": mean_of("duration_minutes")
        }

        insights = {
            "performance_trends": self.analyze_performance_trends(sessions),
            "optimization_opportunities": self.identify_optimization_opportunities(metrics),
            "quality_assessment": self.assess_research_quality(sessions),
            "user_behavior_patterns": self.analyze_user_patterns(sessions)
        }

        return {
            "analysis_period": time_period,
            "metrics": metrics,
            "insights": insights,
            "recommendations": self.generate_improvement_recommendations(metrics, insights)
        }
Integration Examples
Jupyter Notebook Integration
# Cortex Research Extension for Jupyter
import cortex
from IPython.display import display, HTML, Markdown
import pandas as pd
class CortexResearchMagic:
    """Jupyter helper that renders Cortex search results inline."""

    def __init__(self, api_key: str):
        self.cortex = cortex.Client(api_key=api_key)

    def research_cell(self, query: str, max_results: int = 10):
        """Run a search and display summary + sources inside a notebook cell."""
        result = self.cortex.search(
            query=query,
            max_results=max_results,
            include_sources=True
        )

        # Render the summary as markdown first...
        display(Markdown(f"## Research Results: {query}"))
        display(Markdown(result.summary))

        # ...then the sources as a tabular DataFrame.
        rows = [
            {
                "Title": src.title,
                "URL": src.url,
                "Domain": src.domain,
                "Published": src.published_date,
                "Relevance": f"{src.relevance_score:.2f}"
            }
            for src in result.sources
        ]
        display(pd.DataFrame(rows))
        return result
# Usage in Jupyter
# %load_ext cortex_research
# %%research "machine learning trends 2025"
API Integration for Custom Tools
from typing import List

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

import cortex
app = FastAPI(title="Research API")
# NOTE(review): hard-coded placeholder credential — load from an environment
# variable or secret store in real deployments.
cortex_client = cortex.Client(api_key="your_api_key")

class ResearchRequest(BaseModel):
    # Request body for the /research endpoint.
    query: str                        # search query text
    max_results: int = 10             # cap on returned sources
    research_type: str = "general"    # passed through as the Cortex search profile
    include_validation: bool = False  # when True, also cross-validate the summary
@app.post("/research")
async def conduct_research(request: ResearchRequest):
    """Run a Cortex search for the request and optionally validate the summary."""
    try:
        # Search with the caller-selected profile.
        result = cortex_client.search(
            query=request.query,
            max_results=request.max_results,
            profile=request.research_type
        )

        response = {
            "query": request.query,
            "summary": result.summary,
            "sources": [src.dict() for src in result.sources],
            "confidence": result.confidence
        }

        # Optionally cross-validate the generated summary against 5 sources.
        if request.include_validation:
            check = cortex_client.validate(
                claim=result.summary,
                sources=5
            )
            response["validation"] = {
                "verdict": check.verdict,
                "confidence": check.confidence
            }

        return response
    except Exception as e:
        # Surface any failure to the client as a 500 with the message.
        raise HTTPException(status_code=500, detail=str(e))
@app.post("/fact-check")
async def fact_check(claims: List[str]):
    """Batch fact-checking endpoint: validate each claim against 3 sources.

    Fix: an empty `claims` list previously raised ZeroDivisionError while
    averaging confidences; it now returns an empty result set with an
    overall_confidence of 0.0 (backward-compatible for non-empty input).
    """
    results = []
    for claim in claims:
        validation = cortex_client.validate(claim=claim, sources=3)
        results.append({
            "claim": claim,
            "verdict": validation.verdict,
            "confidence": validation.confidence,
            "sources": [s.dict() for s in validation.sources]
        })

    # Guard the average against an empty batch.
    overall_confidence = (
        sum(r["confidence"] for r in results) / len(results) if results else 0.0
    )
    return {
        "total_claims": len(claims),
        "results": results,
        "overall_confidence": overall_confidence
    }
Best Practices for Research Tools
Quality Assurance
class ResearchQualityController:
    """Scores research results against minimum quality thresholds.

    Each of the four quality factors contributes 0.25 to the overall score.
    """

    def __init__(self, cortex_client):
        self.cortex = cortex_client
        self.quality_thresholds = {
            "minimum_sources": 3,      # at least this many sources
            "minimum_confidence": 0.7, # minimum acceptable confidence
            "source_diversity": 0.6,   # distinct domains / total sources
            "recency_weight": 0.3      # recent sources / total sources
        }

    def assess_research_quality(self, research_result: Dict) -> Dict:
        """Assess a research result dict and return a scored quality report."""
        thresholds = self.quality_thresholds
        sources = research_result.get("sources", [])
        source_count = len(sources)
        confidence = research_result.get("confidence", 0)

        # Distinct domains among sources (empty-string domain still counts once).
        domains = {s.get("domain", "") for s in sources}
        diversity_ratio = len(domains) / max(source_count, 1)

        # Fraction of sources published recently.
        recent_count = sum(
            1 for s in sources if self.is_recent(s.get("published_date"))
        )
        recency_ratio = recent_count / max(source_count, 1)

        quality_factors = {
            "sufficient_sources": source_count >= thresholds["minimum_sources"],
            "high_confidence": confidence >= thresholds["minimum_confidence"],
            "diverse_sources": diversity_ratio >= thresholds["source_diversity"],
            "recent_sources": recency_ratio >= thresholds["recency_weight"],
        }
        # 0.25 per satisfied factor — identical to summing four 0.25 bumps.
        quality_score = 0.25 * sum(quality_factors.values())

        return {
            "overall_quality_score": quality_score,
            "quality_factors": quality_factors,
            "recommendations": self.generate_quality_recommendations(quality_factors),
            "grade": self.assign_quality_grade(quality_score)
        }
Next: Security Overview →