import asyncio

from fastapi import APIRouter, HTTPException

from app.services.openai_client import OpenAIClient, AIFactChecker
from app.config import OPENAI_API_KEY
from app.models.ai_fact_check_models import (
    AIFactCheckRequest,
    AIFactCheckResponse,
    VerificationResult,
    TokenUsage,
    ErrorResponse,
)

# Initialize the router, the OpenAI client, and the fact checker
aifact_check_router = APIRouter()
openai_client = OpenAIClient(api_key=OPENAI_API_KEY)
fact_checker = AIFactChecker(openai_client=openai_client)
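
# Note: a sketch of the expected wiring, not code from this module. The router is
# assumed to be registered on the FastAPI application elsewhere (commonly the app
# entry point), along the lines of:
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(aifact_check_router)
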
@aifact_check_router.post(
    "/aicheck-facts",
    response_model=AIFactCheckResponse,
    responses={400: {"model": ErrorResponse}, 500: {"model": ErrorResponse}},
)
async def ai_fact_check(request: AIFactCheckRequest):
    """
    Fact-check a statement against the content of multiple webpage URLs.

    Input:
    - urls: list of webpage URLs to analyze (with or without an http/https scheme)
    - content: the statement to verify

    Response:
    - JSON with a verification result per URL, the aggregated sources, and total token usage
    """
    try:
        results = {}
        all_sources = set()
        total_prompt_tokens = 0
        total_completion_tokens = 0
        total_tokens = 0

        # Check every URL concurrently; return_exceptions=True keeps one failing
        # URL from cancelling the others and hands its exception back per URL.
        tasks = [
            fact_checker.check_fact(url=url, query=request.content)
            for url in request.urls
        ]
        fact_check_results = await asyncio.gather(*tasks, return_exceptions=True)

        # Build a VerificationResult per URL
        for url, result in zip(request.urls, fact_check_results):
            if isinstance(result, Exception):
                # The check for this URL failed; report the error but keep going.
                results[url] = VerificationResult(
                    verdict="Error",
                    confidence="Low",
                    evidence=f"Error checking URL: {result}",
                    reasoning="URL processing failed",
                    missing_info="Could not access or process the URL",
                )
                continue

            results[url] = VerificationResult(
                verdict=result["verification_result"]["verdict"],
                confidence=result["verification_result"]["confidence"],
                evidence=result["verification_result"]["evidence"],
                reasoning=result["verification_result"]["reasoning"],
                missing_info=result["verification_result"].get("missing_info"),
            )
            all_sources.update(result["sources"])

            # Accumulate token usage across all successful checks
            total_prompt_tokens += result["token_usage"]["prompt_tokens"]
            total_completion_tokens += result["token_usage"]["completion_tokens"]
            total_tokens += result["token_usage"]["total_tokens"]

        token_usage = TokenUsage(
            prompt_tokens=total_prompt_tokens,
            completion_tokens=total_completion_tokens,
            total_tokens=total_tokens,
        )

        return AIFactCheckResponse(
            query=request.content,
            verification_result=results,
            sources=list(all_sources),
            token_usage=token_usage,
        )

    except ValueError as e:
        raise HTTPException(
            status_code=400,
            detail=ErrorResponse(
                detail=str(e), error_code="INVALID_URL", path="/aicheck-facts"
            ).dict(),
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=ErrorResponse(
                detail=f"Error processing fact-check request: {str(e)}",
                error_code="PROCESSING_ERROR",
                path="/aicheck-facts",
            ).dict(),
        )
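
# --- Illustrative usage (a sketch, not part of the application code) ------------
# Assuming the app is served locally on port 8000 and that AIFactCheckRequest
# exposes exactly the "urls" and "content" fields described in the docstring
# above, the endpoint could be exercised like this:
#
#   import httpx
#
#   payload = {
#       "urls": ["https://example.com/article"],
#       "content": "The Eiffel Tower is 330 metres tall.",
#   }
#   response = httpx.post("http://localhost:8000/aicheck-facts", json=payload)
#   data = response.json()  # verification_result per URL, sources, token_usage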