from fastapi import APIRouter, HTTPException
import httpx
import asyncio
import logging
from typing import Union, Optional, Dict, Any, List

from app.config import GOOGLE_API_KEY, GOOGLE_FACT_CHECK_BASE_URL, OPENAI_API_KEY
from app.api.scrap_websites import search_websites, SearchRequest
from app.services.openai_client import OpenAIClient, AIFactChecker
from app.services.image_text_extractor import ImageTextExtractor
from app.models.ai_fact_check_models import AIFactCheckResponse
from app.models.fact_check_models import (
    FactCheckRequest,
    FactCheckResponse,
    UnverifiedFactCheckResponse,
    ErrorResponse,
    Source,
    VerdictEnum,
    ConfidenceEnum,
)
from app.websites.fact_checker_website import get_all_sources

# Setup logging
logger = logging.getLogger(__name__)

fact_check_router = APIRouter()
openai_client = OpenAIClient(OPENAI_API_KEY)
ai_fact_checker = AIFactChecker(openai_client)
image_text_extractor = ImageTextExtractor(OPENAI_API_KEY)


def _normalize_sources(raw_sources: list) -> List[dict]:
    """Coerce model-emitted sources into ``{"url": ..., "name": ...}`` dicts.

    The LLM sometimes returns bare URL strings and sometimes dicts; either way,
    URLs missing a scheme are prefixed with ``https://``. Entries of any other
    type are dropped. Shared by generate_fact_report and combine_fact_reports
    (previously duplicated verbatim in both).
    """
    cleaned: List[dict] = []
    for source in raw_sources:
        if isinstance(source, str):
            url = source if source.startswith("http") else f"https://{source}"
            cleaned.append({"url": url, "name": source})
        elif isinstance(source, dict):
            url = source.get("url", "")
            if url and not url.startswith("http"):
                source["url"] = f"https://{url}"
            cleaned.append(source)
    return cleaned


def _unverified_response(
    claim: str, evidence: str, explanation: str, additional_context: str
) -> UnverifiedFactCheckResponse:
    """Build the standard low-confidence UNVERIFIED fallback response."""
    return UnverifiedFactCheckResponse(
        claim=claim,
        verdict=VerdictEnum.UNVERIFIED,
        confidence=ConfidenceEnum.LOW,
        sources=[],
        evidence=evidence,
        explanation=explanation,
        additional_context=additional_context,
    )


async def process_url_content(url: str) -> Optional[str]:
    """Extract text content from the provided URL.

    Returns the extracted text, or None when extraction yields nothing or
    raises (errors are logged, never propagated — callers treat None as
    "no usable content").
    """
    try:
        text = await image_text_extractor.extract_text(url, is_url=True)
        if text:
            logger.info("Successfully extracted text from URL: %s", text)
        else:
            logger.warning("No text could be extracted from URL: %s", url)
        return text
    except Exception as e:
        logger.error("Error extracting text from URL: %s", str(e))
        return None


async def process_fact_check(
    query: str,
) -> Union[FactCheckResponse, UnverifiedFactCheckResponse]:
    """Fact-check a single claim.

    Strategy: try the Google Fact Check API against each configured
    fact-checker domain first; on the first domain that returns claims,
    generate a report from that payload. If no domain matches, fall back to
    the AI-driven website search. All failures degrade to an UNVERIFIED
    report rather than raising.
    """
    if not GOOGLE_API_KEY or not GOOGLE_FACT_CHECK_BASE_URL:
        return _unverified_response(
            claim=query,
            evidence="The fact-checking service is not properly configured.",
            explanation="The system is missing required API configuration for fact-checking services.",
            additional_context="This is a temporary system configuration issue.",
        )

    headers = {"Content-Type": "application/json"}
    async with httpx.AsyncClient() as client:
        fact_checker_sources = get_all_sources()
        for source in fact_checker_sources:
            params = {
                "key": GOOGLE_API_KEY,
                "query": query,
                "languageCode": "en-US",
                "reviewPublisherSiteFilter": source.domain,
                "pageSize": 10,
            }
            try:
                response = await client.get(
                    GOOGLE_FACT_CHECK_BASE_URL, params=params, headers=headers
                )
                response.raise_for_status()
                json_response = response.json()
                if json_response.get("claims"):
                    return await generate_fact_report(query, json_response)
            except Exception as e:
                # A single failing publisher should not abort the whole scan.
                logger.error("Error with source %s: %s", source.domain, str(e))
                continue

        # No Google Fact Check hit — fall back to AI-assisted website search.
        try:
            search_request = SearchRequest(
                search_text=query, source_types=["fact_checkers"]
            )
            ai_response = await search_websites(search_request)
            return await generate_fact_report(query, ai_response)
        except Exception as e:
            logger.error("Error in AI fact check: %s", str(e))
            return await generate_fact_report(
                query,
                {
                    "status": "no_results",
                    "verification_result": {
                        "no_sources_found": True,
                        "reason": str(e),
                    },
                },
            )


async def generate_fact_report(
    query: str, fact_check_data: dict | AIFactCheckResponse
) -> Union[FactCheckResponse, UnverifiedFactCheckResponse]:
    """Generate a fact check report using OpenAI based on the fact check results.

    Accepts either the raw Google Fact Check JSON (dict) or an
    AIFactCheckResponse from the website search. Returns an UNVERIFIED
    response when no sources were found, when the model's output fails
    validation, or on any unexpected error.
    """
    try:
        base_system_prompt = """You are a professional fact-checking reporter. Your task is to create a detailed fact check report based on the provided data. Focus on accuracy, clarity, and proper citation of sources.

Rules:
1. Include all source URLs and names in the sources list
2. Keep the explanation focused on verifiable facts
3. Include dates when available
4. Maintain objectivity in the report
5. If no reliable sources are found, provide a clear explanation why"""

        # Handle both dictionary and AIFactCheckResponse
        if hasattr(fact_check_data, "verification_result"):
            # It's an AIFactCheckResponse
            has_sources = bool(fact_check_data.sources)
            # NOTE(review): .get() below assumes verification_result is a dict;
            # if the model exposes it as a nested pydantic object this raises
            # and is swallowed by the outer except — TODO confirm the type.
            verification_result = fact_check_data.verification_result
            fact_check_data_dict = fact_check_data.dict()
        else:
            # It's a dictionary
            has_sources = bool(
                fact_check_data.get("claims") or fact_check_data.get("urls_found")
            )
            verification_result = fact_check_data.get("verification_result", {})
            fact_check_data_dict = fact_check_data

        # If no sources were found, return an unverified response
        if (
            not has_sources
            or (
                isinstance(fact_check_data, dict)
                and fact_check_data.get("status") == "no_results"
            )
            or (verification_result and verification_result.get("no_sources_found"))
        ):
            return _unverified_response(
                claim=query,
                evidence="No fact-checking sources have verified this claim yet.",
                explanation="Our search across reputable fact-checking websites did not find any formal verification of this claim. This doesn't mean the claim is false - just that it hasn't been formally fact-checked yet.",
                additional_context="The claim may be too recent for fact-checkers to have investigated, or it may not have been widely circulated enough to warrant formal fact-checking.",
            )

        base_user_prompt = """Generate a comprehensive fact check report in this exact JSON format:
{
    "claim": "Write the exact claim being verified",
    "verdict": "One of: True/False/Partially True/Unverified",
    "confidence": "One of: High/Medium/Low",
    "sources": [
        {
            "url": "Full URL of the source",
            "name": "Name of the source organization"
        }
    ],
    "evidence": "A concise summary of the key evidence (1-2 sentences)",
    "explanation": "A detailed explanation including who verified it, when it was verified, and the key findings (2-3 sentences)",
    "additional_context": "Important context about the verification process, limitations, or broader implications (1-2 sentences)"
}"""

        # Google Fact Check payloads carry a "claims" key; anything else is
        # treated as AI search output with verification_result/sources fields.
        if isinstance(fact_check_data, dict) and "claims" in fact_check_data:
            system_prompt = base_system_prompt
            user_prompt = f"""Query: {query}
Fact Check Results: {fact_check_data_dict}

{base_user_prompt}

The report should:
1. Include ALL source URLs and organization names
2. Specify verification dates when available
3. Name the fact-checking organizations involved
4. Describe the verification process"""
        else:
            system_prompt = base_system_prompt
            user_prompt = f"""Query: {query}
Fact Check Results: {fact_check_data_dict}

{base_user_prompt}

The report should:
1. Include ALL source URLs and names from both verification_result and sources fields
2. Mention all fact-checking organizations involved
3. Describe the verification process
4. Note any conflicting information between sources"""

        response = await openai_client.generate_text_response(
            system_prompt=system_prompt, user_prompt=user_prompt, max_tokens=1000
        )

        try:
            response_data = response["response"]
            if isinstance(response_data.get("sources"), list):
                response_data["sources"] = _normalize_sources(response_data["sources"])

            # No sources or an explicit "Unverified" verdict downgrades the
            # response model to the unverified variant.
            if response_data["verdict"] == "Unverified" or not response_data.get(
                "sources"
            ):
                return UnverifiedFactCheckResponse(**response_data)
            return FactCheckResponse(**response_data)
        except Exception as validation_error:
            logger.error("Response validation error: %s", str(validation_error))
            return _unverified_response(
                claim=query,
                evidence="An error occurred while processing the fact check results.",
                explanation="The system encountered an error while validating the fact check results.",
                additional_context="This is a technical error and does not reflect on the truthfulness of the claim.",
            )
    except Exception as e:
        logger.error("Error generating fact report: %s", str(e))
        return _unverified_response(
            claim=query,
            evidence="An error occurred while generating the fact check report.",
            explanation="The system encountered an unexpected error while processing the fact check request.",
            additional_context="This is a technical error and does not reflect on the truthfulness of the claim.",
        )


async def combine_fact_reports(
    query: str,
    url_text: str,
    query_result: Dict[str, Any],
    url_result: Dict[str, Any],
) -> Union[FactCheckResponse, UnverifiedFactCheckResponse]:
    """Combine fact check results from query and URL into a single comprehensive report.

    Feeds both prior reports back to the model and asks it for one unified
    verdict; degrades to UNVERIFIED on any failure.
    """
    try:
        system_prompt = """You are a professional fact-checking reporter. Your task is to create a comprehensive fact check report by combining and analyzing multiple fact-checking results. Focus on accuracy, clarity, and proper citation of all sources.

Rules:
1. Include all source URLs and names from both result sets
2. Compare and contrast findings from different sources
3. Include dates when available
4. Note any discrepancies between sources
5. Provide a balanced, objective analysis"""

        user_prompt = f"""Original Query: {query}
Extracted Text from URL: {url_text}

First Fact Check Result: {query_result}
Second Fact Check Result: {url_result}

Generate a comprehensive fact check report in this exact JSON format:
{{
    "claim": "Write the exact claim being verified",
    "verdict": "One of: True/False/Partially True/Unverified",
    "confidence": "One of: High/Medium/Low",
    "sources": [
        {{
            "url": "Full URL of the source",
            "name": "Name of the source organization"
        }}
    ],
    "evidence": "A concise summary of the key evidence from both sources (2-3 sentences)",
    "explanation": "A detailed explanation combining findings from both fact checks (3-4 sentences)",
    "additional_context": "Important context about differences or similarities in findings (1-2 sentences)"
}}

The report should:
1. Combine sources from both fact checks
2. Compare findings from both analyses
3. Note any differences in conclusions
4. Provide a unified verdict based on all available information"""

        response = await openai_client.generate_text_response(
            system_prompt=system_prompt, user_prompt=user_prompt, max_tokens=1000
        )

        response_data = response["response"]

        # Clean up sources from both results
        if isinstance(response_data.get("sources"), list):
            response_data["sources"] = _normalize_sources(response_data["sources"])

        if response_data["verdict"] == "Unverified" or not response_data.get("sources"):
            return UnverifiedFactCheckResponse(**response_data)
        return FactCheckResponse(**response_data)
    except Exception as e:
        logger.error("Error combining fact reports: %s", str(e))
        return _unverified_response(
            claim=query,
            evidence="An error occurred while combining fact check reports.",
            explanation="The system encountered an error while trying to combine results from multiple sources.",
            additional_context="This is a technical error and does not reflect on the truthfulness of the claim.",
        )


@fact_check_router.post(
    "/check-facts", response_model=Union[FactCheckResponse, UnverifiedFactCheckResponse]
)
async def check_facts(request: FactCheckRequest):
    """
    Fetch fact check results and generate a comprehensive report.
    Handles both query-based and URL-based fact checking.
    """
    url_text = None
    query_result = None
    url_result = None

    # If URL is provided, try to extract text
    if request.url:
        url_text = await process_url_content(request.url)
        if not url_text and not request.query:
            # Only return early if URL text extraction failed and no query provided
            return _unverified_response(
                claim=f"URL check requested: {request.url}",
                evidence="Unable to extract text from the provided URL.",
                explanation="The system could not process the content from the provided URL. The URL might be invalid or inaccessible.",
                additional_context="Please provide a valid URL or a text query for fact-checking.",
            )

    # If URL text was successfully extracted, process it
    if url_text:
        logger.info("Processing fact check for extracted text: %s", url_text)
        url_result = await process_fact_check(url_text)

    # Process query if provided
    if request.query:
        query_result = await process_fact_check(request.query)

    # If both results are available, combine them
    if query_result and url_result and url_text:
        return await combine_fact_reports(
            request.query, url_text, query_result.dict(), url_result.dict()
        )

    # If only one result is available
    if query_result:
        return query_result
    if url_result:
        return url_result

    # If no valid results
    return _unverified_response(
        claim=request.query or f"URL: {request.url}",
        evidence="Failed to process fact-checking request.",
        explanation="The system encountered errors while processing the fact checks.",
        additional_context="Please try again with different input or contact support if the issue persists.",
    )