Content fact check is functional

Utsho Dey 2024-12-17 17:31:13 +06:00
parent 790d58402a
commit e56163a8c3
13 changed files with 610 additions and 1423 deletions

View file

@@ -1,7 +1,7 @@
from fastapi import APIRouter, HTTPException
from app.services.openai_client import OpenAIClient, AIFactChecker
from app.config import OPENAI_API_KEY
from app.models.fact_check_models import (
from app.models.ai_fact_check_models import (
AIFactCheckRequest,
AIFactCheckResponse,
VerificationResult,

View file

@@ -1,173 +1,192 @@
from fastapi import APIRouter, HTTPException
import json
from datetime import datetime
from typing import Dict, List
import httpx
from app.config import GOOGLE_API_KEY, GOOGLE_FACT_CHECK_BASE_URL
from app.config import GOOGLE_API_KEY, GOOGLE_FACT_CHECK_BASE_URL, OPENAI_API_KEY
from app.api.scrap_websites import search_websites, SearchRequest
from app.services.openai_client import OpenAIClient
from app.models.fact_check_models import (
GoogleFactCheckRequest as FactCheckRequest,
GoogleFactCheckResponse as FactCheckResponse,
Claim,
FactCheckRequest,
FactCheckResponse,
ErrorResponse,
TokenUsage
Source
)
from app.websites.fact_checker_website import fetch_fact_checks, get_all_sources
from app.websites.fact_checker_website import get_all_sources
fact_check_router = APIRouter()
openai_client = OpenAIClient(OPENAI_API_KEY)
class CustomJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return super().default(obj)
async def generate_fact_report(query: str, fact_check_data: dict) -> FactCheckResponse:
"""Generate a fact check report using OpenAI based on the fact check results."""
try:
base_system_prompt = """You are a professional fact-checking reporter. Your task is to create a detailed fact check report based on the provided data. Focus on accuracy, clarity, and proper citation of sources.
async def validate_api_key():
"""Validate the Google API key with a test request"""
async with httpx.AsyncClient() as client:
Rules:
1. Include all source URLs and names in the sources list
2. Keep the explanation focused on verifiable facts
3. Include dates when available
4. Maintain objectivity in the report"""
base_user_prompt = """Generate a comprehensive fact check report in this exact JSON format:
{
"claim": "Write the exact claim being verified",
"verdict": "One of: True/False/Partially True/Unverified",
"confidence": "One of: High/Medium/Low",
"sources": [
{
"url": "Full URL of the source",
"name": "Name of the source organization"
}
],
"evidence": "A concise summary of the key evidence (1-2 sentences)",
"explanation": "A detailed explanation including who verified it, when it was verified, and the key findings (2-3 sentences)",
"additional_context": "Important context about the verification process, limitations, or broader implications (1-2 sentences)"
}
Ensure all URLs in sources are complete (including https:// if missing) and each source has both a URL and name."""
if "claims" in fact_check_data:
system_prompt = base_system_prompt
user_prompt = f"""Query: {query}
Fact Check Results: {fact_check_data}
{base_user_prompt}
The report should:
1. Include ALL source URLs and organization names
2. Specify verification dates when available
3. Name the fact-checking organizations involved
4. Describe the verification process"""
else:
system_prompt = base_system_prompt
user_prompt = f"""Query: {query}
Fact Check Results: {fact_check_data}
{base_user_prompt}
The report should:
1. Include ALL source URLs and names from both verification_result and sources fields
2. Mention all fact-checking organizations involved
3. Describe the verification process
4. Note any conflicting information between sources"""
response = await openai_client.generate_text_response(
system_prompt=system_prompt,
user_prompt=user_prompt,
max_tokens=1000
)
try:
test_url = f"{GOOGLE_FACT_CHECK_BASE_URL}claims:search"
params = {
"key": GOOGLE_API_KEY,
"query": "test",
"languageCode": "en-US",
"pageSize": 1
}
response = await client.get(test_url, params=params)
response.raise_for_status()
return True
except httpx.HTTPStatusError as e:
if e.response.status_code == 403:
raise HTTPException(
status_code=503,
detail=ErrorResponse(
detail="Invalid or expired API key",
error_code="INVALID_API_KEY",
path="/check-facts"
).dict()
)
# First try to parse the response directly
response_data = response["response"]
# Clean up sources before validation
if isinstance(response_data.get('sources'), list):
cleaned_sources = []
for source in response_data['sources']:
if isinstance(source, str):
# Convert string sources to Source objects
url = source if source.startswith('http') else f"https://{source}"
cleaned_sources.append({
"url": url,
"name": source
})
elif isinstance(source, dict):
# Ensure URL has proper scheme
url = source.get('url', '')
if url and not url.startswith('http'):
source['url'] = f"https://{url}"
cleaned_sources.append(source)
response_data['sources'] = cleaned_sources
fact_check_response = FactCheckResponse(**response_data)
return fact_check_response
except Exception as validation_error:
print(f"Response validation error: {str(validation_error)}")
raise HTTPException(
status_code=503,
status_code=422,
detail=ErrorResponse(
detail=f"API validation failed: {str(e)}",
error_code="API_VALIDATION_ERROR",
detail=f"Invalid response format: {str(validation_error)}",
error_code="VALIDATION_ERROR",
path="/check-facts"
).dict()
)
except Exception as e:
print(f"Error generating fact report: {str(e)}")
raise HTTPException(
status_code=500,
detail=ErrorResponse(
detail="Error generating fact report",
error_code="FACT_CHECK_ERROR",
path="/check-facts"
).dict()
)
@fact_check_router.post(
"/check-facts",
response_model=FactCheckResponse,
responses={
400: {"model": ErrorResponse},
404: {"model": ErrorResponse},
500: {"model": ErrorResponse},
503: {"model": ErrorResponse}
}
)
async def check_facts(request: FactCheckRequest) -> FactCheckResponse:
@fact_check_router.post("/check-facts", response_model=FactCheckResponse)
async def check_facts(request: FactCheckRequest):
"""
Check facts using multiple fact-checking sources
Fetch fact check results and generate a comprehensive report.
"""
all_results = []
verified_results = []
# Validate configuration
if not GOOGLE_API_KEY or not GOOGLE_FACT_CHECK_BASE_URL:
raise HTTPException(
status_code=500,
detail=ErrorResponse(
detail="API configuration is missing",
detail="Google API key or base URL is not configured",
error_code="CONFIGURATION_ERROR",
path="/check-facts"
).dict()
)
# Validate API key before proceeding
await validate_api_key()
# Get all sources in priority order
all_sources = get_all_sources()
all_sources_list = [] # To store source URLs
contexts_used = [] # To store context snippets
failed_sources = [] # Track failed sources
for source in all_sources:
headers = {"Content-Type": "application/json"}
async with httpx.AsyncClient() as client:
# Get fact checker sources from the centralized configuration
fact_checker_sources = get_all_sources()
for source in fact_checker_sources:
params = {
"key": GOOGLE_API_KEY,
"query": request.query,
"languageCode": "en-US",
"reviewPublisherSiteFilter": source.domain,
"pageSize": 10
}
try:
response = await client.get(
GOOGLE_FACT_CHECK_BASE_URL,
params=params,
headers=headers
)
response.raise_for_status()
json_response = response.json()
if json_response.get("claims"):
return await generate_fact_report(request.query, json_response)
except httpx.RequestError as e:
print(f"Error fetching results for site {source.domain}: {str(e)}")
continue
except Exception as e:
print(f"Unexpected error for site {source.domain}: {str(e)}")
continue
try:
result = await fetch_fact_checks(
GOOGLE_API_KEY,
GOOGLE_FACT_CHECK_BASE_URL,
request.content,
source
search_request = SearchRequest(
search_text=request.query,
source_types=["fact_checkers"]
)
if "claims" in result:
# Validate each claim through Pydantic
for claim in result["claims"]:
validated_claim = Claim(**claim).dict()
all_results.append(validated_claim)
# Extract source and context information
if "claimReview" in validated_claim:
review = validated_claim["claimReview"][0]
if "publisher" in review and "site" in review["publisher"]:
all_sources_list.append(review["publisher"]["site"])
if "textualRating" in review:
contexts_used.append(review["textualRating"])
except HTTPException as http_err:
failed_sources.append({
"source": source.domain,
"error": str(http_err.detail)
})
continue
ai_response = await search_websites(search_request)
return await generate_fact_report(request.query, ai_response)
except Exception as e:
failed_sources.append({
"source": source.domain,
"error": str(e)
})
continue
# Return partial results if some sources failed but we have data
if all_results:
verification_result = {
"verdict": "Partial Results Available" if failed_sources else "Complete Results",
"confidence": "Medium" if failed_sources else "High",
"evidence": contexts_used,
"reasoning": "Based on available fact checks",
"missing_info": f"{len(failed_sources)} sources failed" if failed_sources else None
}
else:
raise HTTPException(
status_code=404,
detail=ErrorResponse(
detail="No fact check results found. Failed sources: " +
", ".join([f"{f['source']}: {f['error']}" for f in failed_sources]),
error_code="NO_RESULTS_FOUND",
path="/check-facts"
).dict()
)
# Create token usage information
token_usage = TokenUsage(
prompt_tokens=0,
completion_tokens=0,
total_tokens=0
)
# Create the response using Pydantic model with all required fields
response = FactCheckResponse(
query=request.content,
total_claims_found=len(all_results),
results=all_results,
verification_result=verification_result,
sources=list(set(all_sources_list)),
context_used=contexts_used,
token_usage=token_usage,
summary={
"total_sources": len(set(all_sources_list)),
"fact_checking_sites_queried": len(all_sources),
"failed_sources": failed_sources
}
)
return response
print(f"Error in AI fact check: {str(e)}")
raise HTTPException(
status_code=404,
detail=ErrorResponse(
detail="No fact check results found",
error_code="NOT_FOUND",
path="/check-facts"
).dict()
)
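Note: a minimal client sketch of the reworked /check-facts flow, for reference only; the host, port, and example claim are assumptions, and the response fields follow the new report-style FactCheckResponse (claim, verdict, confidence, sources, evidence, explanation, additional_context).

import asyncio
import httpx

async def main():
    # Assumed local dev server; the router is registered at /check-facts.
    async with httpx.AsyncClient(timeout=60.0) as client:
        resp = await client.post(
            "http://localhost:8000/check-facts",
            json={"query": "Did NASA confirm finding alien structures on Mars in 2024?"},
        )
        resp.raise_for_status()
        report = resp.json()
        print(report["verdict"], report["confidence"])
        for source in report["sources"]:
            print(source["name"], source["url"])

if __name__ == "__main__":
    asyncio.run(main())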

View file

@@ -2,60 +2,25 @@ from fastapi import APIRouter, HTTPException
import httpx
import logging
from urllib.parse import urlparse
import json
from app.services.openai_client import OpenAIClient
from app.config import OPENAI_API_KEY, GOOGLE_API_KEY, GOOGLE_ENGINE_ID
from app.websites.fact_checker_website import SOURCES, get_all_sources
from app.api.ai_fact_check import ai_fact_check
from typing import List, Dict, Optional
from pydantic import BaseModel
from app.models.fact_check_models import (
from app.models.ai_fact_check_models import (
AIFactCheckRequest,
FactCheckSource,
SourceType
)
from app.websites.fact_checker_website import SOURCES, get_all_sources
from app.api.ai_fact_check import ai_fact_check
from app.config import GOOGLE_API_KEY, GOOGLE_ENGINE_ID, GOOGLE_SEARCH_URL
# Define Pydantic models
class Publisher(BaseModel):
name: str
site: str
class ClaimReview(BaseModel):
publisher: Publisher
textualRating: str
class Claim(BaseModel):
claimReview: List[ClaimReview]
claimant: str
text: str
class Summary(BaseModel):
fact_checking_sites_queried: int
total_sources: int
class VerificationResult(BaseModel):
verdict: str
confidence: str
evidence: List[str]
reasoning: str
fact_check_type: str
class SearchRequest(BaseModel):
search_text: str
source_types: List[str]
class EnhancedFactCheckResponse(BaseModel):
query: str
results: List[Dict]
sources: List
summary: Summary
token_usage: Dict[str, int]
total_claims_found: int
verification_result: VerificationResult
source_types: List[str] = ["fact_checkers"]
# Configure logging
logging.basicConfig(
level=logging.INFO, # Changed back to INFO from DEBUG
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
@@ -66,14 +31,13 @@ scrap_websites_router = APIRouter()
RESULTS_PER_PAGE = 10
MAX_PAGES = 5
MAX_URLS_PER_DOMAIN = 5
GOOGLE_SEARCH_URL = "https://www.googleapis.com/customsearch/v1"
def get_domain_from_url(url: str) -> str:
"""Extract domain from URL with improved handling."""
try:
parsed = urlparse(url)
domain = parsed.netloc.lower()
# Remove 'www.' if present
if domain.startswith('www.'):
domain = domain[4:]
return domain
@@ -95,26 +59,16 @@ def is_valid_source_domain(domain: str, sources: List[FactCheckSource]) -> bool:
if source_domain.startswith('www.'):
source_domain = source_domain[4:]
# Check exact match
if domain == source_domain:
logger.debug(f"Exact domain match found: {domain} = {source_domain}")
return True
# Check if domain ends with source domain
if domain.endswith('.' + source_domain):
logger.debug(f"Subdomain match found: {domain} ends with {source_domain}")
if domain == source_domain or domain.endswith('.' + source_domain):
return True
logger.debug(f"No match found for domain: {domain}")
return False
async def build_enhanced_search_query(query: str, sources: List[FactCheckSource]) -> str:
"""Build search query with site restrictions."""
site_queries = [f"site:{source.domain}" for source in sources]
site_restriction = " OR ".join(site_queries)
enhanced_query = f"({query}) ({site_restriction})"
logger.debug(f"Enhanced search query: {enhanced_query}")
return enhanced_query
return f"({query}) ({site_restriction})"
async def google_custom_search(query: str, sources: List[FactCheckSource], page: int = 1) -> Optional[Dict]:
"""Perform Google Custom Search with enhanced query."""
@@ -131,141 +85,39 @@ async def google_custom_search(query: str, sources: List[FactCheckSource], page:
async with httpx.AsyncClient(timeout=30.0) as client:
try:
logger.info(f"Making API request to Google Custom Search with params: {params}")
response = await client.get(GOOGLE_SEARCH_URL, params=params)
response.raise_for_status()
data = response.json()
search_info = data.get('searchInformation', {})
logger.info(f"Search info: Total results: {search_info.get('totalResults', 0)}, "
f"Time taken: {search_info.get('searchTime', 0)}s")
if 'error' in data:
error_details = data['error']
logger.error(f"API Error: {error_details}")
raise HTTPException(
status_code=response.status_code,
detail=f"Google API Error: {error_details.get('message')}"
)
return data
return response.json()
except Exception as e:
logger.error(f"Search error: {str(e)}", exc_info=True)
logger.error(f"Search error: {str(e)}")
raise HTTPException(status_code=500, detail=f"Search error: {str(e)}")
async def analyze_fact_check_results(openai_client: OpenAIClient, original_response: Dict) -> Dict:
"""Analyze fact check results using OpenAI to generate a consolidated verdict."""
# Extract verification results from sources
verification_results = []
for url, result in original_response.get('verification_result', {}).items():
verification_results.append(f"""
Source: {url}
Verdict: {result.get('verdict')}
Confidence: {result.get('confidence')}
Evidence: {result.get('evidence')}
Reasoning: {result.get('reasoning')}
""")
system_prompt = """You are a professional fact-checking analyzer. Your task is to analyze multiple fact-checking results
and provide a consolidated verdict. Respond with a valid JSON object containing your analysis."""
user_prompt = f"""
Analyze these fact-checking results and provide a final verdict.
Query: {original_response.get('query', '')}
Fact Check Results:
{'\n'.join(verification_results)}"""
try:
logger.info("Generating AI analysis of fact check results")
response = await openai_client.generate_text_response(
system_prompt=system_prompt,
user_prompt=user_prompt,
max_tokens=2000
)
# Create the enhanced result structure
enhanced_result = {
"query": original_response.get('query', ''),
"results": [
{
"claimReview": [
{
"publisher": {
"name": source,
"site": source
},
"textualRating": result.get('verdict', '')
} for source in original_response.get('sources', [])
],
"claimant": "source",
"text": original_response.get('query', '')
}
],
"sources": original_response.get('sources', []),
"summary": {
"fact_checking_sites_queried": len(original_response.get('sources', [])),
"total_sources": len(original_response.get('verification_result', {}))
},
"verification_result": {
"verdict": next(iter(original_response.get('verification_result', {}).values()), {}).get('verdict', ''),
"confidence": next(iter(original_response.get('verification_result', {}).values()), {}).get('confidence', ''),
"evidence": [next(iter(original_response.get('verification_result', {}).values()), {}).get('evidence', '')],
"reasoning": next(iter(original_response.get('verification_result', {}).values()), {}).get('reasoning', ''),
"fact_check_type": "ai fact checker"
},
"token_usage": original_response.get('token_usage', {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
})
}
enhanced_result["total_claims_found"] = len(enhanced_result.get("results", []))
logger.info("Successfully generated AI analysis")
return enhanced_result
except Exception as e:
logger.error(f"Error in OpenAI analysis: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error in fact check analysis: {str(e)}")
@scrap_websites_router.post("/search", response_model=EnhancedFactCheckResponse)
@scrap_websites_router.post("/search")
async def search_websites(request: SearchRequest):
logger.info(f"Starting search with query: {request.search_text}")
logger.info(f"Source types requested: {request.source_types}")
# Get the source types from the request
source_types = request.source_types if request.source_types else ["fact_checkers"]
# Get sources for requested types
# Get sources based on requested types
selected_sources = []
for source_type in request.source_types:
for source_type in source_types:
if source_type in SOURCES:
selected_sources.extend(SOURCES[source_type])
# If no valid sources found, use fact checkers as default
if not selected_sources:
logger.warning("No valid source types provided. Using all available sources.")
selected_sources = get_all_sources()
selected_sources = SOURCES["fact_checkers"]
logger.info(f"Selected sources: {[source.domain for source in selected_sources]}")
# Initialize collections for URLs
all_urls = []
domain_results = {}
try:
# Search and collect URLs
for page in range(1, MAX_PAGES + 1):
if len(all_urls) >= 50:
logger.info("Reached maximum URL limit of 50")
break
logger.info(f"Fetching page {page} of search results")
search_response = await google_custom_search(request.search_text, selected_sources, page)
if not search_response or not search_response.get("items"):
logger.warning(f"No results found on page {page}")
break
for item in search_response.get("items", []):
@@ -274,7 +126,6 @@ async def search_websites(request: SearchRequest):
continue
domain = get_domain_from_url(url)
logger.debug(f"Processing URL: {url} with domain: {domain}")
if is_valid_source_domain(domain, selected_sources):
if domain not in domain_results:
@@ -287,56 +138,23 @@ async def search_websites(request: SearchRequest):
"snippet": item.get("snippet", "")
})
all_urls.append(url)
else:
logger.debug(f"Skipping URL {url} - domain not in allowed list")
if len(all_urls) >= 50:
break
logger.info(f"Total URLs collected: {len(all_urls)}")
if not all_urls:
return EnhancedFactCheckResponse(
query=request.search_text,
results=[],
sources=[],
summary=Summary(
fact_checking_sites_queried=len(selected_sources),
total_sources=0
),
token_usage={
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
},
total_claims_found=0,
verification_result=VerificationResult(
verdict="Insufficient Evidence",
confidence="Low",
evidence=["No relevant sources found"],
reasoning="No fact-checking sources were found for this claim",
fact_check_type="ai fact checker"
)
)
return {
"status": "no_results",
"urls_found": 0
}
# Perform fact check with collected URLs
fact_check_request = AIFactCheckRequest(
content=request.search_text,
urls=all_urls[:5] # Limit to 5 URLs
urls=all_urls[:5]
)
logger.info(f"Performing fact check with {len(fact_check_request.urls)} URLs")
fact_check_response = await ai_fact_check(fact_check_request)
# Get enhanced analysis
openai_client = OpenAIClient(OPENAI_API_KEY)
enhanced_response = await analyze_fact_check_results(
openai_client,
fact_check_response.dict()
)
return EnhancedFactCheckResponse(**enhanced_response)
return await ai_fact_check(fact_check_request)
except Exception as e:
logger.error(f"Error during search/fact-check process: {str(e)}", exc_info=True)
logger.error(f"Error during search/fact-check process: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
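Note: a standalone sketch of the site-restricted query that build_enhanced_search_query produces before it is sent to Google Custom Search; the domains and claim below are illustrative only.

domains = ["snopes.com", "politifact.com"]
site_restriction = " OR ".join(f"site:{domain}" for domain in domains)
query = "Sheikh Hasina resigned as Prime Minister of Bangladesh"
enhanced_query = f"({query}) ({site_restriction})"
print(enhanced_query)
# (Sheikh Hasina resigned as Prime Minister of Bangladesh) (site:snopes.com OR site:politifact.com)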

View file

@@ -1,261 +0,0 @@
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from typing import List, Dict, Optional
import requests
from bs4 import BeautifulSoup
import urllib.parse
import numpy as np
from time import sleep
import logging
from app.services.openai_client import OpenAIClient
from app.config import OPENAI_API_KEY
from app.websites.fact_checker_website import SOURCES, get_all_sources
from app.api.ai_fact_check import ai_fact_check
from app.models.fact_check_models import AIFactCheckRequest, AIFactCheckResponse
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
scrap_websites_router = APIRouter()
# Configuration for rate limiting
RATE_LIMIT_DELAY = 2 # Delay between requests in seconds
MAX_RETRIES = 1 # Maximum number of retries per domain
RETRY_DELAY = 1 # Delay between retries in seconds
class SearchRequest(BaseModel):
search_text: str
source_types: List[str] = ["fact_checkers"]
class UrlSimilarityInfo(BaseModel):
url: str
similarity: float
extracted_text: str
class SearchResponse(BaseModel):
results: Dict[str, List[str]]
error_messages: Dict[str, str]
ai_fact_check_result: Optional[AIFactCheckResponse] = None
def extract_url_text(url: str) -> str:
"""Extract and process meaningful text from URL path with improved cleaning"""
logger.debug(f"Extracting text from URL: {url}")
try:
parsed = urllib.parse.urlparse(url)
path = parsed.path
path = path.replace('.html', '').replace('/index', '').replace('.php', '')
segments = [seg for seg in path.split('/') if seg]
cleaned_segments = []
for segment in segments:
segment = segment.replace('-', ' ').replace('_', ' ')
if not (segment.replace(' ', '').isdigit() or
all(part.isdigit() for part in segment.split() if part)):
cleaned_segments.append(segment)
common_words = {
'www', 'live', 'news', 'intl', 'index', 'world', 'us', 'uk',
'updates', 'update', 'latest', 'breaking', 'new', 'article'
}
text = ' '.join(cleaned_segments)
words = [word.lower() for word in text.split()
if word.lower() not in common_words and len(word) > 1]
result = ' '.join(words)
logger.debug(f"Extracted text: {result}")
return result
except Exception as e:
logger.error(f"Error extracting text from URL {url}: {str(e)}")
return ''
def google_search_scraper(search_text: str, site_domain: str, retry_count: int = 0) -> List[str]:
"""Scrape Google search results with retry logic and rate limiting"""
logger.info(f"Searching for '{search_text}' on domain: {site_domain} (Attempt {retry_count + 1}/{MAX_RETRIES})")
if retry_count >= MAX_RETRIES:
logger.error(f"Max retries exceeded for domain: {site_domain}")
raise HTTPException(
status_code=429,
detail=f"Max retries exceeded for {site_domain}"
)
query = f"{search_text} \"site:{site_domain}\""
encoded_query = urllib.parse.quote(query)
base_url = "https://www.google.com/search"
url = f"{base_url}?q={encoded_query}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
try:
logger.debug(f"Waiting {RATE_LIMIT_DELAY} seconds before request")
sleep(RATE_LIMIT_DELAY)
logger.debug(f"Making request to Google Search for domain: {site_domain}")
response = requests.get(url, headers=headers)
if response.status_code == 429 or "sorry/index" in response.url:
logger.warning(f"Rate limit hit for domain {site_domain}. Retrying after delay...")
sleep(RETRY_DELAY)
return google_search_scraper(search_text, site_domain, retry_count + 1)
response.raise_for_status()
soup = BeautifulSoup(response.content, 'html.parser')
search_results = soup.find_all('div', class_='g')
urls = []
for result in search_results[:3]:
link = result.find('a')
if link and 'href' in link.attrs:
url = link['href']
if url.startswith('http'):
urls.append(url)
logger.info(f"Found {len(urls)} results for domain: {site_domain}")
return urls[:5]
except requests.RequestException as e:
if retry_count < MAX_RETRIES:
logger.warning(f"Request failed for {site_domain}. Retrying... Error: {str(e)}")
sleep(RETRY_DELAY)
return google_search_scraper(search_text, site_domain, retry_count + 1)
logger.error(f"All retries failed for domain {site_domain}. Error: {str(e)}")
raise HTTPException(
status_code=500,
detail=f"Error scraping {site_domain}: {str(e)}"
)
def calculate_similarity(query_embedding: List[float], url_embedding: List[float]) -> float:
"""Calculate cosine similarity between two embeddings"""
query_array = np.array(query_embedding)
url_array = np.array(url_embedding)
similarity = np.dot(url_array, query_array) / (
np.linalg.norm(url_array) * np.linalg.norm(query_array)
)
return float(similarity)
@scrap_websites_router.post("/search", response_model=SearchResponse)
async def search_websites(request: SearchRequest):
logger.info(f"Starting search with query: {request.search_text}")
logger.info(f"Source types requested: {request.source_types}")
results = {}
error_messages = {}
url_similarities = {}
# Initialize OpenAI client
logger.debug("Initializing OpenAI client")
openai_client = OpenAIClient(OPENAI_API_KEY)
# Get domains based on requested source types
domains = []
for source_type in request.source_types:
if source_type in SOURCES:
domains.extend([source.domain for source in SOURCES[source_type]])
if not domains:
logger.warning("No valid source types provided. Using all available domains.")
domains = [source.domain for source in get_all_sources()]
logger.info(f"Processing {len(domains)} domains")
# Enhance search text with key terms
search_context = request.search_text
logger.debug("Getting query embedding from OpenAI")
query_embedding = openai_client.get_embeddings([search_context])[0]
# Higher similarity threshold for better filtering
SIMILARITY_THRESHOLD = 0.75
for domain in domains:
logger.info(f"Processing domain: {domain}")
try:
urls = google_search_scraper(request.search_text, domain)
url_sims = []
valid_urls = []
logger.debug(f"Found {len(urls)} URLs for domain {domain}")
for url in urls:
url_text = extract_url_text(url)
if not url_text:
logger.debug(f"No meaningful text extracted from URL: {url}")
continue
logger.debug("Getting URL embedding from OpenAI")
url_embedding = openai_client.get_embeddings([url_text])[0]
similarity = calculate_similarity(query_embedding, url_embedding)
logger.debug(f"Similarity score for {url}: {similarity}")
url_sims.append(UrlSimilarityInfo(
url=url,
similarity=similarity,
extracted_text=url_text
))
if similarity >= SIMILARITY_THRESHOLD:
valid_urls.append(url)
results[domain] = valid_urls
url_similarities[domain] = sorted(url_sims,
key=lambda x: x.similarity,
reverse=True)
logger.info(f"Successfully processed domain {domain}. Found {len(valid_urls)} valid URLs")
except HTTPException as e:
logger.error(f"HTTP Exception for domain {domain}: {str(e.detail)}")
error_messages[domain] = str(e.detail)
except Exception as e:
logger.error(f"Unexpected error for domain {domain}: {str(e)}")
error_messages[domain] = f"Unexpected error for {domain}: {str(e)}"
logger.info("Search completed")
logger.debug(f"Results found for {len(results)} domains")
logger.debug(f"Errors encountered for {len(error_messages)} domains")
# Collect all valid URLs from results
all_valid_urls = []
for domain_urls in results.values():
all_valid_urls.extend(domain_urls)
logger.info(f"Total valid URLs collected: {len(all_valid_urls)}")
# Create request body for AI fact check
if all_valid_urls:
fact_check_request = AIFactCheckRequest(
content=request.search_text,
urls=all_valid_urls
)
logger.info("Calling AI fact check service")
try:
ai_response = await ai_fact_check(fact_check_request)
logger.info("AI fact check completed successfully")
# Return response with AI fact check results
return SearchResponse(
results=results,
error_messages=error_messages,
ai_fact_check_result=ai_response
)
except Exception as e:
logger.error(f"Error during AI fact check: {str(e)}")
error_messages["ai_fact_check"] = f"Error during fact checking: {str(e)}"
# Return response without AI fact check if no valid URLs or error occurred
return SearchResponse(
results=results,
error_messages=error_messages,
ai_fact_check_result=None
)
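Note: the removed module ranked scraped URLs by cosine similarity between the query embedding and an embedding of text extracted from each URL path. A self-contained sketch of that scoring follows; the vectors are toy values, not real OpenAI embeddings.

import numpy as np

def calculate_similarity(query_embedding, url_embedding):
    # Cosine similarity between two embedding vectors.
    q = np.array(query_embedding)
    u = np.array(url_embedding)
    return float(np.dot(u, q) / (np.linalg.norm(u) * np.linalg.norm(q)))

SIMILARITY_THRESHOLD = 0.75  # the cut-off the removed module used
query_vec = [0.10, 0.90, 0.30]
candidates = {
    "https://example.org/hasina-resignation-protests": [0.12, 0.85, 0.31],
    "https://example.org/unrelated-recipe": [0.90, 0.05, 0.02],
}
valid_urls = [url for url, vec in candidates.items()
              if calculate_similarity(query_vec, vec) >= SIMILARITY_THRESHOLD]
print(valid_urls)  # keeps only the URL whose vector points the same way as the query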

View file

@@ -6,6 +6,7 @@ load_dotenv()
GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]
GOOGLE_FACT_CHECK_BASE_URL= os.environ["GOOGLE_FACT_CHECK_BASE_URL"]
GOOGLE_ENGINE_ID = os.environ["GOOGLE_ENGINE_ID"]
GOOGLE_SEARCH_URL = os.environ["GOOGLE_SEARCH_URL"]
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
FRONTEND_URL = os.environ["FRONTEND_URL"]
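Note: a sketch of the environment this config now expects (GOOGLE_SEARCH_URL is the key added here). All values are placeholders or assumptions; the Custom Search endpoint matches the constant previously hard-coded in scrap_websites.py.

import os

# Placeholders for local development; app.config reads these with
# os.environ[...], so a missing key fails fast with a KeyError at import time.
os.environ.setdefault("GOOGLE_API_KEY", "your-google-api-key")
os.environ.setdefault("GOOGLE_FACT_CHECK_BASE_URL", "https://factchecktools.googleapis.com/v1alpha1/")  # assumed
os.environ.setdefault("GOOGLE_ENGINE_ID", "your-custom-search-engine-id")
os.environ.setdefault("GOOGLE_SEARCH_URL", "https://www.googleapis.com/customsearch/v1")
os.environ.setdefault("OPENAI_API_KEY", "your-openai-api-key")
os.environ.setdefault("FRONTEND_URL", "http://localhost:3000")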

View file

@@ -0,0 +1,229 @@
from pydantic import BaseModel, Field, HttpUrl, validator, ConfigDict
from typing import Dict, List, Optional, Any, Union
from enum import Enum
from datetime import datetime
from urllib.parse import urlparse
# Common Models
class TokenUsage(BaseModel):
prompt_tokens: Optional[int] = 0
completion_tokens: Optional[int] = 0
total_tokens: Optional[int] = 0
class ErrorResponse(BaseModel):
detail: str
error_code: str = Field(..., description="Unique error code for this type of error")
timestamp: str = Field(default_factory=lambda: datetime.now().isoformat())
path: Optional[str] = Field(None, description="The endpoint path where error occurred")
model_config = ConfigDict(json_schema_extra={
"example": {
"detail": "Error description",
"error_code": "ERROR_CODE",
"timestamp": "2024-12-09T16:49:30.905765",
"path": "/check-facts"
}
})
# Fact Check Models
class Publisher(BaseModel):
name: str
site: Optional[str] = Field(None, description="Publisher's website")
@validator('site')
def validate_site(cls, v):
if v and not (v.startswith('http://') or v.startswith('https://')):
return f"https://{v}"
return v
class ClaimReview(BaseModel):
publisher: Publisher
url: Optional[HttpUrl] = None
title: Optional[str] = None
reviewDate: Optional[str] = None
textualRating: Optional[str] = None
languageCode: str = Field(default="en-US")
class Claim(BaseModel):
text: str
claimant: Optional[str] = None
claimDate: Optional[str] = None
claimReview: List[ClaimReview]
class SourceType(str, Enum):
FACT_CHECKER = "fact_checker"
NEWS_SITE = "news_site"
class FactCheckSource(BaseModel):
domain: str
type: SourceType
priority: int = Field(default=1, ge=1, le=10)
# Verification Models
class VerificationResult(BaseModel):
verdict: str = Field(..., description="True/False/Insufficient Information")
confidence: str = Field(..., description="High/Medium/Low")
evidence: Union[str, List[str]]
reasoning: str
missing_info: Optional[str] = None
model_config = ConfigDict(json_schema_extra={
"example": {
"verdict": "True",
"confidence": "High",
"evidence": ["Direct quote from source supporting the claim"],
"reasoning": "Detailed analysis of why the claim is considered true",
"missing_info": "Any caveats or limitations of the verification"
}
})
# Request Models
class BaseFactCheckRequest(BaseModel):
content: str = Field(
...,
min_length=10,
max_length=1000,
description="The claim to be fact-checked"
)
@validator('content')
def validate_content(cls, v):
if not v.strip():
raise ValueError("Content cannot be empty or just whitespace")
return v.strip()
class GoogleFactCheckRequest(BaseFactCheckRequest):
language: str = Field(default="en-US", pattern="^[a-z]{2}-[A-Z]{2}$")
max_results_per_source: int = Field(default=10, ge=1, le=50)
class AIFactCheckRequest(BaseFactCheckRequest):
urls: List[str] = Field(
...,
min_items=1,
max_items=5,
description="List of URLs to check the content against. URLs will be prefixed with https:// if protocol is missing"
)
@validator('urls')
def validate_urls(cls, urls):
validated_urls = []
for url in urls:
if not url.strip():
raise ValueError("URL cannot be empty")
# Add https:// if no protocol specified
if not url.startswith(('http://', 'https://')):
url = f'https://{url}'
try:
result = urlparse(url)
if not result.netloc:
raise ValueError(f"Invalid URL structure for {url}")
validated_urls.append(url)
except Exception as e:
raise ValueError(f"Invalid URL {url}: {str(e)}")
return validated_urls
model_config = ConfigDict(json_schema_extra={
"example": {
"content": "Indian flag was drawn in BUET campus",
"urls": [
"www.altnews.in/article-about-flag",
"www.another-source.com/related-news"
]
}
})
# Response Models
class BaseFactCheckResponse(BaseModel):
query: str
token_usage: TokenUsage
sources: List[str]
model_config = ConfigDict(json_schema_extra={
"example": {
"query": "Example statement to verify",
"token_usage": {
"prompt_tokens": 100,
"completion_tokens": 50,
"total_tokens": 150
},
"sources": ["source1.com", "source2.com"],
}
})
class GoogleFactCheckResponse(BaseFactCheckResponse):
total_claims_found: int
results: List[Dict[str, Any]]
verification_result: Dict[str, Any]
summary: Dict[str, int]
model_config = ConfigDict(json_schema_extra={
"example": {
"query": "Example claim",
"total_claims_found": 1,
"results": [{
"text": "Example claim text",
"claimant": "Source name",
"claimReview": [{
"publisher": {
"name": "Fact Checker",
"site": "factchecker.com"
},
"textualRating": "True"
}]
}],
"verification_result": {
"verdict": "True",
"confidence": "High",
"evidence": ["Supporting evidence"],
"reasoning": "Detailed analysis"
},
"sources": ["factchecker.com"],
"token_usage": {
"prompt_tokens": 100,
"completion_tokens": 50,
"total_tokens": 150
},
"summary": {
"total_sources": 1,
"fact_checking_sites_queried": 10
}
}
})
class AIFactCheckResponse(BaseFactCheckResponse):
verification_result: Dict[str, VerificationResult] # Changed to Dict to store results per URL
model_config = ConfigDict(json_schema_extra={
"example": {
"query": "Indian flag was drawn in BUET campus",
"verification_result": {
"https://www.source1.com": {
"verdict": "True",
"confidence": "High",
"evidence": ["Supporting evidence from source 1"],
"reasoning": "Detailed analysis from source 1",
"missing_info": None
},
"https://www.source2.com": {
"verdict": "True",
"confidence": "Medium",
"evidence": ["Supporting evidence from source 2"],
"reasoning": "Analysis from source 2",
"missing_info": "Additional context needed"
}
},
"sources": ["source1.com", "source2.com"],
"token_usage": {
"prompt_tokens": 200,
"completion_tokens": 100,
"total_tokens": 300
}
}
})
# Backwards compatibility aliases
FactCheckRequest = GoogleFactCheckRequest
FactCheckResponse = GoogleFactCheckResponse
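Note: a minimal sketch of the URL normalisation the new AIFactCheckRequest performs; the claim and URLs mirror the model's own example, and the import path matches the new module.

from app.models.ai_fact_check_models import AIFactCheckRequest

request = AIFactCheckRequest(
    content="Indian flag was drawn in BUET campus",
    urls=[
        "www.altnews.in/article-about-flag",           # no scheme: https:// is prepended
        "https://www.another-source.com/related-news",
    ],
)
print(request.urls)
# ['https://www.altnews.in/article-about-flag', 'https://www.another-source.com/related-news']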

View file

@@ -1,229 +1,101 @@
from pydantic import BaseModel, Field, HttpUrl, validator, ConfigDict
from typing import Dict, List, Optional, Any, Union
from enum import Enum
from pydantic import BaseModel, Field, HttpUrl, validator
from typing import List, Literal, Union
from datetime import datetime
from urllib.parse import urlparse
from enum import Enum
# Common Models
class TokenUsage(BaseModel):
prompt_tokens: Optional[int] = 0
completion_tokens: Optional[int] = 0
total_tokens: Optional[int] = 0
class VerdictEnum(str, Enum):
TRUE = "True"
FALSE = "False"
PARTIALLY_TRUE = "Partially True"
UNVERIFIED = "Unverified"
class ErrorResponse(BaseModel):
detail: str
error_code: str = Field(..., description="Unique error code for this type of error")
timestamp: str = Field(default_factory=lambda: datetime.now().isoformat())
path: Optional[str] = Field(None, description="The endpoint path where error occurred")
class ConfidenceEnum(str, Enum):
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
model_config = ConfigDict(json_schema_extra={
"example": {
"detail": "Error description",
"error_code": "ERROR_CODE",
"timestamp": "2024-12-09T16:49:30.905765",
"path": "/check-facts"
}
})
class FactCheckRequest(BaseModel):
query: str = Field(
...,
min_length=3,
max_length=500,
description="The claim or statement to be fact-checked",
example="Did NASA confirm finding alien structures on Mars in 2024?"
)
# Fact Check Models
class Publisher(BaseModel):
name: str
site: Optional[str] = Field(None, description="Publisher's website")
@validator('site')
def validate_site(cls, v):
if v and not (v.startswith('http://') or v.startswith('https://')):
return f"https://{v}"
class Source(BaseModel):
url: str
name: str = ""
@validator('url')
def validate_url(cls, v):
# Basic URL validation without requiring HTTP/HTTPS
if not v or len(v) < 3:
raise ValueError("URL must not be empty and must be at least 3 characters")
return v
class ClaimReview(BaseModel):
publisher: Publisher
url: Optional[HttpUrl] = None
title: Optional[str] = None
reviewDate: Optional[str] = None
textualRating: Optional[str] = None
languageCode: str = Field(default="en-US")
class Claim(BaseModel):
text: str
claimant: Optional[str] = None
claimDate: Optional[str] = None
claimReview: List[ClaimReview]
class SourceType(str, Enum):
FACT_CHECKER = "fact_checker"
NEWS_SITE = "news_site"
class FactCheckSource(BaseModel):
domain: str
type: SourceType
priority: int = Field(default=1, ge=1, le=10)
# Verification Models
class VerificationResult(BaseModel):
verdict: str = Field(..., description="True/False/Insufficient Information")
confidence: str = Field(..., description="High/Medium/Low")
evidence: Union[str, List[str]]
reasoning: str
missing_info: Optional[str] = None
model_config = ConfigDict(json_schema_extra={
"example": {
"verdict": "True",
"confidence": "High",
"evidence": ["Direct quote from source supporting the claim"],
"reasoning": "Detailed analysis of why the claim is considered true",
"missing_info": "Any caveats or limitations of the verification"
}
})
# Request Models
class BaseFactCheckRequest(BaseModel):
content: str = Field(
class FactCheckResponse(BaseModel):
claim: str = Field(
...,
min_length=10,
max_length=1000,
description="The claim to be fact-checked"
description="The exact claim being verified"
)
@validator('content')
def validate_content(cls, v):
if not v.strip():
raise ValueError("Content cannot be empty or just whitespace")
return v.strip()
class GoogleFactCheckRequest(BaseFactCheckRequest):
language: str = Field(default="en-US", pattern="^[a-z]{2}-[A-Z]{2}$")
max_results_per_source: int = Field(default=10, ge=1, le=50)
class AIFactCheckRequest(BaseFactCheckRequest):
urls: List[str] = Field(
verdict: VerdictEnum = Field(
...,
description="The verification verdict"
)
confidence: ConfidenceEnum = Field(
...,
description="Confidence level in the verdict"
)
sources: List[Source] = Field(
...,
min_items=1,
max_items=5,
description="List of URLs to check the content against. URLs will be prefixed with https:// if protocol is missing"
description="List of sources used in verification"
)
evidence: str = Field(
...,
min_length=20,
max_length=500,
description="Concise summary of key evidence"
)
explanation: str = Field(
...,
min_length=50,
max_length=1000,
description="Detailed explanation of verification findings"
)
additional_context: str = Field(
...,
min_length=20,
max_length=500,
description="Important context about the verification"
)
@validator('urls')
def validate_urls(cls, urls):
validated_urls = []
for url in urls:
if not url.strip():
raise ValueError("URL cannot be empty")
# Add https:// if no protocol specified
if not url.startswith(('http://', 'https://')):
url = f'https://{url}'
try:
result = urlparse(url)
if not result.netloc:
raise ValueError(f"Invalid URL structure for {url}")
validated_urls.append(url)
except Exception as e:
raise ValueError(f"Invalid URL {url}: {str(e)}")
return validated_urls
model_config = ConfigDict(json_schema_extra={
"example": {
"content": "Indian flag was drawn in BUET campus",
"urls": [
"www.altnews.in/article-about-flag",
"www.another-source.com/related-news"
]
}
})
# Response Models
class BaseFactCheckResponse(BaseModel):
query: str
token_usage: TokenUsage
sources: List[str]
model_config = ConfigDict(json_schema_extra={
"example": {
"query": "Example statement to verify",
"token_usage": {
"prompt_tokens": 100,
"completion_tokens": 50,
"total_tokens": 150
},
"sources": ["source1.com", "source2.com"],
}
})
class GoogleFactCheckResponse(BaseFactCheckResponse):
total_claims_found: int
results: List[Dict[str, Any]]
verification_result: Dict[str, Any]
summary: Dict[str, int]
model_config = ConfigDict(json_schema_extra={
"example": {
"query": "Example claim",
"total_claims_found": 1,
"results": [{
"text": "Example claim text",
"claimant": "Source name",
"claimReview": [{
"publisher": {
"name": "Fact Checker",
"site": "factchecker.com"
},
"textualRating": "True"
}]
}],
"verification_result": {
"verdict": "True",
class Config:
json_schema_extra = {
"example": {
"claim": "NASA confirmed finding alien structures on Mars in 2024",
"verdict": "False",
"confidence": "High",
"evidence": ["Supporting evidence"],
"reasoning": "Detailed analysis"
},
"sources": ["factchecker.com"],
"token_usage": {
"prompt_tokens": 100,
"completion_tokens": 50,
"total_tokens": 150
},
"summary": {
"total_sources": 1,
"fact_checking_sites_queried": 10
"sources": [
{
"url": "https://www.nasa.gov/mars-exploration",
"name": "NASA Mars Exploration"
},
{
"url": "https://factcheck.org/2024/mars-claims",
"name": "FactCheck.org"
}
],
"evidence": "NASA has made no such announcement. Recent Mars rover images show natural rock formations.",
"explanation": "Multiple fact-checking organizations investigated this claim. NASA's official communications and Mars mission reports from 2024 contain no mention of alien structures. The viral images being shared are misidentified natural geological formations.",
"additional_context": "Similar false claims about alien structures on Mars have circulated periodically since the first Mars rovers began sending back images."
}
}
})
class AIFactCheckResponse(BaseFactCheckResponse):
verification_result: Dict[str, VerificationResult] # Changed to Dict to store results per URL
model_config = ConfigDict(json_schema_extra={
"example": {
"query": "Indian flag was drawn in BUET campus",
"verification_result": {
"https://www.source1.com": {
"verdict": "True",
"confidence": "High",
"evidence": ["Supporting evidence from source 1"],
"reasoning": "Detailed analysis from source 1",
"missing_info": None
},
"https://www.source2.com": {
"verdict": "True",
"confidence": "Medium",
"evidence": ["Supporting evidence from source 2"],
"reasoning": "Analysis from source 2",
"missing_info": "Additional context needed"
}
},
"sources": ["source1.com", "source2.com"],
"token_usage": {
"prompt_tokens": 200,
"completion_tokens": 100,
"total_tokens": 300
}
}
})
# Backwards compatibility aliases
FactCheckRequest = GoogleFactCheckRequest
FactCheckResponse = GoogleFactCheckResponse
class ErrorResponse(BaseModel):
detail: str
error_code: str = Field(..., example="VALIDATION_ERROR")
path: str = Field(..., example="/check-facts")
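Note: a sketch of constructing the rewritten report-style FactCheckResponse directly; verdict and confidence are constrained to the enums above, and field lengths must respect the declared bounds (evidence and additional_context at least 20 characters, explanation at least 50). The values are illustrative only.

from app.models.fact_check_models import (
    ConfidenceEnum,
    FactCheckResponse,
    Source,
    VerdictEnum,
)

report = FactCheckResponse(
    claim="NASA confirmed finding alien structures on Mars in 2024",
    verdict=VerdictEnum.FALSE,
    confidence=ConfidenceEnum.HIGH,
    sources=[Source(url="https://factcheck.org/2024/mars-claims", name="FactCheck.org")],
    evidence="NASA has made no such announcement; the viral images show natural rock formations.",
    explanation="Multiple fact-checking organizations reviewed the claim and found no NASA statement or imagery from 2024 supporting it.",
    additional_context="Similar claims about structures on Mars have circulated since the first rover images were published.",
)
print(report.verdict.value)  # "False"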

View file

@@ -1,17 +1,120 @@
from typing import Dict, List
import requests
from fastapi import HTTPException
from app.models.fact_check_models import FactCheckSource, ErrorResponse, FactCheckRequest, SourceType
from app.models.ai_fact_check_models import FactCheckSource, ErrorResponse, FactCheckRequest, SourceType
# Sources configuration with validation
SOURCES = {
"fact_checkers": [
FactCheckSource(domain=domain, type=SourceType.FACT_CHECKER, priority=1)
for domain in [
"bbc.com",
"altnews.in",
"en.prothomalo.com"
]
"snopes.com",
"politifact.com",
"factcheck.org",
"reuters.com/fact-check",
"apnews.com/hub/ap-fact-check",
"bbc.com/news/reality_check",
"fullfact.org",
"afp.com/fact-check",
"truthorfiction.com",
"leadstories.com",
"checkyourfact.com",
"washingtonpost.com/news/fact-checker",
"factcheck.kz",
"poynter.org/ifcn",
"factcheckeu.info",
"africacheck.org",
"thequint.com/webqoof",
"altnews.in",
"facta.news",
"factcheckni.org",
"mythdetector.ge",
"verificado.mx",
"euvsdisinfo.eu",
"factcheck.afp.com",
"newtral.es",
"maldita.es",
"faktograf.hr",
"demagog.org.pl",
"factnameh.com",
"faktiskt.se",
"teyit.org",
"factly.in",
"boom.live",
"stopfake.org",
"factcheck.ge",
"factcheck.kg",
"factcheck.uz",
"factcheck.tj",
"factcheck.az",
"factcheck.am",
"factcheck.md",
"verafiles.org",
"rappler.com/fact-check",
"vera.com.gt",
"chequeado.com",
"aosfatos.org",
"lasillavacia.com/detector-mentiras",
"colombiacheck.com",
"ecuadorchequea.com",
"elsurti.com/checado",
"verificat.cat",
"mafindo.or.id",
"tempo.co/cek-fakta",
"factcheck.mk",
"raskrinkavanje.ba",
"faktograf.hr",
"demagog.cz",
"faktabaari.fi",
"correctiv.org",
"mimikama.at",
"factcheck.vlaanderen",
"factuel.afp.com",
"nieuwscheckers.nl",
"faktisk.no",
"tjekdet.dk",
"ellinikahoaxes.gr",
"faktograf.id",
"stopfake.kz",
"pesacheck.org",
"dubawa.org",
"namibiafactcheck.org.na",
"zimfact.org",
"ghanafact.com",
"factspace.africa",
"factcrescendo.com",
"vishvasnews.com",
"factcheck.lk",
"newschecker.in",
"boomlive.in",
"digiteye.in",
"indiatoday.in/fact-check",
"factcrescendo.com",
"piyasa.com/fact-check",
"taiwanese.facts.news",
"taiwanfactcheck.com",
"mygopen.com",
"tfc-taiwan.org.tw",
"cofacts.tw",
"rumor.taipei",
"fact.qq.com",
"factcheck.afp.com/list",
"acfta.org",
"crosscheck.firstdraftnews.org",
"healthfeedback.org",
"climatefeedback.org",
"sciencefeedback.co",
"factcheck.aap.com.au",
"emergent.info",
"hoax-slayer.net",
"truthorfiction.com",
"factcheck.media",
"mediawise.org",
"thejournal.ie/factcheck",
"journalistsresource.org",
"metafact.io",
"reporterslab.org/fact-checking"
]
],
"news_sites": [
FactCheckSource(domain=domain, type=SourceType.NEWS_SITE, priority=2)
@@ -82,5 +185,6 @@ def get_all_sources() -> List[FactCheckSource]:
"""
Get all sources sorted by priority
"""
all_sources = SOURCES["fact_checkers"] + SOURCES["news_sites"]
# all_sources = SOURCES["fact_checkers"] + SOURCES["news_sites"]
all_sources = SOURCES["fact_checkers"]
return sorted(all_sources, key=lambda x: x.priority)
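Note: a small sketch of how the expanded source list is consumed. get_all_sources() now returns only the fact-checker FactCheckSource entries, sorted by priority, and check_facts passes each domain to the Fact Check Tools claims:search call as a reviewPublisherSiteFilter. The key and query below are placeholders.

from app.websites.fact_checker_website import get_all_sources

for source in get_all_sources():
    params = {
        "key": "YOUR_GOOGLE_API_KEY",        # placeholder
        "query": "claim text to verify",     # placeholder
        "languageCode": "en-US",
        "reviewPublisherSiteFilter": source.domain,
        "pageSize": 10,
    }
    # one claims:search request is issued per source until a result with
    # claims is found (see check_facts above)
    print(source.domain, source.type.value, source.priority)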

View file

@@ -1,595 +0,0 @@
{
"kind": "customsearch#search",
"url": {
"type": "application/json",
"template": "https://www.googleapis.com/customsearch/v1?q={searchTerms}&num={count?}&start={startIndex?}&lr={language?}&safe={safe?}&cx={cx?}&sort={sort?}&filter={filter?}&gl={gl?}&cr={cr?}&googlehost={googleHost?}&c2coff={disableCnTwTranslation?}&hq={hq?}&hl={hl?}&siteSearch={siteSearch?}&siteSearchFilter={siteSearchFilter?}&exactTerms={exactTerms?}&excludeTerms={excludeTerms?}&linkSite={linkSite?}&orTerms={orTerms?}&dateRestrict={dateRestrict?}&lowRange={lowRange?}&highRange={highRange?}&searchType={searchType}&fileType={fileType?}&rights={rights?}&imgSize={imgSize?}&imgType={imgType?}&imgColorType={imgColorType?}&imgDominantColor={imgDominantColor?}&alt=json"
},
"queries": {
"request": [
{
"title": "Google Custom Search - Sheikh Hasina resigned as a Prime Minister of Bangladesh",
"totalResults": "758000",
"searchTerms": "Sheikh Hasina resigned as a Prime Minister of Bangladesh",
"count": 10,
"startIndex": 1,
"inputEncoding": "utf8",
"outputEncoding": "utf8",
"safe": "off",
"cx": "d437f1eb581de4590"
}
],
"nextPage": [
{
"title": "Google Custom Search - Sheikh Hasina resigned as a Prime Minister of Bangladesh",
"totalResults": "758000",
"searchTerms": "Sheikh Hasina resigned as a Prime Minister of Bangladesh",
"count": 10,
"startIndex": 11,
"inputEncoding": "utf8",
"outputEncoding": "utf8",
"safe": "off",
"cx": "d437f1eb581de4590"
}
]
},
"context": {
"title": "Prothom Alo"
},
"searchInformation": {
"searchTime": 0.513164,
"formattedSearchTime": "0.51",
"totalResults": "758000",
"formattedTotalResults": "758,000"
},
"items": [
{
"kind": "customsearch#result",
"title": "Sheikh Hasina: Euphoria in Bangladesh after PM flees country",
"htmlTitle": "\u003cb\u003eSheikh Hasina\u003c/b\u003e: Euphoria in \u003cb\u003eBangladesh\u003c/b\u003e after PM flees country",
"link": "https://www.bbc.com/news/articles/clywww69p2vo",
"displayLink": "www.bbc.com",
"snippet": "Aug 5, 2024 ... Bangladeshi Prime Minister Sheikh Hasina has resigned after weeks of deadly anti-government protests, putting an end to her more than two decades dominating ...",
"htmlSnippet": "Aug 5, 2024 \u003cb\u003e...\u003c/b\u003e \u003cb\u003eBangladeshi Prime Minister Sheikh Hasina\u003c/b\u003e has \u003cb\u003eresigned\u003c/b\u003e after weeks of deadly anti-government protests, putting an end to her more than two decades dominating&nbsp;...",
"formattedUrl": "https://www.bbc.com/news/articles/clywww69p2vo",
"htmlFormattedUrl": "https://www.bbc.com/news/articles/clywww69p2vo",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ2noEFH2T-yJo4oB7DU_MF2FqAUzIHU5paMXHka1ny_vMi037f2gtOZ3of&s",
"width": "300",
"height": "168"
}
],
"metatags": [
{
"msapplication-tilecolor": "#da532c",
"og:image": "https://ichef.bbci.co.uk/news/1024/branded_news/db85/live/388ebc30-5367-11ef-aebc-6de4d31bf5cd.jpg",
"apple-itunes-app": "app-id=364147881, app-argument=https://www.bbc.com/news/articles/clywww69p2vo",
"twitter:title": "Sheikh Hasina: Euphoria in Bangladesh after PM flees country",
"twitter:card": "summary_large_image",
"og:image:alt": "Protesters storming Prime Minister Sheikh Hasina's palace after she fled the country",
"theme-color": "#ffffff",
"al:ios:app_name": "BBC: World News & Stories",
"og:title": "Sheikh Hasina: Euphoria in Bangladesh after PM flees country",
"al:android:package": "bbc.mobile.news.ww",
"al:ios:url": "bbcx://news/articles/clywww69p2vo",
"al:web:url": "https://bbc.com/news/articles/clywww69p2vo",
"og:description": "President Mohammed Shahabuddin ordered the release of a jailed former prime minister.",
"version": "2.12.0+20",
"al:ios:app_store_id": "364147881",
"twitter:image:src": "https://ichef.bbci.co.uk/news/1024/branded_news/db85/live/388ebc30-5367-11ef-aebc-6de4d31bf5cd.jpg",
"al:android:url": "bbcx://news/articles/clywww69p2vo",
"next-head-count": "36",
"twitter:image:alt": "Protesters storming Prime Minister Sheikh Hasina's palace after she fled the country",
"viewport": "width=device-width",
"twitter:description": "President Mohammed Shahabuddin ordered the release of a jailed former prime minister.",
"al:android:app_name": "BBC: World News & Stories"
}
],
"cse_image": [
{
"src": "https://ichef.bbci.co.uk/news/1024/branded_news/db85/live/388ebc30-5367-11ef-aebc-6de4d31bf5cd.jpg"
}
]
}
},
{
"kind": "customsearch#result",
"title": "Bangladesh: Prime Minister Hasina Resigns amid Mass Protests ...",
"htmlTitle": "\u003cb\u003eBangladesh\u003c/b\u003e: \u003cb\u003ePrime Minister Hasina Resigns\u003c/b\u003e amid Mass Protests ...",
"link": "https://www.hrw.org/news/2024/08/06/bangladesh-prime-minister-hasina-resigns-amid-mass-protests",
"displayLink": "www.hrw.org",
"snippet": "Aug 6, 2024 ... (London) Bangladesh Prime Minister Sheikh Hasina resigned on August 5, 2024, and fled the country after weeks of student protests, ...",
"htmlSnippet": "Aug 6, 2024 \u003cb\u003e...\u003c/b\u003e (London) \u003cb\u003eBangladesh Prime Minister Sheikh Hasina resigned\u003c/b\u003e on August 5, 2024, and fled the country after weeks of student protests,&nbsp;...",
"formattedUrl": "https://www.hrw.org/.../bangladesh-prime-minister-hasina-resigns-amid-ma...",
"htmlFormattedUrl": "https://www.hrw.org/.../\u003cb\u003ebangladesh\u003c/b\u003e-\u003cb\u003eprime\u003c/b\u003e-\u003cb\u003eminister\u003c/b\u003e-\u003cb\u003ehasina\u003c/b\u003e-resigns-amid-ma...",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT7Rd-kZwml7ax4Q_93QFbon2bmbwYliEYvMil6qgM0xEG6tV72lS_iclM&s",
"width": "310",
"height": "163"
}
],
"metatags": [
{
"og:image": "https://www.hrw.org/sites/default/files/styles/opengraph/public/media_2024/08/202408asia_bangladesh_Sheikh%20Hasina.jpg?h=888143e8&itok=IKUTUc3F",
"og:image:alt": "Bangladeshs former Prime Minister Sheikh Hasina addresses the media in Mirpur after the anti-quota protests.",
"article:published_time": "2024-08-06T14:00:00-0400",
"twitter:card": "summary_large_image",
"twitter:title": "Bangladesh: Prime Minister Hasina Resigns amid Mass Protests",
"og:site_name": "Human Rights Watch",
"twitter:site:id": "14700316",
"handheldfriendly": "true",
"og:title": "Bangladesh: Prime Minister Hasina Resigns amid Mass Protests",
"google": "H_DzcJuJMJKVAO6atlPsK4HHr2WienspT6e74P5fVFY",
"og:updated_time": "2024-08-08T10:24:02-0400",
"og:description": "Bangladesh Prime Minister Sheikh Hasina resigned on August 5, 2024, and fled the country after weeks of student protests.",
"og:image:secure_url": "https://www.hrw.org/sites/default/files/styles/opengraph/public/media_2024/08/202408asia_bangladesh_Sheikh%20Hasina.jpg?h=888143e8&itok=IKUTUc3F",
"article:publisher": "https://www.facebook.com/HumanRightsWatch",
"twitter:image": "https://www.hrw.org/sites/default/files/styles/opengraph/public/media_2024/08/202408asia_bangladesh_Sheikh%20Hasina.jpg?h=888143e8&itok=IKUTUc3F",
"twitter:image:alt": "Bangladeshs former Prime Minister Sheikh Hasina addresses the media in Mirpur after the anti-quota protests.",
"twitter:site": "@hrw",
"article:modified_time": "2024-08-08T10:24:02-0400",
"viewport": "width=device-width, initial-scale=1.0",
"twitter:description": "Bangladesh Prime Minister Sheikh Hasina resigned on August 5, 2024, and fled the country after weeks of student protests.",
"mobileoptimized": "width",
"og:url": "https://www.hrw.org/news/2024/08/06/bangladesh-prime-minister-hasina-resigns-amid-mass-protests"
}
],
"cse_image": [
{
"src": "https://www.hrw.org/sites/default/files/styles/opengraph/public/media_2024/08/202408asia_bangladesh_Sheikh%20Hasina.jpg?h=888143e8&itok=IKUTUc3F"
}
]
}
},
{
"kind": "customsearch#result",
"title": "Bangladesh wakes up to new uncertain future after PM Sheikh ...",
"htmlTitle": "\u003cb\u003eBangladesh\u003c/b\u003e wakes up to new uncertain future after PM \u003cb\u003eSheikh\u003c/b\u003e ...",
"link": "https://www.bbc.com/news/live/ckdgg87lnkdt",
"displayLink": "www.bbc.com",
"snippet": "Aug 5, 2024 ... Yesterday's historic events saw Bangladesh's Prime Minister Sheikh Hasina resign from power and flee the country. Today, government ...",
"htmlSnippet": "Aug 5, 2024 \u003cb\u003e...\u003c/b\u003e Yesterday&#39;s historic events saw \u003cb\u003eBangladesh&#39;s Prime Minister Sheikh Hasina resign\u003c/b\u003e from power and flee the country. Today, government&nbsp;...",
"formattedUrl": "https://www.bbc.com/news/live/ckdgg87lnkdt",
"htmlFormattedUrl": "https://www.bbc.com/news/live/ckdgg87lnkdt",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ9V5V2pFKUOVvlosPa5swslIzMQnDiFW21RkSxNXvXxhrcyvRNZMc2bqXE&s",
"width": "300",
"height": "168"
}
],
"metatags": [
{
"og:image": "https://static.files.bbci.co.uk/ws/simorgh-assets/public/news/images/metadata/poster-1024x576.png",
"theme-color": "#FFFFFF",
"og:type": "article",
"twitter:title": "Bangladesh wakes up to new uncertain future after PM Sheikh Hasina's dramatic resignation",
"og:site_name": "BBC News",
"twitter:url": "https://www.bbc.com/news/live/ckdgg87lnkdt",
"og:title": "Bangladesh wakes up to new uncertain future after PM Sheikh Hasina's dramatic resignation",
"msapplication-tileimage": "https://static.files.bbci.co.uk/core/website/assets/static/icons/windows-phone/news/windows-phone-icon-270x270.23502b4459eb7a6ab2ab.png",
"og:description": "Looting and disorder have been reported in the South Asian nation, a day after mass protests forced Ms Hasina to flee and resign.",
"fb:pages": "1143803202301544,317278538359186,1392506827668140,742734325867560,185246968166196,156060587793370,137920769558355,193435954068976,21263239760,156400551056385,929399697073756,154344434967,228735667216,80758950658,260212261199,294662213128,1086451581439054,283348121682053,295830058648,239931389545417,304314573046,310719525611571,647687225371774,1159932557403143,286567251709437,1731770190373618,125309456546,163571453661989,285361880228,512423982152360,238003846549831,176663550714,260967092113,118450564909230,100978706649892,15286229625,122103087870579,120655094632228,102814153147070,124715648647,153132638110668,150467675018739",
"twitter:creator": "@BBCWorld",
"article:author": "https://www.facebook.com/bbcnews",
"twitter:image": "https://static.files.bbci.co.uk/ws/simorgh-assets/public/news/images/metadata/poster-1024x576.png",
"fb:app_id": "1609039196070050",
"twitter:site": "@BBCWorld",
"viewport": "width=device-width, initial-scale=1",
"twitter:description": "Looting and disorder have been reported in the South Asian nation, a day after mass protests forced Ms Hasina to flee and resign.",
"og:locale": "en_GB",
"og:image_alt": "BBC News",
"fb:admins": "100004154058350",
"og:url": "https://www.bbc.com/news/live/ckdgg87lnkdt",
"format-detection": "telephone=no"
}
],
"cse_image": [
{
"src": "https://static.files.bbci.co.uk/ws/simorgh-assets/public/news/images/metadata/poster-1024x576.png"
}
]
}
},
{
"kind": "customsearch#result",
"title": "Bangladesh protests: PM Sheikh Hasina flees to India as ...",
"htmlTitle": "\u003cb\u003eBangladesh\u003c/b\u003e protests: PM \u003cb\u003eSheikh Hasina\u003c/b\u003e flees to India as ...",
"link": "https://www.cnn.com/2024/08/05/asia/bangladesh-prime-minister-residence-stormed-intl/index.html",
"displayLink": "www.cnn.com",
"snippet": "Aug 6, 2024 ... The prime minister of Bangladesh, Sheikh Hasina, resigned and fled to neighboring India on Monday after protesters stormed her official ...",
"htmlSnippet": "Aug 6, 2024 \u003cb\u003e...\u003c/b\u003e The \u003cb\u003eprime minister of Bangladesh\u003c/b\u003e, \u003cb\u003eSheikh Hasina\u003c/b\u003e, \u003cb\u003eresigned\u003c/b\u003e and fled to neighboring India on Monday after protesters stormed her official&nbsp;...",
"formattedUrl": "https://www.cnn.com/2024/08/05/.../bangladesh-prime-minister.../index.ht...",
"htmlFormattedUrl": "https://www.cnn.com/2024/08/05/.../\u003cb\u003ebangladesh\u003c/b\u003e-\u003cb\u003eprime\u003c/b\u003e-\u003cb\u003eminister\u003c/b\u003e.../index.ht...",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcScyayfP1an0tjs821kLSqSGIsgUFwc02vkRXh6ERXuqeV7xOEt3sC__sM&s",
"width": "300",
"height": "168"
}
],
"metatags": [
{
"og:image": "https://media.cnn.com/api/v1/images/stellar/prod/ap24218390125876-2.jpg?c=16x9&q=w_800,c_fill",
"twitter:title": "Bangladesh prime minister flees to India as anti-government protesters storm her residence | CNN",
"og:type": "article",
"twitter:card": "summary_large_image",
"article:published_time": "2024-08-05T10:01:00.074Z",
"og:site_name": "CNN",
"author": "Isaac Yee, Tanbirul Miraj Ripon",
"og:title": "Bangladesh prime minister flees to India as anti-government protesters storm her residence | CNN",
"meta-section": "world",
"type": "article",
"og:description": "The prime minister of Bangladesh, Sheikh Hasina, resigned and fled to neighboring India on Monday after protesters stormed her official residence after weeks of deadly anti-government demonstrations in the South Asian nation.",
"twitter:image": "https://media.cnn.com/api/v1/images/stellar/prod/ap24218390125876-2.jpg?c=16x9&q=w_800,c_fill",
"article:publisher": "https://www.facebook.com/CNN",
"fb:app_id": "80401312489",
"twitter:site": "@CNN",
"article:modified_time": "2024-08-06T05:24:05.249Z",
"viewport": "width=device-width,initial-scale=1,shrink-to-fit=no",
"twitter:description": "The prime minister of Bangladesh, Sheikh Hasina, resigned and fled to neighboring India on Monday after protesters stormed her official residence after weeks of deadly anti-government demonstrations in the South Asian nation.",
"template_type": "article_leaf",
"theme": "world",
"og:url": "https://www.cnn.com/2024/08/05/asia/bangladesh-prime-minister-residence-stormed-intl/index.html"
}
],
"cse_image": [
{
"src": "https://media.cnn.com/api/v1/images/stellar/prod/ap24218390125876-2.jpg?c=16x9&q=w_800,c_fill"
}
]
}
},
{
"kind": "customsearch#result",
"title": "Why did Bangladesh PM Sheikh Hasina resign and where is she ...",
"htmlTitle": "Why did \u003cb\u003eBangladesh\u003c/b\u003e PM \u003cb\u003eSheikh Hasina resign\u003c/b\u003e and where is she ...",
"link": "https://www.reuters.com/world/asia-pacific/why-did-bangladesh-pm-sheikh-hasina-resign-where-is-she-now-2024-08-06/",
"displayLink": "www.reuters.com",
"snippet": "Aug 6, 2024 ... Aug 7 (Reuters) - Sheikh Hasina resigned as Bangladesh's prime minister and fled the country on Monday following weeks of dedly protests ...",
"htmlSnippet": "Aug 6, 2024 \u003cb\u003e...\u003c/b\u003e Aug 7 (Reuters) - \u003cb\u003eSheikh Hasina resigned\u003c/b\u003e as \u003cb\u003eBangladesh&#39;s prime minister\u003c/b\u003e and fled the country on Monday following weeks of dedly protests&nbsp;...",
"formattedUrl": "https://www.reuters.com/.../why-did-bangladesh-pm-sheikh-hasina-resign-...",
"htmlFormattedUrl": "https://www.reuters.com/.../why-did-\u003cb\u003ebangladesh\u003c/b\u003e-pm-\u003cb\u003esheikh\u003c/b\u003e-\u003cb\u003ehasina\u003c/b\u003e-resign-...",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcR_IDuyjGdce77t1tWrSwheC6g8XSyuUQKn_KxA0H9x3eCRV4kretMyY0_J&s",
"width": "310",
"height": "162"
}
],
"metatags": [
{
"apple-itunes-app": "app-id=602660809, app-argument=https://www.reuters.com/world/asia-pacific/why-did-bangladesh-pm-sheikh-hasina-resign-where-is-she-now-2024-08-06/?id=E5O5XBJMZBPTDAUM7I6BFYX4UA",
"og:image": "https://www.reuters.com/resizer/v2/QSLRZINWOVJ25LWOYIOOXO4L6A.jpg?auth=581b869970d6c61101b4e8bba552bd5ae55ec08c8a333a33ef63a72f57b8f0c4&height=1005&width=1920&quality=80&smart=true",
"analytics:page_layout": "regular-article",
"article:published_time": "2024-08-07T03:23:35Z",
"og:image:width": "1200",
"twitter:card": "summary_large_image",
"og:site_name": "Reuters",
"og:article:modified_time": "2024-08-07T03:51:39.907Z",
"ccbot": "nofollow",
"analytics:ad_layout": "leaderboard, right rail, sponsored",
"analyticsattributes.topicchannel": "World",
"title": "Why did Bangladesh PM Sheikh Hasina resign and where is she now? | Reuters",
"og:description": "Sheikh Hasina resigned as Bangladesh's prime minister and fled the country on Monday following weeks of dedly protests that began as demonstrations by students against government job quotas but surged into a movement demanding her resignation.",
"twitter:creator": "@Reuters",
"twitter:image": "https://www.reuters.com/resizer/v2/QSLRZINWOVJ25LWOYIOOXO4L6A.jpg?auth=581b869970d6c61101b4e8bba552bd5ae55ec08c8a333a33ef63a72f57b8f0c4&height=1005&width=1920&quality=80&smart=true",
"twitter:image:alt": "Bangladeshi Prime Minister Sheikh Hasina reviews an honour guard at the Government House, during her visit to Thailand, in Bangkok, Thailand, April 26, 2024. REUTERS/Athit Perawongmetha/File Photo",
"twitter:site": "@Reuters",
"article:modified_time": "2024-08-07T03:51:39.907Z",
"fb:admins": "988502044532272",
"article:content_tier": "metered",
"og:type": "article",
"article:section": "Asia Pacific",
"og:image:alt": "Bangladeshi Prime Minister Sheikh Hasina reviews an honour guard at the Government House, during her visit to Thailand, in Bangkok, Thailand, April 26, 2024. REUTERS/Athit Perawongmetha/File Photo",
"twitter:title": "Why did Bangladesh PM Sheikh Hasina resign and where is she now?",
"ad:template": "article",
"og:image:url": "https://www.reuters.com/resizer/v2/QSLRZINWOVJ25LWOYIOOXO4L6A.jpg?auth=581b869970d6c61101b4e8bba552bd5ae55ec08c8a333a33ef63a72f57b8f0c4&height=1005&width=1920&quality=80&smart=true",
"dcsext.dartzone": "/4735792/reuters.com/world/apac/article",
"og:title": "Why did Bangladesh PM Sheikh Hasina resign and where is she now?",
"dcsext.channellist": "World;World;Asia Pacific;Asian Markets",
"og:image:height": "628",
"og:article:published_time": "2024-08-07T03:23:35Z",
"og:updated_time": "2024-08-07T03:51:39.907Z",
"fb:pages": "114050161948682",
"article:author": "Sudipto Ganguly",
"article:tag": "MTVID,EXPLN,TOPNWS,ANLINS,CIV,CWP,DIP,DLI,ECI,ECO,EDU,GEN,JOB,MCE,MPLT,MPOP,NEWS1,POL,RACR,SOCI,TOPCMB,VIO,SASIA,IN,PK,ASXPAC,BD,EMRG,ASIA,PACKAGE:US-TOP-NEWS,PACKAGE:WORLD-NEWS",
"analyticsattributes.topicsubchannel": "Asia Pacific",
"fb:app_id": "988502044532272",
"og:locale:alternate": "en_US",
"viewport": "width=device-width, initial-scale=1",
"twitter:description": "Sheikh Hasina resigned as Bangladesh's prime minister and fled the country on Monday following weeks of dedly protests that began as demonstrations by students against government job quotas but surged into a movement demanding her resignation.",
"og:locale": "en_US",
"og:url": "https://www.reuters.com/world/asia-pacific/why-did-bangladesh-pm-sheikh-hasina-resign-where-is-she-now-2024-08-06/"
}
],
"cse_image": [
{
"src": "https://www.reuters.com/resizer/v2/QSLRZINWOVJ25LWOYIOOXO4L6A.jpg?auth=581b869970d6c61101b4e8bba552bd5ae55ec08c8a333a33ef63a72f57b8f0c4&height=1005&width=1920&quality=80&smart=true"
}
]
}
},
{
"kind": "customsearch#result",
"title": "Bangladesh's 'Gen Z revolution' toppled PM Sheikh Hasina. Why did ...",
"htmlTitle": "\u003cb\u003eBangladesh&#39;s\u003c/b\u003e &#39;Gen Z revolution&#39; toppled PM \u003cb\u003eSheikh Hasina\u003c/b\u003e. Why did ...",
"link": "https://www.cnn.com/2024/08/06/asia/bangladesh-protests-hasina-resignation-explainer-intl-hnk/index.html",
"displayLink": "www.cnn.com",
"snippet": "Aug 6, 2024 ... People celebrate the resignation of Prime Minister Sheikh Hasina in Dhaka, Bangladesh, on August 5, 2024. Mohammad Ponir Hossain/Reuters. CNN —.",
"htmlSnippet": "Aug 6, 2024 \u003cb\u003e...\u003c/b\u003e People celebrate the \u003cb\u003eresignation\u003c/b\u003e of \u003cb\u003ePrime Minister Sheikh Hasina\u003c/b\u003e in Dhaka, \u003cb\u003eBangladesh\u003c/b\u003e, on August 5, 2024. Mohammad Ponir Hossain/Reuters. CNN —.",
"formattedUrl": "https://www.cnn.com/2024/08/06/asia/bangladesh...hasina.../index.html",
"htmlFormattedUrl": "https://www.cnn.com/2024/08/06/asia/\u003cb\u003ebangladesh\u003c/b\u003e...\u003cb\u003ehasina\u003c/b\u003e.../index.html",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTOW5T__EO6GShxs6es-aGavTBFUU2GCU-SyqlBE3t5d0hFX5WugbjKA-JH&s",
"width": "300",
"height": "168"
}
],
"metatags": [
{
"og:image": "https://media.cnn.com/api/v1/images/stellar/prod/2024-08-05t184829z-2105365796-rc2l99a18sqr-rtrmadp-3-bangladesh-protests.jpg?c=16x9&q=w_800,c_fill",
"twitter:title": "Bangladeshs Gen Z revolution toppled a veteran leader. Why did they hit the streets and what happens now? | CNN",
"og:type": "article",
"twitter:card": "summary_large_image",
"article:published_time": "2024-08-06T08:16:31.519Z",
"og:site_name": "CNN",
"author": "Helen Regan",
"og:title": "Bangladeshs Gen Z revolution toppled a veteran leader. Why did they hit the streets and what happens now? | CNN",
"meta-section": "world",
"type": "article",
"og:description": "Inside Bangladesh its being dubbed a Gen Z revolution a protest movement that pitted mostly young student demonstrators against a 76-year-old leader who had dominated her nation for decades and turned increasingly authoritarian in recent years.",
"twitter:image": "https://media.cnn.com/api/v1/images/stellar/prod/2024-08-05t184829z-2105365796-rc2l99a18sqr-rtrmadp-3-bangladesh-protests.jpg?c=16x9&q=w_800,c_fill",
"article:publisher": "https://www.facebook.com/CNN",
"fb:app_id": "80401312489",
"twitter:site": "@CNN",
"article:modified_time": "2024-08-07T03:48:11.066Z",
"viewport": "width=device-width,initial-scale=1,shrink-to-fit=no",
"twitter:description": "Inside Bangladesh its being dubbed a Gen Z revolution a protest movement that pitted mostly young student demonstrators against a 76-year-old leader who had dominated her nation for decades and turned increasingly authoritarian in recent years.",
"template_type": "article_leaf",
"theme": "world",
"og:url": "https://www.cnn.com/2024/08/06/asia/bangladesh-protests-hasina-resignation-explainer-intl-hnk/index.html"
}
],
"cse_image": [
{
"src": "https://media.cnn.com/api/v1/images/stellar/prod/2024-08-05t184829z-2105365796-rc2l99a18sqr-rtrmadp-3-bangladesh-protests.jpg?c=16x9&q=w_800,c_fill"
}
]
}
},
{
"kind": "customsearch#result",
"title": "Bangladesh PM Sheikh Hasina resigns, ending 15 years in power ...",
"htmlTitle": "\u003cb\u003eBangladesh\u003c/b\u003e PM \u003cb\u003eSheikh Hasina resigns\u003c/b\u003e, ending 15 years in power ...",
"link": "https://www.npr.org/2024/08/05/g-s1-15332/bangladesh-protests",
"displayLink": "www.npr.org",
"snippet": "Aug 5, 2024 ... DHAKA, Bangladesh — Bangladesh's Prime Minister Sheikh Hasina resigned on Monday, ending 15 years in power as thousands of protesters defied ...",
"htmlSnippet": "Aug 5, 2024 \u003cb\u003e...\u003c/b\u003e DHAKA, \u003cb\u003eBangladesh\u003c/b\u003e — \u003cb\u003eBangladesh&#39;s Prime Minister Sheikh Hasina resigned\u003c/b\u003e on Monday, ending 15 years in power as thousands of protesters defied&nbsp;...",
"formattedUrl": "https://www.npr.org/2024/08/05/g-s1-15332/bangladesh-protests",
"htmlFormattedUrl": "https://www.npr.org/2024/08/05/g-s1-15332/\u003cb\u003ebangladesh\u003c/b\u003e-protests",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSqvTrTl13trd-nrF4oQvAQOY3z2N2MfxSSyZsmd4Pm6E_e0TTbu0ER6zE&s",
"width": "300",
"height": "168"
}
],
"speakablespecification": [
{
"cssselector": "[data-is-speakable]"
}
],
"metatags": [
{
"date": "2024-08-05",
"apple-itunes-app": "app-id=324906251, app-argument=https://www.npr.org/2024/08/05/g-s1-15332/bangladesh-protests",
"og:image": "https://npr.brightspotcdn.com/dims3/default/strip/false/crop/6043x3399+0+315/resize/1400/quality/100/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fba%2F99%2Ff772f9bd44ee9b1ddf5a4d9d1d98%2Fap24217447347066.jpg",
"og:type": "article",
"twitter:card": "summary_large_image",
"twitter:title": "Bangladesh PM Sheikh Hasina resigns, ending 15 years in power, as thousands protest",
"og:site_name": "NPR",
"cxenseparse:pageclass": "article",
"twitter:domain": "npr.org",
"cxenseparse:publishtime": "2024-08-05T04:07:23-04:00",
"og:title": "Bangladesh PM Sheikh Hasina resigns, ending 15 years in power, as thousands protest",
"rating": "General",
"og:description": "At least 95 people, including at least 14 police officers, died in clashes in the capital on Sunday. Broadband internet and mobile data services were cut off for about three hours on Monday.",
"fb:pages": "10643211755",
"twitter:image:src": "https://npr.brightspotcdn.com/dims3/default/strip/false/crop/6043x3399+0+315/resize/1400/quality/100/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fba%2F99%2Ff772f9bd44ee9b1ddf5a4d9d1d98%2Fap24217447347066.jpg",
"fb:app_id": "138837436154588",
"cxenseparse:author": "The Associated Press",
"twitter:site": "@NPR",
"article:modified_time": "2024-08-05T06:50:55-04:00",
"viewport": "width=device-width, initial-scale=1, shrink-to-fit=no",
"article:content_tier": "free",
"og:url": "https://www.npr.org/2024/08/05/g-s1-15332/bangladesh-protests",
"article:opinion": "false"
}
],
"cse_image": [
{
"src": "https://npr.brightspotcdn.com/dims3/default/strip/false/crop/6043x3399+0+315/resize/1400/quality/100/format/jpeg/?url=http%3A%2F%2Fnpr-brightspot.s3.amazonaws.com%2Fba%2F99%2Ff772f9bd44ee9b1ddf5a4d9d1d98%2Fap24217447347066.jpg"
}
]
}
},
{
"kind": "customsearch#result",
"title": "Tens of thousands protest in Bangladesh to demand resignation of ...",
"htmlTitle": "Tens of thousands protest in \u003cb\u003eBangladesh\u003c/b\u003e to demand \u003cb\u003eresignation\u003c/b\u003e of ...",
"link": "https://www.cnn.com/2022/12/11/asia/bangladesh-protests-prime-minister-sheikh-hasina-intl-hnk/index.html",
"displayLink": "www.cnn.com",
"snippet": "Dec 11, 2022 ... Supporters of Bangladesh's opposition party protest against the government of Prime Minister Sheikh Hasina on December 10, 2022. Mamunur Rashid/ ...",
"htmlSnippet": "Dec 11, 2022 \u003cb\u003e...\u003c/b\u003e Supporters of \u003cb\u003eBangladesh&#39;s\u003c/b\u003e opposition party protest against the government of \u003cb\u003ePrime Minister Sheikh Hasina\u003c/b\u003e on December 10, 2022. Mamunur Rashid/&nbsp;...",
"formattedUrl": "https://www.cnn.com/.../bangladesh...prime-minister-sheikh-hasina.../index....",
"htmlFormattedUrl": "https://www.cnn.com/.../\u003cb\u003ebangladesh\u003c/b\u003e...\u003cb\u003eprime\u003c/b\u003e-\u003cb\u003eminister\u003c/b\u003e-\u003cb\u003esheikh\u003c/b\u003e-\u003cb\u003ehasina\u003c/b\u003e.../index....",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ-JqzYxoZHcQ5wWQhH5Xq-JrKFFyWbdfS339bDlIrhMrc2Y_9BznDwjN5u&s",
"width": "275",
"height": "183"
}
],
"metatags": [
{
"og:image": "https://media.cnn.com/api/v1/images/stellar/prod/221210230748-02-dhaka-protests-121022.jpg?c=16x9&q=w_800,c_fill",
"twitter:title": "Tens of thousands protest in Bangladesh to demand resignation of Prime Minister | CNN",
"og:type": "article",
"twitter:card": "summary_large_image",
"article:published_time": "2022-12-11T06:09:58Z",
"og:site_name": "CNN",
"author": "Vedika Sud,Yong Xiong",
"og:title": "Tens of thousands protest in Bangladesh to demand resignation of Prime Minister | CNN",
"meta-section": "world",
"type": "article",
"og:description": "Tens of thousands of protesters took to the streets of Dhaka on Saturday calling for the dissolution of parliament to make way for new elections, and demand the resignation of Bangladeshi Prime Minister Sheikh Hasina.",
"twitter:image": "https://media.cnn.com/api/v1/images/stellar/prod/221210230748-02-dhaka-protests-121022.jpg?c=16x9&q=w_800,c_fill",
"article:publisher": "https://www.facebook.com/CNN",
"article:tag": "asia, bangladesh, brand safety-nsf other, brand safety-nsf sensitive, british national party, civil disobedience, continents and regions, domestic alerts, domestic-international news, elections and campaigns, government and public administration, iab-elections, iab-politics, political figures - intl, political organizations, political parties - intl, politics, protests and demonstrations, resignations, sheikh hasina, society, south asia",
"fb:app_id": "80401312489",
"twitter:site": "@CNN",
"article:modified_time": "2022-12-11T06:09:58Z",
"viewport": "width=device-width,initial-scale=1,shrink-to-fit=no",
"twitter:description": "Tens of thousands of protesters took to the streets of Dhaka on Saturday calling for the dissolution of parliament to make way for new elections, and demand the resignation of Bangladeshi Prime Minister Sheikh Hasina.",
"template_type": "article_leaf",
"theme": "world",
"og:url": "https://www.cnn.com/2022/12/11/asia/bangladesh-protests-prime-minister-sheikh-hasina-intl-hnk/index.html"
}
],
"cse_image": [
{
"src": "https://media.cnn.com/api/v1/images/stellar/prod/221210230749-dhaka-protests-221207.jpg?q=w_1110,c_fill"
}
]
}
},
{
"kind": "customsearch#result",
"title": "Timeline of events leading to the resignation of Bangladesh Prime ...",
"htmlTitle": "Timeline of events leading to the \u003cb\u003eresignation\u003c/b\u003e of \u003cb\u003eBangladesh Prime\u003c/b\u003e ...",
"link": "https://www.voanews.com/a/timeline-of-events-leading-to-the-resignation-of-bangladesh-prime-minister-sheikh-hasina/7731456.html",
"displayLink": "www.voanews.com",
"snippet": "Aug 5, 2024 ... Bangladesh Prime Minister Sheikh Hasina resigned and left the country Monday after clashes between student protesters and police left nearly 300 people dead.",
"htmlSnippet": "Aug 5, 2024 \u003cb\u003e...\u003c/b\u003e \u003cb\u003eBangladesh Prime Minister Sheikh Hasina resigned\u003c/b\u003e and left the \u003cb\u003ecountry Monday\u003c/b\u003e after clashes between student protesters and police left nearly 300 people dead.",
"formattedUrl": "https://www.voanews.com/...bangladesh-prime-minister-sheikh-hasina/7731...",
"htmlFormattedUrl": "https://www.voanews.com/...\u003cb\u003ebangladesh\u003c/b\u003e-\u003cb\u003eprime\u003c/b\u003e-\u003cb\u003eminister\u003c/b\u003e-\u003cb\u003esheikh\u003c/b\u003e-\u003cb\u003ehasina\u003c/b\u003e/7731...",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcS2o9D0XbnmDtsmWEVzDYCwGv4IHKkzATikOvXEDghsD_uzZj-G6_63zGyR&s",
"width": "311",
"height": "162"
}
],
"metatags": [
{
"msapplication-tilecolor": "#ffffff",
"apple-itunes-app": "app-id=632618796, app-argument=//7731456.ltr",
"og:image": "https://gdb.voanews.com/28CFE9FB-9B7B-4474-8342-C7BC5434B54A.jpg",
"og:type": "article",
"og:image:width": "308",
"twitter:card": "summary_large_image",
"og:site_name": "Voice of America",
"msvalidate.01": "3286EE554B6F672A6F2E608C02343C0E",
"author": "Sabir Mustafa",
"apple-mobile-web-app-title": "VOA",
"og:title": "Timeline of events leading to the resignation of Bangladesh Prime Minister Sheikh Hasina",
"msapplication-tileimage": "/Content/responsive/VOA/img/webApp/ico-144x144.png",
"fb:pages": "36235438073",
"og:description": "Hasina resigns after weeks of clashes between student protesters and police leave nearly 300 dead",
"article:publisher": "https://www.facebook.com/voiceofamerica",
"twitter:image": "https://gdb.voanews.com/28CFE9FB-9B7B-4474-8342-C7BC5434B54A.jpg",
"fb:app_id": "362002700549372",
"apple-mobile-web-app-status-bar-style": "black",
"twitter:site": "@voanews",
"viewport": "width=device-width, initial-scale=1.0",
"twitter:description": "Hasina resigns after weeks of clashes between student protesters and police leave nearly 300 dead",
"og:url": "https://www.voanews.com/a/timeline-of-events-leading-to-the-resignation-of-bangladesh-prime-minister-sheikh-hasina/7731456.html"
}
],
"cse_image": [
{
"src": "https://gdb.voanews.com/28CFE9FB-9B7B-4474-8342-C7BC5434B54A.jpg"
}
]
}
},
{
"kind": "customsearch#result",
"title": "Bangladesh's Sheikh Hasina forced to resign: What happened and ...",
"htmlTitle": "\u003cb\u003eBangladesh&#39;s Sheikh Hasina\u003c/b\u003e forced to \u003cb\u003eresign\u003c/b\u003e: What happened and ...",
"link": "https://www.aljazeera.com/news/2024/8/5/bangladeshs-sheikh-hasina-forced-to-resign-what-happened-and-whats-next",
"displayLink": "www.aljazeera.com",
"snippet": "Aug 5, 2024 ... Bangladesh Prime Minister Sheikh Hasina has stepped down from office, ending 15 years of what the opposition says was “authoritarian rule” and sparking ...",
"htmlSnippet": "Aug 5, 2024 \u003cb\u003e...\u003c/b\u003e \u003cb\u003eBangladesh Prime Minister Sheikh Hasina\u003c/b\u003e has \u003cb\u003estepped down\u003c/b\u003e from office, ending 15 years of what the opposition says was “authoritarian rule” and sparking&nbsp;...",
"formattedUrl": "https://www.aljazeera.com/.../bangladeshs-sheikh-hasina-forced-to-resign-w...",
"htmlFormattedUrl": "https://www.aljazeera.com/.../\u003cb\u003ebangladesh\u003c/b\u003es-\u003cb\u003esheikh\u003c/b\u003e-\u003cb\u003ehasina\u003c/b\u003e-forced-to-resign-w...",
"pagemap": {
"cse_thumbnail": [
{
"src": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcS2uyLUKVFCDpJ-_MjZ6dRKW5_LC1zknAIICxM5ZcVuAZtYqupigTOI_l_0&s",
"width": "259",
"height": "194"
}
],
"metatags": [
{
"pagetype": "Article Page",
"og:image": "https://www.aljazeera.com/wp-content/uploads/2024/08/AP24218390076912-1722855595.jpg?resize=1920%2C1440",
"apple-itunes-app": "app-id=1534955972",
"twitter:card": "summary_large_image",
"og:site_name": "Al Jazeera",
"postlabel": "Explainer",
"twitter:url": "https://www.aljazeera.com/news/2024/8/5/bangladeshs-sheikh-hasina-forced-to-resign-what-happened-and-whats-next",
"pagesection": "Explainer,News,Sheikh Hasina",
"channel": "aje",
"publisheddate": "2024-08-05T15:14:49",
"postid": "3096869",
"source": "Al Jazeera",
"og:description": "Prime minister reportedly flees to India after weeks of antigovernment protests.",
"taxonomyterms": "News, Sheikh Hasina, Asia, Bangladesh",
"lastdate": "2024-08-05T15:40:26",
"primarytopic": "News",
"twitter:image:alt": "Sheikh Hasina forced to resign: What happened and whats next?",
"sourcetaxonomy": "Al Jazeera",
"internalreporting": "Break it down for me",
"where": "Asia, Bangladesh",
"primarytag": "Sheikh Hasina",
"ga4": "G-XN9JB9Q0M1",
"twitter:account_id": "5536782",
"og:type": "article",
"twitter:title": "Sheikh Hasina forced to resign: What happened and whats next?",
"taxonomy-tags": "News, Sheikh Hasina",
"topics": "News",
"og:title": "Sheikh Hasina forced to resign: What happened and whats next?",
"tags": "Sheikh Hasina",
"contenttype": "post",
"twitter:image:src": "https://www.aljazeera.com/wp-content/uploads/2024/08/AP24218390076912-1722855595.jpg?resize=1920%2C1440",
"articleslug": "bangladeshs-sheikh-hasina-forced-to-resign-what-happened-and-whats-next",
"postlink": "/news/2024/8/5/bangladeshs-sheikh-hasina-forced-to-resign-what-happened-and-whats-next",
"viewport": "width=device-width,initial-scale=1,shrink-to-fit=no",
"twitter:description": "Prime minister reportedly flees to India after weeks of antigovernment protests.",
"pagetitle": "Bangladeshs Sheikh Hasina forced to resign: What happened and whats next?",
"og:url": "https://www.aljazeera.com/news/2024/8/5/bangladeshs-sheikh-hasina-forced-to-resign-what-happened-and-whats-next"
}
],
"cse_image": [
{
"src": "https://www.aljazeera.com/wp-content/uploads/2024/08/AP24218390076912-1722855595.jpg?resize=1920%2C1440"
}
]
}
}
]
}
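
The payload above is the raw Google Custom Search response that the search step returns before any report generation. As a rough illustration of how such a response can be boiled down for citation purposes, the sketch below pulls unique URL/source-name pairs out of the "items" array. This is not the application's actual parser: the extract_sources helper and the custom_search_response.json fixture filename are placeholders introduced only for this example.

import json


def extract_sources(response: dict) -> list[dict]:
    """Reduce a Custom Search response to unique source URL/name pairs."""
    sources, seen = [], set()
    for item in response.get("items", []):
        url = item.get("link")
        name = item.get("displayLink", "")
        if url and url not in seen:
            seen.add(url)
            sources.append({"url": url, "name": name})
    return sources


if __name__ == "__main__":
    # custom_search_response.json is a hypothetical fixture holding the payload shown above.
    with open("custom_search_response.json", encoding="utf-8") as fh:
        for source in extract_sources(json.load(fh)):
            print(f"{source['name']}: {source['url']}")

Deduplicating on the "link" field keeps each outlet to a single citation even when several snippets from the same article appear in the result set.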