|
|
import os |
|
|
import sys |
|
|
import joblib |
|
|
import pandas as pd |
|
|
import json |
|
|
import re |
|
|
import uuid |
|
|
|
|
|
from huggingface_hub import hf_hub_download |
|
|
|
|
|
from fastapi import FastAPI, HTTPException, Depends, BackgroundTasks |
|
|
from supabase import Client |
|
|
|
|
|
from pydantic import BaseModel, Field |
|
|
from pydantic.config import ConfigDict |
|
|
|
|
|
from typing import List, Optional, Any, Dict |
|
|
import traceback |
|
|
from llama_cpp import Llama |
|
|
from statsmodels.tsa.api import Holt |
|
|
from dateutil.relativedelta import relativedelta |
|
|
from sklearn.preprocessing import LabelEncoder |
|
|
from core.support_agent import SupportAgent |
|
|
from core.strategist import AIStrategist |
|
|
from core.predictor import rank_influencers_by_match |
|
|
|
|
from core.anomaly_detector import find_anomalies |
|
|
from core.matcher import rank_documents_by_similarity |
|
|
from core.utils import get_supabase_client, extract_colors_from_url |
|
|
from core.document_parser import parse_pdf_from_url |
|
|
from core.creative_chat import CreativeDirector |
|
|
from core.matcher import load_embedding_model |
|
|
from core.community_brain import CommunityBrain |
|
|
from core.thunderbird_engine import get_external_trends, predict_niche_trends |
|
|
|
|
|
try: |
|
|
from core.rag.store import VectorStore |
|
|
from core.inference.cache import cached_response |
|
|
except ImportError: |
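    # Optional components: fall back to no vector store and a no-op cache decorator when these packages are absent.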
|
|
VectorStore = None |
|
|
def cached_response(func): return func |
|
|
|
|
|
|
|
|
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) |
|
|
MODELS_DIR = os.path.join(ROOT_DIR, 'models') |
|
|
|
|
|
|
|
|
MODEL_REPO = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF" |
|
|
MODEL_FILENAME = "tinyllama-1.1b-chat-v1.0.Q2_K.gguf" |
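# Q2_K is a heavily quantized (2-bit) GGUF build: a very small memory footprint at some cost in output quality.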
|
|
|
|
|
MODEL_SAVE_DIRECTORY = os.path.join(os.environ.get("WRITABLE_DIR", "/data"), "llm_model") |
|
|
LLAMA_MODEL_PATH = os.path.join(MODEL_SAVE_DIRECTORY, MODEL_FILENAME) |
|
|
EMBEDDING_MODEL_PATH = os.path.join(ROOT_DIR, 'embedding_model') |
|
|
DB_PATH = os.path.join(os.environ.get("WRITABLE_DIR", "/tmp"), "vector_db_persistent") |
|
|
|
|
|
|
|
|
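# Module-level singletons: populated once in the startup event and reused across requests.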
_llm_instance = None |
|
|
_vector_store = None |
|
|
_ai_strategist = None |
|
|
_creative_director = None |
|
|
_support_agent = None |
|
|
_budget_predictor = None |
|
|
_influencer_matcher = None |
|
|
_performance_predictor = None |
|
|
_payout_forecaster = None |
|
|
_earnings_optimizer = None |
|
|
_earnings_encoder = None |
|
|
_likes_predictor = None |
|
|
_comments_predictor = None |
|
|
_revenue_forecaster = None |
|
|
_performance_scorer = None |
|
|
_community_brain = None |
|
|
|
|
|
def to_snake(name: str) -> str: |
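    """Convert a CamelCase name to snake_case, e.g. "NewUsers" -> "new_users"."""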
|
|
return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower() |
|
|
|
|
|
def get_lazy_llm(): |
|
|
"""Wakes up the AI model only when it's needed.""" |
|
|
global _llm_instance |
|
|
if _llm_instance: |
|
|
return _llm_instance |
|
|
|
|
|
print("β³ Awakening AI Brain (Loading LLM on-demand)...") |
|
|
try: |
|
|
|
|
if not os.path.exists(LLAMA_MODEL_PATH): |
|
|
print(" - Downloading model (first-time only)...") |
|
|
hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILENAME, local_dir=MODEL_SAVE_DIRECTORY) |
|
|
|
|
|
_llm_instance = Llama(model_path=LLAMA_MODEL_PATH, n_ctx=1024, n_threads=2, verbose=False) |
|
|
print("β
AI Brain is Active.") |
|
|
return _llm_instance |
|
|
except Exception as e: |
|
|
print(f"β Failed to load AI: {e}") |
|
|
return None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class PerformanceForecast(BaseModel): |
|
|
predicted_engagement_rate: float |
|
|
predicted_reach: int |
|
|
|
|
|
class PayoutForecast(BaseModel): |
|
|
estimated_earning: float |
|
|
|
|
|
class RequestConfig(BaseModel): |
|
|
    model_config = ConfigDict(protected_namespaces=())  # "model_name" would otherwise clash with Pydantic v2's reserved "model_" namespace
    model_name: Optional[str] = "phi-2"
|
|
temperature: Optional[float] = 0.7 |
|
|
system_prompt: Optional[str] = None |
|
|
|
|
|
class DirectPromptPayload(BaseModel): |
|
|
prompt: str |
|
|
config: Optional[RequestConfig] = None |
|
|
|
|
|
|
|
|
|
|
|
class CreativeChatRequest(BaseModel): message: str; history: list; task_context: str |
|
|
class FinalizeScriptRequest(BaseModel): history: list; task_context: str |
|
|
class FinalScriptResponse(BaseModel): hook: str; script: str; visuals: List[str]; tools: List[str] |
|
|
class ChatQuery(BaseModel): question: str = Field(..., min_length=1); role: str; live_data: str; conversationId: str |
|
|
class ChatAnswer(BaseModel): response: str; context: Optional[str] = None |
|
|
class ChatResponseRequest(BaseModel): prompt: str = Field(..., description="The user's direct question."); context: str = Field(..., description="The real-time data context from the backend.") |
|
|
class ChatResponsePayload(BaseModel): response: str |
|
|
class CaptionRequest(BaseModel): caption: str; action: str |
|
|
class CaptionResponse(BaseModel): new_caption: str |
|
|
class BudgetRequest(BaseModel): |
|
|
campaign_goal: str; influencer_count: int; platform: str; location: str; category: str; final_reach: int |
|
|
config: Optional[Dict[str, str]] = None |
|
|
class BudgetResponse(BaseModel): predicted_budget_usd: float |
|
|
class MatcherRequest(BaseModel): campaign_description: str; target_audience_age: str; target_audience_gender: str; engagement_rate: float; followers: int; country: str; niche: str |
|
|
class MatcherResponse(BaseModel): suggested_influencer_ids: List[int] |
|
|
class PerformanceRequest(BaseModel): |
|
|
budget_usd: float; influencer_count: int; platform: str; location: str; category: str; budget: float |
|
|
config: Optional[Dict[str, str]] = None |
|
|
class PerformanceResponse(BaseModel): predicted_engagement_rate: float; predicted_reach: int |
|
|
class StrategyRequest(BaseModel): prompt: str |
|
|
class StrategyResponse(BaseModel): response: str |
|
|
class OutlineRequest(BaseModel): title: str |
|
|
class OutlineResponse(BaseModel): outline: str |
|
|
class TaskPrioritizationRequest(BaseModel): title: str; description: Optional[str] = None |
|
|
class TaskPrioritizationResponse(BaseModel): priority: str |
|
|
class DashboardInsightsRequest(BaseModel): total_revenue_monthly: float; new_users_weekly: int; active_campaigns: int; pending_approvals: int |
|
|
class TimeSeriesDataPoint(BaseModel): date: str; value: float |
|
|
class TimeSeriesForecastRequest(BaseModel): data: List[TimeSeriesDataPoint]; periods_to_predict: int; business_context: Optional[str] = "No specific context provided." |
|
|
class SmartForecastDataPoint(BaseModel): date: str; predicted_value: float; trend: str; commentary: Optional[str] = None |
|
|
class TimeSeriesForecastResponse(BaseModel): forecast: List[SmartForecastDataPoint] |
|
|
class HealthKpiRequest(BaseModel): platformRevenue: float; activeCampaigns: int; totalBrands: int |
|
|
class HealthSummaryResponse(BaseModel): summary: str |
|
|
class InfluencerData(BaseModel): id: str; name: Optional[str] = None; handle: Optional[str] = None; followers: Optional[int] = 0; category: Optional[str] = None; bio: Optional[str] = None |
|
|
class TeamStrategyRequest(BaseModel): brand_name: str; campaign_goal: str; target_audience: str; budget_range: str; influencers: List[InfluencerData] |
|
|
class CreativeBrief(BaseModel): title: str; description: str; goal_kpi: str; content_guidelines: List[str] |
|
|
class TeamStrategyResponse(BaseModel): success: bool; strategy: Optional[CreativeBrief] = None; suggested_influencers: Optional[List[InfluencerData]] = None; error: Optional[str] = None |
|
|
class AnalyticsInsightsRequest(BaseModel): totalReach: Optional[int] = 0; totalLikes: Optional[int] = 0; averageEngagementRate: Optional[float] = 0.0; topPerformingInfluencer: Optional[str] = "N/A" |
|
|
class AnalyticsInsightsResponse(BaseModel): insights: str |
|
|
class CampaignDetailsForMatch(BaseModel): description: Optional[str] = ""; goal_kpi: Optional[str] = ""; category: Optional[str] = "" |
|
|
class InfluencerRankRequest(BaseModel): campaign_details: CampaignDetailsForMatch; influencers: List[InfluencerData] |
|
|
class InfluencerRankResponse(BaseModel): ranked_influencers: List[InfluencerData] |
|
|
class WeeklySummaryRequest(BaseModel): start_date: str; end_date: str; total_ad_spend: float; total_clicks: int; new_followers: int; top_performing_campaign: str |
|
|
class WeeklySummaryResponse(BaseModel): summary: str |
|
|
class PayoutForecastInput(BaseModel): total_budget_active_campaigns: float = Field(..., description="The sum of budgets for all of a manager's currently active campaigns."); config: Optional[Dict[str, str]] = None
|
|
class PayoutForecastOutput(BaseModel): forecastedAmount: float; commentary: str |
|
|
class CampaignForRanking(BaseModel): id: int; description: Optional[str] = "" |
|
|
class InfluencerForRanking(BaseModel): id: str; category: Optional[str] = "Fashion"; bio: Optional[str] = "" |
|
|
class RankCampaignsRequest(BaseModel): influencer: InfluencerForRanking; campaigns: List[CampaignForRanking] |
|
|
class RankedCampaignResult(BaseModel): campaign_id: int; score: float |
|
|
class RankCampaignsResponse(BaseModel): ranked_campaigns: List[RankedCampaignResult] |
|
|
class CaptionAssistRequest(BaseModel): caption: str; action: str = Field(..., description="Action to perform: 'improve', 'hashtags', or 'check_guidelines'"); guidelines: Optional[str] = None |
|
|
class CaptionAssistResponse(BaseModel): new_text: str |
|
|
class ForecastRequest(BaseModel): |
|
|
budget: float; category: str; follower_count: int; engagement_rate: float |
|
|
config: Optional[Dict[str, str]] = None |
|
|
|
|
|
|
|
|
|
|
|
class ContentCheckRequest(BaseModel): |
|
|
text: str |
|
|
user_id: Optional[str] = None |
|
|
|
|
|
class TagGenerationRequest(BaseModel): |
|
|
content: str |
|
|
niche: Optional[str] = "General" |
|
|
|
|
|
class ContentCheckResponse(BaseModel): |
|
|
toxicity_score: float |
|
|
is_safe: bool |
|
|
tags: List[str] |
|
|
|
|
|
class ThreadSummaryRequest(BaseModel): |
|
|
comments: List[str] |
|
|
class ThreadSummaryResponse(BaseModel): |
|
|
summary: str |
|
|
|
|
|
|
|
|
class TrendAnalysisRequest(BaseModel): |
|
|
topic: str |
|
|
|
|
|
|
|
|
class ForecastResponse(BaseModel): |
|
|
performance: PerformanceForecast |
|
|
payout: PayoutForecast |
|
|
|
|
|
|
|
|
class InfluencerKpiData(BaseModel): totalReach: int; totalLikes: int; totalComments: int; avgEngagementRate: float; totalSubmissions: int |
|
|
class InfluencerAnalyticsSummaryResponse(BaseModel): summary: str |
|
|
class PortfolioOption(BaseModel): id: str; contentUrl: str; caption: Optional[str] = ""; likes: Optional[int] = 0; campaign: dict |
|
|
class CuratePortfolioRequest(BaseModel): submissions: List[PortfolioOption] |
|
|
class CuratePortfolioResponse(BaseModel): featured_submission_ids: List[str] |
|
|
class EarningOpportunityRequest(BaseModel): follower_count: int = Field(..., description="The influencer's current follower count")
|
|
class Opportunity(BaseModel): campaign_niche: str; content_format: str; estimated_score: float; commentary: str |
|
|
class EarningOpportunityResponse(BaseModel): opportunities: List[Opportunity] |
|
|
class PostPerformanceRequest(BaseModel): follower_count: int; caption_length: int; campaign_niche: str; content_format: str |
|
|
class PostPerformanceResponse(BaseModel): predicted_likes: int; predicted_comments: int; feedback: str |
|
|
class AnomalyInsight(BaseModel): influencer_id: str; influencer_name: str; insights: List[str] |
|
|
class RevenueForecastDatapoint(BaseModel): month: str; predicted_revenue: float; trend: str |
|
|
class RevenueForecastResponse(BaseModel): forecast: List[RevenueForecastDatapoint]; ai_commentary: str |
|
|
class MatchDocument(BaseModel): id: str; text: str; match_score: Optional[int] = None |
|
|
class RankBySimilarityRequest(BaseModel): query: str; documents: List[MatchDocument] |
|
|
class RankBySimilarityResponse(BaseModel): ranked_documents: List[MatchDocument] |
|
|
class ContentQualityRequest(BaseModel): caption: str = Field(..., description="The caption text to be analyzed.") |
|
|
class ContentQualityScore(BaseModel): readability: int; engagement: int; call_to_action: int; hashtag_strategy: int |
|
|
class ContentQualityResponse(BaseModel): overall_score: float; scores: ContentQualityScore; feedback: str |
|
|
class DailyBriefingData(BaseModel): roster_size: int; on_bench_influencers: int; pending_submissions: int; revisions_requested: int; lowest_ai_score: Optional[int] = None; highest_pending_payout: float |
|
|
class DailyBriefingResponse(BaseModel): briefing_text: str |
|
|
class ContractURL(BaseModel): pdf_url: str |
|
|
class ContractSummary(BaseModel): payment_details: str; deliverables: str; deadlines: str; exclusivity: str; ownership: str; summary_points: List[str] |
|
|
class InfluencerPerformanceStats(BaseModel): avg_engagement_rate: float; on_time_submission_rate: float; avg_brand_rating: float; monthly_earnings: float |
|
|
class InfluencerPerformanceResponse(BaseModel): performance_score: int |
|
|
class AIGrowthPlanRequest(BaseModel): fullName: str; category: Optional[str] = None; avgEngagementRate: float; monthlyEarnings: float; onTimeSubmissionRate: float; bestPostCaption: Optional[str] = None; worstPostCaption: Optional[str] = None |
|
|
class AIGrowthPlanResponse(BaseModel): insights: List[str] |
|
|
class BrandAssetAnalysisRequest(BaseModel): file_url: str = Field(..., description="URL of the logo or brand image"); asset_type: str = "logo" |
|
|
class BrandAssetAnalysisResponse(BaseModel): dominant_colors: List[str] |
|
|
class ServiceBlueprintRequest(BaseModel): service_type: str = Field(..., description="e.g., 'web-dev' or 'growth'"); requirements: str = Field(..., min_length=10) |
|
|
class ServiceBlueprintResponse(BaseModel): title: str; deliverables: List[str]; stack: str; price_est: str; timeline: str |
|
|
class GrowthPlanRequest(BaseModel): platform_handle: str; goals: str; challenges: str |
|
|
class AISummaryJobRequest(BaseModel): checkin_id: int; raw_text: str |
|
|
class WeeklyCheckinSummaryResponse(BaseModel): wins: List[str]; challenges: List[str]; opportunities: List[str]; sentiment: str |
|
|
class WeeklyPlanContext(BaseModel): niche: str; current_mood: str; recent_achievements: List[str]; active_trends: List[Dict[str, str]] |
|
|
class WeeklyPlanRequest(BaseModel): context: WeeklyPlanContext |
|
|
class PlanOption(BaseModel): type: str; title: str; platform: str; contentType: str; instructions: str; reasoning: str |
|
|
class WeeklyPlanResponse(BaseModel): options: List[PlanOption] |
|
|
|
|
|
|
|
|
app = FastAPI(title="Reachify AI Service (Deploy-Ready)", version="11.0.0") |
|
|
|
|
|
@app.on_event("startup") |
|
|
def startup_event(): |
|
|
|
|
|
global _llm_instance, _creative_director, _support_agent, _ai_strategist, _community_brain, \ |
|
|
_vector_store, _budget_predictor, _influencer_matcher, _performance_predictor, \ |
|
|
_payout_forecaster, _earnings_optimizer, _earnings_encoder, _likes_predictor, \ |
|
|
_comments_predictor, _revenue_forecaster, _performance_scorer |
|
|
|
|
|
|
|
|
print("--- π AI Service Starting Up... ---") |
|
|
try: |
|
|
os.makedirs(MODEL_SAVE_DIRECTORY, exist_ok=True) |
|
|
if not os.path.exists(LLAMA_MODEL_PATH): |
|
|
print(f" - Downloading '{MODEL_FILENAME}' from '{MODEL_REPO}'...") |
|
|
hf_hub_download( |
|
|
repo_id=MODEL_REPO, |
|
|
filename=MODEL_FILENAME, |
|
|
local_dir=MODEL_SAVE_DIRECTORY, |
|
|
local_dir_use_symlinks=False |
|
|
) |
|
|
print(" - β
Model downloaded successfully.") |
|
|
else: |
|
|
print(f" - LLM model found locally.") |
|
|
|
|
|
|
|
|
print(" - Loading Llama LLM into memory...") |
|
|
_llm_instance = Llama(model_path=LLAMA_MODEL_PATH, n_gpu_layers=0, n_ctx=2048, verbose=False) |
|
|
print(" - β
LLM Loaded successfully.") |
|
|
|
|
|
except Exception as e: |
|
|
print(f" - β FATAL ERROR: LLM failed to load. Features disabled. Error: {e}") |
|
|
|
|
|
_llm_instance = None |
|
|
|
|
|
|
|
|
if _llm_instance: |
|
|
try: |
|
|
print(" - Initializing AI components that depend on LLM...") |
|
|
_creative_director = CreativeDirector(llm_instance=_llm_instance) |
|
|
|
|
|
if VectorStore: _vector_store = VectorStore() |
|
|
|
|
|
_ai_strategist = AIStrategist(llm_instance=_llm_instance, store=_vector_store) |
|
|
|
|
|
|
|
_community_brain = CommunityBrain(llm_instance=_llm_instance) |
|
|
_support_agent = SupportAgent(llm_instance=_llm_instance, embedding_path=EMBEDDING_MODEL_PATH, db_path=DB_PATH) |
|
|
|
|
|
print(" - β
Core AI components are online.") |
|
|
except Exception as e: |
|
|
print(f" - β FAILED to initialize AI Agents: {e}") |
|
|
|
|
|
|
|
|
|
|
|
print(" - Loading ML models from joblib files...") |
|
|
model_paths = { |
|
|
'budget': ('_budget_predictor', 'budget_predictor_v1.joblib'), |
|
|
'matcher': ('_influencer_matcher', 'influencer_matcher_v1.joblib'), |
|
|
'performance': ('_performance_predictor', 'performance_predictor_v1.joblib'), |
|
|
'payout': ('_payout_forecaster', 'payout_forecaster_v1.joblib'), |
|
|
'earnings': ('_earnings_optimizer', 'earnings_model.joblib'), |
|
|
'earnings_encoder': ('_earnings_encoder', 'earnings_encoder.joblib'), |
|
|
'likes_predictor': ('_likes_predictor', 'likes_predictor_v1.joblib'), |
|
|
'comments_predictor': ('_comments_predictor', 'comments_predictor_v1.joblib'), |
|
|
'revenue_forecaster': ('_revenue_forecaster', 'revenue_forecaster_v1.joblib'), |
|
|
'performance_scorer': ('_performance_scorer', 'performance_scorer_v1.joblib'), |
|
|
} |
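
    # Each entry maps a short name to (module-level variable name, joblib filename);
    # writing through globals()[var] lets one loop fill every model slot declared above.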
|
|
|
|
|
|
|
|
for name, (var, file) in model_paths.items(): |
|
|
path = os.path.join(MODELS_DIR, file) |
|
|
try: |
|
|
if os.path.exists(path): |
|
|
|
|
|
loaded = joblib.load(path) |
|
|
globals()[var] = loaded |
|
|
print(f" - β
Loaded {name} model.") |
|
|
else: |
|
|
globals()[var] = None |
|
|
print(f" - β οΈ Model '{name}' file not found.") |
|
|
except Exception as e: |
|
|
|
|
|
globals()[var] = None |
|
|
print(f" - β SKIPPING {name}: Failed to load ({str(e)})") |
|
|
|
|
|
|
|
|
try: |
|
|
load_embedding_model(EMBEDDING_MODEL_PATH) |
|
|
except Exception as e: |
|
|
print(f" - β οΈ Failed to load Embedding model: {e}") |
|
|
|
|
|
print("\n--- β
AI Service Startup Complete! ---") |
|
|
|
|
|
|
|
|
@app.get("/") |
|
|
def health_check(): |
|
|
if _llm_instance: |
|
|
return {"status": "AI Service is Running"} |
|
|
else: |
|
|
return {"status": "AI Service is in a degraded state: Core LLM failed to load."} |
|
|
|
|
|
def _cleanup_llm_response(data: dict) -> dict: |
|
|
"""A robust helper to clean common messy JSON outputs from smaller LLMs.""" |
|
|
cleaned = { "wins": [], "challenges": [], "opportunities": [], "sentiment": "Mixed" } |
|
|
|
|
|
|
|
|
for key in ["wins", "challenges", "opportunities"]: |
|
|
if key in data and isinstance(data[key], list): |
|
|
for item in data[key]: |
|
|
if isinstance(item, str) and item: |
|
|
cleaned[key].append(item.strip()) |
|
|
elif isinstance(item, dict) and 'text' in item and isinstance(item['text'], str) and item['text']: |
|
|
cleaned[key].append(item['text'].strip()) |
|
|
|
|
|
|
|
|
sentiment_data = data.get("sentiment") |
|
|
if isinstance(sentiment_data, str) and sentiment_data: |
|
|
|
|
|
cleaned["sentiment"] = sentiment_data.strip().replace('.', '') |
|
|
elif isinstance(sentiment_data, dict): |
|
|
if sentiment_data.get('positive'): cleaned["sentiment"] = "Positive" |
|
|
elif sentiment_data.get('negative'): cleaned["sentiment"] = "Negative" |
|
|
else: cleaned["sentiment"] = "Mixed" |
|
|
|
|
|
return cleaned |
|
|
|
|
|
def process_summary_in_background(checkin_id: int, raw_text: str): |
|
|
""" |
|
|
[FINAL, RELIABLE VERSION] This function no longer uses the LLM for sorting. |
|
|
It performs keyword matching directly in Python for 100% accuracy. |
|
|
""" |
|
|
print(f" - βοΈ BACKGROUND JOB STARTED for check-in ID: {checkin_id} (Reliable Python Sorter)") |
|
|
|
|
|
supabase = get_supabase_client() |
|
|
|
|
|
try: |
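        # Heuristic keyword buckets: each sentence goes to the first bucket it matches,
        # with challenges checked before opportunities, then wins.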
|
|
|
|
|
|
|
|
|
|
|
|
|
|
win_keywords = ["awesome", "happy", "insane engagement", "finished", "managed to", "productive", "went really well", "pleased with", "love making"] |
|
|
challenge_keywords = ["rough week", "disaster", "struggled", "blocked", "nervous", "issue", "frustrating", "lagging", "disconnecting"] |
|
|
opportunity_keywords = ["thinking of", "next week", "maybe I should", "idea", "look into", "research"] |
|
|
|
|
|
|
|
|
wins = [] |
|
|
challenges = [] |
|
|
opportunities = [] |
|
|
|
|
|
|
|
|
|
|
|
sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?|!)\s', raw_text) |
|
|
|
|
|
|
|
|
for sentence in sentences: |
|
|
s_lower = sentence.lower() |
|
|
categorized = False |
|
|
|
|
|
|
|
|
if any(keyword in s_lower for keyword in challenge_keywords): |
|
|
challenges.append(sentence.strip()) |
|
|
categorized = True |
|
|
|
|
|
elif any(keyword in s_lower for keyword in opportunity_keywords): |
|
|
opportunities.append(sentence.strip()) |
|
|
categorized = True |
|
|
|
|
|
elif any(keyword in s_lower for keyword in win_keywords): |
|
|
wins.append(sentence.strip()) |
|
|
categorized = True |
|
|
|
|
|
|
|
|
if not wins: wins.append("No specific wins were mentioned.") |
|
|
if not challenges: challenges.append("No specific challenges were mentioned.") |
|
|
if not opportunities: opportunities.append("No new opportunities were mentioned.") |
|
|
|
|
|
|
|
|
sentiment = "Mixed" |
|
|
if len(challenges) > len(wins) + 1: |
|
|
sentiment = "Negative" |
|
|
elif len(wins) > len(challenges) + 1: |
|
|
sentiment = "Positive" |
|
|
|
|
|
|
|
|
cleaned_summary = { |
|
|
"wins": wins, |
|
|
"challenges": challenges, |
|
|
"opportunities": opportunities, |
|
|
"sentiment": sentiment |
|
|
} |
|
|
|
|
|
|
|
|
print(f" - β
JOB ({checkin_id}): PYTHON SORTER COMPLETED. Updating database with: {cleaned_summary}") |
|
|
supabase.table("influencer_weekly_checkins").update({ |
|
|
"structured_summary": cleaned_summary, |
|
|
"status": "completed" |
|
|
}).eq("id", checkin_id).execute() |
|
|
|
|
|
except Exception as e: |
|
|
error_message = f"Python Sorter failed: {str(e)}" |
|
|
print(f" - β JOB FAILED for check-in ID: {checkin_id}. Error: {error_message}") |
|
|
traceback.print_exc() |
|
|
supabase.table("influencer_weekly_checkins").update({ |
|
|
"status": "failed", |
|
|
"error_message": error_message |
|
|
}).eq("id", checkin_id).execute() |
|
|
|
|
|
|
|
|
@app.post("/generate-chat-response", response_model=ChatResponsePayload, summary="Interactive AI Strategist Chat") |
|
|
async def generate_chat_response_route(request: ChatResponseRequest): |
|
|
print(f"\nβ
Received request on /generate-chat-response") |
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="The AI Strategist is not available.") |
|
|
try: |
|
|
response_text = _ai_strategist.generate_chat_response(prompt=request.prompt, context=request.context) |
|
|
return ChatResponsePayload(response=response_text) |
|
|
except Exception as e: |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
@app.post("/api/v1/chat", response_model=ChatAnswer, summary="Role-Aware AI Support Agent") |
|
|
async def ask_support_agent(query: ChatQuery): |
|
|
if not _support_agent: raise HTTPException(status_code=503, detail="AI Support Agent is not available.") |
|
|
return _support_agent.answer(payload=query.model_dump(), conversation_id=query.conversationId) |
|
|
|
|
|
@app.post("/api/v1/generate/caption", response_model=CaptionResponse, summary="Generate variations of a caption") |
|
|
async def generate_caption_route(request: CaptionRequest): |
|
|
if not _support_agent: raise HTTPException(status_code=503, detail="AI Support Agent is not available.") |
|
|
new_caption_text = _support_agent.generate_caption_variant(caption=request.caption, action=request.action) |
|
|
return CaptionResponse(new_caption=new_caption_text) |
|
|
|
|
|
@app.post("/generate-strategy", response_model=StrategyResponse, summary="Generate a Digital Marketing Strategy") |
|
|
async def generate_strategy_route(request: StrategyRequest): |
|
|
if not _support_agent: |
|
|
raise HTTPException(status_code=503, detail="AI Support Agent is not available.") |
|
|
try: |
|
|
strategy_text = _support_agent.generate_marketing_strategy(prompt=request.prompt) |
|
|
return StrategyResponse(response=strategy_text) |
|
|
except Exception as e: |
|
|
raise HTTPException(status_code=500, detail=f"An internal error occurred in the AI model: {e}") |
|
|
|
|
|
@app.post("/api/v1/predict/budget", response_model=BudgetResponse) |
|
|
async def predict_budget(request: BudgetRequest): |
|
|
if not _budget_predictor: raise HTTPException(status_code=503, detail="Predictor Unavailable") |
|
|
|
|
|
input_data = pd.DataFrame([request.model_dump(exclude={'config'})]) |
|
|
prediction = float(_budget_predictor.predict(input_data)[0]) |
|
|
|
|
|
|
|
|
if request.config: |
|
|
multiplier = float(request.config.get("budget_multiplier", 1.0)) |
|
|
prediction = prediction * multiplier |
|
|
|
|
|
return BudgetResponse(predicted_budget_usd=round(prediction, 2)) |
|
|
|
|
|
@app.post("/api/v1/match/influencers", response_model=MatcherResponse, summary="Match Influencers to Campaign") |
|
|
async def match_influencers(request: MatcherRequest): |
|
|
if not _influencer_matcher: raise HTTPException(status_code=503, detail="Influencer matcher is not available.") |
|
|
input_data = pd.DataFrame([request.model_dump()]) |
|
|
prediction = _influencer_matcher.predict(input_data) |
|
|
integer_ids = [int(pid) for pid in prediction] |
|
|
return MatcherResponse(suggested_influencer_ids=integer_ids) |
|
|
|
|
|
@app.post("/api/v1/predict/performance", response_model=PerformanceResponse, summary="Predict Campaign Performance") |
|
|
async def predict_performance(request: PerformanceRequest): |
|
|
|
|
|
if not _performance_predictor: |
|
|
return PerformanceResponse(predicted_engagement_rate=0.03, predicted_reach=50000) |
|
|
|
|
|
try: |
|
|
input_data = pd.DataFrame([request.model_dump()]) |
|
|
prediction_value = _performance_predictor.predict(input_data)[0] |
|
|
return PerformanceResponse(predicted_engagement_rate=0.035, predicted_reach=int(prediction_value)) |
|
|
    except Exception:
|
|
|
|
|
return PerformanceResponse(predicted_engagement_rate=0.03, predicted_reach=50000) |
|
|
|
|
|
@app.post("/generate-outline", response_model=OutlineResponse, summary="Generate a Blog Post Outline") |
|
|
async def generate_outline_route(request: OutlineRequest): |
|
|
if not _support_agent: |
|
|
raise HTTPException(status_code=503, detail="AI Support Agent is not available.") |
|
|
try: |
|
|
outline_text = _support_agent.generate_content_outline(title=request.title) |
|
|
return OutlineResponse(outline=outline_text) |
|
|
except Exception as e: |
|
|
raise HTTPException(status_code=500, detail=f"An internal error occurred in the AI model: {e}") |
|
|
|
|
|
|
|
|
@app.post("/generate-dashboard-insights", response_model=StrategyResponse, summary="Generate Insights from Dashboard KPIs") |
|
|
@cached_response |
|
|
def generate_dashboard_insights_route(request: DashboardInsightsRequest): |
|
|
""" |
|
|
This is the corrected SYNCHRONOUS version of the endpoint. |
|
|
""" |
|
|
print(f"\nβ
Received request on /generate-dashboard-insights with data: {request.model_dump()}") |
|
|
if not _llm_instance: |
|
|
raise HTTPException(status_code=503, detail="The Llama model is not available.") |
|
|
|
|
|
kpis = request.model_dump() |
|
|
prompt = f""" |
|
|
[SYSTEM] |
|
|
You are a senior data analyst at Reachify. Your task is to write a short, insightful summary for the agency's admin based on this week's key performance indicators. Please identify the most important trends, be proactive, and suggest a potential action. The summary should be in the form of 2-3 human-readable bullet points. |
|
|
|
|
|
[THIS WEEK'S KPI DATA] |
|
|
- Revenue This Month (so far): ${kpis.get('total_revenue_monthly', 0):.2f} |
|
|
- New Users This Week: {kpis.get('new_users_weekly', 0)} |
|
|
- Currently Active Campaigns: {kpis.get('active_campaigns', 0)} |
|
|
- Items Awaiting Approval: {kpis.get('pending_approvals', 0)} |
|
|
|
|
|
[YOUR INSIGHTFUL BULLET POINTS] |
|
|
- """ |
|
|
try: |
|
|
print("--- Sending composed prompt to LLM...") |
|
|
response = _llm_instance(prompt, max_tokens=250, temperature=0.7, stop=["[SYSTEM]", "Human:", "\n\n"], echo=False) |
|
|
insight_text = response['choices'][0]['text'].strip() |
|
|
if not insight_text.startswith('-'): |
|
|
insight_text = '- ' + insight_text |
|
|
print("--- Successfully received response from LLM.") |
|
|
return StrategyResponse(response=insight_text) |
|
|
except Exception as e: |
|
|
print(f"π¨ AN ERROR OCCURRED in /generate-dashboard-insights:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.get("/", summary="Health Check") |
|
|
def read_root(): |
|
|
return {"status": "Unified AI Service is running"} |
|
|
|
|
|
@app.post("/predict/time-series", response_model=TimeSeriesForecastResponse, summary="Forecast Time Series with Trend Analysis") |
|
|
def predict_time_series(request: TimeSeriesForecastRequest): |
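    """
    Fits Holt's linear-trend exponential smoothing to the submitted series and
    labels each forecasted point by its percentage change from the preceding value.
    """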
|
|
print(f"\nβ
Received smart forecast request with context: '{request.business_context}'") |
|
|
|
|
|
if len(request.data) < 5: |
|
|
raise HTTPException(status_code=400, detail="Not enough data. At least 5 data points required.") |
|
|
|
|
|
try: |
|
|
df = pd.DataFrame([item.model_dump() for item in request.data]) |
|
|
df['date'] = pd.to_datetime(df['date']) |
|
|
df = df.set_index('date').asfreq('MS', method='ffill') |
|
|
|
|
|
model = Holt(df['value'], initialization_method="estimated").fit(optimized=True) |
|
|
forecast_result = model.forecast(steps=request.periods_to_predict) |
|
|
|
|
|
smart_forecast_output = [] |
|
|
last_historical_value = df['value'].iloc[-1] |
|
|
|
|
|
for date, predicted_val in forecast_result.items(): |
|
|
trend_label = "Stable" |
|
|
commentary = None |
|
|
            percentage_change = ((predicted_val - last_historical_value) / last_historical_value) * 100 if last_historical_value else 0.0
|
|
|
|
|
if percentage_change > 10: |
|
|
trend_label = "Strong Growth" |
|
|
if "by " in request.business_context: |
|
|
reason = request.business_context.split('by ')[-1] |
|
|
commentary = f"Strong growth expected, likely driven by {reason}" |
|
|
else: |
|
|
commentary = "Strong growth expected due to positive trends." |
|
|
elif percentage_change > 2: |
|
|
trend_label = "Modest Growth" |
|
|
elif percentage_change < -5: |
|
|
trend_label = "Potential Downturn" |
|
|
commentary = "Warning: A potential downturn is detected. This may not account for upcoming campaigns. Review your strategy." |
|
|
|
|
|
smart_forecast_output.append( |
|
|
SmartForecastDataPoint( |
|
|
date=date.strftime('%Y-%m-%d'), |
|
|
predicted_value=round(predicted_val, 2), |
|
|
trend=trend_label, |
|
|
commentary=commentary |
|
|
) |
|
|
) |
|
|
last_historical_value = predicted_val |
|
|
|
|
|
return TimeSeriesForecastResponse(forecast=smart_forecast_output) |
|
|
|
|
|
except Exception as e: |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
@app.post("/generate-health-summary", response_model=HealthSummaryResponse, summary="Generates an actionable summary from KPIs") |
|
|
def generate_health_summary(request: HealthKpiRequest): |
|
|
print(f"\nβ
Received request to generate health summary.") |
|
|
if not _llm_instance: |
|
|
raise HTTPException(status_code=503, detail="LLM not available for summary.") |
|
|
|
|
|
kpis = request.model_dump() |
|
|
|
|
|
prompt = f""" |
|
|
[SYSTEM] |
|
|
    You are a business analyst. Analyze these KPIs: Platform Revenue (₹{kpis.get('platformRevenue', 0):,.0f}), Active Campaigns ({kpis.get('activeCampaigns', 0)}). Provide one [PROGRESS] point and one [AREA TO WATCH] with a next action. Under 50 words.
|
|
[YOUR ANALYSIS] |
|
|
""" |
|
|
|
|
|
try: |
|
|
|
|
|
response = _llm_instance(prompt, max_tokens=150, temperature=0.6, stop=["[SYSTEM]"], echo=False) |
|
|
summary_text = response['choices'][0]['text'].strip() |
|
|
print(f" - β
Generated summary: {summary_text}") |
|
|
return HealthSummaryResponse(summary=summary_text) |
|
|
|
|
|
except OSError as e: |
|
|
print(f"π¨ CRITICAL LLM CRASH CAUGHT (OSError): {e}. Returning a fallback message.") |
|
|
traceback.print_exc() |
|
|
return HealthSummaryResponse(summary="[AREA TO WATCH]: The AI analyst model is currently unstable and is being reviewed. Manual analysis is recommended.") |
|
|
except Exception as e: |
|
|
print(f"π¨ An unexpected error occurred during summary generation: {e}") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.post("/generate_team_strategy", response_model=TeamStrategyResponse, summary="Generates a full campaign strategy for the internal team") |
|
|
def generate_team_strategy(request: TeamStrategyRequest): |
|
|
""" |
|
|
This endpoint orchestrates the AI/ML logic for the Team Strategist tool. |
|
|
It takes campaign details and a list of influencers from the backend. |
|
|
""" |
|
|
print(f"\nβ
Received request on /generate_team_strategy for brand: {request.brand_name}") |
|
|
|
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="AI Strategist model is not available or failed to load.") |
|
|
|
|
|
try: |
|
|
|
|
|
creative_brief_dict = _ai_strategist.generate_campaign_brief( |
|
|
brand_name=request.brand_name, |
|
|
campaign_goal=request.campaign_goal, |
|
|
target_audience=request.target_audience, |
|
|
budget_range=request.budget_range |
|
|
) |
|
|
if "error" in creative_brief_dict: |
|
|
raise Exception(f"LLM Error during brief generation: {creative_brief_dict['error']}") |
|
|
|
|
|
|
|
|
influencer_list_of_dicts = [inf.model_dump() for inf in request.influencers] |
|
|
suggested_influencers_list = rank_influencers_by_match( |
|
|
influencers=influencer_list_of_dicts, |
|
|
campaign_details=request.model_dump(exclude={"influencers"}), |
|
|
top_n=3 |
|
|
) |
|
|
|
|
|
print("β
Successfully generated brief and ranked influencers.") |
|
|
return TeamStrategyResponse( |
|
|
success=True, |
|
|
strategy=CreativeBrief(**creative_brief_dict), |
|
|
suggested_influencers=[InfluencerData(**inf) for inf in suggested_influencers_list] |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in /generate_team_strategy endpoint:") |
|
|
traceback.print_exc() |
|
|
return TeamStrategyResponse(success=False, error=str(e)) |
|
|
|
|
|
|
|
|
@app.post("/strategist/generate-analytics-insights", response_model=AnalyticsInsightsResponse, summary="Generates Actionable Insights from Campaign Analytics") |
|
|
async def generate_analytics_insights_route(request: AnalyticsInsightsRequest): |
|
|
""" |
|
|
Receives campaign analytics data and uses the AI Strategist to generate key insights. |
|
|
""" |
|
|
print(f"\nβ
Received request on /strategist/generate-analytics-insights") |
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="The AI Strategist is not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
analytics_data = request.model_dump() |
|
|
|
|
|
|
|
|
insights_text = _ai_strategist.generate_analytics_insights(analytics_data=analytics_data) |
|
|
|
|
|
return AnalyticsInsightsResponse(insights=insights_text) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in /strategist/generate-analytics-insights endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
@app.post("/predictor/rank-influencers", response_model=InfluencerRankResponse, summary="Ranks a given list of influencers for a specific campaign") |
|
|
async def rank_influencers_route(request: InfluencerRankRequest): |
|
|
""" |
|
|
    Receives the campaign details and the full list of influencers from the backend,
    and returns the top five ranked influencers using the ML matching model.
|
|
""" |
|
|
print(f"\nβ
Received request on /predictor/rank-influencers for campaign: '{request.campaign_details.description[:30]}...'") |
|
|
|
|
|
try: |
|
|
influencers_list = [inf.model_dump() for inf in request.influencers] |
|
|
campaign_details_dict = request.campaign_details.model_dump() |
|
|
|
|
|
ranked_list = rank_influencers_by_match( |
|
|
influencers=influencers_list, |
|
|
campaign_details=campaign_details_dict, |
|
|
top_n=5 |
|
|
) |
|
|
|
|
|
print(f" - β
Successfully ranked {len(ranked_list)} influencers.") |
|
|
return InfluencerRankResponse(ranked_influencers=ranked_list) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in /predictor/rank-influencers endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
@app.post("/strategist/generate-weekly-summary", response_model=WeeklySummaryResponse, summary="Generates a Weekly Summary from Metrics") |
|
|
def generate_weekly_summary_route(request: WeeklySummaryRequest): |
|
|
print(f"\nβ
Received request on the NEW /strategist/generate-weekly-summary endpoint.") |
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="AI Strategist is not initialized.") |
|
|
try: |
|
|
summary_text = _ai_strategist.generate_weekly_summary(metrics=request.model_dump()) |
|
|
if not summary_text or "error" in summary_text.lower(): |
|
|
raise Exception("AI model failed to generate a valid summary.") |
|
|
return WeeklySummaryResponse(summary=summary_text) |
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in /strategist/generate-weekly-summary: {e}") |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
@app.post("/predict/payout_forecast", response_model=PayoutForecastOutput) |
|
|
def predict_payout(data: PayoutForecastInput): |
|
|
if not _payout_forecaster: raise HTTPException(status_code=503, detail="Model Unavailable") |
|
|
|
|
|
pred = float(_payout_forecaster.predict(pd.DataFrame([{'budget': data.total_budget_active_campaigns}]))[0]) |
|
|
|
|
|
|
|
|
if data.config: |
|
|
pred = pred * float(data.config.get("budget_multiplier", 1.0)) |
|
|
|
|
|
return {"forecastedAmount": max(0, pred), "commentary": "Based on budget trends."} |
|
|
|
|
|
|
|
|
@app.post("/analyze/content-quality", response_model=ContentQualityResponse, summary="Analyzes a caption for a quality score") |
|
|
def analyze_content_quality(request: ContentQualityRequest): |
|
|
""" |
|
|
Uses the loaded LLM to analyze a social media caption based on several criteria |
|
|
and returns a quantitative score and qualitative feedback. |
|
|
""" |
|
|
print(f"\nβ
Received request on /analyze/content_quality") |
|
|
if not _llm_instance: |
|
|
raise HTTPException(status_code=503, detail="The Llama model is not available.") |
|
|
|
|
|
caption = request.caption |
|
|
|
|
|
prompt = f""" |
|
|
[SYSTEM] |
|
|
You are a social media expert. Analyze the following caption... Respond ONLY with a valid JSON object. |
|
|
|
|
|
[CAPTION TO ANALYZE] |
|
|
"{caption}" |
|
|
|
|
|
[YOUR JSON RESPONSE] |
|
|
""" |
|
|
try: |
|
|
print("--- Sending caption to LLM for quality analysis...") |
|
|
response = _llm_instance(prompt, max_tokens=512, temperature=0.2, stop=["[SYSTEM]", "\n\n"], echo=False) |
|
|
|
|
|
json_text = response['choices'][0]['text'].strip() |
|
|
start_index = json_text.find('{') |
|
|
end_index = json_text.rfind('}') + 1 |
|
|
|
|
|
if start_index == -1 or end_index == 0: |
|
|
raise ValueError("LLM did not return a valid JSON object.") |
|
|
|
|
|
clean_json_text = json_text[start_index:end_index] |
|
|
import json |
|
|
|
|
|
|
|
|
analysis_result = json.loads(clean_json_text) |
|
|
|
|
|
final_result = { |
|
|
"overall_score": analysis_result.get("overall_score"), |
|
|
"feedback": analysis_result.get("feedback"), |
|
|
"scores": analysis_result.get("scores") or analysis_result.get("score") |
|
|
} |
|
|
|
|
|
print("--- Successfully received and parsed JSON response from LLM.") |
|
|
return ContentQualityResponse(**final_result) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ Error in Content Quality Analysis: {e}") |
|
|
raise HTTPException(status_code=500, detail="Failed to parse analysis.") |
|
|
|
|
|
@app.post("/rank/campaigns-for-influencer", response_model=RankCampaignsResponse, summary="Ranks a list of campaigns for one influencer") |
|
|
async def rank_campaigns_for_influencer_route(request: RankCampaignsRequest): |
|
|
""" |
|
|
Takes an influencer's profile and a list of campaigns, uses the ML model |
|
|
to predict a 'match score' for each, and returns the list ranked by that score. |
|
|
""" |
|
|
print(f"\nβ
Received request on /rank/campaigns-for-influencer for influencer: {request.influencer.id}") |
|
|
|
|
|
|
|
|
if not _influencer_matcher: |
|
|
raise HTTPException(status_code=503, detail="Influencer Matcher model is not available.") |
|
|
if not request.campaigns: |
|
|
return RankCampaignsResponse(ranked_campaigns=[]) |
|
|
|
|
|
try: |
|
|
|
|
|
|
|
|
df_list = [] |
|
|
for campaign in request.campaigns: |
|
|
df_list.append({ |
|
|
'influencer_category': request.influencer.category, |
|
|
'influencer_bio': request.influencer.bio, |
|
|
'campaign_description': campaign.description, |
|
|
|
|
|
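                # Placeholder audience features: this endpoint does not receive follower
                # stats, so fixed values keep the input schema the matcher was trained on.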
'followers': 50000, |
|
|
'engagement_rate': 0.04, |
|
|
'country': 'USA', |
|
|
'niche': request.influencer.category or 'lifestyle' |
|
|
}) |
|
|
|
|
|
df_to_predict = pd.DataFrame(df_list) |
|
|
|
|
|
|
|
|
|
|
|
print(f" - Predicting scores for {len(df_to_predict)} campaigns...") |
|
|
predicted_scores = _influencer_matcher.predict(df_to_predict) |
|
|
|
|
|
|
|
|
|
|
|
results_with_scores = zip(request.campaigns, predicted_scores) |
|
|
|
|
|
|
|
|
sorted_results = sorted(results_with_scores, key=lambda x: x[1], reverse=True) |
|
|
|
|
|
|
|
|
output = [ |
|
|
RankedCampaignResult(campaign_id=camp.id, score=float(score)) |
|
|
for camp, score in sorted_results |
|
|
] |
|
|
|
|
|
print(f" - β
Successfully scored and ranked campaigns.") |
|
|
return RankCampaignsResponse(ranked_campaigns=output) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred during campaign ranking:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
@app.post("/ai/assist/caption", response_model=CaptionAssistResponse, summary="Assists with writing or improving captions") |
|
|
async def caption_assistant_route(request: CaptionAssistRequest): |
|
|
""" |
|
|
Takes a caption and performs an action (improve, suggest hashtags, etc.) using the LLM. |
|
|
""" |
|
|
print(f"\nβ
Received request on /ai/assist/caption with action: {request.action}") |
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="AI Strategist is not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
generated_text = _ai_strategist.get_caption_assistance( |
|
|
caption=request.caption, |
|
|
action=request.action, |
|
|
guidelines=request.guidelines |
|
|
) |
|
|
return CaptionAssistResponse(new_text=generated_text) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in /ai/assist/caption endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.post("/predict/campaign-outcome", response_model=ForecastResponse) |
|
|
async def predict_campaign_outcome(request: ForecastRequest): |
|
|
if not _performance_predictor or not _payout_forecaster: raise HTTPException(status_code=503, detail="Models Unavailable") |
|
|
|
|
|
input_df = pd.DataFrame([request.model_dump(exclude={'config'})]) |
|
|
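    # The trained pipelines expect more feature columns than this request carries;
    # fill the missing ones with fixed defaults before predicting.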
input_df['influencer_count'] = 1; input_df['platform'] = 'instagram'; input_df['location'] = 'USA'; input_df['followers'] = request.follower_count |
|
|
|
|
|
|
|
|
reach = _performance_predictor.predict(input_df[['budget','influencer_count','platform','location','category']])[0] |
|
|
payout = float(_payout_forecaster.predict(input_df[['budget']])[0]) |
|
|
|
|
|
|
|
|
if request.config: |
|
|
payout_multiplier = float(request.config.get("budget_multiplier", 1.0)) |
|
|
payout = payout * payout_multiplier |
|
|
|
|
|
min_payout = float(request.config.get("ml_payout_floor", 0)) |
|
|
payout = max(min_payout, payout) |
|
|
|
|
|
return ForecastResponse( |
|
|
performance=PerformanceForecast(predicted_reach=int(reach), predicted_engagement_rate=round(request.engagement_rate*100, 2)), |
|
|
payout=PayoutForecast(estimated_earning=max(0, payout)) |
|
|
) |
|
|
|
|
|
@app.post("/ai/summarize/influencer-analytics", response_model=InfluencerAnalyticsSummaryResponse, summary="Generates a summary for the influencer's analytics page") |
|
|
async def summarize_influencer_analytics(request: InfluencerKpiData): |
|
|
""" |
|
|
Takes an influencer's KPIs and uses the AI strategist to create an actionable summary. |
|
|
""" |
|
|
print(f"\nβ
Received request on /ai/summarize/influencer-analytics") |
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="AI Strategist is not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
summary_text = _ai_strategist.generate_influencer_analytics_summary(kpis=request.model_dump()) |
|
|
return InfluencerAnalyticsSummaryResponse(summary=summary_text) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in the analytics summary endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.post("/portfolio/curate-with-ai", response_model=CuratePortfolioResponse) |
|
|
def curate_portfolio_with_ai(request: CuratePortfolioRequest): |
|
|
""" |
|
|
Accepts a list of approved submissions, scores them based on simple logic, |
|
|
and returns the IDs of the best ones. THIS VERSION DOES NOT USE THE LLM. |
|
|
""" |
|
|
print(f"\nβ
β
β
RUNNING FINAL, NON-LLM VERSION of Portfolio Curation β
β
β
") |
|
|
|
|
|
submissions = request.submissions |
|
|
|
|
|
if not submissions: |
|
|
return CuratePortfolioResponse(featured_submission_ids=[]) |
|
|
|
|
|
scored_submissions = [] |
|
|
for sub in submissions: |
|
|
|
|
|
score = 0 |
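        # Weighted heuristic in place of the LLM: likes dominate the score, with a
        # flat bonus below for posts that carry a substantial caption.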
|
|
|
|
|
score += (sub.likes or 0) * 0.7 |
|
|
|
|
|
|
|
|
if sub.caption and len(sub.caption) > 100: |
|
|
score += 100 |
|
|
|
|
|
|
|
|
scored_submissions.append({'id': sub.id, 'score': score}) |
|
|
|
|
|
|
|
|
sorted_submissions = sorted(scored_submissions, key=lambda x: x['score'], reverse=True) |
|
|
|
|
|
|
|
|
top_submissions = sorted_submissions[:5] |
|
|
|
|
|
|
|
|
featured_ids = [sub['id'] for sub in top_submissions] |
|
|
|
|
|
print(f" - β
Scored and selected {len(featured_ids)} posts: {featured_ids}") |
|
|
return CuratePortfolioResponse(featured_submission_ids=featured_ids) |
|
|
|
|
|
@app.post("/tasks/prioritize", response_model=TaskPrioritizationResponse) |
|
|
def prioritize_task(request: TaskPrioritizationRequest): |
|
|
""" |
|
|
Analyzes a task's title and description to assign a priority level. |
|
|
""" |
|
|
if not _llm_instance: |
|
|
raise HTTPException(status_code=503, detail="LLM model is not available.") |
|
|
|
|
|
prompt = f""" |
|
|
[INST] You are an expert assistant for a social media influencer. Your job is to assign a priority to a new task based on its title. Use these rules: |
|
|
- If the task mentions "revise", "rejection", "feedback", "contract", or is a deadline, the priority is "high". |
|
|
- If the task is about a "new invitation", "new opportunity", or "message", the priority is "medium". |
|
|
- For anything else like "update profile", "explore campaigns", the priority is "low". |
|
|
|
|
|
Respond ONLY with one of the following words: high, medium, or low. |
|
|
|
|
|
Task Title: "{request.title}" |
|
|
[/INST] |
|
|
""" |
|
|
|
|
|
try: |
|
|
print(f" - π€ Prioritizing task: '{request.title}'") |
|
|
output = _llm_instance(prompt, max_tokens=10, stop=["[INST]"], echo=False) |
|
|
|
|
|
|
|
|
priority = output['choices'][0]['text'].strip().lower() |
|
|
|
|
|
|
|
|
if priority not in ['high', 'medium', 'low']: |
|
|
print(f" - β οΈ LLM returned invalid priority: '{priority}'. Defaulting to 'medium'.") |
|
|
priority = 'medium' |
|
|
|
|
|
print(f" - β
AI assigned priority: '{priority}'") |
|
|
return TaskPrioritizationResponse(priority=priority) |
|
|
|
|
|
except Exception as e: |
|
|
print(f" - β An unexpected error occurred during task prioritization: {e}") |
|
|
return TaskPrioritizationResponse(priority='medium') |
|
|
|
|
|
|
|
|
@app.post("/predict/earning-opportunities", response_model=EarningOpportunityResponse, summary="Finds the best earning opportunities for an influencer") |
|
|
async def predict_earning_opportunities(request: EarningOpportunityRequest): |
|
|
""" |
|
|
[FINAL POLISHED VERSION] Uses the model for a score and adds dynamic, helpful |
|
|
commentary for every content format. |
|
|
""" |
|
|
print(f"\nβ
Received request on /predict/earning-opportunities (FINAL POLISH)") |
|
|
if _earnings_optimizer is None or _earnings_encoder is None: |
|
|
raise HTTPException(status_code=503, detail="Earning Optimizer model or encoder is not available.") |
|
|
|
|
|
try: |
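        # Build every niche x format scenario for this follower count; the model
        # scores the whole batch at once and the top five are returned.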
|
|
|
|
|
scenarios_list = [ |
|
|
{'campaign_niche': niche, 'content_format': c_format, 'follower_count': request.follower_count} |
|
|
for niche in ['Tech', 'Fashion', 'Food', 'Gaming', 'General'] |
|
|
for c_format in ['Reel', 'Post', 'Story'] |
|
|
] |
|
|
df_scenarios = pd.DataFrame(scenarios_list) |
|
|
categorical_features = ['campaign_niche', 'content_format'] |
|
|
encoded_cats = _earnings_encoder.transform(df_scenarios[categorical_features]) |
|
|
encoded_df = pd.DataFrame(encoded_cats, columns=_earnings_encoder.get_feature_names_out(categorical_features)) |
|
|
numerical_features = df_scenarios[['follower_count']].reset_index(drop=True) |
|
|
X_final_to_predict = pd.concat([encoded_df, numerical_features], axis=1) |
|
|
predicted_scores = _earnings_optimizer.predict(X_final_to_predict) |
|
|
|
|
|
|
|
|
results = [] |
|
|
for i, scenario in enumerate(scenarios_list): |
|
|
score = float(predicted_scores[i]) |
|
|
niche = scenario['campaign_niche'] |
|
|
c_format = scenario['content_format'] |
|
|
|
|
|
|
|
|
if score > 0.75: |
|
|
comment = "Excellent match! This area has high potential for you." |
|
|
elif score < 0.4: |
|
|
comment = "This could be a challenging area to grow in." |
|
|
else: |
|
|
comment = "This is a solid opportunity worth exploring." |
|
|
|
|
|
|
|
|
if c_format == 'Reel': |
|
|
comment += " Reels are perfect for reaching a wider audience with trending audio." |
|
|
elif c_format == 'Post': |
|
|
|
|
|
comment += " Use high-quality visuals and a strong caption for best results with posts." |
|
|
elif c_format == 'Story': |
|
|
|
|
|
comment += " Stories are great for engaging your current followers with interactive polls or Q&As." |
|
|
|
|
|
results.append(Opportunity( |
|
|
campaign_niche=niche, |
|
|
content_format=c_format, |
|
|
estimated_score=score, |
|
|
commentary=comment |
|
|
)) |
|
|
|
|
|
|
|
|
sorted_results = sorted(results, key=lambda x: x.estimated_score, reverse=True) |
|
|
return EarningOpportunityResponse(opportunities=sorted_results[:5]) |
|
|
|
|
|
except Exception as e: |
|
|
print("π¨ An error occurred in /predict/earning-opportunities endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.post("/predict/post-performance", response_model=PostPerformanceResponse, summary="Predicts likes and comments for a new post") |
|
|
async def predict_post_performance(request: PostPerformanceRequest): |
|
|
""" |
|
|
Takes details of a potential post and uses two ML models to predict the |
|
|
number of likes and comments it might receive. |
|
|
""" |
|
|
print(f"\nβ
Received request on /predict/post-performance") |
|
|
if not _likes_predictor or not _comments_predictor: |
|
|
raise HTTPException(status_code=503, detail="Performance prediction models are not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
input_data = pd.DataFrame([request.model_dump()]) |
|
|
|
|
|
|
|
|
print(" - Predicting likes...") |
|
|
predicted_likes_raw = _likes_predictor.predict(input_data)[0] |
|
|
|
|
|
print(" - Predicting comments...") |
|
|
predicted_comments_raw = _comments_predictor.predict(input_data)[0] |
|
|
|
|
|
|
|
|
predicted_likes = max(0, int(predicted_likes_raw)) |
|
|
predicted_comments = max(0, int(predicted_comments_raw)) |
|
|
|
|
|
|
|
|
feedback_messages = [] |
|
|
if request.caption_length < 50: |
|
|
feedback_messages.append("Consider writing a slightly longer caption to increase engagement.") |
|
|
elif request.caption_length > 800: |
|
|
feedback_messages.append("This is a long caption! Ensure the first line is very engaging.") |
|
|
else: |
|
|
feedback_messages.append("The caption length is good for engagement.") |
|
|
|
|
|
if request.campaign_niche == 'General': |
|
|
feedback_messages.append("Try to target a more specific niche in the future for better performance.") |
|
|
|
|
|
feedback_text = " ".join(feedback_messages) |
|
|
|
|
|
print(" - β
Successfully generated performance prediction and feedback.") |
|
|
|
|
|
return PostPerformanceResponse( |
|
|
predicted_likes=predicted_likes, |
|
|
predicted_comments=predicted_comments, |
|
|
feedback=feedback_text |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in the post-performance endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.get("/analyze/performance-anomalies", response_model=List[AnomalyInsight], summary="Finds unusual performance trends for all influencers") |
|
|
def analyze_anomalies(supabase: Client = Depends(get_supabase_client)): |
|
|
|
|
|
print("π€ Running platform-wide Anomaly Detection...") |
|
|
|
|
|
try: |
|
|
|
|
|
stats_res = supabase.table('daily_influencer_stats').select('*').order('date', desc=True).limit(5000).execute() |
|
|
profiles_res = supabase.table('profiles').select('id, full_name').eq('role', 'influencer').execute() |
|
|
|
|
|
if not stats_res.data: return [] |
|
|
|
|
|
all_stats_df = pd.DataFrame(stats_res.data) |
|
|
profiles_map = {p['id']: p['full_name'] for p in profiles_res.data} |
|
|
|
|
|
all_insights = [] |
|
|
|
|
|
|
|
|
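        # Evaluate each influencer's most recent daily stats against their own history.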
for influencer_id, group in all_stats_df.groupby('profile_id'): |
|
|
historical_df = group.sort_values('date') |
|
|
today_stats = historical_df.iloc[-1].to_dict() |
|
|
|
|
|
|
|
|
insights = find_anomalies(influencer_id, historical_df, today_stats) |
|
|
|
|
|
if insights: |
|
|
all_insights.append(AnomalyInsight( |
|
|
influencer_id=influencer_id, |
|
|
influencer_name=profiles_map.get(influencer_id, 'Unknown Influencer'), |
|
|
insights=insights |
|
|
)) |
|
|
|
|
|
return all_insights |
|
|
|
|
|
except Exception as e: |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.post("/predict/revenue-forecast", response_model=RevenueForecastResponse, summary="Generates a 3-month revenue forecast") |
|
|
async def predict_revenue_forecast(): |
|
|
""" |
|
|
(FAST VERSION) Uses the trained Holt's model to forecast revenue and adds simple commentary. |
|
|
""" |
|
|
print(f"\nβ
Received request on /predict/revenue-forecast (FAST VERSION)") |
|
|
if not _revenue_forecaster: |
|
|
raise HTTPException(status_code=503, detail="Revenue forecasting model is not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
forecast_result = _revenue_forecaster.forecast(steps=3) |
|
|
|
|
|
|
|
|
forecast_datapoints = [] |
|
|
last_historical_value = _revenue_forecaster.model.endog[-1] |
|
|
|
|
|
for timestamp, predicted_value in forecast_result.items(): |
|
|
trend_label = "Stable" |
|
|
            percentage_change = ((predicted_value - last_historical_value) / last_historical_value) * 100 if last_historical_value else 0.0
|
|
if percentage_change > 15: trend_label = "Strong Growth" |
|
|
elif percentage_change > 5: trend_label = "Modest Growth" |
|
|
elif percentage_change < -10: trend_label = "Potential Downturn" |
|
|
|
|
|
forecast_datapoints.append(RevenueForecastDatapoint( |
|
|
month=timestamp.strftime('%B %Y'), |
|
|
predicted_revenue=round(predicted_value, 2), |
|
|
trend=trend_label |
|
|
)) |
|
|
last_historical_value = predicted_value |
|
|
|
|
|
|
|
|
first_trend = forecast_datapoints[0].trend if forecast_datapoints else "Stable" |
|
|
ai_commentary = "AI Insight: The forecast shows a stable outlook for the coming quarter." |
|
|
if "Growth" in first_trend: |
|
|
ai_commentary = "AI Insight: The model predicts a positive growth trend for the next quarter." |
|
|
elif "Downturn" in first_trend: |
|
|
ai_commentary = "AI Insight: A potential slowdown is predicted. It's a good time to review upcoming campaigns." |
|
|
|
|
|
print(" - β
Successfully generated revenue forecast (fast method).") |
|
|
|
|
|
return RevenueForecastResponse( |
|
|
forecast=forecast_datapoints, |
|
|
ai_commentary=ai_commentary |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in the revenue forecast endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
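# Hedged sketch: how an object with the interface used above (.forecast(steps)
# plus .model.endog) could be produced with statsmodels' Holt, which is already
# imported at the top of this module. The month-indexed series shape is an
# assumption; the project's actual training pipeline lives elsewhere.
def _fit_revenue_forecaster_sketch(monthly_revenue: pd.Series):
    """Fit Holt's linear-trend exponential smoothing on a month-indexed revenue
    series; the fitted result's .forecast(3) returns a date-indexed pd.Series."""
    return Holt(monthly_revenue, initialization_method="estimated").fit(optimized=True)

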
@app.post("/predict/influencer-performance", response_model=InfluencerPerformanceResponse, summary="Predicts a holistic performance score for an influencer") |
|
|
async def predict_influencer_performance(stats: InfluencerPerformanceStats): |
|
|
""" |
|
|
Takes an influencer's key performance metrics and returns a single, |
|
|
AI-generated performance score from 0-100. |
|
|
""" |
|
|
print(f"\nβ
Received request on /predict/influencer-performance") |
|
|
if not _performance_scorer: |
|
|
raise HTTPException(status_code=503, detail="The Performance Scorer model is not available. Please train it first.") |
|
|
|
|
|
try: |
|
|
|
|
|
input_data = pd.DataFrame([stats.model_dump()]) |
|
|
|
|
|
|
|
|
score = _performance_scorer.predict(input_data) |
|
|
|
|
|
|
|
|
predicted_score = max(0, min(100, int(score[0]))) |
|
|
|
|
|
print(f" - β
Successfully predicted performance score: {predicted_score}") |
|
|
return {"performance_score": predicted_score} |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in the influencer performance endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.post("/v1/match/rank-by-similarity", response_model=RankBySimilarityResponse, summary="Generic endpoint to rank documents by text similarity") |
|
|
async def rank_by_similarity_endpoint(request: RankBySimilarityRequest): |
|
|
print(f"\nβ
Received request on /v1/match/rank-by-similarity") |
|
|
try: |
|
|
documents_list = [doc.model_dump(exclude_unset=True) for doc in request.documents] |
|
|
ranked_docs = rank_documents_by_similarity(query=request.query, documents=documents_list) |
|
|
print(f" - β
Successfully ranked {len(ranked_docs)} documents.") |
|
|
return RankBySimilarityResponse(ranked_documents=ranked_docs) |
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in the ranking endpoint:") |
|
|
import traceback |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
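# Hedged sketch: `rank_documents_by_similarity` (core.matcher) is the actual
# implementation. Conceptually it reduces to embedding texts and sorting by
# cosine similarity, roughly like this hypothetical helper:
def _rank_by_cosine_sketch(query_vec, doc_vecs):
    """Return document indices sorted by descending cosine similarity to the query."""
    import numpy as np  # local import keeps the sketch self-contained
    q = np.asarray(query_vec, dtype=float)
    D = np.asarray(doc_vecs, dtype=float)
    sims = (D @ q) / (np.linalg.norm(D, axis=1) * np.linalg.norm(q) + 1e-9)
    return list(np.argsort(-sims))

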
@app.post("/analyze/content-quality", response_model=ContentQualityResponse, summary="Analyzes a caption for a quality score") |
|
|
def analyze_content_quality(request: ContentQualityRequest): |
|
|
""" |
|
|
Uses the loaded LLM to analyze a social media caption based on several criteria |
|
|
and returns a quantitative score and qualitative feedback. |
|
|
""" |
|
|
print(f"\nβ
Received request on /analyze/content_quality") |
|
|
if not _llm_instance: |
|
|
raise HTTPException(status_code=503, detail="The Llama model is not available.") |
|
|
|
|
|
caption = request.caption |
|
|
|
|
|
prompt = f""" |
|
|
[SYSTEM] |
|
|
You are a social media expert. Analyze the following caption... Respond ONLY with a valid JSON object in the following format: |
|
|
{{ |
|
|
"overall_score": <float>, |
|
|
"scores": {{ "readability": <int>, "engagement": <int>, "call_to_action": <int>, "hashtag_strategy": <int> }}, |
|
|
"feedback": "<string>" |
|
|
}} |
|
|
|
|
|
[CAPTION TO ANALYZE] |
|
|
"{caption}" |
|
|
|
|
|
[YOUR JSON RESPONSE] |
|
|
""" |
|
|
|
|
|
try: |
|
|
print("--- Sending caption to LLM for quality analysis...") |
|
|
response = _llm_instance(prompt, max_tokens=512, temperature=0.2, stop=["[SYSTEM]", "\n\n"], echo=False) |
|
|
|
|
|
json_text = response['choices'][0]['text'].strip() |
|
|
start_index = json_text.find('{') |
|
|
end_index = json_text.rfind('}') + 1 |
|
|
if start_index == -1 or end_index == 0: |
|
|
raise ValueError("LLM did not return a valid JSON object.") |
|
|
|
|
|
clean_json_text = json_text[start_index:end_index] |
|
|
|
|
|
import json |
|
|
|
|
|
|
|
|
analysis_result = json.loads(clean_json_text) |
|
|
|
|
|
final_result = { |
|
|
"overall_score": analysis_result.get("overall_score"), |
|
|
"feedback": analysis_result.get("feedback"), |
|
|
"scores": analysis_result.get("scores") or analysis_result.get("score") |
|
|
} |
|
|
|
|
|
print("--- Successfully received and parsed JSON response from LLM.") |
|
|
return ContentQualityResponse(**final_result) |
|
|
|
|
|
except (json.JSONDecodeError, KeyError, ValueError) as e: |
|
|
print(f"π¨ ERROR parsing LLM response: {e}. Raw response was: {json_text}") |
|
|
raise HTTPException(status_code=500, detail="Failed to parse analysis from AI model.") |
|
|
except Exception as e: |
|
|
print(f"π¨ An unexpected error occurred during content analysis:") |
|
|
import traceback |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
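# Hedged refactor suggestion: the brace-slicing pattern above also appears in
# /summarize-contract below. A shared helper like this hypothetical one (not
# yet wired into either endpoint) would keep the two code paths consistent:
def _extract_json_object(raw_text: str) -> dict:
    """Parse the first {...} object found in an LLM response.
    Raises ValueError if no JSON object is present."""
    start, end = raw_text.find('{'), raw_text.rfind('}') + 1
    if start == -1 or end == 0:
        raise ValueError("No JSON object found in LLM response.")
    return json.loads(raw_text[start:end])

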
@app.post("/generate/daily-briefing", response_model=DailyBriefingResponse, summary="Generates a daily action plan for the Talent Manager") |
|
|
def generate_daily_briefing(data: DailyBriefingData): |
|
|
""" |
|
|
[BULLETPROOF VERSION] Takes KPIs and uses either the LLM (if data exists) or |
|
|
Python logic (if data is empty) to generate a daily briefing. |
|
|
""" |
|
|
print(f"\nβ
Received request on /generate/daily-briefing with data: {data}") |
|
|
|
|
|
|
|
|
on_bench = data.on_bench_influencers |
|
|
pending_tasks = data.pending_submissions + data.revisions_requested |
|
|
|
|
|
|
|
|
|
|
|
if on_bench == 0 and pending_tasks == 0: |
|
|
print(" - β
No critical tasks found. Returning Python-generated 'All Clear' message.") |
|
|
return DailyBriefingResponse( |
|
|
briefing_text="All clear! No urgent actions are required. Your roster is fully engaged and up-to-date." |
|
|
) |
|
|
|
|
|
|
|
|
if not _llm_instance: |
|
|
raise HTTPException(status_code=503, detail="The Llama model is not available for briefing.") |
|
|
|
|
|
final_prompt = f""" |
|
|
Summarize these key points into 2-3 direct bullet points for a manager. |
|
|
|
|
|
DATA: |
|
|
- Influencers without campaigns: {on_bench} |
|
|
- Submissions needing review: {pending_tasks} |
|
|
- Total pending money: {data.highest_pending_payout:,.0f} INR |
|
|
|
|
|
SUMMARY: |
|
|
- """ |
|
|
|
|
|
try: |
|
|
print("--- Sending briefing data to LLM (Data exists)...") |
|
|
response = _llm_instance(final_prompt, max_tokens=150, temperature=0.1, stop=["DATA:"], echo=False) |
|
|
briefing_text = response['choices'][0]['text'].strip() |
|
|
|
|
|
final_briefing = f"Here are your top priorities for today:\n- {briefing_text}" |
|
|
print("--- Successfully generated AI briefing.") |
|
|
return DailyBriefingResponse(briefing_text=final_briefing) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An unexpected error occurred during briefing generation:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail="Failed to generate AI briefing.") |
|
|
|
|
|
|
|
|
@app.post("/summarize-contract", response_model=ContractSummary, summary="Analyzes a PDF contract and extracts key terms") |
|
|
def summarize_contract(request: ContractURL): |
|
|
print(f"\nβ
Received request on /summarize-contract (v3 - ROBUST)") |
|
|
if not _llm_instance: |
|
|
raise HTTPException(status_code=503, detail="The Llama model is not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
print(" - π Parsing PDF from URL...") |
|
|
contract_text = parse_pdf_from_url(request.pdf_url) |
|
|
contract_text = contract_text[:4000] |
|
|
print(f" - β
PDF parsed successfully. Truncated to {len(contract_text)} chars.") |
|
|
|
|
|
final_prompt = f""" |
|
|
[INST] |
|
|
You are a legal analysis AI. Your task is to extract specific details from a contract. You MUST respond ONLY with a single, valid JSON object. Do not add any text before or after the JSON. |
|
|
|
|
|
**RULES FOR THE JSON VALUES:** |
|
|
1. All values for "payment_details", "deliverables", "deadlines", "exclusivity", and "ownership" MUST be a single, plain string. |
|
|
2. The value for "summary_points" MUST be a simple list of strings. |
|
|
3. DO NOT use nested objects. DO NOT use nested lists. Summarize the content into plain text. |
|
|
|
|
|
[EXAMPLE of a GOOD RESPONSE] |
|
|
{{ |
|
|
"payment_details": "Client agrees to pay Influencer a total fee of $5,000 USD, payable in two installments.", |
|
|
"deliverables": "Influencer must create 2 Instagram Reels and 5 Instagram Stories.", |
|
|
"deadlines": "The deadline for all deliverables is October 30, 2024.", |
|
|
"exclusivity": "Influencer agrees to an exclusivity period of 30 days post-campaign.", |
|
|
"ownership": "The Client retains ownership of all created content.", |
|
|
"summary_points": [ |
|
|
"Total payment is $5,000 USD.", |
|
|
"Deliverables: 2 Reels, 5 Stories.", |
|
|
"A 30-day exclusivity period applies after the campaign." |
|
|
] |
|
|
}} |
|
|
[/EXAMPLE] |
|
|
|
|
|
Now, based on these strict rules, analyze the following text: |
|
|
|
|
|
[CONTRACT TEXT] |
|
|
{contract_text} |
|
|
[/CONTRACT TEXT] |
|
|
|
|
|
[YOUR JSON RESPONSE] |
|
|
""" |
|
|
|
|
|
print(" - π Calling LLM with the new, stricter prompt...") |
|
|
response = _llm_instance( |
|
|
final_prompt, |
|
|
max_tokens=1024, |
|
|
temperature=0.0, |
|
|
echo=False |
|
|
) |
|
|
|
|
|
raw_response_text = response['choices'][0]['text'].strip() |
|
|
|
|
|
print(" - βοΈ Parsing JSON response from LLM...") |
|
|
try: |
|
|
start_index = raw_response_text.find('{') |
|
|
end_index = raw_response_text.rfind('}') + 1 |
|
|
clean_json_text = raw_response_text[start_index:end_index] |
|
|
summary_data = json.loads(clean_json_text) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ ERROR parsing LLM response: {e}. Raw response was: '{raw_response_text}'") |
|
|
raise HTTPException(status_code=500, detail="Failed to parse analysis from the AI model.") |
|
|
|
|
|
print("--- β
Successfully generated contract summary from LLM.") |
|
|
|
|
|
|
|
|
return summary_data |
|
|
|
|
|
except Exception as e: |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail="An internal server error occurred in the AI.") |
|
|
|
|
|
|
|
|
@app.post("/predict/influencer-performance-score", response_model=InfluencerPerformanceResponse, summary="Predicts a holistic performance score for an influencer") |
|
|
async def predict_influencer_performance_score(stats: InfluencerPerformanceStats): |
|
|
""" |
|
|
Backend se influencer ki stats leta hai aur pre-trained model ka use karke |
|
|
ek performance score (0-100) return karta hai. |
|
|
""" |
|
|
print(f"\nβ
Received request on /predict/influencer-performance-score") |
|
|
|
|
|
|
|
|
if _performance_scorer is None: |
|
|
print(" - β ERROR: The Performance Scorer model (_performance_scorer) is not loaded.") |
|
|
raise HTTPException( |
|
|
status_code=503, |
|
|
detail="The Performance Scorer model is not available. Please ensure 'performance_scorer_v1.joblib' exists and is loaded." |
|
|
) |
|
|
|
|
|
try: |
|
|
|
|
|
|
|
|
input_data = pd.DataFrame([stats.model_dump()]) |
|
|
print(f" - Input data for model: \n{input_data}") |
|
|
|
|
|
|
|
|
predicted_score_raw = _performance_scorer.predict(input_data) |
|
|
|
|
|
|
|
|
|
|
|
predicted_score = max(0, min(100, int(predicted_score_raw[0]))) |
|
|
|
|
|
print(f" - β
Successfully predicted performance score: {predicted_score}") |
|
|
|
|
|
|
|
|
return InfluencerPerformanceResponse(performance_score=predicted_score) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in the /predict/influencer-performance-score endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
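# Hedged sketch: one way a file like 'performance_scorer_v1.joblib' could be
# produced so that .predict(DataFrame) works as used above. The feature layout
# and the RandomForestRegressor choice are assumptions, not the project's
# actual training recipe.
def _train_performance_scorer_sketch(training_df: pd.DataFrame):
    """Fit a regressor mapping influencer stats to a 0-100 score and persist it."""
    from sklearn.ensemble import RandomForestRegressor
    features = training_df.drop(columns=["performance_score"])
    target = training_df["performance_score"]
    model = RandomForestRegressor(n_estimators=200, random_state=42)
    model.fit(features, target)
    joblib.dump(model, os.path.join(MODELS_DIR, "performance_scorer_v1.joblib"))
    return model

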
@app.post("/ai/coach/generate-growth-plan", response_model=AIGrowthPlanResponse, summary="Generates personalized growth tips for a single influencer") |
|
|
def generate_growth_plan_route(request: AIGrowthPlanRequest): |
|
|
""" |
|
|
Backend se ek influencer ka live performance data leta hai aur LLM ka use karke |
|
|
personalized improvement tips generate karta hai. |
|
|
""" |
|
|
print(f"\nβ
Received request on /ai/coach/generate-growth-plan for: {request.fullName}") |
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="AI Strategist is not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
insights_list = _ai_strategist.generate_influencer_growth_plan(request.model_dump()) |
|
|
|
|
|
return AIGrowthPlanResponse(insights=insights_list) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred in the Growth Plan endpoint: {e}") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.post("/analyze/brand-asset-colors", response_model=BrandAssetAnalysisResponse, summary="Extracts dominant colors from a logo URL") |
|
|
def analyze_brand_asset_colors(request: BrandAssetAnalysisRequest): |
|
|
""" |
|
|
Takes an image URL (logo/product), downloads it in memory, |
|
|
and uses AI (KMeans Clustering) to extract the main brand colors. |
|
|
""" |
|
|
print(f"\nβ
Received request on /analyze/brand-asset-colors") |
|
|
try: |
|
|
|
|
|
colors = extract_colors_from_url(request.file_url) |
|
|
|
|
|
print(f" - β
Extracted colors: {colors}") |
|
|
return BrandAssetAnalysisResponse(dominant_colors=colors) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ An error occurred during color extraction:") |
|
|
traceback.print_exc() |
|
|
|
|
|
return BrandAssetAnalysisResponse(dominant_colors=["#000000"]) |
|
|
|
|
|
|
|
|
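# Hedged sketch: `extract_colors_from_url` (core.utils) is the real
# implementation. The KMeans idea mentioned in the docstring is roughly the
# following; Pillow availability and the 64x64 downsample are assumptions.
def _dominant_colors_sketch(image_bytes: bytes, n_colors: int = 3) -> list:
    """Cluster pixels with KMeans and return the cluster centres as hex colors."""
    import io
    from PIL import Image
    from sklearn.cluster import KMeans
    img = Image.open(io.BytesIO(image_bytes)).convert("RGB").resize((64, 64))
    pixels = list(img.getdata())
    centres = KMeans(n_clusters=n_colors, n_init=10).fit(pixels).cluster_centers_
    return ["#%02x%02x%02x" % tuple(int(c) for c in centre) for centre in centres]

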
@app.post("/generate/service-blueprint", response_model=ServiceBlueprintResponse, summary="Generates an AI project plan for a service") |
|
|
async def generate_service_blueprint_route(request: ServiceBlueprintRequest): |
|
|
""" |
|
|
Takes a service type and user requirements, then uses the AI Strategist |
|
|
to generate a structured project plan (blueprint). |
|
|
""" |
|
|
print(f"\nβ
Received request on /generate/service-blueprint for type: {request.service_type}") |
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="AI Strategist is not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
blueprint_data = _ai_strategist.generate_service_blueprint( |
|
|
service_type=request.service_type, |
|
|
requirements=request.requirements |
|
|
) |
|
|
|
|
|
|
|
|
if "error" in blueprint_data: |
|
|
raise HTTPException(status_code=500, detail=blueprint_data["error"]) |
|
|
|
|
|
return ServiceBlueprintResponse(**blueprint_data) |
|
|
|
|
|
except HTTPException as http_exc: |
|
|
|
|
|
raise http_exc |
|
|
except Exception as e: |
|
|
print(f"π¨ An unexpected error occurred in the blueprint endpoint:") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail="An internal server error occurred while generating the blueprint.") |
|
|
|
|
|
|
|
|
@app.post("/generate/growth-plan", response_model=ServiceBlueprintResponse, summary="Generates an AI management plan for an influencer") |
|
|
async def generate_growth_plan_route(request: GrowthPlanRequest): |
|
|
""" |
|
|
Takes influencer goals and uses the AI Strategist to generate a growth plan. |
|
|
""" |
|
|
print(f"\nβ
Naya Endpoint Hit: /generate/growth-plan for handle: {request.platform_handle}") |
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="AI Strategist is not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
blueprint_data = _ai_strategist.generate_growth_plan( |
|
|
platform_handle=request.platform_handle, |
|
|
goals=request.goals, |
|
|
challenges=request.challenges |
|
|
) |
|
|
|
|
|
if "error" in blueprint_data: |
|
|
raise HTTPException(status_code=500, detail=blueprint_data["error"]) |
|
|
|
|
|
return ServiceBlueprintResponse(**blueprint_data) |
|
|
|
|
|
except HTTPException as http_exc: |
|
|
raise http_exc |
|
|
except Exception as e: |
|
|
print(f"π¨ Unexpected error in growth plan endpoint: {e}") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail="An internal server error occurred.") |
|
|
|
|
|
|
|
|
@app.post("/submit_summary_job") |
|
|
def submit_summary_job(request: AISummaryJobRequest, background_tasks: BackgroundTasks): |
|
|
""" |
|
|
Accepts a job, responds INSTANTLY, and runs the AI in the background. |
|
|
""" |
|
|
print(f" - β
Job accepted for check-in ID: {request.checkin_id}. Starting in background...") |
|
|
background_tasks.add_task(process_summary_in_background, request.checkin_id, request.raw_text) |
|
|
return {"message": "Job accepted", "checkin_id": request.checkin_id} |
|
|
|
|
|
|
|
|
@app.post("/generate/weekly-plan", response_model=WeeklyPlanResponse, summary="Generates 3 content tasks for an influencer") |
|
|
def generate_weekly_plan_route(request: WeeklyPlanRequest): |
|
|
""" |
|
|
Takes influencer context (mood, niche, trends) and generates 3 tailored content options. |
|
|
""" |
|
|
print(f"\nβ
Received request on /generate/weekly-plan") |
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="AI Strategist is not available.") |
|
|
|
|
|
try: |
|
|
|
|
|
context_dict = request.context.model_dump() |
|
|
|
|
|
|
|
|
plan_data = _ai_strategist.generate_weekly_content_plan(context_dict) |
|
|
|
|
|
return WeeklyPlanResponse(**plan_data) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"π¨ Error in weekly plan endpoint: {e}") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
@app.post("/chat/creative", response_model=Dict[str, str], summary="Brainstorming chat with AI Creative Director") |
|
|
def creative_chat_endpoint(request: CreativeChatRequest): |
|
|
if not _creative_director: |
|
|
raise HTTPException(status_code=503, detail="The AI Creative Director is not available due to a startup error.") |
|
|
try: |
|
|
history_list = [m.model_dump() for m in request.history] |
|
|
response_text = _creative_director.chat( |
|
|
user_message=request.message, |
|
|
history=history_list, |
|
|
task_context=request.task_context |
|
|
) |
|
|
return {"reply": response_text} |
|
|
except Exception as e: |
|
|
print(f"π¨ Creative Chat Error: {e}") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail="An error occurred with the AI Director.") |
|
|
|
|
|
|
|
|
@app.post("/generate/final-from-chat", response_model=FinalScriptResponse, summary="Generates final structured script from chat history") |
|
|
def finalize_script_endpoint(request: FinalizeScriptRequest): |
|
|
if not _creative_director: |
|
|
raise HTTPException(status_code=503, detail="The AI Creative Director is not available due to a startup error.") |
|
|
try: |
|
|
history_list = [m.model_dump() for m in request.history] |
|
|
return _creative_director.generate_final_plan( |
|
|
task_context=request.task_context, |
|
|
history=history_list |
|
|
) |
|
|
except Exception as e: |
|
|
print(f"π¨ Finalize Script Error: {e}") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail="Failed to generate the final plan.") |
|
|
|
|
|
|
|
|
@app.post("/api/v1/generate-campaign-from-prompt") |
|
|
def create_campaign_from_prompt_endpoint(payload: DirectPromptPayload): |
|
|
|
|
|
if not _ai_strategist: |
|
|
raise HTTPException(status_code=503, detail="AI Strategist model unavailable.") |
|
|
|
|
|
|
|
|
current_config = payload.config if payload.config else RequestConfig() |
|
|
|
|
|
try: |
|
|
|
|
|
response_text = _ai_strategist.generate_strategy_from_prompt( |
|
|
user_prompt=payload.prompt, |
|
|
config=current_config |
|
|
) |
|
|
return {"response": response_text} |
|
|
except Exception as e: |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.post("/community/moderate-and-tag", response_model=ContentCheckResponse) |
|
|
def moderate_and_tag(request: ContentCheckRequest): |
|
|
""" |
|
|
Called when a user hits 'Post'. Checks toxicity AND generates tags in one go. |
|
|
""" |
|
|
print(f"\nπ§ Checking community post content...") |
|
|
|
|
|
|
|
|
if not _community_brain: |
|
|
|
|
|
return ContentCheckResponse(toxicity_score=0.0, is_safe=True, tags=["#NewPost"]) |
|
|
|
|
|
mod_result = _community_brain.moderate_content(request.text) |
|
|
|
|
|
|
|
|
tags = [] |
|
|
if mod_result['is_safe']: |
|
|
|
|
|
tags = _community_brain.generate_smart_tags(request.text) |
|
|
|
|
|
return ContentCheckResponse( |
|
|
toxicity_score=mod_result['toxicity_score'], |
|
|
is_safe=mod_result['is_safe'], |
|
|
tags=tags |
|
|
) |
|
|
|
|
|
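# Hedged sketch of the contract `moderate_content` appears to satisfy, shown as
# a stand-alone example. The keyword heuristic here is an assumption purely for
# illustration; the real CommunityBrain logic (core.community_brain) is likely
# model-based, not a blocklist.
def _moderate_content_sketch(text: str, blocklist=("spam", "scam")) -> dict:
    """Return {'toxicity_score': float, 'is_safe': bool} for a post."""
    words = text.lower().split()
    hits = sum(1 for word in words if word in blocklist)
    score = min(1.0, 10 * hits / max(1, len(words)))
    return {"toxicity_score": round(score, 2), "is_safe": score < 0.5}

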
@app.post("/community/summarize-discussion", response_model=ThreadSummaryResponse) |
|
|
def summarize_community_thread(request: ThreadSummaryRequest): |
|
|
if not _community_brain: |
|
|
return ThreadSummaryResponse(summary="Summary unavailable.") |
|
|
|
|
|
summary = _community_brain.summarize_thread(request.comments) |
|
|
return ThreadSummaryResponse(summary=summary) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.post("/thunderbird/get_pulse_data", summary="Get All Data for Market Intelligence 'Pulse' Page") |
|
|
def get_pulse_data_endpoint(): |
|
|
""" |
|
|
This is the main orchestrator endpoint for the /pulse page. |
|
|
It calls all necessary Thunderbird engine functions and combines their data. |
|
|
""" |
|
|
print("π API HIT: /thunderbird/get_pulse_data") |
|
|
try: |
|
|
|
|
|
live_trends = get_external_trends() |
|
|
niche_predictions = predict_niche_trends() |
|
|
|
|
|
|
|
|
|
|
|
return { |
|
|
**live_trends, |
|
|
**niche_predictions, |
|
|
} |
|
|
except Exception as e: |
|
|
print(f"β API ERROR in /thunderbird/get_pulse_data: {e}") |
|
|
traceback.print_exc() |
|
|
raise HTTPException(status_code=500, detail=str(e)) |
|
|
|
|
|
@app.post("/thunderbird/decode_trend", summary="AI Analysis of a specific trend") |
|
|
async def decode_trend_endpoint(req: TrendAnalysisRequest): |
|
|
""" |
|
|
Asynchronously wakes up the AI and decodes the trend. |
|
|
This prevents server timeouts while the model is thinking. |
|
|
""" |
|
|
try: |
|
|
|
|
|
ai_brain = get_lazy_llm() |
|
|
if not ai_brain: |
|
|
raise HTTPException(status_code=503, detail="AI engine is currently offline or overloaded.") |
|
|
|
|
|
|
|
|
from core.thunderbird_engine import decode_market_trend |
|
|
|
|
|
result = decode_market_trend(req.topic, ai_brain) |
|
|
|
|
|
return result |
|
|
|
|
|
except Exception as e: |
|
|
print(f"β AI Decoding Error in Endpoint: {e}") |
|
|
raise HTTPException(status_code=500, detail="An internal error occurred in the AI.") |