import os
import sys
import joblib
import pandas as pd
import json
import re
import uuid

from huggingface_hub import hf_hub_download

from fastapi import FastAPI, HTTPException, Depends, BackgroundTasks 
from supabase import Client

from pydantic import BaseModel, Field
from pydantic.config import ConfigDict

from typing import List, Optional, Any, Dict  
import traceback
from llama_cpp import Llama
from statsmodels.tsa.api import Holt
from dateutil.relativedelta import relativedelta
from sklearn.preprocessing import LabelEncoder 
from core.support_agent import SupportAgent
from core.strategist import AIStrategist
from core.predictor import rank_influencers_by_match
from core.anomaly_detector import find_anomalies
from core.matcher import rank_documents_by_similarity, load_embedding_model
from core.utils import get_supabase_client, extract_colors_from_url
from core.document_parser import parse_pdf_from_url
from core.creative_chat import CreativeDirector
from core.community_brain import CommunityBrain
from core.thunderbird_engine import get_external_trends, predict_niche_trends

try:
    from core.rag.store import VectorStore
    from core.inference.cache import cached_response
except ImportError:
    VectorStore = None
    def cached_response(func): return func 

# --- Constants ---
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODELS_DIR = os.path.join(ROOT_DIR, 'models') 

# βœ… FIX: Swapped to a smaller, memory-friendly model to avoid crashing on free tier
MODEL_REPO = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
MODEL_FILENAME = "tinyllama-1.1b-chat-v1.0.Q2_K.gguf" 

MODEL_SAVE_DIRECTORY = os.path.join(os.environ.get("WRITABLE_DIR", "/data"), "llm_model")
LLAMA_MODEL_PATH = os.path.join(MODEL_SAVE_DIRECTORY, MODEL_FILENAME)
EMBEDDING_MODEL_PATH = os.path.join(ROOT_DIR, 'embedding_model')
DB_PATH = os.path.join(os.environ.get("WRITABLE_DIR", "/tmp"), "vector_db_persistent")

# --- Global Instances ---
_llm_instance = None 
_vector_store = None
_ai_strategist = None
_creative_director = None 
_support_agent = None
_budget_predictor = None
_influencer_matcher = None
_performance_predictor = None
_payout_forecaster = None
_earnings_optimizer = None
_earnings_encoder = None
_likes_predictor = None
_comments_predictor = None
_revenue_forecaster = None
_performance_scorer = None
_community_brain = None

def to_snake(name: str) -> str:
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()
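
# Illustrative behavior (not part of the service logic): the regex inserts an
# underscore before every uppercase letter that is not at the start, so
#   to_snake("predictedReach") -> "predicted_reach"
#   to_snake("TotalLikes")     -> "total_likes"
# Note that runs of capitals split too: to_snake("KPIData") -> "k_p_i_data".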

def get_lazy_llm():
    """Wakes up the AI model only when it's needed."""
    global _llm_instance
    if _llm_instance:
        return _llm_instance
    
    print("⏳ Awakening AI Brain (Loading LLM on-demand)...")
    try:
        if not os.path.exists(LLAMA_MODEL_PATH):
            print("   - Downloading model (first-time only)...")
            hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILENAME, local_dir=MODEL_SAVE_DIRECTORY)
        
        _llm_instance = Llama(model_path=LLAMA_MODEL_PATH, n_ctx=1024, n_threads=2, verbose=False)
        print("βœ… AI Brain is Active.")
        return _llm_instance
    except Exception as e:
        print(f"❌ Failed to load AI: {e}")
        return None
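
# Minimal usage sketch for get_lazy_llm (hypothetical caller, not one of the
# routes below; the call signature mirrors the endpoints further down, e.g.
# _llm_instance(prompt, max_tokens=..., stop=[...], echo=False)):
#
#   llm = get_lazy_llm()
#   if llm is None:
#       raise HTTPException(status_code=503, detail="LLM unavailable")
#   out = llm("Q: What is Reachify?\nA:", max_tokens=64)
#   text = out['choices'][0]['text']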

# ==============================================================
# 🎯 FIX 1: DEFINE NESTED CLASSES FIRST
# These MUST come before they are used in ForecastResponse.
# ==============================================================

class PerformanceForecast(BaseModel):
    predicted_engagement_rate: float
    predicted_reach: int

class PayoutForecast(BaseModel):
    estimated_earning: float

class RequestConfig(BaseModel):
    model_name: Optional[str] = "phi-2"
    temperature: Optional[float] = 0.7
    system_prompt: Optional[str] = None

class DirectPromptPayload(BaseModel):
    prompt: str
    config: Optional[RequestConfig] = None 

# --- Other Pydantic Models ---

class CreativeChatRequest(BaseModel): message: str; history: list; task_context: str
class FinalizeScriptRequest(BaseModel): history: list; task_context: str
class FinalScriptResponse(BaseModel): hook: str; script: str; visuals: List[str]; tools: List[str]
class ChatQuery(BaseModel): question: str = Field(..., min_length=1); role: str; live_data: str; conversationId: str
class ChatAnswer(BaseModel): response: str; context: Optional[str] = None
class ChatResponseRequest(BaseModel): prompt: str = Field(..., description="The user's direct question."); context: str = Field(..., description="The real-time data context from the backend.")
class ChatResponsePayload(BaseModel): response: str
class CaptionRequest(BaseModel): caption: str; action: str
class CaptionResponse(BaseModel): new_caption: str
class BudgetRequest(BaseModel):
    campaign_goal: str; influencer_count: int; platform: str; location: str; category: str; final_reach: int
    config: Optional[Dict[str, str]] = None
class BudgetResponse(BaseModel): predicted_budget_usd: float
class MatcherRequest(BaseModel): campaign_description: str; target_audience_age: str; target_audience_gender: str; engagement_rate: float; followers: int; country: str; niche: str
class MatcherResponse(BaseModel): suggested_influencer_ids: List[int]
class PerformanceRequest(BaseModel):
    budget_usd: float; influencer_count: int; platform: str; location: str; category: str; budget: float
    config: Optional[Dict[str, str]] = None
class PerformanceResponse(BaseModel): predicted_engagement_rate: float; predicted_reach: int
class StrategyRequest(BaseModel): prompt: str
class StrategyResponse(BaseModel): response: str
class OutlineRequest(BaseModel): title: str
class OutlineResponse(BaseModel): outline: str    
class TaskPrioritizationRequest(BaseModel): title: str; description: Optional[str] = None
class TaskPrioritizationResponse(BaseModel): priority: str    
class DashboardInsightsRequest(BaseModel): total_revenue_monthly: float; new_users_weekly: int; active_campaigns: int; pending_approvals: int
class TimeSeriesDataPoint(BaseModel): date: str; value: float
class TimeSeriesForecastRequest(BaseModel): data: List[TimeSeriesDataPoint]; periods_to_predict: int; business_context: Optional[str] = "No specific context provided."
class SmartForecastDataPoint(BaseModel): date: str; predicted_value: float; trend: str; commentary: Optional[str] = None
class TimeSeriesForecastResponse(BaseModel): forecast: List[SmartForecastDataPoint]  
class HealthKpiRequest(BaseModel): platformRevenue: float; activeCampaigns: int; totalBrands: int
class HealthSummaryResponse(BaseModel): summary: str     
class InfluencerData(BaseModel): id: str; name: Optional[str] = None; handle: Optional[str] = None; followers: Optional[int] = 0; category: Optional[str] = None; bio: Optional[str] = None
class TeamStrategyRequest(BaseModel): brand_name: str; campaign_goal: str; target_audience: str; budget_range: str; influencers: List[InfluencerData]
class CreativeBrief(BaseModel): title: str; description: str; goal_kpi: str; content_guidelines: List[str]
class TeamStrategyResponse(BaseModel): success: bool; strategy: Optional[CreativeBrief] = None; suggested_influencers: Optional[List[InfluencerData]] = None; error: Optional[str] = None    
class AnalyticsInsightsRequest(BaseModel): totalReach: Optional[int] = 0; totalLikes: Optional[int] = 0; averageEngagementRate: Optional[float] = 0.0; topPerformingInfluencer: Optional[str] = "N/A"
class AnalyticsInsightsResponse(BaseModel): insights: str
class CampaignDetailsForMatch(BaseModel): description: Optional[str] = ""; goal_kpi: Optional[str] = ""; category: Optional[str] = ""
class InfluencerRankRequest(BaseModel): campaign_details: CampaignDetailsForMatch; influencers: List[InfluencerData]
class InfluencerRankResponse(BaseModel): ranked_influencers: List[InfluencerData]    
class WeeklySummaryRequest(BaseModel): start_date: str; end_date: str; total_ad_spend: float; total_clicks: int; new_followers: int; top_performing_campaign: str
class WeeklySummaryResponse(BaseModel): summary: str    
class PayoutForecastInput(BaseModel): total_budget_active_campaigns: float = Field(..., description="The sum of budgets for all of a manager's currently active campaigns."); config: Optional[Dict[str, str]] = None  # read by predict_payout's admin-multiplier check
class PayoutForecastOutput(BaseModel): forecastedAmount: float; commentary: str
class CampaignForRanking(BaseModel): id: int; description: Optional[str] = ""
class InfluencerForRanking(BaseModel): id: str; category: Optional[str] = "Fashion"; bio: Optional[str] = ""
class RankCampaignsRequest(BaseModel): influencer: InfluencerForRanking; campaigns: List[CampaignForRanking]
class RankedCampaignResult(BaseModel): campaign_id: int; score: float
class RankCampaignsResponse(BaseModel): ranked_campaigns: List[RankedCampaignResult] 
class CaptionAssistRequest(BaseModel): caption: str; action: str = Field(..., description="Action to perform: 'improve', 'hashtags', or 'check_guidelines'"); guidelines: Optional[str] = None
class CaptionAssistResponse(BaseModel): new_text: str
class ForecastRequest(BaseModel):
    budget: float; category: str; follower_count: int; engagement_rate: float
    config: Optional[Dict[str, str]] = None

    
# --- COMMUNITY LAYER MODELS ---
class ContentCheckRequest(BaseModel):
    text: str
    user_id: Optional[str] = None

class TagGenerationRequest(BaseModel):
    content: str
    niche: Optional[str] = "General"

class ContentCheckResponse(BaseModel):
    toxicity_score: float
    is_safe: bool
    tags: List[str]

class ThreadSummaryRequest(BaseModel):
    comments: List[str]
class ThreadSummaryResponse(BaseModel):
    summary: str

          
class TrendAnalysisRequest(BaseModel):
    topic: str

  
class ForecastResponse(BaseModel):
    performance: PerformanceForecast
    payout: PayoutForecast


class InfluencerKpiData(BaseModel): totalReach: int; totalLikes: int; totalComments: int; avgEngagementRate: float; totalSubmissions: int
class InfluencerAnalyticsSummaryResponse(BaseModel): summary: str
class PortfolioOption(BaseModel): id: str; contentUrl: str; caption: Optional[str] = ""; likes: Optional[int] = 0; campaign: dict
class CuratePortfolioRequest(BaseModel): submissions: List[PortfolioOption]
class CuratePortfolioResponse(BaseModel): featured_submission_ids: List[str]
class EarningOpportunityRequest(BaseModel): follower_count: int = Field(..., description="The influencer's current follower count")

class Opportunity(BaseModel): campaign_niche: str; content_format: str; estimated_score: float; commentary: str
class EarningOpportunityResponse(BaseModel): opportunities: List[Opportunity]
class PostPerformanceRequest(BaseModel): follower_count: int; caption_length: int; campaign_niche: str; content_format: str
class PostPerformanceResponse(BaseModel): predicted_likes: int; predicted_comments: int; feedback: str
class AnomalyInsight(BaseModel): influencer_id: str; influencer_name: str; insights: List[str]
class RevenueForecastDatapoint(BaseModel): month: str; predicted_revenue: float; trend: str
class RevenueForecastResponse(BaseModel): forecast: List[RevenueForecastDatapoint]; ai_commentary: str   
class MatchDocument(BaseModel): id: str; text: str; match_score: Optional[int] = None
class RankBySimilarityRequest(BaseModel): query: str; documents: List[MatchDocument]
class RankBySimilarityResponse(BaseModel): ranked_documents: List[MatchDocument]
class ContentQualityRequest(BaseModel): caption: str = Field(..., description="The caption text to be analyzed.")
class ContentQualityScore(BaseModel): readability: int; engagement: int; call_to_action: int; hashtag_strategy: int
class ContentQualityResponse(BaseModel): overall_score: float; scores: ContentQualityScore; feedback: str    
class DailyBriefingData(BaseModel): roster_size: int; on_bench_influencers: int; pending_submissions: int; revisions_requested: int; lowest_ai_score: Optional[int] = None; highest_pending_payout: float
class DailyBriefingResponse(BaseModel): briefing_text: str
class ContractURL(BaseModel): pdf_url: str
class ContractSummary(BaseModel): payment_details: str; deliverables: str; deadlines: str; exclusivity: str; ownership: str; summary_points: List[str] 
class InfluencerPerformanceStats(BaseModel): avg_engagement_rate: float; on_time_submission_rate: float; avg_brand_rating: float; monthly_earnings: float
class InfluencerPerformanceResponse(BaseModel): performance_score: int
class AIGrowthPlanRequest(BaseModel): fullName: str; category: Optional[str] = None; avgEngagementRate: float; monthlyEarnings: float; onTimeSubmissionRate: float; bestPostCaption: Optional[str] = None; worstPostCaption: Optional[str] = None
class AIGrowthPlanResponse(BaseModel): insights: List[str]
class BrandAssetAnalysisRequest(BaseModel): file_url: str = Field(..., description="URL of the logo or brand image"); asset_type: str = "logo"
class BrandAssetAnalysisResponse(BaseModel): dominant_colors: List[str]
class ServiceBlueprintRequest(BaseModel): service_type: str = Field(..., description="e.g., 'web-dev' or 'growth'"); requirements: str = Field(..., min_length=10)
class ServiceBlueprintResponse(BaseModel): title: str; deliverables: List[str]; stack: str; price_est: str; timeline: str
class GrowthPlanRequest(BaseModel): platform_handle: str; goals: str; challenges: str
class AISummaryJobRequest(BaseModel): checkin_id: int; raw_text: str
class WeeklyCheckinSummaryResponse(BaseModel): wins: List[str]; challenges: List[str]; opportunities: List[str]; sentiment: str
class WeeklyPlanContext(BaseModel): niche: str; current_mood: str; recent_achievements: List[str]; active_trends: List[Dict[str, str]]
class WeeklyPlanRequest(BaseModel): context: WeeklyPlanContext
class PlanOption(BaseModel): type: str; title: str; platform: str; contentType: str; instructions: str; reasoning: str
class WeeklyPlanResponse(BaseModel): options: List[PlanOption]

# --- FastAPI App ---
app = FastAPI(title="Reachify AI Service (Deploy-Ready)", version="11.0.0")

@app.on_event("startup")
def startup_event():
    # Make sure we can modify the global variables
    global _llm_instance, _creative_director, _support_agent, _ai_strategist, _community_brain, \
           _vector_store, _budget_predictor, _influencer_matcher, _performance_predictor, \
           _payout_forecaster, _earnings_optimizer, _earnings_encoder, _likes_predictor, \
           _comments_predictor, _revenue_forecaster, _performance_scorer
    
    # 1. DOWNLOAD AND LOAD LLM
    print("--- πŸš€ AI Service Starting Up... ---")
    try:
        os.makedirs(MODEL_SAVE_DIRECTORY, exist_ok=True)
        if not os.path.exists(LLAMA_MODEL_PATH):
            print(f"   - Downloading '{MODEL_FILENAME}' from '{MODEL_REPO}'...")
            hf_hub_download(
                repo_id=MODEL_REPO,
                filename=MODEL_FILENAME,
                local_dir=MODEL_SAVE_DIRECTORY,
                local_dir_use_symlinks=False
            )
            print("   - βœ… Model downloaded successfully.")
        else:
            print(f"   - LLM model found locally.")
        
        # Load LLM
        print("   - Loading Llama LLM into memory...")
        _llm_instance = Llama(model_path=LLAMA_MODEL_PATH, n_gpu_layers=0, n_ctx=2048, verbose=False)
        print("   - βœ… LLM Loaded successfully.")
    
    except Exception as e:
        print(f"   - ❌ FATAL ERROR: LLM failed to load. Features disabled. Error: {e}")
        # traceback.print_exc()
        _llm_instance = None 

    # 2. INITIALIZE AGENTS
    if _llm_instance:
        try:
            print("   - Initializing AI components that depend on LLM...")
            _creative_director = CreativeDirector(llm_instance=_llm_instance)
            
            if VectorStore: _vector_store = VectorStore()
            
            _ai_strategist = AIStrategist(llm_instance=_llm_instance, store=_vector_store)
            
            _community_brain = CommunityBrain(llm_instance=_llm_instance)
            _support_agent = SupportAgent(llm_instance=_llm_instance, embedding_path=EMBEDDING_MODEL_PATH, db_path=DB_PATH)
            
            print("   - βœ… Core AI components are online.")
        except Exception as e:
            print(f"   - ❌ FAILED to initialize AI Agents: {e}")
            # traceback.print_exc()

    # 3. LOAD ML MODELS (The Critical Fix: Safe Loading)
    print("   - Loading ML models from joblib files...")
    model_paths = {
        'budget': ('_budget_predictor', 'budget_predictor_v1.joblib'),
        'matcher': ('_influencer_matcher', 'influencer_matcher_v1.joblib'),
        'performance': ('_performance_predictor', 'performance_predictor_v1.joblib'),
        'payout': ('_payout_forecaster', 'payout_forecaster_v1.joblib'),
        'earnings': ('_earnings_optimizer', 'earnings_model.joblib'),
        'earnings_encoder': ('_earnings_encoder', 'earnings_encoder.joblib'),
        'likes_predictor': ('_likes_predictor', 'likes_predictor_v1.joblib'),
        'comments_predictor': ('_comments_predictor', 'comments_predictor_v1.joblib'),
        'revenue_forecaster': ('_revenue_forecaster', 'revenue_forecaster_v1.joblib'),
        'performance_scorer': ('_performance_scorer', 'performance_scorer_v1.joblib'),
    }
    
    # Loop through each model safely
    for name, (var, file) in model_paths.items():
        path = os.path.join(MODELS_DIR, file)
        try:
            if os.path.exists(path):
                # Try to load joblib file
                loaded = joblib.load(path)
                globals()[var] = loaded
                print(f"     - βœ… Loaded {name} model.")
            else:
                globals()[var] = None
                print(f"     - ⚠️ Model '{name}' file not found.")
        except Exception as e:
            # THIS IS THE FIX: Instead of crashing, just set to None and print error
            globals()[var] = None
            print(f"     - ❌ SKIPPING {name}: Failed to load ({str(e)})")

    # Load Embeddings
    try:
        load_embedding_model(EMBEDDING_MODEL_PATH)
    except Exception as e:
        print(f"   - ⚠️ Failed to load Embedding model: {e}")

    print("\n--- βœ… AI Service Startup Complete! ---")
    
    
@app.get("/")
def health_check():
    if _llm_instance:
        return {"status": "AI Service is Running"}
    else:
        return {"status": "AI Service is in a degraded state: Core LLM failed to load."}

def _cleanup_llm_response(data: dict) -> dict:
    """A robust helper to clean common messy JSON outputs from smaller LLMs."""
    cleaned = { "wins": [], "challenges": [], "opportunities": [], "sentiment": "Mixed" } # Default to Mixed
    
    # Clean list-based fields
    for key in ["wins", "challenges", "opportunities"]:
        if key in data and isinstance(data[key], list):
            for item in data[key]:
                if isinstance(item, str) and item: # Check if string is not empty
                    cleaned[key].append(item.strip())
                elif isinstance(item, dict) and 'text' in item and isinstance(item['text'], str) and item['text']:
                    cleaned[key].append(item['text'].strip())
    
    # Clean sentiment field
    sentiment_data = data.get("sentiment")
    if isinstance(sentiment_data, str) and sentiment_data:
        # The model sometimes returns "Positive." with a trailing period; strip it.
        cleaned["sentiment"] = sentiment_data.strip().rstrip('.')
    elif isinstance(sentiment_data, dict):
        if sentiment_data.get('positive'): cleaned["sentiment"] = "Positive"
        elif sentiment_data.get('negative'): cleaned["sentiment"] = "Negative"
        else: cleaned["sentiment"] = "Mixed"

    return cleaned
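
# Illustrative example (assumed input shape, based on the branches above):
#   _cleanup_llm_response({
#       "wins": [{"text": " Hit 10k views. "}, "", "Signed a new brand"],
#       "sentiment": "Positive."
#   })
# returns:
#   {"wins": ["Hit 10k views.", "Signed a new brand"],
#    "challenges": [], "opportunities": [], "sentiment": "Positive"}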

def process_summary_in_background(checkin_id: int, raw_text: str):
    """
    [FINAL, RELIABLE VERSION] This function no longer uses the LLM for sorting.
    It performs keyword matching directly in Python for 100% accuracy.
    """
    print(f"   - βš™οΈ BACKGROUND JOB STARTED for check-in ID: {checkin_id} (Reliable Python Sorter)")
    
    supabase = get_supabase_client()
    
    try:
        # --- FINAL SOLUTION LOGIC: DO THE SORTING IN PYTHON ---
        # The LLM's job is done here; Python itself scans for the keywords.
        
        # Step 1: Define our keywords
        win_keywords = ["awesome", "happy", "insane engagement", "finished", "managed to", "productive", "went really well", "pleased with", "love making"]
        challenge_keywords = ["rough week", "disaster", "struggled", "blocked", "nervous", "issue", "frustrating", "lagging", "disconnecting"]
        opportunity_keywords = ["thinking of", "next week", "maybe I should", "idea", "look into", "research"]

        # Step 2: Initialize our results
        wins = []
        challenges = []
        opportunities = []
        
        # Step 3: Break the raw text into sentences
        # This regex handles periods, question marks, and exclamation marks
        sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?|!)\s', raw_text)
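        # Illustrative split (made-up text): "Great week! I struggled with edits.
        # Maybe I should batch-film." becomes three sentences:
        # ["Great week!", "I struggled with edits.", "Maybe I should batch-film."]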

        # Step 4: Loop through each sentence and categorize it
        for sentence in sentences:
            s_lower = sentence.lower()
            
            # Check for challenge keywords first (they are most important)
            if any(keyword in s_lower for keyword in challenge_keywords):
                challenges.append(sentence.strip())
            # Then check for opportunity keywords
            elif any(keyword in s_lower for keyword in opportunity_keywords):
                opportunities.append(sentence.strip())
            # Finally, check for win keywords
            elif any(keyword in s_lower for keyword in win_keywords):
                wins.append(sentence.strip())
        
        # If any category is empty, we can add a placeholder
        if not wins: wins.append("No specific wins were mentioned.")
        if not challenges: challenges.append("No specific challenges were mentioned.")
        if not opportunities: opportunities.append("No new opportunities were mentioned.")
            
        # Step 5: Determine sentiment based on the counts
        sentiment = "Mixed"
        if len(challenges) > len(wins) + 1: # Significantly more challenges
            sentiment = "Negative"
        elif len(wins) > len(challenges) + 1: # Significantly more wins
            sentiment = "Positive"

        # Step 6: Create the final JSON object
        cleaned_summary = {
            "wins": wins,
            "challenges": challenges,
            "opportunities": opportunities,
            "sentiment": sentiment
        }

        # SUCCESS
        print(f"   - βœ… JOB ({checkin_id}): PYTHON SORTER COMPLETED. Updating database with: {cleaned_summary}")
        supabase.table("influencer_weekly_checkins").update({
            "structured_summary": cleaned_summary,
            "status": "completed"
        }).eq("id", checkin_id).execute()

    except Exception as e:
        error_message = f"Python Sorter failed: {str(e)}"
        print(f"   - ❌ JOB FAILED for check-in ID: {checkin_id}. Error: {error_message}")
        traceback.print_exc()
        supabase.table("influencer_weekly_checkins").update({
            "status": "failed",
            "error_message": error_message
        }).eq("id", checkin_id).execute()


@app.post("/generate-chat-response", response_model=ChatResponsePayload, summary="Interactive AI Strategist Chat")
async def generate_chat_response_route(request: ChatResponseRequest):
    print(f"\nβœ… Received request on /generate-chat-response")
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="The AI Strategist is not available.")
    try:
        response_text = _ai_strategist.generate_chat_response(prompt=request.prompt, context=request.context)
        return ChatResponsePayload(response=response_text)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/api/v1/chat", response_model=ChatAnswer, summary="Role-Aware AI Support Agent")
async def ask_support_agent(query: ChatQuery):
    if not _support_agent: raise HTTPException(status_code=503, detail="AI Support Agent is not available.")
    return _support_agent.answer(payload=query.model_dump(), conversation_id=query.conversationId)

@app.post("/api/v1/generate/caption", response_model=CaptionResponse, summary="Generate variations of a caption")
async def generate_caption_route(request: CaptionRequest):
    if not _support_agent: raise HTTPException(status_code=503, detail="AI Support Agent is not available.")
    new_caption_text = _support_agent.generate_caption_variant(caption=request.caption, action=request.action)
    return CaptionResponse(new_caption=new_caption_text)

@app.post("/generate-strategy", response_model=StrategyResponse, summary="Generate a Digital Marketing Strategy")
async def generate_strategy_route(request: StrategyRequest):
    if not _support_agent:
        raise HTTPException(status_code=503, detail="AI Support Agent is not available.")
    try:
        strategy_text = _support_agent.generate_marketing_strategy(prompt=request.prompt)
        return StrategyResponse(response=strategy_text)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An internal error occurred in the AI model: {e}")

@app.post("/api/v1/predict/budget", response_model=BudgetResponse)
async def predict_budget(request: BudgetRequest):
    if not _budget_predictor: raise HTTPException(status_code=503, detail="Predictor Unavailable")
    
    input_data = pd.DataFrame([request.model_dump(exclude={'config'})])
    prediction = float(_budget_predictor.predict(input_data)[0])
    
    # βš™οΈ CONTROL: Admin Multiplier Check
    if request.config:
        multiplier = float(request.config.get("budget_multiplier", 1.0))
        prediction = prediction * multiplier
        
    return BudgetResponse(predicted_budget_usd=round(prediction, 2))
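
# Worked example of the admin multiplier (made-up values): a raw model
# prediction of 1000.0 with config={"budget_multiplier": "1.25"} returns
# predicted_budget_usd=1250.0.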

@app.post("/api/v1/match/influencers", response_model=MatcherResponse, summary="Match Influencers to Campaign")
async def match_influencers(request: MatcherRequest):
    if not _influencer_matcher: raise HTTPException(status_code=503, detail="Influencer matcher is not available.")
    input_data = pd.DataFrame([request.model_dump()])
    prediction = _influencer_matcher.predict(input_data)
    integer_ids = [int(pid) for pid in prediction]
    return MatcherResponse(suggested_influencer_ids=integer_ids)

@app.post("/api/v1/predict/performance", response_model=PerformanceResponse, summary="Predict Campaign Performance")
async def predict_performance(request: PerformanceRequest):
    # Safety Check: Return default if model failed to load
    if not _performance_predictor: 
        return PerformanceResponse(predicted_engagement_rate=0.03, predicted_reach=50000)
    
    try:
        input_data = pd.DataFrame([request.model_dump()])
        prediction_value = _performance_predictor.predict(input_data)[0]
        return PerformanceResponse(predicted_engagement_rate=0.035, predicted_reach=int(prediction_value))
    except Exception:
        # Fallback in case of a runtime error
        return PerformanceResponse(predicted_engagement_rate=0.03, predicted_reach=50000)

@app.post("/generate-outline", response_model=OutlineResponse, summary="Generate a Blog Post Outline")
async def generate_outline_route(request: OutlineRequest):
    if not _support_agent:
        raise HTTPException(status_code=503, detail="AI Support Agent is not available.")
    try:
        outline_text = _support_agent.generate_content_outline(title=request.title)
        return OutlineResponse(outline=outline_text)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An internal error occurred in the AI model: {e}")
    

@app.post("/generate-dashboard-insights", response_model=StrategyResponse, summary="Generate Insights from Dashboard KPIs")
@cached_response
def generate_dashboard_insights_route(request: DashboardInsightsRequest):
    """
    This is the corrected SYNCHRONOUS version of the endpoint.
    """
    print(f"\nβœ… Received request on /generate-dashboard-insights with data: {request.model_dump()}")
    if not _llm_instance:
        raise HTTPException(status_code=503, detail="The Llama model is not available.")
    
    kpis = request.model_dump()
    prompt = f"""
[SYSTEM]
You are a senior data analyst at Reachify. Your task is to write a short, insightful summary for the agency's admin based on this week's key performance indicators. Please identify the most important trends, be proactive, and suggest a potential action. The summary should be in the form of 2-3 human-readable bullet points.

[THIS WEEK'S KPI DATA]
- Revenue This Month (so far): ${kpis.get('total_revenue_monthly', 0):.2f}
- New Users This Week: {kpis.get('new_users_weekly', 0)}
- Currently Active Campaigns: {kpis.get('active_campaigns', 0)}
- Items Awaiting Approval: {kpis.get('pending_approvals', 0)}

[YOUR INSIGHTFUL BULLET POINTS]
- """
    try:
        print("--- Sending composed prompt to LLM...")
        response = _llm_instance(prompt, max_tokens=250, temperature=0.7, stop=["[SYSTEM]", "Human:", "\n\n"], echo=False)
        insight_text = response['choices'][0]['text'].strip()
        if not insight_text.startswith('-'):
            insight_text = '- ' + insight_text
        print("--- Successfully received response from LLM.")
        return StrategyResponse(response=insight_text)
    except Exception as e:
        print(f"🚨 AN ERROR OCCURRED in /generate-dashboard-insights:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
    

@app.get("/", summary="Health Check")
def read_root():
    return {"status": "Unified AI Service is running"}

@app.post("/predict/time-series", response_model=TimeSeriesForecastResponse, summary="Forecast Time Series with Trend Analysis")
def predict_time_series(request: TimeSeriesForecastRequest):
    print(f"\nβœ… Received smart forecast request with context: '{request.business_context}'")
    
    if len(request.data) < 5:
        raise HTTPException(status_code=400, detail="Not enough data. At least 5 data points required.")

    try:
        df = pd.DataFrame([item.model_dump() for item in request.data])
        df['date'] = pd.to_datetime(df['date'])
        df = df.set_index('date').asfreq('MS', method='ffill')

        model = Holt(df['value'], initialization_method="estimated").fit(optimized=True)
        forecast_result = model.forecast(steps=request.periods_to_predict)

        smart_forecast_output = []
        last_historical_value = df['value'].iloc[-1]
        
        for date, predicted_val in forecast_result.items():
            trend_label = "Stable"
            commentary = None
            percentage_change = ((predicted_val - last_historical_value) / last_historical_value) * 100

            if percentage_change > 10:
                trend_label = "Strong Growth"
                if "by " in request.business_context:
                    reason = request.business_context.split('by ')[-1]
                    commentary = f"Strong growth expected, likely driven by {reason}"
                else:
                    commentary = "Strong growth expected due to positive trends."
            elif percentage_change > 2:
                trend_label = "Modest Growth"
            elif percentage_change < -5:
                trend_label = "Potential Downturn"
                commentary = "Warning: A potential downturn is detected. This may not account for upcoming campaigns. Review your strategy."

            smart_forecast_output.append(
                SmartForecastDataPoint(
                    date=date.strftime('%Y-%m-%d'),
                    predicted_value=round(predicted_val, 2),
                    trend=trend_label,
                    commentary=commentary
                )
            )
            last_historical_value = predicted_val

        return TimeSeriesForecastResponse(forecast=smart_forecast_output)

    except Exception as e:
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
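
# Illustrative request body for /predict/time-series (field names come from
# TimeSeriesForecastRequest above; the values are made up):
#   {
#     "data": [{"date": "2024-01-01", "value": 120.0}, ... at least 5 points ...],
#     "periods_to_predict": 3,
#     "business_context": "Growth is expected, driven by a festive campaign"
#   }
# Holt's linear-trend model extrapolates the series, and each forecast point is
# labelled Strong Growth / Modest Growth / Stable / Potential Downturn by the
# percentage-change thresholds in the loop above.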
    
@app.post("/generate-health-summary", response_model=HealthSummaryResponse, summary="Generates an actionable summary from KPIs")
def generate_health_summary(request: HealthKpiRequest):
    print(f"\nβœ… Received request to generate health summary.")
    if not _llm_instance:
        raise HTTPException(status_code=503, detail="LLM not available for summary.")
    
    kpis = request.model_dump()
    
    prompt = f"""
[SYSTEM]
You are a business analyst. Analyze these KPIs: Platform Revenue (β‚Ή{kpis.get('platformRevenue', 0):,.0f}), Active Campaigns ({kpis.get('activeCampaigns', 0)}). Provide one [PROGRESS] point and one [AREA TO WATCH] with a next action. Under 50 words.
[YOUR ANALYSIS]
"""

    try:
        
        response = _llm_instance(prompt, max_tokens=150, temperature=0.6, stop=["[SYSTEM]"], echo=False)
        summary_text = response['choices'][0]['text'].strip()
        print(f"   - βœ… Generated summary: {summary_text}")
        return HealthSummaryResponse(summary=summary_text)

    except OSError as e:
        print(f"🚨 CRITICAL LLM CRASH CAUGHT (OSError): {e}. Returning a fallback message.")
        traceback.print_exc()
        return HealthSummaryResponse(summary="[AREA TO WATCH]: The AI analyst model is currently unstable and is being reviewed. Manual analysis is recommended.")
    except Exception as e:
        print(f"🚨 An unexpected error occurred during summary generation: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
    

@app.post("/generate_team_strategy", response_model=TeamStrategyResponse, summary="Generates a full campaign strategy for the internal team")
def generate_team_strategy(request: TeamStrategyRequest):
    """
    This endpoint orchestrates the AI/ML logic for the Team Strategist tool.
    It takes campaign details and a list of influencers from the backend.
    """
    print(f"\nβœ… Received request on /generate_team_strategy for brand: {request.brand_name}")
    
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="AI Strategist model is not available or failed to load.")

    try:
        # Step 1: Generate the creative brief using the LLM
        creative_brief_dict = _ai_strategist.generate_campaign_brief(
            brand_name=request.brand_name,
            campaign_goal=request.campaign_goal,
            target_audience=request.target_audience,
            budget_range=request.budget_range
        )
        if "error" in creative_brief_dict:
            raise Exception(f"LLM Error during brief generation: {creative_brief_dict['error']}")
        
        # Step 2: Rank the provided influencers using the ML model
        influencer_list_of_dicts = [inf.model_dump() for inf in request.influencers]
        suggested_influencers_list = rank_influencers_by_match(
            influencers=influencer_list_of_dicts,
            campaign_details=request.model_dump(exclude={"influencers"}),
            top_n=3
        )
        
        print("βœ… Successfully generated brief and ranked influencers.")
        return TeamStrategyResponse(
            success=True,
            strategy=CreativeBrief(**creative_brief_dict),
            suggested_influencers=[InfluencerData(**inf) for inf in suggested_influencers_list]
        )

    except Exception as e:
        print(f"🚨 An error occurred in /generate_team_strategy endpoint:")
        traceback.print_exc()
        return TeamStrategyResponse(success=False, error=str(e))


@app.post("/strategist/generate-analytics-insights", response_model=AnalyticsInsightsResponse, summary="Generates Actionable Insights from Campaign Analytics")
async def generate_analytics_insights_route(request: AnalyticsInsightsRequest):
    """
    Receives campaign analytics data and uses the AI Strategist to generate key insights.
    """
    print(f"\nβœ… Received request on /strategist/generate-analytics-insights")
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="The AI Strategist is not available.")
    
    try:
        # Convert the Pydantic model's data into a dictionary
        analytics_data = request.model_dump()
        
        # Call the strategist's analytics-insights helper
        insights_text = _ai_strategist.generate_analytics_insights(analytics_data=analytics_data)
        
        return AnalyticsInsightsResponse(insights=insights_text)

    except Exception as e:
        print(f"🚨 An error occurred in /strategist/generate-analytics-insights endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/predictor/rank-influencers", response_model=InfluencerRankResponse, summary="Ranks a given list of influencers for a specific campaign")
async def rank_influencers_route(request: InfluencerRankRequest):
    """
    Backend se campaign details aur sabhi influencers ki list leta hai,
    aur ML model ka istemal karke top 3 ranked influencers wapas bhejta hai.
    """
    print(f"\nβœ… Received request on /predictor/rank-influencers for campaign: '{request.campaign_details.description[:30]}...'")
    
    try:
        influencers_list = [inf.model_dump() for inf in request.influencers]
        campaign_details_dict = request.campaign_details.model_dump()

        ranked_list = rank_influencers_by_match(
            influencers=influencers_list,
            campaign_details=campaign_details_dict,
            top_n=5
        )
        
        print(f"   - βœ… Successfully ranked {len(ranked_list)} influencers.")
        return InfluencerRankResponse(ranked_influencers=ranked_list)

    except Exception as e:
        print(f"🚨 An error occurred in /predictor/rank-influencers endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))       

@app.post("/strategist/generate-weekly-summary", response_model=WeeklySummaryResponse, summary="Generates a Weekly Summary from Metrics")
def generate_weekly_summary_route(request: WeeklySummaryRequest):
    print(f"\nβœ… Received request on the NEW /strategist/generate-weekly-summary endpoint.")
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="AI Strategist is not initialized.")
    try:
        summary_text = _ai_strategist.generate_weekly_summary(metrics=request.model_dump())
        if not summary_text or "error" in summary_text.lower():
            raise Exception("AI model failed to generate a valid summary.")
        return WeeklySummaryResponse(summary=summary_text)
    except Exception as e:
        print(f"🚨 An error occurred in /strategist/generate-weekly-summary: {e}")
        raise HTTPException(status_code=500, detail=str(e))     
    
@app.post("/predict/payout_forecast", response_model=PayoutForecastOutput)
def predict_payout(data: PayoutForecastInput):
    if not _payout_forecaster: raise HTTPException(status_code=503, detail="Model Unavailable")

    pred = float(_payout_forecaster.predict(pd.DataFrame([{'budget': data.total_budget_active_campaigns}]))[0])
    
    # βš™οΈ CONTROL
    if data.config:
        pred = pred * float(data.config.get("budget_multiplier", 1.0))

    return {"forecastedAmount": max(0, pred), "commentary": "Based on budget trends."}


@app.post("/analyze/content-quality", response_model=ContentQualityResponse, summary="Analyzes a caption for a quality score")
def analyze_content_quality(request: ContentQualityRequest):
    """
    Uses the loaded LLM to analyze a social media caption based on several criteria
    and returns a quantitative score and qualitative feedback.
    """
    print(f"\nβœ… Received request on /analyze/content_quality")
    if not _llm_instance:
        raise HTTPException(status_code=503, detail="The Llama model is not available.")

    caption = request.caption
    
    prompt = f"""
[SYSTEM]
You are a social media expert. Analyze the following caption... Respond ONLY with a valid JSON object.

[CAPTION TO ANALYZE]
"{caption}"

[YOUR JSON RESPONSE]
"""
    try:
        print("--- Sending caption to LLM for quality analysis...")
        response = _llm_instance(prompt, max_tokens=512, temperature=0.2, stop=["[SYSTEM]", "\n\n"], echo=False)
        
        json_text = response['choices'][0]['text'].strip()
        start_index = json_text.find('{')
        end_index = json_text.rfind('}') + 1
        
        if start_index == -1 or end_index == 0:
            raise ValueError("LLM did not return a valid JSON object.")
        
        clean_json_text = json_text[start_index:end_index]
        analysis_result = json.loads(clean_json_text)
        
        final_result = {
            "overall_score": analysis_result.get("overall_score"),
            "feedback": analysis_result.get("feedback"),
            "scores": analysis_result.get("scores") or analysis_result.get("score")
        }

        print("--- Successfully received and parsed JSON response from LLM.")
        return ContentQualityResponse(**final_result)

    except Exception as e:
        print(f"🚨 Error in Content Quality Analysis: {e}")
        raise HTTPException(status_code=500, detail="Failed to parse analysis.")

@app.post("/rank/campaigns-for-influencer", response_model=RankCampaignsResponse, summary="Ranks a list of campaigns for one influencer")
async def rank_campaigns_for_influencer_route(request: RankCampaignsRequest):
    """
    Takes an influencer's profile and a list of campaigns, uses the ML model
    to predict a 'match score' for each, and returns the list ranked by that score.
    """
    print(f"\nβœ… Received request on /rank/campaigns-for-influencer for influencer: {request.influencer.id}")
    
    # 1. Safety check: is the model loaded?
    if not _influencer_matcher:
        raise HTTPException(status_code=503, detail="Influencer Matcher model is not available.")
    if not request.campaigns:
        return RankCampaignsResponse(ranked_campaigns=[])

    try:
        # 2. Data preparation: build a DataFrame for the model.
        # The model needs exactly the columns it was trained on.
        df_list = []
        for campaign in request.campaigns:
            df_list.append({
                'influencer_category': request.influencer.category,
                'influencer_bio': request.influencer.bio,
                'campaign_description': campaign.description,
                # Also supply the columns this context lacks but the model requires
                'followers': 50000, # an average value
                'engagement_rate': 0.04, # a healthy value
                'country': 'USA', # a default value
                'niche': request.influencer.category or 'lifestyle'
            })
        
        df_to_predict = pd.DataFrame(df_list)

        # 3. AI prediction: have the model score each campaign
        print(f"   - Predicting scores for {len(df_to_predict)} campaigns...")
        predicted_scores = _influencer_matcher.predict(df_to_predict)
        
        # 4. Sorting & ranking: pair each campaign with its predicted score
        results_with_scores = zip(request.campaigns, predicted_scores)
        
        # Sort by score, highest first
        sorted_results = sorted(results_with_scores, key=lambda x: x[1], reverse=True)
        
        # 5. Build the final response
        output = [
            RankedCampaignResult(campaign_id=camp.id, score=float(score)) 
            for camp, score in sorted_results
        ]
        
        print(f"   - βœ… Successfully scored and ranked campaigns.")
        return RankCampaignsResponse(ranked_campaigns=output)
        
    except Exception as e:
        print(f"🚨 An error occurred during campaign ranking:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
    
@app.post("/ai/assist/caption", response_model=CaptionAssistResponse, summary="Assists with writing or improving captions")
async def caption_assistant_route(request: CaptionAssistRequest):
    """
    Takes a caption and performs an action (improve, suggest hashtags, etc.) using the LLM.
    """
    print(f"\nβœ… Received request on /ai/assist/caption with action: {request.action}")
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="AI Strategist is not available.")
    
    try:
        # Delegate to the caption-assistance helper on _ai_strategist
        generated_text = _ai_strategist.get_caption_assistance(
            caption=request.caption,
            action=request.action,
            guidelines=request.guidelines
        )
        return CaptionAssistResponse(new_text=generated_text)

    except Exception as e:
        print(f"🚨 An error occurred in /ai/assist/caption endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/predict/campaign-outcome", response_model=ForecastResponse)
async def predict_campaign_outcome(request: ForecastRequest):
    if not _performance_predictor or not _payout_forecaster:
        raise HTTPException(status_code=503, detail="Models Unavailable")

    input_df = pd.DataFrame([request.model_dump(exclude={'config'})])
    input_df['influencer_count'] = 1
    input_df['platform'] = 'instagram'
    input_df['location'] = 'USA'
    input_df['followers'] = request.follower_count

    # Predict
    reach = _performance_predictor.predict(input_df[['budget','influencer_count','platform','location','category']])[0]
    payout = float(_payout_forecaster.predict(input_df[['budget']])[0])

    # βš™οΈ CONTROL: Adjust Values if needed
    if request.config:
        payout_multiplier = float(request.config.get("budget_multiplier", 1.0)) # the budget multiplier doubles as the payout multiplier, for simplicity
        payout = payout * payout_multiplier
        # Ensure Minimum Payout (Floor)
        min_payout = float(request.config.get("ml_payout_floor", 0))
        payout = max(min_payout, payout)

    return ForecastResponse(
        performance=PerformanceForecast(predicted_reach=int(reach), predicted_engagement_rate=round(request.engagement_rate*100, 2)),
        payout=PayoutForecast(estimated_earning=max(0, payout))
    )
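
# A worked example of the config adjustment above, as a standalone sketch: with
# a raw payout of 1200.0, a budget_multiplier of 0.8 and an ml_payout_floor of
# 1000.0, the scaled value (960.0) is lifted back up to the floor (1000.0).
# The helper name is illustrative only:
def _apply_payout_controls(payout: float, config: dict) -> float:
    """Scale the payout, then enforce the configured minimum (floor)."""
    payout *= float(config.get("budget_multiplier", 1.0))
    return max(float(config.get("ml_payout_floor", 0)), payout)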

@app.post("/ai/summarize/influencer-analytics", response_model=InfluencerAnalyticsSummaryResponse, summary="Generates a summary for the influencer's analytics page")
async def summarize_influencer_analytics(request: InfluencerKpiData):
    """
    Takes an influencer's KPIs and uses the AI strategist to create an actionable summary.
    """
    print(f"\nβœ… Received request on /ai/summarize/influencer-analytics")
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="AI Strategist is not available.")
    
    try:
        # Pass the data as a dictionary to the strategist
        summary_text = _ai_strategist.generate_influencer_analytics_summary(kpis=request.model_dump())
        return InfluencerAnalyticsSummaryResponse(summary=summary_text)

    except Exception as e:
        print(f"🚨 An error occurred in the analytics summary endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
    

@app.post("/portfolio/curate-with-ai", response_model=CuratePortfolioResponse)
def curate_portfolio_with_ai(request: CuratePortfolioRequest):
    """
    Accepts a list of approved submissions, scores them based on simple logic, 
    and returns the IDs of the best ones. THIS VERSION DOES NOT USE THE LLM.
    """
    print(f"\nβœ…βœ…βœ… RUNNING FINAL, NON-LLM VERSION of Portfolio Curation βœ…βœ…βœ…")
    
    submissions = request.submissions

    if not submissions:
        return CuratePortfolioResponse(featured_submission_ids=[])

    scored_submissions = []
    for sub in submissions:
        # Step 1: Compute a score
        score = 0
        # Points for likes (weighted most heavily)
        score += (sub.likes or 0) * 0.7 
        
        # Extra points for a longer caption
        if sub.caption and len(sub.caption) > 100:
            score += 100 # flat bonus

        # Step 2: Record each submission with its score
        scored_submissions.append({'id': sub.id, 'score': score})

    # Step 3: Sort all submissions by score, highest first
    sorted_submissions = sorted(scored_submissions, key=lambda x: x['score'], reverse=True)
    
    # Step 4: Pick the top 5 submissions (or fewer, if fewer exist)
    top_submissions = sorted_submissions[:5]
    
    # Step 5: Return only their IDs
    featured_ids = [sub['id'] for sub in top_submissions]
    
    print(f"   - βœ… Scored and selected {len(featured_ids)} posts: {featured_ids}")
    return CuratePortfolioResponse(featured_submission_ids=featured_ids)
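
# A worked example of the scoring rule above: a submission with 300 likes and a
# 150-character caption scores 300 * 0.7 + 100 = 310. The helper below simply
# mirrors the inline logic for illustration; it is not called anywhere:
def _score_submission(likes: int, caption: str) -> float:
    score = (likes or 0) * 0.7          # likes carry most of the weight
    if caption and len(caption) > 100:  # flat bonus for a substantial caption
        score += 100
    return score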

@app.post("/tasks/prioritize", response_model=TaskPrioritizationResponse)
def prioritize_task(request: TaskPrioritizationRequest):
    """
    Analyzes a task's title and description to assign a priority level.
    """
    if not _llm_instance:
        raise HTTPException(status_code=503, detail="LLM model is not available.")
    
    prompt = f"""
[INST] You are an expert assistant for a social media influencer. Your job is to assign a priority to a new task based on its title. Use these rules:
- If the task mentions "revise", "rejection", "feedback", "contract", or is a deadline, the priority is "high".
- If the task is about a "new invitation", "new opportunity", or "message", the priority is "medium".
- For anything else like "update profile", "explore campaigns", the priority is "low".

Respond ONLY with one of the following words: high, medium, or low.

Task Title: "{request.title}"
[/INST]
"""

    try:
        print(f"   - πŸ€– Prioritizing task: '{request.title}'")
        output = _llm_instance(prompt, max_tokens=10, stop=["[INST]"], echo=False)
        
        # LLM se aaye response ko saaf karein
        priority = output['choices'][0]['text'].strip().lower()

        # Ek safety check, taaki LLM kuch galat na bhej de
        if priority not in ['high', 'medium', 'low']:
            print(f"   - ⚠️ LLM returned invalid priority: '{priority}'. Defaulting to 'medium'.")
            priority = 'medium'

        print(f"   - βœ… AI assigned priority: '{priority}'")
        return TaskPrioritizationResponse(priority=priority)

    except Exception as e:
        print(f"   - ❌ An unexpected error occurred during task prioritization: {e}")
        return TaskPrioritizationResponse(priority='medium')


@app.post("/predict/earning-opportunities", response_model=EarningOpportunityResponse, summary="Finds the best earning opportunities for an influencer")
async def predict_earning_opportunities(request: EarningOpportunityRequest):
    """
    [FINAL POLISHED VERSION] Uses the model for a score and adds dynamic, helpful
    commentary for every content format.
    """
    print(f"\nβœ… Received request on /predict/earning-opportunities (FINAL POLISH)")
    if _earnings_optimizer is None or _earnings_encoder is None:
        raise HTTPException(status_code=503, detail="Earning Optimizer model or encoder is not available.")

    try:
        # This part remains the same: preparing data and getting a score from the model
        scenarios_list = [
            {'campaign_niche': niche, 'content_format': c_format, 'follower_count': request.follower_count}
            for niche in ['Tech', 'Fashion', 'Food', 'Gaming', 'General']
            for c_format in ['Reel', 'Post', 'Story']
        ]
        df_scenarios = pd.DataFrame(scenarios_list)
        categorical_features = ['campaign_niche', 'content_format']
        encoded_cats = _earnings_encoder.transform(df_scenarios[categorical_features])
        encoded_df = pd.DataFrame(encoded_cats, columns=_earnings_encoder.get_feature_names_out(categorical_features))
        numerical_features = df_scenarios[['follower_count']].reset_index(drop=True)
        X_final_to_predict = pd.concat([encoded_df, numerical_features], axis=1)
        predicted_scores = _earnings_optimizer.predict(X_final_to_predict)

        # === ✨ FINAL POLISH: MORE DYNAMIC COMMENTARY LOGIC ✨ ===
        results = []
        for i, scenario in enumerate(scenarios_list):
            score = float(predicted_scores[i])
            niche = scenario['campaign_niche']
            c_format = scenario['content_format']
            
            # Default commentary based on score
            if score > 0.75:
                comment = "Excellent match! This area has high potential for you."
            elif score < 0.4:
                comment = "This could be a challenging area to grow in."
            else:
                comment = "This is a solid opportunity worth exploring."
            
            # Add dynamic, helpful tips for EVERY format
            if c_format == 'Reel':
                comment += " Reels are perfect for reaching a wider audience with trending audio."
            elif c_format == 'Post':
                # This tip now always accompanies 'Post'
                comment += " Use high-quality visuals and a strong caption for best results with posts."
            elif c_format == 'Story':
                # This tip now always accompanies 'Story'
                comment += " Stories are great for engaging your current followers with interactive polls or Q&As."
            
            results.append(Opportunity(
                campaign_niche=niche,
                content_format=c_format,
                estimated_score=score,
                commentary=comment
            ))
        # === ✨ END OF FINAL POLISH ✨ ===
        
        sorted_results = sorted(results, key=lambda x: x.estimated_score, reverse=True)
        return EarningOpportunityResponse(opportunities=sorted_results[:5])
        
    except Exception as e:
        print("🚨 An error occurred in /predict/earning-opportunities endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
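
# The encode-then-concat step above only works when prediction-time columns
# line up with training-time columns. A self-contained sketch of that pattern
# with scikit-learn (toy data, not the real training set; sparse_output
# requires scikit-learn >= 1.2):
def _demo_onehot_alignment() -> pd.DataFrame:
    from sklearn.preprocessing import OneHotEncoder

    train = pd.DataFrame({'campaign_niche': ['Tech', 'Food'],
                          'content_format': ['Reel', 'Post']})
    enc = OneHotEncoder(handle_unknown='ignore', sparse_output=False)
    enc.fit(train)

    new = pd.DataFrame({'campaign_niche': ['Fashion'], 'content_format': ['Story']})
    # Categories unseen at fit time encode as all-zero columns instead of raising
    return pd.DataFrame(enc.transform(new),
                        columns=enc.get_feature_names_out(['campaign_niche', 'content_format']))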
    

@app.post("/predict/post-performance", response_model=PostPerformanceResponse, summary="Predicts likes and comments for a new post")
async def predict_post_performance(request: PostPerformanceRequest):
    """
    Takes details of a potential post and uses two ML models to predict the
    number of likes and comments it might receive.
    """
    print(f"\nβœ… Received request on /predict/post-performance")
    if not _likes_predictor or not _comments_predictor:
        raise HTTPException(status_code=503, detail="Performance prediction models are not available.")

    try:
        # Step 1: Prepare the input data in a DataFrame, just like during training
        input_data = pd.DataFrame([request.model_dump()])

        # Step 2: Use the models to predict
        print("   - Predicting likes...")
        predicted_likes_raw = _likes_predictor.predict(input_data)[0]

        print("   - Predicting comments...")
        predicted_comments_raw = _comments_predictor.predict(input_data)[0]

        # Step 3: Clean the predictions (e.g., ensure they are not negative)
        predicted_likes = max(0, int(predicted_likes_raw))
        predicted_comments = max(0, int(predicted_comments_raw))
        
        # Step 4: Generate simple, rule-based feedback
        feedback_messages = []
        if request.caption_length < 50:
            feedback_messages.append("Consider writing a slightly longer caption to increase engagement.")
        elif request.caption_length > 800:
            feedback_messages.append("This is a long caption! Ensure the first line is very engaging.")
        else:
            feedback_messages.append("The caption length is good for engagement.")

        if request.campaign_niche == 'General':
            feedback_messages.append("Try to target a more specific niche in the future for better performance.")

        feedback_text = " ".join(feedback_messages)
        
        print("   - βœ… Successfully generated performance prediction and feedback.")

        return PostPerformanceResponse(
            predicted_likes=predicted_likes,
            predicted_comments=predicted_comments,
            feedback=feedback_text
        )

    except Exception as e:
        print(f"🚨 An error occurred in the post-performance endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))       
     

@app.get("/analyze/performance-anomalies", response_model=List[AnomalyInsight], summary="Finds unusual performance trends for all influencers")
def analyze_anomalies(supabase: Client = Depends(get_supabase_client)):
    # This endpoint is heavy, so it should have security (e.g., requires an admin API key)
    print("πŸ€– Running platform-wide Anomaly Detection...")
    
    try:
        # 1. Fetch historical data for all influencers from our new stats table
        stats_res = supabase.table('daily_influencer_stats').select('*').order('date', desc=True).limit(5000).execute() # Get last ~5000 entries
        profiles_res = supabase.table('profiles').select('id, full_name').eq('role', 'influencer').execute()
        
        if not stats_res.data: return []

        all_stats_df = pd.DataFrame(stats_res.data)
        profiles_map = {p['id']: p['full_name'] for p in profiles_res.data}
        
        all_insights = []
        
        # 2. Loop through each influencer
        for influencer_id, group in all_stats_df.groupby('profile_id'):
            historical_df = group.sort_values('date')
            today_stats = historical_df.iloc[-1].to_dict()
            
            # 3. Call the Anomaly Detector AI
            insights = find_anomalies(influencer_id, historical_df, today_stats)
            
            if insights:
                all_insights.append(AnomalyInsight(
                    influencer_id=influencer_id,
                    influencer_name=profiles_map.get(influencer_id, 'Unknown Influencer'),
                    insights=insights
                ))
        
        return all_insights
        
    except Exception as e:
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
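
# `find_anomalies` lives in the core module and its implementation is not
# shown here. One plausible shape, sketched as a simple z-score test; both the
# approach and the 'engagement_rate' column name are assumptions, purely for
# illustration, not the actual detector:
def _zscore_anomaly_sketch(historical_df: pd.DataFrame, today_stats: dict,
                           column: str = 'engagement_rate', z_threshold: float = 2.0) -> list:
    series = historical_df[column].astype(float)
    mean, std = series.mean(), series.std()
    if pd.isna(std) or std == 0:
        return []  # not enough variation to judge
    z = (float(today_stats[column]) - mean) / std
    if abs(z) > z_threshold:
        direction = "spike" if z > 0 else "drop"
        return [f"Unusual {direction} in {column} (z-score {z:.1f})."]
    return []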

        
@app.post("/predict/revenue-forecast", response_model=RevenueForecastResponse, summary="Generates a 3-month revenue forecast")
async def predict_revenue_forecast():
    """
    (FAST VERSION) Uses the trained Holt's model to forecast revenue and adds simple commentary.
    """
    print(f"\nβœ… Received request on /predict/revenue-forecast (FAST VERSION)")
    if not _revenue_forecaster:
        raise HTTPException(status_code=503, detail="Revenue forecasting model is not available.")

    try:
        # Step 1: Generate forecast (This is fast)
        forecast_result = _revenue_forecaster.forecast(steps=3)
        
        # Step 2: Format the output and add trend analysis (Also fast)
        forecast_datapoints = []
        last_historical_value = _revenue_forecaster.model.endog[-1]
        
        for timestamp, predicted_value in forecast_result.items():
            trend_label = "Stable"
            percentage_change = ((predicted_value - last_historical_value) / last_historical_value) * 100
            if percentage_change > 15: trend_label = "Strong Growth"
            elif percentage_change > 5: trend_label = "Modest Growth"
            elif percentage_change < -10: trend_label = "Potential Downturn"
            
            forecast_datapoints.append(RevenueForecastDatapoint(
                month=timestamp.strftime('%B %Y'),
                predicted_revenue=round(predicted_value, 2),
                trend=trend_label
            ))
            last_historical_value = predicted_value

        # Step 3: Use simple, rule-based commentary (This is instant)
        first_trend = forecast_datapoints[0].trend if forecast_datapoints else "Stable"
        ai_commentary = "AI Insight: The forecast shows a stable outlook for the coming quarter."
        if "Growth" in first_trend:
            ai_commentary = "AI Insight: The model predicts a positive growth trend for the next quarter."
        elif "Downturn" in first_trend:
            ai_commentary = "AI Insight: A potential slowdown is predicted. It's a good time to review upcoming campaigns."
        
        print("   - βœ… Successfully generated revenue forecast (fast method).")
        
        return RevenueForecastResponse(
            forecast=forecast_datapoints,
            ai_commentary=ai_commentary
        )

    except Exception as e:
        print(f"🚨 An error occurred in the revenue forecast endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
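
# The trend thresholds above, restated as a tiny pure function for clarity:
# going from 100 to 112 is a +12% change, which lands in "Modest Growth".
# This mirror exists only for illustration and is not called anywhere:
def _trend_label(previous: float, predicted: float) -> str:
    pct = ((predicted - previous) / previous) * 100
    if pct > 15:
        return "Strong Growth"
    if pct > 5:
        return "Modest Growth"
    if pct < -10:
        return "Potential Downturn"
    return "Stable"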


@app.post("/predict/influencer-performance", response_model=InfluencerPerformanceResponse, summary="Predicts a holistic performance score for an influencer")
async def predict_influencer_performance(stats: InfluencerPerformanceStats):
    """
    Takes an influencer's key performance metrics and returns a single,
    AI-generated performance score from 0-100.
    """
    print(f"\nβœ… Received request on /predict/influencer-performance")
    if not _performance_scorer:
        raise HTTPException(status_code=503, detail="The Performance Scorer model is not available. Please train it first.")

    try:
        # Convert the input into a DataFrame, as the model expects
        input_data = pd.DataFrame([stats.model_dump()])
        
        # Predict with the model
        score = _performance_scorer.predict(input_data)
        
        # Clamp the score to the 0-100 range
        predicted_score = max(0, min(100, int(score[0])))
        
        print(f"   - βœ… Successfully predicted performance score: {predicted_score}")
        return {"performance_score": predicted_score}
        
    except Exception as e:
        print(f"🚨 An error occurred in the influencer performance endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
 

@app.post("/v1/match/rank-by-similarity", response_model=RankBySimilarityResponse, summary="Generic endpoint to rank documents by text similarity")
async def rank_by_similarity_endpoint(request: RankBySimilarityRequest):
    print(f"\nβœ… Received request on /v1/match/rank-by-similarity")
    try:
        documents_list = [doc.model_dump(exclude_unset=True) for doc in request.documents]
        ranked_docs = rank_documents_by_similarity(query=request.query, documents=documents_list)
        print(f"   - βœ… Successfully ranked {len(ranked_docs)} documents.")
        return RankBySimilarityResponse(ranked_documents=ranked_docs)
    except Exception as e:
        print(f"🚨 An error occurred in the ranking endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
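
# `rank_documents_by_similarity` is implemented in the core module. A minimal
# sketch of one way such a ranking can work, using TF-IDF cosine similarity;
# the production helper may well use embeddings instead:
def _rank_by_tfidf_similarity_sketch(query: str, texts: list) -> list:
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_similarity

    matrix = TfidfVectorizer().fit_transform([query] + texts)
    scores = cosine_similarity(matrix[0:1], matrix[1:]).flatten()
    # Highest-similarity text first
    return sorted(zip(texts, scores), key=lambda pair: pair[1], reverse=True)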


@app.post("/analyze/content-quality", response_model=ContentQualityResponse, summary="Analyzes a caption for a quality score")
def analyze_content_quality(request: ContentQualityRequest):
    """
    Uses the loaded LLM to analyze a social media caption based on several criteria
    and returns a quantitative score and qualitative feedback.
    """
    print(f"\nβœ… Received request on /analyze/content_quality")
    if not _llm_instance:
        raise HTTPException(status_code=503, detail="The Llama model is not available.")

    caption = request.caption
    
    prompt = f"""
[SYSTEM]
You are a social media expert. Analyze the following caption... Respond ONLY with a valid JSON object in the following format:
{{
  "overall_score": <float>,
  "scores": {{ "readability": <int>, "engagement": <int>, "call_to_action": <int>, "hashtag_strategy": <int> }},
  "feedback": "<string>"
}}

[CAPTION TO ANALYZE]
"{caption}"

[YOUR JSON RESPONSE]
"""

    json_text = ""  # initialized up front so the error handler below can always log it
    try:
        print("--- Sending caption to LLM for quality analysis...")
        response = _llm_instance(prompt, max_tokens=512, temperature=0.2, stop=["[SYSTEM]", "\n\n"], echo=False)
        
        json_text = response['choices'][0]['text'].strip()
        start_index = json_text.find('{')
        end_index = json_text.rfind('}') + 1
        if start_index == -1 or end_index == 0:
            raise ValueError("LLM did not return a valid JSON object.")
        
        clean_json_text = json_text[start_index:end_index]
        analysis_result = json.loads(clean_json_text)  # json is imported at module level
        
        final_result = {
            "overall_score": analysis_result.get("overall_score"),
            "feedback": analysis_result.get("feedback"),
            # Some model outputs use the singular key "score"; accept both.
            "scores": analysis_result.get("scores") or analysis_result.get("score")
        }

        print("--- Successfully received and parsed JSON response from LLM.")
        return ContentQualityResponse(**final_result)

    except (json.JSONDecodeError, KeyError, ValueError) as e:
        print(f"🚨 ERROR parsing LLM response: {e}. Raw response was: {json_text}")
        raise HTTPException(status_code=500, detail="Failed to parse analysis from AI model.")
    except Exception as e:
        print(f"🚨 An unexpected error occurred during content analysis:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/generate/daily-briefing", response_model=DailyBriefingResponse, summary="Generates a daily action plan for the Talent Manager")
def generate_daily_briefing(data: DailyBriefingData):
    """
    [BULLETPROOF VERSION] Takes KPIs and uses either the LLM (if data exists) or
    Python logic (if data is empty) to generate a daily briefing.
    """
    print(f"\nβœ… Received request on /generate/daily-briefing with data: {data}")
    
    # === Bulletproof guard: handle the empty-data case without calling the LLM ===
    on_bench = data.on_bench_influencers
    pending_tasks = data.pending_submissions + data.revisions_requested

    # SAFETY CHECK: if there is no important data, don't call the AI at all.
    # Return a good, static message straight from Python instead.
    if on_bench == 0 and pending_tasks == 0:
        print("   - βœ… No critical tasks found. Returning Python-generated 'All Clear' message.")
        return DailyBriefingResponse(
            briefing_text="All clear! No urgent actions are required. Your roster is fully engaged and up-to-date."
        )
    # === End of guard ===
        
    if not _llm_instance:
        raise HTTPException(status_code=503, detail="The Llama model is not available for briefing.")

    final_prompt = f"""
Summarize these key points into 2-3 direct bullet points for a manager.

DATA:
- Influencers without campaigns: {on_bench}
- Submissions needing review: {pending_tasks}
- Total pending money: {data.highest_pending_payout:,.0f} INR

SUMMARY:
- """
    
    try:
        print("--- Sending briefing data to LLM (Data exists)...")
        response = _llm_instance(final_prompt, max_tokens=150, temperature=0.1, stop=["DATA:"], echo=False)
        briefing_text = response['choices'][0]['text'].strip()

        final_briefing = f"Here are your top priorities for today:\n- {briefing_text}"
        print("--- Successfully generated AI briefing.")
        return DailyBriefingResponse(briefing_text=final_briefing)

    except Exception as e:
        print(f"🚨 An unexpected error occurred during briefing generation:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail="Failed to generate AI briefing.")


@app.post("/summarize-contract", response_model=ContractSummary, summary="Analyzes a PDF contract and extracts key terms")
def summarize_contract(request: ContractURL):
    print(f"\nβœ… Received request on /summarize-contract (v3 - ROBUST)")
    if not _llm_instance:
        raise HTTPException(status_code=503, detail="The Llama model is not available.")

    try:

        print("   - πŸ“‘ Parsing PDF from URL...")
        contract_text = parse_pdf_from_url(request.pdf_url)
        contract_text = contract_text[:4000] # Truncate
        print(f"   - βœ… PDF parsed successfully. Truncated to {len(contract_text)} chars.")

        final_prompt = f"""
[INST]
You are a legal analysis AI. Your task is to extract specific details from a contract. You MUST respond ONLY with a single, valid JSON object. Do not add any text before or after the JSON.

**RULES FOR THE JSON VALUES:**
1.  All values for "payment_details", "deliverables", "deadlines", "exclusivity", and "ownership" MUST be a single, plain string.
2.  The value for "summary_points" MUST be a simple list of strings.
3.  DO NOT use nested objects. DO NOT use nested lists. Summarize the content into plain text.

[EXAMPLE of a GOOD RESPONSE]
{{
  "payment_details": "Client agrees to pay Influencer a total fee of $5,000 USD, payable in two installments.",
  "deliverables": "Influencer must create 2 Instagram Reels and 5 Instagram Stories.",
  "deadlines": "The deadline for all deliverables is October 30, 2024.",
  "exclusivity": "Influencer agrees to an exclusivity period of 30 days post-campaign.",
  "ownership": "The Client retains ownership of all created content.",
  "summary_points": [
    "Total payment is $5,000 USD.",
    "Deliverables: 2 Reels, 5 Stories.",
    "A 30-day exclusivity period applies after the campaign."
  ]
}}
[/EXAMPLE]

Now, based on these strict rules, analyze the following text:

[CONTRACT TEXT]
{contract_text}
[/CONTRACT TEXT]

[YOUR JSON RESPONSE]
"""

        print("   - πŸ“ž Calling LLM with the new, stricter prompt...")
        response = _llm_instance(
            final_prompt,
            max_tokens=1024,
            temperature=0.0, # Set to 0 for maximum factuality
            echo=False
        )
        
        raw_response_text = response['choices'][0]['text'].strip()

        print("   - βš™οΈ Parsing JSON response from LLM...")
        try:
            start_index = raw_response_text.find('{')
            end_index = raw_response_text.rfind('}') + 1
            clean_json_text = raw_response_text[start_index:end_index]
            summary_data = json.loads(clean_json_text)
            
        except Exception as e:
            print(f"🚨 ERROR parsing LLM response: {e}. Raw response was: '{raw_response_text}'")
            raise HTTPException(status_code=500, detail="Failed to parse analysis from the AI model.")

        print("--- βœ… Successfully generated contract summary from LLM.")
        
        # We now return the raw dictionary. FastAPI will validate it against our simple ContractSummary model.
        return summary_data

    except HTTPException:
        # Let deliberate HTTP errors (like the JSON-parse failure above) pass through untouched
        raise
    except Exception as e:
        traceback.print_exc()
        raise HTTPException(status_code=500, detail="An internal server error occurred in the AI.")


@app.post("/predict/influencer-performance-score", response_model=InfluencerPerformanceResponse, summary="Predicts a holistic performance score for an influencer")
async def predict_influencer_performance_score(stats: InfluencerPerformanceStats):
    """
    Takes an influencer's stats from the backend and uses the pre-trained model
    to return a performance score (0-100).
    """
    print(f"\nβœ… Received request on /predict/influencer-performance-score")
    
    # Safety check: was the model loaded at startup?
    if _performance_scorer is None:
        print("   - ❌ ERROR: The Performance Scorer model (_performance_scorer) is not loaded.")
        raise HTTPException(
            status_code=503, 
            detail="The Performance Scorer model is not available. Please ensure 'performance_scorer_v1.joblib' exists and is loaded."
        )

    try:
        # Step 1: Convert the incoming data to a pandas DataFrame.
        # Column names must exactly match the ones used during training.
        input_data = pd.DataFrame([stats.model_dump()])
        print(f"   - Input data for model: \n{input_data}")
        
        # Step 2: Predict with the loaded model.
        predicted_score_raw = _performance_scorer.predict(input_data)
        
        # Step 3: Clean up the result:
        # cast the score to an integer and clamp it to the 0-100 range.
        predicted_score = max(0, min(100, int(predicted_score_raw[0])))
        
        print(f"   - βœ… Successfully predicted performance score: {predicted_score}")
        
        # Step 4: Return the response in the expected format.
        return InfluencerPerformanceResponse(performance_score=predicted_score)
        
    except Exception as e:
        print(f"🚨 An error occurred in the /predict/influencer-performance-score endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/ai/coach/generate-growth-plan", response_model=AIGrowthPlanResponse, summary="Generates personalized growth tips for a single influencer")
def generate_growth_plan_route(request: AIGrowthPlanRequest):
    """
    Takes an influencer's live performance data from the backend and uses the
    LLM to generate personalized improvement tips.
    """
    print(f"\nβœ… Received request on /ai/coach/generate-growth-plan for: {request.fullName}")
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="AI Strategist is not available.")
    
    try:
        # Convert the Pydantic model to a dict and pass it to the strategist
        insights_list = _ai_strategist.generate_influencer_growth_plan(request.model_dump())
        
        return AIGrowthPlanResponse(insights=insights_list)

    except Exception as e:
        print(f"🚨 An error occurred in the Growth Plan endpoint: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
    

@app.post("/analyze/brand-asset-colors", response_model=BrandAssetAnalysisResponse, summary="Extracts dominant colors from a logo URL")
def analyze_brand_asset_colors(request: BrandAssetAnalysisRequest):
    """
    Takes an image URL (logo/product), downloads it in memory,
    and uses AI (KMeans Clustering) to extract the main brand colors.
    """
    print(f"\nβœ… Received request on /analyze/brand-asset-colors")
    try:
        # Utility function call
        colors = extract_colors_from_url(request.file_url)
        
        print(f"   - βœ… Extracted colors: {colors}")
        return BrandAssetAnalysisResponse(dominant_colors=colors)
        
    except Exception as e:
        print(f"🚨 An error occurred during color extraction:")
        traceback.print_exc()
        # Fail gracefully
        return BrandAssetAnalysisResponse(dominant_colors=["#000000"])
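
# `extract_colors_from_url` is a utility defined elsewhere. A minimal sketch of
# the KMeans approach the docstring describes, under the assumption that it
# downloads with requests, shrinks the image, and clusters RGB pixels (all
# names and parameters here are illustrative):
def _extract_dominant_colors_sketch(image_url: str, n_colors: int = 3) -> list:
    import io
    import requests
    import numpy as np
    from PIL import Image
    from sklearn.cluster import KMeans

    img = Image.open(io.BytesIO(requests.get(image_url, timeout=10).content))
    pixels = np.asarray(img.convert('RGB').resize((64, 64))).reshape(-1, 3)  # shrink: speed over fidelity
    kmeans = KMeans(n_clusters=n_colors, n_init=10).fit(pixels)
    # Cluster centers are the dominant colors; format them as hex strings
    return ['#%02x%02x%02x' % tuple(c) for c in kmeans.cluster_centers_.astype(int)]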


@app.post("/generate/service-blueprint", response_model=ServiceBlueprintResponse, summary="Generates an AI project plan for a service")
async def generate_service_blueprint_route(request: ServiceBlueprintRequest):
    """
    Takes a service type and user requirements, then uses the AI Strategist
    to generate a structured project plan (blueprint).
    """
    print(f"\nβœ… Received request on /generate/service-blueprint for type: {request.service_type}")
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="AI Strategist is not available.")

    try:
        # Call the new method in our strategist
        blueprint_data = _ai_strategist.generate_service_blueprint(
            service_type=request.service_type,
            requirements=request.requirements
        )

        # Check if the AI returned an error internally
        if "error" in blueprint_data:
            raise HTTPException(status_code=500, detail=blueprint_data["error"])

        return ServiceBlueprintResponse(**blueprint_data)

    except HTTPException as http_exc:
        # Re-raise known HTTP exceptions
        raise http_exc
    except Exception as e:
        print(f"🚨 An unexpected error occurred in the blueprint endpoint:")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail="An internal server error occurred while generating the blueprint.")


@app.post("/generate/growth-plan", response_model=ServiceBlueprintResponse, summary="Generates an AI management plan for an influencer")
async def generate_growth_blueprint_route(request: GrowthPlanRequest):  # distinct name; avoids shadowing the coach handler above
    """
    Takes influencer goals and uses the AI Strategist to generate a growth plan.
    """
    print(f"\nβœ… Naya Endpoint Hit: /generate/growth-plan for handle: {request.platform_handle}")
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="AI Strategist is not available.")

    try:
        # Call the separate growth-plan function
        blueprint_data = _ai_strategist.generate_growth_plan(
            platform_handle=request.platform_handle,
            goals=request.goals,
            challenges=request.challenges
        )

        if "error" in blueprint_data:
            raise HTTPException(status_code=500, detail=blueprint_data["error"])

        return ServiceBlueprintResponse(**blueprint_data)

    except HTTPException as http_exc:
        raise http_exc
    except Exception as e:
        print(f"🚨 Unexpected error in growth plan endpoint: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail="An internal server error occurred.")
    
    
@app.post("/submit_summary_job")
def submit_summary_job(request: AISummaryJobRequest, background_tasks: BackgroundTasks):
    """
    Accepts a job, responds INSTANTLY, and runs the AI in the background.
    """
    print(f"   - βœ… Job accepted for check-in ID: {request.checkin_id}. Starting in background...")
    background_tasks.add_task(process_summary_in_background, request.checkin_id, request.raw_text)
    return {"message": "Job accepted", "checkin_id": request.checkin_id}
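
# `process_summary_in_background` is defined elsewhere in this module. The
# sketch below only illustrates the shape such a BackgroundTasks worker
# usually takes: do the slow work, persist the result, and swallow errors,
# since the HTTP request has already been answered. Everything here is an
# assumption, not the real implementation:
def _background_worker_sketch(checkin_id: str, raw_text: str) -> None:
    try:
        summary = raw_text[:200]  # stand-in for the slow LLM summarization call
        print(f"   - Background job finished for {checkin_id} ({len(summary)} chars).")
        # ...persist `summary` against `checkin_id` here...
    except Exception:
        traceback.print_exc()  # log only; there is no response left to send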


@app.post("/generate/weekly-plan", response_model=WeeklyPlanResponse, summary="Generates 3 content tasks for an influencer")
def generate_weekly_plan_route(request: WeeklyPlanRequest):  # <-- deliberately sync so FastAPI runs it in the thread pool
    """
    Takes influencer context (mood, niche, trends) and generates 3 tailored content options.
    """
    print(f"\nβœ… Received request on /generate/weekly-plan")
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="AI Strategist is not available.")

    try:
        # Convert Pydantic model to dict
        context_dict = request.context.model_dump()
        
        # Call the strategist (this now runs in the thread pool)
        plan_data = _ai_strategist.generate_weekly_content_plan(context_dict)
        
        return WeeklyPlanResponse(**plan_data)

    except Exception as e:
        print(f"🚨 Error in weekly plan endpoint: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
    

@app.post("/chat/creative", response_model=Dict[str, str], summary="Brainstorming chat with AI Creative Director")
def creative_chat_endpoint(request: CreativeChatRequest):
    if not _creative_director:
        raise HTTPException(status_code=503, detail="The AI Creative Director is not available due to a startup error.")
    try:
        history_list = [m.model_dump() for m in request.history]
        response_text = _creative_director.chat(
            user_message=request.message,
            history=history_list,
            task_context=request.task_context
        )
        return {"reply": response_text}
    except Exception as e:
        print(f"🚨 Creative Chat Error: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail="An error occurred with the AI Director.")


@app.post("/generate/final-from-chat", response_model=FinalScriptResponse, summary="Generates final structured script from chat history")
def finalize_script_endpoint(request: FinalizeScriptRequest):
    if not _creative_director:
        raise HTTPException(status_code=503, detail="The AI Creative Director is not available due to a startup error.")
    try:
        history_list = [m.model_dump() for m in request.history]
        return _creative_director.generate_final_plan(
            task_context=request.task_context,
            history=history_list
        )
    except Exception as e:
        print(f"🚨 Finalize Script Error: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail="Failed to generate the final plan.")


@app.post("/api/v1/generate-campaign-from-prompt")
def create_campaign_from_prompt_endpoint(payload: DirectPromptPayload):
    # Check if Strategist is loaded
    if not _ai_strategist:
        raise HTTPException(status_code=503, detail="AI Strategist model unavailable.")

    # Use Config or Default
    current_config = payload.config if payload.config else RequestConfig()

    try:
        # Core logic call; the strategist applies the prompt and config
        response_text = _ai_strategist.generate_strategy_from_prompt(
            user_prompt=payload.prompt, 
            config=current_config
        )
        return {"response": response_text}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


# ==============================================================
# 🧠 COMMUNITY INTELLIGENCE ENDPOINTS
# ==============================================================

@app.post("/community/moderate-and-tag", response_model=ContentCheckResponse)
def moderate_and_tag(request: ContentCheckRequest):
    """
    Called when a user hits 'Post'. Checks toxicity AND generates tags in one go.
    """
    print(f"\n🧠 Checking community post content...")
    
    # 1. Moderation Check (Fast)
    if not _community_brain:
        # Fail open: without the moderation brain, allow the post with default tags
        return ContentCheckResponse(toxicity_score=0.0, is_safe=True, tags=["#NewPost"])
    
    mod_result = _community_brain.moderate_content(request.text)
    
    # 2. Tagging (Only if safe)
    tags = []
    if mod_result['is_safe']:
        # If model exists, run extraction
        tags = _community_brain.generate_smart_tags(request.text)
    
    return ContentCheckResponse(
        toxicity_score=mod_result['toxicity_score'],
        is_safe=mod_result['is_safe'],
        tags=tags
    )
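
# `_community_brain.moderate_content` is implemented in the core module; a
# plausible threshold-style decision it might make, sketched here purely for
# illustration (the 0.7 cutoff is an assumption, not the real value):
def _moderation_decision_sketch(toxicity_score: float, threshold: float = 0.7) -> dict:
    return {"toxicity_score": toxicity_score, "is_safe": toxicity_score < threshold}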

@app.post("/community/summarize-discussion", response_model=ThreadSummaryResponse)
def summarize_community_thread(request: ThreadSummaryRequest):
    if not _community_brain:
        return ThreadSummaryResponse(summary="Summary unavailable.")
    
    summary = _community_brain.summarize_thread(request.comments)
    return ThreadSummaryResponse(summary=summary)


# =============================================================
# === ⚑️ PROJECT THUNDERBIRD - MARKET INTELLIGENCE HUB ===
# =============================================================

@app.post("/thunderbird/get_pulse_data", summary="Get All Data for Market Intelligence 'Pulse' Page")
def get_pulse_data_endpoint():
    """
    This is the main orchestrator endpoint for the /pulse page.
    It calls all necessary Thunderbird engine functions and combines their data.
    """
    print("πŸš€ API HIT: /thunderbird/get_pulse_data")
    try:
        # Call core logic functions in sequence
        live_trends = get_external_trends()
        niche_predictions = predict_niche_trends()
        # In the future, we would add the AI briefing call here

        # Combine results into one object for the frontend
        return {
            **live_trends,
            **niche_predictions,
        }
    except Exception as e:
        print(f"❌ API ERROR in /thunderbird/get_pulse_data: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
    
@app.post("/thunderbird/decode_trend", summary="AI Analysis of a specific trend")
async def decode_trend_endpoint(req: TrendAnalysisRequest):
    """
    Asynchronously wakes up the AI and decodes the trend.
    This prevents server timeouts while the model is thinking.
    """
    try:
        # 1. Wake up the Brain
        ai_brain = get_lazy_llm()
        if not ai_brain:
            raise HTTPException(status_code=503, detail="AI engine is currently offline or overloaded.")
            
        # 2. Process the request
        from core.thunderbird_engine import decode_market_trend
        
        result = decode_market_trend(req.topic, ai_brain)
        
        return result
    
    except Exception as e:
        print(f"❌ AI Decoding Error in Endpoint: {e}")
        raise HTTPException(status_code=500, detail="An internal error occurred in the AI.")