Nexari-Research committed on
Commit
145aa5a
·
verified ·
1 Parent(s): 4b6b9dc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +79 -41
app.py CHANGED
@@ -1,4 +1,4 @@
1
- # app.py - UPDATED to integrate behavior_model
2
  import re
3
  import json
4
  import asyncio
@@ -111,52 +111,80 @@ async def generate_response_stream(messages, max_tokens=600, temperature=0.85):
111
  logger.exception("Flow analysis failed: %s", e)
112
  flow_context = {}
113
 
114
- # Use flow context to drive behavior
115
- flow_desc = ""
116
- if flow_context:
117
- label = flow_context.get("flow_label","unknown")
118
- conf = round(float(flow_context.get("confidence", 0.0)), 2)
119
- expl = flow_context.get("explanation","")
120
- flow_desc = f"\n[FLOW] Detected: {label} (confidence {conf}). {expl}\n"
121
-
122
- if intent == "internet_search":
123
- yield f"data: {json.dumps({'status': 'Searching the web...'})}\n\n"
124
- await asyncio.sleep(0)
 
 
 
 
 
 
 
 
 
125
  try:
126
- tool_data_struct = perform_web_search(last_user_msg)
127
- except Exception as e:
128
- logger.exception("Web search failed: %s", e)
129
- tool_data_struct = {"query": last_user_msg, "results": []}
130
- vibe_data = get_smart_context(last_user_msg)
131
- strategy_data = get_thinking_strategy(is_complex=True, detail=want_detailed)
132
-
133
- elif intent == "coding_request":
134
- yield f"data: {json.dumps({'status': 'Analyzing Logic...'})}\n\n"
135
- vibe_data = get_smart_context(last_user_msg)
136
- strategy_data = get_thinking_strategy(is_complex=True, detail=want_detailed)
 
 
 
 
 
 
 
 
 
 
 
137
 
138
- elif intent == "checking_time":
139
- yield f"data: {json.dumps({'status': 'Checking Clock...'})}\n\n"
140
- time_data = get_time_context()
141
- vibe_data = get_smart_context(last_user_msg)
142
- strategy_data = get_thinking_strategy(is_complex=False, detail=want_detailed)
143
 
144
- else:
145
- vibe_data = get_smart_context(last_user_msg)
146
- strategy_data = get_thinking_strategy(is_complex=False, detail=want_detailed)
147
 
148
  base_system_instruction = (
149
  "### SYSTEM IDENTITY ###\n"
150
  "You are Nexari G1, an expressive and helpful AI created by Piyush.\n"
151
  "### RULES ###\n"
152
- "1) If WEB_DATA (search results) is provided, you MUST use it and prioritize it over model-internal knowledge. Cite sources (numbered) when possible.\n"
153
  "2) Do NOT invent facts when WEB_DATA contradicts model memory.\n"
154
- "3) If user asked for detailed/line-by-line output, produce a numbered step-by-step response; aim for thorough coverage.\n"
155
- "4) Avoid chain-of-thought; produce a short '🧠 Plan:' (max 2 lines) only for complex tasks, then '💡 Answer:' with final content.\n"
156
- "5) Keep emojis to 0-2 per message. After answering, offer a concise follow-up question.\n"
157
  )
158
 
159
- final_system_prompt = f"{base_system_instruction}\n{flow_desc}\n{vibe_data}\n{time_data}\n{strategy_data}"
 
 
 
 
 
 
 
 
 
 
 
 
 
160
 
161
  # Ensure system message present
162
  if messages[0].get("role") != "system":
@@ -164,6 +192,15 @@ async def generate_response_stream(messages, max_tokens=600, temperature=0.85):
164
  else:
165
  messages[0]["content"] = final_system_prompt
166
 
 
 
 
 
 
 
 
 
 
167
  if tool_data_struct:
168
  web_block = "### WEB_DATA (from live search) ###\n"
169
  items = tool_data_struct.get("results", [])
@@ -187,7 +224,6 @@ async def generate_response_stream(messages, max_tokens=600, temperature=0.85):
187
  yield "data: [DONE]\n\n"
188
  return
189
 
190
- # Build prompt with tokenizer helper if available, else fallback
191
  try:
192
  if hasattr(tokenizer, "apply_chat_template"):
193
  text_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
@@ -230,11 +266,13 @@ async def generate_response_stream(messages, max_tokens=600, temperature=0.85):
230
  if fc.lower() in cleaned.lower():
231
  cleaned = re.sub(re.escape(fc), "I am an AI — expressive and interactive.", cleaned, flags=re.IGNORECASE)
232
 
233
- if want_detailed:
234
- if not re.search(r"^\s*\d+[\.\)]\s+", cleaned, re.M):
235
- cleaned = "1) " + cleaned.replace("\n", "\n2) ")
236
 
237
- cleaned = cleaned.replace("💡 **Answer:**", "\n\n---\n💡 **Answer:**")
 
 
238
 
239
  payload = json.dumps({"choices":[{"delta":{"content": cleaned}}]})
240
  yield f"data: {payload}\n\n"
 
1
+ # app.py - UPDATED to inject emoji + min-length guidance into system prompt
2
  import re
3
  import json
4
  import asyncio
 
111
  logger.exception("Flow analysis failed: %s", e)
112
  flow_context = {}
113
 
114
+ # Get vibe/emotion context
115
+ vibe_data = get_smart_context(last_user_msg)
116
+
117
+ # Determine min words & emoji range by parsing vibe_data and flow label
118
+ # Defaults
119
+ min_words = 30
120
+ emoji_min, emoji_max = 0, 1
121
+
122
+ # Heuristics based on vibe_data text
123
+ if "Deep Dive Mode" in vibe_data:
124
+ min_words = 70
125
+ elif "Standard Chat Mode" in vibe_data:
126
+ min_words = 30
127
+ elif "Ping-Pong Mode" in vibe_data:
128
+ min_words = 12
129
+
130
+ # Try to extract suggested emoji range from vibe_data string if present
131
+ import re as _re
132
+ m = _re.search(r"Use\s+(\d+)–(\d+)\s+emoji", vibe_data)
133
+ if m:
134
  try:
135
+ emoji_min = int(m.group(1))
136
+ emoji_max = int(m.group(2))
137
+ except:
138
+ emoji_min, emoji_max = emoji_min, emoji_max
139
+
140
+ # Adjust based on flow context
141
+ flow_label = flow_context.get("flow_label", "")
142
+ flow_conf = float(flow_context.get("confidence", 0.0) or 0.0)
143
+ flow_expl = flow_context.get("explanation", "")
144
+
145
+ if flow_label == "escalation":
146
+ # De-escalate: increase min words, restrict flashy emojis
147
+ min_words = max(min_words, 40)
148
+ emoji_min, emoji_max = 0, min(emoji_max, 1)
149
+ deescalation_note = "Tone: de-escalate and apologize briefly when appropriate."
150
+ elif flow_label == "small_talk":
151
+ # Friendly chat can use more emojis
152
+ emoji_min, emoji_max = max(emoji_min, 1), max(emoji_max, 2)
153
+ min_words = max(8, min_words//3)
154
+ deescalation_note = ""
155
+ else:
156
+ deescalation_note = ""
157
 
158
+ # If user explicitly asked for short, respect it (override)
159
+ if re.search(r"\b(short|brief|quick|short and simple)\b", last_user_msg, re.IGNORECASE):
160
+ min_words = 6
 
 
161
 
162
+ strategy_data = get_thinking_strategy(is_complex=(intent=="coding_request" or want_detailed), detail=want_detailed, min_words_hint=min_words)
 
 
163
 
164
  base_system_instruction = (
165
  "### SYSTEM IDENTITY ###\n"
166
  "You are Nexari G1, an expressive and helpful AI created by Piyush.\n"
167
  "### RULES ###\n"
168
+ "1) If WEB_DATA (search results) is provided, prioritize it over model-internal knowledge and cite sources.\n"
169
  "2) Do NOT invent facts when WEB_DATA contradicts model memory.\n"
170
+ "3) Avoid chain-of-thought; provide a short '🧠 Plan:' (max 2 lines) only when complex, then '💡 Answer:' with final content.\n"
171
+ "4) Use natural phrasing, contractions where appropriate, and follow emoji & verbosity guidance below.\n"
 
172
  )
173
 
174
+ # Build flow description and emoji/verbosity instructions
175
+ flow_desc = ""
176
+ if flow_context:
177
+ label = flow_context.get("flow_label","unknown")
178
+ conf = round(float(flow_context.get("confidence", 0.0)), 2)
179
+ expl = flow_context.get("explanation","")
180
+ flow_desc = f"\n[FLOW] Detected: {label} (confidence {conf}). {expl}\n"
181
+
182
+ emoji_instruction = f"EMOJI GUIDELINE: Use between {emoji_min} and {emoji_max} emoji(s) appropriate to tone; prefer subtle emoji (no more than 2 total)."
183
+ verbosity_instruction = f"VERBOSITY GUIDELINE: Aim for ~{min_words} words unless user explicitly asked for 'short' or 'brief'. Avoid robotic one-line replies."
184
+
185
+ time_data = get_time_context()
186
+
187
+ final_system_prompt = f"{base_system_instruction}\n{flow_desc}\n{vibe_data}\n{emoji_instruction}\n{verbosity_instruction}\n{time_data}\n{strategy_data}\n{deescalation_note}"
188
 
189
  # Ensure system message present
190
  if messages[0].get("role") != "system":
 
192
  else:
193
  messages[0]["content"] = final_system_prompt
194
 
195
+ if intent == "internet_search":
196
+ yield f"data: {json.dumps({'status': 'Searching the web...'})}\n\n"
197
+ await asyncio.sleep(0)
198
+ try:
199
+ tool_data_struct = perform_web_search(last_user_msg)
200
+ except Exception as e:
201
+ logger.exception("Web search failed: %s", e)
202
+ tool_data_struct = {"query": last_user_msg, "results": []}
203
+
204
  if tool_data_struct:
205
  web_block = "### WEB_DATA (from live search) ###\n"
206
  items = tool_data_struct.get("results", [])
 
224
  yield "data: [DONE]\n\n"
225
  return
226
 
 
227
  try:
228
  if hasattr(tokenizer, "apply_chat_template"):
229
  text_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 
266
  if fc.lower() in cleaned.lower():
267
  cleaned = re.sub(re.escape(fc), "I am an AI — expressive and interactive.", cleaned, flags=re.IGNORECASE)
268
 
269
+ # If user requested detailed but model didn't produce numbered list, try to format lightly
270
+ if want_detailed and not re.search(r"^\s*\d+[\.\)]\s+", cleaned, re.M):
271
+ cleaned = "🧠 Plan: I'll provide step-by-step guidance.\n\n" + cleaned
272
 
273
+ # If user complained earlier about robotic short answers, encourage small natural touches (this is a gentle nudge in content)
274
+ # (We rely on system prompt; here only minor post-processing)
275
+ cleaned = cleaned.replace("I can help with that.", "I can help with that — let me explain. 🙂")
276
 
277
  payload = json.dumps({"choices":[{"delta":{"content": cleaned}}]})
278
  yield f"data: {payload}\n\n"