Manoj Thapa committed · b4ed585
Parent(s): 5b1b7c1

Fix: Improve language context switching by appending explicit instruction to user prompt

Files changed:
- README.md +1 -2
- backend/agent/graph.py +38 -33
- backend/main.py +27 -28
- backend/tools/code_executor.py +3 -29
- frontend/src/components/Header.jsx +0 -3
README.md
CHANGED

@@ -21,8 +21,7 @@ Planck AI is an advanced agentic search interface that combines the reasoning po
 - **💻 Code Execution**: Writes and runs code in **7+ Languages** (Python, Java, JS/TS, C/C++, Go) to solve logic/math problems.
 - **🌐 Deep Web Search**: Integrated with Google & DuckDuckGo to find real-time information.
 - **📄 Document Analysis**: Upload PDFs or paste URLs—Planck reads and analyzes them (up to 10k chars/page).
-
-- **👁️ Vision Capabilities**: Analyze uploaded images using GPT-4o's vision model.
+- **👁️ Vision Capabilities**: Analyze uploaded images using GPT-4o's vision model.
 - **⚡ Reactive UI**: A beautiful, dark-mode interface built with React & TailwindCSS.
 - **🔒 Privacy First**: All sessions are isolated. No data is stored permanently.
backend/agent/graph.py
CHANGED

@@ -201,14 +201,6 @@ class AgentRunner:
         # Append Language Instruction
         if language and language != "English":
             formatted_system_prompt += f"\n\nIMPORTANT: You must respond in {language}. Translate your internal reasoning if necessary, but the final output must be in {language}."
-
-        # Override History Bias:
-        # If the conversation history has a different language, the model might get confused.
-        # We explicitly append the instruction to the *current* user message to force the switch.
-        current_user_content = user_message
-        if language and language != "English":
-            current_user_content += f"\n\n(Please answer in {language})"
-
         messages = [{"role": "system", "content": formatted_system_prompt}]
 
         # Smart Context Management
@@ -216,36 +208,49 @@
         # Github Models Free Tier has a strict 8k token limit for ALL models
         # 8k tokens ~= 32k chars. We use 30k to be safe.
         MAX_HISTORY_CHARS = 30000
+
+        current_chars = 0
+        selected_history = []
 
         if conversation_history:
-            #
-            …
+            # Iterate backwards to keep most recent first
+            for msg in reversed(conversation_history):
+                content = msg.get("content") or ""
+
+                # Truncate extremely long individual text messages
+                if content and len(content) > 2000:
+                    content = content[:2000] + "... [truncated]"
+
+                # Estimate size (including tool call overhead)
+                msg_len = len(content) + 200  # Buffer for metadata
+
+                if current_chars + msg_len > MAX_HISTORY_CHARS:
+                    # Soft limit hit - stop adding history
                     break
-            truncated_history.insert(0, msg)
-            current_chars += content_len
-
-            # Convert to OpenAI format
-            for msg in truncated_history:
-                # Handle stored document format vs raw content
-                content = msg.get("documents", [""])[0] if isinstance(msg.get("documents"), list) else msg.get("content", "")
 
-                #
-                …
-                messages.append({
-                    "role": msg.get("role"),
+                # Reconstruct message preserving CRITICAL fields for API validity
+                clean_msg = {
+                    "role": msg["role"],
                     "content": content
-                })
+                }
+                if "tool_calls" in msg:
+                    clean_msg["tool_calls"] = msg["tool_calls"]
+                if "tool_call_id" in msg:
+                    clean_msg["tool_call_id"] = msg["tool_call_id"]
+                if "name" in msg:
+                    clean_msg["name"] = msg["name"]
+
+                selected_history.insert(0, clean_msg)
+                current_chars += msg_len
+
+            # SAFETY: Ensure history doesn't start with a 'tool' result (orphan)
+            # API requires: User/System -> Assistant -> Tool -> Assistant ...
+            # If we cut in the middle, we might start with 'tool'.
+            while selected_history and selected_history[0].get("role") == "tool":
+                selected_history.pop(0)
+
+            # Add trimmed history to messages
+            messages.extend(selected_history)
 
         # Add file context if any
        file_context = ""
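Taken together, the added block is a character-budget sliding window over the chat history: newest messages are kept first, oversized messages are clipped, tool-call fields are preserved so the OpenAI-style message sequence stays valid, and an orphaned leading 'tool' message is dropped. A minimal standalone sketch of the same strategy (the function name, constants, and message shape below are illustrative assumptions, not the repo's exact API):

    # Sketch only: assumes OpenAI-style message dicts such as
    # {"role": "user", "content": "...", "tool_calls": [...]}.
    MAX_HISTORY_CHARS = 30000   # ~8k tokens on the free tier
    PER_MESSAGE_CAP = 2000      # clip any single message
    METADATA_BUFFER = 200       # rough per-message overhead

    def trim_history(history):
        selected, used = [], 0
        for msg in reversed(history):              # newest first
            content = msg.get("content") or ""
            if len(content) > PER_MESSAGE_CAP:
                content = content[:PER_MESSAGE_CAP] + "... [truncated]"
            cost = len(content) + METADATA_BUFFER
            if used + cost > MAX_HISTORY_CHARS:
                break                              # budget exhausted
            clean = {"role": msg["role"], "content": content}
            for key in ("tool_calls", "tool_call_id", "name"):
                if key in msg:                     # keep API-critical fields
                    clean[key] = msg[key]
            selected.insert(0, clean)              # restore chronological order
            used += cost
        # A 'tool' message is only valid after the assistant turn that
        # requested it; if the cut left one at the front, drop it.
        while selected and selected[0].get("role") == "tool":
            selected.pop(0)
        return selected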
backend/main.py
CHANGED

@@ -98,41 +98,40 @@ async def chat(request: ChatRequest):
         for msg in history
     ]
 
-    #
-    …
-    clean_message = request.message
-    language = "English"
-
-    # Detect mode
-    if "[Mode: Chat]" in request.message:
-        mode = "chat"
-        clean_message = request.message.replace("[Mode: Chat]", "").strip()
-    elif "[Mode: Web]" in request.message:
-        mode = "web"
-        clean_message = request.message.replace("[Mode: Web]", "").strip()
-
-    # Detect Language
-    import re
-    lang_match = re.search(r'\[Language: (.*?)\]', clean_message)
-    if lang_match:
-        language = lang_match.group(1)
-        clean_message = clean_message.replace(lang_match.group(0), "").strip()
-
-    # Handle legacy format
-    elif "[Focus Mode: Chat Only]" in request.message:
-        mode = "chat"
-        clean_message = re.sub(r'\[Focus Mode:.*?\]', '', request.message).strip()
-
-    # --- SAVE CLEAN MESSAGE ---
-    memory.add_message(conversation_id, "user", clean_message)
+    # Add user message to memory
+    memory.add_message(conversation_id, "user", request.message)
 
     async def generate():
         """Generate streaming response."""
         full_response = ""
         tool_calls = []
 
-        #
+        # Detect mode from message content (injected by frontend)
+        mode = "web"
+        clean_message = request.message
+
+        if "[Mode: Chat]" in request.message:
+            mode = "chat"
+            clean_message = request.message.replace("[Mode: Chat]", "").strip()
+        elif "[Mode: Web]" in request.message:
+            mode = "web"
+            clean_message = request.message.replace("[Mode: Web]", "").strip()
 
+        # Detect and extract language preference
+        # Format: [Language: Nepali]
+        language = "English"
+        import re
+        lang_match = re.search(r'\[Language: (.*?)\]', clean_message)
+        if lang_match:
+            language = lang_match.group(1)
+            clean_message = clean_message.replace(lang_match.group(0), "").strip()
+
+        # Also handle legacy format just in case
+        elif "[Focus Mode: Chat Only]" in request.message:
+            mode = "chat"
+            # Strip the heavy-handed legacy tag
+            clean_message = re.sub(r'\[Focus Mode:.*?\]', '', request.message).strip()
+
         async for chunk in agent_runner.run(
             user_message=clean_message,
             conversation_history=conversation_history,
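The tags the frontend injects are plain markers inside the message string, so the backend recovers mode and language with string matching plus one regex. Note that after this change the raw request.message, tags included, is what gets saved to memory; stripping now happens inside generate(). A small self-contained sketch of the parsing step (parse_tags is a hypothetical helper name, not in the repo):

    import re

    def parse_tags(message: str):
        """Mirror of the detection above: returns (mode, language, clean_message)."""
        mode, language, clean = "web", "English", message
        if "[Mode: Chat]" in clean:
            mode = "chat"
            clean = clean.replace("[Mode: Chat]", "").strip()
        elif "[Mode: Web]" in clean:
            clean = clean.replace("[Mode: Web]", "").strip()
        match = re.search(r'\[Language: (.*?)\]', clean)
        if match:
            language = match.group(1)
            clean = clean.replace(match.group(0), "").strip()
        return mode, language, clean

    # parse_tags("[Mode: Chat] [Language: Nepali] what is recursion?")
    # -> ("chat", "Nepali", "what is recursion?")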
backend/tools/code_executor.py
CHANGED

@@ -66,7 +66,7 @@ def code_executor_tool(code: str, language: str = "python") -> str:
 
 
 def _execute_python(code: str, result: Dict[str, Any]) -> str:
-    """Executes Python code in-process using …
+    """Executes Python code in-process using exec()."""
     stdout_capture = io.StringIO()
     stderr_capture = io.StringIO()
 
@@ -83,40 +83,14 @@ def _execute_python(code: str, result: Dict[str, Any]) -> str:
     exec_globals = {'__builtins__': safe_builtins}
     exec_locals = {}
 
-    import ast
-
     try:
         with redirect_stdout(stdout_capture), redirect_stderr(stderr_capture):
-            # Parse the code into an AST
             try:
-                tree = ast.parse(code)
+                exec_result = eval(code, exec_globals, exec_locals)
+                result["result"] = str(exec_result) if exec_result is not None else None
             except SyntaxError:
-                # If parsing fails, fall back to simple exec to let it raise the error naturally
                 exec(code, exec_globals, exec_locals)
 
-            # Check if likely an expression at the end
-            last_node = None
-            if tree.body and isinstance(tree.body[-1], ast.Expr):
-                last_node = tree.body.pop()
-
-            # Execute the main block (all statements except the last expression)
-            if tree.body:
-                # Compile as a module
-                module = ast.Module(body=tree.body, type_ignores=[])
-                # We must fix locations for the new AST to be compilable
-                ast.fix_missing_locations(module)
-                compiled_module = compile(module, filename="<string>", mode="exec")
-                exec(compiled_module, exec_globals, exec_locals)
-
-            # Evaluate the last expression (if any)
-            if last_node:
-                expr = ast.Expression(body=last_node.value)
-                ast.fix_missing_locations(expr)
-                compiled_expr = compile(expr, filename="<string>", mode="eval")
-                exec_result = eval(compiled_expr, exec_globals, exec_locals)
-                # Store the result
-                result["result"] = str(exec_result) if exec_result is not None else None
-
     result["stdout"] = stdout_capture.getvalue()
     result["stderr"] = stderr_capture.getvalue()
     result["success"] = True
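The rewrite replaces the AST-splitting approach with a simpler eval-then-exec fallback: try the snippet as a single expression so its value can be captured, and fall back to exec() when eval() raises SyntaxError. A minimal sketch of the pattern (run_snippet is an illustrative name):

    def run_snippet(code: str) -> dict:
        result = {"result": None}
        env = {}
        try:
            # Succeeds only when `code` is a single expression, e.g. "2 + 2"
            value = eval(code, env)
            result["result"] = str(value) if value is not None else None
        except SyntaxError:
            # Statements ("x = 1\nprint(x)") are not valid eval() input
            exec(code, env)
        return result

    run_snippet("2 + 2")          # -> {"result": "4"}
    run_snippet("x = 3\nx * 2")   # -> {"result": None}

The trade-off versus the deleted AST path: a multi-statement snippet no longer reports the value of its final expression, so the model has to print() anything it wants echoed back.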
frontend/src/components/Header.jsx
CHANGED

@@ -63,9 +63,6 @@ export default function Header({ onToggleSidebar, onNewChat, currentLanguage = '
           {/* Dropdown - Using pt-2 instead of mt-2 to maintain hover bridge */}
           <div className="absolute right-0 top-full pt-2 w-56 hidden group-hover:block transition-all z-50">
             <div className="bg-pplx-card border border-white/10 rounded-xl shadow-xl overflow-hidden">
-              <div className="px-4 py-2 text-xs text-pplx-muted font-medium border-b border-white/5 bg-white/5">
-                Sets language for new messages
-              </div>
               <div className="max-h-80 overflow-y-auto py-1 custom-scrollbar">
                 {LANGUAGES.map((lang) => (
                   <button