OpenRouter-UI / brick_app.py
Alibrown's picture
Rename app.py to brick_app.py
1156d35 verified
import streamlit as st
import requests
import json
from PIL import Image
import io
import base64
import pandas as pd
import zipfile
import PyPDF2
import os
# --- Konfiguration ---
st.set_page_config(page_title="OpenRouter Free-Tier Hub", layout="wide", initial_sidebar_state="expanded")
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
# --- Title ---
st.title("🤖 OpenRouter Free-Tier Hub (Deluxe)")
st.markdown("""
**Chatte mit kostenlosen OpenRouter Modellen.** Du kannst **Dateien** (Text, PDF, ZIP, Bilder) anhängen, um Kontext zu liefern.
""")
# --- Session State Management ---
if "messages" not in st.session_state:
st.session_state.messages = []
if "uploaded_content" not in st.session_state:
st.session_state.uploaded_content = None
if "last_response" not in st.session_state:
st.session_state.last_response = "" # NEU: Speichert die letzte Antwort zum Kopieren
# --- Utilities ---
def encode_image(image):
"""Encodiert ein PIL-Image-Objekt in einen Base64-String."""
buf = io.BytesIO()
image.save(buf, format="JPEG")
return base64.b64encode(buf.getvalue()).decode("utf-8")
def process_file(uploaded_file):
"""Verarbeitet die hochgeladene Datei (Text, Bild, PDF, ZIP, Tabellen) und extrahiert den Inhalt."""
file_type = uploaded_file.name.split('.')[-1].lower()
text_exts = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
if file_type in ["jpg", "jpeg", "png"]:
return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}
if file_type in ["txt"] + [ext.strip('.') for ext in text_exts if ext not in ('.csv', '.xlsx')]:
return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors="ignore")}
if file_type in ["csv", "xlsx"]:
try:
df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
return {"type": "text", "content": df.to_string()}
except Exception as e:
return {"type": "error", "content": f"Fehler beim Lesen der Tabelle: {e}"}
if file_type == "pdf":
try:
reader = PyPDF2.PdfReader(uploaded_file)
return {"type": "text", "content": "".join(page.extract_text() or "" for page in reader.pages)}
except Exception as e:
return {"type": "error", "content": f"PDF Fehler: {e}"}
if file_type == "zip":
try:
with zipfile.ZipFile(uploaded_file) as z:
content = "ZIP Contents:\n"
# Verbesserte Filterung für Textdateien in ZIP
for f in z.infolist():
if not f.is_dir() and f.filename.lower().endswith(text_exts):
content += f"\n📄 {f.filename}:\n"
content += z.read(f.filename).decode("utf-8", errors="ignore")
return {"type": "text", "content": content or "ZIP enthält keine lesbaren Textdateien."}
except Exception as e:
return {"type": "error", "content": f"ZIP Fehler: {e}"}
return {"type": "error", "content": "Nicht unterstütztes Dateiformat."}
@st.cache_data(show_spinner=False, ttl=3600)
def fetch_model_contexts(api_key):
"""Fetches Context Lengths and Price for models from OpenRouter API (Cached)."""
if not api_key:
return {}
headers = {"Authorization": f"Bearer {api_key}"}
try:
res = requests.get(f"{OPENROUTER_API_BASE}/models", headers=headers, timeout=5)
contexts = {}
if res.status_code == 200:
for m in res.json().get("data", []):
# Filtert nur Modelle, die kostenlos sind (prompt price = 0)
if m.get("pricing", {}).get("prompt", 1) == 0:
contexts[m.get("id")] = m.get("context_length", 4096)
# Speichere die kostenlosen Modelle
st.session_state.free_models = list(contexts.keys())
return contexts
except Exception as e:
st.warning(f"⚠️ Fehler beim Abrufen der Modellinformationen (API-Key, Limit?): {e}")
return {}
def call_openrouter(model, messages, temp, max_tok, key):
"""Führt den API-Aufruf an OpenRouter durch (OpenAI-Chat-Schema)."""
headers = {
"Authorization": f"Bearer {key}",
"Content-Type": "application/json",
"Referer": "https://aicodecraft.io",
"X-Title": "OpenRouter-Free-Interface",
}
payload = {
"model": model,
"messages": messages,
"temperature": temp,
"max_tokens": max_tok,
}
res = requests.post(f"{OPENROUTER_API_BASE}/chat/completions", headers=headers, data=json.dumps(payload))
if res.status_code == 200:
try:
return res.json()["choices"][0]["message"]["content"]
except (KeyError, IndexError):
raise Exception("Ungültige API-Antwort: Konnte Antworttext nicht extrahieren.")
else:
try:
err = res.json()
msg = err.get("error", {}).get("message", res.text)
except:
msg = res.text
raise Exception(f"API Fehler {res.status_code}: {msg}")
# --- Sidebar ---
with st.sidebar:
st.header("⚙️ API Settings")
api_key = st.text_input("OpenRouter API Key", type="password")
# 1. Context holen
model_contexts = fetch_model_contexts(api_key)
# 2. Liste der kostenlosen Modelle definieren/aktualisieren
FREE_MODEL_LIST = st.session_state.get("free_models", [
"cognitivecomputations/dolphin-mistral-24b-venice-edition",
"deepseek/deepseek-chat-v3",
"google/gemma-2-9b-it",
"mistralai/mistral-7b-instruct-v0.2",
])
st.subheader("Modell-Konfiguration")
model = st.selectbox("Wähle ein Modell", FREE_MODEL_LIST, index=0)
# 3. Context Length Slider setzen
default_ctx = model_contexts.get(model, 4096)
max_tokens = st.slider(
f"Max Output Tokens (Total Context: {default_ctx})",
min_value=1,
max_value=min(default_ctx, 32768),
value=min(1024, default_ctx),
step=256
)
temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
st.markdown("---")
# NEU: Kopier-Funktion
st.subheader("📋 Letzte Antwort kopieren")
# Textfeld, das die letzte Antwort zum einfachen Kopieren anzeigt
st.text_area(
"Response Text",
st.session_state.last_response,
height=200,
key="copy_area_key",
help="Markiere den Text im Feld und kopiere ihn (Strg+C)."
)
st.markdown("---")
# Verbesserter Reset-Button
if st.button("🔄 Reset Chat & Attachment"):
st.session_state.messages = []
st.session_state.uploaded_content = None
st.session_state.last_response = "" # Auch die letzte Antwort löschen
# Da st.file_uploader nicht einfach im Code resettet wird, muss die App neu starten:
st.experimental_rerun()
# Hinweis: st.success wird wegen Rerun nicht angezeigt, aber der Reset ist effektiv.
# --- File Upload & Preview ---
uploaded_file = st.file_uploader("Upload File (optional)",
type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"])
if uploaded_file and st.session_state.uploaded_content is None:
st.session_state.uploaded_content = process_file(uploaded_file)
# Wenn eine neue Datei hochgeladen wird, das Interface neu rendern, um die Vorschau anzuzeigen.
st.experimental_rerun()
if st.session_state.uploaded_content:
processed = st.session_state.uploaded_content
st.subheader("📎 Aktueller Anhang:")
if processed["type"] == "image":
st.image(processed["content"], caption="Attached Image", width=300)
elif processed["type"] == "text":
st.text_area("File Preview", processed["content"], height=150)
elif processed["type"] == "error":
st.error(processed["content"])
if st.button("❌ Anhang entfernen"):
st.session_state.uploaded_content = None
st.experimental_rerun()
# --- Chat History ---
for msg in st.session_state.messages:
with st.chat_message(msg["role"]):
st.markdown(msg["content"])
# --- Chat Input & Logic ---
if prompt := st.chat_input("Deine Nachricht..."):
if not api_key:
st.warning("Bitte trage deinen OpenRouter API Key in der Sidebar ein.")
st.stop()
# 1. Benutzer-Nachricht hinzufügen und sofort anzeigen
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
# 2. API Nachrichten vorbereiten (für Chatverlauf)
messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
# 3. Datei anhängen (Multimodalitäts-Handling)
if st.session_state.uploaded_content:
content = st.session_state.uploaded_content
if content["type"] == "image":
base64_img = encode_image(content["content"])
# OpenRouter Multimodalität: Bild als 'image_url' (OpenAI-Schema)
messages[-1]["content"] = [
{"type": "text", "text": prompt},
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_img}"}}
]
elif content["type"] == "text":
# Text-Dateien einfach dem letzten Prompt anhängen
messages[-1]["content"] += f"\n\n[Attached File Content]\n{content['content']}"
# 4. Antwort generieren
with st.chat_message("assistant"):
with st.spinner(f"Fragend {model}..."):
try:
reply = call_openrouter(model, messages, temperature, max_tokens, api_key)
# Antwort anzeigen
st.markdown(reply)
# Antwort speichern und Copy-Feld aktualisieren
st.session_state.messages.append({"role": "assistant", "content": reply})
st.session_state.last_response = reply
# Nach erfolgreicher Antwort neu rendern, um das Copy-Feld zu aktualisieren
st.experimental_rerun()
except Exception as e:
st.error(str(e))
# Fehler zur Historie hinzufügen
st.session_state.messages.append({"role": "assistant", "content": f"❌ {str(e)}"})