Spaces: Running on A10G
#1 by sushiwill - opened
- README.md +0 -2
- app.py +2 -7
- requirements.txt +1 -2
README.md CHANGED

@@ -7,8 +7,6 @@ sdk: gradio
 sdk_version: 5.49.0
 app_file: app.py
 pinned: false
-hf_oauth: true
-hf_oauth_expiration_minutes: 480
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
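Note on the README change: dropping the two hf_oauth keys appears to disable the Space's built-in "Sign in with Hugging Face" OAuth flow; hf_oauth_expiration_minutes: 480 had capped OAuth tokens at eight hours. Both keys are documented in the Spaces configuration reference linked above.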
app.py CHANGED

@@ -6,7 +6,6 @@ import time
 import uuid
 import subprocess
 import requests
-
 from typing import List, Dict, Any, Iterator
 
 from dotenv import load_dotenv

@@ -26,8 +25,6 @@ from agentflow.models.utils import make_json_serializable_truncated
 from pathlib import Path
 from huggingface_hub import CommitScheduler
 
-import spaces
-
 # Get Huggingface token from environment variable
 HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
 

@@ -87,9 +84,7 @@ def start_vllm_service() -> bool:
         [
             "vllm", "serve", VLLM_MODEL_NAME,
             "--port", str(VLLM_PORT),
-            "--host", VLLM_HOST
-            "--tensor-parallel-size", "1",
-            "--gpu-memory-utilization", "0.95"
+            "--host", VLLM_HOST
         ],
         text=True
     )

@@ -487,7 +482,7 @@ def parse_arguments():
     parser.add_argument("--openai_api_source", default="we_provided", choices=["we_provided", "user_provided"], help="Source of OpenAI API key.")
     return parser.parse_args()
 
-
+
 def solve_problem_gradio(user_query, max_steps=10, max_time=60, llm_model_engine=None, enabled_tools=None):
     """
     Wrapper function to connect the solver to Gradio.
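One functional detail in the start_vllm_service() hunk: as rendered, the removed lines have no comma after VLLM_HOST, so the following flag string would be a Python syntax error rather than a separate argv entry; the PR trims the list down to a single well-formed "--host", VLLM_HOST pair. Below is a minimal sketch of the resulting call, assuming (as the import of subprocess and the text=True keyword suggest) the list feeds subprocess.Popen; the VLLM_* values shown here are placeholders for constants defined elsewhere in app.py:

import subprocess

# Placeholder values; the real constants live near the top of app.py.
VLLM_MODEL_NAME = "some-org/some-model"  # hypothetical model id
VLLM_PORT = 8000                         # hypothetical port
VLLM_HOST = "0.0.0.0"                    # hypothetical host

# Post-PR shape of the launch: every flag and its value is a separate,
# comma-separated list element, so the child process receives each one
# as a distinct argv entry.
vllm_process = subprocess.Popen(
    [
        "vllm", "serve", VLLM_MODEL_NAME,
        "--port", str(VLLM_PORT),
        "--host", VLLM_HOST,
    ],
    text=True,
)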
requirements.txt CHANGED

@@ -19,5 +19,4 @@ dashscope==1.24.2
 gradio
 # litellm==2.1.1
 # ollama==0.5.1
-# e2b_code_interpreter==2.0.0
-spaces
+# e2b_code_interpreter==2.0.0
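Removing spaces from requirements.txt pairs with the dropped import spaces in app.py. A plausible reading, given the "Running on A10G" header: the spaces package matters only on ZeroGPU Spaces, where GPU-bound functions are wrapped as in the hypothetical sketch below, and it does nothing useful on dedicated A10G hardware, so the dependency can go.

import spaces  # Hugging Face ZeroGPU helper; unneeded on dedicated hardware

@spaces.GPU  # requests a GPU slice for the duration of the call (ZeroGPU only)
def run_model(prompt: str) -> str:
    # hypothetical GPU-bound function, shown only for illustration
    ...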