Bi Yoo committed on
Commit 086f11b · 1 Parent(s): 4eee3b0
Files changed (1)
  1. config.py +1 -1
config.py CHANGED
@@ -16,7 +16,7 @@ HUGGINGFACE_MODEL = "google/gemma-2-2b-it"
 
 # Local model configuration (for quantized models hosted within the Space)
 LOCAL_MODEL_REPO = os.getenv("LOCAL_MODEL_REPO", "bartowski/Qwen_Qwen3-1.7B-GGUF")
-LOCAL_MODEL_FILENAME = os.getenv("LOCAL_MODEL_FILENAME", "Qwen3-1.7B-Q4_K_M.gguf")
+LOCAL_MODEL_FILENAME = os.getenv("LOCAL_MODEL_FILENAME", "Qwen_Qwen3-1.7B-Q4_K_M.gguf")
 LOCAL_MODEL_CONTEXT_LENGTH = int(os.getenv("LOCAL_MODEL_CONTEXT_LENGTH", "2048"))
 LOCAL_MODEL_THREADS = int(os.getenv("LOCAL_MODEL_THREADS", str(os.cpu_count() or 4)))
 LOCAL_MODEL_BATCH_SIZE = int(os.getenv("LOCAL_MODEL_BATCH_SIZE", "256"))
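
The app code that consumes these settings is not part of this diff. As a minimal sketch, assuming the Space downloads the GGUF file with huggingface_hub and loads it with llama-cpp-python (both assumptions, not confirmed by the commit), the renamed default matters because the filename passed to hf_hub_download must exactly match a file hosted in LOCAL_MODEL_REPO; an incorrect default would fail at download time.

# Sketch only: assumes huggingface_hub + llama-cpp-python are the loading path,
# which this diff does not confirm.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

import config  # the file changed in this commit

# hf_hub_download errors out if `filename` does not exist in the repo,
# which is presumably what the corrected default filename addresses.
model_path = hf_hub_download(
    repo_id=config.LOCAL_MODEL_REPO,
    filename=config.LOCAL_MODEL_FILENAME,
)

# The remaining settings map onto the usual llama-cpp-python constructor knobs.
llm = Llama(
    model_path=model_path,
    n_ctx=config.LOCAL_MODEL_CONTEXT_LENGTH,
    n_threads=config.LOCAL_MODEL_THREADS,
    n_batch=config.LOCAL_MODEL_BATCH_SIZE,
)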