import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import torch
from threading import Thread
import os
import logging

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Model and tokenizer configuration
MODEL_NAME = "FractalAIResearch/Fathom-R1-14B"
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Load tokenizer and model
try:
    logger.info("Loading tokenizer and model...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.bfloat16,  # Optimize for H200 GPUs
        device_map="auto",           # Automatically distribute across GPU
        trust_remote_code=True       # Required for Qwen2-based models
    )
    logger.info("Model and tokenizer loaded successfully.")
except Exception as e:
    logger.error(f"Error loading model or tokenizer: {str(e)}")
    raise e

# device_map="auto" already places the model on the GPU, so no manual .to(device) is needed
# model = model.to(device)
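
# Note: the `spaces` import suggests deployment on a Hugging Face ZeroGPU Space, where the
# inference function is typically decorated with @spaces.GPU; no such decorator is applied in
# this file, so GPU allocation is assumed to come from the Space's assigned hardware.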

def generate_response(user_message, max_tokens, temperature, top_k, top_p, repetition_penalty, history_state):
    if not user_message.strip():
        logger.info("Empty message received, returning history unchanged.")
        # This function is a generator (it yields below), so yield the unchanged history rather than returning it
        yield history_state, history_state
        return
    try:
        logger.info("Processing new message...")

        # System prompt for Fathom-R1-14B
        system_message = "You are a helpful assistant, specializing in math and STEM reasoning."

        # Build the messages list in the Qwen2 chat template format
        messages = [{"role": "system", "content": system_message}]
        for message in history_state:
            messages.append({"role": message["role"], "content": message["content"]})
        messages.append({"role": "user", "content": user_message})

        # Apply the Qwen2 chat template
        prompt = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        # Tokenize the prompt and move it to the model's device
        inputs = tokenizer(prompt, return_tensors="pt").to(device)

        # Use greedy decoding when the sampling parameters are effectively neutral
        do_sample = not (temperature == 1.0 and top_k >= 100 and top_p == 1.0)

        # Set up token streaming; skip the prompt and special tokens in the decoded output
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

        # Generation parameters
        generation_kwargs = {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "max_new_tokens": int(max_tokens),
            "do_sample": do_sample,
            "temperature": temperature,
            "top_k": int(top_k),
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "streamer": streamer,
            "pad_token_id": tokenizer.eos_token_id,
            "eos_token_id": tokenizer.eos_token_id
        }

        # Start generation in a separate thread
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # Stream the response
        assistant_response = ""
        new_history = history_state + [
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": ""}
        ]
        for new_token in streamer:
            assistant_response += new_token
            new_history[-1]["content"] = assistant_response.strip()
            yield new_history, new_history
        logger.info("Response generated successfully.")
        yield new_history, new_history
    except Exception as e:
        logger.error(f"Error during inference: {str(e)}")
        # Surface the error in the chat; a bare return value from a generator never reaches Gradio
        yield history_state + [
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": f"Error: {str(e)}"}
        ], history_state

# Example prompts
example_messages = {
    "IIT-JEE 2025 Physics": "A person sitting inside an elevator performs a weighing experiment with an object of mass 50 kg. Suppose that the variation of the height y (in m) of the elevator, from the ground, with time t (in s) is given by y = 8[1 + sin(2πt/T)], where T = 40π s. Taking acceleration due to gravity, g = 10 m/s^2, the maximum variation of the object's weight (in N) as observed in the experiment is ?",
    "Goldman Sachs Interview Puzzle": "Four friends need to cross a dangerous bridge at night. Unfortunately, they have only one torch and the bridge is too dangerous to cross without one. The bridge is only strong enough to support two people at a time. Not all people take the same time to cross the bridge. Times for each person: 1 min, 2 mins, 7 mins and 10 mins. What is the shortest time needed for all four of them to cross the bridge?",
    "IIT-JEE 2025 Mathematics": "Let S be the set of all seven-digit numbers that can be formed using the digits 0, 1 and 2. For example, 2210222 is in S, but 0210222 is NOT in S. Then the number of elements x in S such that at least one of the digits 0 and 1 appears exactly twice in x, is ?"
}

# Gradio interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
# Fathom-R1-14B Chatbot
Welcome to the Fathom-R1-14B Chatbot! This model excels at multi-step reasoning in mathematics, logic, and science, providing detailed step-by-step solutions.
Try the example problems below to see how the model breaks down complex reasoning problems.
"""
    )
    history_state = gr.State([])

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Settings")
            max_tokens_slider = gr.Slider(
                minimum=8192,
                maximum=16384,  # Fathom's context window is 16K
                step=1024,
                value=16384,
                label="Max Tokens"
            )
            with gr.Accordion("Advanced Settings", open=False):
                temperature_slider = gr.Slider(
                    minimum=0.1,
                    maximum=2.0,
                    value=0.8,
                    label="Temperature"
                )
                top_k_slider = gr.Slider(
                    minimum=1,
                    maximum=100,
                    step=1,
                    value=50,
                    label="Top-k"
                )
                top_p_slider = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.95,
                    label="Top-p"
                )
                repetition_penalty_slider = gr.Slider(
                    minimum=1.0,
                    maximum=2.0,
                    value=1.0,
                    label="Repetition Penalty"
                )
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(label="Chat", type="messages")
            with gr.Row():
                user_input = gr.Textbox(
                    label="Your message",
                    placeholder="Type your message here...",
                    scale=3
                )
                submit_button = gr.Button("Send", variant="primary", scale=1)
                clear_button = gr.Button("Clear", scale=1)
            gr.Markdown("**Try these examples:**")
            with gr.Row():
                example1_button = gr.Button("IIT-JEE 2025 Mathematics")
                example2_button = gr.Button("Goldman Sachs Interview Puzzle")
                example3_button = gr.Button("IIT-JEE 2025 Physics")

    submit_button.click(
        fn=generate_response,
        inputs=[user_input, max_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repetition_penalty_slider, history_state],
        outputs=[chatbot, history_state]
    ).then(
        fn=lambda: gr.update(value=""),
        inputs=None,
        outputs=user_input
    )

    clear_button.click(
        fn=lambda: ([], []),
        inputs=None,
        outputs=[chatbot, history_state]
    )

    example1_button.click(
        fn=lambda: gr.update(value=example_messages["IIT-JEE 2025 Mathematics"]),
        inputs=None,
        outputs=user_input
    )
    example2_button.click(
        fn=lambda: gr.update(value=example_messages["Goldman Sachs Interview Puzzle"]),
        inputs=None,
        outputs=user_input
    )
    example3_button.click(
        fn=lambda: gr.update(value=example_messages["IIT-JEE 2025 Physics"]),
        inputs=None,
        outputs=user_input
    )
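
# Streaming from a generator function relies on Gradio's queue; recent Gradio versions enable it
# by default, but older versions would need demo.queue() before launch (an assumption about the
# deployed Gradio version, not something configured in this file).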
demo.launch(ssr_mode=False)