import gradio as gr
import spaces
import torch
import io
import os
import urllib.request
import whisper

# Alternative: pull a converted checkpoint from the Hugging Face Hub instead.
#from huggingface_hub import hf_hub_download
#model_path = hf_hub_download(repo_id="distil-whisper/distil-large-v3-openai", filename="model.bin")

# Pre-download the Whisper "medium" checkpoint into the cache directory that
# whisper.load_model() checks, so the first request does not stall on the download.
os.makedirs("/home/user/.cache/whisper", exist_ok=True)
urllib.request.urlretrieve(
    "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "/home/user/.cache/whisper/medium.pt",
)
model_path = "medium"

# SRT writer; the output directory is unused because results go to an in-memory buffer.
writer = whisper.utils.get_writer("srt", "/dev/null")

@spaces.GPU  # request a GPU for this call on ZeroGPU Spaces (no-op on other hardware)
def generate(file, progress=gr.Progress(track_tqdm=True)):
    # gr.File(type="filepath") passes a path string, which whisper.load_audio accepts.
    model = whisper.load_model(model_path, device="cuda")
    audio = whisper.load_audio(file)
    result = model.transcribe(audio, verbose=False)
    out = io.StringIO()
    writer.write_result(result, out)
    return out.getvalue()

gr.Interface(fn=generate, inputs=gr.File(type="filepath"), outputs=gr.Text()).launch()
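
Once the Space is running, the endpoint can also be called programmatically. A minimal sketch using gradio_client, assuming a placeholder Space id and the default /predict API name that gr.Interface exposes:

from gradio_client import Client, handle_file

# "username/whisper-srt" is a placeholder; substitute the actual Space id.
client = Client("username/whisper-srt")
# handle_file wraps a local path for upload (recent gradio_client versions;
# older versions accepted a bare path string instead).
srt_text = client.predict(handle_file("sample.mp3"), api_name="/predict")
print(srt_text)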