Spaces: Runtime error
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# The original id "huggingface/llama-model" does not resolve on the Hub, and Llama
# checkpoints are causal LMs that AutoModelForSeq2SeqLM cannot load; a real seq2seq
# summarization checkpoint is substituted here so the Space can start.
model_name = "facebook/bart-large-cnn"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
def chunk_text(text, chunk_size=512):
    # Tokenize the full input without truncation, then split the ids into fixed-size chunks.
    tokens = tokenizer.encode(text, return_tensors="pt", truncation=False)
    chunks = [tokens[0][i:i + chunk_size] for i in range(0, tokens.size(1), chunk_size)]
    return chunks
def summarize_chunk(chunk, max_length=50):
    # Beam-search a summary for a single chunk of token ids.
    # min_length is capped so it never exceeds max_length when the slider is set low.
    summary_ids = model.generate(chunk.unsqueeze(0), max_length=max_length, min_length=min(25, max_length), length_penalty=2.0, num_beams=4, early_stopping=True)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary
def summarize(text, max_summary_length=50):
    # Summarize each chunk independently, then condense the concatenated summaries in a second pass.
    max_summary_length = int(max_summary_length)  # the slider may deliver a float
    chunks = chunk_text(text)
    summaries = [summarize_chunk(chunk, max_summary_length) for chunk in chunks]
    combined_summary = " ".join(summaries)
    final_summary = summarize_chunk(tokenizer.encode(combined_summary, return_tensors="pt", truncation=True)[0], max_length=max_summary_length)
    return final_summary
iface = gr.Interface(
    fn=summarize,
    inputs=[
        # The gr.inputs namespace and the `default=` argument were removed in current
        # Gradio releases; top-level components with `value=` are used instead.
        gr.Textbox(lines=10, label="Input Text"),
        gr.Slider(minimum=10, maximum=100, value=50, step=1, label="Max Summary Length (Optional)")
    ],
    outputs="text",
    title="Concise Text Summarization with Llama"
)
if __name__ == "__main__":
    iface.launch()
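
For debugging outside the Space, the summarization path can be exercised in a local Python session with the code above loaded, skipping the Gradio UI entirely. A minimal sketch; the sample paragraph and the target length of 60 are illustrative:

# Exercise chunking, per-chunk summarization, and the final condensation pass directly.
sample_text = (
    "Seq2seq models are commonly used to condense long documents. Splitting the input "
    "into fixed-size token chunks keeps each generation call within the model's context "
    "window, and a second pass condenses the per-chunk summaries into one short summary."
)
print(summarize(sample_text, max_summary_length=60))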