Update app.py
app.py CHANGED
@@ -1,11 +1,16 @@
 import gradio as gr
 import spacy
+from spacy.cli import download
 from transformers import T5Tokenizer, AutoModelForSeq2SeqLM, pipeline
 
-# Load spaCy model
-nlp_spacy = spacy.load("en_core_web_sm")
+# 🛠 Ensure spaCy model is available
+try:
+    nlp_spacy = spacy.load("en_core_web_sm")
+except OSError:
+    download("en_core_web_sm")
+    nlp_spacy = spacy.load("en_core_web_sm")
 
-# Load
+# Load T5 model for question generation
 model_name = "valhalla/t5-base-e2e-qg"
 tokenizer = T5Tokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
@@ -13,11 +18,10 @@ qg_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
 
 def extract_answers(context):
     doc = nlp_spacy(context)
-    answers = list(set([ent.text for ent in doc.ents]))
+    answers = list(set([ent.text for ent in doc.ents]))
     if not answers:
-        # Fallback to noun chunks if no named entities
         answers = list(set([chunk.text for chunk in doc.noun_chunks if len(chunk.text.split()) > 1]))
-    return answers[:5]
+    return answers[:5]
 
 def generate_questions_answers(context):
     answers = extract_answers(context)
@@ -31,14 +35,13 @@ def generate_questions_answers(context):
 
     return "\n\n".join(qa_pairs) if qa_pairs else "Could not generate any QA pairs."
 
-# Gradio UI
 iface = gr.Interface(
     fn=generate_questions_answers,
     inputs=gr.Textbox(lines=6, label="Paste Paragraph"),
     outputs="textbox",
-    title="
-    description="
+    title="Q&A Generator from Paragraph",
+    description="Get both questions and answers using spaCy + T5!"
 )
 
-iface.queue()
+iface.queue()
 iface.launch()
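The hunks above collapse the body of generate_questions_answers (old lines 24-30, new lines 28-34), so the commit does not show how each extracted answer becomes a question. A minimal sketch of how that loop might call qg_pipeline, assuming the module-level definitions from the diff; the "answer: ... context: ..." prompt format and the generation parameters are illustrative assumptions, not code from this commit:

# Hypothetical sketch of the collapsed hunk -- not necessarily the Space's actual code.
def generate_questions_answers(context):
    answers = extract_answers(context)
    qa_pairs = []
    for answer in answers:
        # Assumed answer-aware prompt; the real prompt format is not shown in the diff.
        prompt = f"answer: {answer}  context: {context}"
        # text2text-generation pipelines return [{"generated_text": "..."}]
        question = qg_pipeline(prompt, max_length=64)[0]["generated_text"]
        qa_pairs.append(f"Q: {question}\nA: {answer}")
    return "\n\n".join(qa_pairs) if qa_pairs else "Could not generate any QA pairs."

Worth noting: valhalla/t5-base-e2e-qg is an end-to-end question-generation checkpoint that is usually fed the whole passage and emits several questions separated by <sep> tokens, so a per-answer prompt like the sketch above may need adapting to that model's expected input format.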