import gradio as gr
import os
import torch
import yake
import shutil
import glob
import ffmpeg
import cv2
import numpy as np
from datetime import datetime
from gtts import gTTS
from diffusers import StableDiffusionPipeline
from deep_translator import GoogleTranslator
import wikipediaapi
from groq import Groq


# The Groq API key must come from the environment; never commit a real key to
# source control (placeholder shown here).
os.environ["GROQ_API_KEY"] = "YOUR_GROQ_API_KEY"
client = Groq(api_key=os.environ["GROQ_API_KEY"])


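# Fetch a short factual summary of the topic from English Wikipedia.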
def fetch_wikipedia_summary(topic):
    wiki_wiki = wikipediaapi.Wikipedia(
        user_agent="EducationalScriptApp/1.0",
        language="en"
    )
    page = wiki_wiki.page(topic)
    return page.summary if page.exists() else "No Wikipedia summary available."


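# Turn the Wikipedia summary into an educational script of roughly the requested
# length (about 130 spoken words per minute) using a Groq-hosted Llama model.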
def generate_script(topic, duration):
    try:
        factual_content = fetch_wikipedia_summary(topic)
        words_per_minute = 130
        target_words = duration * words_per_minute

        response = client.chat.completions.create(
            messages=[{"role": "user", "content": f"Format the following factual content into a well-structured educational script in English with approximately {target_words} words: \n{factual_content}"}],
            model="llama-3.3-70b-versatile"
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"❌ Error in script generation: {str(e)}"


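# Extract key phrases (up to three words each) from the English script with YAKE.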
def extract_keywords(script):
    try:
        kw_extractor = yake.KeywordExtractor(
            lan="en",
            n=3,
            dedupLim=0.9
        )
        keywords = kw_extractor.extract_keywords(script)
        return ", ".join([kw[0] for kw in keywords])
    except Exception as e:
        return f"❌ Error extracting keywords: {str(e)}"


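# Save the extracted keywords to a dated text file under Keywords/.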
def save_keywords_file(keywords, topic):
    today = datetime.today().strftime('%Y_%b_%d')
    filename = f"Keywords/{topic}_Keyword_{today}.txt"
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, "w", encoding="utf-8") as f:
        f.write(keywords)
    return filename


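# Translate the English script to Urdu in chunks that stay under the translator's
# request-size limit, then stitch the translated chunks back together.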
def translate_to_urdu(english_script):
    try:
        max_chunk_size = 4500
        chunks = [english_script[i:i + max_chunk_size] for i in range(0, len(english_script), max_chunk_size)]

        translated_chunks = []
        for chunk in chunks:
            translated_chunk = GoogleTranslator(source='en', target='ur').translate(chunk)
            translated_chunks.append(translated_chunk)

        return " ".join(translated_chunks)
    except Exception as e:
        return f"❌ Error in translation: {str(e)}"


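# The next three helpers persist the English script, the machine-translated Urdu
# script, and the manually edited final Urdu script under dated filenames.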
def save_english_file(content, topic):
    today = datetime.today().strftime('%Y_%b_%d')
    filename = f"English_Scripts/{topic}_Eng_{today}.txt"
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, "w", encoding="utf-8") as f:
        f.write(content)
    return filename


def save_urdu_file(content, topic):
    today = datetime.today().strftime('%Y_%b_%d')
    filename = f"Urdu_Scripts/{topic}_Urdu_{today}.txt"
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, "w", encoding="utf-8") as f:
        f.write(content)
    return filename


def save_final_urdu_file(topic, content):
    date_str = datetime.now().strftime("%Y_%b_%d")
    filename = f"Urdu_Final/{topic}_Urdu_Final_{date_str}.txt"
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, "w", encoding="utf-8") as f:
        f.write(content)
    return filename


def finalize_process():
    return "✅ Script Generation Completed Successfully!"


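# Delete previously generated script/keyword files and return one empty string
# per UI field that should be reset (keywords, Urdu, edited Urdu, status).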
def clear_old_files():
    directories = ["English_Scripts", "Urdu_Scripts", "Urdu_Final", "Keywords"]

    for directory in directories:
        if os.path.exists(directory):
            files = glob.glob(f"{directory}/*")
            for file in files:
                try:
                    os.remove(file)
                except Exception as e:
                    print(f"❌ Error deleting {file}: {e}")

    return "", "", "", ""


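# Output directories and the Stable Diffusion pipeline used for scene images.
# The model is loaded in float32 and its built-in safety checker is disabled.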
os.makedirs("generated_images", exist_ok=True) |
|
|
os.makedirs("output", exist_ok=True) |
|
|
|
|
|
|
|
|
model_id = "runwayml/stable-diffusion-v1-5" |
|
|
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32) |
|
|
pipe.safety_checker = None |
|
|
|
|
|
|
|
|
global_audio_path = None |
|
|
|
|
|
|
|
|
|
|
|
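# Convert an uploaded Urdu script to speech with gTTS and remember the audio path
# in global_audio_path so it can be merged with the generated video later.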
def text_to_speech(script_file):
    if script_file is None:
        return None, "⚠️ Please upload an Urdu script file!"

    # gr.File(type="filepath") passes the path as a plain string.
    with open(script_file, "r", encoding="utf-8") as f:
        urdu_text = f.read().strip()

    audio_path = "output/urdu_audio.mp3"
    tts = gTTS(text=urdu_text, lang="ur")
    tts.save(audio_path)

    global global_audio_path
    global_audio_path = audio_path

    return audio_path, "✅ Audio generated successfully!"


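# Generate one Stable Diffusion image per scene, where scenes are the blank-line
# separated paragraphs of the uploaded script (capped at num_images).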
def generate_images(script_file, num_images):
    if script_file is None:
        return None, "⚠️ Please upload a script file!"

    num_images = int(num_images)

    # gr.File(type="filepath") passes the path as a plain string.
    with open(script_file, "r", encoding="utf-8") as f:
        text_lines = f.read().split("\n\n")

    image_paths = []
    for i, scene in enumerate(text_lines[:num_images]):
        prompt = f"Scene {i+1}: {scene.strip()}"
        image = pipe(prompt).images[0]
        image_path = f"generated_images/image_{i+1}.png"
        image.save(image_path)
        image_paths.append(image_path)

    return image_paths, "✅ Images generated successfully!"


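# Stitch the generated still images into an MP4 slideshow at the given frame rate.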
def images_to_video(image_paths, fps=1):
    if not image_paths:
        return None

    frame = cv2.imread(image_paths[0])
    height, width, layers = frame.shape

    video_path = "output/generated_video.mp4"
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    video = cv2.VideoWriter(video_path, fourcc, fps, (width, height))

    for image in image_paths:
        frame = cv2.imread(image)
        video.write(frame)

    video.release()
    return video_path


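# Mux the slideshow video with the previously generated Urdu narration via ffmpeg.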
def merge_audio_video(video_path):
    if global_audio_path is None:
        return None, "⚠️ No audio found! Please generate Urdu TTS first."

    final_video_path = "output/final_video.mp4"

    video = ffmpeg.input(video_path)
    audio = ffmpeg.input(global_audio_path)

    ffmpeg.output(video, audio, final_video_path, vcodec="libx264", acodec="aac").run(overwrite_output=True)

    return final_video_path, "✅ Video with Urdu voice-over generated successfully!"


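# End-to-end video step: generate scene images, build the slideshow, then merge
# it with the narration audio.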
def generate_final_video(script_file, num_images):
    if script_file is None:
        return None, "⚠️ Please upload a script file for image generation!"

    image_paths, img_msg = generate_images(script_file, num_images)
    if not image_paths:
        return None, img_msg

    video_path = images_to_video(image_paths, fps=1)
    final_video_path, vid_msg = merge_audio_video(video_path)

    return final_video_path, vid_msg


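# Gradio UI: one tab for script generation/translation, one for Urdu TTS,
# and one for AI video generation.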
with gr.Blocks() as app:

    gr.Markdown("## 🎬 AI-Powered Educational Video Generator")

    with gr.Tab("Script Generator"):
        topic_input = gr.Textbox(label="Enter Topic")
        duration_input = gr.Slider(minimum=1, maximum=30, step=1, label="Duration (minutes)")

        generate_button = gr.Button("Generate English Script")
        eng_output = gr.Textbox(label="Generated English Script", interactive=False)
        download_english_button = gr.Button("Download English Script")
        download_english_button.click(save_english_file, inputs=[eng_output, topic_input], outputs=[gr.File()])

        extract_keywords_btn = gr.Button("Extract Keywords")
        keyword_output = gr.Textbox(label="Extracted Keywords", interactive=True)
        download_keywords_btn = gr.Button("⬇️ Download Keywords")
        download_keywords_btn.click(save_keywords_file, inputs=[keyword_output, topic_input], outputs=[gr.File()])

        translate_button = gr.Button("Generate Urdu Script")
        urdu_output = gr.Textbox(label="Translated Urdu Script", interactive=False, rtl=True)
        download_urdu_button = gr.Button("Download Urdu Script")
        download_urdu_button.click(save_urdu_file, inputs=[urdu_output, topic_input], outputs=[gr.File()])

        final_edited_urdu_output = gr.Textbox(label="Edited Urdu Script", interactive=True, rtl=True)
        download_final_urdu_button = gr.Button("Download Final Urdu Script")
        download_final_urdu_button.click(save_final_urdu_file, inputs=[topic_input, final_edited_urdu_output], outputs=[gr.File()])

        generate_button.click(generate_script, inputs=[topic_input, duration_input], outputs=[eng_output])
        extract_keywords_btn.click(extract_keywords, inputs=[eng_output], outputs=[keyword_output])
        translate_button.click(translate_to_urdu, inputs=[eng_output], outputs=[urdu_output])

        status_output = gr.Textbox(label="Status", interactive=False)
        finalize_button = gr.Button("Finalize Process")
        finalize_button.click(finalize_process, outputs=[status_output])

        # Runs on the same click as the generate_script handler above: remove stale
        # files and reset the downstream fields before the new script arrives.
        generate_button.click(
            lambda topic, duration: clear_old_files(),
            inputs=[topic_input, duration_input],
            outputs=[keyword_output, urdu_output, final_edited_urdu_output, status_output]
        )

    with gr.Tab("🗣️ Urdu Text-to-Speech"):
        script_file_tts = gr.File(label="Upload Urdu Script for Audio", type="filepath")
        generate_audio_btn = gr.Button("Generate Audio", variant="primary")
        audio_output = gr.Audio(label="Urdu Speech Output", interactive=False)
        audio_status = gr.Textbox(label="ℹ️ Status", interactive=False)

        generate_audio_btn.click(text_to_speech, inputs=[script_file_tts], outputs=[audio_output, audio_status])

    with gr.Tab("🎥 AI Video Generator"):
        script_file_video = gr.File(label="Upload Urdu Script for Images", type="filepath")
        num_images = gr.Number(label="📸 Number of Scenes", value=3, minimum=1, maximum=10, step=1)
        generate_video_btn = gr.Button("🎬 Generate Video", variant="primary")
        video_output = gr.Video(label="Generated Video")
        video_status = gr.Textbox(label="ℹ️ Status", interactive=False)

        generate_video_btn.click(generate_final_video, inputs=[script_file_video, num_images], outputs=[video_output, video_status])

app.launch()