| """ | |
| Utility functions for video processing and file handling | |
| """ | |
| import os | |
| import tempfile | |
| import uuid | |
| from typing import Optional, Tuple | |
| import torch | |
| import numpy as np | |
| from PIL import Image | |


def create_temp_video_path(extension: str = "mp4") -> str:
    """Create a unique temporary file path for video output."""
    temp_dir = tempfile.gettempdir()
    unique_id = str(uuid.uuid4())
    return os.path.join(temp_dir, f"video_{unique_id}.{extension}")
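
# Note: this helper only builds a path; the file itself is written later by the
# caller (e.g. the video export step). The uuid4 component makes name collisions
# in the shared temp directory unlikely. The base directory comes from
# tempfile.gettempdir() and therefore varies by OS (/tmp on most Linux systems).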


def validate_generation_params(
    width: int,
    height: int,
    num_frames: int,
    num_inference_steps: int,
    guidance_scale: float,
) -> Tuple[bool, Optional[str]]:
    """Validate video generation parameters."""
    # Check width and height bounds
    if width < 64 or width > 1920:
        return False, "Width must be between 64 and 1920 pixels"
    if height < 64 or height > 1080:
        return False, "Height must be between 64 and 1080 pixels"
    # Check that dimensions are divisible by 8 (a common requirement for video models)
    if width % 8 != 0:
        return False, "Width must be divisible by 8"
    if height % 8 != 0:
        return False, "Height must be divisible by 8"
    # Check frame count
    if num_frames < 1 or num_frames > 200:
        return False, "Number of frames must be between 1 and 200"
    # Check inference steps
    if num_inference_steps < 1 or num_inference_steps > 100:
        return False, "Number of inference steps must be between 1 and 100"
    # Check guidance scale
    if guidance_scale < 0 or guidance_scale > 20:
        return False, "Guidance scale must be between 0 and 20"
    return True, None
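
# Illustrative calls (values chosen here for demonstration only):
#   validate_generation_params(512, 512, 16, 25, 7.5)   -> (True, None)
#   validate_generation_params(513, 512, 16, 25, 7.5)   -> (False, "Width must be divisible by 8")
#   validate_generation_params(512, 512, 500, 25, 7.5)  -> (False, "Number of frames must be between 1 and 200")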


def validate_prompt(prompt: str) -> Tuple[bool, Optional[str]]:
    """Validate the input prompt."""
    if not prompt or len(prompt.strip()) == 0:
        return False, "Prompt cannot be empty"
    if len(prompt) > 1000:
        return False, "Prompt must be 1000 characters or fewer"
    return True, None
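
# Illustrative boundary behavior:
#   validate_prompt("")            -> (False, "Prompt cannot be empty")
#   a 1000-character prompt passes; 1001 characters fails.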


def get_memory_usage() -> str:
    """Get current GPU memory usage if available."""
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3  # convert bytes to GB
        cached = torch.cuda.memory_reserved() / 1024**3
        return f"GPU Memory - Allocated: {allocated:.2f}GB, Cached: {cached:.2f}GB"
    return "GPU not available"


def cleanup_temp_files(file_path: str) -> None:
    """Remove a temporary file, warning instead of raising on failure."""
    try:
        if os.path.exists(file_path):
            os.remove(file_path)
    except OSError as e:
        print(f"Warning: Could not remove temporary file {file_path}: {e}")


def format_generation_info(
    prompt: str,
    negative_prompt: str,
    width: int,
    height: int,
    num_frames: int,
    num_inference_steps: int,
    guidance_scale: float,
    generation_time: float,
) -> str:
    """Format generation information for display as Markdown."""
    info = f"""
**Generation Details:**
- **Prompt:** {prompt}
- **Negative Prompt:** {negative_prompt if negative_prompt else "None"}
- **Dimensions:** {width}x{height}
- **Frames:** {num_frames}
- **Inference Steps:** {num_inference_steps}
- **Guidance Scale:** {guidance_scale}
- **Generation Time:** {generation_time:.2f} seconds
- **Memory Usage:** {get_memory_usage()}
"""
    return info
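

if __name__ == "__main__":
    # Minimal usage sketch (not part of the library): exercises the helpers end
    # to end with made-up values. The 1.23 s "generation time" is a placeholder,
    # since no model is actually run here.
    ok, err = validate_prompt("A corgi surfing a wave at sunset")
    assert ok, err
    ok, err = validate_generation_params(512, 512, 16, 25, 7.5)
    assert ok, err

    out_path = create_temp_video_path()
    print(f"Would write video to: {out_path}")
    print(format_generation_info(
        prompt="A corgi surfing a wave at sunset",
        negative_prompt="",
        width=512,
        height=512,
        num_frames=16,
        num_inference_steps=25,
        guidance_scale=7.5,
        generation_time=1.23,
    ))
    cleanup_temp_files(out_path)  # no-op here: the file was never actually written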