#!/usr/bin/env python3
"""SD Dataset Automaker - HF Space (CPU) - Anime character dataset generator for LoRA/fine-tuning."""
import warnings
warnings.filterwarnings('ignore', category=FutureWarning) # Suppress torch.cuda.amp spam
warnings.filterwarnings('ignore', category=UserWarning, message='.*trust_repo.*')
import os, re, shutil, zipfile, random, tempfile, argparse, sys
from urllib.parse import quote_plus
from collections import Counter
from pathlib import Path
from tqdm import tqdm
import gradio as gr
from bs4 import BeautifulSoup
import requests as req_lib
import time
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from torchvision import models, transforms
from sklearn.metrics.pairwise import pairwise_distances
import onnxruntime as rt
import pandas as pd
import huggingface_hub
# =============================================================================
# CONFIG
# =============================================================================
EXTS = ('.jpg', '.jpeg', '.png')
MODEL_DIR = Path(__file__).parent.resolve() # Ensure absolute path
YOLO_PATH = MODEL_DIR / "yolov5s_anime.pt"
SIM_PATH = MODEL_DIR / "similarity.pt"
EXAMPLES = [str(MODEL_DIR / f"from_url_spike_spiegel{i}.jpg") for i in range(1, 4)] # absolute paths for gr.Examples
WD_REPO = "SmilingWolf/wd-swinv2-tagger-v3"
TAG_THRESH, BLACKLIST = 0.35, ["bangs", "breasts", "multicolored hair", "gradient hair", "two-tone hair", "virtual youtuber"]
FACE_CONF, FACE_IOU, MIN_FACE, CROP_PAD = 0.5, 0.5, 35, 0.2
SIM_THRESH, BATCH_SZ, FACE_SZ = 32, 16, 224
# =============================================================================
# UTILS
# =============================================================================
sanitize = lambda s: re.sub(r'[^\w.-]', '', s.replace(" ", "_"))
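# e.g. sanitize("Cowboy Bebop (1998)") -> "Cowboy_Bebop_1998"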
get_imgs = lambda d: sorted([os.path.join(r,f) for r,_,fs in os.walk(d) for f in fs if f.lower().endswith(EXTS)])
def valid_img(p):
    """True if the file exists and PIL can fully load it; corrupt files return False instead of raising."""
    try: Image.open(p).load(); return True
    except Exception: return False
# HTTP session - mode depends on environment
# CLI (local Windows): cloudscraper bypasses Cloudflare
# HF Spaces: plain requests (cloudscraper fingerprint gets blocked on datacenter IPs)
def init_session(use_cloudscraper=False):
global SESSION, HTTP_CLIENT
if use_cloudscraper:
try:
import cloudscraper
SESSION = cloudscraper.create_scraper()
HTTP_CLIENT = "cloudscraper"
return
except ImportError:
pass # fallback to requests
SESSION = req_lib.Session()
SESSION.headers.update({
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'DNT': '1',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'none',
'Sec-Fetch-User': '?1',
'Cache-Control': 'max-age=0',
'Referer': 'https://fancaps.net/',
})
HTTP_CLIENT = "requests/browser-headers"
# Default: plain requests (for HF Spaces import)
SESSION = None
HTTP_CLIENT = None
init_session(use_cloudscraper=False)
# =============================================================================
# SCRAPING
# =============================================================================
def search_fancaps(prompt, anime=True, movies=False, tv=False, log_fn=None):
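    """Search fancaps.net and group results by category; returns {category: [(title, link, index), ...]}, or {} on error/block."""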
L = log_fn or print
url = f"https://fancaps.net/search.php?q={quote_plus(prompt)}&submit=Submit"
if anime: url += "&animeCB=Anime"
if movies: url += "&MoviesCB=Movies"
if tv: url += "&TVCB=TV"
L(f" URL: {url}")
try:
resp = SESSION.get(url, timeout=30)
L(f" Status: {resp.status_code}, Size: {len(resp.content)} bytes")
# Log key headers for debugging
cf_ray = resp.headers.get('cf-ray', 'none')
server = resp.headers.get('server', 'unknown')
L(f" Server: {server}, CF-Ray: {cf_ray}")
if resp.status_code != 200:
L(f" ERROR: HTTP {resp.status_code}")
# Show snippet of response to understand the block reason
content_snippet = resp.text[:500].replace('\n', ' ').strip()
L(f" Response snippet: {content_snippet[:200]}...")
return {}
soup = BeautifulSoup(resp.content, "html.parser")
# Debug: check if we got Cloudflare challenge
title = soup.title.string if soup.title else "No title"
L(f" Page title: {title[:50]}")
if "cloudflare" in title.lower() or "challenge" in title.lower():
L(" ERROR: Cloudflare challenge detected!")
return {}
except Exception as e:
L(f" ERROR: {type(e).__name__}: {e}")
return {}
results, cnt = {}, 1
divs = soup.find_all("div", class_="single_post_content")
L(f" Found {len(divs)} content divs")
for div in divs:
if not div.find('h2'): continue
for h2 in div.find_all('h2'):
title = h2.get_text(strip=True).lower()
cat = 'Movies' if 'movie' in title else 'TV' if 'tv' in title else 'Anime' if 'anime' in title else None
if not cat: continue
table = h2.find_next('table')
if not table: continue
results.setdefault(cat, [])
for h4 in table.find_all('h4'):
a = h4.find('a')
if a and a.get('href'):
results[cat].append((a.get_text(strip=True), a['href'], cnt)); cnt += 1
break
L(f" Parsed results: {sum(len(v) for v in results.values())} items in {list(results.keys())}")
return results
def get_episodes(url, log_fn=None):
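    """Walk the paginated episode list; returns episode page URLs, or [url] itself if no episode buttons are found."""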
L = log_fn or (lambda x: None)
links, page = [], 1
while True:
try:
resp = SESSION.get(f"{url}&page={page}", timeout=20)
L(f" get_episodes page {page}: status={resp.status_code}")
if resp.status_code != 200:
L(f" ERROR: {resp.text[:150]}...")
break
soup = BeautifulSoup(resp.content, "html.parser")
except Exception as e:
L(f" get_episodes ERROR: {type(e).__name__}: {e}")
break
btns = soup.find_all('a', class_='btn btn-block')
if not btns:
L(f" No episode buttons on page {page}")
break
links.extend([("https://fancaps.net" + b['href'] if b['href'].startswith('/') else b['href']) for b in btns if b.get('href')])
L(f" Found {len(btns)} episodes on page {page}, total: {len(links)}")
page += 1
return links or [url]
def get_frame_names(url, log_fn=None):
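    """Collect frame image filenames from a paginated episode/movie gallery."""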
L = log_fn or (lambda x: None)
names, page = [], 1
while True:
try:
resp = SESSION.get(f"{url}&page={page}", timeout=20)
if resp.status_code != 200:
L(f" get_frames page {page}: status={resp.status_code}")
break
soup = BeautifulSoup(resp.content, "html.parser")
except Exception as e:
L(f" get_frames ERROR: {type(e).__name__}: {e}")
break
imgs = soup.find_all('img', class_='imageFade')
if not imgs:
if page == 1: L(f" No images found on first page!")
break
names.extend([s.split('/')[-1] for i in imgs if (s := i.get('src')) and s.split('/')[-1] not in names])
pager = soup.select_one('ul.pagination li:last-child a')
if not pager or pager.get('href') in ['#', None]: break
page += 1
L(f" Total frame names: {len(names)}")
return names
def download(url, folder, name, timeout=10, retries=3):
"""Download single image with retry - returns (success, status_code)."""
fp = os.path.join(folder, name)
if os.path.exists(fp): return True, 200
for attempt in range(retries):
try:
r = SESSION.get(url, stream=True, timeout=timeout)
if r.status_code == 200:
with open(fp, 'wb') as f:
for chunk in r.iter_content(16384):
if chunk: f.write(chunk)
return True, 200
if r.status_code == 429: # Rate limit - don't retry immediately
return False, 429
# Other errors - retry
        except Exception:
pass
if attempt < retries - 1:
time.sleep(1)
return False, None
def scrape(name, link, save_dir, max_imgs, progress=None, log_fn=None):
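    """Download up to max_imgs randomly sampled frames into save_dir, backing off on HTTP 429 and aborting after max_429 consecutive hits."""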
L = log_fn or print
url, folder = "https://fancaps.net" + link, os.path.join(save_dir, sanitize(name))
os.makedirs(folder, exist_ok=True)
section = 'movie' if '/movies/' in link else 'anime' if '/anime/' in link else 'tv'
L(f" [2/8] Scraping: {url}")
L(f" Section: {section}, max: {max_imgs}")
consecutive_429 = 0
max_429 = 3 # Abort after 3 consecutive 429s
if section == 'movie':
names = get_frame_names(url, log_fn=L)
L(f" Movie frames: {len(names)}")
sampled = random.sample(names, min(max_imgs, len(names))) if names else []
downloaded = 0
for i, n in enumerate(sampled):
if consecutive_429 >= max_429:
L(f" Aborting: {consecutive_429} consecutive 429s")
break
if i > 0: time.sleep(random.uniform(0.3, 0.8)) # Faster delay
try:
if progress and len(sampled) > 0: progress((i+1)/len(sampled), desc=f"Downloading {name[:20]}")
            except Exception: pass  # Gradio progress can fail in some contexts
success, status = download(f"https://cdni.fancaps.net/file/fancaps-{section}images/{n}", folder, n)
if success:
downloaded += 1
consecutive_429 = 0
elif status == 429:
consecutive_429 += 1
cooldown = 30 * consecutive_429
L(f" 429 rate limit ({consecutive_429}/{max_429}), cooling {cooldown}s...")
time.sleep(cooldown)
else:
consecutive_429 = 0
L(f" Downloaded: {downloaded}/{len(sampled)}")
else:
L(f" Fetching episodes...")
eps = get_episodes(url, log_fn=L)
L(f" Episodes: {len(eps)}")
total = 0
per_ep = max(1, max_imgs // len(eps)) if eps else max_imgs
for i, ep in enumerate(eps):
if total >= max_imgs or consecutive_429 >= max_429: break
names = get_frame_names(ep, log_fn=L)
if not names: continue
ep_dir = os.path.join(folder, f"Ep{i+1}")
os.makedirs(ep_dir, exist_ok=True)
sampled = random.sample(names, min(per_ep, len(names), max_imgs - total))
for j, n in enumerate(sampled):
if consecutive_429 >= max_429: break
if j > 0: time.sleep(random.uniform(0.3, 0.8)) # Faster delay
try:
if progress and max_imgs > 0: progress(total/max_imgs, desc=f"Ep{i+1}")
                except Exception: pass  # Gradio progress can fail in some contexts
success, status = download(f"https://cdni.fancaps.net/file/fancaps-{section}images/{n}", ep_dir, n)
if success:
total += 1
consecutive_429 = 0
elif status == 429:
consecutive_429 += 1
cooldown = 30 * consecutive_429
L(f" 429 rate limit ({consecutive_429}/{max_429}), cooling {cooldown}s...")
time.sleep(cooldown)
else:
consecutive_429 = 0
L(f" Total downloaded: {total}")
# =============================================================================
# ML MODELS (cached)
# =============================================================================
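# Lazy singleton cache: each model is loaded once per process and reused across runs.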
_models = {}
def get_yolo():
if 'yolo' not in _models:
_models['yolo'] = torch.hub.load('ultralytics/yolov5', 'custom', path=str(YOLO_PATH), force_reload=False, verbose=False)
_models['yolo'].conf, _models['yolo'].iou = FACE_CONF, FACE_IOU
return _models['yolo']
def get_sim():
if 'sim' not in _models:
class SiameseNetwork(nn.Module):
def __init__(self):
super().__init__()
self.base_model = models.efficientnet_b0(weights=models.EfficientNet_B0_Weights.DEFAULT)
def forward(self, x):
return self.base_model(x) # 1000-class output (trained this way)
m = SiameseNetwork()
# Keep on CPU for consistent distance values across devices
m.load_state_dict(torch.load(str(SIM_PATH), map_location="cpu", weights_only=True))
m.eval()
_models['sim'] = m
return _models['sim']
def get_tagger():
if 'tag' not in _models:
mp, cp = huggingface_hub.hf_hub_download(WD_REPO, "model.onnx"), huggingface_hub.hf_hub_download(WD_REPO, "selected_tags.csv")
tags = [str(x).replace('_', ' ') for x in pd.read_csv(cp)['name'].tolist()]
sess = rt.InferenceSession(mp, providers=['CPUExecutionProvider'])
_models['tag'] = (sess, tags, sess.get_inputs()[0].shape[1])
return _models['tag']
# =============================================================================
# PROCESSING
# =============================================================================
def dedup(paths, thresh=0.98):
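    """Near-duplicate filter: ResNet-50 embeddings + cosine similarity; for pairs above thresh only the earlier image is kept."""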
if not paths: return [], []
m = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1); m.fc = nn.Identity(); m.eval()
tf = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([.485,.456,.406],[.229,.224,.225])])
emb, valid = [], []
with torch.no_grad():
for i in range(0, len(paths), 32):
batch = [(tf(Image.open(p).convert('RGB')), p) for p in paths[i:i+32] if valid_img(p)]
if batch:
x = torch.stack([b[0] for b in batch])
emb.append(m(x).numpy()); valid.extend([b[1] for b in batch])
del m
if not emb: return [], []
emb = np.vstack(emb); emb /= np.linalg.norm(emb, axis=1, keepdims=True).clip(1e-8)
sim = emb @ emb.T; np.fill_diagonal(sim, 0)
keep, drop = [], set()
for i in range(len(valid)):
if i not in drop: keep.append(valid[i]); drop.update(j for j in np.where(sim[i] > thresh)[0] if j > i)
return keep, [valid[i] for i in drop]
def detect_faces(paths, out_dir):
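    """Detect anime faces with YOLOv5, pad each box by CROP_PAD, and save crops whose padded short side is >= MIN_FACE px."""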
yolo = get_yolo(); os.makedirs(out_dir, exist_ok=True); cnt = 0
for p in paths:
try:
img = Image.open(p).convert('RGB'); w, h = img.size
for j, det in enumerate(yolo(img, size=640).xyxy[0].cpu().numpy()):
x1, y1, x2, y2, conf, _ = det
bw, bh = x2-x1, y2-y1
x1, y1, x2, y2 = max(0, x1-bw*CROP_PAD), max(0, y1-bh*CROP_PAD), min(w, x2+bw*CROP_PAD), min(h, y2+bh*CROP_PAD)
if min(x2-x1, y2-y1) >= MIN_FACE:
img.crop((int(x1), int(y1), int(x2), int(y2))).save(os.path.join(out_dir, f"{Path(p).stem}-{j+1}-{conf:.2f}.jpg"), quality=95)
cnt += 1
        except Exception: pass  # skip unreadable images / failed detections
return cnt
def face_emb(paths):
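    """Embed face crops with the Siamese EfficientNet-B0 (letterboxed to FACE_SZ, CPU-only); returns (embeddings, valid_paths)."""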
if not paths: return np.array([]), []
m = get_sim() # Always on CPU for consistent distances
def pad(img):
t, w, h = FACE_SZ, img.size[0], img.size[1]; r = w/h
nw, nh = (t, int(t/r)) if r > 1 else (int(t*r), t)
img = img.resize((nw, nh), Image.BICUBIC)
out = Image.new('RGB', (t, t), (0,0,0)); out.paste(img, ((t-nw)//2, (t-nh)//2)); return out
tf = transforms.Compose([lambda x: pad(x.convert('RGB') if x.mode == 'RGBA' else x), transforms.ToTensor()])
emb, valid = [], []
with torch.no_grad():
for i in range(0, len(paths), BATCH_SZ):
batch = [(tf(Image.open(p)), p) for p in paths[i:i+BATCH_SZ] if valid_img(p)]
if batch:
x = torch.stack([b[0] for b in batch]) # CPU tensor
emb.append(m(x).numpy())
valid.extend([b[1] for b in batch])
return (np.vstack(emb), valid) if emb else (np.array([]), [])
def tag(path, act_tag="", char_tag=""):
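    """Caption one image with the WD ONNX tagger (letterboxed, BGR input): keep tags above TAG_THRESH, drop BLACKLIST, prepend trigger/character tags."""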
sess, tags, sz = get_tagger()
img = Image.open(path).convert('RGB'); w, h = img.size
s = min(sz/w, sz/h); nw, nh = int(w*s), int(h*s)
pad = Image.new('RGB', (sz, sz), (255,255,255)); pad.paste(img.resize((nw, nh), Image.BICUBIC), ((sz-nw)//2, (sz-nh)//2))
probs = sess.run(None, {sess.get_inputs()[0].name: np.expand_dims(np.array(pad).astype(np.float32)[:,:,::-1], 0)})[0][0]
found = [tags[i] for i, p in enumerate(probs) if p > TAG_THRESH and tags[i] not in BLACKLIST]
# Prepend activation tag and character tag if provided
prefix = []
if act_tag: prefix.append(act_tag); found = [t for t in found if t != act_tag]
if char_tag: prefix.append(char_tag.replace("_", " ")); found = [t for t in found if t != char_tag.replace("_", " ")]
return prefix + found
# =============================================================================
# PIPELINE
# =============================================================================
def parse_direct_url(url):
"""Parse direct fancaps URL, extract show name and relative link. Returns (name, link) or (None, None)."""
    # Match patterns: showimages.php?ID-Name (anime/tv), MovieImages.php?movieid=ID (movies)
patterns = [
r'fancaps\.net/anime/showimages\.php\?(\d+)-([^&/]+)', # anime show
r'fancaps\.net/tv/showimages\.php\?(\d+)-([^&/]+)', # tv show
r'fancaps\.net/movies/MovieImages\.php\?.*?movieid=(\d+)', # movie
]
for pat in patterns:
m = re.search(pat, url)
if m:
if 'anime' in url: section = 'anime'
elif 'movies' in url: section = 'movies'
else: section = 'tv'
# Extract name from URL (replace underscores with spaces)
name = m.group(2).replace('_', ' ') if len(m.groups()) > 1 else f"Show_{m.group(1)}"
# Build relative link (what scrape() expects)
if section == 'movies':
link = f"/movies/MovieImages.php?movieid={m.group(1)}"
else:
link = f"/{section}/showimages.php?{m.group(1)}-{m.group(2) if len(m.groups()) > 1 else ''}"
return name, link
return None, None
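# Example (same URL as the gr.Examples entry below):
#   parse_direct_url("https://fancaps.net/anime/showimages.php?3092-Cowboy_Bebop")
#   -> ("Cowboy Bebop", "/anime/showimages.php?3092-Cowboy_Bebop")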
def run(query, char, examples, max_img, thresh, act_tag, anime, movies, tv, progress=None, cli_mode=False):
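    """Full pipeline: resolve show (search or direct URL) -> scrape -> dedup -> face crop -> match against reference faces -> collect originals -> tag -> ZIP.
    Returns (zip_path, log_text); zip_path is None on failure."""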
log = []
def L(m): log.append(m); print(m)
def prog(val, desc=""):
if progress and not cli_mode: progress(val, desc=desc)
work = tempfile.mkdtemp(prefix="ds_")
dirs = {k: os.path.join(work, f"{i}_{k}") for i, k in enumerate(['scrapped','filtered','faces','ex_faces','similar','results'], 1)}
for d in dirs.values(): os.makedirs(d, exist_ok=True)
final_zip = None # Track ZIP for cleanup
try:
L(f"HTTP client: {HTTP_CLIENT}")
t0 = time.time()
# Check if query is a direct fancaps URL (bypasses search, works on HF Spaces)
if 'fancaps.net' in query and ('showimages.php' in query or 'MovieImages.php' in query):
L(f"[1/8] Direct URL mode")
name, link = parse_direct_url(query)
if not link:
return None, "\n".join(log) + "\n\nCouldn't parse URL!"
item = (name, link, 1)
L(f" Parsed: {name}")
else:
L(f"[1/8] Search: {query}")
prog(0.05, desc="Searching...")
res = search_fancaps(query, anime, movies, tv, log_fn=L)
if not res:
return None, "\n".join(log) + "\n\nSearch blocked! Use direct fancaps URL."
item = next((items[0] for items in res.values() if items), None)
            if not item: return None, "\n".join(log) + "\n\nNo results!"
show_name = item[0]
if not char: char = sanitize(show_name)
t1 = time.time(); L(f" Found: {show_name} ({t1-t0:.0f}s)"); prog(0.1, desc="Downloading...")
# [2/8] Scrape
scrape(item[0], item[1], dirs['scrapped'], max_img, progress if not cli_mode else None, log_fn=L)
imgs = get_imgs(dirs['scrapped'])
t2 = time.time(); L(f"[2/8] Downloaded: {len(imgs)} ({t2-t1:.0f}s)")
        if not imgs: return None, "\n".join(log) + "\n\nNo images downloaded!"
# [3/8] Dedup
prog(0.3, desc="Dedup...")
imgs = [p for p in imgs if valid_img(p)]
kept, rm = dedup(imgs)
for p in kept: shutil.copy(p, os.path.join(dirs['filtered'], os.path.basename(p)))
t3 = time.time(); L(f"[3/8] Dedup: {len(kept)} kept, -{len(rm)} ({t3-t2:.0f}s)")
# [4/8] Detect faces
prog(0.4, desc="Faces...")
n = detect_faces(get_imgs(dirs['filtered']), dirs['faces'])
t4 = time.time(); L(f"[4/8] Faces: {n} ({t4-t3:.0f}s)")
        if n == 0: return None, "\n".join(log) + "\n\nNo faces detected!"
# [5/8] Process examples
prog(0.5, desc="Examples...")
ex_paths = [p for p in (examples or []) if p and os.path.exists(p)]
if not ex_paths: ex_paths = [p for p in EXAMPLES if os.path.exists(p)]
        if not ex_paths: return None, "\n".join(log) + "\n\nNo example images!"
n_ex = detect_faces(ex_paths, dirs['ex_faces'])
t5 = time.time(); L(f"[5/8] Examples: {len(ex_paths)} imgs -> {n_ex} faces ({t5-t4:.0f}s)")
        if n_ex == 0: return None, "\n".join(log) + "\n\nNo faces in examples!"
# [6/8] Match
prog(0.6, desc="Matching...")
f_emb, f_valid = face_emb(get_imgs(dirs['faces']))
e_emb, _ = face_emb(get_imgs(dirs['ex_faces']))
dists = pairwise_distances(f_emb, e_emb, metric='euclidean').min(axis=1)
similar_idx = np.where(dists < thresh)[0]
similar = [f_valid[i] for i in similar_idx]
similar_dists = dists[similar_idx]
t6 = time.time()
L(f"[6/8] Matches: {len(similar)} (thresh={thresh}) ({t6-t5:.0f}s)")
if len(similar_dists) > 0:
L(f" Distances: min={similar_dists.min():.1f}, max={similar_dists.max():.1f}, mean={similar_dists.mean():.1f}")
        if not similar: return None, "\n".join(log) + f"\n\nNo matches! Try threshold > {thresh}"
# [7/8] Get originals
prog(0.7, desc="Collect...")
origs = set()
orig_to_dist = {}
for i, fp in enumerate(similar):
parts = os.path.basename(fp).rsplit('-', 2)
base = parts[0] if len(parts) >= 3 else Path(fp).stem
for ext in EXTS:
op = os.path.join(dirs['filtered'], base + ext)
if os.path.exists(op):
origs.add(op)
orig_to_dist[os.path.basename(op)] = similar_dists[i]
break
res_dir = os.path.join(work, f"results_{sanitize(char)}")
os.makedirs(res_dir, exist_ok=True)
for p in origs: shutil.copy(p, os.path.join(res_dir, os.path.basename(p)))
t7 = time.time(); L(f"[7/8] Collected: {len(origs)} ({t7-t6:.0f}s)")
# [8/8] Tag
prog(0.8, desc="Tagging...")
char_tag = char if char != sanitize(show_name) else ""
for p in get_imgs(res_dir):
tags = tag(p, act_tag, char_tag)
with open(os.path.splitext(p)[0] + ".txt", 'w') as f: f.write(", ".join(tags))
t8 = time.time(); L(f"[8/8] Tagged: {len(origs)} ({t8-t7:.0f}s)")
# Log each image with distance
L(f"\nResults (distance to ref):")
for name, d in sorted(orig_to_dist.items(), key=lambda x: x[1]):
L(f" {name}: {d:.1f}")
# Zip
prog(0.95, desc="Zipping...")
zp = os.path.join(work, f"{sanitize(char)}_dataset.zip")
with zipfile.ZipFile(zp, 'w', zipfile.ZIP_DEFLATED) as z:
for p in get_imgs(res_dir) + [os.path.splitext(p)[0]+".txt" for p in get_imgs(res_dir)]:
if os.path.exists(p): z.write(p, os.path.basename(p))
# Copy ZIP to persistent temp location (Gradio needs file to exist after return)
final_zip = tempfile.NamedTemporaryFile(delete=False, suffix=".zip", prefix=f"{sanitize(char)}_").name
shutil.copy(zp, final_zip)
L(f"\nDone! {len(origs)} images, total {t8-t0:.0f}s"); prog(1.0, desc="Complete!")
return final_zip, "\n".join(log)
except Exception as e:
import traceback
return None, "\n".join(log) + f"\n\nERROR: {e}\n{traceback.format_exc()}"
finally:
# Clean up work directory (ZIP already copied out)
if os.path.exists(work):
shutil.rmtree(work, ignore_errors=True)
# =============================================================================
# UI
# =============================================================================
css = """
.gradio-container { padding-top: 10px !important; }
.compact-group { margin-bottom: 8px !important; }
"""
with gr.Blocks(title="SD Dataset Automaker: Fancaps → Face Crop (YOLO) → Similarity (Siamese) → WD Tagger → ZIP", css=css) as demo:
gr.Markdown("### SD Dataset Automaker: Fancaps → Face Crop (YOLO) → Similarity (Siamese) → WD Tagger → ZIP")
with gr.Row():
with gr.Column(scale=3):
# Compact input group
with gr.Group():
with gr.Row():
query = gr.Textbox(
label="Anime / Movie / Fancaps URL",
placeholder="'Cowboy Bebop' or paste URL",
scale=3
)
char = gr.Textbox(label="Character (optional, appends to tags)", placeholder="spike_spiegel", scale=2)
with gr.Row():
ref_imgs = gr.Gallery(
label="Reference Face Image(s)",
columns=4,
height=100,
interactive=True,
object_fit="scale-down",
scale=3,
)
run_btn = gr.Button("Generate Dataset", variant="primary", size="lg", scale=1)
# Hidden File input for MCP compatibility (Gallery $ref schema bug persists in Gradio 6.0.1)
ref_files = gr.File(
label="Reference Images (MCP)",
file_count="multiple",
file_types=["image"],
visible=False,
)
# gr.Examples + gr.Gallery works in Gradio 5.46.0+ (PR #11787)
gr.Examples(
examples=[
["https://fancaps.net/anime/showimages.php?3092-Cowboy_Bebop", "spike_spiegel", EXAMPLES],
],
inputs=[query, char, ref_imgs],
label="Example (click to load)",
)
# Advanced settings in accordion
with gr.Accordion("Advanced Settings", open=False):
with gr.Row():
max_img = gr.Slider(50, 500, 200, step=50, label="Max Downloads (frames to scrape)")
thresh = gr.Slider(20, 60, 32, step=1, label="Face Similarity (lower=stricter)")
with gr.Row():
act_tag = gr.Textbox(label="Trigger Word (prepends to captions)", placeholder="e.g. sks_style", scale=2)
anime_cb = gr.Checkbox(label="Anime", value=True, scale=1)
movies_cb = gr.Checkbox(label="Movies", scale=1)
tv_cb = gr.Checkbox(label="TV", scale=1)
with gr.Column(scale=1):
out_file = gr.File(label="Download ZIP", interactive=False)
with gr.Accordion("Log", open=True):
out_log = gr.Textbox(label="", lines=12, max_lines=50, show_label=False, autoscroll=False)
gr.Markdown("*CPU: ~5-10 min/run*")
def process(q, c, imgs, files, mi, th, at, an, mo, tv, prog=gr.Progress()):
if not q:
gr.Warning("Enter anime name or URL")
return None, ""
# Collect paths from Gallery (imgs) or File input (files)
paths = []
for item in (imgs or []):
p = item[0] if isinstance(item, (list, tuple)) else item
if p and os.path.exists(p): paths.append(p)
if not paths and files:
for f in (files if isinstance(files, list) else [files]):
fp = f.name if hasattr(f, 'name') else str(f)
if fp and os.path.exists(fp): paths.append(fp)
if not paths:
gr.Warning("Upload reference images or click 'Load Example'")
return None, ""
if 'fancaps.net' in q:
gr.Info("Direct URL detected")
else:
gr.Info(f"Searching: {q}")
zp, log = run(q, c, paths, mi, th, at, an, mo, tv, prog)
if zp:
gr.Info("Done!")
return zp, log
run_btn.click(process, [query, char, ref_imgs, ref_files, max_img, thresh, act_tag, anime_cb, movies_cb, tv_cb], [out_file, out_log])
def run_cli():
"""CLI mode with cloudscraper for Cloudflare bypass"""
# Use cloudscraper for CLI (bypasses Cloudflare on local/residential IPs)
init_session(use_cloudscraper=True)
parser = argparse.ArgumentParser(description="SD Dataset Automaker - Anime character dataset generator")
parser.add_argument("--title", "-t", required=True, help="Anime name or fancaps.net URL")
parser.add_argument("--image", "-i", nargs="+", required=True, help="Reference face images (1-5)")
parser.add_argument("--char", "-c", default="", help="Character name (optional, appends to tags)")
parser.add_argument("--max", "-m", type=int, default=200, help="Max frames to scrape (default: 200)")
parser.add_argument("--thresh", type=float, default=32.0, help="Face similarity threshold, lower=stricter (default: 32)")
parser.add_argument("--tag", default="", help="Trigger word to prepend to captions")
parser.add_argument("--anime", action="store_true", default=True, help="Search anime (default)")
parser.add_argument("--movies", action="store_true", help="Search movies")
parser.add_argument("--tv", action="store_true", help="Search TV")
parser.add_argument("--output", "-o", default=".", help="Output directory (default: current)")
args = parser.parse_args()
# Validate images
ref_imgs = [p for p in args.image if os.path.exists(p)]
if not ref_imgs:
print(f"ERROR: No valid reference images found: {args.image}")
sys.exit(1)
print(f"SD Dataset Automaker - CLI Mode")
print(f" Title: {args.title}")
print(f" Refs: {len(ref_imgs)} images")
print(f" Char: {args.char or '(auto from title)'}")
print()
zp, log = run(
query=args.title,
char=args.char,
examples=ref_imgs,
max_img=args.max,
thresh=args.thresh,
act_tag=args.tag,
anime=args.anime,
movies=args.movies,
tv=args.tv,
cli_mode=True
)
if zp:
# Copy to output dir
out_path = os.path.join(args.output, os.path.basename(zp))
shutil.copy(zp, out_path)
print(f"\nSaved: {out_path}")
else:
print(f"\nFailed!")
sys.exit(1)
if __name__ == "__main__":
# CLI mode if args provided, else Gradio UI
if len(sys.argv) > 1:
run_cli()
else:
# Gradio UI mode
allowed_dir = os.path.dirname(os.path.abspath(__file__))
demo.launch(
server_name="0.0.0.0",
server_port=7860,
mcp_server=True,
show_error=True,
allowed_paths=[allowed_dir],
)