Upload 21 files
- configs/image.yaml +69 -0
- configs/text.yaml +68 -0
- data/anya_rgba.png +3 -0
- data/catstatue_rgba.png +3 -0
- data/csm_luigi_rgba.png +3 -0
- data/test.png +3 -0
- data/zelda_rgba.png +3 -0
- guidance/sd_utils.py +334 -0
- guidance/zero123_utils.py +226 -0
- scripts/convert_obj_to_video.py +20 -0
- scripts/run.sh +5 -0
- scripts/run_sd.sh +31 -0
- scripts/runall.py +48 -0
- scripts/runall_sd.py +45 -0
- simple-knn/ext.cpp +17 -0
- simple-knn/setup.py +35 -0
- simple-knn/simple_knn.cu +221 -0
- simple-knn/simple_knn.h +21 -0
- simple-knn/simple_knn/.gitkeep +0 -0
- simple-knn/spatial.cu +26 -0
- simple-knn/spatial.h +14 -0
configs/image.yaml
ADDED
@@ -0,0 +1,69 @@
### Input
# input rgba image path (default to None, can be load in GUI too)
input:
# input text prompt (default to None, can be input in GUI too)
prompt:
# input mesh for stage 2 (auto-search from stage 1 output path if None)
mesh:
# estimated elevation angle for input image
elevation: 0
# reference image resolution
ref_size: 256
# density thresh for mesh extraction
density_thresh: 1

### Output
outdir: logs
mesh_format: obj
save_path: ???

### Training
# guidance loss weights (0 to disable)
lambda_sd: 0
lambda_zero123: 1
# training batch size per iter
batch_size: 1
# training iterations for stage 1
iters: 500
# training iterations for stage 2
iters_refine: 50
# training camera radius
radius: 2
# training camera fovy
fovy: 49.1 # align with zero123 rendering setting (ref: https://github.com/cvlab-columbia/zero123/blob/main/objaverse-rendering/scripts/blender_script.py#L61
# checkpoint to load for stage 1 (should be a ply file)
load:
# whether allow geom training in stage 2
train_geo: False
# prob to invert background color during training (0 = always black, 1 = always white)
invert_bg_prob: 0.5


### GUI
gui: False
force_cuda_rast: False
# GUI resolution
H: 800
W: 800

### Gaussian splatting
num_pts: 5000
sh_degree: 0
position_lr_init: 0.001
position_lr_final: 0.00002
position_lr_delay_mult: 0.02
position_lr_max_steps: 500
feature_lr: 0.01
opacity_lr: 0.05
scaling_lr: 0.005
rotation_lr: 0.005
percent_dense: 0.1
density_start_iter: 100
density_end_iter: 3000
densification_interval: 100
opacity_reset_interval: 700
densify_grad_threshold: 0.5

### Textured Mesh
geom_lr: 0.0001
texture_lr: 0.2
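The config above is plain YAML whose keys are overridden on the command line as key=value pairs (see scripts/run.sh and scripts/runall.py later in this upload), and save_path: ??? is a mandatory placeholder that must be supplied at launch. As a minimal sketch, assuming an OmegaConf-style loader (OmegaConf and the loading code are assumptions here; main.py is not part of this upload), the merge could look like this:

# sketch: load configs/image.yaml and apply command-line overrides (OmegaConf assumed)
from omegaconf import OmegaConf

base = OmegaConf.load("configs/image.yaml")                   # the file added above
cli = OmegaConf.from_dotlist(["input=data/anya_rgba.png", "save_path=anya"])
opt = OmegaConf.merge(base, cli)                              # 'save_path: ???' must be resolved here
print(opt.input, opt.save_path, opt.lambda_zero123)           # -> data/anya_rgba.png anya 1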
configs/text.yaml
ADDED
@@ -0,0 +1,68 @@
### Input
# input rgba image path (default to None, can be load in GUI too)
input:
# input text prompt (default to None, can be input in GUI too)
prompt:
# input mesh for stage 2 (auto-search from stage 1 output path if None)
mesh:
# estimated elevation angle for input image
elevation: 0
# reference image resolution
ref_size: 256
# density thresh for mesh extraction
density_thresh: 1

### Output
outdir: logs
mesh_format: obj
save_path: ???

### Training
# guidance loss weights (0 to disable)
lambda_sd: 1
lambda_zero123: 0
# training batch size per iter
batch_size: 1
# training iterations for stage 1
iters: 500
# training iterations for stage 2
iters_refine: 50
# training camera radius
radius: 2.5
# training camera fovy
fovy: 49.1
# checkpoint to load for stage 1 (should be a ply file)
load:
# whether allow geom training in stage 2
train_geo: False
# prob to invert background color during training (0 = always black, 1 = always white)
invert_bg_prob: 0.5

### GUI
gui: False
force_cuda_rast: False
# GUI resolution
H: 800
W: 800

### Gaussian splatting
num_pts: 1000
sh_degree: 0
position_lr_init: 0.001
position_lr_final: 0.00002
position_lr_delay_mult: 0.02
position_lr_max_steps: 500
feature_lr: 0.01
opacity_lr: 0.05
scaling_lr: 0.005
rotation_lr: 0.005
percent_dense: 0.1
density_start_iter: 100
density_end_iter: 3000
densification_interval: 50
opacity_reset_interval: 700
densify_grad_threshold: 0.01

### Textured Mesh
geom_lr: 0.0001
texture_lr: 0.2
data/anya_rgba.png
ADDED
Git LFS Details
data/catstatue_rgba.png
ADDED
Git LFS Details
data/csm_luigi_rgba.png
ADDED
Git LFS Details
data/test.png
ADDED
Git LFS Details
data/zelda_rgba.png
ADDED
Git LFS Details
guidance/sd_utils.py
ADDED
@@ -0,0 +1,334 @@
from transformers import CLIPTextModel, CLIPTokenizer, logging
from diffusers import (
    AutoencoderKL,
    UNet2DConditionModel,
    PNDMScheduler,
    DDIMScheduler,
    StableDiffusionPipeline,
)
from diffusers.utils.import_utils import is_xformers_available

# suppress partial model loading warning
logging.set_verbosity_error()

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def seed_everything(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = True


class StableDiffusion(nn.Module):
    def __init__(
        self,
        device,
        fp16=True,
        vram_O=False,
        sd_version="2.1",
        hf_key=None,
        t_range=[0.02, 0.98],
    ):
        super().__init__()

        self.device = device
        self.sd_version = sd_version

        if hf_key is not None:
            print(f"[INFO] using hugging face custom model key: {hf_key}")
            model_key = hf_key
        elif self.sd_version == "2.1":
            model_key = "stabilityai/stable-diffusion-2-1-base"
        elif self.sd_version == "2.0":
            model_key = "stabilityai/stable-diffusion-2-base"
        elif self.sd_version == "1.5":
            model_key = "runwayml/stable-diffusion-v1-5"
        else:
            raise ValueError(
                f"Stable-diffusion version {self.sd_version} not supported."
            )

        self.dtype = torch.float16 if fp16 else torch.float32

        # Create model
        pipe = StableDiffusionPipeline.from_pretrained(
            model_key, torch_dtype=self.dtype
        )

        if vram_O:
            pipe.enable_sequential_cpu_offload()
            pipe.enable_vae_slicing()
            pipe.unet.to(memory_format=torch.channels_last)
            pipe.enable_attention_slicing(1)
            # pipe.enable_model_cpu_offload()
        else:
            pipe.to(device)

        self.vae = pipe.vae
        self.tokenizer = pipe.tokenizer
        self.text_encoder = pipe.text_encoder
        self.unet = pipe.unet

        self.scheduler = DDIMScheduler.from_pretrained(
            model_key, subfolder="scheduler", torch_dtype=self.dtype
        )

        del pipe

        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        self.min_step = int(self.num_train_timesteps * t_range[0])
        self.max_step = int(self.num_train_timesteps * t_range[1])
        self.alphas = self.scheduler.alphas_cumprod.to(self.device)  # for convenience

        self.embeddings = None

    @torch.no_grad()
    def get_text_embeds(self, prompts, negative_prompts):
        pos_embeds = self.encode_text(prompts)  # [1, 77, 768]
        neg_embeds = self.encode_text(negative_prompts)
        self.embeddings = torch.cat([neg_embeds, pos_embeds], dim=0)  # [2, 77, 768]

    def encode_text(self, prompt):
        # prompt: [str]
        inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        embeddings = self.text_encoder(inputs.input_ids.to(self.device))[0]
        return embeddings

    @torch.no_grad()
    def refine(self, pred_rgb,
               guidance_scale=100, steps=50, strength=0.8,
    ):

        batch_size = pred_rgb.shape[0]
        pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False)
        latents = self.encode_imgs(pred_rgb_512.to(self.dtype))
        # latents = torch.randn((1, 4, 64, 64), device=self.device, dtype=self.dtype)

        self.scheduler.set_timesteps(steps)
        init_step = int(steps * strength)
        latents = self.scheduler.add_noise(latents, torch.randn_like(latents), self.scheduler.timesteps[init_step])

        for i, t in enumerate(self.scheduler.timesteps[init_step:]):

            latent_model_input = torch.cat([latents] * 2)

            noise_pred = self.unet(
                latent_model_input, t, encoder_hidden_states=self.embeddings,
            ).sample

            noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        imgs = self.decode_latents(latents)  # [1, 3, 512, 512]
        return imgs

    def train_step(
        self,
        pred_rgb,
        step_ratio=None,
        guidance_scale=100,
        as_latent=False,
    ):

        batch_size = pred_rgb.shape[0]
        pred_rgb = pred_rgb.to(self.dtype)

        if as_latent:
            latents = F.interpolate(pred_rgb, (64, 64), mode="bilinear", align_corners=False) * 2 - 1
        else:
            # interp to 512x512 to be fed into vae.
            pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode="bilinear", align_corners=False)
            # encode image into latents with vae, requires grad!
            latents = self.encode_imgs(pred_rgb_512)

        if step_ratio is not None:
            # dreamtime-like
            # t = self.max_step - (self.max_step - self.min_step) * np.sqrt(step_ratio)
            t = np.round((1 - step_ratio) * self.num_train_timesteps).clip(self.min_step, self.max_step)
            t = torch.full((batch_size,), t, dtype=torch.long, device=self.device)
        else:
            t = torch.randint(self.min_step, self.max_step + 1, (batch_size,), dtype=torch.long, device=self.device)

        # w(t), sigma_t^2
        w = (1 - self.alphas[t]).view(batch_size, 1, 1, 1)

        # predict the noise residual with unet, NO grad!
        with torch.no_grad():
            # add noise
            noise = torch.randn_like(latents)
            latents_noisy = self.scheduler.add_noise(latents, noise, t)
            # pred noise
            latent_model_input = torch.cat([latents_noisy] * 2)
            tt = torch.cat([t] * 2)

            noise_pred = self.unet(
                latent_model_input, tt, encoder_hidden_states=self.embeddings.repeat(batch_size, 1, 1)
            ).sample

            # perform guidance (high scale from paper!)
            noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (
                noise_pred_pos - noise_pred_uncond
            )

            grad = w * (noise_pred - noise)
            grad = torch.nan_to_num(grad)

        # seems important to avoid NaN...
        # grad = grad.clamp(-1, 1)

        target = (latents - grad).detach()
        loss = 0.5 * F.mse_loss(latents.float(), target, reduction='sum') / latents.shape[0]

        return loss

    @torch.no_grad()
    def produce_latents(
        self,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        latents=None,
    ):
        if latents is None:
            latents = torch.randn(
                (
                    self.embeddings.shape[0] // 2,
                    self.unet.in_channels,
                    height // 8,
                    width // 8,
                ),
                device=self.device,
            )

        self.scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(self.scheduler.timesteps):
            # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
            latent_model_input = torch.cat([latents] * 2)
            # predict the noise residual
            noise_pred = self.unet(
                latent_model_input, t, encoder_hidden_states=self.embeddings
            ).sample

            # perform guidance
            noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (
                noise_pred_cond - noise_pred_uncond
            )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        return latents

    def decode_latents(self, latents):
        latents = 1 / self.vae.config.scaling_factor * latents

        imgs = self.vae.decode(latents).sample
        imgs = (imgs / 2 + 0.5).clamp(0, 1)

        return imgs

    def encode_imgs(self, imgs):
        # imgs: [B, 3, H, W]

        imgs = 2 * imgs - 1

        posterior = self.vae.encode(imgs).latent_dist
        latents = posterior.sample() * self.vae.config.scaling_factor

        return latents

    def prompt_to_img(
        self,
        prompts,
        negative_prompts="",
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        latents=None,
    ):
        if isinstance(prompts, str):
            prompts = [prompts]

        if isinstance(negative_prompts, str):
            negative_prompts = [negative_prompts]

        # Prompts -> text embeds
        self.get_text_embeds(prompts, negative_prompts)

        # Text embeds -> img latents
        latents = self.produce_latents(
            height=height,
            width=width,
            latents=latents,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
        )  # [1, 4, 64, 64]

        # Img latents -> imgs
        imgs = self.decode_latents(latents)  # [1, 3, 512, 512]

        # Img to Numpy
        imgs = imgs.detach().cpu().permute(0, 2, 3, 1).numpy()
        imgs = (imgs * 255).round().astype("uint8")

        return imgs


if __name__ == "__main__":
    import argparse
    import matplotlib.pyplot as plt

    parser = argparse.ArgumentParser()
    parser.add_argument("prompt", type=str)
    parser.add_argument("--negative", default="", type=str)
    parser.add_argument(
        "--sd_version",
        type=str,
        default="2.1",
        choices=["1.5", "2.0", "2.1"],
        help="stable diffusion version",
    )
    parser.add_argument(
        "--hf_key",
        type=str,
        default=None,
        help="hugging face Stable diffusion model key",
    )
    parser.add_argument("--fp16", action="store_true", help="use float16 for training")
    parser.add_argument(
        "--vram_O", action="store_true", help="optimization for low VRAM usage"
    )
    parser.add_argument("-H", type=int, default=512)
    parser.add_argument("-W", type=int, default=512)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--steps", type=int, default=50)
    opt = parser.parse_args()

    seed_everything(opt.seed)

    device = torch.device("cuda")

    sd = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key)

    imgs = sd.prompt_to_img(opt.prompt, opt.negative, opt.H, opt.W, opt.steps)

    # visualize image
    plt.imshow(imgs[0])
    plt.show()
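For context, a minimal sketch of how this StableDiffusion guidance class is typically driven in a score-distillation loop; the optimized tensor and the loop below are stand-ins, since the actual training loop (main.py, which renders the 3D Gaussians) is not part of this upload, and the import assumes guidance/ is on the Python path:

# sketch: score-distillation with the StableDiffusion class above (toy latent instead of a renderer)
import torch
from guidance.sd_utils import StableDiffusion   # assumes guidance/ is importable

device = torch.device("cuda")
guidance = StableDiffusion(device, fp16=True)
guidance.get_text_embeds(["a photo of a hamburger"], [""])

# stand-in for a differentiable rendering of the 3D scene, values in [0, 1]
pred_rgb = torch.rand(1, 3, 128, 128, device=device, requires_grad=True)
optimizer = torch.optim.Adam([pred_rgb], lr=1e-2)

for step in range(100):
    # SDS-style loss: pull the rendered image toward the denoiser's prediction
    loss = guidance.train_step(pred_rgb, step_ratio=step / 100)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()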
guidance/zero123_utils.py
ADDED
@@ -0,0 +1,226 @@
from transformers import CLIPTextModel, CLIPTokenizer, logging
from diffusers import (
    AutoencoderKL,
    UNet2DConditionModel,
    DDIMScheduler,
    StableDiffusionPipeline,
)
import torchvision.transforms.functional as TF

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

import sys
sys.path.append('./')

from zero123 import Zero123Pipeline


class Zero123(nn.Module):
    def __init__(self, device, fp16=True, t_range=[0.02, 0.98]):
        super().__init__()

        self.device = device
        self.fp16 = fp16
        self.dtype = torch.float16 if fp16 else torch.float32

        self.pipe = Zero123Pipeline.from_pretrained(
            # "bennyguo/zero123-diffusers",
            "bennyguo/zero123-xl-diffusers",
            # './model_cache/zero123_xl',
            variant="fp16_ema" if self.fp16 else None,
            torch_dtype=self.dtype,
        ).to(self.device)

        # for param in self.pipe.parameters():
        #     param.requires_grad = False

        self.pipe.image_encoder.eval()
        self.pipe.vae.eval()
        self.pipe.unet.eval()
        self.pipe.clip_camera_projection.eval()

        self.vae = self.pipe.vae
        self.unet = self.pipe.unet

        self.pipe.set_progress_bar_config(disable=True)

        self.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)
        self.num_train_timesteps = self.scheduler.config.num_train_timesteps

        self.min_step = int(self.num_train_timesteps * t_range[0])
        self.max_step = int(self.num_train_timesteps * t_range[1])
        self.alphas = self.scheduler.alphas_cumprod.to(self.device)  # for convenience

        self.embeddings = None

    @torch.no_grad()
    def get_img_embeds(self, x):
        # x: image tensor in [0, 1]
        x = F.interpolate(x, (256, 256), mode='bilinear', align_corners=False)
        x_pil = [TF.to_pil_image(image) for image in x]
        x_clip = self.pipe.feature_extractor(images=x_pil, return_tensors="pt").pixel_values.to(device=self.device, dtype=self.dtype)
        c = self.pipe.image_encoder(x_clip).image_embeds
        v = self.encode_imgs(x.to(self.dtype)) / self.vae.config.scaling_factor
        self.embeddings = [c, v]

    @torch.no_grad()
    def refine(self, pred_rgb, polar, azimuth, radius,
               guidance_scale=5, steps=50, strength=0.8,
    ):

        batch_size = pred_rgb.shape[0]

        self.scheduler.set_timesteps(steps)

        if strength == 0:
            init_step = 0
            latents = torch.randn((1, 4, 32, 32), device=self.device, dtype=self.dtype)
        else:
            init_step = int(steps * strength)
            pred_rgb_256 = F.interpolate(pred_rgb, (256, 256), mode='bilinear', align_corners=False)
            latents = self.encode_imgs(pred_rgb_256.to(self.dtype))
            latents = self.scheduler.add_noise(latents, torch.randn_like(latents), self.scheduler.timesteps[init_step])

        T = np.stack([np.deg2rad(polar), np.sin(np.deg2rad(azimuth)), np.cos(np.deg2rad(azimuth)), radius], axis=-1)
        T = torch.from_numpy(T).unsqueeze(1).to(self.dtype).to(self.device)  # [8, 1, 4]
        cc_emb = torch.cat([self.embeddings[0].repeat(batch_size, 1, 1), T], dim=-1)
        cc_emb = self.pipe.clip_camera_projection(cc_emb)
        cc_emb = torch.cat([cc_emb, torch.zeros_like(cc_emb)], dim=0)

        vae_emb = self.embeddings[1].repeat(batch_size, 1, 1, 1)
        vae_emb = torch.cat([vae_emb, torch.zeros_like(vae_emb)], dim=0)

        for i, t in enumerate(self.scheduler.timesteps[init_step:]):

            x_in = torch.cat([latents] * 2)
            t_in = torch.cat([t.view(1)] * 2).to(self.device)

            noise_pred = self.unet(
                torch.cat([x_in, vae_emb], dim=1),
                t_in.to(self.unet.dtype),
                encoder_hidden_states=cc_emb,
            ).sample

            noise_pred_cond, noise_pred_uncond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        imgs = self.decode_latents(latents)  # [1, 3, 256, 256]
        return imgs

    def train_step(self, pred_rgb, polar, azimuth, radius, step_ratio=None, guidance_scale=5, as_latent=False):
        # pred_rgb: tensor [1, 3, H, W] in [0, 1]

        batch_size = pred_rgb.shape[0]

        if as_latent:
            latents = F.interpolate(pred_rgb, (32, 32), mode='bilinear', align_corners=False) * 2 - 1
        else:
            pred_rgb_256 = F.interpolate(pred_rgb, (256, 256), mode='bilinear', align_corners=False)
            latents = self.encode_imgs(pred_rgb_256.to(self.dtype))

        if step_ratio is not None:
            # dreamtime-like
            # t = self.max_step - (self.max_step - self.min_step) * np.sqrt(step_ratio)
            t = np.round((1 - step_ratio) * self.num_train_timesteps).clip(self.min_step, self.max_step)
            t = torch.full((batch_size,), t, dtype=torch.long, device=self.device)
        else:
            t = torch.randint(self.min_step, self.max_step + 1, (batch_size,), dtype=torch.long, device=self.device)

        w = (1 - self.alphas[t]).view(batch_size, 1, 1, 1)

        with torch.no_grad():
            noise = torch.randn_like(latents)
            latents_noisy = self.scheduler.add_noise(latents, noise, t)

            x_in = torch.cat([latents_noisy] * 2)
            t_in = torch.cat([t] * 2)

            T = np.stack([np.deg2rad(polar), np.sin(np.deg2rad(azimuth)), np.cos(np.deg2rad(azimuth)), radius], axis=-1)
            T = torch.from_numpy(T).unsqueeze(1).to(self.dtype).to(self.device)  # [8, 1, 4]
            cc_emb = torch.cat([self.embeddings[0].repeat(batch_size, 1, 1), T], dim=-1)
            cc_emb = self.pipe.clip_camera_projection(cc_emb)
            cc_emb = torch.cat([cc_emb, torch.zeros_like(cc_emb)], dim=0)

            vae_emb = self.embeddings[1].repeat(batch_size, 1, 1, 1)
            vae_emb = torch.cat([vae_emb, torch.zeros_like(vae_emb)], dim=0)

            noise_pred = self.unet(
                torch.cat([x_in, vae_emb], dim=1),
                t_in.to(self.unet.dtype),
                encoder_hidden_states=cc_emb,
            ).sample

        noise_pred_cond, noise_pred_uncond = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

        grad = w * (noise_pred - noise)
        grad = torch.nan_to_num(grad)

        target = (latents - grad).detach()
        loss = 0.5 * F.mse_loss(latents.float(), target, reduction='sum')

        return loss


    def decode_latents(self, latents):
        latents = 1 / self.vae.config.scaling_factor * latents

        imgs = self.vae.decode(latents).sample
        imgs = (imgs / 2 + 0.5).clamp(0, 1)

        return imgs

    def encode_imgs(self, imgs, mode=False):
        # imgs: [B, 3, H, W]

        imgs = 2 * imgs - 1

        posterior = self.vae.encode(imgs).latent_dist
        if mode:
            latents = posterior.mode()
        else:
            latents = posterior.sample()
        latents = latents * self.vae.config.scaling_factor

        return latents


if __name__ == '__main__':
    import cv2
    import argparse
    import numpy as np
    import matplotlib.pyplot as plt

    parser = argparse.ArgumentParser()

    parser.add_argument('input', type=str)
    parser.add_argument('--polar', type=float, default=0, help='delta polar angle in [-90, 90]')
    parser.add_argument('--azimuth', type=float, default=0, help='delta azimuth angle in [-180, 180]')
    parser.add_argument('--radius', type=float, default=0, help='delta camera radius multiplier in [-0.5, 0.5]')

    opt = parser.parse_args()

    device = torch.device('cuda')

    print(f'[INFO] loading image from {opt.input} ...')
    image = cv2.imread(opt.input, cv2.IMREAD_UNCHANGED)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (256, 256), interpolation=cv2.INTER_AREA)
    image = image.astype(np.float32) / 255.0
    image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0).contiguous().to(device)

    print(f'[INFO] loading model ...')
    zero123 = Zero123(device)

    print(f'[INFO] running model ...')
    zero123.get_img_embeds(image)

    while True:
        outputs = zero123.refine(image, polar=[opt.polar], azimuth=[opt.azimuth], radius=[opt.radius], strength=0)
        plt.imshow(outputs.float().cpu().numpy().transpose(0, 2, 3, 1)[0])
        plt.show()
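Similarly, a minimal sketch of using the Zero123 wrapper above as novel-view guidance for a single reference image (the file's own __main__ block shows the same pattern interactively); the random tensors are stand-ins, the camera arguments are deltas in degrees relative to the reference view, and the import again assumes guidance/ and zero123.py are on the Python path:

# sketch: Zero123 SDS guidance for one reference image (stand-in tensors, paths assumed importable)
import numpy as np
import torch
from guidance.zero123_utils import Zero123

device = torch.device("cuda")
guidance = Zero123(device)

ref = torch.rand(1, 3, 256, 256, device=device)   # stand-in for the input RGB image in [0, 1]
guidance.get_img_embeds(ref)                      # cache CLIP + VAE embeddings of the reference view

pred_rgb = torch.rand(1, 3, 128, 128, device=device, requires_grad=True)
loss = guidance.train_step(
    pred_rgb,
    polar=np.array([10.0]),     # delta elevation in degrees
    azimuth=np.array([30.0]),   # delta azimuth in degrees
    radius=np.array([0.0]),     # delta camera radius
)
loss.backward()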
scripts/convert_obj_to_video.py
ADDED
@@ -0,0 +1,20 @@
import os
import glob
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dir', default='logs', type=str, help='Directory where obj files are stored')
parser.add_argument('--out', default='videos', type=str, help='Directory where videos will be saved')
args = parser.parse_args()

out = args.out
os.makedirs(out, exist_ok=True)

files = glob.glob(f'{args.dir}/*.obj')
for f in files:
    name = os.path.basename(f)
    # first stage model, ignore
    if name.endswith('_mesh.obj'):
        continue
    print(f'[INFO] process {name}')
    os.system(f"python -m kiui.render {f} --save_video {os.path.join(out, name.replace('.obj', '.mp4'))} ")
scripts/run.sh
ADDED
@@ -0,0 +1,5 @@
export CUDA_VISIBLE_DEVICES=5

python main.py --config configs/image.yaml input=data/anya_rgba.png save_path=anya
python main2.py --config configs/image.yaml input=data/anya_rgba.png save_path=anya
python -m kiui.render logs/anya.obj --save_video videos/anya.mp4 --wogui
scripts/run_sd.sh
ADDED
@@ -0,0 +1,31 @@
export CUDA_VISIBLE_DEVICES=6

# easy samples
python main.py --config configs/text.yaml prompt="a photo of an icecream" save_path=icecream
python main2.py --config configs/text.yaml prompt="a photo of an icecream" save_path=icecream
python main.py --config configs/text.yaml prompt="a ripe strawberry" save_path=strawberry
python main2.py --config configs/text.yaml prompt="a ripe strawberry" save_path=strawberry
python main.py --config configs/text.yaml prompt="a blue tulip" save_path=tulip
python main2.py --config configs/text.yaml prompt="a blue tulip" save_path=tulip

python main.py --config configs/text.yaml prompt="a golden goblet" save_path=goblet
python main2.py --config configs/text.yaml prompt="a golden goblet" save_path=goblet
python main.py --config configs/text.yaml prompt="a photo of a hamburger" save_path=hamburger
python main2.py --config configs/text.yaml prompt="a photo of a hamburger" save_path=hamburger
python main.py --config configs/text.yaml prompt="a delicious croissant" save_path=croissant
python main2.py --config configs/text.yaml prompt="a delicious croissant" save_path=croissant

# hard samples
python main.py --config configs/text.yaml prompt="a baby bunny sitting on top of a stack of pancake" save_path=bunny_pancake
python main2.py --config configs/text.yaml prompt="a baby bunny sitting on top of a stack of pancake" save_path=bunny_pancake
python main.py --config configs/text.yaml prompt="a typewriter" save_path=typewriter
python main2.py --config configs/text.yaml prompt="a typewriter" save_path=typewriter
python main.py --config configs/text.yaml prompt="a pineapple" save_path=pineapple
python main2.py --config configs/text.yaml prompt="a pineapple" save_path=pineapple

python main.py --config configs/text.yaml prompt="a model of a house in Tudor style" save_path=tudor_house
python main2.py --config configs/text.yaml prompt="a model of a house in Tudor style" save_path=tudor_house
python main.py --config configs/text.yaml prompt="a lionfish" save_path=lionfish
python main2.py --config configs/text.yaml prompt="a lionfish" save_path=lionfish
python main.py --config configs/text.yaml prompt="a bunch of yellow rose, highly detailed" save_path=rose
python main2.py --config configs/text.yaml prompt="a bunch of yellow rose, highly detailed" save_path=rose
scripts/runall.py
ADDED
@@ -0,0 +1,48 @@
import os
import glob
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dir', default='data', type=str, help='Directory where processed images are stored')
parser.add_argument('--out', default='logs', type=str, help='Directory where obj files will be saved')
parser.add_argument('--video-out', default='videos', type=str, help='Directory where videos will be saved')
parser.add_argument('--gpu', default=0, type=int, help='ID of GPU to use')
parser.add_argument('--elevation', default=0, type=int, help='Elevation angle of view in degrees')
parser.add_argument('--config', default='configs', type=str, help='Path to config directory, which contains image.yaml')
args = parser.parse_args()

files = glob.glob(f'{args.dir}/*_rgba.png')
configs_dir = args.config

# check if image.yaml exists
if not os.path.exists(os.path.join(configs_dir, 'image.yaml')):
    raise FileNotFoundError(
        f'image.yaml not found in {configs_dir} directory. Please check if the directory is correct.'
    )

# create output directories if not exists
out_dir = args.out
os.makedirs(out_dir, exist_ok=True)
video_dir = args.video_out
os.makedirs(video_dir, exist_ok=True)


for file in files:
    name = os.path.basename(file).replace("_rgba.png", "")
    print(f'======== processing {name} ========')
    # first stage
    os.system(f'CUDA_VISIBLE_DEVICES={args.gpu} python main.py '
              f'--config {configs_dir}/image.yaml '
              f'input={file} '
              f'save_path={name} elevation={args.elevation}')
    # second stage
    os.system(f'CUDA_VISIBLE_DEVICES={args.gpu} python main2.py '
              f'--config {configs_dir}/image.yaml '
              f'input={file} '
              f'save_path={name} elevation={args.elevation}')
    # export video
    mesh_path = os.path.join(out_dir, f'{name}.obj')
    os.system(f'python -m kiui.render {mesh_path} '
              f'--save_video {video_dir}/{name}.mp4 '
              f'--wogui '
              f'--elevation {args.elevation}')
scripts/runall_sd.py
ADDED
@@ -0,0 +1,45 @@
import os
import glob
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default=0, type=int)
args = parser.parse_args()

prompts = [
    ('strawberry', 'a ripe strawberry'),
    ('cactus_pot', 'a small saguaro cactus planted in a clay pot'),
    ('hamburger', 'a delicious hamburger'),
    ('icecream', 'an icecream'),
    ('tulip', 'a blue tulip'),
    ('pineapple', 'a ripe pineapple'),
    ('goblet', 'a golden goblet'),
    # ('squitopus', 'a squirrel-octopus hybrid'),
    # ('astronaut', 'Michelangelo style statue of an astronaut'),
    # ('teddy_bear', 'a teddy bear'),
    # ('corgi_nurse', 'a plush toy of a corgi nurse'),
    # ('teapot', 'a blue and white porcelain teapot'),
    # ('skull', "a human skull"),
    # ('penguin', 'a penguin'),
    # ('campfire', 'a campfire'),
    # ('donut', 'a donut with pink icing'),
    # ('cupcake', 'a birthday cupcake'),
    # ('pie', 'shepherds pie'),
    # ('cone', 'a traffic cone'),
    # ('schoolbus', 'a schoolbus'),
    # ('avocado_chair', 'a chair that looks like an avocado'),
    # ('glasses', 'a pair of sunglasses')
    # ('potion', 'a bottle of green potion'),
    # ('chalice', 'a delicate chalice'),
]

for name, prompt in prompts:
    print(f'======== processing {name} ========')
    # first stage
    os.system(f'CUDA_VISIBLE_DEVICES={args.gpu} python main.py --config configs/text.yaml prompt="{prompt}" save_path={name}')
    # second stage
    os.system(f'CUDA_VISIBLE_DEVICES={args.gpu} python main2.py --config configs/text.yaml prompt="{prompt}" save_path={name}')
    # export video
    mesh_path = os.path.join('logs', f'{name}.obj')
    os.makedirs('videos', exist_ok=True)
    os.system(f'python -m kiui.render {mesh_path} --save_video videos/{name}.mp4 --wogui')
simple-knn/ext.cpp
ADDED
@@ -0,0 +1,17 @@
/*
 * Copyright (C) 2023, Inria
 * GRAPHDECO research group, https://team.inria.fr/graphdeco
 * All rights reserved.
 *
 * This software is free for non-commercial, research and evaluation use
 * under the terms of the LICENSE.md file.
 *
 * For inquiries contact [email protected]
 */

#include <torch/extension.h>
#include "spatial.h"

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("distCUDA2", &distCUDA2);
}
simple-knn/setup.py
ADDED
@@ -0,0 +1,35 @@
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#

from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
import os

cxx_compiler_flags = []

if os.name == 'nt':
    cxx_compiler_flags.append("/wd4624")

setup(
    name="simple_knn",
    ext_modules=[
        CUDAExtension(
            name="simple_knn._C",
            sources=[
            "spatial.cu",
            "simple_knn.cu",
            "ext.cpp"],
            extra_compile_args={"nvcc": [], "cxx": cxx_compiler_flags})
        ],
    cmdclass={
        'build_ext': BuildExtension
    }
)
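This setup.py builds a single Python-visible op, distCUDA2, bound in ext.cpp above and implemented in simple_knn.cu / spatial.cu below. A small usage sketch, assuming the extension has been built and installed (for example with pip install ./simple-knn):

# sketch: calling the compiled op; assumes the simple_knn package was built and installed
import torch
from simple_knn._C import distCUDA2

points = torch.rand(10000, 3, device="cuda", dtype=torch.float32)
# per point, the mean of the squared distances to its 3 nearest neighbours; shape [10000]
dist2 = distCUDA2(points)
print(dist2.shape, dist2.min().item(), dist2.mean().item())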
simple-knn/simple_knn.cu
ADDED
@@ -0,0 +1,221 @@
/*
 * Copyright (C) 2023, Inria
 * GRAPHDECO research group, https://team.inria.fr/graphdeco
 * All rights reserved.
 *
 * This software is free for non-commercial, research and evaluation use
 * under the terms of the LICENSE.md file.
 *
 * For inquiries contact [email protected]
 */

#define BOX_SIZE 1024

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "simple_knn.h"
#include <cub/cub.cuh>
#include <cub/device/device_radix_sort.cuh>
#include <vector>
#include <cuda_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#define __CUDACC__
#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>

namespace cg = cooperative_groups;

struct CustomMin
{
    __device__ __forceinline__
    float3 operator()(const float3& a, const float3& b) const {
        return { min(a.x, b.x), min(a.y, b.y), min(a.z, b.z) };
    }
};

struct CustomMax
{
    __device__ __forceinline__
    float3 operator()(const float3& a, const float3& b) const {
        return { max(a.x, b.x), max(a.y, b.y), max(a.z, b.z) };
    }
};

__host__ __device__ uint32_t prepMorton(uint32_t x)
{
    x = (x | (x << 16)) & 0x030000FF;
    x = (x | (x << 8)) & 0x0300F00F;
    x = (x | (x << 4)) & 0x030C30C3;
    x = (x | (x << 2)) & 0x09249249;
    return x;
}

__host__ __device__ uint32_t coord2Morton(float3 coord, float3 minn, float3 maxx)
{
    uint32_t x = prepMorton(((coord.x - minn.x) / (maxx.x - minn.x)) * ((1 << 10) - 1));
    uint32_t y = prepMorton(((coord.y - minn.y) / (maxx.y - minn.y)) * ((1 << 10) - 1));
    uint32_t z = prepMorton(((coord.z - minn.z) / (maxx.z - minn.z)) * ((1 << 10) - 1));

    return x | (y << 1) | (z << 2);
}

__global__ void coord2Morton(int P, const float3* points, float3 minn, float3 maxx, uint32_t* codes)
{
    auto idx = cg::this_grid().thread_rank();
    if (idx >= P)
        return;

    codes[idx] = coord2Morton(points[idx], minn, maxx);
}

struct MinMax
{
    float3 minn;
    float3 maxx;
};

__global__ void boxMinMax(uint32_t P, float3* points, uint32_t* indices, MinMax* boxes)
{
    auto idx = cg::this_grid().thread_rank();

    MinMax me;
    if (idx < P)
    {
        me.minn = points[indices[idx]];
        me.maxx = points[indices[idx]];
    }
    else
    {
        me.minn = { FLT_MAX, FLT_MAX, FLT_MAX };
        me.maxx = { -FLT_MAX,-FLT_MAX,-FLT_MAX };
    }

    __shared__ MinMax redResult[BOX_SIZE];

    for (int off = BOX_SIZE / 2; off >= 1; off /= 2)
    {
        if (threadIdx.x < 2 * off)
            redResult[threadIdx.x] = me;
        __syncthreads();

        if (threadIdx.x < off)
        {
            MinMax other = redResult[threadIdx.x + off];
            me.minn.x = min(me.minn.x, other.minn.x);
            me.minn.y = min(me.minn.y, other.minn.y);
            me.minn.z = min(me.minn.z, other.minn.z);
            me.maxx.x = max(me.maxx.x, other.maxx.x);
            me.maxx.y = max(me.maxx.y, other.maxx.y);
            me.maxx.z = max(me.maxx.z, other.maxx.z);
        }
        __syncthreads();
    }

    if (threadIdx.x == 0)
        boxes[blockIdx.x] = me;
}

__device__ __host__ float distBoxPoint(const MinMax& box, const float3& p)
{
    float3 diff = { 0, 0, 0 };
    if (p.x < box.minn.x || p.x > box.maxx.x)
        diff.x = min(abs(p.x - box.minn.x), abs(p.x - box.maxx.x));
    if (p.y < box.minn.y || p.y > box.maxx.y)
        diff.y = min(abs(p.y - box.minn.y), abs(p.y - box.maxx.y));
    if (p.z < box.minn.z || p.z > box.maxx.z)
        diff.z = min(abs(p.z - box.minn.z), abs(p.z - box.maxx.z));
    return diff.x * diff.x + diff.y * diff.y + diff.z * diff.z;
}

template<int K>
__device__ void updateKBest(const float3& ref, const float3& point, float* knn)
{
    float3 d = { point.x - ref.x, point.y - ref.y, point.z - ref.z };
    float dist = d.x * d.x + d.y * d.y + d.z * d.z;
    for (int j = 0; j < K; j++)
    {
        if (knn[j] > dist)
        {
            float t = knn[j];
            knn[j] = dist;
            dist = t;
        }
    }
}

__global__ void boxMeanDist(uint32_t P, float3* points, uint32_t* indices, MinMax* boxes, float* dists)
{
    int idx = cg::this_grid().thread_rank();
    if (idx >= P)
        return;

    float3 point = points[indices[idx]];
    float best[3] = { FLT_MAX, FLT_MAX, FLT_MAX };

    for (int i = max(0, idx - 3); i <= min(P - 1, idx + 3); i++)
    {
        if (i == idx)
            continue;
        updateKBest<3>(point, points[indices[i]], best);
    }

    float reject = best[2];
    best[0] = FLT_MAX;
    best[1] = FLT_MAX;
    best[2] = FLT_MAX;

    for (int b = 0; b < (P + BOX_SIZE - 1) / BOX_SIZE; b++)
    {
        MinMax box = boxes[b];
        float dist = distBoxPoint(box, point);
        if (dist > reject || dist > best[2])
            continue;

        for (int i = b * BOX_SIZE; i < min(P, (b + 1) * BOX_SIZE); i++)
        {
            if (i == idx)
                continue;
            updateKBest<3>(point, points[indices[i]], best);
        }
    }
    dists[indices[idx]] = (best[0] + best[1] + best[2]) / 3.0f;
}

void SimpleKNN::knn(int P, float3* points, float* meanDists)
{
    float3* result;
    cudaMalloc(&result, sizeof(float3));
    size_t temp_storage_bytes;

    float3 init = { 0, 0, 0 }, minn, maxx;

    cub::DeviceReduce::Reduce(nullptr, temp_storage_bytes, points, result, P, CustomMin(), init);
    thrust::device_vector<char> temp_storage(temp_storage_bytes);

    cub::DeviceReduce::Reduce(temp_storage.data().get(), temp_storage_bytes, points, result, P, CustomMin(), init);
    cudaMemcpy(&minn, result, sizeof(float3), cudaMemcpyDeviceToHost);

    cub::DeviceReduce::Reduce(temp_storage.data().get(), temp_storage_bytes, points, result, P, CustomMax(), init);
    cudaMemcpy(&maxx, result, sizeof(float3), cudaMemcpyDeviceToHost);

    thrust::device_vector<uint32_t> morton(P);
    thrust::device_vector<uint32_t> morton_sorted(P);
    coord2Morton << <(P + 255) / 256, 256 >> > (P, points, minn, maxx, morton.data().get());

    thrust::device_vector<uint32_t> indices(P);
    thrust::sequence(indices.begin(), indices.end());
    thrust::device_vector<uint32_t> indices_sorted(P);

    cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes, morton.data().get(), morton_sorted.data().get(), indices.data().get(), indices_sorted.data().get(), P);
    temp_storage.resize(temp_storage_bytes);

    cub::DeviceRadixSort::SortPairs(temp_storage.data().get(), temp_storage_bytes, morton.data().get(), morton_sorted.data().get(), indices.data().get(), indices_sorted.data().get(), P);

    uint32_t num_boxes = (P + BOX_SIZE - 1) / BOX_SIZE;
    thrust::device_vector<MinMax> boxes(num_boxes);
    boxMinMax << <num_boxes, BOX_SIZE >> > (P, points, indices_sorted.data().get(), boxes.data().get());
    boxMeanDist << <num_boxes, BOX_SIZE >> > (P, points, indices_sorted.data().get(), boxes.data().get(), meanDists);

    cudaFree(result);
}
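The kernel above sorts points along a Morton curve, computes per-block bounding boxes, and for each point averages the squared distances to its three nearest neighbours, skipping blocks whose box distance exceeds a coarse estimate taken from the point's Morton-order neighbourhood. A brute-force reference for the quantity it returns, usable as a sanity check on small point sets:

# sketch: brute-force reference for SimpleKNN::knn / distCUDA2 on a small point set
import numpy as np

def mean_knn_sq_dist(points, k=3):
    # points: [P, 3]; returns, per point, the mean squared distance to its k nearest neighbours
    d2 = ((points[:, None, :] - points[None, :, :]) ** 2).sum(-1)   # [P, P] squared distances
    np.fill_diagonal(d2, np.inf)                                    # exclude the point itself
    nearest = np.sort(d2, axis=1)[:, :k]                            # k smallest squared distances
    return nearest.mean(axis=1)

pts = np.random.rand(512, 3).astype(np.float32)
print(mean_knn_sq_dist(pts)[:5])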
simple-knn/simple_knn.h
ADDED
@@ -0,0 +1,21 @@
/*
 * Copyright (C) 2023, Inria
 * GRAPHDECO research group, https://team.inria.fr/graphdeco
 * All rights reserved.
 *
 * This software is free for non-commercial, research and evaluation use
 * under the terms of the LICENSE.md file.
 *
 * For inquiries contact [email protected]
 */

#ifndef SIMPLEKNN_H_INCLUDED
#define SIMPLEKNN_H_INCLUDED

class SimpleKNN
{
public:
    static void knn(int P, float3* points, float* meanDists);
};

#endif
simple-knn/simple_knn/.gitkeep
ADDED
File without changes
simple-knn/spatial.cu
ADDED
@@ -0,0 +1,26 @@
/*
 * Copyright (C) 2023, Inria
 * GRAPHDECO research group, https://team.inria.fr/graphdeco
 * All rights reserved.
 *
 * This software is free for non-commercial, research and evaluation use
 * under the terms of the LICENSE.md file.
 *
 * For inquiries contact [email protected]
 */

#include "spatial.h"
#include "simple_knn.h"

torch::Tensor
distCUDA2(const torch::Tensor& points)
{
  const int P = points.size(0);

  auto float_opts = points.options().dtype(torch::kFloat32);
  torch::Tensor means = torch::full({P}, 0.0, float_opts);

  SimpleKNN::knn(P, (float3*)points.contiguous().data<float>(), means.contiguous().data<float>());

  return means;
}
simple-knn/spatial.h
ADDED
@@ -0,0 +1,14 @@
/*
 * Copyright (C) 2023, Inria
 * GRAPHDECO research group, https://team.inria.fr/graphdeco
 * All rights reserved.
 *
 * This software is free for non-commercial, research and evaluation use
 * under the terms of the LICENSE.md file.
 *
 * For inquiries contact [email protected]
 */

#include <torch/extension.h>

torch::Tensor distCUDA2(const torch::Tensor& points);