Compare commits

...

10 commits

Author SHA1 Message Date
Laurent FAINSIN 4beb963aff print number of parameters of network 2023-05-22 09:33:10 +02:00
Laurent FAINSIN c09ff5b20f feat: remove deformation prediction, add selectable cardinality when sampling 2023-04-17 16:16:11 +02:00
Laurent FAINSIN b21d3a2fe2 fix: move translation instructions at the right place 2023-04-17 13:46:48 +02:00
Laurent FAINSIN 091d2074ca feat: modify test_generation to translate back sampled blade from norminal deformations 2023-04-17 10:35:46 +02:00
Laurent FAINSIN a3e23f59c5 feat: train on deformation instead of absolute position 2023-04-14 14:00:11 +02:00
Laurent FAINSIN 859da1a847 feat: dirty script to compare recenter generated blades 2023-04-14 13:09:44 +02:00
Laurent FAINSIN 4e59146e86 fix: ignore *.vtk 2023-04-14 13:09:16 +02:00
Laurent FAINSIN 004322ba53 feat: add ninja as deps 2023-04-13 16:21:27 +02:00
Laurent FAINSIN 04ef18105e chore: ignore .ruff_cache 2023-04-13 10:00:03 +02:00
Laurent FAINSIN 7675581b8f fix: change MEAN and STD in test_rotor37_data.py 2023-04-13 09:58:42 +02:00
8 changed files with 57 additions and 6 deletions

3
.gitignore vendored

@@ -13,6 +13,9 @@ ShapeNetCore.v2.PC15k*
checkpoints
*.txt
*.vtk
.ruff_cache
# https://github.com/github/gitignore/blob/main/Python.gitignore
# Basic .gitignore for a python repo.

2
.vscode/launch.json vendored

@@ -13,7 +13,7 @@
"justMyCode": true,
"args": [
"--model",
"output/train_generation/2023-04-11-23-38-23/epoch_99.pth",
"/gpfs_new/data/users/lfainsin/PVD/output/train_generation/2023-04-14-15-02-19/epoch_499.pth",
"--generate",
"True",
"--workers",

View file

@@ -33,5 +33,6 @@
"**/.hg/store/**": true,
"**/output/**": true,
"**/ShapeNetCore.v2.PC15k/**": true,
"**/.ruff_cache/**": true,
}
}

39
compare_samples.py Normal file

@@ -0,0 +1,39 @@
from pathlib import Path
import numpy as np
import pyvista as pv
VTKFILE_NOMINAL = Path("~/data/stage-laurent-f/datasets/Rotor37/processed/nominal_blade_rotated.vtk")
# load nominal blade
nominal = pv.read(VTKFILE_NOMINAL)
# for each generated/sampled blade
gen_files = Path("./output").glob("gen*.txt")
pc_files = Path("./output").glob("pc*.txt")
files = list(gen_files) + list(pc_files)
for gen_file in files:
    # load numpy txt
    blade = np.loadtxt(gen_file)
    # get top and bottom index (filter outliers a bit)
    top = int(blade.shape[0] * 0.001)
    bot = int(blade.shape[0] * 0.999)
    # sort blade coordinates
    sorted_blade = np.sort(blade, axis=0)
    # get center of blade
    center = (sorted_blade[bot, :] + sorted_blade[top, :]) / 2
    # translate blade to world origin
    blade -= center
    # save to txt
    np.savetxt(f"output/test_{gen_file.stem}.txt", blade)
    # swap nominal points to blade points
    nominal.points = blade
    # save altered blade to vtk
    nominal.save(f"output/test_{gen_file.stem}.vtk")
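
Note: the recentering above takes the rows 0.1% and 99.9% of the way into each independently sorted coordinate column and averages them, i.e. a per-axis percentile midpoint that is robust to outliers. A minimal standalone sketch of the same idea (function and array names are illustrative, not taken from the script):

import numpy as np

def robust_center(points, low=0.1, high=99.9):
    # per-axis midpoint of the [low, high] percentile range, robust to outliers
    lo = np.percentile(points, low, axis=0)
    hi = np.percentile(points, high, axis=0)
    return (lo + hi) / 2

cloud = np.random.randn(30_000, 3) + 5.0
cloud[:10] += 100.0                        # a few outliers barely move the center
recentered = cloud - robust_center(cloud)
print(recentered.mean(axis=0))             # roughly the origin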

View file

@@ -1,5 +1,6 @@
import datasets
import numpy as np
from rotor37_data import MEAN, STD
test_ds = datasets.load_dataset("dataset/rotor37_data.py", split="test")
test_ds = test_ds.with_format("torch")
@@ -14,10 +15,10 @@ for idx, blade in enumerate(test_ds):
    pc = blade["positions"]
    # unnormalize
    pc = pc * blade["std"] + blade["mean"]
    pc = pc * STD + MEAN
    print(f"Saving point cloud {idx}...")
    np.savetxt(f"pc_{idx}.txt", pc)
    np.savetxt(f"output/pc_{idx}.txt", pc)
    if idx >= 10:
        break
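
Note: the change above replaces the per-blade mean/std stored alongside each sample with the dataset-wide MEAN and STD constants imported from rotor37_data. A minimal sketch of the implied convention, with placeholder values (the real constants are defined in rotor37_data.py):

import numpy as np

MEAN = np.array([0.0, 0.0, 0.3])   # placeholder values, not the real constants
STD = np.array([0.05, 0.05, 0.05])

def normalize(pc):
    # dataset-wide z-scoring applied when the dataset is built
    return (pc - MEAN) / STD

def unnormalize(pc):
    # inverse transform applied before saving point clouds, as in the diff
    return pc * STD + MEAN

pc = np.random.rand(100, 3)
assert np.allclose(unnormalize(normalize(pc)), pc)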

View file

@@ -7,8 +7,12 @@ channels:
- nvidia
- conda-forge
- pyg
- HuggingFace
dependencies:
#---# compiler
- ninja
#---# python libs
- python
- pytorch
- torchvision
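
Note: ninja is added under a compiler group; a plausible reason (an assumption, not stated in the diff) is that the point-voxel CUDA kernels used by PVD are JIT-compiled through torch.utils.cpp_extension, whose builder requires ninja. A minimal sketch of that pattern (source file names are placeholders):

# ninja is required by torch's JIT C++/CUDA extension builder
from torch.utils.cpp_extension import is_ninja_available, load

assert is_ninja_available(), "ninja must be on PATH to build the CUDA kernels"

# typical usage, with placeholder source files rather than PVD's actual ones:
# backend = load(name="pv_ops", sources=["bindings.cpp", "ops.cu"], verbose=True)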

View file

@@ -323,6 +323,9 @@ class Model(nn.Module):
            extra_feature_channels=0,
        )
        pytorch_total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        print("Total params: {}".format(pytorch_total_params))

    def prior_kl(self, x0):
        return self.diffusion._prior_bpd(x0)
@@ -510,7 +513,8 @@ def generate(model, opt):
            x = data["positions"].transpose(1, 2)
            # m, s = data["mean"].float(), data["std"].float()
            gen = model.gen_samples(x.shape, "cuda", clip_denoised=False).detach().cpu()
            shape = torch.Size((*x.shape[:-1], 75000))
            gen = model.gen_samples(shape, "cuda", clip_denoised=False).detach().cpu()
            gen = gen.transpose(1, 2).contiguous()
            x = x.transpose(1, 2).contiguous()
@@ -528,7 +532,7 @@ def generate(model, opt):
                pc = pc * STD + MEAN
                print(f"Saving point cloud {idx}...")
                np.savetxt(f"gen_{i}_{idx}.txt", pc)
                np.savetxt(f"output/gen_{i}_{idx}.txt", pc)
                if idx >= 10:
                    break
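
Note: the generate() changes above fix the sampled cardinality at 75000 points instead of reusing the reference cloud's point count: x is (batch, 3, n_points) after the transpose, so the requested shape keeps the batch and channel dimensions and only replaces the last one. A shape-only sketch with dummy tensors:

import torch

x = torch.zeros(4, 2048, 3).transpose(1, 2)  # (batch, 3, n_points) dummy reference batch
shape = torch.Size((*x.shape[:-1], 75000))   # keep batch and channel dims, swap the point count
print(shape)                                 # torch.Size([4, 3, 75000])

gen = torch.randn(shape)                     # stand-in for model.gen_samples(shape, "cuda", ...)
gen = gen.transpose(1, 2).contiguous()       # back to (batch, 75000, 3) before saving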

View file

@@ -708,7 +708,6 @@ def train(gpu, opt, output_dir):
        lr_scheduler.step(epoch)

        for i, data in enumerate(dataloader):
            # x = data["train_points"].transpose(1, 2)
            x = data["positions"].transpose(1, 2)
            noises_batch = torch.randn_like(x)
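
Note: together with commits a3e23f59c5 and 091d2074ca, the training loop now consumes deformations relative to the nominal blade rather than absolute coordinates, and sampled deformations are translated back onto the nominal geometry at test time. A minimal sketch of that round trip, assuming "positions" stores blade minus nominal (all arrays below are stand-ins):

import numpy as np

nominal = np.random.rand(29773, 3)                        # stand-in for the nominal blade points
blade = nominal + 0.01 * np.random.randn(*nominal.shape)  # a deformed blade

deformation = blade - nominal      # what the network trains on instead of absolute positions

sampled = deformation              # stand-in for a deformation sampled by the diffusion model
reconstructed = nominal + sampled  # translate the sample back onto the nominal blade
assert np.allclose(reconstructed, blade)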