chore: various files from 3 weeks ago

Former-commit-id: 502f9821bddd45574a46cdfe017524a6fac9f102 [formerly 5e2f3679b65b8ae554e1cdd3e1c13ae190e07bd0]
Former-commit-id: 6337e42ef9b051064a9cb2716ef37b272b09673e
Laurent Fainsin 2022-08-23 13:09:44 +02:00
parent fb7e33c437
commit 0cb52febd7
10 changed files with 486 additions and 1668 deletions

3
.devcontainer.tmp/.env Normal file
View file

@@ -0,0 +1,3 @@
WANDB_VOLUME_PATH=/media/disk2/lfainsin/wandb-local/
WANDB_BASE_URL=http://wandb:8080
WANDB_API_KEY=XXXX

View file

@@ -0,0 +1,12 @@
FROM python:3
WORKDIR /workspace
RUN pip install --upgrade pip --no-input
RUN curl -sSL https://install.python-poetry.org | python3 -
RUN echo 'export PATH="$PATH:/root/.local/bin"' >> /root/.bashrc
RUN echo '/root/.local/bin/poetry install --no-interaction' >> /root/.bashrc
RUN echo '/root/.local/bin/poetry shell' >> /root/.bashrc
SHELL ["/bin/bash", "-ec"]

View file

@@ -0,0 +1,6 @@
{
"name": "devcontainer-example",
"dockerComposeFile": "docker-compose.yml",
"service": "development",
"workspaceFolder": "/workspace"
}

View file

@@ -0,0 +1,24 @@
version: "3"
services:
# development container
development:
container_name: dev
hostname: dev
build:
context: .
dockerfile: Dockerfile
volumes:
- ..:/workspace
stdin_open: true
# wandb dashboard
wandb:
image: wandb/local
container_name: wandb-local
hostname: wandb-local
volumes:
- /media/disk2/lfainsin/wandb-local/:/vol
ports:
- 8080:8080
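
Outside of VS Code, the same two containers can be brought up by hand with the compose CLI. A minimal sketch, assuming this compose file sits in .devcontainer.tmp/ next to the .env above:

docker compose -f .devcontainer.tmp/docker-compose.yml up -d --build
docker compose -f .devcontainer.tmp/docker-compose.yml exec development bash
# the wandb/local dashboard is then reachable at http://localhost:8080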

View file

@@ -5,6 +5,7 @@
 "python.linting.lintOnSave": true,
 // "python.linting.flake8Enabled": true,
 // "python.linting.banditEnabled": true,
+"python.defaultInterpreterPath": ".venv/bin/python",
 "[python]": {
 "editor.codeActionsOnSave": {
 "source.organizeImports": true

1967
poetry.lock generated

File diff suppressed because it is too large

View file

@@ -14,11 +14,10 @@ python = ">=3.8,<3.11"
 pytorch-lightning = "^1.6.4"
 rich = "^12.4.4"
 scipy = "^1.8.1"
-torch = "^1.11.0"
+torch = "1.11.0"
 torchvision = "^0.12.0"
 tqdm = "^4.64.0"
 wandb = "^0.12.19"
-Pillow-SIMD = "^9.0.0"
 
 [tool.poetry.dev-dependencies]
 black = "^22.3.0"

View file

@@ -2,9 +2,18 @@
 "cells": [
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": 1,
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"/home/laurent_fainsin/Documents/unet/.venv/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+" from .autonotebook import tqdm as notebook_tqdm\n"
+]
+}
+],
 "source": [
 "import torch\n",
 "from unet.model import UNet\n"
@@ -12,7 +21,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 2,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -25,7 +34,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": 5,
 "metadata": {},
 "outputs": [
 {
@@ -34,54 +43,41 @@
 "<All keys matched successfully>"
 ]
 },
-"execution_count": 10,
+"execution_count": 5,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "net.load_state_dict(\n",
-" torch.load(\"../../checkpoints/best.pth\")\n",
+" torch.load(\"../../checkpoints/best.pth\", map_location=torch.device('cpu'))\n",
 ")\n"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 6,
 "metadata": {},
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
-"WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
-"WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
-"WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
-"WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
-"WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n"
-]
-}
-],
+"outputs": [],
 "source": [
-"dummy_input = torch.randn(1, 3, 1024, 1024, requires_grad=True)\n",
+"dummy_input = torch.randn(1, 3, 512, 512, requires_grad=True)\n",
 "torch.onnx.export(\n",
 " net,\n",
 " dummy_input,\n",
-" \"../../checkpoints/best.onnx\",\n",
-" opset_version=14,\n",
-" input_names=[\"input\"],\n",
-" output_names=[\"output\"],\n",
-" dynamic_axes={\n",
-" \"input\": {\n",
-" 2: \"height\",\n",
-" 3: \"width\",\n",
-" },\n",
-" \"output\": {\n",
-" 2: \"height\",\n",
-" 3: \"width\",\n",
-" },\n",
-" },\n",
+" \"../../checkpoints/best-fixed.onnx\",\n",
+" opset_version=10,\n",
+" # input_names=[\"input\"],\n",
+" # output_names=[\"output\"],\n",
+" # dynamic_axes={\n",
+" # \"input\": {\n",
+" # 2: \"height\",\n",
+" # 3: \"width\",\n",
+" # },\n",
+" # \"output\": {\n",
+" # 2: \"height\",\n",
+" # 3: \"width\",\n",
+" # },\n",
+" # },\n",
 ")\n"
 ]
 }
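
Because input_names, output_names and dynamic_axes are now commented out, the exported best-fixed.onnx carries auto-generated tensor names and a fixed 1x3x512x512 input shape taken from dummy_input. A minimal sketch of sanity-checking the export with onnxruntime (onnxruntime is not part of this diff and is assumed to be available):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("../../checkpoints/best-fixed.onnx")
inp = sess.get_inputs()[0]              # name is auto-generated since input_names was dropped
print(inp.name, inp.shape)              # expected: a fixed [1, 3, 512, 512] shape
x = np.random.randn(1, 3, 512, 512).astype(np.float32)
outputs = sess.run(None, {inp.name: x})  # run with the discovered input name
print(outputs[0].shape)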

View file

@@ -1 +1 @@
-dde3ef9f5b63d6bac80a9cf7e4409a17c831d771
+559ebbcd9a27a78eac0984fea6717b20b5084ae0

View file

@@ -2,35 +2,27 @@
 "cells": [
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": 5,
 "metadata": {},
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/home/laurent_fainsin/Documents/unet/.venv/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
-" from .autonotebook import tqdm as notebook_tqdm\n",
-"/home/laurent_fainsin/Documents/unet/.venv/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: /home/laurent_fainsin/Documents/unet/.venv/lib/python3.8/site-packages/torchvision/image.so: undefined symbol: _ZNK3c1010TensorImpl36is_contiguous_nondefault_policy_implENS_12MemoryFormatE\n",
-" warn(f\"Failed to load image Python extension: {e}\")\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from PIL import Image\n",
 "\n",
-"import numpy as np\n",
 "import albumentations as A\n",
+"import torchvision.transforms as T\n",
 "\n",
+"import numpy as np\n",
 "from utils import RandomPaste\n",
 "from data.dataset import SyntheticDataset\n",
 "\n",
-"from pathlib import Path\n"
+"from pathlib import Path\n",
+"\n",
+"from joblib import Parallel, delayed\n"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": 6,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -39,39 +31,41 @@
 " A.Resize(512, 512),\n",
 " A.Flip(),\n",
 " A.ColorJitter(),\n",
-" RandomPaste(5, \"/home/lilian/data_disk/lfainsin/spheres/\"),\n",
+" RandomPaste(5, \"/media/disk1/lfainsin/SPHERES/\"),\n",
 " A.GaussianBlur(),\n",
 " A.ISONoise(),\n",
 " ],\n",
 ")\n",
 "\n",
-"dataset = SyntheticDataset(image_dir=\"/home/lilian/data_disk/lfainsin/train/\", transform=transform)\n"
+"dataset = SyntheticDataset(image_dir=\"/media/disk1/lfainsin/BACKGROUND/\", transform=transform)\n",
+"transform = T.ToPILImage()"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 12,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"10001\r"
-]
-}
-],
+"outputs": [],
 "source": [
-"for i, (image, mask) in enumerate(dataset):\n",
-" path = f\"/home/lilian/data_disk/lfainsin/prerender/{i}/\"\n",
-" Path(path).mkdir(parents=True, exist_ok=True)\n",
-" Image.fromarray(image).save(f\"{path}/image.jpg\")\n",
-" Image.fromarray(mask*255).save(f\"{path}/MASK.PNG\")\n",
-" \n",
-" print(i, end=\"\\r\")\n",
+"def render(i, image, mask):\n",
 "\n",
-" if i > 10000:\n",
-" break"
+" image = transform(image)\n",
+" mask = transform(mask)\n",
+"\n",
+" path = f\"/media/disk1/lfainsin/TRAIN_prerender/{i}/\"\n",
+" Path(path).mkdir(parents=True, exist_ok=True)\n",
+" \n",
+" image.save(f\"{path}/image.jpg\")\n",
+" mask.save(f\"{path}/MASK.PNG\")\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 13,
+"metadata": {},
+"outputs": [],
+"source": [
+"Parallel(n_jobs=-1)(delayed(render)(i, image, mask) for i, (image, mask) in enumerate(dataset))\n"
 ]
 }
 ],
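
For reference, a rough standalone equivalent of the new pre-rendering cells (the A.Compose wrapper around the transform list and the exact array types returned by SyntheticDataset are assumptions, since they sit outside the visible hunks):

from pathlib import Path

import albumentations as A
import torchvision.transforms as T
from joblib import Parallel, delayed

from data.dataset import SyntheticDataset
from utils import RandomPaste

aug = A.Compose([
    A.Resize(512, 512),
    A.Flip(),
    A.ColorJitter(),
    RandomPaste(5, "/media/disk1/lfainsin/SPHERES/"),
    A.GaussianBlur(),
    A.ISONoise(),
])
dataset = SyntheticDataset(image_dir="/media/disk1/lfainsin/BACKGROUND/", transform=aug)
to_pil = T.ToPILImage()  # the notebook reuses the name "transform" for this

def render(i, image, mask):
    # one directory per sample: image as JPEG, mask as PNG
    out = Path(f"/media/disk1/lfainsin/TRAIN_prerender/{i}/")
    out.mkdir(parents=True, exist_ok=True)
    to_pil(image).save(out / "image.jpg")
    to_pil(mask).save(out / "MASK.PNG")

if __name__ == "__main__":
    # one worker per core; the dataset is iterated in the parent process and samples are dispatched to workers
    Parallel(n_jobs=-1)(delayed(render)(i, image, mask) for i, (image, mask) in enumerate(dataset))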