feat: switch from poetry to micromamba

Laurent Fainsin 2023-04-01 19:10:27 +02:00
parent 2cc47bbb9e
commit ac26b2a62a
9 changed files with 48 additions and 5671 deletions

.gitignore (vendored, 10 lines changed)

@@ -1,8 +1,6 @@
-data/
-dataset*/
-*.parquet
-.venv/
-lightning_logs/
-__pycache__/
+dataset*
+lightning_logs
+__pycache__
+
 *.jpg
 *.png

env.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
+name: qcav
+channels:
+  - nodefaults
+  - pytorch
+  - nvidia
+  - conda-forge
+dependencies:
+  # basic python
+  - rich
+  # science
+  - numpy
+  - scipy
+  - opencv
+  # pytorch
+  - pytorch
+  - torchvision
+  - torchaudio
+  - pytorch-cuda=11.8
+  - pytorch-lightning
+  # deep learning libraries
+  - transformers
+  - datasets
+  - timm
+  # dev tools
+  - ruff
+  - isort
+  - mypy
+  - pre-commit
+  # logging
+  - tensorboard
+  # visualization
+  - matplotlib

poetry.lock (generated, 5596 lines deleted): file diff suppressed because it is too large

poetry.toml (deleted)

@@ -1,3 +0,0 @@
-[virtualenvs]
-create = true
-in-project = true

pyproject.toml

@@ -1,58 +1,6 @@
-[tool.poetry]
-authors = ["Laurent Fainsin <laurentfainsin@protonmail.com>"]
-description = ""
-name = "label-studio"
-version = "1.0.0"
-
-[tool.poetry.dependencies]
-datasets = "^2.9.0"
-fastapi = "0.86.0"
-jsonargparse = {extras = ["signatures"], version = "^4.20.0"}
-lightning = "1.9.1"
-matplotlib = "^3.7.0"
-numpy = "^1.24.2"
-opencv-python = "^4.7.0.72"
-opencv-python-headless = "^4.7.0.72"
-python = ">=3.8,<3.12"
-rich = "^13.3.1"
-scipy = "^1.10.0"
-timm = "^0.6.12"
-torch = "^1.13.1"
-transformers = "^4.26.1"
-
-[tool.poetry.group.notebooks]
-optional = true
-
-[tool.poetry.group.notebooks.dependencies]
-ipykernel = "^6.20.2"
-ipywidgets = "^8.0.4"
-jupyter = "^1.0.0"
-matplotlib = "^3.6.3"
-
-[tool.poetry.group.dev.dependencies]
-Flake8-pyproject = "^1.1.0"
-bandit = "^1.7.4"
-black = "^22.8.0"
-flake8 = "^5.0.4"
-flake8-docstrings = "^1.6.0"
-isort = "^5.10.1"
-mypy = "^0.971"
-pre-commit = "^2.20.0"
-tensorboard = "^2.12.0"
-torchtyping = "^0.1.4"
-torch-tb-profiler = "^0.4.1"
-
-[build-system]
-build-backend = "poetry.core.masonry.api"
-requires = ["poetry-core"]
-
-[tool.flake8]
-# rules ignored
-extend-ignore = ["W503", "D401", "D100", "D104"]
-per-file-ignores = ["__init__.py:F401"]
-# black
-ignore = "E203"
-max-line-length = 120
+[tool.ruff]
+line-length = 120
+select = ["E", "F", "I"]
 
 [tool.black]
 exclude = '''

src/datamodule.py

@@ -1,9 +1,8 @@
 import datasets
 import torch
-from lightning.pytorch import LightningDataModule
-from lightning.pytorch.trainer.supporters import CombinedLoader
+from pytorch_lightning import LightningDataModule
+from pytorch_lightning.utilities import CombinedLoader
 from torch.utils.data import DataLoader
-from torchvision.datasets import ImageFolder
 from torchvision.transforms import AugMix
 from transformers import DetrFeatureExtractor
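
Aside from dropping the unused ImageFolder import, the change above is purely the import path: the standalone pytorch_lightning distribution exposes LightningDataModule at its top level and CombinedLoader under pytorch_lightning.utilities. The following is a minimal sketch, not the repository's DETRDataModule, of how CombinedLoader is typically returned from a datamodule with the new imports; the dataset contents, loader names and batch size are illustrative assumptions.

import torch
from pytorch_lightning import LightningDataModule
from pytorch_lightning.utilities import CombinedLoader
from torch.utils.data import DataLoader, TensorDataset


class ToyDataModule(LightningDataModule):
    """Sketch only: combines two dataloaders into one training stream."""

    def train_dataloader(self):
        real = DataLoader(TensorDataset(torch.randn(64, 3, 32, 32)), batch_size=8)
        synthetic = DataLoader(TensorDataset(torch.randn(128, 3, 32, 32)), batch_size=8)
        # Each training batch is a dict: {"real": ..., "synthetic": ...};
        # "max_size_cycle" cycles the shorter loader until the longer one is exhausted.
        return CombinedLoader({"real": real, "synthetic": synthetic}, mode="max_size_cycle")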

LightningCLI training script

@@ -1,12 +1,11 @@
-from lightning.pytorch.callbacks import (
+from datamodule import DETRDataModule
+from module import DETR
+from pytorch_lightning.callbacks import (
     ModelCheckpoint,
     RichModelSummary,
     RichProgressBar,
 )
-from lightning.pytorch.cli import LightningCLI
-
-from datamodule import DETRDataModule
-from module import DETR
+from pytorch_lightning.cli import LightningCLI
 
 
 class MyLightningCLI(LightningCLI):
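
The body of MyLightningCLI and the __main__ block sit outside this hunk. As a rough, hypothetical sketch of how such an entry point is usually completed (the linked batch_size arguments below are an assumption, not something shown in this commit):

from datamodule import DETRDataModule
from module import DETR
from pytorch_lightning.cli import LightningCLI


class MyLightningCLI(LightningCLI):
    def add_arguments_to_parser(self, parser):
        # Assumed example: expose one batch_size flag and forward it to both
        # the datamodule and the model (requires both to accept batch_size).
        parser.link_arguments("data.batch_size", "model.batch_size")


if __name__ == "__main__":
    # Instantiating LightningCLI parses argv and runs the chosen subcommand
    # (fit / validate / test / predict).
    MyLightningCLI(DETR, DETRDataModule)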

src/module.py

@@ -1,6 +1,6 @@
 import torch
-from lightning.pytorch import LightningModule
 from PIL import ImageDraw
+from pytorch_lightning import LightningModule
 from transformers import (
     DetrForObjectDetection,
     get_cosine_with_hard_restarts_schedule_with_warmup,
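
Here too the only change is the LightningModule import path. For context, a sketch under assumptions of how these imports typically fit together in a DETR LightningModule; the real module's hyperparameters, loss handling and ImageDraw-based logging are not part of this diff, so the class name, checkpoint and values below are illustrative.

import torch
from pytorch_lightning import LightningModule
from transformers import (
    DetrForObjectDetection,
    get_cosine_with_hard_restarts_schedule_with_warmup,
)


class DETRSketch(LightningModule):
    def __init__(self, lr: float = 1e-4, warmup_steps: int = 500, total_steps: int = 10_000):
        super().__init__()
        self.net = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
        self.lr, self.warmup_steps, self.total_steps = lr, warmup_steps, total_steps

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.lr)
        scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.warmup_steps,
            num_training_steps=self.total_steps,
            num_cycles=2,  # cosine decay with hard restarts
        )
        # Step the LR scheduler every optimizer step rather than every epoch.
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]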

src/spheres_predict.py

@@ -74,8 +74,6 @@ class SpherePredict(datasets.GeneratorBasedBuilder):
 if __name__ == "__main__":
-    from PIL import ImageDraw
-
     # load dataset
     dataset = datasets.load_dataset("src/spheres_predict.py", split="train")
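
The path passed to datasets.load_dataset is the script itself, which works because SpherePredict subclasses datasets.GeneratorBasedBuilder. A minimal sketch of that builder-script pattern, with a placeholder feature schema and file list rather than SpherePredict's real ones:

import datasets


class SpherePredictSketch(datasets.GeneratorBasedBuilder):
    """Sketch only: the three methods every generator-based builder implements."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"image": datasets.Image()}),
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"paths": ["img_0001.jpg"]},  # placeholder file list
            )
        ]

    def _generate_examples(self, paths):
        # Yield (key, example) pairs; the Image feature decodes the path lazily.
        for idx, path in enumerate(paths):
            yield idx, {"image": path}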