Laureηt 2023-03-07 16:52:48 +01:00
commit 1d5f339ef3
Signed by: Laurent
SSH key fingerprint: SHA256:kZEpW8cMJ54PDeCvOhzreNr4FSh6R13CMGH/POoO8DI
32 changed files with 7029 additions and 0 deletions

1
.envrc Normal file
@@ -0,0 +1 @@
use flake

3
.gitignore vendored Normal file
@@ -0,0 +1,3 @@
.direnv/
node_modules/
dist/

BIN  assets/DETR.png Normal file (217 KiB)
BIN  assets/algo1.png Normal file (19 KiB)
BIN  assets/antoine.webp Normal file (26 MiB)
BIN  assets/capture_hdri.jpg Normal file (692 KiB)
BIN  assets/compositing.webp Normal file (6.3 MiB)
BIN  assets/illumination.webp Normal file (12 MiB)
BIN  assets/im12.jpg Normal file (16 KiB)
BIN  assets/im13.jpg Normal file (16 KiB)
BIN  assets/im14.jpg Normal file (16 KiB)
BIN  assets/im15.jpg Normal file (16 KiB)
BIN  assets/im2.jpg Normal file (17 KiB)
BIN  assets/im3.jpg Normal file (17 KiB)
BIN  assets/im4.jpg Normal file (17 KiB)
BIN  assets/im5.jpg Normal file (17 KiB)
BIN  assets/image-026.png Normal file (191 KiB)
BIN  assets/image-027.png Normal file (362 KiB)
BIN  assets/render.webp Normal file (6.2 MiB)
BIN  assets/residu2d_3.jpg Normal file (78 KiB)
BIN  assets/residu_4.jpg Normal file (31 KiB)
BIN  assets/resultats_finaux.jpg Normal file (56 KiB)
BIN  (file name not shown) (54 KiB)
BIN  (file name not shown) (56 KiB)
BIN  assets/shiny.jpg Normal file (526 KiB)
BIN  assets/spheres.png Normal file (2.4 MiB)

43
flake.lock Normal file
@@ -0,0 +1,43 @@
{
  "nodes": {
    "flake-utils": {
      "locked": {
        "lastModified": 1676283394,
        "narHash": "sha256-XX2f9c3iySLCw54rJ/CZs+ZK6IQy7GXNY4nSOyu2QG4=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "3db36a8b464d0c4532ba1c7dda728f4576d6d073",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1677932085,
        "narHash": "sha256-+AB4dYllWig8iO6vAiGGYl0NEgmMgGHpy9gzWJ3322g=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "3c5319ad3aa51551182ac82ea17ab1c6b0f0df89",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}

13
flake.nix Normal file
@@ -0,0 +1,13 @@
{
  description = "Proj long prez";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = { self, nixpkgs, flake-utils }:
    flake-utils.lib.eachDefaultSystem (system:
      let pkgs = nixpkgs.legacyPackages.${system};
      in { devShell = pkgs.mkShell { buildInputs = with pkgs; [ nodejs ]; }; });
}

6283
package-lock.json generated Normal file

File diff suppressed because it is too large.

10
package.json Normal file
@@ -0,0 +1,10 @@
{
  "dependencies": {
    "@slidev/cli": "^0.38.5",
    "@slidev/theme-default": "^0.21.2",
    "slidev-theme-academic": "^1.1.1"
  },
  "devDependencies": {
    "playwright-chromium": "^1.30.0"
  }
}

342
slides.md Normal file
@@ -0,0 +1,342 @@
---
theme: academic
class: text-white
coverAuthor: Laurent Fainsin, Pierre-Eliot Jourdan, Raphaëlle Monville-Letu, Jade Neav
coverBackgroundUrl: https://plus.unsplash.com/premium_photo-1673553304257-018c85e606f8?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8
coverBackgroundSource: unsplash
coverBackgroundSourceUrl: https://unsplash.com/photos/g4I556WCJT0
coverDate: "2023-03-09"
themeConfig:
  paginationX: r
  paginationY: t
  paginationPagesDisabled:
    - 1
title: Projet Long
---
<h2 class="opacity-50" style="font-size: 1.9rem;">End-of-study project</h2>
<h1 style="font-size: 2.3rem;">Sphere detection and multimedia applications</h1>
---
# Contents
<div class="h-100 flex items-center text-2xl">
- Types of spheres
- Automatic sphere detection
- Lighting intensity estimation
- Lighting direction estimation
</div>
<figure class="absolute top-15 right-25 w-35">
<img src="https://images.pexels.com/photos/13849458/pexels-photo-13849458.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=1"/>
<figcaption class="text-center">Architecture</figcaption>
</figure>
<figure class="absolute top-40 right-75 w-50">
<img src="https://images.pexels.com/photos/3945321/pexels-photo-3945321.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=1"/>
<figcaption class="text-center">Cinema</figcaption>
</figure>
<figure class="absolute top-72 right-30 w-45">
<img src="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTzg_yM_NbCIYXfZ55WdtFbAtaF7EUGSKSVBQ&usqp=CAU"/>
<figcaption class="text-center">3D Reconstruction</figcaption>
</figure>
<a href="https://www.pexels.com" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">pexels</a>
---
class: text-white custombg
---
<style>
.custombg {
background-repeat: no-repeat;
background-position: center center;
background-size: cover;
background-image: url("/assets/spheres.png");
}
</style>
## Types of spheres
---
class: text-white custombg2
---
<style>
.custombg2 {
background-repeat: no-repeat;
background-position: center center;
background-size: cover;
background-image: url("https://media.caveacademy.com/wp-content/uploads/2021/05/04000307/cave_prop1002_chrome_v001_r001.jpg");
}
</style>
## Chrome sphere
<a href="https://caveacademy.com/wiki/onset-production/data-acquisition/data-acquisition-training/the-grey-the-chrome-and-the-macbeth-chart/" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">CaveAcademy</a>
---
## Acquisition techniques
<img src="/assets/capture_hdri.jpg" class="m-auto"/>
<a href="https://www.youtube.com/watch?v=kwGZa5qTeAI" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">Louis du Mont</a>
<!-- https://www.youtube.com/watch?v=HCfHQL4kLnw -->
---
## Realistic lighting
<div class="grid grid-cols-2 col-auto m-auto h-100 gap-1">
<img src="/assets/image-026.png" class="m-auto w-full"/>
<img src="/assets/image-027.png" class="m-auto w-full"/>
</div>
---
class: text-white custombg3
---
<style>
.custombg3 {
background-repeat: no-repeat;
background-position: center center;
background-size: cover;
background-image: url("/assets/shiny.jpg");
}
</style>
## Shiny sphere
<a href="https://caveacademy.com/wiki/onset-production/data-acquisition/data-acquisition-training/the-grey-the-chrome-and-the-macbeth-chart/" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">CaveAcademy</a>
---
class: text-white custombg4
---
<style>
.custombg4 {
background-repeat: no-repeat;
background-position: center center;
background-size: cover;
background-image: url("https://media.caveacademy.com/wp-content/uploads/2021/05/04000316/cave_prop1002_grey_v001_r001.jpg");
}
</style>
## Matte sphere
<a href="https://caveacademy.com/wiki/onset-production/data-acquisition/data-acquisition-training/the-grey-the-chrome-and-the-macbeth-chart/" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">CaveAcademy</a>
---
# Automatic detection of spheres
---
## Model
<div class="h-100 flex items-center">
<img src="/assets/DETR.png" class="m-auto"/>
</div>
<a href="https://arxiv.org/abs/2005.12872" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">End-to-End Object Detection with Transformers, arXiv:2005.12872
</a>
---
## Datasets
<div class="grid grid-cols-2 col-auto m-auto h-full">
<img src="/assets/antoine.webp" class="m-auto h-55"/>
<img src="/assets/illumination.webp" class="m-auto h-55"/>
<img src="/assets/compositing.webp" class="m-auto h-55"/>
<img src="/assets/render.webp" class="m-auto h-55"/>
</div>
---
## Results
---
# Estimation of the lighting intensity in an image
---
## Photometric Stereo
<div class="h-100 flex items-center">
<img src= "https://upload.wikimedia.org/wikipedia/commons/b/b5/Photometric_stereo.png" class="m-auto h-90"/>
</div>
- Estimate the surface normals of an object
- Shiny spheres $\rightarrow$ direction of the lighting
---
## Lambert's law
<div class="h-100 flex items-center">
<img src= "https://img.laserfocusworld.com/files/base/ebm/lfw/image/2019/06/1906LFW_ost_1.5d13a8a881e81.png?auto=format,compress&w=1050&h=590&fit=clip" class="m-auto h-90"/>
</div>
$I(q) = \rho(Q) \times \vec{n}(Q) \cdot \vec{s}(Q)$
$\rho(Q)$ is the albedo
$\vec{n}(Q)$ is the normal vector
$\vec{s}(Q) = \phi \times \vec{s_0}(Q)$, $\vec{s_0}(Q)$ being the direction of the lighting vector
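
A minimal NumPy sketch of the law above for a single pixel; the function name and the clamp to zero for self-shadowed points are illustrative assumptions, not part of the slides.

```python
import numpy as np

def lambertian_intensity(albedo, normal, s0, phi):
    """I(q) = rho(Q) * ( n(Q) . s(Q) ), with s(Q) = phi * s0(Q)."""
    s = phi * np.asarray(s0, dtype=float)             # scaled lighting vector s(Q)
    dot = float(np.asarray(normal, dtype=float) @ s)  # n(Q) . s(Q)
    return albedo * max(dot, 0.0)                     # clamp: a self-shadowed point receives no light

# Example: fronto-parallel surface, lighting slightly off-axis, intensity phi = 2
print(lambertian_intensity(0.8, [0.0, 0.0, 1.0], [0.3, 0.0, 0.95], phi=2.0))
```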
---
## Problem formulation
$N$ lightings, $P$ pixels
$I = M \times S \times D_{\phi}$
$I \in \mathbb{R}^{P \times N} \rightarrow$ grey levels
$M \in \mathbb{R}^{P \times 3} \rightarrow$ the albedo and the normals (unknown)
$S \in \mathbb{R}^{3 \times N} \rightarrow$ directions of the lightings
$D_{\phi} = \mathrm{diag}(\phi_1, \dots, \phi_{N}) \in \mathbb{R}^{N \times N} \rightarrow$ intensities of the lightings (to be determined)
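
To make the shapes concrete, a small NumPy sketch building the three matrices; the sizes $P$, $N$ and the random data are purely illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)
P, N = 1000, 8                         # P pixels, N lightings

M = rng.normal(size=(P, 3))            # rows: albedo times surface normal (unknown in practice)
S = rng.normal(size=(3, N))            # columns: lighting directions (assumed known)
S /= np.linalg.norm(S, axis=0)         # unit directions
phi = rng.uniform(0.5, 2.0, size=N)    # lighting intensities (to be determined)

I = M @ S @ np.diag(phi)               # grey levels, one column per lighting
print(I.shape)                         # (P, N)
```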
---
## Algorithm 1
<div class="h-100 flex items-center">
<img src="/assets/algo1.png" class="m-auto h-80"/>
</div>
Intensities: $[\phi_1, \dots, \phi_{N}]$
New values: $\phi_j + \delta$ and $\phi_j - \delta$, $j \in \{1, \dots, N\}$
Mean-squared error: $\underset{\phi_j}{\min} \, || I - M S D_{\phi} ||_2^2$
Update the value of $\phi_j$
Repeat previous steps
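
A minimal sketch of the coordinate descent described above, assuming NumPy and that $M$ and $S$ are available (in practice $M$ would itself have to be estimated from $I$); `delta` and the number of iterations are illustrative.

```python
import numpy as np

def residual(I, M, S, phi):
    """Mean-squared error || I - M S D_phi ||_2^2."""
    return np.linalg.norm(I - M @ S @ np.diag(phi)) ** 2

def algorithm_1(I, M, S, phi0, delta=0.05, n_iter=100):
    phi = np.asarray(phi0, dtype=float).copy()
    for _ in range(n_iter):                    # repeat the previous steps
        for j in range(len(phi)):              # update each phi_j in turn
            plus, minus = phi.copy(), phi.copy()
            plus[j] += delta                   # candidate phi_j + delta
            minus[j] -= delta                  # candidate phi_j - delta
            phi = min((phi, plus, minus),
                      key=lambda p: residual(I, M, S, p))
    return phi
```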
---
## Algorithm 2
Algorithm 1 $\rightarrow$ too slow
$$I = M S D_{\phi} \iff M = I(S D_{\phi})^\dagger = I (S D_{\phi})^T [(S D_{\phi})(S D_{\phi})^T]^{-1}$$
Lambert's law:
$$
\begin{align*}
I &= I (S D_{\phi})^T [(S D_{\phi})(S D_{\phi})^T]^{-1} S D_{\phi} \\
&= I D_{\phi} S^T S^{-T} D_{\phi}^{-2} S^{-1} S D_{\phi}
\end{align*}
$$
New residual:
$$\underset{\phi_i}{\min} || I - I D_{\phi} S^T S^{-T} D_{\phi}^{-2} S^{-1} S D_{\phi} ||_2^2$$
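
A sketch of this residual with $M$ eliminated; here `np.linalg.pinv` plays the role of $(S D_{\phi})^T [(S D_{\phi})(S D_{\phi})^T]^{-1}$ (they coincide when $S D_{\phi}$ has full row rank), and the use of `scipy.optimize.minimize` is an illustrative choice, not necessarily the solver used in the project.

```python
import numpy as np
from scipy.optimize import minimize

def residual_2(phi, I, S):
    """|| I - I (S D_phi)^+ (S D_phi) ||_2^2, independent of M."""
    SD = S @ np.diag(phi)                 # S D_phi, shape (3, N)
    proj = np.linalg.pinv(SD) @ SD        # (S D_phi)^+ (S D_phi), shape (N, N)
    return np.linalg.norm(I - I @ proj) ** 2

# Usage, reusing I and S from the sketch above (the cost only fixes phi up to a global scale):
# phi_hat = minimize(residual_2, x0=np.ones(S.shape[1]), args=(I, S)).x
```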
---
## Generated images
<div class="grid grid-cols-4 col-auto h-110 m-auto">
<img src="/assets/im2.jpg" class="m-auto h-50"/>
<img src="/assets/im3.jpg" class="m-auto h-50"/>
<img src="/assets/im4.jpg" class="m-auto h-50"/>
<img src="/assets/im5.jpg" class="m-auto h-50"/>
<img src="/assets/im12.jpg" class="m-auto h-50"/>
<img src="/assets/im13.jpg" class="m-auto h-50"/>
<img src="/assets/im14.jpg" class="m-auto h-50"/>
<img src="/assets/im15.jpg" class="m-auto h-50"/>
</div>
---
## Results (1/2)
<div class="h-100 flex items-center">
<img src="/assets/residu_4.jpg" class="m-auto w-full"/>
<img src="/assets/residu2d_3.jpg" class="m-auto w-full"/>
</div>
---
## Results (2/2)
<div class="h-100 flex items-center">
<img src="/assets/resultats_finaux.jpg" class="m-auto h-110"/>
</div>
---
## Real images
TODO LOLO: put in the images as in the slides, with the red square
---
## Results
<div class="h-100 flex items-center">
<img src="/assets/resultats_finaux_comete.jpg" class="m-auto w-full"/>
<img src="/assets/resultats_finaux_stsernin.jpg" class="m-auto w-full"/>
</div>
---
# Automatic estimation of the lighting vector
- Creation of the data
- Estimation of the lighting vector with matte spheres
- Training of the neural networks
---
## Creation of the masks
---
## Data generated with Blender
---
## Estimation of the lighting vector for training
---
## Verification of the lighting vector estimation
---
## Which type of neural network?
---
## Results
---
## Conclusion
---
## Perspectives

334
slides.md.bak Normal file
@@ -0,0 +1,334 @@
---
theme: academic
class: text-white
coverAuthor: Laurent Fainsin, Damien Guillotin, Pierre-Eliot Jourdan
coverBackgroundUrl: https://images.unsplash.com/photo-1655720408861-8b04c0724fd9?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8
coverBackgroundSource: unsplash
coverBackgroundSourceUrl: https://unsplash.com/photos/Vc0CmuIfMg0
coverDate: '2023-02-01'
themeConfig:
  paginationX: r
  paginationY: t
  paginationPagesDisabled:
    - 1
title: PI3D lab project
---
<h2 class="opacity-50" style="font-size: 2rem;">PI3D lab project</h2>
<h1 style="font-size: 2.4rem;">Topic 6 - Reformulating MVS with level sets</h1>
---
## Outline
<div class="h-100 flex items-center text-2xl">
- Definitions
- General idea
- Hypotheses
- The algorithm
- Results
- Conclusion
</div>
<!--
Our goal in this lab project was to implement this process.
To do so, we needed to go through the following steps:
- Definitions
1. Level sets
2. MVS
- General idea
- Hypotheses
1. Discretisation of space
2. Binarisation of the level set
- The algorithm
1. Initialisation of the volume
2. Update of the volume
- Results
- Conclusion
-->
---
## Definition
### Level sets
<img src="/figs/lvl7_2D.gif" class="m-auto"/>
$\mathcal{V} = \{ \textbf{Q} = (X, Y) \in \mathbb{R}^2, u(\textbf{Q}) > s \}, \quad s\in [0,1], \quad u \colon \mathbb{R}^2 \to [0, 1]$
<!--
A level set ≃ a thresholding. \
Here we show the thresholding of a function as a red contour, with the binarised result on the right.
It is somewhat reminiscent of active contours, and level sets can be seen as a generalisation of active contours.
Mathematical, continuous definition of level sets:
Volume = the set of points of space whose image under a function satisfies a condition
- V -> volume (2D here, black on the right)
- Q -> 2D point of space
- u -> indicator function (which we want to learn) telling how much a point Q is "inside" V
- s -> threshold setting the value above which we are inside V
-->
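
A tiny NumPy sketch of this 2D definition: u is sampled on a grid and the level set is simply the set of points where u exceeds the threshold s (the Gaussian bump and s = 0.5 are illustrative).

```python
import numpy as np

x, y = np.meshgrid(np.linspace(-1, 1, 256), np.linspace(-1, 1, 256))
u = np.exp(-(x**2 + y**2) / 0.2)   # a function u: R^2 -> [0, 1]
s = 0.5                            # threshold
V = u > s                          # the level set {Q : u(Q) > s}, as a boolean mask
print(V.sum(), "grid points inside V")
```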
---
### 3D level set example
<div class="flex items-center">
<img src="/figs/lvl7_3D.gif" class="h-110"/>
<figure>
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/a/a7/MarchingCubes.svg/350px-MarchingCubes.svg.png" class="h-50">
<figcaption class="text-center">Marching cubes</figcaption>
<!-- add link here -->
</figure>
</div>
<!--
The previous definition generalises very well to higher-dimensional spaces; here is a 3D example.
We mostly talk about 2D because it is simpler.
For instance, here we cannot display u, since that would require a 4D representation.
We can, however, show the result V_t for different values of t.
For rendering the 3D surface, we use the marching cubes algorithm.
-->
---
## Definition
### Multi-view Stereo
<img src="https://people.inf.ethz.ch/~moswald/publications/resources/Oswald-DA-2007.png" class="m-auto h-105"/>
<a href="https://people.inf.ethz.ch/~moswald/publications/resources/Oswald-DA-2007.pdf" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">Concurrent Stereo Reconstruction, Martin R. Oswald, 2007</a>
<!--
MVS -> 3D reconstruction from the camera positions and their images.
We obtain a dense point cloud.
With the classical method, there can be offsets between the point clouds.
With level sets, we can get rid of this problem.
-->
---
## General idea
### Evolution of $u$
<img src="/figs/lvl7_2D_1.png" class="m-auto h-100"/>
<a href="https://hal.inria.fr/inria-00073673/document" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">Variational principles, surface evolution, PDEs, level set methods, and the stereo problem - Olivier Faugeras, Renaud Keriven, 1998</a>
<!--
Faugeras and Keriven wrote this "book" to lay down many mathematical proofs about variational problems... including level sets.
In their "book" they show several level-set examples.
They present a process to update u (the blue contour) so that it converges towards a volume (the red contour) captured by cameras (whose poses are known).
-->
---
## General idea
### Evolution of $u$
<img src="/figs/lvl7_2D_3.png" class="m-auto h-100"/>
<a href="https://hal.inria.fr/inria-00073673/document" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">Variational principles, surface evolution, PDEs, level set methods, and the stereo problem - Olivier Faugeras, Renaud Keriven, 1998</a>
<!--
topology change of the level set
-->
---
## Hypotheses
### Discretisation of space
<img src="https://upload.wikimedia.org/wikipedia/commons/b/bc/Voxels.svg" class="m-auto mr-50 -mt-13 h-100">
<div class="absolute top-25">
$\mathbb{R}^3 \to \mathbb{V}$
</div>
### Binarisation of the level set
<div class="absolute bottom-3">
$\mathcal{V} = \{ \textbf{v} = (x, y, z) \in \mathbb{V}, u(\textbf{v}) > 0 \}, \quad u \colon \mathbb{V} \to \{0, 1\}$
</div>
<a href="https://en.wikipedia.org/wiki/Voxel" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">Wikipedia</a>
<!--
First, we discretise our space, since we work in the digital world and the observations (camera photos) used to compute our level set are samples as well.
We therefore work with a grid of pixels in 2D, and a grid of voxels (V) in 3D.
We also simplify the problem by binarising our function u, which now outputs either 0 (air) or 1 (solid).
The threshold is likewise set to 0.
-->
---
## The algorithm
### Initialisation of the volume
- Definition of the bounds of our voxel grid (see the sketch after the notes below):
  - $x \in \llbracket x_{\min}, x_{\max} \rrbracket$
  - $y \in \llbracket y_{\min}, y_{\max} \rrbracket$
  - $z \in \llbracket z_{\min}, z_{\max} \rrbracket$
- Definition of a voxel resolution:
  - Not too large, otherwise a voxel projected onto our cameras covers several pixels
  - Not too small, otherwise several voxels project onto the same pixel
  - Example: $5 \times 10^{-2}\ m$
- Initialisation of the voxel values:
  - Uniform: $\forall \textbf{Q} = (X, Y, Z) \in \mathbb{R}^3, u_0(\textbf{Q}) = 1$
  - Shape From Silhouette <span style="opacity: 0.025;">BE4 CHEH</span>
<!--
Talk about the volume init just before. \
As we saw in Faugeras' examples, we need an initial u_0 (the blue contour). For that, in our case, we first define bounds for our voxel grid. We also need to define the size of the voxels.
-->
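
A minimal sketch of the voxel-grid initialisation listed above, assuming NumPy; the bounds are illustrative, the resolution follows the slide, and u0 = 1 everywhere corresponds to the uniform option.

```python
import numpy as np

x_min, x_max = -1.0, 1.0           # illustrative grid bounds (metres)
y_min, y_max = -1.0, 1.0
z_min, z_max = -1.0, 1.0
res = 5e-2                         # voxel size, 5e-2 m as on the slide

x = np.arange(x_min, x_max, res)
y = np.arange(y_min, y_max, res)
z = np.arange(z_min, z_max, res)
X, Y, Z = np.meshgrid(x, y, z, indexing="ij")   # voxel centres
u0 = np.ones(X.shape, dtype=bool)               # uniform initialisation: everything solid
```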
---
## The algorithm
### Shape from Silhouette principle
<img src="https://www.researchgate.net/profile/Silvio-Savarese/publication/221625880/figure/fig1/AS:652956261158913@1532688312594/Shape-from-Silhouettes-The-silhouette-and-camera-location-for-each-view-forms-a-cone.png" class="m-auto h-105">
<a href="https://ieeexplore.ieee.org/document/1024034" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">Implementation of a Shadow Carving System for Shape Capture, doi: 10.1109/TDPVT.2002.1024034</a>
<!--
We assume the poses of several cameras are known, as well as the masks of the object they capture.
We take every voxel of our grid and project it onto each of our cameras.
If a voxel falls outside the mask of at least one camera, we remove it.
The result is the visual hull of the object (point / voxel cloud).
The more cameras we have, the better the hull is defined.
-->
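
A minimal sketch of the carving step described in the notes above, assuming NumPy, cameras given as 3x4 projection matrices and binary silhouette masks; all names here (project, carve, ...) are illustrative.

```python
import numpy as np

def project(P, pts):
    """Project Nx3 world points with a 3x4 camera matrix into Nx2 pixel coordinates."""
    h = np.hstack([pts, np.ones((len(pts), 1))])    # homogeneous coordinates
    q = (P @ h.T).T
    return q[:, :2] / q[:, 2:3]

def carve(voxels, cameras, masks):
    """Keep only the voxels whose projection lies inside every silhouette mask."""
    keep = np.ones(len(voxels), dtype=bool)
    for P, mask in zip(cameras, masks):
        uv = np.round(project(P, voxels)).astype(int)
        inside = (uv[:, 0] >= 0) & (uv[:, 0] < mask.shape[1]) \
               & (uv[:, 1] >= 0) & (uv[:, 1] < mask.shape[0])
        hit = np.zeros(len(voxels), dtype=bool)
        hit[inside] = mask[uv[inside, 1], uv[inside, 0]]
        keep &= hit                                 # outside one mask -> carved away
    return voxels[keep]
```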
---
### Shape from Silhouette 3D
<div class="flex items-center">
<img src="/figs/example3D.gif" class="m-auto h-100">
<iframe frameborder="0" allowfullscreen mozallowfullscreen="true" webkitallowfullscreen="true" allow="autoplay; fullscreen; xr-spatial-tracking" xr-spatial-tracking execution-while-out-of-viewport execution-while-not-rendered web-share width="100%" height="100%" src="https://sketchfab.com/models/e5717ee34c9e481a817a34aeacd8a48e/embed?autostart=1" class="h-100"></iframe>
</div>
<!--
25 poses, torus with bricks, generated in Blender, perfect masks from a ray-traced render (Cycles).
Voxel cloud; with a point cloud, conversion to a mesh is possible thanks to marching cubes.
-->
---
## The algorithm
### Ray casting (Fast Voxel Intersect)
<div class="grid grid-cols-2 col-auto w-110 m-auto">
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1068141883810914427/fvi2.png" class="m-auto h-55"/>
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1068141884242931712/fvi4.png" class="m-auto h-55"/>
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1068141884679147602/fvi6.png" class="m-auto h-55"/>
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1068141885056622661/fvi8.png" class="m-auto h-55"/>
</div>
<!--
- Selection of the voxels on the border of the volume
- Check of the visibility of the voxel from all the cameras
- Retrieval of the colours seen by the cameras
- If no consensus and air, no update -> air
- If consensus and air, update -> solid
- If no consensus and solid, update -> air
- If consensus and solid, no update -> solid
-->
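
A minimal sketch of the update rule listed in the notes above; the colour-consensus test (a variance threshold) and the helper names are illustrative assumptions, and the gathering of the visible colours is assumed to be done elsewhere.

```python
import numpy as np

def colour_consensus(colours, threshold=0.05):
    """Crude consensus test: the cameras agree if the colour variance is small."""
    return float(np.var(np.asarray(colours), axis=0).mean()) < threshold

def update_voxel(is_solid, colours):
    """New state of a border voxel (True = solid, False = air)."""
    consensus = colour_consensus(colours)
    if consensus and not is_solid:
        return True      # consensus & air      -> becomes solid
    if not consensus and is_solid:
        return False     # no consensus & solid -> becomes air
    return is_solid      # other two cases: unchanged
```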
---
## Results
### The environment
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070291885823889408/peanut.png" class="m-auto h-110"/>
---
## Results
### The data
<style>
img.shadowy {
box-shadow: 0 0px 6px rgb(0 0 0 / 30%);
}
</style>
<div class="flex items-center">
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070293528288165930/peanut_cams.png" class="h-90"/>
<div class="flex-col inline-flex gap-5">
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070292853282054225/Image0000.png" class="w-100 shadowy"/>
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070292923322744903/Image0000.png" class="w-100 shadowy"/>
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070292853495975988/Image0010.png" class="w-100 shadowy"/>
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070292923549224980/Image0010.png" class="w-100 shadowy"/>
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070292853764407306/Image0020.png" class="w-100 shadowy"/>
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070292923754741770/Image0020.png" class="w-100 shadowy"/>
</div>
</div>
---
## Results
### Initialisation (Shape from Silhouette)
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070287482186383450/init.png" class="m-auto h-110">
---
## Results
### Borders
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070307308032233532/border.png" class="m-auto h-110">
---
## Results
### Voxel visibility
<div class="h-100 flex items-center">
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070312481894973460/ray.png" class="h-105 -ml-15 -mr-15">
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070312482243104839/selected.png" class="h-105 -ml-15">
</div>
---
## Results
### Level-set evolution
<div class="h-100 flex items-center">
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070319867946872912/evol7bis.gif" class="m-auto h-105">
<img src="https://cdn.discordapp.com/attachments/953586522572066826/1070319868324360252/shape7bis.gif" class="m-auto h-105">
</div>
---
## Conclusion
<div class="h-100 flex flex-col text-2xl justify-center">
### What we did
- Implemented SfS in 2D/3D
- Implemented level-set MVS in 2D with SfS initialisation
<br>
### Areas for improvement soon™
- Results in 3D
- $\{0, 1\} \to [0, 1]$
</div>