commit 69ea7232dc (parent f3292494bb)
Co-authored-by: Damien Guillotin <damguillotin@gmail.com>
Co-authored-by: pejour <pejour@users.noreply.github.com>
@@ -53,7 +53,7 @@ $\mathcal{V}_t = \{ \textbf{Q} \in \mathbb{R}^3, u(\textbf{Q}) = t \}, \quad t\i
 <img src="figs/lvl7_2D.png" class="m-auto h-110"/>
 
-<span class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">Variational principles, surface evolution, PDEs, level set methods, and the stereo problem - Olivier Faugeras, Renaud Keriven, 1998</span>
+<a href="https://hal.inria.fr/inria-00073673/document" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">Variational principles, surface evolution, PDEs, level set methods, and the stereo problem - Olivier Faugeras, Renaud Keriven, 1998</a>
 
 ---
@@ -67,18 +67,34 @@ $\mathcal{V}_t = \{ \textbf{Q} \in \mathbb{R}^3, u(\textbf{Q}) = t \}, \quad t\i
 ## Volume update
 
-consensus blah blah
+- Selection of the voxels on the border of the "marble"
+- Check that the voxel is visible from every camera
+- Retrieval of the colors / gray levels seen by the cameras
+- Consensus on the color / gray level of the voxel under study
+- Fill if there is a consensus, carve otherwise (see the sketch below)
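A minimal sketch of the update loop listed above, as an editor's illustration only: the occupancy grid `marble`, camera objects with a `gray_level(voxel_index)` method returning the observed gray level or `None` when the voxel is hidden, and the agreement `threshold` are all assumed names, not the repository's actual API.

```python
import numpy as np

def update_marble(marble, cameras, threshold=0.1):
    """One consensus pass over the border voxels of the occupancy grid `marble`."""
    filled = marble > 0.5
    # border voxels: filled voxels with at least one empty 6-neighbour (edges wrap, fine for a sketch)
    neighbours_filled = np.all(
        [np.roll(filled, shift, axis=axis) for axis in range(3) for shift in (-1, 1)],
        axis=0,
    )
    border = filled & ~neighbours_filled
    for idx in np.argwhere(border):
        # gray levels seen by the cameras from which the voxel is visible
        samples = [cam.gray_level(idx) for cam in cameras]
        samples = [s for s in samples if s is not None]
        if not samples:
            continue
        if np.std(samples) < threshold:
            marble[tuple(idx)] = 1.0  # consensus reached: keep the voxel filled
        else:
            marble[tuple(idx)] = 0.0  # no consensus: carve the voxel out
    return marble
```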
 ---
 
 ## Volume initialisation
 
-ones, or else shape from silhouette
+- Definition of a bounding box:
+  - $x \in [-1, 1]$
+  - $y \in [-1, 1]$
+  - $z \in [-1, 1]$
+- Definition of a resolution (a step): $5 \times 10^{-2}$
+- Initialisation of the marble's values (see the sketch below):
+  - Uniform (`np.zeros` or `np.ones`)
+  - Shape From Silhouette (BE4)
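A minimal sketch of this initialisation, again as an editor's illustration: the grid names and the `in_silhouette(cam, x, y, z)` predicate for the BE4 variant are assumptions, since the actual Shape From Silhouette code is not part of this diff.

```python
import numpy as np

# bounding box [-1, 1]^3 sampled with a step of 5e-2, as listed above
step = 5e-2
xs = np.arange(-1, 1 + step, step)
ys = np.arange(-1, 1 + step, step)
zs = np.arange(-1, 1 + step, step)

# uniform initialisation: start from a full block ("ones") or an empty one ("zeros")
marble = np.ones((xs.size, ys.size, zs.size))

# Shape From Silhouette variant (BE4): keep a voxel only if it projects inside
# every camera's silhouette mask, here via a hypothetical in_silhouette predicate
def shape_from_silhouette(cameras, in_silhouette):
    grid = np.zeros((xs.size, ys.size, zs.size))
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            for k, z in enumerate(zs):
                grid[i, j, k] = all(in_silhouette(cam, x, y, z) for cam in cameras)
    return grid
```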
 ---
 
 ## Shape from Silhouette example
 
+<img src="https://www.researchgate.net/profile/Silvio-Savarese/publication/221625880/figure/fig1/AS:652956261158913@1532688312594/Shape-from-Silhouettes-The-silhouette-and-camera-location-for-each-view-forms-a-cone.png" class="m-auto mt-2 h-110">
+
+<a href="https://ieeexplore.ieee.org/document/1024034" class="absolute bottom-0 font-extralight mb-1 mr-2 right-0 text-xs">Implementation of a Shadow Carving System for Shape Capture, doi: 10.1109/TDPVT.2002.1024034</a>
 
 ---
 
 ## Raytracing
src/draw.py (51 changed lines)
@@ -19,7 +19,11 @@ Z *= np.random.rand(*Z.shape)
 for i, x in enumerate(x_vals):
     for j, y in enumerate(y_vals):
-        color = f"#{hex(int(Z[j, i] * 255))[2:]}{hex(int(Z[j, i] * 255))[2:]}{hex(int(Z[j, i] * 255))[2:]}"
+        color = f"{hex(int(Z[j, i] * 255))[2:]}"  # gray level of the cell as a hex token
+        if color == "0":
+            color = "#f00"  # cells at gray level 0 are shown in red
+        else:
+            color = "#" + 3 * color  # repeat the token to build a gray hex color
         plt.fill([x, x + 0.1, x + 0.1, x], [y, y, y + 0.1, y + 0.1], color=color)
@@ -44,16 +48,43 @@ for i in range(nb_cams):
 plt.plot(x[0, :], x[1, :], "r-")
 
-# draw 1d image of the scene for each camera
-for i in range(nb_cams):
-    # sort pixels by distance to camera
-    cam_pose = cam_poses[i]
-    pixels_dist = np.linalg.norm(np.array([X.flatten(), Y.flatten()]).T - cam_pose, axis=1)
-    X_ = X.flatten()[np.argsort(pixels_dist)]
-    Y_ = Y.flatten()[np.argsort(pixels_dist)]
 
 plt.xlim(-7, 7)
 plt.ylim(-7, 7)
 plt.axis("equal")
 
+# draw 1d image of the scene for each camera
+for i in range(2):
+    plt.figure()
+    # sort pixels by distance to camera, farthest first (painter's algorithm)
+    cam_pose = cam_poses[i]
+    pixels_dist = np.linalg.norm(np.array([X.flatten(), Y.flatten()]).T - cam_pose, axis=1)
+    pixels_sort = np.argsort(pixels_dist)[::-1]
+
+    # project the scene corners onto the 1d image to draw a red background strip
+    px = np.array([[-5, -5, 1], [5, -5, 1], [5, 5, 1], [-5, 5, 1]]).T
+    px = np.linalg.inv(cam2world_projs[i]) @ px
+    px /= px[1, :]  # perspective division: the image coordinate ends up in row 0
+
+    x0 = px[0, :].min()
+    x1 = px[0, :].max()
+
+    plt.fill([x0, x1, x1, x0], [0, 0, 1, 1], color="r")
+
+    # paint each non-empty cell, back to front, as a 1d segment
+    for j in pixels_sort:
+        x, y = X.flatten()[j], Y.flatten()[j]
+        color = f"{hex(int(Z.flatten()[j] * 255))[2:]}"
+        if color == "0":
+            continue  # skip empty cells
+        color = "#" + 3 * color
+
+        px = np.array([[x, y, 1], [x + 0.1, y, 1], [x + 0.1, y + 0.1, 1], [x, y + 0.1, 1]]).T
+        px = np.linalg.inv(cam2world_projs[i]) @ px
+        px /= px[1, :]
+
+        x0 = px[0, :].min()
+        x1 = px[0, :].max()
+
+        plt.fill([x0, x1, x1, x0], [0, 0, 1, 1], color=color)
+    plt.axis("equal")
 
 plt.show()