format: apply black formatting
This commit is contained in:
parent 7418e28988
commit 9c40024a90
@@ -1,25 +1,27 @@
 import bpy
 from mathutils import Matrix
 
-#---------------------------------------------------------------
+# ---------------------------------------------------------------
 # 3x4 P matrix from Blender camera
-#---------------------------------------------------------------
+# ---------------------------------------------------------------
 
 # BKE_camera_sensor_size
 def get_sensor_size(sensor_fit, sensor_x, sensor_y):
-    if sensor_fit == 'VERTICAL':
+    if sensor_fit == "VERTICAL":
         return sensor_y
     return sensor_x
 
+
 # BKE_camera_sensor_fit
 def get_sensor_fit(sensor_fit, size_x, size_y):
-    if sensor_fit == 'AUTO':
+    if sensor_fit == "AUTO":
         if size_x >= size_y:
-            return 'HORIZONTAL'
+            return "HORIZONTAL"
         else:
-            return 'VERTICAL'
+            return "VERTICAL"
     return sensor_fit
 
+
 # Build intrinsic camera parameters from Blender camera data
 #
 # See notes on this in
@@ -27,8 +29,8 @@ def get_sensor_fit(sensor_fit, size_x, size_y):
 # as well as
 # https://blender.stackexchange.com/a/120063/3581
 def get_calibration_matrix_K_from_blender(camd):
-    if camd.type != 'PERSP':
-        raise ValueError('Non-perspective cameras not supported')
+    if camd.type != "PERSP":
+        raise ValueError("Non-perspective cameras not supported")
     scene = bpy.context.scene
     f_in_mm = camd.lens
     scale = scene.render.resolution_percentage / 100
@@ -38,10 +40,10 @@ def get_calibration_matrix_K_from_blender(camd):
     sensor_fit = get_sensor_fit(
         camd.sensor_fit,
         scene.render.pixel_aspect_x * resolution_x_in_px,
-        scene.render.pixel_aspect_y * resolution_y_in_px
+        scene.render.pixel_aspect_y * resolution_y_in_px,
     )
     pixel_aspect_ratio = scene.render.pixel_aspect_y / scene.render.pixel_aspect_x
-    if sensor_fit == 'HORIZONTAL':
+    if sensor_fit == "HORIZONTAL":
         view_fac_in_px = resolution_x_in_px
     else:
         view_fac_in_px = pixel_aspect_ratio * resolution_y_in_px
@@ -54,12 +56,10 @@ def get_calibration_matrix_K_from_blender(camd):
     v_0 = resolution_y_in_px / 2 + camd.shift_y * view_fac_in_px / pixel_aspect_ratio
     skew = 0  # only use rectangular pixels
 
-    K = Matrix(
-        ((s_u, skew, u_0),
-        ( 0, s_v, v_0),
-        ( 0, 0, 1)))
+    K = Matrix(((s_u, skew, u_0), (0, s_v, v_0), (0, 0, 1)))
     return K
 
+
 # Returns camera rotation and translation matrices from Blender.
 #
 # There are 3 coordinate systems involved:
@@ -76,10 +76,7 @@ def get_calibration_matrix_K_from_blender(camd):
 #  - right-handed: positive z look-at direction
 def get_3x4_RT_matrix_from_blender(cam):
     # bcam stands for blender camera
-    R_bcam2cv = Matrix(
-        ((1, 0, 0),
-        (0, -1, 0),
-        (0, 0, -1)))
+    R_bcam2cv = Matrix(((1, 0, 0), (0, -1, 0), (0, 0, -1)))
 
     # Transpose since the rotation is object rotation,
     # and we want coordinate rotation
@@ -93,29 +90,29 @@ def get_3x4_RT_matrix_from_blender(cam):
     # Convert camera location to translation vector used in coordinate changes
     # T_world2bcam = -1*R_world2bcam @ cam.location
     # Use location from matrix_world to account for constraints:
-    T_world2bcam = -1*R_world2bcam @ location
+    T_world2bcam = -1 * R_world2bcam @ location
 
     # Build the coordinate transform matrix from world to computer vision camera
-    R_world2cv = R_bcam2cv@R_world2bcam
-    T_world2cv = R_bcam2cv@T_world2bcam
+    R_world2cv = R_bcam2cv @ R_world2bcam
+    T_world2cv = R_bcam2cv @ T_world2bcam
 
     # put into 3x4 matrix
-    RT = Matrix((
-        R_world2cv[0][:] + (T_world2cv[0],),
-        R_world2cv[1][:] + (T_world2cv[1],),
-        R_world2cv[2][:] + (T_world2cv[2],)
-        ))
+    RT = Matrix(
+        (R_world2cv[0][:] + (T_world2cv[0],), R_world2cv[1][:] + (T_world2cv[1],), R_world2cv[2][:] + (T_world2cv[2],))
+    )
     return RT
 
+
 def get_3x4_P_matrix_from_blender(cam):
     K = get_calibration_matrix_K_from_blender(cam.data)
     RT = get_3x4_RT_matrix_from_blender(cam)
-    return K@RT, K, RT
+    return K @ RT, K, RT
 
+
 # ----------------------------------------------------------
 if __name__ == "__main__":
     # Insert your camera name here
-    cam = bpy.data.objects['Camera']
+    cam = bpy.data.objects["Camera"]
     P, K, RT = get_3x4_P_matrix_from_blender(cam)
     print("K")
     print(K)
@@ -132,5 +129,5 @@ if __name__ == "__main__":
 
     # Bonus code: save the 3x4 P matrix into a plain text file
     # Don't forget to import numpy for this
-    #nP = numpy.matrix(P)
-    #numpy.savetxt("/tmp/P3x4.txt", nP) # to select precision, use e.g. fmt='%.2f'
+    # nP = numpy.matrix(P)
+    # numpy.savetxt("/tmp/P3x4.txt", nP)  # to select precision, use e.g. fmt='%.2f'
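(Not part of the commit: an illustrative usage sketch.) The script reformatted above builds P = K @ RT for a camera; a minimal way to exercise it from Blender's Python console, assuming a camera object named "Camera" as in the `__main__` block and an arbitrary test point:

```python
# Illustrative only, not from the repository: project one world-space point
# to pixel coordinates with the 3x4 matrix produced by the script above.
import bpy
from mathutils import Vector

cam = bpy.data.objects["Camera"]       # camera name as used in __main__
P, K, RT = get_3x4_P_matrix_from_blender(cam)

point = Vector((1.0, 0.5, 0.0, 1.0))   # arbitrary homogeneous test point
projected = P @ point                  # 3-vector (u*w, v*w, w)
u, v = projected[0] / projected[2], projected[1] / projected[2]
print(u, v)                            # pixel coordinates in the render
```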
@@ -54,14 +54,16 @@ for i, (phi, theta) in enumerate(poses):
 
     # save camera matrices
     with open(EXPORT_PATH / "cameras" / f"{i:04d}.pickle", "wb") as f:
-        pickle.dump({
-            "P": np.array(P),
-            "K": np.array(K),
-            "RT": np.array(RT),
-        }, f)
+        pickle.dump(
+            {
+                "P": np.array(P),
+                "K": np.array(K),
+                "RT": np.array(RT),
+            },
+            f,
+        )
     print(f"Saved camera matrices: {i:04d}.pickle")
 
     # render the frame
     bpy.context.scene.frame_current = i
     bpy.ops.render.render(write_still=False)
@@ -25,21 +25,33 @@ def update_border(voxel_values, idx=None):
     z_p1 = voxel_values[:, :, :-1]
     z_p1 = np.concatenate((np.zeros((z_p1.shape[0], z_p1.shape[1], 1)), z_p1), axis=2)
 
-    return np.logical_or.reduce((voxel_values != x_m1, voxel_values != x_p1,
-                                 voxel_values != y_m1, voxel_values != y_p1,
-                                 voxel_values != z_m1, voxel_values != z_p1))
+    return np.logical_or.reduce(
+        (
+            voxel_values != x_m1,
+            voxel_values != x_p1,
+            voxel_values != y_m1,
+            voxel_values != y_p1,
+            voxel_values != z_m1,
+            voxel_values != z_p1,
+        )
+    )
 
     # TODO: update only concidered voxels (idx)
 
 
-if __name__ == '__main__':
-    voxel_values = np.array([[[np.sqrt(x**2 + y**2 + z**2) < 20 for z in np.arange(-10, 10, 1.0)] for y in np.arange(-10, 10, 1.0)] for x in np.arange(-10, 10, 1.0)])
+if __name__ == "__main__":
+    voxel_values = np.array(
+        [
+            [[np.sqrt(x**2 + y**2 + z**2) < 20 for z in np.arange(-10, 10, 1.0)] for y in np.arange(-10, 10, 1.0)]
+            for x in np.arange(-10, 10, 1.0)
+        ]
+    )
 
     border = update_border(voxel_values)
 
     # Plot voxel grid that are the border
     fig = plt.figure()
-    ax = fig.add_subplot(111, projection='3d')
-    ax.scatter(np.where(voxel_values)[0], np.where(voxel_values)[1], np.where(voxel_values)[2], c='r', marker='o')
-    ax.scatter(np.where(border)[0], np.where(border)[1], np.where(border)[2], c='b', marker='o', s=1)
+    ax = fig.add_subplot(111, projection="3d")
+    ax.scatter(np.where(voxel_values)[0], np.where(voxel_values)[1], np.where(voxel_values)[2], c="r", marker="o")
+    ax.scatter(np.where(border)[0], np.where(border)[1], np.where(border)[2], c="b", marker="o", s=1)
     plt.show()
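(Not part of the diff.) For context, `update_border` above shifts the occupancy grid by one voxel along each axis and flags every voxel whose value differs from at least one of its six neighbours, i.e. the surface of the occupied region. A minimal 1-D sketch of the same shift-and-compare idea, with made-up data:

```python
import numpy as np

occ = np.array([0, 0, 1, 1, 1, 0, 0], dtype=bool)  # toy 1-D occupancy
left = np.concatenate(([False], occ[:-1]))          # neighbour on one side
right = np.concatenate((occ[1:], [False]))          # neighbour on the other side
border = (occ != left) | (occ != right)             # cells next to a transition
print(border.astype(int))                           # -> [0 1 1 0 1 1 0]
```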
src/fvi.py (58 changed lines)
@@ -53,8 +53,9 @@ def fast_voxel_intersect(start, end, origin, step, shape) -> tuple[list, list, l
     next_boundaries = np.divide(position + step * direction_signs, step)
     errored = np.abs(np.round(next_boundaries) - next_boundaries) < 1e-12
     next_boundaries[errored] = np.round(next_boundaries[errored])
-    distances = ((1 - is_negative) * np.floor(next_boundaries) +
-                 is_negative * np.ceil(next_boundaries)) * step - position
+    distances = (
+        (1 - is_negative) * np.floor(next_boundaries) + is_negative * np.ceil(next_boundaries)
+    ) * step - position
 
     # Determine the nearest boundary to be reached
     boundary_distances = np.abs(distances / direction)
@@ -71,8 +72,9 @@ def fast_voxel_intersect(start, end, origin, step, shape) -> tuple[list, list, l
         # print("position_update: ", position)
 
         # Correct position to be on boundary
-        position[clothest_boundary] = round(
-            position[clothest_boundary] / step[clothest_boundary]) * step[clothest_boundary]
+        position[clothest_boundary] = (
+            round(position[clothest_boundary] / step[clothest_boundary]) * step[clothest_boundary]
+        )
 
         # Get corresponding voxel
         on_boundary = np.mod(position, step) == 0
@@ -94,7 +96,7 @@ def fast_voxel_intersect(start, end, origin, step, shape) -> tuple[list, list, l
     return intersections, voxels, voxels_idx
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     import matplotlib.pyplot as plt
 
     def update_figure():
@@ -104,30 +106,42 @@ if __name__ == '__main__':
 
         # Plot hitted voxels
         for voxel in voxels:
-            plt.fill([voxel[0], voxel[0] + step[0], voxel[0] + step[0], voxel[0]],
-                     [voxel[1], voxel[1], voxel[1] + step[1], voxel[1] + step[1]],
-                     color='#e25', alpha=0.5)
+            plt.fill(
+                [voxel[0], voxel[0] + step[0], voxel[0] + step[0], voxel[0]],
+                [voxel[1], voxel[1], voxel[1] + step[1], voxel[1] + step[1]],
+                color="#e25",
+                alpha=0.5,
+            )
 
         for voxel_id in voxels_idx:
-            plt.fill([
-                origin[0] + voxel_id[0] * step[0], origin[0] + (voxel_id[0] + 1) * step[0],
-                origin[0] + (voxel_id[0] + 1) * step[0], origin[0] + voxel_id[0] * step[0]
-            ], [
-                origin[1] + voxel_id[1] * step[1], origin[1] + voxel_id[1] * step[1],
-                origin[1] + (voxel_id[1] + 1) * step[1], origin[1] + (voxel_id[1] + 1) * step[1]
-            ], color='#2e3', alpha=0.5)
+            plt.fill(
+                [
+                    origin[0] + voxel_id[0] * step[0],
+                    origin[0] + (voxel_id[0] + 1) * step[0],
+                    origin[0] + (voxel_id[0] + 1) * step[0],
+                    origin[0] + voxel_id[0] * step[0],
+                ],
+                [
+                    origin[1] + voxel_id[1] * step[1],
+                    origin[1] + voxel_id[1] * step[1],
+                    origin[1] + (voxel_id[1] + 1) * step[1],
+                    origin[1] + (voxel_id[1] + 1) * step[1],
+                ],
+                color="#2e3",
+                alpha=0.5,
+            )
 
         # Plot line segment
-        plt.plot([start[0], end[0]], [start[1], end[1]], 'k-')
-        plt.plot(start[0], start[1], 'go')
-        plt.plot(end[0], end[1], 'ro')
+        plt.plot([start[0], end[0]], [start[1], end[1]], "k-")
+        plt.plot(start[0], start[1], "go")
+        plt.plot(end[0], end[1], "ro")
 
         # Plot intersection points
         for pos in positions:
-            plt.plot(pos[0], pos[1], 'bo')
+            plt.plot(pos[0], pos[1], "bo")
 
         # Plot voxel grid
-        plt.axis('equal')
+        plt.axis("equal")
         plt.xlim((-10, 10))
         plt.ylim((-10, 10))
         plt.xticks(origin[0] + step[0] * np.arange(shape[0] + 1))
@@ -137,13 +151,13 @@ if __name__ == '__main__':
 
     def onkey(event):
         global start, end
-        if event.key == ' ':
+        if event.key == " ":
             start = np.random.rand(2) * 20 - 10
             end = np.random.rand(2) * 20 - 10
             update_figure()
 
     # Define voxel grid
-    origin = np.array([-5., -5.])
+    origin = np.array([-5.0, -5.0])
     step = np.array([0.7, 0.7])
     shape = (10, 10)
 
@@ -155,6 +169,6 @@ if __name__ == '__main__':
 
     # Plot
     fig = plt.figure()
-    fig.canvas.mpl_connect('key_press_event', onkey)
+    fig.canvas.mpl_connect("key_press_event", onkey)
    update_figure()
     plt.show()
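(Not part of the commit.) A minimal sketch of how `fast_voxel_intersect` is driven, using the 2-D grid parameters from the `__main__` demo above; the segment endpoints here are arbitrary and the function is assumed to be importable from src/fvi.py:

```python
import numpy as np
# assumes: from fvi import fast_voxel_intersect

origin = np.array([-5.0, -5.0])  # grid origin, as in the demo
step = np.array([0.7, 0.7])      # voxel size per axis
shape = (10, 10)                 # number of voxels per axis

start = np.array([-8.0, -2.0])   # arbitrary segment endpoints
end = np.array([6.0, 4.0])

intersections, voxels, voxels_idx = fast_voxel_intersect(start, end, origin, step, shape)
# intersections: boundary-crossing points along the segment
# voxels: lower corners of the traversed voxels; voxels_idx: their grid indices
```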
@@ -1,14 +1,10 @@
-import numpy as np
-import matplotlib.pyplot as plt
 from itertools import product
+
+import matplotlib.pyplot as plt
+import numpy as np
 
 
-def check_line_voxel(
-    px, py, pz,
-    dx, dy, dz,
-    vx, vy, vz,
-    c
-):
+def check_line_voxel(px, py, pz, dx, dy, dz, vx, vy, vz, c):
     """Check if a line intersects a voxel.
 
     Parameters:
@@ -17,7 +13,6 @@ def check_line_voxel(
     - vx, vy, vz: line direction coordinates
     - c: voxel size
     """
-
     # Compute the intersection bounds
     kx1 = (px - dx) / vx
     ky1 = (py - dy) / vy
@@ -27,54 +22,27 @@ def check_line_voxel(
     kz2 = (pz - dz + c) / vz
 
     # Order the bounds
-    kxmin = np.min(np.concatenate([
-        kx1[:, np.newaxis],
-        kx2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kymin = np.min(np.concatenate([
-        ky1[:, np.newaxis],
-        ky2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kzmin = np.min(np.concatenate([
-        kz1[:, np.newaxis],
-        kz2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kxmax = np.max(np.concatenate([
-        kx1[:, np.newaxis],
-        kx2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kymax = np.max(np.concatenate([
-        ky1[:, np.newaxis],
-        ky2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kzmax = np.max(np.concatenate([
-        kz1[:, np.newaxis],
-        kz2[:, np.newaxis]
-    ], axis=1), axis=1)
+    kxmin = np.min(np.concatenate([kx1[:, np.newaxis], kx2[:, np.newaxis]], axis=1), axis=1)
+    kymin = np.min(np.concatenate([ky1[:, np.newaxis], ky2[:, np.newaxis]], axis=1), axis=1)
+    kzmin = np.min(np.concatenate([kz1[:, np.newaxis], kz2[:, np.newaxis]], axis=1), axis=1)
+    kxmax = np.max(np.concatenate([kx1[:, np.newaxis], kx2[:, np.newaxis]], axis=1), axis=1)
+    kymax = np.max(np.concatenate([ky1[:, np.newaxis], ky2[:, np.newaxis]], axis=1), axis=1)
+    kzmax = np.max(np.concatenate([kz1[:, np.newaxis], kz2[:, np.newaxis]], axis=1), axis=1)
 
     # Check if the bounds overlap
-    kmax = np.min(np.concatenate([
-        kxmax[:, np.newaxis],
-        kymax[:, np.newaxis],
-        kzmax[:, np.newaxis]
-    ], axis=1), axis=1)
-    kmin = np.max(np.concatenate([
-        kxmin[:, np.newaxis],
-        kymin[:, np.newaxis],
-        kzmin[:, np.newaxis]
-    ], axis=1), axis=1)
+    kmax = np.min(np.concatenate([kxmax[:, np.newaxis], kymax[:, np.newaxis], kzmax[:, np.newaxis]], axis=1), axis=1)
+    kmin = np.max(np.concatenate([kxmin[:, np.newaxis], kymin[:, np.newaxis], kzmin[:, np.newaxis]], axis=1), axis=1)
     return kmin <= kmax
 
+
 c = 1.0
-points = np.array([[x, y, z] for x, y, z in product(
-    np.arange(-5.0, 4.0, c),
-    np.arange(-5.0, 4.0, c),
-    np.arange(-5.0, 4.0, c))
-])
+points = np.array(
+    [[x, y, z] for x, y, z in product(np.arange(-5.0, 4.0, c), np.arange(-5.0, 4.0, c), np.arange(-5.0, 4.0, c))]
+)
 while True:
 
     fig = plt.figure()
-    ax = plt.axes(projection='3d')
+    ax = plt.axes(projection="3d")
 
     d = np.random.rand(3) * 1 - 0.5
     v = np.random.rand(3) * 1 - 0.5
@@ -90,20 +58,20 @@ while True:
         if not bool_vect[i]:
             continue
 
-        ax.plot([px, px+c], [py, py], [pz, pz], 'b')
-        ax.plot([px, px+c], [py, py], [pz+c, pz+c], 'b')
-        ax.plot([px, px+c], [py+c, py+c], [pz, pz], 'b')
-        ax.plot([px, px+c], [py+c, py+c], [pz+c, pz+c], 'b')
-        ax.plot([px, px], [py, py+c], [pz, pz], 'b')
-        ax.plot([px, px], [py, py+c], [pz+c, pz+c], 'b')
-        ax.plot([px+c, px+c], [py, py+c], [pz, pz], 'b')
-        ax.plot([px+c, px+c], [py, py+c], [pz+c, pz+c], 'b')
-        ax.plot([px, px], [py, py], [pz, pz+c], 'b')
-        ax.plot([px, px], [py+c, py+c], [pz, pz+c], 'b')
-        ax.plot([px+c, px+c], [py, py], [pz, pz+c], 'b')
-        ax.plot([px+c, px+c], [py+c, py+c], [pz, pz+c], 'b')
+        ax.plot([px, px + c], [py, py], [pz, pz], "b")
+        ax.plot([px, px + c], [py, py], [pz + c, pz + c], "b")
+        ax.plot([px, px + c], [py + c, py + c], [pz, pz], "b")
+        ax.plot([px, px + c], [py + c, py + c], [pz + c, pz + c], "b")
+        ax.plot([px, px], [py, py + c], [pz, pz], "b")
+        ax.plot([px, px], [py, py + c], [pz + c, pz + c], "b")
+        ax.plot([px + c, px + c], [py, py + c], [pz, pz], "b")
+        ax.plot([px + c, px + c], [py, py + c], [pz + c, pz + c], "b")
+        ax.plot([px, px], [py, py], [pz, pz + c], "b")
+        ax.plot([px, px], [py + c, py + c], [pz, pz + c], "b")
+        ax.plot([px + c, px + c], [py, py], [pz, pz + c], "b")
+        ax.plot([px + c, px + c], [py + c, py + c], [pz, pz + c], "b")
 
     # plot line
-    ax.plot([dx, dx+vx], [dy, dy+vy], [dz, dz+vz], 'g')
+    ax.plot([dx, dx + vx], [dy, dy + vy], [dz, dz + vz], "g")
 
     plt.show()
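(Not part of the diff.) The reflowed block above is the standard slab test: for each axis the two boundary parameters are ordered into a per-axis [kmin, kmax] interval, and the line hits the voxel exactly when the three intervals overlap, i.e.

    max(kxmin, kymin, kzmin) <= min(kxmax, kymax, kzmax)

which is the final `return kmin <= kmax` comparison.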
src/main.py (59 changed lines)
@@ -17,10 +17,16 @@ Z_MIN, Z_MAX = -0.1, 0.1
 
 nb_frame = 24
 
-points = np.array([[x, y, z, 1.0] for x, y, z in product(
-    np.arange(X_MIN, X_MAX, VOXEL_SIZE),
-    np.arange(Y_MIN, Y_MAX, VOXEL_SIZE),
-    np.arange(Z_MIN, Z_MAX, VOXEL_SIZE))])
+points = np.array(
+    [
+        [x, y, z, 1.0]
+        for x, y, z in product(
+            np.arange(X_MIN, X_MAX, VOXEL_SIZE),
+            np.arange(Y_MIN, Y_MAX, VOXEL_SIZE),
+            np.arange(Z_MIN, Z_MAX, VOXEL_SIZE),
+        )
+    ]
+)
 
 mask = 255
 
@@ -29,10 +35,10 @@ proj_mats = []
 frames = []
 
 for k in range(nb_frame):
-    frame = cv2.imread(f'data/torus/masks/Image{k:04}.png', cv2.IMREAD_GRAYSCALE)
-    frames.append(cv2.imread(f'data/torus/images/Image{k:04}.png', cv2.IMREAD_GRAYSCALE))
+    frame = cv2.imread(f"data/torus/masks/Image{k:04}.png", cv2.IMREAD_GRAYSCALE)
+    frames.append(cv2.imread(f"data/torus/images/Image{k:04}.png", cv2.IMREAD_GRAYSCALE))
 
-    with open(f"data/torus/cameras/{k:04d}.pickle", 'rb') as file:
+    with open(f"data/torus/cameras/{k:04d}.pickle", "rb") as file:
         matrices = pickle.load(file)
         proj_mat = matrices["P"]
         proj_mats.append(proj_mat)
@@ -40,16 +46,23 @@ for k in range(nb_frame):
     positions.append(position)
 
     cam_points = proj_mat @ points.T
-    cam_points /= cam_points[2,:]
+    cam_points /= cam_points[2, :]
     cam_points = np.round(cam_points).astype(np.int32)
 
-    visible = np.logical_and.reduce((0 <= cam_points[0,:], cam_points[0,:] < frame.shape[1], 0 <= cam_points[1,:], cam_points[1,:] < frame.shape[0]))
-    cam_points = cam_points[:,visible]
-    points = points[visible,:]
+    visible = np.logical_and.reduce(
+        (
+            0 <= cam_points[0, :],
+            cam_points[0, :] < frame.shape[1],
+            0 <= cam_points[1, :],
+            cam_points[1, :] < frame.shape[0],
+        )
+    )
+    cam_points = cam_points[:, visible]
+    points = points[visible, :]
 
-    solid = frame[cam_points[1,:],cam_points[0,:]] == mask
-    cam_points = cam_points[:,solid]
-    points = points[solid,:]
+    solid = frame[cam_points[1, :], cam_points[0, :]] == mask
+    cam_points = cam_points[:, solid]
+    points = points[solid, :]
 
     # for cam_point in cam_points.T:
    #     cv2.circle(frame, (cam_point[0], cam_point[1]), 2, (255*is_in[k], 0, 255*(not is_in[k])))
@@ -73,9 +86,15 @@ for k in range(nb_frame):
     # cv2.waitKey(0)
 
 
-voxel = np.zeros((int((X_MAX-X_MIN)/VOXEL_SIZE + 1), int((Y_MAX-Y_MIN)/VOXEL_SIZE + 1), int((Z_MAX-Z_MIN)/VOXEL_SIZE + 1)))
+voxel = np.zeros(
+    (
+        int((X_MAX - X_MIN) / VOXEL_SIZE + 1),
+        int((Y_MAX - Y_MIN) / VOXEL_SIZE + 1),
+        int((Z_MAX - Z_MIN) / VOXEL_SIZE + 1),
+    )
+)
 idx = np.floor_divide(points[:, :3] - np.array([X_MIN, Y_MIN, Z_MIN]), VOXEL_SIZE).astype(int)
-voxel[idx[:,0], idx[:,1], idx[:,2]] = 1
+voxel[idx[:, 0], idx[:, 1], idx[:, 2]] = 1
 
 border = update_border(voxel)
 
@@ -88,14 +107,15 @@ border = update_border(voxel)
 
 origin = np.array([X_MIN, Y_MIN, Z_MIN])
 step = np.array([VOXEL_SIZE, VOXEL_SIZE, VOXEL_SIZE])
-shape = np.array([int((X_MAX-X_MIN)/VOXEL_SIZE), int((Y_MAX-Y_MIN)/VOXEL_SIZE), int((Z_MAX-Z_MIN)/VOXEL_SIZE)])
+shape = np.array(
+    [int((X_MAX - X_MIN) / VOXEL_SIZE), int((Y_MAX - Y_MIN) / VOXEL_SIZE), int((Z_MAX - Z_MIN) / VOXEL_SIZE)]
+)
 
 for idx in track(np.argwhere(border)):
     # coordonnées du centre du voxel
-    start = np.array([
-        X_MIN + (idx[0] + 0.5) * VOXEL_SIZE,
-        Y_MIN + (idx[1] + 0.5) * VOXEL_SIZE,
-        Z_MIN + (idx[2] + 0.5) * VOXEL_SIZE])
+    start = np.array(
+        [X_MIN + (idx[0] + 0.5) * VOXEL_SIZE, Y_MIN + (idx[1] + 0.5) * VOXEL_SIZE, Z_MIN + (idx[2] + 0.5) * VOXEL_SIZE]
+    )
 
     # array qui contiendra les nuances de gris des frames qui voient le voxel
     values = []
@@ -130,6 +150,5 @@ for idx in track(np.argwhere(border)):
         voxel[idx[0], idx[1], idx[2]] = 0
 
 
-
 vertices, triangles = mcubes.marching_cubes(voxel, 0)
 mcubes.export_obj(vertices, triangles, "result.obj")
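(Not part of the diff.) For reference, the visibility test reformatted above projects each homogeneous voxel point x = (X, Y, Z, 1)^T with the 3x4 matrix P and then dehomogenizes:

    (u', v', w)^T = P @ x,   (u, v) = (u' / w, v' / w)

which is what `cam_points = proj_mat @ points.T` followed by `cam_points /= cam_points[2, :]` implements; points are kept only if (u, v) falls inside the frame and onto the mask value.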
@@ -12,23 +12,25 @@ def matrices_reader(path: str) -> list[np.ndarray]:
         list[np.ndarray]: list of projection matrix
     """
 
-    with open(path, 'r') as f:
+    with open(path, "r") as f:
         lines = f.readlines()
 
     k = 0
     world_matrices = []
-    while k+3 < len(lines):
+    while k + 3 < len(lines):
         # Match matrices one by one
         mat_str = ""
-        for line in lines[k:k+4]:
+        for line in lines[k : k + 4]:
             mat_str += line
         float_reg = r"(-|\d|\.|e)+"
         res = re.search(
-            f"Matrix\(\(\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\),\n +\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\),\n\ +\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\)\)\)", mat_str)
+            f"Matrix\(\(\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\),\n +\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\),\n\ +\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\)\)\)",
+            mat_str,
+        )
 
         # Convert string to np.ndarray
-        values = [float(res.group(i)) for i in range(1,len(res.groups()) + 1, 2)]
-        world_mat = np.array([[values[4*i + j] for j in range(4)] for i in range(3)])
+        values = [float(res.group(i)) for i in range(1, len(res.groups()) + 1, 2)]
+        world_mat = np.array([[values[4 * i + j] for j in range(4)] for i in range(3)])
         world_matrices.append(world_mat)
 
         k += 4
@@ -4,8 +4,7 @@ nb_frame = 24
 
 for k in range(nb_frame):
-
-    with open(f"/tmp/cameras/{k:04d}.pickle", 'rb') as file:
+    with open(f"/tmp/cameras/{k:04d}.pickle", "rb") as file:
         proj_mat = pickle.load(file)["P"]
 
     print(k, proj_mat)
 