format: apply black formatting
parent 7418e28988
commit 9c40024a90
@@ -1,25 +1,27 @@
 import bpy
 from mathutils import Matrix
 
-#---------------------------------------------------------------
+# ---------------------------------------------------------------
 # 3x4 P matrix from Blender camera
-#---------------------------------------------------------------
+# ---------------------------------------------------------------
 
 # BKE_camera_sensor_size
 def get_sensor_size(sensor_fit, sensor_x, sensor_y):
-    if sensor_fit == 'VERTICAL':
+    if sensor_fit == "VERTICAL":
         return sensor_y
     return sensor_x
 
+
 # BKE_camera_sensor_fit
 def get_sensor_fit(sensor_fit, size_x, size_y):
-    if sensor_fit == 'AUTO':
+    if sensor_fit == "AUTO":
         if size_x >= size_y:
-            return 'HORIZONTAL'
+            return "HORIZONTAL"
         else:
-            return 'VERTICAL'
+            return "VERTICAL"
     return sensor_fit
 
+
 # Build intrinsic camera parameters from Blender camera data
 #
 # See notes on this in
@@ -27,8 +29,8 @@ def get_sensor_fit(sensor_fit, size_x, size_y):
 # as well as
 # https://blender.stackexchange.com/a/120063/3581
 def get_calibration_matrix_K_from_blender(camd):
-    if camd.type != 'PERSP':
-        raise ValueError('Non-perspective cameras not supported')
+    if camd.type != "PERSP":
+        raise ValueError("Non-perspective cameras not supported")
     scene = bpy.context.scene
     f_in_mm = camd.lens
     scale = scene.render.resolution_percentage / 100
@@ -38,10 +40,10 @@ def get_calibration_matrix_K_from_blender(camd):
     sensor_fit = get_sensor_fit(
         camd.sensor_fit,
         scene.render.pixel_aspect_x * resolution_x_in_px,
-        scene.render.pixel_aspect_y * resolution_y_in_px
+        scene.render.pixel_aspect_y * resolution_y_in_px,
     )
     pixel_aspect_ratio = scene.render.pixel_aspect_y / scene.render.pixel_aspect_x
-    if sensor_fit == 'HORIZONTAL':
+    if sensor_fit == "HORIZONTAL":
         view_fac_in_px = resolution_x_in_px
     else:
         view_fac_in_px = pixel_aspect_ratio * resolution_y_in_px
@@ -52,14 +54,12 @@ def get_calibration_matrix_K_from_blender(camd):
     # Parameters of intrinsic calibration matrix K
     u_0 = resolution_x_in_px / 2 - camd.shift_x * view_fac_in_px
     v_0 = resolution_y_in_px / 2 + camd.shift_y * view_fac_in_px / pixel_aspect_ratio
     skew = 0  # only use rectangular pixels
 
-    K = Matrix(
-        ((s_u, skew, u_0),
-        ( 0, s_v, v_0),
-        ( 0, 0, 1)))
+    K = Matrix(((s_u, skew, u_0), (0, s_v, v_0), (0, 0, 1)))
     return K
 
+
 # Returns camera rotation and translation matrices from Blender.
 #
 # There are 3 coordinate systems involved:
@@ -76,10 +76,7 @@ def get_calibration_matrix_K_from_blender(camd):
 # - right-handed: positive z look-at direction
 def get_3x4_RT_matrix_from_blender(cam):
     # bcam stands for blender camera
-    R_bcam2cv = Matrix(
-        ((1, 0, 0),
-        (0, -1, 0),
-        (0, 0, -1)))
+    R_bcam2cv = Matrix(((1, 0, 0), (0, -1, 0), (0, 0, -1)))
 
     # Transpose since the rotation is object rotation,
     # and we want coordinate rotation
@@ -93,29 +90,29 @@ def get_3x4_RT_matrix_from_blender(cam):
     # Convert camera location to translation vector used in coordinate changes
     # T_world2bcam = -1*R_world2bcam @ cam.location
     # Use location from matrix_world to account for constraints:
-    T_world2bcam = -1*R_world2bcam @ location
+    T_world2bcam = -1 * R_world2bcam @ location
 
     # Build the coordinate transform matrix from world to computer vision camera
-    R_world2cv = R_bcam2cv@R_world2bcam
-    T_world2cv = R_bcam2cv@T_world2bcam
+    R_world2cv = R_bcam2cv @ R_world2bcam
+    T_world2cv = R_bcam2cv @ T_world2bcam
 
     # put into 3x4 matrix
-    RT = Matrix((
-        R_world2cv[0][:] + (T_world2cv[0],),
-        R_world2cv[1][:] + (T_world2cv[1],),
-        R_world2cv[2][:] + (T_world2cv[2],)
-    ))
+    RT = Matrix(
+        (R_world2cv[0][:] + (T_world2cv[0],), R_world2cv[1][:] + (T_world2cv[1],), R_world2cv[2][:] + (T_world2cv[2],))
+    )
     return RT
 
+
 def get_3x4_P_matrix_from_blender(cam):
     K = get_calibration_matrix_K_from_blender(cam.data)
     RT = get_3x4_RT_matrix_from_blender(cam)
-    return K@RT, K, RT
+    return K @ RT, K, RT
 
+
 # ----------------------------------------------------------
 if __name__ == "__main__":
     # Insert your camera name here
-    cam = bpy.data.objects['Camera']
+    cam = bpy.data.objects["Camera"]
     P, K, RT = get_3x4_P_matrix_from_blender(cam)
     print("K")
     print(K)
@@ -132,5 +129,5 @@ if __name__ == "__main__":
 
     # Bonus code: save the 3x4 P matrix into a plain text file
     # Don't forget to import numpy for this
-    #nP = numpy.matrix(P)
-    #numpy.savetxt("/tmp/P3x4.txt", nP) # to select precision, use e.g. fmt='%.2f'
+    # nP = numpy.matrix(P)
+    # numpy.savetxt("/tmp/P3x4.txt", nP)  # to select precision, use e.g. fmt='%.2f'
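Side note (not part of the commit): the 3x4 matrix P returned by get_3x4_P_matrix_from_blender above is the product K @ RT, so it maps homogeneous world points to homogeneous pixel coordinates. A minimal sketch of how it could be used from Blender's Python console, assuming a camera object named "Camera" and an arbitrary test point:

    import numpy as np

    P, K, RT = get_3x4_P_matrix_from_blender(bpy.data.objects["Camera"])
    P = np.array(P)
    X = np.array([1.0, 0.5, -2.0, 1.0])  # homogeneous world point (x, y, z, 1), made up for the example
    u, v, w = P @ X                      # homogeneous image coordinates
    px, py = u / w, v / w                # pixel coordinates after the perspective divide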
@@ -54,14 +54,16 @@ for i, (phi, theta) in enumerate(poses):
 
     # save camera matrices
     with open(EXPORT_PATH / "cameras" / f"{i:04d}.pickle", "wb") as f:
-        pickle.dump({
-            "P": np.array(P),
-            "K": np.array(K),
-            "RT": np.array(RT),
-        }, f)
+        pickle.dump(
+            {
+                "P": np.array(P),
+                "K": np.array(K),
+                "RT": np.array(RT),
+            },
+            f,
+        )
     print(f"Saved camera matrices: {i:04d}.pickle")
 
     # render the frame
     bpy.context.scene.frame_current = i
     bpy.ops.render.render(write_still=False)
@@ -25,21 +25,33 @@ def update_border(voxel_values, idx=None):
     z_p1 = voxel_values[:, :, :-1]
     z_p1 = np.concatenate((np.zeros((z_p1.shape[0], z_p1.shape[1], 1)), z_p1), axis=2)
 
-    return np.logical_or.reduce((voxel_values != x_m1, voxel_values != x_p1,
-                                 voxel_values != y_m1, voxel_values != y_p1,
-                                 voxel_values != z_m1, voxel_values != z_p1))
+    return np.logical_or.reduce(
+        (
+            voxel_values != x_m1,
+            voxel_values != x_p1,
+            voxel_values != y_m1,
+            voxel_values != y_p1,
+            voxel_values != z_m1,
+            voxel_values != z_p1,
+        )
+    )
     # TODO: update only concidered voxels (idx)
 
 
-if __name__ == '__main__':
-    voxel_values = np.array([[[np.sqrt(x**2 + y**2 + z**2) < 20 for z in np.arange(-10, 10, 1.0)] for y in np.arange(-10, 10, 1.0)] for x in np.arange(-10, 10, 1.0)])
+if __name__ == "__main__":
+    voxel_values = np.array(
+        [
+            [[np.sqrt(x**2 + y**2 + z**2) < 20 for z in np.arange(-10, 10, 1.0)] for y in np.arange(-10, 10, 1.0)]
+            for x in np.arange(-10, 10, 1.0)
+        ]
+    )
 
     border = update_border(voxel_values)
 
     # Plot voxel grid that are the border
     fig = plt.figure()
-    ax = fig.add_subplot(111, projection='3d')
-    ax.scatter(np.where(voxel_values)[0], np.where(voxel_values)[1], np.where(voxel_values)[2], c='r', marker='o')
-    ax.scatter(np.where(border)[0], np.where(border)[1], np.where(border)[2], c='b', marker='o', s=1)
+    ax = fig.add_subplot(111, projection="3d")
+    ax.scatter(np.where(voxel_values)[0], np.where(voxel_values)[1], np.where(voxel_values)[2], c="r", marker="o")
+    ax.scatter(np.where(border)[0], np.where(border)[1], np.where(border)[2], c="b", marker="o", s=1)
     plt.show()
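Side note (not part of the commit): update_border compares the occupancy grid with zero-padded copies shifted by one voxel along each axis and ORs the six inequalities, so a voxel is flagged whenever any of its six neighbours differs from it. A rough single-axis illustration of the same idea (toy data, np.roll used here instead of the np.concatenate padding in the real function):

    import numpy as np

    occ = np.zeros((4, 4, 4), dtype=bool)
    occ[1:3, 1:3, 1:3] = True          # a small solid block
    x_m1 = np.roll(occ, 1, axis=0)     # neighbour at x - 1
    x_m1[0, :, :] = False              # emulate the zero padding at the grid edge
    border_x = occ != x_m1             # True where a voxel and its -x neighbour disagree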
src/fvi.py
@@ -53,8 +53,9 @@ def fast_voxel_intersect(start, end, origin, step, shape) -> tuple[list, list, l
     next_boundaries = np.divide(position + step * direction_signs, step)
     errored = np.abs(np.round(next_boundaries) - next_boundaries) < 1e-12
     next_boundaries[errored] = np.round(next_boundaries[errored])
-    distances = ((1 - is_negative) * np.floor(next_boundaries) +
-                 is_negative * np.ceil(next_boundaries)) * step - position
+    distances = (
+        (1 - is_negative) * np.floor(next_boundaries) + is_negative * np.ceil(next_boundaries)
+    ) * step - position
 
     # Determine the nearest boundary to be reached
     boundary_distances = np.abs(distances / direction)
@@ -71,8 +72,9 @@ def fast_voxel_intersect(start, end, origin, step, shape) -> tuple[list, list, l
         # print("position_update: ", position)
 
         # Correct position to be on boundary
-        position[clothest_boundary] = round(
-            position[clothest_boundary] / step[clothest_boundary]) * step[clothest_boundary]
+        position[clothest_boundary] = (
+            round(position[clothest_boundary] / step[clothest_boundary]) * step[clothest_boundary]
+        )
 
         # Get corresponding voxel
         on_boundary = np.mod(position, step) == 0
@@ -94,7 +96,7 @@ def fast_voxel_intersect(start, end, origin, step, shape) -> tuple[list, list, l
     return intersections, voxels, voxels_idx
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     import matplotlib.pyplot as plt
 
     def update_figure():
@@ -104,30 +106,42 @@ if __name__ == '__main__':
 
         # Plot hitted voxels
         for voxel in voxels:
-            plt.fill([voxel[0], voxel[0] + step[0], voxel[0] + step[0], voxel[0]],
-                     [voxel[1], voxel[1], voxel[1] + step[1], voxel[1] + step[1]],
-                     color='#e25', alpha=0.5)
+            plt.fill(
+                [voxel[0], voxel[0] + step[0], voxel[0] + step[0], voxel[0]],
+                [voxel[1], voxel[1], voxel[1] + step[1], voxel[1] + step[1]],
+                color="#e25",
+                alpha=0.5,
+            )
 
         for voxel_id in voxels_idx:
-            plt.fill([
-                origin[0] + voxel_id[0] * step[0], origin[0] + (voxel_id[0] + 1) * step[0],
-                origin[0] + (voxel_id[0] + 1) * step[0], origin[0] + voxel_id[0] * step[0]
-            ], [
-                origin[1] + voxel_id[1] * step[1], origin[1] + voxel_id[1] * step[1],
-                origin[1] + (voxel_id[1] + 1) * step[1], origin[1] + (voxel_id[1] + 1) * step[1]
-            ], color='#2e3', alpha=0.5)
+            plt.fill(
+                [
+                    origin[0] + voxel_id[0] * step[0],
+                    origin[0] + (voxel_id[0] + 1) * step[0],
+                    origin[0] + (voxel_id[0] + 1) * step[0],
+                    origin[0] + voxel_id[0] * step[0],
+                ],
+                [
+                    origin[1] + voxel_id[1] * step[1],
+                    origin[1] + voxel_id[1] * step[1],
+                    origin[1] + (voxel_id[1] + 1) * step[1],
+                    origin[1] + (voxel_id[1] + 1) * step[1],
+                ],
+                color="#2e3",
+                alpha=0.5,
+            )
 
         # Plot line segment
-        plt.plot([start[0], end[0]], [start[1], end[1]], 'k-')
-        plt.plot(start[0], start[1], 'go')
-        plt.plot(end[0], end[1], 'ro')
+        plt.plot([start[0], end[0]], [start[1], end[1]], "k-")
+        plt.plot(start[0], start[1], "go")
+        plt.plot(end[0], end[1], "ro")
 
         # Plot intersection points
         for pos in positions:
-            plt.plot(pos[0], pos[1], 'bo')
+            plt.plot(pos[0], pos[1], "bo")
 
         # Plot voxel grid
-        plt.axis('equal')
+        plt.axis("equal")
         plt.xlim((-10, 10))
         plt.ylim((-10, 10))
         plt.xticks(origin[0] + step[0] * np.arange(shape[0] + 1))
@@ -137,13 +151,13 @@ if __name__ == '__main__':
 
     def onkey(event):
         global start, end
-        if event.key == ' ':
+        if event.key == " ":
             start = np.random.rand(2) * 20 - 10
             end = np.random.rand(2) * 20 - 10
             update_figure()
 
     # Define voxel grid
-    origin = np.array([-5., -5.])
+    origin = np.array([-5.0, -5.0])
    step = np.array([0.7, 0.7])
    shape = (10, 10)
 
@@ -155,6 +169,6 @@ if __name__ == '__main__':
 
     # Plot
     fig = plt.figure()
-    fig.canvas.mpl_connect('key_press_event', onkey)
+    fig.canvas.mpl_connect("key_press_event", onkey)
     update_figure()
     plt.show()
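Side note (not part of the commit): a minimal sketch of calling fast_voxel_intersect with the same grid parameters as the interactive demo above. The import path is an assumption; adjust it to wherever src/fvi.py is importable from.

    import numpy as np
    from fvi import fast_voxel_intersect  # assumed import path

    origin = np.array([-5.0, -5.0])
    step = np.array([0.7, 0.7])
    shape = (10, 10)
    start = np.array([-4.0, -3.0])  # made-up endpoints instead of the demo's random ones
    end = np.array([3.0, 2.0])
    intersections, voxels, voxels_idx = fast_voxel_intersect(start, end, origin, step, shape)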
@@ -1,23 +1,18 @@
-import numpy as np
-import matplotlib.pyplot as plt
 from itertools import product
 
+import matplotlib.pyplot as plt
+import numpy as np
+
 
-def check_line_voxel(
-    px, py, pz,
-    dx, dy, dz,
-    vx, vy, vz,
-    c
-):
+def check_line_voxel(px, py, pz, dx, dy, dz, vx, vy, vz, c):
     """Check if a line intersects a voxel.
 
     Parameters:
     - px, py, pz: voxel coner coordinates
     - dx, dy, dz: line origin coordinates
     - vx, vy, vz: line direction coordinates
     - c: voxel size
     """
 
     # Compute the intersection bounds
     kx1 = (px - dx) / vx
     ky1 = (py - dy) / vy
@@ -27,54 +22,27 @@ def check_line_voxel(
     kz2 = (pz - dz + c) / vz
 
     # Order the bounds
-    kxmin = np.min(np.concatenate([
-        kx1[:, np.newaxis],
-        kx2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kymin = np.min(np.concatenate([
-        ky1[:, np.newaxis],
-        ky2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kzmin = np.min(np.concatenate([
-        kz1[:, np.newaxis],
-        kz2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kxmax = np.max(np.concatenate([
-        kx1[:, np.newaxis],
-        kx2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kymax = np.max(np.concatenate([
-        ky1[:, np.newaxis],
-        ky2[:, np.newaxis]
-    ], axis=1), axis=1)
-    kzmax = np.max(np.concatenate([
-        kz1[:, np.newaxis],
-        kz2[:, np.newaxis]
-    ], axis=1), axis=1)
+    kxmin = np.min(np.concatenate([kx1[:, np.newaxis], kx2[:, np.newaxis]], axis=1), axis=1)
+    kymin = np.min(np.concatenate([ky1[:, np.newaxis], ky2[:, np.newaxis]], axis=1), axis=1)
+    kzmin = np.min(np.concatenate([kz1[:, np.newaxis], kz2[:, np.newaxis]], axis=1), axis=1)
+    kxmax = np.max(np.concatenate([kx1[:, np.newaxis], kx2[:, np.newaxis]], axis=1), axis=1)
+    kymax = np.max(np.concatenate([ky1[:, np.newaxis], ky2[:, np.newaxis]], axis=1), axis=1)
+    kzmax = np.max(np.concatenate([kz1[:, np.newaxis], kz2[:, np.newaxis]], axis=1), axis=1)
 
     # Check if the bounds overlap
-    kmax = np.min(np.concatenate([
-        kxmax[:, np.newaxis],
-        kymax[:, np.newaxis],
-        kzmax[:, np.newaxis]
-    ], axis=1), axis=1)
-    kmin = np.max(np.concatenate([
-        kxmin[:, np.newaxis],
-        kymin[:, np.newaxis],
-        kzmin[:, np.newaxis]
-    ], axis=1), axis=1)
+    kmax = np.min(np.concatenate([kxmax[:, np.newaxis], kymax[:, np.newaxis], kzmax[:, np.newaxis]], axis=1), axis=1)
+    kmin = np.max(np.concatenate([kxmin[:, np.newaxis], kymin[:, np.newaxis], kzmin[:, np.newaxis]], axis=1), axis=1)
     return kmin <= kmax
 
 
 c = 1.0
-points = np.array([[x, y, z] for x, y, z in product(
-    np.arange(-5.0, 4.0, c),
-    np.arange(-5.0, 4.0, c),
-    np.arange(-5.0, 4.0, c))
-])
+points = np.array(
+    [[x, y, z] for x, y, z in product(np.arange(-5.0, 4.0, c), np.arange(-5.0, 4.0, c), np.arange(-5.0, 4.0, c))]
+)
 while True:
 
     fig = plt.figure()
-    ax = plt.axes(projection='3d')
+    ax = plt.axes(projection="3d")
 
     d = np.random.rand(3) * 1 - 0.5
     v = np.random.rand(3) * 1 - 0.5
@@ -90,20 +58,20 @@ while True:
         if not bool_vect[i]:
             continue
 
-        ax.plot([px, px+c], [py, py], [pz, pz], 'b')
-        ax.plot([px, px+c], [py, py], [pz+c, pz+c], 'b')
-        ax.plot([px, px+c], [py+c, py+c], [pz, pz], 'b')
-        ax.plot([px, px+c], [py+c, py+c], [pz+c, pz+c], 'b')
-        ax.plot([px, px], [py, py+c], [pz, pz], 'b')
-        ax.plot([px, px], [py, py+c], [pz+c, pz+c], 'b')
-        ax.plot([px+c, px+c], [py, py+c], [pz, pz], 'b')
-        ax.plot([px+c, px+c], [py, py+c], [pz+c, pz+c], 'b')
-        ax.plot([px, px], [py, py], [pz, pz+c], 'b')
-        ax.plot([px, px], [py+c, py+c], [pz, pz+c], 'b')
-        ax.plot([px+c, px+c], [py, py], [pz, pz+c], 'b')
-        ax.plot([px+c, px+c], [py+c, py+c], [pz, pz+c], 'b')
+        ax.plot([px, px + c], [py, py], [pz, pz], "b")
+        ax.plot([px, px + c], [py, py], [pz + c, pz + c], "b")
+        ax.plot([px, px + c], [py + c, py + c], [pz, pz], "b")
+        ax.plot([px, px + c], [py + c, py + c], [pz + c, pz + c], "b")
+        ax.plot([px, px], [py, py + c], [pz, pz], "b")
+        ax.plot([px, px], [py, py + c], [pz + c, pz + c], "b")
+        ax.plot([px + c, px + c], [py, py + c], [pz, pz], "b")
+        ax.plot([px + c, px + c], [py, py + c], [pz + c, pz + c], "b")
+        ax.plot([px, px], [py, py], [pz, pz + c], "b")
+        ax.plot([px, px], [py + c, py + c], [pz, pz + c], "b")
+        ax.plot([px + c, px + c], [py, py], [pz, pz + c], "b")
+        ax.plot([px + c, px + c], [py + c, py + c], [pz, pz + c], "b")
 
     # plot line
-    ax.plot([dx, dx+vx], [dy, dy+vy], [dz, dz+vz], 'g')
+    ax.plot([dx, dx + vx], [dy, dy + vy], [dz, dz + vz], "g")
 
     plt.show()
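Side note (not part of the commit): check_line_voxel is a vectorized slab test. For each axis it computes the two line parameters at which the line crosses the voxel's near and far planes, and the line hits the voxel when the largest per-axis entry parameter does not exceed the smallest per-axis exit parameter. A scalar sketch of the same test for a single voxel (values made up):

    import numpy as np

    px, py, pz, c = 0.0, 0.0, 0.0, 1.0   # voxel corner and size
    dx, dy, dz = -1.0, 0.5, 0.5          # line origin
    vx, vy, vz = 1.0, 0.1, 0.1           # line direction (non-zero components)
    k1 = np.array([(px - dx) / vx, (py - dy) / vy, (pz - dz) / vz])
    k2 = np.array([(px - dx + c) / vx, (py - dy + c) / vy, (pz - dz + c) / vz])
    hits = np.minimum(k1, k2).max() <= np.maximum(k1, k2).min()  # True: the line crosses the voxel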
src/main.py
@@ -17,10 +17,16 @@ Z_MIN, Z_MAX = -0.1, 0.1
 
 nb_frame = 24
 
-points = np.array([[x, y, z, 1.0] for x, y, z in product(
-    np.arange(X_MIN, X_MAX, VOXEL_SIZE),
-    np.arange(Y_MIN, Y_MAX, VOXEL_SIZE),
-    np.arange(Z_MIN, Z_MAX, VOXEL_SIZE))])
+points = np.array(
+    [
+        [x, y, z, 1.0]
+        for x, y, z in product(
+            np.arange(X_MIN, X_MAX, VOXEL_SIZE),
+            np.arange(Y_MIN, Y_MAX, VOXEL_SIZE),
+            np.arange(Z_MIN, Z_MAX, VOXEL_SIZE),
+        )
+    ]
+)
 
 mask = 255
 
@@ -29,27 +35,34 @@ proj_mats = []
 frames = []
 
 for k in range(nb_frame):
-    frame = cv2.imread(f'data/torus/masks/Image{k:04}.png', cv2.IMREAD_GRAYSCALE)
-    frames.append(cv2.imread(f'data/torus/images/Image{k:04}.png', cv2.IMREAD_GRAYSCALE))
+    frame = cv2.imread(f"data/torus/masks/Image{k:04}.png", cv2.IMREAD_GRAYSCALE)
+    frames.append(cv2.imread(f"data/torus/images/Image{k:04}.png", cv2.IMREAD_GRAYSCALE))
 
-    with open(f"data/torus/cameras/{k:04d}.pickle", 'rb') as file:
+    with open(f"data/torus/cameras/{k:04d}.pickle", "rb") as file:
         matrices = pickle.load(file)
         proj_mat = matrices["P"]
         proj_mats.append(proj_mat)
         position = matrices["RT"][:, 3]
         positions.append(position)
 
     cam_points = proj_mat @ points.T
-    cam_points /= cam_points[2,:]
+    cam_points /= cam_points[2, :]
     cam_points = np.round(cam_points).astype(np.int32)
 
-    visible = np.logical_and.reduce((0 <= cam_points[0,:], cam_points[0,:] < frame.shape[1], 0 <= cam_points[1,:], cam_points[1,:] < frame.shape[0]))
-    cam_points = cam_points[:,visible]
-    points = points[visible,:]
+    visible = np.logical_and.reduce(
+        (
+            0 <= cam_points[0, :],
+            cam_points[0, :] < frame.shape[1],
+            0 <= cam_points[1, :],
+            cam_points[1, :] < frame.shape[0],
+        )
+    )
+    cam_points = cam_points[:, visible]
+    points = points[visible, :]
 
-    solid = frame[cam_points[1,:],cam_points[0,:]] == mask
-    cam_points = cam_points[:,solid]
-    points = points[solid,:]
+    solid = frame[cam_points[1, :], cam_points[0, :]] == mask
+    cam_points = cam_points[:, solid]
+    points = points[solid, :]
 
     # for cam_point in cam_points.T:
    #     cv2.circle(frame, (cam_point[0], cam_point[1]), 2, (255*is_in[k], 0, 255*(not is_in[k])))
@@ -73,9 +86,15 @@ for k in range(nb_frame):
     # cv2.waitKey(0)
 
 
-voxel = np.zeros((int((X_MAX-X_MIN)/VOXEL_SIZE + 1), int((Y_MAX-Y_MIN)/VOXEL_SIZE + 1), int((Z_MAX-Z_MIN)/VOXEL_SIZE + 1)))
+voxel = np.zeros(
+    (
+        int((X_MAX - X_MIN) / VOXEL_SIZE + 1),
+        int((Y_MAX - Y_MIN) / VOXEL_SIZE + 1),
+        int((Z_MAX - Z_MIN) / VOXEL_SIZE + 1),
+    )
+)
 idx = np.floor_divide(points[:, :3] - np.array([X_MIN, Y_MIN, Z_MIN]), VOXEL_SIZE).astype(int)
-voxel[idx[:,0], idx[:,1], idx[:,2]] = 1
+voxel[idx[:, 0], idx[:, 1], idx[:, 2]] = 1
 
 border = update_border(voxel)
 
@@ -88,18 +107,19 @@ border = update_border(voxel)
 
 origin = np.array([X_MIN, Y_MIN, Z_MIN])
 step = np.array([VOXEL_SIZE, VOXEL_SIZE, VOXEL_SIZE])
-shape = np.array([int((X_MAX-X_MIN)/VOXEL_SIZE), int((Y_MAX-Y_MIN)/VOXEL_SIZE), int((Z_MAX-Z_MIN)/VOXEL_SIZE)])
+shape = np.array(
+    [int((X_MAX - X_MIN) / VOXEL_SIZE), int((Y_MAX - Y_MIN) / VOXEL_SIZE), int((Z_MAX - Z_MIN) / VOXEL_SIZE)]
+)
 
 for idx in track(np.argwhere(border)):
     # coordinates of the voxel center
-    start = np.array([
-        X_MIN + (idx[0] + 0.5) * VOXEL_SIZE,
-        Y_MIN + (idx[1] + 0.5) * VOXEL_SIZE,
-        Z_MIN + (idx[2] + 0.5) * VOXEL_SIZE])
+    start = np.array(
+        [X_MIN + (idx[0] + 0.5) * VOXEL_SIZE, Y_MIN + (idx[1] + 0.5) * VOXEL_SIZE, Z_MIN + (idx[2] + 0.5) * VOXEL_SIZE]
+    )
 
     # array that will hold the grayscale values of the frames that see the voxel
     values = []
 
     # for each camera (frame)
     for i in range(nb_frame):
 
@@ -130,6 +150,5 @@ for idx in track(np.argwhere(border)):
         voxel[idx[0], idx[1], idx[2]] = 0
 
-
 
 vertices, triangles = mcubes.marching_cubes(voxel, 0)
 mcubes.export_obj(vertices, triangles, "result.obj")
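Side note (not part of the commit): the carving loop in src/main.py projects every homogeneous voxel sample with each camera's 3x4 "P" matrix, divides by the third coordinate, and keeps only the samples that land on the silhouette mask before running marching cubes. A scalar sketch of that per-point test, using the proj_mat, frame, and mask variables defined in the loop above (the sample point is made up; the real code does the same thing for all points at once):

    import numpy as np

    X = np.array([0.0, 0.0, 0.0, 1.0])      # homogeneous voxel sample (x, y, z, 1)
    u, v, w = proj_mat @ X                   # proj_mat: 3x4 projection matrix loaded from the pickle
    col, row = int(round(u / w)), int(round(v / w))
    visible = 0 <= col < frame.shape[1] and 0 <= row < frame.shape[0]
    is_solid = visible and frame[row, col] == mask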
@@ -12,25 +12,27 @@ def matrices_reader(path: str) -> list[np.ndarray]:
         list[np.ndarray]: list of projection matrix
     """
 
-    with open(path, 'r') as f:
+    with open(path, "r") as f:
         lines = f.readlines()
 
     k = 0
     world_matrices = []
-    while k+3 < len(lines):
+    while k + 3 < len(lines):
         # Match matrices one by one
         mat_str = ""
-        for line in lines[k:k+4]:
+        for line in lines[k : k + 4]:
             mat_str += line
         float_reg = r"(-|\d|\.|e)+"
         res = re.search(
-            f"Matrix\(\(\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\),\n +\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\),\n\ +\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\)\)\)", mat_str)
+            f"Matrix\(\(\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\),\n +\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\),\n\ +\(({float_reg}), ({float_reg}), ({float_reg}), ({float_reg})\)\)\)",
+            mat_str,
+        )
 
         # Convert string to np.ndarray
-        values = [float(res.group(i)) for i in range(1,len(res.groups()) + 1, 2)]
-        world_mat = np.array([[values[4*i + j] for j in range(4)] for i in range(3)])
+        values = [float(res.group(i)) for i in range(1, len(res.groups()) + 1, 2)]
+        world_mat = np.array([[values[4 * i + j] for j in range(4)] for i in range(3)])
         world_matrices.append(world_mat)
 
         k += 4
 
     return world_matrices[1:]
@@ -4,8 +4,7 @@ nb_frame = 24
 
 for k in range(nb_frame):
 
-    with open(f"/tmp/cameras/{k:04d}.pickle", 'rb') as file:
+    with open(f"/tmp/cameras/{k:04d}.pickle", "rb") as file:
         proj_mat = pickle.load(file)["P"]
 
     print(k, proj_mat)
-