KPConv-PyTorch/kernels/kernel_points.py

#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Functions handling the disposition of kernel points.
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
#
# ------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Import numpy package and name it "np"
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from os import makedirs
from os.path import join, exists
from utils.ply import read_ply, write_ply
from utils.config import bcolors
# ------------------------------------------------------------------------------------------
#
# Functions
# \***************/
#
#
def create_3D_rotations(axis, angle):
"""
Create rotation matrices from a list of axes and angles. Code from Wikipedia on quaternions.
:param axis: float32[N, 3]
:param angle: float32[N,]
:return: float32[N, 3, 3]
"""
t1 = np.cos(angle)
t2 = 1 - t1
t3 = axis[:, 0] * axis[:, 0]
t6 = t2 * axis[:, 0]
t7 = t6 * axis[:, 1]
t8 = np.sin(angle)
t9 = t8 * axis[:, 2]
t11 = t6 * axis[:, 2]
t12 = t8 * axis[:, 1]
t15 = axis[:, 1] * axis[:, 1]
t19 = t2 * axis[:, 1] * axis[:, 2]
t20 = t8 * axis[:, 0]
t24 = axis[:, 2] * axis[:, 2]
R = np.stack([t1 + t2 * t3,
t7 - t9,
t11 + t12,
t7 + t9,
t1 + t2 * t15,
t19 - t20,
t11 - t12,
t19 + t20,
t1 + t2 * t24], axis=1)
return np.reshape(R, (-1, 3, 3))
def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximation='monte-carlo',
approx_n=5000, max_iter=500, momentum=0.9, verbose=0):
"""
Creation of kernel points via Lloyd's algorithm. We use an approximation of the algorithm, and compute the Voronoi
cell centers with a discretization of space. The exact formula is not trivial because some cells have a part of the
sphere as a boundary.
:param radius: Radius of the kernels
:param num_cells: Number of cells (kernel points) in the Voronoi diagram.
:param dimension: dimension of the space
:param fixed: fix position of certain kernel points ('none', 'center' or 'verticals')
:param approximation: Approximation method for Lloyd's algorithm ('discretization', 'monte-carlo')
:param approx_n: Number of points used for the approximation.
:param max_iter: Maximum number of iterations for the algorithm.
:param momentum: Momentum of the low pass filter smoothing the kernel point positions
:param verbose: display option
:return: points [num_cells, dimension]
"""
#######################
# Parameters definition
#######################
# Radius used for optimization (points are rescaled afterwards)
radius0 = 1.0
#######################
# Kernel initialization
#######################
# Random kernel points (Uniform distribution in a sphere)
kernel_points = np.zeros((0, dimension))
while kernel_points.shape[0] < num_cells:
new_points = np.random.rand(num_cells, dimension) * 2 * radius0 - radius0
kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[np.logical_and(d2 < radius0 ** 2, (0.9 * radius0) ** 2 < d2), :]
kernel_points = kernel_points[:num_cells, :].reshape((num_cells, -1))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[:3, :] *= 0
kernel_points[1, -1] += 2 * radius0 / 3
kernel_points[2, -1] -= 2 * radius0 / 3
##############################
# Approximation initialization
##############################
# Initialize figure
if verbose > 1:
fig = plt.figure()
# Initialize the discretization if this method is chosen
if approximation == 'discretization':
side_n = int(np.floor(approx_n ** (1. / dimension)))
dl = 2 * radius0 / side_n
coords = np.arange(-radius0 + dl/2, radius0, dl)
if dimension == 2:
x, y = np.meshgrid(coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y))).T
elif dimension == 3:
x, y, z = np.meshgrid(coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z))).T
elif dimension == 4:
x, y, z, t = np.meshgrid(coords, coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z), np.ravel(t))).T
else:
raise ValueError('Unsupported dimension (max is 4)')
elif approximation == 'monte-carlo':
X = np.zeros((0, dimension))
else:
raise ValueError('Wrong approximation method chosen: "{:s}"'.format(approximation))
# Only points inside the sphere are used
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
#####################
# Kernel optimization
#####################
# Warning if at least one kernel point has no cell
warning = False
# moving vectors of kernel points saved to detect convergence
max_moves = np.zeros((0,))
for iter in range(max_iter):
# In the case of monte-carlo, renew the sampled points
if approximation == 'monte-carlo':
X = np.random.rand(approx_n, dimension) * 2 * radius0 - radius0
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
# Get the differences to every kernel point [n_approx, K, dim] and the squared distances [n_approx, K]
differences = np.expand_dims(X, 1) - kernel_points
sq_distances = np.sum(np.square(differences), axis=2)
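# Each sample point is assigned to its closest kernel point, which defines the
# approximate Voronoi cell of that kernel point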
# Compute cell centers
cell_inds = np.argmin(sq_distances, axis=1)
centers = []
for c in range(num_cells):
bool_c = (cell_inds == c)
num_c = np.sum(bool_c.astype(np.int32))
if num_c > 0:
centers.append(np.sum(X[bool_c, :], axis=0) / num_c)
else:
warning = True
centers.append(kernel_points[c])
# Update kernel points with a low pass filter to smooth the Monte Carlo estimate
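# i.e. p <- p + (1 - momentum) * (centroid - p), an exponential moving average
# of the successive centroid estimates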
centers = np.vstack(centers)
moves = (1 - momentum) * (centers - kernel_points)
kernel_points += moves
# Check moves for convergence
max_moves = np.append(max_moves, np.max(np.linalg.norm(moves, axis=1)))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[0, :] *= 0
kernel_points[:3, :-1] *= 0
if verbose:
print('iter {:5d} / max move = {:f}'.format(iter, np.max(np.linalg.norm(moves, axis=1))))
if warning:
print('{:}WARNING: at least one point has no cell{:}'.format(bcolors.WARNING, bcolors.ENDC))
if verbose > 1:
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0,
marker='.', cmap=plt.get_cmap('tab20'))
#plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
plt.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_ylim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_aspect('equal')
plt.draw()
plt.pause(0.001)
plt.show(block=False)
###################
# User verification
###################
# Show the convergence plot so the user can check that the kernel is correct
if verbose:
if dimension == 2:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10.4, 4.8])
ax1.plot(max_moves)
ax2.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0,
marker='.', cmap=plt.get_cmap('tab20'))
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
ax2.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
ax2.add_artist(circle)
ax2.set_xlim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_ylim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_aspect('equal')
plt.title('Check if kernel is correct.')
plt.draw()
plt.show()
if dimension > 2:
plt.figure()
plt.plot(max_moves)
plt.title('Check if kernel is correct.')
plt.show()
# Rescale kernels with real radius
return kernel_points * radius
def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension=3,
fixed='center', ratio=0.66, verbose=0):
"""
Creation of kernel points via optimization of potentials.
:param radius: Radius of the kernels
:param num_points: number of points composing each kernel
:param num_kernels: number of kernels to generate
:param dimension: dimension of the space
:param fixed: fix position of certain kernel points ('none', 'center' or 'verticals')
:param ratio: ratio of the radius where you want the kernel points to be placed
:param verbose: display option
:return: points [num_kernels, num_points, dimension]
"""
#######################
# Parameters definition
#######################
# Radius used for optimization (points are rescaled afterwards)
radius0 = 1
diameter0 = 2
# Factor multiplying gradients for moving points (~learning rate)
moving_factor = 1e-2
continuous_moving_decay = 0.9995
# Gradient threshold to stop optimization
thresh = 1e-5
# Gradient clipping value
clip = 0.05 * radius0
#######################
# Kernel initialization
#######################
# Random kernel points
kernel_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
while (kernel_points.shape[0] < num_kernels * num_points):
new_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[d2 < 0.5 * radius0 * radius0, :]
kernel_points = kernel_points[:num_kernels * num_points, :].reshape((num_kernels, num_points, -1))
# Optional fixing
if fixed == 'center':
kernel_points[:, 0, :] *= 0
if fixed == 'verticals':
kernel_points[:, :3, :] *= 0
kernel_points[:, 1, -1] += 2 * radius0 / 3
kernel_points[:, 2, -1] -= 2 * radius0 / 3
#####################
# Kernel optimization
#####################
# Initialize figure
if verbose > 1:
fig = plt.figure()
saved_gradient_norms = np.zeros((10000, num_kernels))
old_gradient_norms = np.zeros((num_kernels, num_points))
for iter in range(10000):
# Compute gradients
# *****************
# Derivative of the sum of potentials of all points
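# Each pair of points interacts through a repulsive 1/r potential: the gradient of
# 1/|p_i - p_j| with respect to p_j is (p_i - p_j) / |p_i - p_j|^3, accumulated
# below over all other points (descending this term pushes the points apart)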
A = np.expand_dims(kernel_points, axis=2)
B = np.expand_dims(kernel_points, axis=1)
interd2 = np.sum(np.power(A - B, 2), axis=-1)
inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3/2) + 1e-6)
inter_grads = np.sum(inter_grads, axis=1)
# Derivative of the radius potential
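# Gradient of the attractive potential 5 * |p|^2 that keeps the points close to the center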
circle_grads = 10*kernel_points
# All gradients
gradients = inter_grads + circle_grads
if fixed == 'verticals':
gradients[:, 1:3, :-1] = 0
# Stop condition
# **************
# Compute norm of gradients
gradients_norms = np.sqrt(np.sum(np.power(gradients, 2), axis=-1))
saved_gradient_norms[iter, :] = np.max(gradients_norms, axis=1)
# Stop if the gradients of all moving points have stabilized (low gradient difference)
if fixed == 'center' and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:])) < thresh:
break
elif fixed == 'verticals' and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:])) < thresh:
break
elif np.max(np.abs(old_gradient_norms - gradients_norms)) < thresh:
break
old_gradient_norms = gradients_norms
# Move points
# ***********
# Clip gradient to get moving dists
moving_dists = np.minimum(moving_factor * gradients_norms, clip)
# Fix central point
if fixed == 'center':
moving_dists[:, 0] = 0
if fixed == 'verticals':
moving_dists[:, 0] = 0
# Move points
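# Gradient descent step: each point moves by its clipped distance along the
# normalized negative gradient direction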
kernel_points -= np.expand_dims(moving_dists, -1) * gradients / np.expand_dims(gradients_norms + 1e-6, -1)
if verbose:
print('iter {:5d} / max grad = {:f}'.format(iter, np.max(gradients_norms[:, 3:])))
if verbose > 1:
plt.clf()
plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], '.')
circle = plt.Circle((0, 0), radius, color='r', fill=False)
fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius*1.1, radius*1.1))
fig.axes[0].set_ylim((-radius*1.1, radius*1.1))
fig.axes[0].set_aspect('equal')
plt.draw()
plt.pause(0.001)
plt.show(block=False)
print(moving_factor)
# moving factor decay
moving_factor *= continuous_moving_decay
# Rescale the points so that the mean distance of the points (excluding the first one) fits the wanted ratio of the radius
r = np.sqrt(np.sum(np.power(kernel_points, 2), axis=-1))
kernel_points *= ratio / np.mean(r[:, 1:])
# Rescale kernels with real radius
return kernel_points * radius, saved_gradient_norms
def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
# Kernel directory
kernel_dir = 'kernels/dispositions'
if not exists(kernel_dir):
makedirs(kernel_dir)
# Too many points: switch to Lloyd's algorithm
if num_kpoints > 30:
lloyd = True
# Kernel_file
kernel_file = join(kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpoints, fixed, dimension))
# Check if already done
if not exists(kernel_file):
if lloyd:
# Create kernels
kernel_points = spherical_Lloyd(1.0,
num_kpoints,
dimension=dimension,
fixed=fixed,
verbose=0)
else:
# Create kernels
kernel_points, grad_norms = kernel_point_optimization_debug(1.0,
num_kpoints,
num_kernels=100,
dimension=dimension,
fixed=fixed,
verbose=0)
# Find best candidate
best_k = np.argmin(grad_norms[-1, :])
# Save points
kernel_points = kernel_points[best_k, :, :]
write_ply(kernel_file, kernel_points, ['x', 'y', 'z'])
else:
data = read_ply(kernel_file)
kernel_points = np.vstack((data['x'], data['y'], data['z'])).T
# Random rotations for the kernel
# N.B. 4D random rotations not supported yet
R = np.eye(dimension)
theta = np.random.rand() * 2 * np.pi
if dimension == 2:
if fixed != 'verticals':
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s], [s, c]], dtype=np.float32)
elif dimension == 3:
if fixed == 'verticals':
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
else:
phi = (np.random.rand() - 0.5) * np.pi
# Create the first vector in Cartesian coordinates
u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])
# Choose a random rotation angle
alpha = np.random.rand() * 2 * np.pi
# Create the rotation matrix with this vector and angle
R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0]
R = R.astype(np.float32)
# Add a small noise
kernel_points = kernel_points + np.random.normal(scale=0.01, size=kernel_points.shape)
# Scale kernels
kernel_points = radius * kernel_points
# Rotate kernels
kernel_points = np.matmul(kernel_points, R)
return kernel_points.astype(np.float32)
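

if __name__ == '__main__':

    # Minimal usage sketch (illustrative, with assumed parameter values): generate a
    # 15-point 3D kernel with a fixed center point and print a few statistics. It
    # assumes the script is run from the repository root so that the 'utils' imports
    # and the 'kernels/dispositions' cache directory can be resolved.
    K = load_kernels(radius=1.0, num_kpoints=15, dimension=3, fixed='center', lloyd=True)
    print('Kernel shape:', K.shape)
    print('Distances to center:', np.linalg.norm(K, axis=1))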