# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

from itertools import islice

import torch


def mesh_normal_consistency(meshes, reduce=True):
    r"""
    Computes the normal consistency of each mesh in meshes.
    We compute the normal consistency for each pair of neighboring faces.
    If e = (v0, v1) is the connecting edge of two neighboring faces f0 and f1,
    then the normal consistency between f0 and f1 is defined using the
    configuration below.

    .. code-block:: python

                   a
                   /\
                  /  \
                 / f0 \
                /      \
           v0  /____e___\ v1
               \        /
                \      /
                 \ f1 /
                  \  /
                   \/
                   b

    The normal consistency is

    .. code-block:: python

        nc(f0, f1) = 1 - cos(n0, n1)

        where cos(n0, n1) = (n0 . n1) / (||n0|| ||n1||) is the cosine of the
        angle between the normals n0 and n1, and

        n0 = (v1 - v0) x (a - v0)
        n1 = - (v1 - v0) x (b - v0) = (b - v0) x (v1 - v0)

    This means that if nc(f0, f1) = 0 then n0 and n1 point in the same
    direction, while if nc(f0, f1) = 2 then n0 and n1 point in opposite
    directions.

    .. note::
        For well-constructed meshes the assumption that only two faces share
        an edge holds. That assumption would allow an easier and faster
        implementation, but this implementation does not rely on it: all the
        faces sharing e, however many there are, are discovered.

    Args:
        meshes: Meshes object with a batch of meshes.
        reduce: bool; if True, average the loss across the batch, otherwise
            return the weighted per-pair losses.

    Returns:
        loss: Average normal consistency across the batch if reduce is True,
        otherwise the weighted per-pair losses.
        Returns 0 if meshes contains no meshes or all empty meshes.
    """
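    # Overview of the computation below:
    #   1. For every face, list its 3 edges next to the face's full vertex
    #      triple, then sort by edge index so all faces sharing an edge end up
    #      in consecutive rows.
    #   2. Group the rows per edge (split_list) and form pairs of rows within
    #      each group; every pair corresponds to two faces meeting at that edge.
    #   3. For each row, sum the three cross products of (v1 - v0) with the
    #      face's vertices (relative to v0); the two terms involving the edge's
    #      own endpoints vanish, leaving an unnormalized face normal.
    #   4. The per-pair loss is 1 - cos(n0, n1), with one normal negated as in
    #      the definition of n1 above, weighted so each mesh contributes
    #      equally, and averaged over the batch when reduce is True.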
    if meshes.isempty():
        return torch.tensor(
            [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True
        )

    N = len(meshes)
    verts_packed = meshes.verts_packed()  # (sum(V_n), 3)
    faces_packed = meshes.faces_packed()  # (sum(F_n), 3)
    edges_packed = meshes.edges_packed()  # (sum(E_n), 2)
    verts_packed_to_mesh_idx = meshes.verts_packed_to_mesh_idx()  # (sum(V_n),)
    face_to_edge = meshes.faces_packed_to_edges_packed()  # (sum(F_n), 3)
    E = edges_packed.shape[0]  # sum(E_n)
    F = faces_packed.shape[0]  # sum(F_n)

    # We don't want gradients for the following operation. The goal is to
    # find for each edge e all the vertices associated with e. In the example
    # above, the vertices associated with e are (v0, v1, a, b), i.e. points on
    # e (=v0, v1) and points connected on faces to e (=a, b).
    with torch.no_grad():
        edge_idx = face_to_edge.reshape(F * 3)  # (3 * F,) indexes into edges
        vert_idx = (
            faces_packed.view(1, F, 3)
            .expand(3, F, 3)
            .transpose(0, 1)
            .reshape(3 * F, 3)
        )
        edge_idx, edge_sort_idx = edge_idx.sort()
        vert_idx = vert_idx[edge_sort_idx]

        # In well-constructed meshes each edge is shared by precisely 2 faces.
        # However, in many meshes this assumption is not satisfied.
        # We want to find all faces that share an edge, a number which can
        # vary and which depends on the topology.
        # In particular, we find the vertices not on the edge on the shared
        # faces. In the example above, we want to associate edge e with
        # vertices a and b.
        # This operation is done more efficiently on the CPU with lists.
        # TODO(gkioxari) find a better way to do this.

        # edge_idx represents the index of the edge for each vertex. We can
        # count the number of vertices which are associated with each edge.
        # There can be a different number for each edge.
        edge_num = edge_idx.bincount(minlength=E)

        # Create pairs of vertices associated to e. We generate a list of
        # lists: each list has the indices of the vertices which are opposite
        # to one edge. The length of the list for each edge will vary.
        vert_edge_pair_idx = split_list(
            list(range(edge_idx.shape[0])), edge_num.tolist()
        )

        # For each list find all combinations of pairs in the list. This
        # represents all pairs of vertices which are opposite to the same edge.
        vert_edge_pair_idx = [
            [e[i], e[j]]
            for e in vert_edge_pair_idx
            for i in range(len(e) - 1)
            for j in range(1, len(e))
            if i != j
        ]
        vert_edge_pair_idx = torch.tensor(
            vert_edge_pair_idx, device=meshes.device, dtype=torch.int64
        )

    v0_idx = edges_packed[edge_idx, 0]
    v0 = verts_packed[v0_idx]
    v1_idx = edges_packed[edge_idx, 1]
    v1 = verts_packed[v1_idx]

    # Two of the following cross products are zero, as they are either
    # (v1 - v0) x (v1 - v0) or (v1 - v0) x (v0 - v0).
    n_temp0 = (v1 - v0).cross(verts_packed[vert_idx[:, 0]] - v0, dim=1)
    n_temp1 = (v1 - v0).cross(verts_packed[vert_idx[:, 1]] - v0, dim=1)
    n_temp2 = (v1 - v0).cross(verts_packed[vert_idx[:, 2]] - v0, dim=1)
    n = n_temp0 + n_temp1 + n_temp2
    n0 = n[vert_edge_pair_idx[:, 0]]
    n1 = -n[vert_edge_pair_idx[:, 1]]
    loss = 1 - torch.cosine_similarity(n0, n1, dim=1)

    verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_idx[:, 0]]
    verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_edge_pair_idx[:, 0]]
    num_normals = verts_packed_to_mesh_idx.bincount(minlength=N)

    # Weight each pair so that every mesh contributes equally to the batch loss.
    weights = 1.0 / num_normals[verts_packed_to_mesh_idx].float()

    loss = loss * weights
    if reduce:
        return loss.sum() / N
    else:
        return loss


def split_list(input, length_to_split):
    # Split `input` into consecutive chunks with the lengths in `length_to_split`.
    inputt = iter(input)
    return [list(islice(inputt, elem)) for elem in length_to_split]
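# A minimal usage sketch (not part of the original module): evaluates the
# normal consistency loss on a simple test mesh. It assumes pytorch3d is
# installed (this module already expects a pytorch3d-style Meshes object);
# the helper name `_example_normal_consistency` is illustrative only.
def _example_normal_consistency():
    from pytorch3d.utils import ico_sphere

    # An ico-sphere is closed and consistently oriented, so every edge is
    # shared by exactly two faces and neighboring normals are nearly aligned,
    # giving a small loss.
    meshes = ico_sphere(level=2)
    return mesh_normal_consistency(meshes, reduce=True)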
## new mesh laplacian loss with bug fix (https://github.com/facebookresearch/pytorch3d/blob/ff9c6612b457a2021d88fea119bdb9b94ba017bd/pytorch3d/loss/mesh_laplacian_smoothing.py)
def mesh_laplacian_smoothing(meshes, method: str = "uniform"):
    r"""
    Computes the laplacian smoothing objective for a batch of meshes.
    This function supports three variants of Laplacian smoothing, namely with
    uniform weights ("uniform"), with cotangent weights ("cot"), and cotangent
    curvature ("cotcurv"). For more details read [1, 2].

    Args:
        meshes: Meshes object with a batch of meshes.
        method: str specifying the method for the laplacian, one of
            "uniform", "cot" or "cotcurv".

    Returns:
        loss: Average laplacian smoothing loss across the batch.
        Returns 0 if meshes contains no meshes or all empty meshes.

    Consider a mesh M = (V, F), with verts of shape Nx3 and faces of shape Mx3.
    The Laplacian matrix L is a NxN tensor such that LV gives a tensor of
    vectors: for a uniform Laplacian, LuV[i] points to the centroid of its
    neighboring vertices, a cotangent Laplacian LcV[i] is known to be an
    approximation of the surface normal, while the curvature variant LckV[i]
    scales the normals by the discrete mean curvature. For vertex i, assume
    S[i] is the set of neighboring vertices to i, a_ij and b_ij are the
    "outside" angles in the two triangles connecting vertex v_i and its
    neighboring vertex v_j for j in S[i], as seen in the diagram below.

    .. code-block:: python

               a_ij
                /\
               /  \
              /    \
             /      \
        v_i /________\ v_j
            \        /
             \      /
              \    /
               \  /
                \/
               b_ij

        The definition of the Laplacian is LV[i] = sum_j w_ij (v_j - v_i)
        For the uniform variant,    w_ij = 1 / |S[i]|
        For the cotangent variant,
            w_ij = (cot a_ij + cot b_ij) / (sum_k cot a_ik + cot b_ik)
        For the cotangent curvature, w_ij = (cot a_ij + cot b_ij) / (4 A[i])
        where A[i] is the sum of the areas of all triangles containing vertex v_i.

    There is a nice trigonometry identity to compute cotangents. Consider a
    triangle with side lengths A, B, C and angles a, b, c.

    .. code-block:: python

               c
              /|\
             / | \
            /  |  \
         B /  H|   \ A
          /    |    \
         /     |     \
        /a_____|_____b\
               C

        Then cot a = (B^2 + C^2 - A^2) / (4 * area)

        We know that area = CH/2, and by the law of cosines we have

        A^2 = B^2 + C^2 - 2BC cos a => B^2 + C^2 - A^2 = 2BC cos a

        Putting these together, we get:

        B^2 + C^2 - A^2     2BC cos a
        _______________  =  _________ = (B/H) cos a = cos a / sin a = cot a
           4 * area            2CH

    [1] Desbrun et al, "Implicit fairing of irregular meshes using diffusion
    and curvature flow", SIGGRAPH 1999.

    [2] Nealen et al, "Laplacian Mesh Optimization", Graphite 2006.
    """
    if meshes.isempty():
        return torch.tensor(
            [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True
        )

    N = len(meshes)
    verts_packed = meshes.verts_packed()  # (sum(V_n), 3)
    num_verts_per_mesh = meshes.num_verts_per_mesh()  # (N,)
    verts_packed_idx = meshes.verts_packed_to_mesh_idx()  # (sum(V_n),)
    weights = num_verts_per_mesh.gather(0, verts_packed_idx)  # (sum(V_n),)
    weights = 1.0 / weights.float()

    # We don't want to backprop through the computation of the Laplacian;
    # just treat it as a magic constant matrix that is used to transform
    # verts into normals.
    with torch.no_grad():
        if method == "uniform":
            L = meshes.laplacian_packed()
        elif method in ["cot", "cotcurv"]:
            L, inv_areas = laplacian_cot(meshes)
            if method == "cot":
                norm_w = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
                idx = norm_w > 0
                norm_w[idx] = 1.0 / norm_w[idx]
            else:
                L_sum = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
                norm_w = 0.25 * inv_areas
        else:
            raise ValueError("Method should be one of {uniform, cot, cotcurv}")

    if method == "uniform":
        loss = L.mm(verts_packed)
    elif method == "cot":
        loss = L.mm(verts_packed) * norm_w - verts_packed
    elif method == "cotcurv":
        loss = (L.mm(verts_packed) - L_sum * verts_packed) * norm_w
    loss = loss.norm(dim=1)

    loss = loss * weights
    return loss.sum() / N
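# A minimal usage sketch (not part of the original module): compares the three
# Laplacian smoothing variants on the same test mesh. Assumes pytorch3d is
# installed; `_example_laplacian_smoothing` is an illustrative helper name.
def _example_laplacian_smoothing():
    from pytorch3d.utils import ico_sphere

    meshes = ico_sphere(level=2)
    # All three variants return a scalar averaged over the batch (here N = 1).
    return {
        method: mesh_laplacian_smoothing(meshes, method=method)
        for method in ("uniform", "cot", "cotcurv")
    }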
def laplacian_cot(meshes):
    """
    Returns the Laplacian matrix with cotangent weights and the inverse of the
    face areas.

    Args:
        meshes: Meshes object with a batch of meshes.

    Returns:
        2-element tuple containing

        - **L**: FloatTensor of shape (V, V) for the Laplacian matrix
          (V = sum(V_n)). Here, L[i, j] = cot a_ij + cot b_ij iff (i, j) is an
          edge in meshes. See the description above for more clarity.
        - **inv_areas**: FloatTensor of shape (V, 1) containing the inverse of
          the sum of the areas of the faces containing each vertex.
    """
    verts_packed = meshes.verts_packed()  # (sum(V_n), 3)
    faces_packed = meshes.faces_packed()  # (sum(F_n), 3)
    # V = sum(V_n), F = sum(F_n)
    V, F = verts_packed.shape[0], faces_packed.shape[0]

    face_verts = verts_packed[faces_packed]
    v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]

    # Side lengths of each triangle, of shape (sum(F_n),)
    # A is the side opposite v0, B is opposite v1, and C is opposite v2.
    A = (v1 - v2).norm(dim=1)
    B = (v0 - v2).norm(dim=1)
    C = (v0 - v1).norm(dim=1)

    # Area of each triangle (with Heron's formula); shape is (sum(F_n),)
    s = 0.5 * (A + B + C)
    # Note that the Heron product can be slightly negative (for areas close
    # to 0), causing NaNs after sqrt(); we clamp it to a small positive value.
    area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()

    # Compute cotangents of angles, of shape (sum(F_n), 3)
    A2, B2, C2 = A * A, B * B, C * C
    cota = (B2 + C2 - A2) / area
    cotb = (A2 + C2 - B2) / area
    cotc = (A2 + B2 - C2) / area
    cot = torch.stack([cota, cotb, cotc], dim=1)
    cot /= 4.0

    # Construct a sparse matrix by basically doing:
    # L[v1, v2] = cota
    # L[v2, v0] = cotb
    # L[v0, v1] = cotc
    ii = faces_packed[:, [1, 2, 0]]
    jj = faces_packed[:, [2, 0, 1]]
    idx = torch.stack([ii, jj], dim=0).view(2, F * 3)
    L = torch.sparse.FloatTensor(idx, cot.view(-1), (V, V))

    # Make it symmetric; this means we are also setting
    # L[v2, v1] = cota
    # L[v0, v2] = cotb
    # L[v1, v0] = cotc
    L += L.t()

    # For each vertex, compute the sum of areas for triangles containing it.
    idx = faces_packed.view(-1)
    inv_areas = torch.zeros(V, dtype=torch.float32, device=meshes.device)
    val = torch.stack([area] * 3, dim=1).view(-1)
    inv_areas.scatter_add_(0, idx, val)
    idx = inv_areas > 0
    inv_areas[idx] = 1.0 / inv_areas[idx]
    inv_areas = inv_areas.view(-1, 1)

    return L, inv_areas
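# A minimal check sketch (not part of the original module): builds the
# cotangent Laplacian for a single right triangle, where the expected weights
# are easy to verify by hand against the cotangent identity above. Assumes
# pytorch3d is installed; `_example_laplacian_cot` is an illustrative helper name.
def _example_laplacian_cot():
    from pytorch3d.structures import Meshes

    # Right triangle with the 90-degree angle at v0 and 45-degree angles at
    # v1 and v2.
    verts = torch.tensor(
        [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=torch.float32
    )
    faces = torch.tensor([[0, 1, 2]], dtype=torch.int64)
    L, inv_areas = laplacian_cot(Meshes(verts=[verts], faces=[faces]))
    # After symmetrization, L couples v0 with v1 and v0 with v2 with weight
    # cot(45 deg) = 1, and v1 with v2 with weight cot(90 deg) = 0. Each vertex
    # touches only this triangle (area 0.5), so inv_areas is 2 everywhere.
    return L.to_dense(), inv_areas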