odak.learn.raytracing

Provides necessary definitions for geometric optics. See "General Ray-Tracing Procedure" by G. H. Spencer and M. V. R. K. Murty for more theoretical background.

detector

A class to represent a detector.

Source code in odak/learn/raytracing/detector.py
class detector():
    """
    A class to represent a detector.
    """


    def __init__(
                 self,
                 colors = 3,
                 center = torch.tensor([0., 0., 0.]),
                 tilt = torch.tensor([0., 0., 0.]),
                 size = torch.tensor([10., 10.]),
                 resolution = torch.tensor([100, 100]),
                 device = torch.device('cpu')
                ):
        """
        Parameters
        ----------
        colors         : int
                         Number of color channels to register (e.g., RGB).
        center         : torch.tensor
                         Center point of the detector [3].
        tilt           : torch.tensor
                         Tilt angles of the surface in degrees [3].
        size           : torch.tensor
                         Size of the detector [2].
        resolution     : torch.tensor
                         Resolution of the detector.
        device         : torch.device
                         Device for computation (e.g., cuda, cpu).
        """
        self.device = device
        self.colors = colors
        self.resolution = resolution.to(self.device)
        self.surface_center = center.to(self.device)
        self.surface_tilt = tilt.to(self.device)
        self.size = size.to(self.device)
        self.pixel_size = torch.tensor([
                                        self.size[0] / self.resolution[0],
                                        self.size[1] / self.resolution[1]
                                       ], device  = self.device)
        self.pixel_diagonal_size = torch.sqrt(self.pixel_size[0] ** 2 + self.pixel_size[1] ** 2)
        self.pixel_diagonal_half_size = self.pixel_diagonal_size / 2.
        self.threshold = torch.nn.Threshold(self.pixel_diagonal_size, 1)
        self.plane = define_plane(
                                  point = self.surface_center,
                                  angles = self.surface_tilt
                                 )
        self.pixel_locations, _, _, _ = grid_sample(
                                                    size = self.size.tolist(),
                                                    no = self.resolution.tolist(),
                                                    center = self.surface_center.tolist(),
                                                    angles = self.surface_tilt.tolist()
                                                   )
        self.pixel_locations = self.pixel_locations.to(self.device)
        self.relu = torch.nn.ReLU()
        self.clear()


    def intersect(self, rays, color = 0):
        """
        Function to intersect rays with the detector


        Parameters
        ----------
        rays            : torch.tensor
                          Rays to be intersected with a detector.
                          Expected size is [1 x 2 x 3] or [m x 2 x 3].
        color           : int
                          Color channel to register.

        Returns
        -------
        points          : torch.tensor
                          Intersection points with the image detector [k x 3].
        """
        normals, _ = intersect_w_surface(rays, self.plane)
        points = normals[:, 0]
        distances_xyz = torch.abs(points.unsqueeze(1) - self.pixel_locations.unsqueeze(0))
        distances_x = 1e6 * self.relu( - (distances_xyz[:, :, 0] - self.pixel_size[0]))
        distances_y = 1e6 * self.relu( - (distances_xyz[:, :, 1] - self.pixel_size[1]))
        hit_x = torch.clamp(distances_x, min = 0., max = 1.)
        hit_y = torch.clamp(distances_y, min = 0., max = 1.)
        hit = hit_x * hit_y
        image = torch.sum(hit, dim = 0)
        self.image[color] += image.reshape(
                                           self.image.shape[-2], 
                                           self.image.shape[-1]
                                          )
        distances = torch.sum((points.unsqueeze(1) - self.pixel_locations.unsqueeze(0)) ** 2, dim = 2)
        distance_image = distances
#        distance_image = distances.reshape(
#                                           -1,
#                                           self.image.shape[-2],
#                                           self.image.shape[-1]
#                                          )
        return points, image, distance_image


    def get_image(self):
        """
        Function to return the detector image.

        Returns
        -------
        image           : torch.tensor
                          Detector image.
        """
        image = (self.image - self.image.min()) / (self.image.max() - self.image.min())
        return image


    def clear(self):
        """
        Internal function to clear a detector.
        """
        self.image = torch.zeros(

                                 self.colors,
                                 self.resolution[0],
                                 self.resolution[1],
                                 device = self.device,
                                )

__init__(colors=3, center=torch.tensor([0.0, 0.0, 0.0]), tilt=torch.tensor([0.0, 0.0, 0.0]), size=torch.tensor([10.0, 10.0]), resolution=torch.tensor([100, 100]), device=torch.device('cpu'))

Parameters:

  • colors
             Number of color channels to register (e.g., RGB).
    
  • center
             Center point of the detector [3].
    
  • tilt
             Tilt angles of the surface in degrees [3].
    
  • size
             Size of the detector [2].
    
  • resolution
             Resolution of the detector.
    
  • device
             Device for computation (e.g., cuda, cpu).
    
Source code in odak/learn/raytracing/detector.py
def __init__(
             self,
             colors = 3,
             center = torch.tensor([0., 0., 0.]),
             tilt = torch.tensor([0., 0., 0.]),
             size = torch.tensor([10., 10.]),
             resolution = torch.tensor([100, 100]),
             device = torch.device('cpu')
            ):
    """
    Parameters
    ----------
    colors         : int
                     Number of color channels to register (e.g., RGB).
    center         : torch.tensor
                     Center point of the detector [3].
    tilt           : torch.tensor
                     Tilt angles of the surface in degrees [3].
    size           : torch.tensor
                     Size of the detector [2].
    resolution     : torch.tensor
                     Resolution of the detector.
    device         : torch.device
                     Device for computation (e.g., cuda, cpu).
    """
    self.device = device
    self.colors = colors
    self.resolution = resolution.to(self.device)
    self.surface_center = center.to(self.device)
    self.surface_tilt = tilt.to(self.device)
    self.size = size.to(self.device)
    self.pixel_size = torch.tensor([
                                    self.size[0] / self.resolution[0],
                                    self.size[1] / self.resolution[1]
                                   ], device  = self.device)
    self.pixel_diagonal_size = torch.sqrt(self.pixel_size[0] ** 2 + self.pixel_size[1] ** 2)
    self.pixel_diagonal_half_size = self.pixel_diagonal_size / 2.
    self.threshold = torch.nn.Threshold(self.pixel_diagonal_size, 1)
    self.plane = define_plane(
                              point = self.surface_center,
                              angles = self.surface_tilt
                             )
    self.pixel_locations, _, _, _ = grid_sample(
                                                size = self.size.tolist(),
                                                no = self.resolution.tolist(),
                                                center = self.surface_center.tolist(),
                                                angles = self.surface_tilt.tolist()
                                               )
    self.pixel_locations = self.pixel_locations.to(self.device)
    self.relu = torch.nn.ReLU()
    self.clear()

clear()

Internal function to clear a detector.

Source code in odak/learn/raytracing/detector.py
def clear(self):
    """
    Internal function to clear a detector.
    """
    self.image = torch.zeros(

                             self.colors,
                             self.resolution[0],
                             self.resolution[1],
                             device = self.device,
                            )

get_image()

Function to return the detector image.

Returns:

  • image ( tensor ) –

    Detector image.

Source code in odak/learn/raytracing/detector.py
def get_image(self):
    """
    Function to return the detector image.

    Returns
    -------
    image           : torch.tensor
                      Detector image.
    """
    image = (self.image - self.image.min()) / (self.image.max() - self.image.min())
    return image

intersect(rays, color=0)

Function to intersect rays with the detector

Parameters:

  • rays
              Rays to be intersected with a detector.
              Expected size is [1 x 2 x 3] or [m x 2 x 3].
    
  • color
              Color channel to register.
    

Returns:

  • points ( tensor ) –

    Intersection points with the image detector [k x 3].

  • image ( tensor ) –

    Per-pixel hit map accumulated on the selected color channel.

  • distance_image ( tensor ) –

    Squared distances between each intersection point and each detector pixel location.

Source code in odak/learn/raytracing/detector.py
    def intersect(self, rays, color = 0):
        """
        Function to intersect rays with the detector


        Parameters
        ----------
        rays            : torch.tensor
                          Rays to be intersected with a detector.
                          Expected size is [1 x 2 x 3] or [m x 2 x 3].
        color           : int
                          Color channel to register.

        Returns
        -------
        points          : torch.tensor
                          Intersection points with the image detector [k x 3].
        """
        normals, _ = intersect_w_surface(rays, self.plane)
        points = normals[:, 0]
        distances_xyz = torch.abs(points.unsqueeze(1) - self.pixel_locations.unsqueeze(0))
        distances_x = 1e6 * self.relu( - (distances_xyz[:, :, 0] - self.pixel_size[0]))
        distances_y = 1e6 * self.relu( - (distances_xyz[:, :, 1] - self.pixel_size[1]))
        hit_x = torch.clamp(distances_x, min = 0., max = 1.)
        hit_y = torch.clamp(distances_y, min = 0., max = 1.)
        hit = hit_x * hit_y
        image = torch.sum(hit, dim = 0)
        self.image[color] += image.reshape(
                                           self.image.shape[-2], 
                                           self.image.shape[-1]
                                          )
        distances = torch.sum((points.unsqueeze(1) - self.pixel_locations.unsqueeze(0)) ** 2, dim = 2)
        distance_image = distances
#        distance_image = distances.reshape(
#                                           -1,
#                                           self.image.shape[-2],
#                                           self.image.shape[-1]
#                                          )
        return points, image, distance_image
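
Below is a minimal usage sketch of this class with assumed values; create_ray_from_two_points is assumed to be importable from the same odak.learn.raytracing module, and the ray endpoints and detector settings are illustrative only.

import torch
from odak.learn.raytracing import detector, create_ray_from_two_points

# Assumed example: a single ray travelling along +Z towards a detector at the origin.
starts = torch.tensor([[0., 0., -10.]])
ends = torch.tensor([[0., 0., 0.]])
rays = create_ray_from_two_points(starts, ends)                # [1 x 2 x 3]
sensor = detector(
                  colors = 1,
                  center = torch.tensor([0., 0., 0.]),
                  size = torch.tensor([10., 10.]),
                  resolution = torch.tensor([100, 100])
                 )
points, image, distance_image = sensor.intersect(rays, color = 0)
normalized_image = sensor.get_image()                          # [1 x 100 x 100], normalized to [0, 1]
sensor.clear()                                                 # reset the accumulated image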

planar_mesh

Source code in odak/learn/raytracing/mesh.py
class planar_mesh():


    def __init__(
                 self,
                 size = [1., 1.],
                 number_of_meshes = [10, 10],
                 angles = torch.tensor([0., 0., 0.]),
                 offset = torch.tensor([0., 0., 0.]),
                 device = torch.device('cpu'),
                 heights = None
                ):
        """
        Definition to generate a plane with meshes.


        Parameters
        -----------
        number_of_meshes  : torch.tensor
                            Number of squares over plane.
                            There are two triangles at each square.
        size              : torch.tensor
                            Size of the plane.
        angles            : torch.tensor
                            Rotation angles in degrees.
        offset            : torch.tensor
                            Offset along XYZ axes.
                            Expected dimension is [1 x 3] or offset for each triangle [m x 3].
                            m here refers to `2 * number_of_meshes[0]` times  `number_of_meshes[1]`.
        device            : torch.device
                            Computational resource to be used (e.g., cpu, cuda).
        heights           : torch.tensor
                            Load surface heights from a tensor.
        """
        self.device = device
        self.angles = angles.to(self.device)
        self.offset = offset.to(self.device)
        self.size = size.to(self.device)
        self.number_of_meshes = number_of_meshes.to(self.device)
        self.init_heights(heights)


    def init_heights(self, heights = None):
        """
        Internal function to initialize a height map.
        Note that self.heights is a differentiable variable, and can be optimized or learned.
        See unit test `test/test_learn_ray_detector.py` or `test/test_learn_ray_mesh.py` as examples.
        """
        if not isinstance(heights, type(None)):
            self.heights = heights.to(self.device)
            self.heights.requires_grad = True
        else:
            self.heights = torch.zeros(
                                       (self.number_of_meshes[0], self.number_of_meshes[1], 1),
                                       requires_grad = True,
                                       device = self.device,
                                      )
        x = torch.linspace(-self.size[0] / 2., self.size[0] / 2., self.number_of_meshes[0], device = self.device) 
        y = torch.linspace(-self.size[1] / 2., self.size[1] / 2., self.number_of_meshes[1], device = self.device)
        X, Y = torch.meshgrid(x, y, indexing = 'ij')
        self.X = X.unsqueeze(-1)
        self.Y = Y.unsqueeze(-1)


    def save_heights(self, filename = 'heights.pt'):
        """
        Function to save heights to a file.

        Parameters
        ----------
        filename          : str
                            Filename.
        """
        save_torch_tensor(filename, self.heights.detach().clone())


    def save_heights_as_PLY(self, filename = 'mesh.ply'):
        """
        Function to save mesh to a PLY file.

        Parameters
        ----------
        filename          : str
                            Filename.
        """
        triangles = self.get_triangles()
        write_PLY(triangles, filename)


    def get_squares(self):
        """
        Internal function to initiate squares over a plane.

        Returns
        -------
        squares     : torch.tensor
                      Squares over a plane.
                      Expected size is [m x n x 3].
        """
        squares = torch.cat((
                             self.X,
                             self.Y,
                             self.heights
                            ), dim = -1)
        return squares


    def get_triangles(self):
        """
        Internal function to get triangles.
        """ 
        squares = self.get_squares()
        triangles = torch.zeros(2, self.number_of_meshes[0], self.number_of_meshes[1], 3, 3, device = self.device)
        for i in range(0, self.number_of_meshes[0] - 1):
            for j in range(0, self.number_of_meshes[1] - 1):
                first_triangle = torch.cat((
                                            squares[i + 1, j].unsqueeze(0),
                                            squares[i + 1, j + 1].unsqueeze(0),
                                            squares[i, j + 1].unsqueeze(0),
                                           ), dim = 0)
                second_triangle = torch.cat((
                                             squares[i + 1, j].unsqueeze(0),
                                             squares[i, j + 1].unsqueeze(0),
                                             squares[i, j].unsqueeze(0),
                                            ), dim = 0)
                triangles[0, i, j], _, _, _ = rotate_points(first_triangle, angles = self.angles)
                triangles[1, i, j], _, _, _ = rotate_points(second_triangle, angles = self.angles)
        triangles = triangles.view(-1, 3, 3) + self.offset
        return triangles 


    def mirror(self, rays):
        """
        Function to bounce light rays off the meshes.

        Parameters
        ----------
        rays              : torch.tensor
                            Rays to be bounced.
                            Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

        Returns
        -------
        reflected_rays    : torch.tensor
                            Reflected rays.
                            Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].
        reflected_normals : torch.tensor
                            Reflected normals.
                            Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

        """
        if len(rays.shape) == 2:
            rays = rays.unsqueeze(0)
        triangles = self.get_triangles()
        reflected_rays = torch.empty((0, 2, 3), requires_grad = True, device = self.device)
        reflected_normals = torch.empty((0, 2, 3), requires_grad = True, device = self.device)
        for triangle in triangles:
            _, _, intersecting_rays, intersecting_normals, check = intersect_w_triangle(
                                                                                        rays,
                                                                                        triangle
                                                                                       ) 
            triangle_reflected_rays = reflect(intersecting_rays, intersecting_normals)
            if triangle_reflected_rays.shape[0] > 0:
                reflected_rays = torch.cat((
                                            reflected_rays,
                                            triangle_reflected_rays
                                          ))
                reflected_normals = torch.cat((
                                               reflected_normals,
                                               intersecting_normals
                                              ))
        return reflected_rays, reflected_normals

__init__(size=[1.0, 1.0], number_of_meshes=[10, 10], angles=torch.tensor([0.0, 0.0, 0.0]), offset=torch.tensor([0.0, 0.0, 0.0]), device=torch.device('cpu'), heights=None)

Definition to generate a plane with meshes.

Parameters:

  • number_of_meshes
                Number of squares over plane.
                There are two triangles at each square.
    
  • size
                Size of the plane.
    
  • angles
                Rotation angles in degrees.
    
  • offset
                Offset along XYZ axes.
                Expected dimension is [1 x 3] or offset for each triangle [m x 3].
                m here refers to `2 * number_of_meshes[0]` times  `number_of_meshes[1]`.
    
  • device
                Computational resource to be used (e.g., cpu, cuda).
    
  • heights
                Load surface heights from a tensor.
    
Source code in odak/learn/raytracing/mesh.py
def __init__(
             self,
             size = [1., 1.],
             number_of_meshes = [10, 10],
             angles = torch.tensor([0., 0., 0.]),
             offset = torch.tensor([0., 0., 0.]),
             device = torch.device('cpu'),
             heights = None
            ):
    """
    Definition to generate a plane with meshes.


    Parameters
    -----------
    number_of_meshes  : torch.tensor
                        Number of squares over plane.
                        There are two triangles at each square.
    size              : torch.tensor
                        Size of the plane.
    angles            : torch.tensor
                        Rotation angles in degrees.
    offset            : torch.tensor
                        Offset along XYZ axes.
                        Expected dimension is [1 x 3] or offset for each triangle [m x 3].
                        m here refers to `2 * number_of_meshes[0]` times  `number_of_meshes[1]`.
    device            : torch.device
                        Computational resource to be used (e.g., cpu, cuda).
    heights           : torch.tensor
                        Load surface heights from a tensor.
    """
    self.device = device
    self.angles = angles.to(self.device)
    self.offset = offset.to(self.device)
    self.size = size.to(self.device)
    self.number_of_meshes = number_of_meshes.to(self.device)
    self.init_heights(heights)

get_squares()

Internal function to initiate squares over a plane.

Returns:

  • squares ( tensor ) –

    Squares over a plane. Expected size is [m x n x 3].

Source code in odak/learn/raytracing/mesh.py
def get_squares(self):
    """
    Internal function to initiate squares over a plane.

    Returns
    -------
    squares     : torch.tensor
                  Squares over a plane.
                  Expected size is [m x n x 3].
    """
    squares = torch.cat((
                         self.X,
                         self.Y,
                         self.heights
                        ), dim = -1)
    return squares

get_triangles()

Internal function to get triangles.

Source code in odak/learn/raytracing/mesh.py
def get_triangles(self):
    """
    Internal function to get triangles.
    """ 
    squares = self.get_squares()
    triangles = torch.zeros(2, self.number_of_meshes[0], self.number_of_meshes[1], 3, 3, device = self.device)
    for i in range(0, self.number_of_meshes[0] - 1):
        for j in range(0, self.number_of_meshes[1] - 1):
            first_triangle = torch.cat((
                                        squares[i + 1, j].unsqueeze(0),
                                        squares[i + 1, j + 1].unsqueeze(0),
                                        squares[i, j + 1].unsqueeze(0),
                                       ), dim = 0)
            second_triangle = torch.cat((
                                         squares[i + 1, j].unsqueeze(0),
                                         squares[i, j + 1].unsqueeze(0),
                                         squares[i, j].unsqueeze(0),
                                        ), dim = 0)
            triangles[0, i, j], _, _, _ = rotate_points(first_triangle, angles = self.angles)
            triangles[1, i, j], _, _, _ = rotate_points(second_triangle, angles = self.angles)
    triangles = triangles.view(-1, 3, 3) + self.offset
    return triangles 

init_heights(heights=None)

Internal function to initialize a height map. Note that self.heights is a differentiable variable, and can be optimized or learned. See unit test test/test_learn_ray_detector.py or test/test_learn_ray_mesh.py as examples.

Source code in odak/learn/raytracing/mesh.py
def init_heights(self, heights = None):
    """
    Internal function to initialize a height map.
    Note that self.heights is a differentiable variable, and can be optimized or learned.
    See unit test `test/test_learn_ray_detector.py` or `test/test_learn_ray_mesh.py` as examples.
    """
    if not isinstance(heights, type(None)):
        self.heights = heights.to(self.device)
        self.heights.requires_grad = True
    else:
        self.heights = torch.zeros(
                                   (self.number_of_meshes[0], self.number_of_meshes[1], 1),
                                   requires_grad = True,
                                   device = self.device,
                                  )
    x = torch.linspace(-self.size[0] / 2., self.size[0] / 2., self.number_of_meshes[0], device = self.device) 
    y = torch.linspace(-self.size[1] / 2., self.size[1] / 2., self.number_of_meshes[1], device = self.device)
    X, Y = torch.meshgrid(x, y, indexing = 'ij')
    self.X = X.unsqueeze(-1)
    self.Y = Y.unsqueeze(-1)

mirror(rays)

Function to bounce light rays off the meshes.

Parameters:

  • rays
                Rays to be bounced.
                Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].
    

Returns:

  • reflected_rays ( tensor ) –

    Reflected rays. Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

  • reflected_normals ( tensor ) –

    Reflected normals. Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

Source code in odak/learn/raytracing/mesh.py
def mirror(self, rays):
    """
    Function to bounce light rays off the meshes.

    Parameters
    ----------
    rays              : torch.tensor
                        Rays to be bounced.
                        Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

    Returns
    -------
    reflected_rays    : torch.tensor
                        Reflected rays.
                        Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].
    reflected_normals : torch.tensor
                        Reflected normals.
                        Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

    """
    if len(rays.shape) == 2:
        rays = rays.unsqueeze(0)
    triangles = self.get_triangles()
    reflected_rays = torch.empty((0, 2, 3), requires_grad = True, device = self.device)
    reflected_normals = torch.empty((0, 2, 3), requires_grad = True, device = self.device)
    for triangle in triangles:
        _, _, intersecting_rays, intersecting_normals, check = intersect_w_triangle(
                                                                                    rays,
                                                                                    triangle
                                                                                   ) 
        triangle_reflected_rays = reflect(intersecting_rays, intersecting_normals)
        if triangle_reflected_rays.shape[0] > 0:
            reflected_rays = torch.cat((
                                        reflected_rays,
                                        triangle_reflected_rays
                                      ))
            reflected_normals = torch.cat((
                                           reflected_normals,
                                           intersecting_normals
                                          ))
    return reflected_rays, reflected_normals

save_heights(filename='heights.pt')

Function to save heights to a file.

Parameters:

  • filename
                Filename.
    
Source code in odak/learn/raytracing/mesh.py
def save_heights(self, filename = 'heights.pt'):
    """
    Function to save heights to a file.

    Parameters
    ----------
    filename          : str
                        Filename.
    """
    save_torch_tensor(filename, self.heights.detach().clone())

save_heights_as_PLY(filename='mesh.ply')

Function to save mesh to a PLY file.

Parameters:

  • filename
                Filename.
    
Source code in odak/learn/raytracing/mesh.py
def save_heights_as_PLY(self, filename = 'mesh.ply'):
    """
    Function to save mesh to a PLY file.

    Parameters
    ----------
    filename          : str
                        Filename.
    """
    triangles = self.get_triangles()
    write_PLY(triangles, filename)
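
Below is a minimal usage sketch of planar_mesh with assumed values; note that size and number_of_meshes are passed as tensors here because __init__ calls .to(device) on them.

import torch
from odak.learn.raytracing import planar_mesh, create_ray_from_two_points

mesh = planar_mesh(
                   size = torch.tensor([1., 1.]),
                   number_of_meshes = torch.tensor([10, 10])
                  )
# Assumed example ray travelling towards the mesh plane at z = 0.
rays = create_ray_from_two_points(
                                  torch.tensor([[0.1, 0.1, 1.]]),
                                  torch.tensor([[0.1, 0.1, 0.]])
                                 )
reflected_rays, reflected_normals = mesh.mirror(rays)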

center_of_triangle(triangle)

Definition to calculate center of a triangle.

Parameters:

  • triangle
            An array that contains three points defining a triangle (Mx3).
            It can also process many triangles in parallel (NxMx3).
    

Returns:

  • centers ( tensor ) –

    Triangle centers.

Source code in odak/learn/raytracing/primitives.py
def center_of_triangle(triangle):
    """
    Definition to calculate center of a triangle.

    Parameters
    ----------
    triangle      : torch.tensor
                    An array that contains three points defining a triangle (Mx3). 
                    It can also parallel process many triangles (NxMx3).

    Returns
    -------
    centers       : torch.tensor
                    Triangle centers.
    """
    if len(triangle.shape) == 2:
        triangle = triangle.view((1, 3, 3))
    center = torch.mean(triangle, axis=1)
    return center

create_ray(xyz, abg, direction=False)

Definition to create a ray.

Parameters:

  • xyz
           List that contains X,Y and Z start locations of a ray.
           Size could be [1 x 3], [3], [m x 3].
    
  • abg
           List that contains angles in degrees with respect to the X,Y and Z axes.
           Size could be [1 x 3], [3], [m x 3].
    
  • direction
           If set to True, cosines of `abg` are not calculated.
    

Returns:

  • ray ( tensor ) –

    Array that contains starting points and cosines of a created ray. Size will be either [1 x 3] or [m x 3].

Source code in odak/learn/raytracing/ray.py
def create_ray(xyz, abg, direction = False):
    """
    Definition to create a ray.

    Parameters
    ----------
    xyz          : torch.tensor
                   List that contains X,Y and Z start locations of a ray.
                   Size could be [1 x 3], [3], [m x 3].
    abg          : torch.tensor
                   List that contains angles in degrees with respect to the X,Y and Z axes.
                   Size could be [1 x 3], [3], [m x 3].
    direction    : bool
                   If set to True, cosines of `abg` is not calculated.

    Returns
    ----------
    ray          : torch.tensor
                   Array that contains starting points and cosines of a created ray.
                   Size will be either [1 x 3] or [m x 3].
    """
    points = xyz
    angles = abg
    if len(xyz) == 1:
        points = xyz.unsqueeze(0)
    if len(abg) == 1:
        angles = abg.unsqueeze(0)
    ray = torch.zeros(points.shape[0], 2, 3, device = points.device)
    ray[:, 0] = points
    if direction:
        ray[:, 1] = abg
    else:
        ray[:, 1] = torch.cos(torch.deg2rad(abg))
    return ray
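
A minimal usage sketch with assumed start points and angles; two rays are created at once to keep the inputs in the [m x 3] form.

import torch
from odak.learn.raytracing import create_ray

xyz = torch.tensor([
                    [0., 0., 0.],
                    [1., 0., 0.]
                   ])                                          # start locations [2 x 3]
abg = torch.tensor([
                    [90., 90., 0.],
                    [90., 90., 0.]
                   ])                                          # angles w.r.t. X, Y and Z axes in degrees
rays = create_ray(xyz, abg)                                    # [2 x 2 x 3], both propagating along +Z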

create_ray_from_all_pairs(x0y0z0, x1y1z1)

Creates rays from all possible pairs of points in x0y0z0 and x1y1z1.

Parameters:

  • x0y0z0
           Tensor that contains X, Y, and Z start locations of rays.
           Size should be [m x 3].
    
  • x1y1z1
           Tensor that contains X, Y, and Z end locations of rays.
           Size should be [n x 3].
    

Returns:

  • rays ( tensor ) –

    Array that contains starting points and cosines of a created ray(s). Size of [n*m x 2 x 3]

Source code in odak/learn/raytracing/ray.py
def create_ray_from_all_pairs(x0y0z0, x1y1z1):
    """
    Creates rays from all possible pairs of points in x0y0z0 and x1y1z1.

    Parameters
    ----------
    x0y0z0       : torch.tensor
                   Tensor that contains X, Y, and Z start locations of rays.
                   Size should be [m x 3].
    x1y1z1       : torch.tensor
                   Tensor that contains X, Y, and Z end locations of rays.
                   Size should be [n x 3].

    Returns
    ----------
    rays         : torch.tensor
                   Array that contains starting points and cosines of a created ray(s). Size of [n*m x 2 x 3]
    """

    if len(x0y0z0.shape) == 1:
        x0y0z0 = x0y0z0.unsqueeze(0)
    if len(x1y1z1.shape) == 1:
        x1y1z1 = x1y1z1.unsqueeze(0)

    m, n = x0y0z0.shape[0], x1y1z1.shape[0]
    start_points = x0y0z0.unsqueeze(1).expand(-1, n, -1).reshape(-1, 3)
    end_points = x1y1z1.unsqueeze(0).expand(m, -1, -1).reshape(-1, 3)

    directions = end_points - start_points
    norms = torch.norm(directions, p=2, dim=1, keepdim=True)
    norms[norms == 0] = float('nan')

    normalized_directions = directions / norms

    rays = torch.zeros(m * n, 2, 3, device=x0y0z0.device)
    rays[:, 0, :] = start_points
    rays[:, 1, :] = normalized_directions

    return rays
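
A minimal usage sketch with assumed start and end points.

import torch
from odak.learn.raytracing import create_ray_from_all_pairs

starts = torch.tensor([[0., 0., 0.], [1., 0., 0.]])            # m = 2 start points
ends = torch.tensor([[0., 0., 10.], [0., 1., 10.], [1., 1., 10.]])  # n = 3 end points
rays = create_ray_from_all_pairs(starts, ends)                 # [6 x 2 x 3], one ray per pair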

create_ray_from_grid_w_luminous_angle(center, size, no, tilt, num_ray_per_light, angle_limit)

Generate a 2D array of lights, each emitting rays within a specified solid angle and tilt.

Parameters:

  • center
              The center point of the light array, shape [3].

  • size
              The size of the light array [height, width].

  • no
              The number of lights in the array [number of lights in height, number of lights in width].

  • tilt
              The tilt angles in degrees along x, y, z axes for the rays, shape [3].

  • num_ray_per_light
              The number of rays each light should emit.

  • angle_limit
              The maximum angle in degrees from the initial direction vector within which to emit rays.

Returns:

  • rays ( tensor ) –

    Array that contains starting points and cosines of the created ray(s). Size of [n x 2 x 3].

Source code in odak/learn/raytracing/ray.py
def create_ray_from_grid_w_luminous_angle(center, size, no, tilt, num_ray_per_light, angle_limit):
    """
    Generate a 2D array of lights, each emitting rays within a specified solid angle and tilt.

    Parameters:
    ----------
    center              : torch.tensor
                          The center point of the light array, shape [3].
    size                : list[int]
                          The size of the light array [height, width]
    no                  : list[int]
                          The number of lights in the array [number of lights in height, number of lights in width].
    tilt                : torch.tensor
                          The tilt angles in degrees along x, y, z axes for the rays, shape [3].
    angle_limit         : float
                          The maximum angle in degrees from the initial direction vector within which to emit rays.
    num_ray_per_light   : int
                          The number of rays each light should emit.

    Returns:
    ----------
    rays : torch.tensor
           Array that contains starting points and cosines of a created ray(s). Size of [n x 2 x 3]
    """

    samples = torch.zeros((no[0], no[1], 3))

    x = torch.linspace(-size[0] / 2., size[0] / 2., no[0])
    y = torch.linspace(-size[1] / 2., size[1] / 2., no[1])
    X, Y = torch.meshgrid(x, y, indexing='ij')

    samples[:, :, 0] = X.detach().clone()
    samples[:, :, 1] = Y.detach().clone()
    samples = samples.reshape((no[0]*no[1], 3))

    samples, *_ = rotate_points(samples, angles=tilt)

    samples = samples + center
    angle_limit = torch.as_tensor(angle_limit)
    cos_alpha = torch.cos(angle_limit * torch.pi / 180)
    tilt = tilt * torch.pi / 180

    theta = torch.acos(1 - 2 * torch.rand(num_ray_per_light*samples.size(0)) * (1-cos_alpha))
    phi = 2 * torch.pi * torch.rand(num_ray_per_light*samples.size(0))  

    directions = torch.stack([
        torch.sin(theta) * torch.cos(phi),  
        torch.sin(theta) * torch.sin(phi),  
        torch.cos(theta)                    
    ], dim=1)

    c, s = torch.cos(tilt), torch.sin(tilt)

    Rx = torch.tensor([
        [1, 0, 0],
        [0, c[0], -s[0]],
        [0, s[0], c[0]]
    ])

    Ry = torch.tensor([
        [c[1], 0, s[1]],
        [0, 1, 0],
        [-s[1], 0, c[1]]
    ])

    Rz = torch.tensor([
        [c[2], -s[2], 0],
        [s[2], c[2], 0],
        [0, 0, 1]
    ])

    origins = samples.repeat(num_ray_per_light, 1)

    directions = torch.matmul(directions, (Rz@Ry@Rx).T)


    rays = torch.zeros(num_ray_per_light*samples.size(0), 2, 3)
    rays[:, 0, :] = origins
    rays[:, 1, :] = directions

    return rays
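
A minimal usage sketch with assumed values: a 3 x 3 grid of lights, each emitting ten rays within a twenty-degree cone.

import torch
from odak.learn.raytracing import create_ray_from_grid_w_luminous_angle

rays = create_ray_from_grid_w_luminous_angle(
                                             center = torch.tensor([0., 0., 0.]),
                                             size = [5., 5.],
                                             no = [3, 3],
                                             tilt = torch.tensor([0., 0., 0.]),
                                             num_ray_per_light = 10,
                                             angle_limit = 20.
                                            )
print(rays.shape)                                              # torch.Size([90, 2, 3])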

create_ray_from_point_w_luminous_angle(origin, num_ray, tilt, angle_limit)

Generate rays from a point, tilted by specific angles along x, y, z axes, within a specified solid angle.

Parameters:

  • origin
              The origin point of the rays, shape [3].

  • num_ray
              The total number of rays to generate.

  • tilt
              The tilt angles in degrees along x, y, z axes, shape [3].

  • angle_limit
              The maximum angle in degrees from the initial direction vector within which to emit rays.

Returns:

  • rays ( tensor ) –

    Array that contains starting points and cosines of the created ray(s). Size of [n x 2 x 3].

Source code in odak/learn/raytracing/ray.py
def create_ray_from_point_w_luminous_angle(origin, num_ray, tilt, angle_limit):
    """
    Generate rays from a point, tilted by specific angles along x, y, z axes, within a specified solid angle.

    Parameters:
    ----------
    origin      : torch.tensor
                  The origin point of the rays, shape [3].
    num_ray     : int
                  The total number of rays to generate.
    tilt        : torch.tensor
                  The tilt angles in degrees along x, y, z axes, shape [3].
    angle_limit : float
                  The maximum angle in degrees from the initial direction vector within which to emit rays.

    Returns:
    ----------
    rays : torch.tensor
           Array that contains starting points and cosines of a created ray(s). Size of [n x 2 x 3]
    """
    angle_limit = torch.as_tensor(angle_limit) 
    cos_alpha = torch.cos(angle_limit * torch.pi / 180)
    tilt = tilt * torch.pi / 180

    theta = torch.acos(1 - 2 * torch.rand(num_ray) * (1-cos_alpha))
    phi = 2 * torch.pi * torch.rand(num_ray)  


    directions = torch.stack([
        torch.sin(theta) * torch.cos(phi),  
        torch.sin(theta) * torch.sin(phi),  
        torch.cos(theta)                    
    ], dim=1)

    c, s = torch.cos(tilt), torch.sin(tilt)

    Rx = torch.tensor([
        [1, 0, 0],
        [0, c[0], -s[0]],
        [0, s[0], c[0]]
    ])

    Ry = torch.tensor([
        [c[1], 0, s[1]],
        [0, 1, 0],
        [-s[1], 0, c[1]]
    ])

    Rz = torch.tensor([
        [c[2], -s[2], 0],
        [s[2], c[2], 0],
        [0, 0, 1]
    ])

    origins = origin.repeat(num_ray, 1)
    directions = torch.matmul(directions, (Rz@Ry@Rx).T)


    rays = torch.zeros(num_ray, 2, 3)
    rays[:, 0, :] = origins
    rays[:, 1, :] = directions

    return rays
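
A minimal usage sketch with assumed values: one hundred rays leaving a single point within a thirty-degree cone.

import torch
from odak.learn.raytracing import create_ray_from_point_w_luminous_angle

rays = create_ray_from_point_w_luminous_angle(
                                              origin = torch.tensor([0., 0., 0.]),
                                              num_ray = 100,
                                              tilt = torch.tensor([0., 0., 0.]),
                                              angle_limit = 30.
                                             )
print(rays.shape)                                              # torch.Size([100, 2, 3])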

create_ray_from_two_points(x0y0z0, x1y1z1)

Definition to create a ray from two given points. Note that both inputs must match in shape.

Parameters:

  • x0y0z0
           List that contains X,Y and Z start locations of a ray.
           Size could be [1 x 3], [3], [m x 3].
    
  • x1y1z1
           List that contains X,Y and Z ending locations of a ray or batch of rays.
           Size could be [1 x 3], [3], [m x 3].
    

Returns:

  • ray ( tensor ) –

    Array that contains starting points and cosines of a created ray(s).

Source code in odak/learn/raytracing/ray.py
def create_ray_from_two_points(x0y0z0, x1y1z1):
    """
    Definition to create a ray from two given points. Note that both inputs must match in shape.

    Parameters
    ----------
    x0y0z0       : torch.tensor
                   List that contains X,Y and Z start locations of a ray.
                   Size could be [1 x 3], [3], [m x 3].
    x1y1z1       : torch.tensor
                   List that contains X,Y and Z ending locations of a ray or batch of rays.
                   Size could be [1 x 3], [3], [m x 3].

    Returns
    ----------
    ray          : torch.tensor
                   Array that contains starting points and cosines of a created ray(s).
    """
    if len(x0y0z0.shape) == 1:
        x0y0z0 = x0y0z0.unsqueeze(0)
    if len(x1y1z1.shape) == 1:
        x1y1z1 = x1y1z1.unsqueeze(0)
    xdiff = x1y1z1[:, 0] - x0y0z0[:, 0]
    ydiff = x1y1z1[:, 1] - x0y0z0[:, 1]
    zdiff = x1y1z1[:, 2] - x0y0z0[:, 2]
    s = (xdiff ** 2 + ydiff ** 2 + zdiff ** 2) ** 0.5
    s[s == 0] = float('nan')
    cosines = torch.zeros_like(x0y0z0 * x1y1z1)
    cosines[:, 0] = xdiff / s
    cosines[:, 1] = ydiff / s
    cosines[:, 2] = zdiff / s
    ray = torch.zeros(xdiff.shape[0], 2, 3, device = x0y0z0.device)
    ray[:, 0] = x0y0z0
    ray[:, 1] = cosines
    return ray
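
A minimal usage sketch with assumed points.

import torch
from odak.learn.raytracing import create_ray_from_two_points

start = torch.tensor([[0., 0., 0.]])
end = torch.tensor([[0., 0., 10.]])
ray = create_ray_from_two_points(start, end)                   # [1 x 2 x 3], direction cosines [0., 0., 1.]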

define_circle(center, radius, angles)

Definition to describe a circle in a single variable packed form.

Parameters:

  • center
      Center of a circle to be defined in 3D space.
    
  • radius
      Radius of a circle to be defined.
    
  • angles
      Angular tilt of a circle represented by rotations about x, y, and z axes.
    

Returns:

  • circle ( list ) –

    Single variable packed form.

Source code in odak/learn/raytracing/primitives.py
def define_circle(center, radius, angles):
    """
    Definition to describe a circle in a single variable packed form.

    Parameters
    ----------
    center  : torch.Tensor
              Center of a circle to be defined in 3D space.
    radius  : float
              Radius of a circle to be defined.
    angles  : torch.Tensor
              Angular tilt of a circle represented by rotations about x, y, and z axes.

    Returns
    ----------
    circle  : list
              Single variable packed form.
    """
    points = define_plane(center, angles=angles)
    circle = [
        points,
        center,
        torch.tensor([radius])
    ]
    return circle

define_plane(point, angles=torch.tensor([0.0, 0.0, 0.0]))

Definition to generate points defining a plane from a center point and rotation angles in degrees.

Parameters:

  • point
           A point that is at the center of a plane.
    
  • angles
           Rotation angles in degrees.
    

Returns:

  • plane ( tensor ) –

    Points defining plane.

Source code in odak/learn/raytracing/primitives.py
def define_plane(point, angles = torch.tensor([0., 0., 0.])):
    """ 
    Definition to generate points defining a plane from a center point and rotation angles in degrees.

    Parameters
    ----------
    point        : torch.tensor
                   A point that is at the center of a plane.
    angles       : torch.tensor
                   Rotation angles in degrees.

    Returns
    ----------
    plane        : torch.tensor
                   Points defining plane.
    """
    plane = torch.tensor([
                          [10., 10., 0.],
                          [0., 10., 0.],
                          [0.,  0., 0.]
                         ], device = point.device)
    for i in range(0, plane.shape[0]):
        plane[i], _, _, _ = rotate_points(plane[i], angles = angles.to(point.device))
        plane[i] = plane[i] + point
    return plane
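
A minimal usage sketch with assumed values, combining define_plane with intersect_w_surface from this module.

import torch
from odak.learn.raytracing import define_plane, intersect_w_surface, create_ray_from_two_points

plane = define_plane(
                     point = torch.tensor([0., 0., 10.]),
                     angles = torch.tensor([0., 30., 0.])
                    )
ray = create_ray_from_two_points(
                                 torch.tensor([[0., 0., 0.]]),
                                 torch.tensor([[0., 0., 1.]])
                                )
normal, distance = intersect_w_surface(ray, plane)             # surface normal at the hit point and hit distance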

define_plane_mesh(number_of_meshes=[10, 10], size=[1.0, 1.0], angles=torch.tensor([0.0, 0.0, 0.0]), offset=torch.tensor([[0.0, 0.0, 0.0]]))

Definition to generate a plane with meshes.

Parameters:

  • number_of_meshes
                Number of squares over plane.
                There are two triangles at each square.
    
  • size
                Size of the plane.
    
  • angles
                Rotation angles in degrees.
    
  • offset
                Offset along XYZ axes.
                Expected dimension is [1 x 3] or offset for each triangle [m x 3].
                m here refers to `2 * number_of_meshes[0]` times  `number_of_meshes[1]`.
    

Returns:

  • triangles ( tensor ) –

    Triangles [m x 3 x 3], where m is 2 * number_of_meshes[0] times number_of_meshes[1].

Source code in odak/learn/raytracing/primitives.py
def define_plane_mesh(
                      number_of_meshes = [10, 10], 
                      size = [1., 1.], 
                      angles = torch.tensor([0., 0., 0.]), 
                      offset = torch.tensor([[0., 0., 0.]])
                     ):
    """
    Definition to generate a plane with meshes.


    Parameters
    -----------
    number_of_meshes  : torch.tensor
                        Number of squares over plane.
                        There are two triangles at each square.
    size              : list
                        Size of the plane.
    angles            : torch.tensor
                        Rotation angles in degrees.
    offset            : torch.tensor
                        Offset along XYZ axes.
                        Expected dimension is [1 x 3] or offset for each triangle [m x 3].
                        m here refers to `2 * number_of_meshes[0]` times  `number_of_meshes[1]`. 

    Returns
    -------
    triangles         : torch.tensor
                        Triangles [m x 3 x 3], where m is `2 * number_of_meshes[0]` times  `number_of_meshes[1]`.
    """
    triangles = torch.zeros(2, number_of_meshes[0], number_of_meshes[1], 3, 3)
    step = [size[0] / number_of_meshes[0], size[1] / number_of_meshes[1]]
    for i in range(0, number_of_meshes[0] - 1):
        for j in range(0, number_of_meshes[1] - 1):
            first_triangle = torch.tensor([
                                           [       -size[0] / 2. + step[0] * i,       -size[1] / 2. + step[0] * j, 0.],
                                           [ -size[0] / 2. + step[0] * (i + 1),       -size[1] / 2. + step[0] * j, 0.],
                                           [       -size[0] / 2. + step[0] * i, -size[1] / 2. + step[0] * (j + 1), 0.]
                                          ])
            second_triangle = torch.tensor([
                                            [ -size[0] / 2. + step[0] * (i + 1), -size[1] / 2. + step[0] * (j + 1), 0.],
                                            [ -size[0] / 2. + step[0] * (i + 1),       -size[1] / 2. + step[0] * j, 0.],
                                            [       -size[0] / 2. + step[0] * i, -size[1] / 2. + step[0] * (j + 1), 0.]
                                           ])
            triangles[0, i, j], _, _, _ = rotate_points(first_triangle, angles = angles)
            triangles[1, i, j], _, _, _ = rotate_points(second_triangle, angles = angles)
    triangles = triangles.view(-1, 3, 3) + offset
    return triangles
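
A minimal usage sketch with assumed values.

import torch
from odak.learn.raytracing import define_plane_mesh

triangles = define_plane_mesh(
                              number_of_meshes = [8, 8],
                              size = [2., 2.],
                              angles = torch.tensor([0., 0., 0.]),
                              offset = torch.tensor([[0., 0., 5.]])
                             )
print(triangles.shape)                                         # torch.Size([128, 3, 3])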

define_sphere(center=torch.tensor([[0.0, 0.0, 0.0]]), radius=torch.tensor([1.0]))

Definition to define a sphere.

Parameters:

  • center
          Center of the sphere(s) along XYZ axes.
          Expected size is [3], [1, 3] or [m, 3].
    
  • radius
          Radius of that sphere(s).
          Expected size is [1], [1, 1], [m] or [m, 1].
    

Returns:

  • parameters ( tensor ) –

    Parameters of defined sphere(s). Expected size is [1, 3] or [m x 3].

Source code in odak/learn/raytracing/primitives.py
def define_sphere(center = torch.tensor([[0., 0., 0.]]), radius = torch.tensor([1.])):
    """
    Definition to define a sphere.

    Parameters
    ----------
    center      : torch.tensor
                  Center of the sphere(s) along XYZ axes.
                  Expected size is [3], [1, 3] or [m, 3].
    radius      : torch.tensor
                  Radius of that sphere(s).
                  Expected size is [1], [1, 1], [m] or [m, 1].

    Returns
    -------
    parameters  : torch.tensor
                  Parameters of defined sphere(s).
                  Expected size is [1, 3] or [m x 3].
    """
    if len(radius.shape) == 1:
        radius = radius.unsqueeze(0)
    if len(center.shape) == 1:
        center = center.unsqueeze(1)
    parameters = torch.cat((center, radius), dim = 1)
    return parameters
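
A minimal usage sketch with assumed values.

import torch
from odak.learn.raytracing import define_sphere

sphere = define_sphere(
                       center = torch.tensor([[0., 0., 10.]]),
                       radius = torch.tensor([3.])
                      )
print(sphere)                                                  # tensor([[ 0.,  0., 10.,  3.]])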

distance_between_two_points(point1, point2)

Definition to calculate distance between two given points.

Parameters:

  • point1
          First point in X,Y,Z.
    
  • point2
          Second point in X,Y,Z.
    

Returns:

  • distance ( Tensor ) –

    Distance in between given two points.

Source code in odak/learn/tools/vector.py
def distance_between_two_points(point1, point2):
    """
    Definition to calculate distance between two given points.

    Parameters
    ----------
    point1      : torch.Tensor
                  First point in X,Y,Z.
    point2      : torch.Tensor
                  Second point in X,Y,Z.

    Returns
    ----------
    distance    : torch.Tensor
                  Distance in between given two points.
    """
    point1 = torch.tensor(point1) if not isinstance(point1, torch.Tensor) else point1
    point2 = torch.tensor(point2) if not isinstance(point2, torch.Tensor) else point2

    if len(point1.shape) == 1 and len(point2.shape) == 1:
        distance = torch.sqrt(torch.sum((point1 - point2) ** 2))
    elif len(point1.shape) == 2 or len(point2.shape) == 2:
        distance = torch.sqrt(torch.sum((point1 - point2) ** 2, dim=-1))

    return distance
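
A minimal usage sketch; since the source lives in odak/learn/tools/vector.py, the function is assumed to be importable from odak.learn.tools.

import torch
from odak.learn.tools import distance_between_two_points

d = distance_between_two_points(
                                torch.tensor([0., 0., 0.]),
                                torch.tensor([3., 4., 0.])
                               )
print(d)                                                       # tensor(5.)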

get_sphere_normal_torch(point, sphere)

Definition to get a normal of a point on a given sphere.

Parameters:

  • point
            Point on sphere in X,Y,Z.
    
  • sphere
            Center defined in X,Y,Z and radius.
    

Returns:

  • normal_vector ( tensor ) –

    Normal vector.

Source code in odak/learn/raytracing/boundary.py
def get_sphere_normal_torch(point, sphere):
    """
    Definition to get a normal of a point on a given sphere.

    Parameters
    ----------
    point         : torch.tensor
                    Point on sphere in X,Y,Z.
    sphere        : torch.tensor
                    Center defined in X,Y,Z and radius.

    Returns
    ----------
    normal_vector : torch.tensor
                    Normal vector.
    """
    if len(point.shape) == 1:
        point = point.reshape((1, 3))
    normal_vector = create_ray_from_two_points(point, sphere[0:3])
    return normal_vector

get_triangle_normal(triangle, triangle_center=None)

Definition to calculate surface normal of a triangle.

Parameters:

  • triangle
              Set of points in X,Y and Z to define a planar surface (3,3). It can also be a list of triangles (mx3x3).
    
  • triangle_center (tensor, default: None ) –
              Center point of the given triangle. See odak.learn.raytracing.center_of_triangle for more. In many scenarios you can accelerate things by precomputing triangle centers.
    

Returns:

  • normal ( tensor ) –

    Surface normal at the point of intersection.

Source code in odak/learn/raytracing/boundary.py
def get_triangle_normal(triangle, triangle_center=None):
    """
    Definition to calculate surface normal of a triangle.

    Parameters
    ----------
    triangle        : torch.tensor
                      Set of points in X,Y and Z to define a planar surface (3,3). It can also be list of triangles (mx3x3).
    triangle_center : torch.tensor
                      Center point of the given triangle. See odak.learn.raytracing.center_of_triangle for more. In many scenarios you can accelerate things by precomputing triangle centers.

    Returns
    ----------
    normal          : torch.tensor
                      Surface normal at the point of intersection.
    """
    if len(triangle.shape) == 2:
        triangle = triangle.view((1, 3, 3))
    normal = torch.zeros((triangle.shape[0], 2, 3)).to(triangle.device)
    direction = torch.linalg.cross(
                                   triangle[:, 0] - triangle[:, 1], 
                                   triangle[:, 2] - triangle[:, 1]
                                  )
    if type(triangle_center) == type(None):
        normal[:, 0] = center_of_triangle(triangle)
    else:
        normal[:, 0] = triangle_center
    normal[:, 1] = direction / torch.sum(direction, axis=1)[0]
    if normal.shape[0] == 1:
        normal = normal.view((2, 3))
    return normal
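
A minimal usage sketch with an assumed triangle.

import torch
from odak.learn.raytracing import get_triangle_normal

triangle = torch.tensor([
                         [0., 0., 0.],
                         [1., 0., 0.],
                         [0., 1., 0.]
                        ])
normal = get_triangle_normal(triangle)                         # [2 x 3]: normal[0] is the center, normal[1] the direction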

grid_sample(no=[10, 10], size=[100.0, 100.0], center=[0.0, 0.0, 0.0], angles=[0.0, 0.0, 0.0])

Definition to generate samples over a surface.

Parameters:

  • no
          Number of samples.
    
  • size
          Physical size of the surface.
    
  • center
          Center location of the surface.
    
  • angles
          Tilt of the surface.
    

Returns:

  • samples ( tensor ) –

    Samples generated.

  • rotx ( tensor ) –

    Rotation matrix at X axis.

  • roty ( tensor ) –

    Rotation matrix at Y axis.

  • rotz ( tensor ) –

    Rotation matrix at Z axis.

Source code in odak/learn/tools/sample.py
def grid_sample(
                no = [10, 10],
                size = [100., 100.], 
                center = [0., 0., 0.], 
                angles = [0., 0., 0.]):
    """
    Definition to generate samples over a surface.

    Parameters
    ----------
    no          : list
                  Number of samples.
    size        : list
                  Physical size of the surface.
    center      : list
                  Center location of the surface.
    angles      : list
                  Tilt of the surface.

    Returns
    -------
    samples     : torch.tensor
                  Samples generated.
    rotx        : torch.tensor
                  Rotation matrix at X axis.
    roty        : torch.tensor
                  Rotation matrix at Y axis.
    rotz        : torch.tensor
                  Rotation matrix at Z axis.
    """
    center = torch.tensor(center)
    angles = torch.tensor(angles)
    size = torch.tensor(size)
    samples = torch.zeros((no[0], no[1], 3))
    x = torch.linspace(-size[0] / 2., size[0] / 2., no[0])
    y = torch.linspace(-size[1] / 2., size[1] / 2., no[1])
    X, Y = torch.meshgrid(x, y, indexing='ij')
    samples[:, :, 0] = X.detach().clone()
    samples[:, :, 1] = Y.detach().clone()
    samples = samples.reshape((samples.shape[0] * samples.shape[1], samples.shape[2]))
    samples, rotx, roty, rotz = rotate_points(samples, angles = angles, offset = center)
    return samples, rotx, roty, rotz
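
A minimal usage sketch with assumed values; since the source lives in odak/learn/tools/sample.py, the function is assumed to be importable from odak.learn.tools.

from odak.learn.tools import grid_sample

samples, rotx, roty, rotz = grid_sample(
                                        no = [5, 5],
                                        size = [10., 10.],
                                        center = [0., 0., 20.],
                                        angles = [0., 0., 0.]
                                       )
print(samples.shape)                                           # torch.Size([25, 3])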

intersect_w_circle(ray, circle)

Definition to find intersection point of a ray with a circle. Returns distance as zero if there isn't an intersection.

Parameters:

  • ray
           A vector/ray.
    
  • circle
           A list that contains (0) Set of points in X,Y and Z to define plane of a circle, (1) circle center, and (2) circle radius.
    

Returns:

  • normal ( Tensor ) –

    Surface normal at the point of intersection.

  • distance ( Tensor ) –

    Distance between the starting point of a ray and its intersection with the circle's plane; set to zero where the ray misses the circle.

Source code in odak/learn/raytracing/boundary.py
def intersect_w_circle(ray, circle):
    """
    Definition to find intersection point of a ray with a circle. 
    Returns distance as zero if there isn't an intersection.

    Parameters
    ----------
    ray          : torch.Tensor
                   A vector/ray.
    circle       : list
                   A list that contains (0) Set of points in X,Y and Z to define plane of a circle, (1) circle center, and (2) circle radius.

    Returns
    ----------
    normal       : torch.Tensor
                   Surface normal at the point of intersection.
    distance     : torch.Tensor
                   Distance in between a starting point of a ray and the intersection point with a given triangle.
    """
    normal, distance = intersect_w_surface(ray, circle[0])

    if len(normal.shape) == 2:
        normal = normal.unsqueeze(0)

    distance_to_center = distance_between_two_points(normal[:, 0], circle[1])
    mask = distance_to_center > circle[2]
    distance[mask] = 0

    if len(ray.shape) == 2:
        normal = normal.squeeze(0)

    return normal, distance
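
A minimal usage sketch with assumed values, using define_circle from this module to build the circle.

import torch
from odak.learn.raytracing import define_circle, intersect_w_circle, create_ray_from_two_points

circle = define_circle(
                       center = torch.tensor([0., 0., 10.]),
                       radius = 2.,
                       angles = torch.tensor([0., 0., 0.])
                      )
ray = create_ray_from_two_points(
                                 torch.tensor([[0., 0., 0.]]),
                                 torch.tensor([[0., 0., 1.]])
                                )
normal, distance = intersect_w_circle(ray, circle)             # distance is zero where a ray misses the circle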

intersect_w_sphere(ray, sphere, learning_rate=0.2, number_of_steps=5000, error_threshold=0.01)

Definition to find the intersection between ray(s) and sphere(s).

Parameters:

  • ray
                  Input ray(s).
                  Expected size is [1 x 2 x 3] or [m x 2 x 3].
    
  • sphere
                  Input sphere.
                  Expected size is [1 x 4].
    
  • learning_rate
                  Learning rate used in the optimizer for finding the propagation distances of the rays.
    
  • number_of_steps
                  Number of steps used in the optimizer.
    
  • error_threshold
                  The error threshold that will help deciding intersection or no intersection.
    

Returns:

  • intersecting_ray ( tensor ) –

    Ray(s) intersecting with the given sphere. Expected size is [n x 2 x 3], where n is the number of intersecting rays.

  • intersecting_normal ( tensor ) –

    Normal(s) for the ray(s) intersecting with the given sphere. Expected size is [n x 2 x 3], where n is the number of intersecting rays.

  • distance ( tensor ) –

    Propagation distances estimated by the optimizer for each input ray.

  • check ( tensor ) –

    Boolean mask marking the rays that intersect the sphere within the error threshold.

Source code in odak/learn/raytracing/boundary.py
def intersect_w_sphere(ray, sphere, learning_rate = 2e-1, number_of_steps = 5000, error_threshold = 1e-2):
    """
    Definition to find the intersection between ray(s) and sphere(s).

    Parameters
    ----------
    ray                 : torch.tensor
                          Input ray(s).
                          Expected size is [1 x 2 x 3] or [m x 2 x 3].
    sphere              : torch.tensor
                          Input sphere.
                          Expected size is [1 x 4].
    learning_rate       : float
                          Learning rate used in the optimizer for finding the propagation distances of the rays.
    number_of_steps     : int
                          Number of steps used in the optimizer.
    error_threshold     : float
                          The error threshold that will help deciding intersection or no intersection.

    Returns
    -------
    intersecting_ray    : torch.tensor
                          Ray(s) that intersecting with the given sphere.
                          Expected size is [n x 2 x 3], where n could be any real number.
    intersecting_normal : torch.tensor
                          Normal(s) for the ray(s) intersecting with the given sphere
                          Expected size is [n x 2 x 3], where n could be any real number.

    """
    if len(ray.shape) == 2:
        ray = ray.unsqueeze(0)
    if len(sphere.shape) == 1:
        sphere = sphere.unsqueeze(0)
    distance = torch.zeros(ray.shape[0], device = ray.device, requires_grad = True)
    loss_l2 = torch.nn.MSELoss(reduction = 'sum')
    optimizer = torch.optim.AdamW([distance], lr = learning_rate)    
    t = tqdm(range(number_of_steps), leave = False, dynamic_ncols = True)
    for step in t:
        optimizer.zero_grad()
        propagated_ray = propagate_ray(ray, distance)
        test = torch.abs((propagated_ray[:, 0, 0] - sphere[:, 0]) ** 2 + (propagated_ray[:, 0, 1] - sphere[:, 1]) ** 2 + (propagated_ray[:, 0, 2] - sphere[:, 2]) ** 2 - sphere[:, 3] ** 2)
        loss = loss_l2(
                       test,
                       torch.zeros_like(test)
                      )
        loss.backward(retain_graph = True)
        optimizer.step()
        t.set_description('Sphere intersection loss: {}'.format(loss.item()))
    check = test < error_threshold
    intersecting_ray = propagate_ray(ray[check == True], distance[check == True])
    intersecting_normal = create_ray_from_two_points(
                                                     sphere[:, 0:3],
                                                     intersecting_ray[:, 0]
                                                    )
    return intersecting_ray, intersecting_normal, distance, check
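
A short sketch of calling this routine with two rays, only one of which hits the sphere. The import path is assumed to be the package level of odak.learn.raytracing; note that the distances are found iteratively, so the call runs an optimization loop internally:

import torch
from odak.learn.raytracing import create_ray_from_two_points, intersect_w_sphere

# Sphere centered at (0, 0, 10) with a radius of 3 -> [cx, cy, cz, r].
sphere = torch.tensor([[0., 0., 10., 3.]])

# Two rays: one aimed at the sphere center, one aimed well off to the side.
starts = torch.tensor([[0., 0., 0.], [0., 0., 0.]])
ends = torch.tensor([[0., 0., 10.], [20., 0., 10.]])
rays = create_ray_from_two_points(starts, ends)

intersecting_ray, intersecting_normal, distance, check = intersect_w_sphere(rays, sphere)
print(check)                  # roughly tensor([True, False]) once the optimization settles
print(intersecting_ray.shape) # [1 x 2 x 3], only the first ray survives the check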

intersect_w_surface(ray, points)

Definition to find the intersection point between a ray and a planar surface. For more see: http://geomalgorithms.com/a06-_intersect-2.html

Parameters:

  • ray
           A vector/ray.
    
  • points
           Set of points in X,Y and Z to define a planar surface.
    

Returns:

  • normal ( tensor ) –

    Surface normal at the point of intersection.

  • distance ( tensor ) –

    Distance between the starting point of a ray and its intersection with the planar surface.

Source code in odak/learn/raytracing/boundary.py
def intersect_w_surface(ray, points):
    """
    Definition to find intersection point inbetween a surface and a ray. For more see: http://geomalgorithms.com/a06-_intersect-2.html

    Parameters
    ----------
    ray          : torch.tensor
                   A vector/ray.
    points       : torch.tensor
                   Set of points in X,Y and Z to define a planar surface.

    Returns
    ----------
    normal       : torch.tensor
                   Surface normal at the point of intersection.
    distance     : float
                   Distance in between starting point of a ray with it's intersection with a planar surface.
    """
    normal = get_triangle_normal(points)
    if len(ray.shape) == 2:
        ray = ray.unsqueeze(0)
    if len(points.shape) == 2:
        points = points.unsqueeze(0)
    if len(normal.shape) == 2:
        normal = normal.unsqueeze(0)
    f = normal[:, 0] - ray[:, 0]
    distance = (torch.mm(normal[:, 1], f.T) / torch.mm(normal[:, 1], ray[:, 1].T)).T
    new_normal = torch.zeros_like(ray)
    new_normal[:, 0] = ray[:, 0] + distance * ray[:, 1]
    new_normal[:, 1] = normal[:, 1]
    new_normal = torch.nan_to_num(
                                  new_normal,
                                  nan = float('nan'),
                                  posinf = float('nan'),
                                  neginf = float('nan')
                                 )
    distance = torch.nan_to_num(
                                distance,
                                nan = float('nan'),
                                posinf = float('nan'),
                                neginf = float('nan')
                               )
    return new_normal, distance
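
A small, self-contained example under the same import assumption; the plane is written out directly as three points so no extra helpers are required:

import torch
from odak.learn.raytracing import create_ray_from_two_points, intersect_w_surface

# Three points spanning the z = 10 plane.
plane = torch.tensor([[10., 10., 10.],
                      [ 0., 10., 10.],
                      [ 0.,  0., 10.]])

# A single ray leaving the origin along +z.
ray = create_ray_from_two_points(
                                 torch.tensor([0., 0., 0.]),
                                 torch.tensor([0., 0., 1.])
                                )
normal, distance = intersect_w_surface(ray, plane)
print(normal[..., 0, :]) # intersection point, roughly (0, 0, 10)
print(distance)          # roughly 10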

intersect_w_surface_batch(ray, triangle)

Definition to find intersection points between a batch of rays and a batch of planar surfaces (triangles).

Parameters:

  • ray
           A vector/ray (2 x 3). It can also be a list of rays (n x 2 x 3).
    
  • triangle
           Set of points in X,Y and Z to define a planar surface. It can also be a list of triangles (m x 3 x 3).
    

Returns:

  • normal ( tensor ) –

    Surface normal at the point of intersection (m x n x 2 x 3).

  • distance ( tensor ) –

    Distance between the starting point of each ray and its intersection with each planar surface (m x n).

Source code in odak/learn/raytracing/boundary.py
def intersect_w_surface_batch(ray, triangle):
    """
    Parameters
    ----------
    ray          : torch.tensor
                   A vector/ray (2 x 3). It can also be a list of rays (n x 2 x 3).
    triangle     : torch.tensor
                   Set of points in X,Y and Z to define a planar surface. It can also be a list of triangles (m x 3 x 3).

    Returns
    ----------
    normal       : torch.tensor
                   Surface normal at the point of intersection (m x n x 2 x 3).
    distance     : torch.tensor
                   Distance in between starting point of a ray with it's intersection with a planar surface (m x n).
    """
    normal = get_triangle_normal(triangle)
    if len(ray.shape) == 2:
        ray = ray.unsqueeze(0)
    if len(triangle.shape) == 2:
        triangle = triangle.unsqueeze(0)
    if len(normal.shape) == 2:
        normal = normal.unsqueeze(0)

    f = normal[:, None, 0] - ray[None, :, 0]
    distance = (torch.bmm(normal[:, None, 1], f.permute(0, 2, 1)).squeeze(1) / torch.mm(normal[:, 1], ray[:, 1].T)).T

    new_normal = torch.zeros((triangle.shape[0], )+ray.shape)
    new_normal[:, :, 0] = ray[None, :, 0] + (distance[:, :, None] * ray[:, None, 1]).permute(1, 0, 2)
    new_normal[:, :, 1] = normal[:, None, 1]
    new_normal = torch.nan_to_num(
                                  new_normal,
                                  nan = float('nan'),
                                  posinf = float('nan'),
                                  neginf = float('nan')
                                 )
    distance = torch.nan_to_num(
                                distance,
                                nan = float('nan'),
                                posinf = float('nan'),
                                neginf = float('nan')
                               )
    return new_normal, distance.T

intersect_w_triangle(ray, triangle)

Definition to find intersection point of a ray with a triangle.

Parameters:

  • ray
                  A ray [1 x 2 x 3] or a batch of ray [m x 2 x 3].
    
  • triangle
                  Set of points in X,Y and Z to define a single triangle [1 x 3 x 3].
    

Returns:

  • normal ( tensor ) –

    Surface normal at the point of intersection with the surface of triangle. This could also involve surface normals that are not on the triangle. Expected size is [1 x 2 x 3] or [m x 2 x 3] depending on the input.

  • distance ( tensor ) –

    Distance in between a starting point of a ray and the intersection point with a given triangle. Expected size is [1 x 1] or [m x 1] depending on the input.

  • intersecting_ray ( tensor ) –

    Rays that intersect with the triangle plane and on the triangle. Expected size is [1 x 2 x 3] or [m x 2 x 3] depending on the input.

  • intersecting_normal ( tensor ) –

    Normals that intersect with the triangle plane and on the triangle. Expected size is [1 x 2 x 3] or [m x 2 x 3] depending on the input.

  • check ( tensor ) –

    A boolean tensor with one entry per input ray: True when the intersection point lies on the given triangle. Expected size is [1] or [m].

Source code in odak/learn/raytracing/boundary.py
def intersect_w_triangle(ray, triangle):
    """
    Definition to find intersection point of a ray with a triangle. 

    Parameters
    ----------
    ray                 : torch.tensor
                          A ray [1 x 2 x 3] or a batch of ray [m x 2 x 3].
    triangle            : torch.tensor
                          Set of points in X,Y and Z to define a single triangle [1 x 3 x 3].

    Returns
    ----------
    normal              : torch.tensor
                          Surface normal at the point of intersection with the surface of triangle.
                          This could also involve surface normals that are not on the triangle.
                          Expected size is [1 x 2 x 3] or [m x 2 x 3] depending on the input.
    distance            : float
                          Distance in between a starting point of a ray and the intersection point with a given triangle.
                          Expected size is [1 x 1] or [m x 1] depending on the input.
    intersecting_ray    : torch.tensor
                          Rays that intersect with the triangle plane and on the triangle.
                          Expected size is [1 x 2 x 3] or [m x 2 x 3] depending on the input.
    intersecting_normal : torch.tensor
                          Normals that intersect with the triangle plane and on the triangle.
                          Expected size is [1 x 2 x 3] or [m x 2 x 3] depending on the input.
    check               : torch.tensor
                          A list that provides a bool as True or False for each ray used as input.
                          A test to see is a ray could be on the given triangle.
                          Expected size is [1] or [m].
    """
    if len(triangle.shape) == 2:
       triangle = triangle.unsqueeze(0)
    if len(ray.shape) == 2:
       ray = ray.unsqueeze(0)
    normal, distance = intersect_w_surface(ray, triangle)
    check = is_it_on_triangle(normal[:, 0], triangle)
    intersecting_ray = ray.unsqueeze(0)
    intersecting_ray = intersecting_ray.repeat(triangle.shape[0], 1, 1, 1)
    intersecting_ray = intersecting_ray[check == True]
    intersecting_normal = normal.unsqueeze(0)
    intersecting_normal = intersecting_normal.repeat(triangle.shape[0], 1, 1, 1)
    intersecting_normal = intersecting_normal[check ==  True]
    return normal, distance, intersecting_ray, intersecting_normal, check
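
A hedged usage sketch with one ray that hits the triangle and one that misses it (imports again assumed to be package-level exports of odak.learn.raytracing):

import torch
from odak.learn.raytracing import create_ray_from_two_points, intersect_w_triangle

# A single triangle sitting on the z = 10 plane (1 x 3 x 3).
triangle = torch.tensor([[[-5., -5., 10.],
                          [ 5., -5., 10.],
                          [ 0.,  5., 10.]]])

# Two rays: one through the triangle, one missing it.
starts = torch.tensor([[0., 0., 0.], [0., 0., 0.]])
ends = torch.tensor([[0., 0., 10.], [50., 0., 10.]])
rays = create_ray_from_two_points(starts, ends)

normal, distance, intersecting_ray, intersecting_normal, check = intersect_w_triangle(rays, triangle)
print(check)                  # tensor([[ True, False]])
print(intersecting_ray.shape) # torch.Size([1, 2, 3]); only the hit survives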

intersect_w_triangle_batch(ray, triangle)

Definition to find intersection points of rays with triangles. Returns False for each variable if the rays do not intersect with the given triangles.

Parameters:

  • ray
           vectors/rays (n x 2 x 3).
    
  • triangle
           Set of points in X,Y and Z to define triangles (m x 3 x 3).
    

Returns:

  • normal ( tensor ) –

    Surface normal at the point of intersection (m x n x 2 x 3).

  • distance ( List ) –

    Distance between the starting point of each ray and its intersection with each planar surface (m x n).

  • intersect_ray ( List ) –

    List of intersecting rays (k x 2 x 3) where k <= n.

  • intersect_normal ( List ) –

    List of intersecting normals (k x 2 x 3) where k <= n*m.

  • check ( tensor ) –

    Boolean tensor (m x n) indicating whether each ray intersects with a triangle or not.

Source code in odak/learn/raytracing/boundary.py
def intersect_w_triangle_batch(ray, triangle):
    """
    Definition to find intersection points of rays with triangles. Returns False for each variable if the rays doesn't intersect with given triangles.

    Parameters
    ----------
    ray          : torch.tensor
                   vectors/rays (n x 2 x 3).
    triangle     : torch.tensor
                   Set of points in X,Y and Z to define triangles (m x 3 x 3).

    Returns
    ----------
    normal          : torch.tensor
                      Surface normal at the point of intersection (m x n x 2 x 3).
    distance        : List
                      Distance in between starting point of a ray with it's intersection with a planar surface (m x n).
    intersect_ray   : List
                      List of intersecting rays (k x 2 x 3) where k <= n.
    intersect_normal: List
                      List of intersecting normals (k x 2 x 3) where k <= n*m.
    check           : torch.tensor
                      Boolean tensor (m x n) indicating whether each ray intersects with a triangle or not.
    """
    if len(triangle.shape) == 2:
       triangle = triangle.unsqueeze(0)
    if len(ray.shape) == 2:
       ray = ray.unsqueeze(0)

    normal, distance = intersect_w_surface_batch(ray, triangle)

    check = is_it_on_triangle_batch(normal[:, :, 0], triangle)

    flat_check = check.flatten()
    flat_normal = normal.view(-1, normal.size(-2), normal.size(-1))
    flat_ray = ray.repeat(normal.size(0), 1, 1)
    flat_distance = distance.flatten()

    filtered_normal = torch.masked_select(flat_normal, flat_check.unsqueeze(-1).unsqueeze(-1).repeat(1, 2, 3))
    filtered_ray = torch.masked_select(flat_ray, flat_check.unsqueeze(-1).unsqueeze(-1).repeat(1, 2, 3))
    filtered_distnace = torch.masked_select(flat_distance, flat_check)

    check_count = check.sum(dim=1).tolist()
    split_size_ray_and_normal = [count * 2 * 3 for count in check_count]
    split_size_distance = [count for count in check_count]

    normal_grouped = torch.split(filtered_normal, split_size_ray_and_normal)
    ray_grouped = torch.split(filtered_ray, split_size_ray_and_normal)
    distance_grouped = torch.split(filtered_distnace, split_size_distance)

    intersecting_normal = [g.view(-1, 2, 3) for g in normal_grouped if g.numel() > 0]
    intersecting_ray = [g.view(-1, 2, 3) for g in ray_grouped if g.numel() > 0]
    new_distance = [g for g in distance_grouped if g.numel() > 0]

    return normal, new_distance, intersecting_ray, intersecting_normal, check
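
A sketch of the batched variant with two triangles and three rays; only the first ray hits either triangle (import paths are again assumed):

import torch
from odak.learn.raytracing import create_ray_from_two_points, intersect_w_triangle_batch

# Two parallel triangles at z = 10 and z = 20 (2 x 3 x 3).
triangles = torch.tensor([[[-5., -5., 10.],
                           [ 5., -5., 10.],
                           [ 0.,  5., 10.]],
                          [[-5., -5., 20.],
                           [ 5., -5., 20.],
                           [ 0.,  5., 20.]]])

# Three rays leaving the origin; only the first one points at the triangles.
starts = torch.zeros(3, 3)
ends = torch.tensor([[0., 0., 10.], [50., 0., 10.], [0., 50., 10.]])
rays = create_ray_from_two_points(starts, ends)

normal, distances, intersecting_rays, intersecting_normals, check = intersect_w_triangle_batch(rays, triangles)
print(check.shape)                             # torch.Size([2, 3]); one row per triangle
print([n.shape for n in intersecting_normals]) # a [k x 2 x 3] tensor per triangle that received hits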

is_it_on_triangle(point_to_check, triangle)

Definition to check if a given point is inside a triangle. If the given point is inside a defined triangle, this definition returns True. For more details, visit: https://blackpawn.com/texts/pointinpoly/.

Parameters:

  • point_to_check
              Point(s) to check.
              Expected size is [3], [1 x 3] or [m x 3].
    
  • triangle
              Triangle described with three points.
              Expected size is [3 x 3], [1 x 3 x 3] or [m x 3 x3].
    

Returns:

  • result ( tensor ) –

    Boolean result: True when the point lies inside the triangle, False otherwise. Expected size is [1] or [m] depending on the input.

Source code in odak/learn/raytracing/primitives.py
def is_it_on_triangle(point_to_check, triangle):
    """
    Definition to check if a given point is inside a triangle. 
    If the given point is inside a defined triangle, this definition returns True.
    For more details, visit: [https://blackpawn.com/texts/pointinpoly/](https://blackpawn.com/texts/pointinpoly/).

    Parameters
    ----------
    point_to_check  : torch.tensor
                      Point(s) to check.
                      Expected size is [3], [1 x 3] or [m x 3].
    triangle        : torch.tensor
                      Triangle described with three points.
                      Expected size is [3 x 3], [1 x 3 x 3] or [m x 3 x3].

    Returns
    -------
    result          : torch.tensor
                      Is it on a triangle? Returns NaN if condition not satisfied.
                      Expected size is [1] or [m] depending on the input.
    """
    if len(point_to_check.shape) == 1:
        point_to_check = point_to_check.unsqueeze(0)
    if len(triangle.shape) == 2:
        triangle = triangle.unsqueeze(0)
    v0 = triangle[:, 2] - triangle[:, 0]
    v1 = triangle[:, 1] - triangle[:, 0]
    v2 = point_to_check - triangle[:, 0]
    if len(v0.shape) == 1:
        v0 = v0.unsqueeze(0)
    if len(v1.shape) == 1:
        v1 = v1.unsqueeze(0)
    if len(v2.shape) == 1:
        v2 = v2.unsqueeze(0)
    dot00 = torch.mm(v0, v0.T)
    dot01 = torch.mm(v0, v1.T)
    dot02 = torch.mm(v0, v2.T) 
    dot11 = torch.mm(v1, v1.T)
    dot12 = torch.mm(v1, v2.T)
    invDenom = 1. / (dot00 * dot11 - dot01 * dot01)
    u = (dot11 * dot02 - dot01 * dot12) * invDenom
    v = (dot00 * dot12 - dot01 * dot02) * invDenom
    result = (u >= 0.) & (v >= 0.) & ((u + v) < 1)
    return result
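
A tiny example checking one point inside and one point outside a triangle lying on the z = 0 plane (package-level import assumed):

import torch
from odak.learn.raytracing import is_it_on_triangle

triangle = torch.tensor([[[-5., -5., 0.],
                          [ 5., -5., 0.],
                          [ 0.,  5., 0.]]])
points = torch.tensor([[ 0.,  0., 0.],   # inside the triangle
                       [10., 10., 0.]])  # outside the triangle
print(is_it_on_triangle(points, triangle)) # tensor([[ True, False]])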

is_it_on_triangle_batch(point_to_check, triangle)

Definition to check if given points are inside triangles. If the given points are inside defined triangles, this definition returns True.

Parameters:

  • point_to_check
              Points to check (m x n x 3).
    
  • triangle
              Triangles (m x 3 x 3).
    

Returns:

  • result ( tensor ) –

    Boolean tensor (m x n); each entry is True when the corresponding point lies inside the corresponding triangle.

Source code in odak/learn/raytracing/primitives.py
def is_it_on_triangle_batch(point_to_check, triangle):
    """
    Definition to check if given points are inside triangles. If the given points are inside defined triangles, this definition returns True.

    Parameters
    ----------
    point_to_check  : torch.tensor
                      Points to check (m x n x 3).
    triangle        : torch.tensor 
                      Triangles (m x 3 x 3).

    Returns
    ----------
    result          : torch.tensor (m x n)

    """
    if len(point_to_check.shape) == 1:
        point_to_check = point_to_check.unsqueeze(0)
    if len(triangle.shape) == 2:
        triangle = triangle.unsqueeze(0)
    v0 = triangle[:, 2] - triangle[:, 0]
    v1 = triangle[:, 1] - triangle[:, 0]
    v2 = point_to_check - triangle[:, None, 0]
    if len(v0.shape) == 1:
        v0 = v0.unsqueeze(0)
    if len(v1.shape) == 1:
        v1 = v1.unsqueeze(0)
    if len(v2.shape) == 1:
        v2 = v2.unsqueeze(0)

    dot00 = torch.bmm(v0.unsqueeze(1), v0.unsqueeze(1).permute(0, 2, 1)).squeeze(1)
    dot01 = torch.bmm(v0.unsqueeze(1), v1.unsqueeze(1).permute(0, 2, 1)).squeeze(1)
    dot02 = torch.bmm(v0.unsqueeze(1), v2.permute(0, 2, 1)).squeeze(1)
    dot11 = torch.bmm(v1.unsqueeze(1), v1.unsqueeze(1).permute(0, 2, 1)).squeeze(1)
    dot12 = torch.bmm(v1.unsqueeze(1), v2.permute(0, 2, 1)).squeeze(1)
    invDenom = 1. / (dot00 * dot11 - dot01 * dot01)
    u = (dot11 * dot02 - dot01 * dot12) * invDenom
    v = (dot00 * dot12 - dot01 * dot02) * invDenom
    result = (u >= 0.) & (v >= 0.) & ((u + v) < 1)

    return result

propagate_ray(ray, distance)

Definition to propagate a ray by a given distance.

Parameters:

  • ray
         A ray with a size of [2 x 3], [1 x 2 x 3] or a batch of rays with [m x 2 x 3].
    
  • distance
         Distance with a size of [1], [1, m] or distances with a size of [m], [1, m].
    

Returns:

  • new_ray ( tensor ) –

    Propagated ray with a size of [1 x 2 x 3] or batch of rays with [m x 2 x 3].

Source code in odak/learn/raytracing/ray.py
def propagate_ray(ray, distance):
    """
    Definition to propagate a ray at a certain given distance.

    Parameters
    ----------
    ray        : torch.tensor
                 A ray with a size of [2 x 3], [1 x 2 x 3] or a batch of rays with [m x 2 x 3].
    distance   : torch.tensor
                 Distance with a size of [1], [1, m] or distances with a size of [m], [1, m].

    Returns
    ----------
    new_ray    : torch.tensor
                 Propagated ray with a size of [1 x 2 x 3] or batch of rays with [m x 2 x 3].
    """
    if len(ray.shape) == 2:
        ray = ray.unsqueeze(0)
    if len(distance.shape) == 2:
        distance = distance.squeeze(-1)
    new_ray = torch.zeros_like(ray)
    new_ray[:, 0, 0] = distance * ray[:, 1, 0] + ray[:, 0, 0]
    new_ray[:, 0, 1] = distance * ray[:, 1, 1] + ray[:, 0, 1]
    new_ray[:, 0, 2] = distance * ray[:, 1, 2] + ray[:, 0, 2]
    return new_ray
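
A brief sketch (imports assumed as above); note that, as implemented above, only the start point of the returned ray is filled in:

import torch
from odak.learn.raytracing import create_ray_from_two_points, propagate_ray

ray = create_ray_from_two_points(
                                 torch.tensor([0., 0., 0.]),
                                 torch.tensor([0., 0., 1.])
                                )
new_ray = propagate_ray(ray, torch.tensor([7.5]))
print(new_ray[..., 0, :]) # start point advanced to roughly (0, 0, 7.5)
# The direction row of new_ray is left at zero by the implementation above;
# keep the original ray around if you still need the direction cosines.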

reflect(input_ray, normal)

Definition to reflect an incoming ray from a surface defined by a surface normal. Used method described in G.H. Spencer and M.V.R.K. Murty, "General Ray-Tracing Procedure", 1961.

Parameters:

  • input_ray
           A ray or rays.
           Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].
    
  • normal
           A surface normal(s).
           Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].
    

Returns:

  • output_ray ( tensor ) –

    Array that contains starting points and cosines of a reflected ray. Expected size is [1 x 2 x 3] or [m x 2 x 3].

Source code in odak/learn/raytracing/boundary.py
def reflect(input_ray, normal):
    """ 
    Definition to reflect an incoming ray from a surface defined by a surface normal. 
    Used method described in G.H. Spencer and M.V.R.K. Murty, "General Ray-Tracing Procedure", 1961.


    Parameters
    ----------
    input_ray    : torch.tensor
                   A ray or rays.
                   Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].
    normal       : torch.tensor
                   A surface normal(s).
                   Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

    Returns
    ----------
    output_ray   : torch.tensor
                   Array that contains starting points and cosines of a reflected ray.
                   Expected size is [1 x 2 x 3] or [m x 2 x 3].
    """
    if len(input_ray.shape) == 2:
        input_ray = input_ray.unsqueeze(0)
    if len(normal.shape) == 2:
        normal = normal.unsqueeze(0)
    mu = 1
    div = normal[:, 1, 0]**2 + normal[:, 1, 1]**2 + normal[:, 1, 2]**2 + 1e-8
    a = mu * (input_ray[:, 1, 0] * normal[:, 1, 0] + input_ray[:, 1, 1] * normal[:, 1, 1] + input_ray[:, 1, 2] * normal[:, 1, 2]) / div
    a = a.unsqueeze(1)
    n = int(torch.amax(torch.tensor([normal.shape[0], input_ray.shape[0]])))
    output_ray = torch.zeros((n, 2, 3)).to(input_ray.device)
    output_ray[:, 0] = normal[:, 0]
    output_ray[:, 1] = input_ray[:, 1] - 2 * a * normal[:, 1]
    return output_ray
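
A minimal sketch reflecting a +z-travelling ray off a surface whose normal also points along +z; both tensors follow the [start point, direction cosines] layout used throughout this module (package-level import assumed):

import torch
from odak.learn.raytracing import reflect

# A ray travelling along +z and a surface normal anchored at (0, 0, 5).
input_ray = torch.tensor([[[0., 0., 0.], [0., 0., 1.]]])
surface_normal = torch.tensor([[[0., 0., 5.], [0., 0., 1.]]])
output_ray = reflect(input_ray, surface_normal)
print(output_ray) # starts at (0, 0, 5) and travels along (0, 0, -1)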

refract(vector, normvector, n1, n2, error=0.01)

Definition to refract an incoming ray. Used method described in G.H. Spencer and M.V.R.K. Murty, "General Ray-Tracing Procedure", 1961.

Parameters:

  • vector
             Incoming ray.
             Expected size is [2, 3], [1, 2, 3] or [m, 2, 3].
    
  • normvector
             Normal vector.
              Expected size is [2, 3], [1, 2, 3] or [m, 2, 3].
    
  • n1
             Refractive index of the incoming medium.
    
  • n2
             Refractive index of the outgoing medium.
    
  • error
             Desired error.
    

Returns:

  • output ( tensor ) –

    Refracted ray. Expected size is [1, 2, 3] or [m, 2, 3] depending on the input.

Source code in odak/learn/raytracing/boundary.py
def refract(vector, normvector, n1, n2, error = 0.01):
    """
    Definition to refract an incoming ray.
    Used method described in G.H. Spencer and M.V.R.K. Murty, "General Ray-Tracing Procedure", 1961.


    Parameters
    ----------
    vector         : torch.tensor
                     Incoming ray.
                     Expected size is [2, 3], [1, 2, 3] or [m, 2, 3].
    normvector     : torch.tensor
                     Normal vector.
                     Expected size is [2, 3], [1, 2, 3] or [m, 2, 3]].
    n1             : float
                     Refractive index of the incoming medium.
    n2             : float
                     Refractive index of the outgoing medium.
    error          : float 
                     Desired error.

    Returns
    -------
    output         : torch.tensor
                     Refracted ray.
                     Expected size is [1, 2, 3]
    """
    if len(vector.shape) == 2:
        vector = vector.unsqueeze(0)
    if len(normvector.shape) == 2:
        normvector = normvector.unsqueeze(0)
    mu    = n1 / n2
    div   = normvector[:, 1, 0] ** 2  + normvector[:, 1, 1] ** 2 + normvector[:, 1, 2] ** 2
    a     = mu * (vector[:, 1, 0] * normvector[:, 1, 0] + vector[:, 1, 1] * normvector[:, 1, 1] + vector[:, 1, 2] * normvector[:, 1, 2]) / div
    b     = (mu ** 2 - 1) / div
    to    = - b * 0.5 / a
    num   = 0
    eps   = torch.ones(vector.shape[0], device = vector.device) * error * 2
    while len(eps[eps > error]) > 0:
       num   += 1
       oldto  = to
       v      = to ** 2 + 2 * a * to + b
       deltav = 2 * (to + a)
       to     = to - v / deltav
       eps    = abs(oldto - to)
    output = torch.zeros_like(vector)
    output[:, 0, 0] = normvector[:, 0, 0]
    output[:, 0, 1] = normvector[:, 0, 1]
    output[:, 0, 2] = normvector[:, 0, 2]
    output[:, 1, 0] = mu * vector[:, 1, 0] + to * normvector[:, 1, 0]
    output[:, 1, 1] = mu * vector[:, 1, 1] + to * normvector[:, 1, 1]
    output[:, 1, 2] = mu * vector[:, 1, 2] + to * normvector[:, 1, 2]
    return output
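
A minimal sketch of refraction from air into glass at 45 degrees of incidence (package-level import assumed); the result can be checked against Snell's law, sin(45°) / 1.5 ≈ 0.47:

import torch
from odak.learn.raytracing import refract

# Incoming ray at 45 degrees to a flat interface whose normal is +z.
incoming = torch.tensor([[[0., 0., 0.], [0.7071, 0., 0.7071]]])
surface_normal = torch.tensor([[[0., 0., 0.], [0., 0., 1.]]])
refracted = refract(incoming, surface_normal, n1 = 1.0, n2 = 1.5)
print(refracted[0, 1]) # roughly (0.47, 0.00, 0.88): the ray bends toward the normal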

rotate_points(point, angles=torch.tensor([[0, 0, 0]]), mode='XYZ', origin=torch.tensor([[0, 0, 0]]), offset=torch.tensor([[0, 0, 0]]))

Definition to rotate a given point. The rotation is performed about the given origin, which defaults to (0, 0, 0).

Parameters:

  • point
           A point with size of [3] or [1, 3] or [m, 3].
    
  • angles
           Rotation angles in degrees.
    
  • mode
           Rotation mode determines the ordering of rotations about each axis.
           Supported modes are XYZ, XZY, YXZ, ZXY and ZYX.
    
  • origin
           Reference point for a rotation.
           Expected size is [3] or [1, 3].
    
  • offset
           Shift with the given offset.
           Expected size is [3] or [1, 3] or [m, 3].
    

Returns:

  • result ( tensor ) –

    Result of the rotation [1 x 3] or [m x 3].

  • rotx ( tensor ) –

    Rotation matrix along X axis [3 x 3].

  • roty ( tensor ) –

    Rotation matrix along Y axis [3 x 3].

  • rotz ( tensor ) –

    Rotation matrix along Z axis [3 x 3].

Source code in odak/learn/tools/transformation.py
def rotate_points(
                 point,
                 angles = torch.tensor([[0, 0, 0]]), 
                 mode='XYZ', 
                 origin = torch.tensor([[0, 0, 0]]), 
                 offset = torch.tensor([[0, 0, 0]])
                ):
    """
    Definition to rotate a given point. Note that rotation is always with respect to 0,0,0.

    Parameters
    ----------
    point        : torch.tensor
                   A point with size of [3] or [1, 3] or [m, 3].
    angles       : torch.tensor
                   Rotation angles in degrees. 
    mode         : str
                   Rotation mode determines ordering of the rotations at each axis.
                   There are XYZ,YXZ,ZXY and ZYX modes.
    origin       : torch.tensor
                   Reference point for a rotation.
                   Expected size is [3] or [1, 3].
    offset       : torch.tensor
                   Shift with the given offset.
                   Expected size is [3] or [1, 3] or [m, 3].

    Returns
    ----------
    result       : torch.tensor
                   Result of the rotation [1 x 3] or [m x 3].
    rotx         : torch.tensor
                   Rotation matrix along X axis [3 x 3].
    roty         : torch.tensor
                   Rotation matrix along Y axis [3 x 3].
    rotz         : torch.tensor
                   Rotation matrix along Z axis [3 x 3].
    """
    origin = origin.to(point.device)
    offset = offset.to(point.device)
    if len(point.shape) == 1:
        point = point.unsqueeze(0)
    if len(angles.shape) == 1:
        angles = angles.unsqueeze(0)
    rotx = rotmatx(angles[:, 0])
    roty = rotmaty(angles[:, 1])
    rotz = rotmatz(angles[:, 2])
    new_point = (point - origin).T
    if mode == 'XYZ':
        result = torch.mm(rotz, torch.mm(roty, torch.mm(rotx, new_point))).T
    elif mode == 'XZY':
        result = torch.mm(roty, torch.mm(rotz, torch.mm(rotx, new_point))).T
    elif mode == 'YXZ':
        result = torch.mm(rotz, torch.mm(rotx, torch.mm(roty, new_point))).T
    elif mode == 'ZXY':
        result = torch.mm(roty, torch.mm(rotx, torch.mm(rotz, new_point))).T
    elif mode == 'ZYX':
        result = torch.mm(rotx, torch.mm(roty, torch.mm(rotz, new_point))).T
    result += origin
    result += offset
    return result, rotx, roty, rotz
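
A short example, assuming rotate_points is importable from odak.learn.tools as the source path above suggests; a 180 degree turn about the z axis is used so the result does not depend on the sign convention of the rotation matrices:

import torch
from odak.learn.tools import rotate_points

point = torch.tensor([[1., 0., 0.]])
rotated, rotx, roty, rotz = rotate_points(
                                          point,
                                          angles = torch.tensor([[0., 0., 180.]]),
                                          mode = 'XYZ'
                                         )
print(rotated) # roughly (-1, 0, 0) after the half turn about the z axis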

same_side(p1, p2, a, b)

Definition to figure out which side of a line a point lies on, relative to a reference point. See http://www.blackpawn.com/texts/pointinpoly/ for more. If p1 and p2 are on the same side, this definition returns True.

Parameters:

  • p1
          Point(s) to check.
    
  • p2
          The point to check against.
    
  • a
          First point that forms the line.
    
  • b
          Second point that forms the line.
    
Source code in odak/learn/tools/vector.py
def same_side(p1, p2, a, b):
    """
    Definition to figure which side a point is on with respect to a line and a point. See http://www.blackpawn.com/texts/pointinpoly/ for more. If p1 and p2 are on the sameside, this definition returns True.

    Parameters
    ----------
    p1          : list
                  Point(s) to check.
    p2          : list
                  This is the point check against.
    a           : list
                  First point that forms the line.
    b           : list
                  Second point that forms the line.
    """
    ba = torch.subtract(b, a)
    p1a = torch.subtract(p1, a)
    p2a = torch.subtract(p2, a)
    cp1 = torch.cross(ba, p1a)
    cp2 = torch.cross(ba, p2a)
    test = torch.dot(cp1, cp2)
    if len(p1.shape) > 1:
        return test >= 0
    if test >= 0:
        return True
    return False

save_torch_tensor(fn, tensor)

Definition to save a torch tensor.

Parameters:

  • fn
           Filename.
    
  • tensor
           Torch tensor to be saved.
    
Source code in odak/learn/tools/file.py
def save_torch_tensor(fn, tensor):
    """
    Definition to save a torch tensor.


    Parameters
    ----------
    fn           : str
                   Filename.
    tensor       : torch.tensor
                   Torch tensor to be saved.
    """ 
    torch.save(tensor, expanduser(fn))
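
A one-liner sketch (import path assumed from the source location above); the filename is passed through expanduser, so paths starting with '~' are also accepted:

import torch
from odak.learn.tools import save_torch_tensor

weights = torch.rand(3, 3)
save_torch_tensor('sample_weights.pt', weights) # later retrievable with torch.load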

write_PLY(triangles, savefn='output.ply')

Definition to generate a PLY file from given triangles.

Parameters:

  • triangles
          List of triangles with the size of Mx3x3.
    
  • savefn
          Filename for a PLY file.
    
Source code in odak/tools/asset.py
def write_PLY(triangles, savefn = 'output.ply'):
    """
    Definition to generate a PLY file from given points.

    Parameters
    ----------
    triangles   : ndarray
                  List of triangles with the size of Mx3x3.
    savefn      : string
                  Filename for a PLY file.
    """
    tris = []
    pnts = []
    color = [255, 255, 255]
    for tri_id in range(triangles.shape[0]):
        tris.append(
            (
                [3*tri_id, 3*tri_id+1, 3*tri_id+2],
                color[0],
                color[1],
                color[2]
            )
        )
        for i in range(0, 3):
            pnts.append(
                (
                    float(triangles[tri_id][i][0]),
                    float(triangles[tri_id][i][1]),
                    float(triangles[tri_id][i][2])
                )
            )
    tris = np.asarray(tris, dtype=[
                          ('vertex_indices', 'i4', (3,)), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
    pnts = np.asarray(pnts, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    # Save mesh.
    el1 = PlyElement.describe(pnts, 'vertex', comments=['Vertex data'])
    el2 = PlyElement.describe(tris, 'face', comments=['Face data'])
    PlyData([el1, el2], text="True").write(savefn)
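
A small sketch writing a single triangle to disk; this relies on numpy and the plyfile dependency used by odak.tools, and assumes write_PLY is exposed at the odak.tools package level:

import numpy as np
from odak.tools import write_PLY

# A single triangle in 3D (1 x 3 x 3) written out as a mesh file.
triangles = np.array([[[0., 0., 0.],
                       [1., 0., 0.],
                       [0., 1., 0.]]])
write_PLY(triangles, savefn = 'single_triangle.ply')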

get_sphere_normal_torch(point, sphere)

Definition to get a normal of a point on a given sphere.

Parameters:

  • point
            Point on sphere in X,Y,Z.
    
  • sphere
            Center defined in X,Y,Z and radius.
    

Returns:

  • normal_vector ( tensor ) –

    Normal vector.

Source code in odak/learn/raytracing/boundary.py
def get_sphere_normal_torch(point, sphere):
    """
    Definition to get a normal of a point on a given sphere.

    Parameters
    ----------
    point         : torch.tensor
                    Point on sphere in X,Y,Z.
    sphere        : torch.tensor
                    Center defined in X,Y,Z and radius.

    Returns
    ----------
    normal_vector : torch.tensor
                    Normal vector.
    """
    if len(point.shape) == 1:
        point = point.reshape((1, 3))
    normal_vector = create_ray_from_two_points(point, sphere[0:3])
    return normal_vector

get_triangle_normal(triangle, triangle_center=None)

Definition to calculate surface normal of a triangle.

Parameters:

  • triangle
              Set of points in X,Y and Z to define a planar surface (3,3). It can also be list of triangles (mx3x3).
    
  • triangle_center (tensor, default: None ) –
              Center point of the given triangle. See odak.learn.raytracing.center_of_triangle for more. In many scenarios you can accelerate things by precomputing triangle centers.
    

Returns:

  • normal ( tensor ) –

    Surface normal at the point of intersection.

Source code in odak/learn/raytracing/boundary.py
def get_triangle_normal(triangle, triangle_center=None):
    """
    Definition to calculate surface normal of a triangle.

    Parameters
    ----------
    triangle        : torch.tensor
                      Set of points in X,Y and Z to define a planar surface (3,3). It can also be list of triangles (mx3x3).
    triangle_center : torch.tensor
                      Center point of the given triangle. See odak.learn.raytracing.center_of_triangle for more. In many scenarios you can accelerate things by precomputing triangle centers.

    Returns
    ----------
    normal          : torch.tensor
                      Surface normal at the point of intersection.
    """
    if len(triangle.shape) == 2:
        triangle = triangle.view((1, 3, 3))
    normal = torch.zeros((triangle.shape[0], 2, 3)).to(triangle.device)
    direction = torch.linalg.cross(
                                   triangle[:, 0] - triangle[:, 1], 
                                   triangle[:, 2] - triangle[:, 1]
                                  )
    if type(triangle_center) == type(None):
        normal[:, 0] = center_of_triangle(triangle)
    else:
        normal[:, 0] = triangle_center
    normal[:, 1] = direction / torch.sum(direction, axis=1)[0]
    if normal.shape[0] == 1:
        normal = normal.view((2, 3))
    return normal
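
A quick sketch (package-level import assumed); the returned tensor packs the triangle center in row 0 and the plane's normal direction in row 1, matching the [point, direction] ray layout used elsewhere in this module:

import torch
from odak.learn.raytracing import get_triangle_normal

triangle = torch.tensor([[0., 0., 0.],
                         [1., 0., 0.],
                         [0., 1., 0.]])
normal = get_triangle_normal(triangle)
print(normal[0]) # triangle center, roughly (0.33, 0.33, 0)
print(normal[1]) # normal direction of the triangle's plane, (0, 0, 1) here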

detector

A class to represent a detector.

Source code in odak/learn/raytracing/detector.py
class detector():
    """
    A class to represent a detector.
    """


    def __init__(
                 self,
                 colors = 3,
                 center = torch.tensor([0., 0., 0.]),
                 tilt = torch.tensor([0., 0., 0.]),
                 size = torch.tensor([10., 10.]),
                 resolution = torch.tensor([100, 100]),
                 device = torch.device('cpu')
                ):
        """
        Parameters
        ----------
        colors         : int
                         Number of color channels to register (e.g., RGB).
        center         : torch.tensor
                         Center point of the detector [3].
        tilt           : torch.tensor
                         Tilt angles of the surface in degrees [3].
        size           : torch.tensor
                         Size of the detector [2].
        resolution     : torch.tensor
                         Resolution of the detector.
        device         : torch.device
                         Device for computation (e.g., cuda, cpu).
        """
        self.device = device
        self.colors = colors
        self.resolution = resolution.to(self.device)
        self.surface_center = center.to(self.device)
        self.surface_tilt = tilt.to(self.device)
        self.size = size.to(self.device)
        self.pixel_size = torch.tensor([
                                        self.size[0] / self.resolution[0],
                                        self.size[1] / self.resolution[1]
                                       ], device  = self.device)
        self.pixel_diagonal_size = torch.sqrt(self.pixel_size[0] ** 2 + self.pixel_size[1] ** 2)
        self.pixel_diagonal_half_size = self.pixel_diagonal_size / 2.
        self.threshold = torch.nn.Threshold(self.pixel_diagonal_size, 1)
        self.plane = define_plane(
                                  point = self.surface_center,
                                  angles = self.surface_tilt
                                 )
        self.pixel_locations, _, _, _ = grid_sample(
                                                    size = self.size.tolist(),
                                                    no = self.resolution.tolist(),
                                                    center = self.surface_center.tolist(),
                                                    angles = self.surface_tilt.tolist()
                                                   )
        self.pixel_locations = self.pixel_locations.to(self.device)
        self.relu = torch.nn.ReLU()
        self.clear()


    def intersect(self, rays, color = 0):
        """
        Function to intersect rays with the detector


        Parameters
        ----------
        rays            : torch.tensor
                          Rays to be intersected with a detector.
                          Expected size is [1 x 2 x 3] or [m x 2 x 3].
        color           : int
                          Color channel to register.

        Returns
        -------
        points          : torch.tensor
                          Intersection points with the image detector [k x 3].
        """
        normals, _ = intersect_w_surface(rays, self.plane)
        points = normals[:, 0]
        distances_xyz = torch.abs(points.unsqueeze(1) - self.pixel_locations.unsqueeze(0))
        distances_x = 1e6 * self.relu( - (distances_xyz[:, :, 0] - self.pixel_size[0]))
        distances_y = 1e6 * self.relu( - (distances_xyz[:, :, 1] - self.pixel_size[1]))
        hit_x = torch.clamp(distances_x, min = 0., max = 1.)
        hit_y = torch.clamp(distances_y, min = 0., max = 1.)
        hit = hit_x * hit_y
        image = torch.sum(hit, dim = 0)
        self.image[color] += image.reshape(
                                           self.image.shape[-2], 
                                           self.image.shape[-1]
                                          )
        distances = torch.sum((points.unsqueeze(1) - self.pixel_locations.unsqueeze(0)) ** 2, dim = 2)
        distance_image = distances
#        distance_image = distances.reshape(
#                                           -1,
#                                           self.image.shape[-2],
#                                           self.image.shape[-1]
#                                          )
        return points, image, distance_image


    def get_image(self):
        """
        Function to return the detector image.

        Returns
        -------
        image           : torch.tensor
                          Detector image.
        """
        image = (self.image - self.image.min()) / (self.image.max() - self.image.min())
        return image


    def clear(self):
        """
        Internal function to clear a detector.
        """
        self.image = torch.zeros(
                                 self.colors,
                                 self.resolution[0],
                                 self.resolution[1],
                                 device = self.device,
                                )

__init__(colors=3, center=torch.tensor([0.0, 0.0, 0.0]), tilt=torch.tensor([0.0, 0.0, 0.0]), size=torch.tensor([10.0, 10.0]), resolution=torch.tensor([100, 100]), device=torch.device('cpu'))

Parameters:

  • colors
             Number of color channels to register (e.g., RGB).
    
  • center
             Center point of the detector [3].
    
  • tilt
             Tilt angles of the surface in degrees [3].
    
  • size
             Size of the detector [2].
    
  • resolution
             Resolution of the detector.
    
  • device
             Device for computation (e.g., cuda, cpu).
    
Source code in odak/learn/raytracing/detector.py
def __init__(
             self,
             colors = 3,
             center = torch.tensor([0., 0., 0.]),
             tilt = torch.tensor([0., 0., 0.]),
             size = torch.tensor([10., 10.]),
             resolution = torch.tensor([100, 100]),
             device = torch.device('cpu')
            ):
    """
    Parameters
    ----------
    colors         : int
                     Number of color channels to register (e.g., RGB).
    center         : torch.tensor
                     Center point of the detector [3].
    tilt           : torch.tensor
                     Tilt angles of the surface in degrees [3].
    size           : torch.tensor
                     Size of the detector [2].
    resolution     : torch.tensor
                     Resolution of the detector.
    device         : torch.device
                     Device for computation (e.g., cuda, cpu).
    """
    self.device = device
    self.colors = colors
    self.resolution = resolution.to(self.device)
    self.surface_center = center.to(self.device)
    self.surface_tilt = tilt.to(self.device)
    self.size = size.to(self.device)
    self.pixel_size = torch.tensor([
                                    self.size[0] / self.resolution[0],
                                    self.size[1] / self.resolution[1]
                                   ], device  = self.device)
    self.pixel_diagonal_size = torch.sqrt(self.pixel_size[0] ** 2 + self.pixel_size[1] ** 2)
    self.pixel_diagonal_half_size = self.pixel_diagonal_size / 2.
    self.threshold = torch.nn.Threshold(self.pixel_diagonal_size, 1)
    self.plane = define_plane(
                              point = self.surface_center,
                              angles = self.surface_tilt
                             )
    self.pixel_locations, _, _, _ = grid_sample(
                                                size = self.size.tolist(),
                                                no = self.resolution.tolist(),
                                                center = self.surface_center.tolist(),
                                                angles = self.surface_tilt.tolist()
                                               )
    self.pixel_locations = self.pixel_locations.to(self.device)
    self.relu = torch.nn.ReLU()
    self.clear()

clear()

Internal function to clear a detector.

Source code in odak/learn/raytracing/detector.py
def clear(self):
    """
    Internal function to clear a detector.
    """
    self.image = torch.zeros(
                             self.colors,
                             self.resolution[0],
                             self.resolution[1],
                             device = self.device,
                            )

get_image()

Function to return the detector image.

Returns:

  • image ( tensor ) –

    Detector image.

Source code in odak/learn/raytracing/detector.py
def get_image(self):
    """
    Function to return the detector image.

    Returns
    -------
    image           : torch.tensor
                      Detector image.
    """
    image = (self.image - self.image.min()) / (self.image.max() - self.image.min())
    return image
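
As a brief illustration, assuming a detector instance named sensor that has already accumulated hits (see the sketch following intersect below), the raw per-channel counts stay available on the image attribute while get_image returns a min-max normalized copy:

raw_counts = sensor.image        # un-normalized hit counts, shape [colors x resolution[0] x resolution[1]]
normalized = sensor.get_image()  # the same data rescaled to the [0, 1] range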

intersect(rays, color=0)

Function to intersect rays with the detector

Parameters:

  • rays
              Rays to be intersected with a detector.
              Expected size is [1 x 2 x 3] or [m x 2 x 3].
    
  • color
              Color channel to register.
    

Returns:

  • points ( tensor ) –

    Intersection points with the image detector [k x 3].

Source code in odak/learn/raytracing/detector.py
    def intersect(self, rays, color = 0):
        """
        Function to intersect rays with the detector


        Parameters
        ----------
        rays            : torch.tensor
                          Rays to be intersected with a detector.
                          Expected size is [1 x 2 x 3] or [m x 2 x 3].
        color           : int
                          Color channel to register.

        Returns
        -------
        points          : torch.tensor
                          Intersection points with the image detector [k x 3].
        """
        normals, _ = intersect_w_surface(rays, self.plane)
        points = normals[:, 0]
        distances_xyz = torch.abs(points.unsqueeze(1) - self.pixel_locations.unsqueeze(0))
        distances_x = 1e6 * self.relu( - (distances_xyz[:, :, 0] - self.pixel_size[0]))
        distances_y = 1e6 * self.relu( - (distances_xyz[:, :, 1] - self.pixel_size[1]))
        hit_x = torch.clamp(distances_x, min = 0., max = 1.)
        hit_y = torch.clamp(distances_y, min = 0., max = 1.)
        hit = hit_x * hit_y
        image = torch.sum(hit, dim = 0)
        self.image[color] += image.reshape(
                                           self.image.shape[-2], 
                                           self.image.shape[-1]
                                          )
        distances = torch.sum((points.unsqueeze(1) - self.pixel_locations.unsqueeze(0)) ** 2, dim = 2)
        distance_image = distances
#        distance_image = distances.reshape(
#                                           -1,
#                                           self.image.shape[-2],
#                                           self.image.shape[-1]
#                                          )
        return points, image, distance_image
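
A minimal end-to-end sketch for this class follows; the detector placement, resolution and ray bundle are illustrative, and create_ray_from_two_points is assumed to come from this module.

import torch
from odak.learn.raytracing import detector, create_ray_from_two_points

sensor = detector(
                  colors = 1,
                  center = torch.tensor([0., 0., 5.]),
                  size = torch.tensor([10., 10.]),
                  resolution = torch.tensor([100, 100])
                 )
ray_origins = torch.rand(50, 3) * 2. - 1.                    # 50 random origins around the z = 0 plane
ray_targets = torch.zeros(50, 3)
ray_targets[:, 2] = 5.                                       # aim every ray at the detector center
rays = create_ray_from_two_points(ray_origins, ray_targets)
points, hits, _ = sensor.intersect(rays, color = 0)          # accumulate hits on color channel 0
image = sensor.get_image()                                   # normalized image [1 x 100 x 100]
sensor.clear()                                               # reset the accumulator before the next pass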

planar_mesh

Source code in odak/learn/raytracing/mesh.py
class planar_mesh():


    def __init__(
                 self,
                 size = [1., 1.],
                 number_of_meshes = [10, 10],
                 angles = torch.tensor([0., 0., 0.]),
                 offset = torch.tensor([0., 0., 0.]),
                 device = torch.device('cpu'),
                 heights = None
                ):
        """
        Definition to generate a plane with meshes.


        Parameters
        -----------
        number_of_meshes  : torch.tensor
                            Number of squares over plane.
                            There are two triangles at each square.
        size              : torch.tensor
                            Size of the plane.
        angles            : torch.tensor
                            Rotation angles in degrees.
        offset            : torch.tensor
                            Offset along XYZ axes.
                            Expected dimension is [1 x 3] or offset for each triangle [m x 3].
                            m here refers to `2 * number_of_meshes[0]` times  `number_of_meshes[1]`.
        device            : torch.device
                            Computational resource to be used (e.g., cpu, cuda).
        heights           : torch.tensor
                            Load surface heights from a tensor.
        """
        self.device = device
        self.angles = angles.to(self.device)
        self.offset = offset.to(self.device)
        self.size = size.to(self.device)
        self.number_of_meshes = number_of_meshes.to(self.device)
        self.init_heights(heights)


    def init_heights(self, heights = None):
        """
        Internal function to initialize a height map.
        Note that self.heights is a differentiable variable, and can be optimized or learned.
        See unit test `test/test_learn_ray_detector.py` or `test/test_learn_ray_mesh.py` as examples.
        """
        if not isinstance(heights, type(None)):
            self.heights = heights.to(self.device)
            self.heights.requires_grad = True
        else:
            self.heights = torch.zeros(
                                       (self.number_of_meshes[0], self.number_of_meshes[1], 1),
                                       requires_grad = True,
                                       device = self.device,
                                      )
        x = torch.linspace(-self.size[0] / 2., self.size[0] / 2., self.number_of_meshes[0], device = self.device) 
        y = torch.linspace(-self.size[1] / 2., self.size[1] / 2., self.number_of_meshes[1], device = self.device)
        X, Y = torch.meshgrid(x, y, indexing = 'ij')
        self.X = X.unsqueeze(-1)
        self.Y = Y.unsqueeze(-1)


    def save_heights(self, filename = 'heights.pt'):
        """
        Function to save heights to a file.

        Parameters
        ----------
        filename          : str
                            Filename.
        """
        save_torch_tensor(filename, self.heights.detach().clone())


    def save_heights_as_PLY(self, filename = 'mesh.ply'):
        """
        Function to save mesh to a PLY file.

        Parameters
        ----------
        filename          : str
                            Filename.
        """
        triangles = self.get_triangles()
        write_PLY(triangles, filename)


    def get_squares(self):
        """
        Internal function to initiate squares over a plane.

        Returns
        -------
        squares     : torch.tensor
                      Squares over a plane.
                      Expected size is [m x n x 3].
        """
        squares = torch.cat((
                             self.X,
                             self.Y,
                             self.heights
                            ), dim = -1)
        return squares


    def get_triangles(self):
        """
        Internal function to get triangles.
        """ 
        squares = self.get_squares()
        triangles = torch.zeros(2, self.number_of_meshes[0], self.number_of_meshes[1], 3, 3, device = self.device)
        for i in range(0, self.number_of_meshes[0] - 1):
            for j in range(0, self.number_of_meshes[1] - 1):
                first_triangle = torch.cat((
                                            squares[i + 1, j].unsqueeze(0),
                                            squares[i + 1, j + 1].unsqueeze(0),
                                            squares[i, j + 1].unsqueeze(0),
                                           ), dim = 0)
                second_triangle = torch.cat((
                                             squares[i + 1, j].unsqueeze(0),
                                             squares[i, j + 1].unsqueeze(0),
                                             squares[i, j].unsqueeze(0),
                                            ), dim = 0)
                triangles[0, i, j], _, _, _ = rotate_points(first_triangle, angles = self.angles)
                triangles[1, i, j], _, _, _ = rotate_points(second_triangle, angles = self.angles)
        triangles = triangles.view(-1, 3, 3) + self.offset
        return triangles 


    def mirror(self, rays):
        """
        Function to bounce light rays off the meshes.

        Parameters
        ----------
        rays              : torch.tensor
                            Rays to be bounced.
                            Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

        Returns
        -------
        reflected_rays    : torch.tensor
                            Reflected rays.
                            Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].
        reflected_normals : torch.tensor
                            Reflected normals.
                            Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

        """
        if len(rays.shape) == 2:
            rays = rays.unsqueeze(0)
        triangles = self.get_triangles()
        reflected_rays = torch.empty((0, 2, 3), requires_grad = True, device = self.device)
        reflected_normals = torch.empty((0, 2, 3), requires_grad = True, device = self.device)
        for triangle in triangles:
            _, _, intersecting_rays, intersecting_normals, check = intersect_w_triangle(
                                                                                        rays,
                                                                                        triangle
                                                                                       ) 
            triangle_reflected_rays = reflect(intersecting_rays, intersecting_normals)
            if triangle_reflected_rays.shape[0] > 0:
                reflected_rays = torch.cat((
                                            reflected_rays,
                                            triangle_reflected_rays
                                          ))
                reflected_normals = torch.cat((
                                               reflected_normals,
                                               intersecting_normals
                                              ))
        return reflected_rays, reflected_normals
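
A minimal usage sketch for this class follows; the mesh size, offset and ray bundle are illustrative, tensors are passed for size and number_of_meshes because __init__ moves them to the device with .to(), and create_ray_from_two_points is assumed to come from this module.

import torch
from odak.learn.raytracing import planar_mesh, create_ray_from_two_points

mesh = planar_mesh(
                   size = torch.tensor([1., 1.]),
                   number_of_meshes = torch.tensor([10, 10]),
                   angles = torch.tensor([0., 0., 0.]),
                   offset = torch.tensor([0., 0., 1.])
                  )
ray_origins = torch.zeros(5, 3)                              # all rays start at the origin
ray_targets = torch.rand(5, 3) * 0.2
ray_targets[:, 2] = 1.                                       # aim at the mesh plane placed at z = 1
rays = create_ray_from_two_points(ray_origins, ray_targets)
reflected_rays, reflected_normals = mesh.mirror(rays)        # [k x 2 x 3], where k rays hit a triangle
mesh.save_heights_as_PLY('mesh.ply')                         # export the current surface as a triangle mesh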

__init__(size=[1.0, 1.0], number_of_meshes=[10, 10], angles=torch.tensor([0.0, 0.0, 0.0]), offset=torch.tensor([0.0, 0.0, 0.0]), device=torch.device('cpu'), heights=None)

Definition to generate a plane with meshes.

Parameters:

  • number_of_meshes
                Number of squares over plane.
                There are two triangles at each square.
    
  • size
                Size of the plane.
    
  • angles
                Rotation angles in degrees.
    
  • offset
                Offset along XYZ axes.
                Expected dimension is [1 x 3] or offset for each triangle [m x 3].
                m here refers to `2 * number_of_meshes[0]` times  `number_of_meshes[1]`.
    
  • device
                Computational resource to be used (e.g., cpu, cuda).
    
  • heights
                Load surface heights from a tensor.
    
Source code in odak/learn/raytracing/mesh.py
def __init__(
             self,
             size = [1., 1.],
             number_of_meshes = [10, 10],
             angles = torch.tensor([0., 0., 0.]),
             offset = torch.tensor([0., 0., 0.]),
             device = torch.device('cpu'),
             heights = None
            ):
    """
    Definition to generate a plane with meshes.


    Parameters
    -----------
    number_of_meshes  : torch.tensor
                        Number of squares over plane.
                        There are two triangles at each square.
    size              : torch.tensor
                        Size of the plane.
    angles            : torch.tensor
                        Rotation angles in degrees.
    offset            : torch.tensor
                        Offset along XYZ axes.
                        Expected dimension is [1 x 3] or offset for each triangle [m x 3].
                        m here refers to `2 * number_of_meshes[0]` times  `number_of_meshes[1]`.
    device            : torch.device
                        Computational resource to be used (e.g., cpu, cuda).
    heights           : torch.tensor
                        Load surface heights from a tensor.
    """
    self.device = device
    self.angles = angles.to(self.device)
    self.offset = offset.to(self.device)
    self.size = size.to(self.device)
    self.number_of_meshes = number_of_meshes.to(self.device)
    self.init_heights(heights)

get_squares()

Internal function to initiate squares over a plane.

Returns:

  • squares ( tensor ) –

    Squares over a plane. Expected size is [m x n x 3].

Source code in odak/learn/raytracing/mesh.py
def get_squares(self):
    """
    Internal function to initiate squares over a plane.

    Returns
    -------
    squares     : torch.tensor
                  Squares over a plane.
                  Expected size is [m x n x 3].
    """
    squares = torch.cat((
                         self.X,
                         self.Y,
                         self.heights
                        ), dim = -1)
    return squares

get_triangles()

Internal function to get triangles.

Source code in odak/learn/raytracing/mesh.py
def get_triangles(self):
    """
    Internal function to get triangles.
    """ 
    squares = self.get_squares()
    triangles = torch.zeros(2, self.number_of_meshes[0], self.number_of_meshes[1], 3, 3, device = self.device)
    for i in range(0, self.number_of_meshes[0] - 1):
        for j in range(0, self.number_of_meshes[1] - 1):
            first_triangle = torch.cat((
                                        squares[i + 1, j].unsqueeze(0),
                                        squares[i + 1, j + 1].unsqueeze(0),
                                        squares[i, j + 1].unsqueeze(0),
                                       ), dim = 0)
            second_triangle = torch.cat((
                                         squares[i + 1, j].unsqueeze(0),
                                         squares[i, j + 1].unsqueeze(0),
                                         squares[i, j].unsqueeze(0),
                                        ), dim = 0)
            triangles[0, i, j], _, _, _ = rotate_points(first_triangle, angles = self.angles)
            triangles[1, i, j], _, _, _ = rotate_points(second_triangle, angles = self.angles)
    triangles = triangles.view(-1, 3, 3) + self.offset
    return triangles 

init_heights(heights=None)

Internal function to initialize a height map. Note that self.heights is a differentiable variable, and can be optimized or learned. See unit test test/test_learn_ray_detector.py or test/test_learn_ray_mesh.py as examples.

Source code in odak/learn/raytracing/mesh.py
def init_heights(self, heights = None):
    """
    Internal function to initialize a height map.
    Note that self.heights is a differentiable variable, and can be optimized or learned.
    See unit test `test/test_learn_ray_detector.py` or `test/test_learn_ray_mesh.py` as examples.
    """
    if not isinstance(heights, type(None)):
        self.heights = heights.to(self.device)
        self.heights.requires_grad = True
    else:
        self.heights = torch.zeros(
                                   (self.number_of_meshes[0], self.number_of_meshes[1], 1),
                                   requires_grad = True,
                                   device = self.device,
                                  )
    x = torch.linspace(-self.size[0] / 2., self.size[0] / 2., self.number_of_meshes[0], device = self.device) 
    y = torch.linspace(-self.size[1] / 2., self.size[1] / 2., self.number_of_meshes[1], device = self.device)
    X, Y = torch.meshgrid(x, y, indexing = 'ij')
    self.X = X.unsqueeze(-1)
    self.Y = Y.unsqueeze(-1)
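
Because the height map is differentiable, it can be optimized with a standard PyTorch optimizer. The loop below is an illustrative sketch only (the target direction, learning rate and iteration count are arbitrary choices); see the unit tests referenced above for complete examples.

import torch
from odak.learn.raytracing import planar_mesh, create_ray_from_two_points

mesh = planar_mesh(
                   size = torch.tensor([1., 1.]),
                   number_of_meshes = torch.tensor([8, 8]),
                   offset = torch.tensor([0., 0., 1.])
                  )
ray_origins = torch.zeros(4, 3)
ray_targets = torch.rand(4, 3) * 0.2
ray_targets[:, 2] = 1.
rays = create_ray_from_two_points(ray_origins, ray_targets)
target_direction = torch.tensor([0., 0., -1.])               # steer every reflection straight back
optimizer = torch.optim.AdamW([mesh.heights], lr = 2e-3)
for step in range(100):
    optimizer.zero_grad()
    reflected_rays, _ = mesh.mirror(rays)                    # triangles are rebuilt from mesh.heights each step
    loss = torch.mean((reflected_rays[:, 1] - target_direction) ** 2)
    loss.backward()
    optimizer.step()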

mirror(rays)

Function to bounce light rays off the meshes.

Parameters:

  • rays
                Rays to be bounced.
                Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].
    

Returns:

  • reflected_rays ( tensor ) –

    Reflected rays. Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

  • reflected_normals ( tensor ) –

    Reflected normals. Expected size is [2 x 3], [1 x 2 x 3] or [m x 2 x 3].

Source code in odak/learn/raytracing/mesh.py