I'm working on a ray tracing project in Python and have encountered a performance bottleneck. I believe implementing frustum culling in my Camera class could significantly improve rendering times.
Here is my Camera class:
import numpy as np  # vec3 and Ray are the project's own vector and ray classes, defined elsewhere

class Camera():
    def __init__(self, look_from, look_at, screen_width=400, screen_height=300,
                 field_of_view=90., aperture=0., focal_distance=1.):
        self.screen_width = screen_width
        self.screen_height = screen_height
        self.aspect_ratio = float(screen_width) / screen_height
        self.look_from = look_from
        self.look_at = look_at
        # Image-plane size at unit distance from the camera: width = 2 * tan(fov / 2)
        self.camera_width = np.tan(field_of_view * np.pi / 180 / 2.) * 2.
        self.camera_height = self.camera_width / self.aspect_ratio
        # Orthonormal camera basis built from the view direction and the world up vector
        self.cameraFwd = (look_at - look_from).normalize()
        self.cameraRight = (self.cameraFwd.cross(vec3(0., 1., 0.))).normalize()
        self.cameraUp = self.cameraRight.cross(self.cameraFwd)
        self.lens_radius = aperture / 2.
        self.focal_distance = focal_distance
        # Per-pixel image-plane coordinates, flattened to 1-D arrays
        self.x = np.linspace(-self.camera_width / 2., self.camera_width / 2., self.screen_width)
        self.y = np.linspace(self.camera_height / 2., -self.camera_height / 2., self.screen_height)
        xx, yy = np.meshgrid(self.x, self.y)
        self.x = xx.flatten()
        self.y = yy.flatten()

    def get_ray(self, n):
        # Jitter each sample by up to half a pixel for anti-aliasing
        x = self.x + (np.random.rand(len(self.x)) - 0.5) * self.camera_width / self.screen_width
        y = self.y + (np.random.rand(len(self.y)) - 0.5) * self.camera_height / self.screen_height
        # Sample a point on the lens disc for depth of field
        r = np.sqrt(np.random.rand(x.shape[0]))
        phi = np.random.rand(x.shape[0]) * 2 * np.pi
        ray_origin = (self.look_from
                      + self.cameraRight * r * np.cos(phi) * self.lens_radius
                      + self.cameraUp * r * np.sin(phi) * self.lens_radius)
        # Aim each ray from the lens sample toward its pixel's point on the focal plane
        return Ray(origin=ray_origin,
                   dir=(self.look_from
                        + self.cameraUp * y * self.focal_distance
                        + self.cameraRight * x * self.focal_distance
                        + self.cameraFwd * self.focal_distance
                        - ray_origin).normalize(),
                   depth=0, n=n, reflections=0, transmissions=0, diffuse_reflections=0)
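For context, the camera is driven roughly like this each frame; this is only a sketch, since the vec3 arguments and the commented-out trace() call stand in for the rest of my renderer:

# Usage sketch: only Camera and get_ray come from the class above;
# the vec3 arguments and trace()/scene are placeholders for the rest of the renderer.
camera = Camera(look_from=vec3(0., 0.25, 1.), look_at=vec3(0., 0.25, -3.),
                screen_width=400, screen_height=300, field_of_view=90.)
rays = camera.get_ray(n=1.0)      # one jittered primary ray per pixel; n is forwarded to Ray
# color = trace(rays, scene)      # placeholder for the actual intersection/shading loop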
You just need to modify the Camera class as follows. The version below adds near and far clipping distances and starts each ray on the near plane instead of sampling the lens disc:
class Camera():
    def __init__(self, look_from, look_at, screen_width=400, screen_height=300,
                 field_of_view=90., aperture=0., focal_distance=1.):
        self.screen_width = screen_width
        self.screen_height = screen_height
        self.aspect_ratio = float(screen_width) / screen_height
        self.look_from = look_from
        self.look_at = look_at
        self.camera_width = np.tan(field_of_view * np.pi / 180 / 2.) * 2.
        self.camera_height = self.camera_width / self.aspect_ratio
        self.cameraFwd = (look_at - look_from).normalize()
        self.cameraRight = (self.cameraFwd.cross(vec3(0., 1., 0.))).normalize()
        self.cameraUp = self.cameraRight.cross(self.cameraFwd)
        self.lens_radius = aperture / 2.
        self.focal_distance = focal_distance
        # Near and far clipping distances that bound the view frustum
        self.near = .1
        self.far = 100.
        self.x = np.linspace(-self.camera_width / 2., self.camera_width / 2., self.screen_width)
        self.y = np.linspace(self.camera_height / 2., -self.camera_height / 2., self.screen_height)
        xx, yy = np.meshgrid(self.x, self.y)
        self.x = xx.flatten()
        self.y = yy.flatten()

    def get_ray(self, n):
        # Jitter each sample by up to half a pixel for anti-aliasing
        x = self.x + (np.random.rand(len(self.x)) - 0.5) * self.camera_width / self.screen_width
        y = self.y + (np.random.rand(len(self.y)) - 0.5) * self.camera_height / self.screen_height
        # Start each ray on the near plane rather than sampling the lens disc
        ray_origin = self.look_from + self.cameraRight * x * self.near + self.cameraUp * y * self.near
        return Ray(origin=ray_origin,
                   dir=(self.look_from
                        + self.cameraUp * y * self.focal_distance
                        + self.cameraRight * x * self.focal_distance
                        + self.cameraFwd * self.focal_distance
                        - ray_origin).normalize(),
                   depth=0, n=n, reflections=0, transmissions=0, diffuse_reflections=0)
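The near and far distances above describe the frustum but do not skip any work by themselves; a typical next step is to filter the scene's objects against them before intersection testing. Below is a minimal sketch of such a depth-only cull. It assumes each object exposes a bounding-sphere center and radius and that vec3 has a dot() method; those names are assumptions about your scene representation, not part of the code above.

def cull_by_depth(camera, objects):
    # Keep only objects whose bounding sphere overlaps the [near, far] range
    # measured along the camera's forward axis. The .bounding_center (vec3) and
    # .bounding_radius (float) attributes are hypothetical names.
    visible = []
    for obj in objects:
        depth = (obj.bounding_center - camera.look_from).dot(camera.cameraFwd)
        if depth + obj.bounding_radius < camera.near:
            continue  # sphere lies entirely in front of the near plane
        if depth - obj.bounding_radius > camera.far:
            continue  # sphere lies entirely beyond the far plane
        visible.append(obj)
    return visible

A full frustum test would also check the four side planes derived from the field of view, but even this depth-only filter avoids intersection tests against objects outside the configured depth range.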