From 6a4f173604ac4871444f8b9eb9bf90b0f56b65dc Mon Sep 17 00:00:00 2001 From: Michael Zhang Date: Sun, 30 Apr 2023 16:01:21 -0500 Subject: [PATCH] exam 2 progress --- exam-2/exam2.md | 130 +++++++++++++++++++++++++++++++++++++++++++----- exam-2/exam2.py | 50 +++++++++++++++++++ 2 files changed, 167 insertions(+), 13 deletions(-) create mode 100644 exam-2/exam2.py diff --git a/exam-2/exam2.md b/exam-2/exam2.md index 1053f55..815f649 100644 --- a/exam-2/exam2.md +++ b/exam-2/exam2.md @@ -11,29 +11,55 @@ author: | --- \renewcommand{\c}[1]{\textcolor{gray}{#1}} +\newcommand{\now}[1]{\textcolor{blue}{#1}} ## Reflection and Refraction -1. Consider a sphere $S$ made of solid glass ($\eta$ = 1.5) that has radius $r = +1. \c{Consider a sphere $S$ made of solid glass ($\eta$ = 1.5) that has radius $r = 3$ and is centered at the location $s = (2, 2, 10)$ in a vacuum ($\eta = 1.0$). If a ray emanating from the point $e = (0, 0, 0)$ intersects $S$ at a - point $p = (1, 4, 8)$: + point $p = (1, 4, 8)$:} - a. (2 points) What is the angle of incidence $\theta_i$ ? + a. \c{(2 points) What is the angle of incidence $\theta_i$?} - First, the normal at the point $(1, 4, 8)$ is determined by subtracting - that point from the center $(2, 2, 10)$, which gets us $N = (2 - 1, 2 - 4, - 10 - 8) = (1, -2, 2)$. Then, to determine the angle between + The incoming ray is in the direction $I = p - e = (1, 4, 8)$, and the normal at + that point is $N = s - p = (2, 2, 10) - (1, 4, 8) = (1, -2, 2)$. The angle can + be found by taking the opposite of the incoming ray $-I$ and using the + formula $\cos \theta_i = \frac{-I \cdot N}{|I| |N|} = \frac{(-1, -4, -8) + \cdot (1, -2, 2)}{9 \cdot 3} = \frac{-1 + 8 - 16}{27} = -\frac{1}{3}$. So the + angle $\boxed{\theta_i = \cos^{-1}(-\frac{1}{3})}$. - b. (1 points) What is the angle of reflection $\theta_r$ ? + b. \c{(1 points) What is the angle of reflection $\theta_r$?} - c. (3 points) What is the direction of the reflected ray? - d. 
(3 points) What is the angle of transmission $\theta_t$ ? - e. (4 points) What is the direction of the transmitted ray? + + The angle of reflection always equals the angle of incidence, $\theta_r = + \theta_i = \boxed{\cos^{-1}(-\frac{1}{3})}$. - Using Snell's law, we know that $\eta_1 \sin \theta_1 = \eta_2 \sin - \theta_2$. In this case, let material 1 be the vacuum, and material 2 be - the glass. Then, we have $1.0 \times \sin \theta_1$ + c. \c{(3 points) What is the direction of the reflected ray?} + + The reflected ray can be found by first projecting the incident ray $-I$ onto + the normalized normal $N$, which is $v = N \times |-I|\cos(\theta_i) = + (\frac{1}{3}, -\frac{2}{3}, \frac{2}{3}) \times 9 \times \frac{1}{3} = (-1, + 2, -2)$. Then, we know the point on N where this happened is $p' = p + v = + (1, 4, 8) + (-1, 2, -2) = (0, 6, 6)$. + + Now, we can subtract this point from where the ray originated to know the + direction to add in the other direction, which is still $(0, 6, 6)$ in this + case since the ray starts at the origin. Adding this to the point $p'$ gets + us $(0, 12, 12)$, which means a point from the origin will get reflected to + $(0, 12, 12)$. + + Finally, subtract the point to get the final answer $(0, 12, 12) - (1, 4, 8) + = \boxed{(-1, 8, 4)}$. + + d. \c{(3 points) What is the angle of transmission $\theta_t$?} + + Using Snell's law, we know that $\eta_i \sin \theta_i = \eta_t \sin \theta_t + = 1.0 \times \sin(\cos^{-1}(-\frac{1}{3})) = 1.5 \times \sin(\theta_t)$. To + find the angle $\theta_t$ we can just solve: $\theta_t = + \sin^{-1}(\frac{2}{3} \times \sin(\cos^{-1}(-\frac{1}{3}))) \approx + \boxed{0.6796}$ (in radians). + + e. \c{(4 points) What is the direction of the transmitted ray?} ## Geometric Transformations @@ -66,6 +92,26 @@ author: | Since the direction of flight was originally $(0, 0, 1)$, we have to transform it to $(2, 1, -2)$. +8. 
Consider the perspective projection-normalization matrix P which maps the + contents of the viewing frustum into a cube that extends from –1 to 1 in $x, + y, z$ (called normalized device coordinates). + + Suppose you want to define a square, symmetric viewing frustum with a near + clipping plane located 0.5 units in front of the camera, a far clipping plane + located 20 units from the front of the camera, a 60° vertical field of view, + and a 60° horizontal field of view. + + a. \c{(2 points) What are the entries in P?} + + $$\begin{bmatrix} + \end{bmatrix}$$ + + b. (3 points) How should the matrix P be re-defined if the viewing window is + re-sized to be twice as tall as it is wide? + + c. (3 points) What are the new horizontal and vertical fields of view after + this change has been made? + ## Clipping 9. \c{Consider the triangle whose vertex positions, after the viewport @@ -90,4 +136,62 @@ author: | $p_6 = (10, 8)$. Which of these would be considered to lie inside the triangle, according to the methods taught in class?} +10. \c{When a model contains many triangles that form a smoothly curving surface + patch, it can be inefficient to separately represent each triangle in the + patch independently as a set of three vertices because memory is wasted when + the same vertex location has to be specified multiple times. A triangle + strip offers a memory-efficient method for representing connected ‘strips’ + of triangles. For example, in the diagram below, the six vertices v0 .. v5 + define four adjacent triangles: (v0, v1, v2), (v2, v1, v3), (v2, v3, v4), + (v4, v3, v5). [Notice that the vertex order is switched in every other + triangle to maintain a consistent counter-clockwise orientation.] 
Ordinarily + one would need to pass 12 vertex locations to the GPU to represent this + surface patch (three vertices for each triangle), but when the patch is + encoded as a triangle strip, only the six vertices need to be sent and the + geometry they represent will be interpreted using the correspondence pattern + just described.} + \c{(5 points) When triangle strips are clipped, however, things + can get complicated. Consider the short triangle strip shown below in the + context of a clipping cube.} + + - \c{After the six vertices v0 .. v5 are sent to be clipped, what will the + vertex list be after clipping process has finished?} + + - \c{How can this new result be expressed as a triangle strip? (Try to be as + efficient as possible)} + + - \c{How many triangles will be encoded in the clipped triangle strip?} + +## Ray Tracing vs Scan Conversion + +11. \c{(8 points) List the essential steps in the scan-conversion (raster + graphics) rendering pipeline, starting with vertex processing and ending + with the assignment of a color to a pixel in a displayed image. For each + step briefly describe, in your own words, what is accomplished and how. You + do not need to include steps that we did not discuss in class, such as + tessellation (subdividing an input triangle into multiple subtriangles), + instancing (creating new geometric primitives from existing input vertices), + but you should not omit any steps that are essential to the process of + generating an image of a provided list of triangles.} + +12. \c{(6 points) Compare and contrast the process of generating an image of a + scene using ray tracing versus scan conversion. Include a discussion of + outcomes that can be achieved using a ray tracing approach but not using a + scan-conversion approach, or vice versa, and explain the reasons why and why + not.} + + With ray tracing, the process of generating pixels is very hierarchical. 
+ The basic ray tracer was very simple, but the moment we even added shadows, + there were recursive rays that needed to be cast, not to mention the + jittering. None of those could be parallelized with the main one, because in + order to even figure out where to start, you need to have already performed + a lot of the calculations. (For my ray tracer implementation, I already + parallelized as much as I could using the work-stealing library `rayon`) + + But with scan conversion, the majority of the transformations are just done + with matrix transformations over the geometries, which can be performed + completely in parallel with minimal branching (only depth testing is not + exactly) The rasterization process is also massively parallelizable. This + makes it faster to do on GPUs which are able to do a lot of independent + operations. diff --git a/exam-2/exam2.py b/exam-2/exam2.py new file mode 100644 index 000000000..25616c1 --- /dev/null +++ b/exam-2/exam2.py @@ -0,0 +1,50 @@ +import numpy as np + +unit = lambda v: v/np.linalg.norm(v) + +def problem_1(): + p = np.array([1, 4, 8]) + e = np.array([0, 0, 0]) + s = np.array([2, 2, 10]) + + i = p - e + print("incoming", i) + print("|I| =", np.linalg.norm(i)) + n = s - p + print("normal", n) + + n_norm = unit(n) + print("normal_norm", n_norm) + cos_theta_i = np.dot(-i, n) / (np.linalg.norm(i) * np.linalg.norm(n)) + print("part a = cos^{-1} of ", cos_theta_i) + print(np.arccos(cos_theta_i)) + + proj = n_norm * np.linalg.norm(i) * cos_theta_i + print("proj", proj) + + p_ = p + proj + print("proj point", p_) + + v2 = p_ - e + print("v2", v2) + + sin_theta_i = np.sin(np.arccos(cos_theta_i)) + print("sin theta_i =", sin_theta_i) + + print("approx answer for part d", np.arcsin(1.0 / 1.5 * sin_theta_i)) + +def problem_8(): + def P(left, right, bottom, top, near, far): + return np.array([ + [2.0 * near / (right - left), 0, (right + left) / (right - left), 0], + [0, 2.0 * near / (top - bottom), (top + bottom) / (top - bottom), 
0], + [0, 0, -(far + near) / (far - near), -(2.0 * far * near) / (far - near)], + [0, 0, -1, 0], + ]) + + near, far = left, right = bottom, top = -1, 1 + print("part 8a", P(left, right, bottom, top, near, far)) + print() + +problem_8() +problem_1()