This commit is contained in:
Michael Zhang 2023-05-02 02:15:45 -05:00
parent fd36b2aec2
commit 28d85930db
Signed by: michael
GPG key ID: BDA47A31A3C8EE6B
12 changed files with 538 additions and 133 deletions

BIN  exam-2/10a.jpg   Normal file   (binary, 33 KiB)
BIN  exam-2/10b.jpg   Normal file   (binary, 35 KiB)
BIN  exam-2/7a.blend  Normal file   (binary)
BIN  exam-2/7a.jpg    Normal file   (binary, 11 KiB)
BIN  exam-2/7b.blend  Normal file   (binary)
BIN  exam-2/7b.blend1 Normal file   (binary)
BIN  exam-2/7b.jpg    Normal file   (binary, 13 KiB)
BIN  exam-2/7c.blend  Normal file   (binary)
BIN  exam-2/7c.blend1 Normal file   (binary)
BIN  exam-2/7c.jpg    Normal file   (binary, 15 KiB)

View file

@ -14,20 +14,20 @@ author: |
\newcommand{\now}[1]{\textcolor{blue}{#1}}
\newcommand{\todo}[0]{\textcolor{red}{\textbf{TODO}}}
[ 1 2 3 6 8 9 ]
## Reflection and Refraction
1. \c{Consider a sphere $S$ made of solid glass ($\eta$ = 1.5) that has radius
$r = 3$ and is centered at the location $s = (2, 2, 10)$ in a vacuum ($\eta =
1.0$). If a ray emanating from the point $e = (0, 0, 0)$ intersects $S$ at a
point $p = (1, 4, 8)$:}
a. \c{(2 points) What is the angle of incidence $\theta_i$?}
The incoming ray is in the direction $I = p - e = (1, 4, 8)$, and the normal
at that point is $N = p - s = (1, 4, 8) - (2, 2, 10) = (-1, 2, -2)$. The angle
can be found by taking the opposite of the incoming ray $-I$ and using the
formula $\cos \theta_i = \frac{-I \cdot N}{|I| |N|} = \frac{(-1, -4, -8)
\cdot (-1, 2, -2)}{9 \cdot 3} = \frac{1 - 8 + 16}{27} = \frac{1}{3}$. So the
angle $\boxed{\theta_i = \cos^{-1}(\frac{1}{3})}$.
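As a quick numeric check (a minimal sketch of my own, in the same numpy style
as the script included with this commit):

```py
import numpy as np

e = np.array([0, 0, 0])    # ray origin
p = np.array([1, 4, 8])    # intersection point on the sphere
s = np.array([2, 2, 10])   # sphere center

I = p - e                  # incoming ray direction
N = p - s                  # (unnormalized) outward normal at p

cos_theta_i = np.dot(-I, N) / (np.linalg.norm(I) * np.linalg.norm(N))
print(cos_theta_i)                          # 0.333...
print(np.degrees(np.arccos(cos_theta_i)))   # about 70.53 degrees
```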
@ -198,21 +198,45 @@ author: |
pointing in the direction $(1, -1, -1)$, and the camera's 'up' direction is
$(0, 1, 0)$, what are the entries in $V$?}
First we can calculate $n$ and $u$:
- Viewing direction is $(1, -1, -1)$.
- Normalized, the viewing direction is $(\frac{1}{\sqrt{3}}, -\frac{1}{\sqrt{3}}, -\frac{1}{\sqrt{3}})$, so $n = (-\frac{1}{\sqrt{3}}, \frac{1}{\sqrt{3}}, \frac{1}{\sqrt{3}})$ (pointing opposite the viewing direction).
- $u = up \times n = (\frac{1}{\sqrt{2}}, 0, \frac{1}{\sqrt{2}})$.
- $v = n \times u = (\frac{1}{\sqrt{6}}, \frac{2}{\sqrt{6}}, -\frac{1}{\sqrt{6}})$
- $d_x = - (eye \cdot u) = - (2 \times \frac{1}{\sqrt{2}} + 5 \times \frac{1}{\sqrt{2}}) = -\frac{7}{\sqrt{2}}$
- $d_y = - (eye \cdot v) = - (2 \times \frac{1}{\sqrt{6}} + 3 \times
\frac{2}{\sqrt{6}} - 5 \times \frac{1}{\sqrt{6}}) = -\frac{3}{\sqrt{6}}$
- $d_z = - (eye \cdot n) = - (2 \times (-\frac{1}{\sqrt{3}}) + 3 \times
  \frac{1}{\sqrt{3}} + 5 \times \frac{1}{\sqrt{3}}) = -\frac{6}{\sqrt{3}}$
$$
V =
\begin{bmatrix}
\frac{1}{\sqrt{2}} & 0 & \frac{1}{\sqrt{2}} & -\frac{7}{\sqrt{2}} \\
\frac{1}{\sqrt{6}} & \frac{2}{\sqrt{6}} & -\frac{1}{\sqrt{6}} & -\frac{3}{\sqrt{6}} \\
-\frac{1}{\sqrt{3}} & \frac{1}{\sqrt{3}} & \frac{1}{\sqrt{3}} & -\frac{6}{\sqrt{3}} \\
0 & 0 & 0 & 1 \\
\end{bmatrix}
$$
Also solved using a Python script:
```py
import numpy as np

unit = lambda v: v / np.linalg.norm(v)

def view_matrix(camera_pos, view_dir, up_dir):
    # n points opposite the viewing direction; u and v span the image plane
    n = unit(-view_dir)
    u = unit(np.cross(up_dir, n))
    v = np.cross(n, u)
    return np.array([
        [u[0], u[1], u[2], -np.dot(camera_pos, u)],
        [v[0], v[1], v[2], -np.dot(camera_pos, v)],
        [n[0], n[1], n[2], -np.dot(camera_pos, n)],
        [0, 0, 0, 1],
    ])

camera_pos = np.array([2, 3, 5])
view_dir = np.array([1, -1, -1])
up_dir = np.array([0, 1, 0])
V = view_matrix(camera_pos, view_dir, up_dir)
print(V)
```
b. \c{(2 points) How will this matrix change if the eye moves forward in the
direction of view? [which elements in V will stay the same? which elements
@ -346,9 +370,9 @@ author: |
6. \c{Consider a cube of width $2\sqrt{3}$ centered at the point $(0, 0,
-3\sqrt{3})$, whose faces are colored light grey on the top and bottom $(y =
\pm\sqrt{3})$, dark grey on the front and back ($z = -2\sqrt{3}$ and $z =
-4\sqrt{3}$), red on the right $(x = \sqrt{3})$, and green on the left $(x =
-\sqrt{3})$.}
a. \c{Show how you could project the vertices of this cube to the plane $z =
0$ using an orthographic parallel projection:}
@ -356,29 +380,243 @@ author: |
i) \c{(2 points) Where will the six vertex locations be after such a
projection, omitting the normalization step?}
- $[\begin{matrix}-1.732 & -1.732 & -6.928\end{matrix}]$ $\rightarrow$ $[\begin{matrix}-1.0 & -1.0 & 7.0\end{matrix}]$
- $[\begin{matrix}-1.732 & -1.732 & -3.464\end{matrix}]$ $\rightarrow$ $[\begin{matrix}-1.0 & -1.0 & 5.0\end{matrix}]$
- $[\begin{matrix}-1.732 & 1.732 & -6.928\end{matrix}]$ $\rightarrow$ $[\begin{matrix}-1.0 & 1.0 & 7.0\end{matrix}]$
- $[\begin{matrix}-1.732 & 1.732 & -3.464\end{matrix}]$ $\rightarrow$ $[\begin{matrix}-1.0 & 1.0 & 5.0\end{matrix}]$
- $[\begin{matrix}1.732 & -1.732 & -6.928\end{matrix}]$ $\rightarrow$ $[\begin{matrix}1.0 & -1.0 & 7.0\end{matrix}]$
- $[\begin{matrix}1.732 & -1.732 & -3.464\end{matrix}]$ $\rightarrow$ $[\begin{matrix}1.0 & -1.0 & 5.0\end{matrix}]$
- $[\begin{matrix}1.732 & 1.732 & -6.928\end{matrix}]$ $\rightarrow$ $[\begin{matrix}1.0 & 1.0 & 7.0\end{matrix}]$
- $[\begin{matrix}1.732 & 1.732 & -3.464\end{matrix}]$ $\rightarrow$ $[\begin{matrix}1.0 & 1.0 & 5.0\end{matrix}]$
ii) \c{(1 points) Sketch the result, being as accurate as possible and
labeling the colors of each of the visible faces.}
This is just a square with the dark grey front face toward the camera. The
other faces are not visible because the cube is axis-aligned, so under an
orthographic projection along the $z$ axis the four side faces project onto
the silhouette edges and the back face is hidden behind the front face.
iii) \c{(2 points) Show how you could achieve this transformation using one or
more matrix multiplication operations. Specify the matrix entries you would
use, and, if using multiple matrices, the order in which they would be
multiplied.}
Actually, I got the numbers above by using the three transformation matrices
in this Python script:
```py
import numpy as np

def ortho(points):
    # axis-aligned bounding box of the input points
    left = min(map(lambda p: p[0], points))
    right = max(map(lambda p: p[0], points))
    bottom = min(map(lambda p: p[1], points))
    top = max(map(lambda p: p[1], points))
    near = min(map(lambda p: p[2], points))
    far = max(map(lambda p: p[2], points))

    # step 1: reflect z so the view looks down the -z axis
    step_1 = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, -1, 0],
        [0, 0, 0, 1],
    ])

    # step 2: translate by the negated center of the bounding box
    step_2 = np.array([
        [1, 0, 0, -(left + right) / 2.0],
        [0, 1, 0, -(bottom + top) / 2.0],
        [0, 0, 1, -(near + far) / 2.0],
        [0, 0, 0, 1],
    ])

    # step 3: scale each axis by 2 / extent
    step_3 = np.array([
        [2.0 / (right - left), 0, 0, 0],
        [0, 2.0 / (top - bottom), 0, 0],
        [0, 0, 2.0 / (far - near), 0],
        [0, 0, 0, 1],
    ])

    # composed matrix; applied to each homogeneous vertex as M_ortho @ [x, y, z, 1]
    M_ortho = step_3 @ step_2 @ step_1
    return M_ortho
```
b. \c{Show how you could project the vertices of this cube to the plane $z =
0$ using an oblique parallel projection in the direction $d = (1, 0,
\sqrt{3})$:}
i) \c{(3 points) Where will the six vertex locations be after such a
projection, omitting the normalization step?}
ii) \c{(2 points) Sketch the result, being as accurate as possible and
labeling the colors of each of the visible faces.}
iii) \c{(4 points) Show how you could achieve this transformation using one
or more matrix multiplication operations. Specify the matrix entries you
would use, and, if using multiple matrices, the order in which they would be
multiplied.}
7. \c{Consider the simple scene shown in the image below, where two cubes, one
of height 1 and one of height 2, are both resting on a horizontal groundplane
($y = -\frac{1}{2}$), with the smaller cube's front face aligned with $z =
-4$ and the larger cube's front face aligned with $z = -7$.}
a. \c{(5 points) Let the camera location be (0, 0, 0), looking down the $-z$
axis, with the field of view set at $90^\circ$. Determine the points, in the
image plane, to which each of the cube vertices will be projected and sketch
the result to scale. Please clearly label the coordinates to avoid
ambiguity.}
For this part, I reimplemented the perspective rendering algorithm using
Python.
```py
import numpy as np

unit = lambda v: v / np.linalg.norm(v)

def perspective_matrix(left, right, bottom, top, near, far):
    return np.array([
        [2.0 * near / (right - left), 0, (right + left) / (right - left), 0],
        [0, 2.0 * near / (top - bottom), (top + bottom) / (top - bottom), 0],
        [0, 0, -(far + near) / (far - near), -(2.0 * far * near) / (far - near)],
        [0, 0, -1, 0],
    ])

def view_matrix(camera_pos, view_dir, up_dir):
    n = unit(-view_dir)
    u = unit(np.cross(up_dir, n))
    v = np.cross(n, u)
    return np.array([
        [u[0], u[1], u[2], -np.dot(camera_pos, u)],
        [v[0], v[1], v[2], -np.dot(camera_pos, v)],
        [n[0], n[1], n[2], -np.dot(camera_pos, n)],
        [0, 0, 0, 1],
    ])
```
The perspective and view matrices are:
$$
PV =
\begin{bmatrix}
1.0 & 0.0 & 0.0 & 0.0 \\
0.0 & 1.0 & 0.0 & 0.0 \\
0.0 & 0.0 & -1.2222 & -2.2222 \\
0.0 & 0.0 & -1.0 & 0.0 \\
\end{bmatrix}
\begin{bmatrix}
1.0 & 0.0 & 0.0 & -0.0 \\
0.0 & 1.0 & 0.0 & -0.0 \\
0.0 & 0.0 & 1.0 & -0.0 \\
0.0 & 0.0 & 0.0 & 1.0 \\
\end{bmatrix}
$$
Then I ran the transformation using the data given in this particular scene:
```py
import math
import numpy as np

# Uses perspective_matrix() and view_matrix() from the block above.
def compute_view(near, vfov, hfov):
    left = -math.tan(hfov / 2.0) * near
    right = math.tan(hfov / 2.0) * near
    bottom = -math.tan(vfov / 2.0) * near
    top = math.tan(vfov / 2.0) * near
    return left, right, bottom, top

def solve(camera_pos, angle):
    angle_radians = math.radians(angle)
    near = 1
    far = 10
    view_dir = np.array([0, 0, -1])
    up_dir = np.array([0, 1, 0])
    left, right, bottom, top = compute_view(near, angle_radians, angle_radians)
    P = perspective_matrix(left, right, bottom, top, near, far)
    V = view_matrix(camera_pos, view_dir, up_dir)
    return P @ V

camera_pos = np.array([0, 0, 0])
angle = 90
m = np.around(solve(camera_pos, angle), 4)
```
Running this transforms the front face of the small cube as follows:
- $[\begin{matrix}0.5 & 0.5 & -4.0\end{matrix}]$ $\rightarrow$ $[\begin{matrix}0.5 & 0.5 & 2.6666\end{matrix}]$
- $[\begin{matrix}0.5 & -0.5 & -4.0\end{matrix}]$ $\rightarrow$ $[\begin{matrix}0.5 & -0.5 & 2.6666\end{matrix}]$
- $[\begin{matrix}-0.5 & -0.5 & -4.0\end{matrix}]$ $\rightarrow$ $[\begin{matrix}-0.5 & -0.5 & 2.6666\end{matrix}]$
- $[\begin{matrix}-0.5 & 0.5 & -4.0\end{matrix}]$ $\rightarrow$ $[\begin{matrix}-0.5 & 0.5 & 2.6666\end{matrix}]$
and the front face of the large cube as follows:
- $[\begin{matrix}1.0 & 1.5 & -7.0\end{matrix}]$ $\rightarrow$ $[\begin{matrix}1.0 & 1.5 & 6.3332\end{matrix}]$
- $[\begin{matrix}1.0 & -0.5 & -7.0\end{matrix}]$ $\rightarrow$ $[\begin{matrix}1.0 & -0.5 & 6.3332\end{matrix}]$
- $[\begin{matrix}-1.0 & -0.5 & -7.0\end{matrix}]$ $\rightarrow$ $[\begin{matrix}-1.0 & -0.5 & 6.3332\end{matrix}]$
- $[\begin{matrix}-1.0 & 1.5 & -7.0\end{matrix}]$ $\rightarrow$ $[\begin{matrix}-1.0 & 1.5 & 6.3332\end{matrix}]$
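The coordinates above are the clip-space values before the homogeneous
divide. As a small sketch of my own (not part of the original script, and
assuming `m` from the snippet above), dividing by $w = -z$ gives the points
that actually land in the image plane:

```py
import numpy as np

# Assumes `m` (the combined P @ V matrix) from the snippet above. The last row
# of P is [0, 0, -1, 0], so the clip-space w equals -z, and dividing by it
# gives normalized image-plane coordinates.
front_corners = [
    np.array([0.5, 0.5, -4.0]),   # a corner of the small cube's front face
    np.array([1.0, 1.5, -7.0]),   # a corner of the large cube's front face
]
for p in front_corners:
    clip = m @ np.r_[p, [1]]
    print(np.around(clip[:3] / clip[3], 4))
# prints approximately [0.125, 0.125, 0.667] and [0.143, 0.214, 0.905]
```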
Here's a render using Blender:
![](7a.jpg){width=40%}
b. \c{(4 points) How would the image change if the camera were moved forward by
2 units, leaving all of the other parameter settings the same? Determine the
points, in the image plane, to which each of the cube vertices would be
projected in this case and sketch the result to scale. Please clearly label
the coordinates to avoid ambiguity.}
Here is the updated Blender render:
![](7b.jpg){width=40%}
As you can see, the cubes now take up more of the frame, and in particular
the red (nearer) cube has grown to take up more of the image width than the
blue one; see the worked numbers below.
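To make that concrete, here is a quick back-of-the-envelope check of my own,
using projected size $\approx$ width / distance for the two front faces: from
the original camera at the origin the front faces are 4 and 7 units away,
giving projected widths of roughly $\frac{1}{4} = 0.25$ and $\frac{2}{7}
\approx 0.29$, so the larger cube still appears slightly wider. After moving
the camera forward 2 units the distances become 2 and 5, giving $\frac{1}{2} =
0.5$ versus $\frac{2}{5} = 0.4$, so the nearer cube now projects wider than
the farther one.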
c. \c{(4 points) How would the image change if, instead of moving the camera,
the field of view were reduced by half, to $45^\circ$, leaving all of the
other parameter settings the same? Determine the points, in the image plane,
to which each of the cube vertices would be projected and sketch the result
to scale. Please clearly label the coordinates to avoid ambiguity.}
Here is the updated Blender render:
![](7c.jpg){width=40%}
Because of the reduced FOV, less of the scene is visible, so the cubes take
up more of the view. However, since the camera has not moved, this is a
uniform zoom: the relative perspective between the cubes is unchanged, so the
front cube does not get warped into being wider or bigger than the back cube
the way it does when the camera moves forward.
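To see why the relative sizes do not change (again, my own note rather than
part of the original answer): with the symmetric frustum used above, a point
at eye-space $(x, y, z)$ projects to $x / (|z| \tan(\theta / 2))$ in
normalized image coordinates. Halving the field of view from $90^\circ$ to
$45^\circ$ changes $\tan(\theta / 2)$ from $1$ to $\tan(22.5^\circ) \approx
0.414$, which scales every projected coordinate by the same factor of about
$2.41$. The ratio between the two cubes' projected sizes, and therefore the
apparent foreshortening between them, is exactly the same as at $90^\circ$.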
d. (2 points)
- \c{Briefly describe what you notice.}
The cubes are only distorted relative to one another when their distance from
the eye changes; changing the field of view alone does not warp them.
- \c{When looking at two cube faces that are equal sizes in reality (e.g. front
and back) does one appear smaller than the other when one is more distant
from the camera than the other?}
Yes.
- \c{When looking at two objects that are resting on a common horizontal
groundplane, does the groundplane appear to be tiled in the image, so that
the objects that are farther away appear to be resting on a base that is
higher as their distance from the camera increases?}
Yes.
- \c{What changes do you observe in the relative heights, in the image, of the
smaller and larger cubes as the camera position changes?}
When the camera is close to the cubes, the nearer (smaller) cube takes up
more of the image overall, so its projected height can exceed the larger
cube's. As the camera moves farther away, the projected heights approach the
true 1:2 ratio, so the larger cube again appears clearly taller. (See the
sketch after this list for the numbers.)
- \c{Is there a point at which the camera could be so close to the smaller cube
(but not touching it) that the larger cube would be completely obscured in
the camera's image?}
Yes. If the camera were a vanishingly small distance from the front cube
(and the $near$ value were also small enough to accommodate it), the front
cube would fill the entire image and completely obscure the larger cube.
- \c{Based on these insights, what can you say about the idea to create an
illusion of "getting closer" to an object in a photographed scene by
zooming in on the image and cropping it so that the object looks bigger?}
It's not an accurate illusion. Zooming and cropping only rescales the
existing image, so the perspective is unchanged, whereas actually moving
closer changes the relative projected sizes and occlusions of near and far
objects; see the sketch below.
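To back up the last two answers with concrete numbers, here is a small sketch
of my own (not part of the original script), using the cube geometry from the
problem and the projected-size relation $y_{proj} = y / (|z| \tan(\theta/2))$:

```py
import math

# My own sketch: digital zoom/crop is not the same as moving the camera closer.
# Projected height of a point at eye-space (y, z), for a symmetric frustum with
# vertical field of view theta, is y / (|z| * tan(theta / 2)).
def projected_y(y, z, fov_deg):
    return y / (abs(z) * math.tan(math.radians(fov_deg) / 2.0))

top_small = (0.5, -4.0)   # top edge of the small cube's front face
top_large = (1.5, -7.0)   # top edge of the large cube's front face

# (a) Zoom/crop by 2x: both projections scale by the same factor, so the
#     relative sizes (and all occlusions) are exactly the same as before.
zoomed = [round(2.0 * projected_y(y, z, 90), 3) for (y, z) in (top_small, top_large)]

# (b) Move the camera 2 units forward: the nearer point grows faster, so the
#     relative sizes change.
moved = [round(projected_y(y, z + 2.0, 90), 3) for (y, z) in (top_small, top_large)]

print(zoomed)  # [0.25, 0.429]
print(moved)   # [0.25, 0.3]
```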
8. \c{Consider the perspective projection-normalization matrix $P$ which maps
the contents of the viewing frustum into a cube that extends from -1 to 1 in
@ -477,11 +715,24 @@ author: |
- \c{After the six vertices v0 .. v5 are sent to be clipped, what will the
vertex list be after clipping process has finished?}
![](10a.jpg){width=40%}
- \c{How can this new result be expressed as a triangle strip? (Try to be as
efficient as possible)}
The only way to represent this as a triangle strip is to rearrange some of
the existing edges; otherwise the vertex ordering prevents the exact same
configuration from being encoded as a single strip.
See below for a working version (only consider the green lines and ignore the
red lines):
![](10b.jpg){width=40%}
- \c{How many triangles will be encoded in the clipped triangle strip?}
Based on the image above, 8 triangles will be used.
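As a quick check on that count (a sketch of my own with placeholder vertex
labels, not the actual clipped vertices): a triangle strip with $n$ entries
encodes $n - 2$ triangles, one per consecutive triple, so 8 triangles
corresponds to a strip of 10 entries.

```py
# Placeholder strip of 10 vertex labels (not the real clipped vertices).
strip = [f"v{i}" for i in range(10)]

# Each consecutive triple of strip entries forms one triangle: n - 2 total.
triangles = [tuple(strip[i:i + 3]) for i in range(len(strip) - 2)]
print(len(triangles))  # 8
```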
## Ray Tracing vs Scan Conversion
11. \c{(8 points) List the essential steps in the scan-conversion (raster

View file

@ -1,9 +1,61 @@
import itertools
import numpy as np
from sympy import *
import math
unit = lambda v: v/np.linalg.norm(v)
def perspective_matrix(vfov, width, height, left, right, bottom, top, near, far):
    aspect = width / height
    return np.array([
        [1.0 / math.tan(vfov / 2.0) / aspect, 0, 0, 0],
        [0, 1.0 / math.tan(vfov / 2.0), 0, 0],
        [0, 0, -(far + near) / (far - near), -2.0 * far * near / (far - near)],
        [0, 0, -1, 0]
    ])
    # return np.array([
    #     [2.0 * near / (right - left), 0, (right + left) / (right - left), 0],
    #     [0, 2.0 * near / (top - bottom), (top + bottom) / (top - bottom), 0],
    #     [0, 0, -(far + near) / (far - near), -(2.0 * far * near) / (far - near)],
    #     [0, 0, -1, 0],
    # ])
def view_matrix(camera_pos, view_dir, up_dir):
    n = unit(-view_dir)
    u = unit(np.cross(up_dir, n))
    v = np.cross(n, u)
    return np.array([
        [u[0], u[1], u[2], -np.dot(camera_pos, u)],
        [v[0], v[1], v[2], -np.dot(camera_pos, v)],
        [n[0], n[1], n[2], -np.dot(camera_pos, n)],
        [0, 0, 0, 1],
    ])
def print_trans(before, after):
    def style_vec(v):
        start = "$[\\begin{matrix}"
        mid = str(v[0]) + " & " + str(v[1]) + " & " + str(v[2])
        end = "\\end{matrix}]$"
        return f"{start}{mid}{end}"

    return style_vec(before) + " $\\rightarrow$ " + style_vec(after)
def compute_view(near, vfov, hfov):
    width = 2.0 * near * math.tan(hfov / 2.0)
    height = 2.0 * near * math.tan(vfov / 2.0)
    left = -width / 2.0
    right = width / 2.0
    bottom = -height / 2.0
    top = height / 2.0
    return width, height, left, right, bottom, top
def print_bmatrix(arr):
    for row in arr:
        for j, col in enumerate(row):
            end = " " if j == len(row) - 1 else " & "
            print(col, end=end)
        print("\\\\")
def problem_1():
    p = np.array([1, 4, 8])
    e = np.array([0, 0, 0])
@ -46,20 +98,15 @@ def problem_1():
print("1e answer", unit(answer_1e))
def problem_4():
print("part 4a.")
up = np.array([0, 1, 0])
viewing_dir = np.array([1, -1, -1])
n = unit(viewing_dir)
print(f"{n = }")
camera_pos = np.array([2, 3, 5])
view_dir = np.array([1, -1, -1])
up_dir = np.array([0, 1, 0])
V = view_matrix(camera_pos, view_dir, up_dir)
print(V)
u = unit(np.cross(up, n))
print(f"{u = }")
f = np.vectorize(lambda c: (c * c))
print(f(V))
v = np.cross(n, u)
print(f"{v = }")
print(math.sqrt(1 / 6.0))
print(math.sqrt(2 / 3.0))
def build_translation_matrix(vec):
    return np.array([
@ -105,32 +152,139 @@ def problem_5():
print(f"{dx = }, {dy = }, {dz = }")
def problem_8():
    near = 0.5
    far = 20

    print("part 8a")
    vfov = hfov = math.radians(60)
    width, height, left, right, bottom, top = compute_view(near, vfov, hfov)
    print(perspective_matrix(vfov, width, height, left, right, bottom, top, near, far))
    print()
print("\nPROBLEM 4 -------------------------"); problem_4()
def problem_7():
    def solve(camera_pos, angle):
        angle_radians = math.radians(angle)
        near = 1
        far = 10
        view_dir = np.array([0, 0, -1])
        up_dir = np.array([0, 1, 0])
        width, height, left, right, bottom, top = compute_view(near, angle_radians, angle_radians)
        print("faces of the viewing frustum", left, right, bottom, top)
        P = perspective_matrix(angle_radians, width, height, left, right, bottom, top, near, far)
        V = view_matrix(camera_pos, view_dir, up_dir)
        print("P")
        print_bmatrix(np.around(P, 4))
        print("V")
        print_bmatrix(np.around(V, 4))
        m = P @ V

        points = [
            np.array([0.5, 0.5, -4]),
            np.array([0.5, -0.5, -4]),
            np.array([-0.5, -0.5, -4]),
            np.array([-0.5, 0.5, -4]),
            np.array([0.5, 0.5, -5]),
            np.array([0.5, -0.5, -5]),
            np.array([-0.5, -0.5, -5]),
            np.array([-0.5, 0.5, -5]),
            np.array([1, 1.5, -7]),
            np.array([1, -0.5, -7]),
            np.array([-1, -0.5, -7]),
            np.array([-1, 1.5, -7]),
            np.array([1, 1.5, -9]),
            np.array([1, -0.5, -9]),
            np.array([-1, -0.5, -9]),
            np.array([-1, 1.5, -9]),
        ]

        for point in points:
            point_ = np.r_[point, [1]]
            trans = m @ point_

            def style_vec(v):
                start = "$[\\begin{matrix}"
                mid = str(v[0]) + " & " + str(v[1]) + " & " + str(v[2])
                end = "\\end{matrix}]$"
                return f"{start}{mid}{end}"

            print("-", style_vec(point), "$\\rightarrow$", style_vec(np.around(trans[:3], 4)))

    print("Part A")
    camera_pos = np.array([0, 0, 0])
    angle = 90
    solve(camera_pos, angle)

    print("Part B")
    camera_pos = np.array([0, 0, -2])
    angle = 90
    solve(camera_pos, angle)

    print("Part C")
    camera_pos = np.array([0, 0, 0])
    angle = 45
    solve(camera_pos, angle)
def problem_6():
    def calculate(points):
        left = min(map(lambda p: p[0], points))
        right = max(map(lambda p: p[0], points))
        bottom = min(map(lambda p: p[1], points))
        top = max(map(lambda p: p[1], points))
        near = min(map(lambda p: p[2], points))
        far = max(map(lambda p: p[2], points))

        step_1 = np.array([
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, -1, 0],
            [0, 0, 0, 1],
        ])

        step_2 = np.array([
            [1, 0, 0, -(left + right) / 2.0],
            [0, 1, 0, -(bottom + top) / 2.0],
            [0, 0, 1, -(near + far) / 2.0],
            [0, 0, 0, 1],
        ])

        step_3 = np.array([
            [2.0 / (right - left), 0, 0, 0],
            [0, 2.0 / (top - bottom), 0, 0],
            [0, 0, 2.0 / (far - near), 0],
            [0, 0, 0, 1],
        ])

        M_ortho = step_3 @ step_2 @ step_1

        for point in points:
            point_ = np.r_[point, [1]]
            trans_ = M_ortho @ point_
            trans = trans_[:3]
            print("-", print_trans(np.around(point, 3), np.around(trans, 3)))

    sqrt3 = math.sqrt(3)
    width = 2.0 * sqrt3
    cube_center = np.array([0, 0, -3.0 * sqrt3])
    print("HELLOSU")

    points = []
    for (dx, dy, dz) in itertools.product([-1, 1], [-1, 1], [-1, 1]):
        point = cube_center + np.array([dx * width / 2, dy * width / 2, dz * width / 2])
        points.append(point)

    calculate(points)
def problem_9():
    pass
print("\nPROBLEM 8 -------------------------"); problem_8()
print("\nPROBLEM 1 -------------------------"); problem_1()
print("\nPROBLEM 5 -------------------------"); problem_5()
print("\nPROBLEM 9 -------------------------"); problem_9()
print("\nPROBLEM 7 -------------------------"); problem_7()
print("\nPROBLEM 6 -------------------------"); problem_6()
print("\nPROBLEM 4 -------------------------"); problem_4()