Reworked quantum handout

This commit is contained in:
Mark 2024-02-15 16:23:25 -08:00
parent 42a5626d97
commit e8eb1a783e
Signed by: Mark
GPG Key ID: C6D63995FE72FD80
10 changed files with 1014 additions and 966 deletions

View File

@ -1,207 +0,0 @@
\section*{Part 0: Vector Basics}
\definition{Vectors}
An $n$-dimensional \textit{vector} is an element of $\mathbb{R}^n$. In this handout, we'll write vectors as columns. \par
For example, $\left[\begin{smallmatrix} 1 \\ 3 \\ 2 \end{smallmatrix}\right]$ is a vector in $\mathbb{R}^3$.
\definition{Euclidean norm}
The length of an $n$-dimensional vector $v$ is computed as follows:
\begin{equation*}
|v| = \sqrt{v_1^2 + ... + v_n^2}
\end{equation*}
where $v_1$ through $v_n$ are the individual components of the vector. For example,
\begin{equation*}
\left|\left[\begin{smallmatrix} 1 \\ 3 \\ 2 \end{smallmatrix}\right]\right| = \sqrt{1^2 + 3^2 + 2^2} = \sqrt{14}
\end{equation*}
\definition{Transpose}
The \textit{transpose} of a vector $v$ is $v^\text{T}$, given as follows:
\begin{equation*}
\left[\begin{smallmatrix} 1 \\ 3 \\ 2 \end{smallmatrix}\right]^\text{T}
=
\left[\begin{smallmatrix} 1 & 3 & 2 \end{smallmatrix}\right]
\end{equation*}
That is, we rewrite the vector with its rows as columns and its columns as rows. \par
We can transpose matrices too, of course, but we'll get to that later.
\problem{}
What is the length of $\left[\frac{1}{\sqrt{2}}, \frac{1}{\sqrt{2}}\right]^\text{T}$? \par
\vfill
\definition{}
We say a vector $v$ is a \textit{unit vector} or a \textit{normalized} vector if $|v| = 1$.
\pagebreak
\definition{Vector products}
The \textit{dot product} of two $n$-dimensional vectors $v$ and $u$ is computed as follows:
\begin{equation*}
v \cdot u = v_1u_1 + v_2u_2 + ... + v_nu_n
\end{equation*}
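For example,
\begin{equation*}
	\left[\begin{smallmatrix} 1 \\ 3 \\ 2 \end{smallmatrix}\right]
	\cdot
	\left[\begin{smallmatrix} 2 \\ 0 \\ 1 \end{smallmatrix}\right]
	= (1 \times 2) + (3 \times 0) + (2 \times 1) = 4
\end{equation*}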
\vfill
\definition{Vector angles}<vectorangle>
For any two vectors $a$ and $b$, the following holds:
\null\hfill
\begin{minipage}{0.48\textwidth}
\begin{equation*}
\cos{(\phi)} = \frac{a \cdot b}{|a| \times |b|}
\end{equation*}
\end{minipage}
\hfill
\begin{minipage}{0.48\textwidth}
\begin{center}
\begin{tikzpicture}[scale=1.5]
\draw[->] (0, 0) -- (0.707, 0.707);
\draw[->, gray] (0.5, 0.0) arc (0:45:0.5);
\node[gray] at (0.6, 0.22) {$\phi$};
\draw[->] (0, 0) -- (1.2, 0);
\node[right] at (1.2, 0) {$a$};
\node[right] at (0.707, 0.707) {$b$};
\end{tikzpicture}
\end{center}
\end{minipage}
\hfill\null
This can easily be shown using the law of cosines. \par
For the sake of time, we'll skip the proof---it isn't directly relevant to this handout.
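For example, if $a = \left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right]$ and
$b = \left[\begin{smallmatrix} 1 \\ 1 \end{smallmatrix}\right]$ (roughly the picture above),
then $a \cdot b = 1$, $|a| = 1$, and $|b| = \sqrt{2}$, so
$\cos{(\phi)} = \nicefrac{1}{\sqrt{2}}$ and $\phi = 45^\circ$. \par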
\definition{Orthogonal vectors}
We say two vectors are \textit{perpendicular} or \textit{orthogonal} if the angle between them is $90^\circ$. \par
Note that this definition works with vectors of any dimension.
\note{
In fact, we don't need to think about other dimensions: two vectors in an $n$-dimensional space nearly always
define a unique two-dimensional plane (with two exceptions: $\phi = 0^\circ$ and $\phi = 180^\circ$).
}
\problem{}
What is the dot product of two orthogonal vectors?
\vfill
\pagebreak
\definition{Linear combinations}
A \textit{linear combination} of two or more vectors $v_1, v_2, ..., v_k$ is the weighted sum
\begin{equation*}
a_1v_1 + a_2v_2 + ... + a_kv_k
\end{equation*}
where $a_i$ are arbitrary real numbers.
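For example,
\begin{equation*}
	2 \left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right]
	+ 3 \left[\begin{smallmatrix} 0 \\ 1 \end{smallmatrix}\right]
	= \left[\begin{smallmatrix} 2 \\ 3 \end{smallmatrix}\right]
\end{equation*}
is a linear combination of two vectors in $\mathbb{R}^2$, with weights $a_1 = 2$ and $a_2 = 3$.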
\definition{Linear dependence}
We say a set of vectors $\{v_1, v_2, ..., v_k\}$ is \textit{linearly dependent} if we can write $0$ as a nontrivial
linear combination of these vectors. For example, the following set is linearly dependent
\begin{equation*}
\Bigl\{
\left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right],
\left[\begin{smallmatrix} 0 \\ 1 \end{smallmatrix}\right],
\left[\begin{smallmatrix} 0.5 \\ 0.5 \end{smallmatrix}\right]
\Bigr\}
\end{equation*}
since $
\left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right] +
\left[\begin{smallmatrix} 0 \\ 1 \end{smallmatrix}\right] -
2 \left[\begin{smallmatrix} 0.5 \\ 0.5 \end{smallmatrix}\right]
= 0
$. A graphical representation of this is below.
\null\hfill
\begin{minipage}{0.48\textwidth}
\begin{center}
\begin{tikzpicture}[scale=1]
\fill[color = black] (0, 0) circle[radius=0.05];
\node[right] at (1, 0) {$\left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right]$};
\node[above] at (0, 1) {$\left[\begin{smallmatrix} 0 \\ 1 \end{smallmatrix}\right]$};
\draw[->] (0, 0) -- (1, 0);
\draw[->] (0, 0) -- (0, 1);
\draw[->] (0, 0) -- (0.5, 0.5);
\node[above right] at (0.5, 0.5) {$\left[\begin{smallmatrix} 0.5 \\ 0.5 \end{smallmatrix}\right]$};
\end{tikzpicture}
\end{center}
\end{minipage}
\hfill
\begin{minipage}{0.48\textwidth}
\begin{center}
\begin{tikzpicture}[scale=1]
\fill[color = black] (0, 0) circle[radius=0.05];
\node[below] at (0.5, 0) {$\left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right]$};
\node[right] at (1, 0.5) {$\left[\begin{smallmatrix} 0 \\ 1 \end{smallmatrix}\right]$};
\draw[->] (0, 0) -- (0.95, 0);
\draw[->] (1, 0) -- (1, 0.95);
\draw[->] (1, 1) -- (0.55, 0.55);
\draw[->] (0.5, 0.5) -- (0.05, 0.05);
\node[above left] at (0.5, 0.5) {$-2\left[\begin{smallmatrix} 0.5 \\ 0.5 \end{smallmatrix}\right]$};
\end{tikzpicture}
\end{center}
\end{minipage}
\hfill\null
\problem{}
Find a linearly independent set of vectors in $\mathbb{R}^3$.
\vfill
\definition{Coordinates}
Say we have a set of linearly independent vectors $B = \{b_1, ..., b_k\}$. \par
We can write linear combinations of $B$ as \textit{coordinates} with respect to this set:
\vspace{2mm}
If we have a vector $v = x_1b_1 + x_2b_2 + ... + x_kb_k$, we can write $v = (x_1, x_2, ..., x_k)$ with respect to $B$.
\vspace{4mm}
For example, take
$B = \biggl\{
\left[\begin{smallmatrix} 1 \\ 0 \\ 0 \end{smallmatrix}\right],
\left[\begin{smallmatrix} 0 \\ 1 \\ 0\end{smallmatrix}\right],
\left[\begin{smallmatrix} 0 \\ 0 \\ 1 \end{smallmatrix}\right]
\biggr\}$ and $v = \left[\begin{smallmatrix} 8 \\ 3 \\ 9 \end{smallmatrix}\right]$
The coordinates of $v$ with respect to $B$ are, of course, $(8, 3, 9)$.
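For a basis that isn't the standard one, take
$C = \Bigl\{
	\left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right],
	\left[\begin{smallmatrix} 1 \\ 1 \end{smallmatrix}\right]
\Bigr\}$.
The coordinates of $\left[\begin{smallmatrix} 3 \\ 5 \end{smallmatrix}\right]$ with respect to $C$ are $(-2, 5)$, since
$-2\left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right]
+ 5\left[\begin{smallmatrix} 1 \\ 1 \end{smallmatrix}\right]
= \left[\begin{smallmatrix} 3 \\ 5 \end{smallmatrix}\right]$.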
\problem{}
What are the coordinates of $v$ with respect to the basis
$B = \biggl\{
\left[\begin{smallmatrix} 1 \\ 0 \\ 1 \end{smallmatrix}\right],
\left[\begin{smallmatrix} 0 \\ 1 \\ 0\end{smallmatrix}\right],
\left[\begin{smallmatrix} 0 \\ 0 \\ -1 \end{smallmatrix}\right]
\biggr\}$?
%For example, the set $\{[1,0,0], [0,1,0], [0,0,1]\}$ (which we usually call $\{x, y, z\})$
%forms an orthonormal basis of $\mathbb{R}^3$. Every element of $\mathbb{R}^3$ can be written as a linear combination of these vectors:
%
%\begin{equation*}
% \left[\begin{smallmatrix} a \\ b \\ c \end{smallmatrix}\right]
% =
% a \left[\begin{smallmatrix} 1 \\ 0 \\ 0 \end{smallmatrix}\right] +
% b \left[\begin{smallmatrix} 0 \\ 1 \\ 0 \end{smallmatrix}\right] +
% c \left[\begin{smallmatrix} 0 \\ 0 \\ 1 \end{smallmatrix}\right]
%\end{equation*}
%
%The tuple $[a,b,c]$ is called the \textit{coordinate} of a point with respect to this basis.
\vfill
\pagebreak

View File

@ -1,199 +1,591 @@
\section{One Bit}
Before we discuss quantum computation, we first need to construct a few tools. \par
To keep things simple, we'll use regular (usually called \textit{classical}) bits for now.
\section{Probabilistic Bits}
\definition{}
\definition{Binary Digits}
$\mathbb{B}$ is the set of binary digits. In other words, $\mathbb{B} = \{\texttt{0}, \texttt{1}\}$. \par
\note[Note]{We've seen $\mathbb{B}$ before---it's the set of integers mod 2.}
As we already know, a \textit{classical bit} may take the values \texttt{0} and \texttt{1}. \par
We can model this with a two-sided coin, one face of which is labeled \texttt{0}, and the other, \texttt{1}. \par
\vspace{2mm}
Of course, if we toss such a \say{bit-coin,} we'll get either \texttt{0} or \texttt{1}. \par
We'll denote the probability of getting \texttt{0} as $p_0$, and the probability of getting \texttt{1} as $p_1$. \par
As with all probabilities, $p_0 + p_1$ must be equal to 1.
\vfill
\definition{}
\definition{Cartesian Products}
Let $A$ and $B$ be sets. \par
The \textit{cartesian product} $A \times B$ is the set of all pairs $(a, b)$ where $a \in A$ and $b \in B$. \par
As usual, we can write $A \times A \times A$ as $A^3$. \par
Say we toss a \say{bit-coin} and don't observe the result. We now have a \textit{probabilistic bit}, with a probability $p_0$
of being \texttt{0}, and a probability $p_1$ of being \texttt{1}.
\vspace{2mm}
In this handout, we'll often see the following sets:
We'll represent this probabilistic bit's \textit{state} as a vector:
$\left[\begin{smallmatrix}
p_0 \\ p_1
\end{smallmatrix}\right]$ \par
We do \textbf{not} assume this coin is fair, and thus $p_0$ might not equal $p_1$.
\note{
This may seem a bit redundant: since $p_0 + p_1 = 1$, we can always calculate one probability given the other. \\
We'll still include both probabilities in the state vector, since this provides a clearer analogy to quantum bits.
}
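For example, a fair coin we've tossed but haven't looked at is the probabilistic bit
$\left[\begin{smallmatrix} 0.5 \\ 0.5 \end{smallmatrix}\right]$,
while a coin biased to land on \texttt{0} three times out of four is
$\left[\begin{smallmatrix} 0.75 \\ 0.25 \end{smallmatrix}\right]$.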
\vfill
\definition{}
The simplest probabilistic bit states are of course $[0]$ and $[1]$, defined as follows:
\begin{itemize}
\item $\mathbb{R}^2$, a two-dimensional plane
\item $\mathbb{R}^n$, an n-dimensional space
\item $\mathbb{B}^2$, the set
$\{(\texttt{0},\texttt{0}), (\texttt{0},\texttt{1}), (\texttt{1},\texttt{0}), (\texttt{1},\texttt{1})\}$
\item $\mathbb{B}^n$, the set of all possible states of $n$ bits.
\item $[0] = \left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right]$
\item $[1] = \left[\begin{smallmatrix} 0 \\ 1 \end{smallmatrix}\right]$
\end{itemize}
That is, $[0]$ represents a bit that we know to be \texttt{0}, \par
and $[1]$ represents a bit we know to be \texttt{1}.
\vfill
\definition{}
$[0]$ and $[1]$ form a \textit{basis} for all possible probabilistic bit states: \par
every probabilistic bit state can be written as a \textit{linear combination} of $[0]$ and $[1]$:
\begin{equation*}
\begin{bmatrix} p_0 \\ p_1 \end{bmatrix}
=
p_0 \begin{bmatrix} 1 \\ 0 \end{bmatrix} +
p_1 \begin{bmatrix} 0 \\ 1 \end{bmatrix}
=
p_0 [0] + p_1 [1]
\end{equation*}
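For example, the state $\left[\begin{smallmatrix} 0.25 \\ 0.75 \end{smallmatrix}\right]$ is the linear combination $0.25[0] + 0.75[1]$.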
\vfill
\pagebreak
\problem{}
Every possible state of a probabilistic bit is a two-dimensional vector. \par
Draw all possible states on the axis below.
\begin{center}
\begin{tikzpicture}[scale = 2.0]
\fill[color = black] (0, 0) circle[radius=0.05];
\node[below left] at (0, 0) {$\left[\begin{smallmatrix} 0 \\ 0 \end{smallmatrix}\right]$};
\draw[->] (0, 0) -- (1.2, 0);
\node[right] at (1.2, 0) {$p_0$};
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below] at (1, 0) {$[0]$};
\draw[->] (0, 0) -- (0, 1.2);
\node[above] at (0, 1.2) {$p_1$};
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[left] at (0, 1) {$[1]$};
\end{tikzpicture}
\end{center}
\begin{solution}
\begin{center}
\begin{tikzpicture}[scale = 2.0]
\fill[color = black] (0, 0) circle[radius=0.05];
\node[below left] at (0, 0) {$\left[\begin{smallmatrix} 0 \\ 0 \end{smallmatrix}\right]$};
\draw[ored, -, line width = 2] (0, 1) -- (1, 0);
\draw[->] (0, 0) -- (1.2, 0);
\node[right] at (1.2, 0) {$p_0$};
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below] at (1, 0) {$[0]$};
\draw[->] (0, 0) -- (0, 1.2);
\node[above] at (0, 1.2) {$p_1$};
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[left] at (0, 1) {$[1]$};
\end{tikzpicture}
\end{center}
\end{solution}
\vfill
\pagebreak
\section{Measuring Probabilistic Bits}
\definition{}
As we noted before, a probabilistic bit represents a coin we've tossed but haven't looked at. \par
We do not know whether the bit is \texttt{0} or \texttt{1}, but we do know the probability of both of these outcomes. \par
\vspace{2mm}
If we \textit{measure} (or \textit{observe}) a probabilistic bit, we see either \texttt{0} or \texttt{1}---and thus our
knowledge of its state is updated to either $[0]$ or $[1]$, since we now know for certain which face the coin landed on.
\vspace{2mm}
Since measurement changes what we know about a probabilistic bit, it changes the probabilistic bit's state.
When we measure a bit, its state \textit{collapses} to either $[0]$ or $[1]$, and the original state of the
bit vanishes. We \textit{cannot} recover the state $[x_0, x_1]$ from a measured probabilistic bit.
\definition{Multiple bits}
Say we have two probabilistic bits, $x$ and $y$, \par
with states
$[x]=[ x_0, x_1]$
and
$[y]=[y_0, y_1]$
\vspace{2mm}
The \textit{compound state} of $[x]$ and $[y]$ is exactly what it sounds like: \par
It is the probabilistic two-bit state $[xy]$, where the probabilities of the first bit are
determined by $[x]$, and the probabilities of the second are determined by $[y]$.
\problem{}<firstcompoundstate>
Say $[x] = [\nicefrac{2}{3}, \nicefrac{1}{3}]$ and $[y] = [\nicefrac{3}{4}, \nicefrac{1}{4}]$. \par
\begin{itemize}[itemsep = 1mm]
\item If we measure $x$ and $y$ simultaneously, \par
what is the probability of getting each of \texttt{00}, \texttt{01}, \texttt{10}, and \texttt{11}?
\item If we measure $y$ first and observe \texttt{1}, \par
what is the probability of getting each of \texttt{00}, \texttt{01}, \texttt{10}, and \texttt{11}?
\end{itemize}
\note[Note]{$[x]$ and $[y]$ are column vectors, but I've written them horizontally to save space.}
\vfill
\problem{}
With $x$ and $y$ defined as above, find the probability of measuring each of \texttt{00}, \texttt{01}, \texttt{10}, and \texttt{11}.
\vfill
\problem{}
Say $[x] = [\nicefrac{2}{3}, \nicefrac{1}{3}]$ and $[y] = [\nicefrac{3}{4}, \nicefrac{1}{4}]$. \par
What is the probability that $x$ and $y$ produce different outcomes?
\vfill
\pagebreak
\section{Tensor Products}
\definition{Tensor Products}
The \textit{tensor product} of two vectors is defined as follows:
\begin{equation*}
\begin{bmatrix}
x_1 \\ x_2
\end{bmatrix}
\otimes
\begin{bmatrix}
y_1 \\ y_2
\end{bmatrix}
=
\begin{bmatrix}
x_1
\begin{bmatrix}
y_1 \\ y_2
\end{bmatrix}
\\[4mm]
x_2
\begin{bmatrix}
y_1 \\ y_2
\end{bmatrix}
\end{bmatrix}
=
\begin{bmatrix}
x_1y_1 \\[1mm]
x_1y_2 \\[1mm]
x_2y_1 \\[1mm]
x_2y_2 \\[0.5mm]
\end{bmatrix}
\end{equation*}
That is, we take our first vector, multiply the second
vector by each of its components, and stack the result.
You could think of this as a generalization of scalar
multiplication, where scalar multiplication is a
tensor product with a vector in $\mathbb{R}^1$:
\begin{equation*}
a
\begin{bmatrix}
x_1 \\ x_2
\end{bmatrix}
=
\begin{bmatrix}
a
\end{bmatrix}
\otimes
\begin{bmatrix}
x_1 \\ x_2
\end{bmatrix}
=
\begin{bmatrix}
a
\begin{bmatrix}
x_1 \\ x_2
\end{bmatrix}
\end{bmatrix}
=
\begin{bmatrix}
ax_1 \\[1mm]
ax_2
\end{bmatrix}
\end{equation*}
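For example,
\begin{equation*}
\begin{bmatrix}
1 \\ 2
\end{bmatrix}
\otimes
\begin{bmatrix}
3 \\ 4
\end{bmatrix}
=
\begin{bmatrix}
1 \times 3 \\[1mm]
1 \times 4 \\[1mm]
2 \times 3 \\[1mm]
2 \times 4
\end{bmatrix}
=
\begin{bmatrix}
3 \\ 4 \\ 6 \\ 8
\end{bmatrix}
\end{equation*}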
\problem{}
Say $x \in \mathbb{R}^n$ and $y \in \mathbb{R}^m$. \par
What is the dimension of $x \otimes y$?
\vfill
\problem{}<basistp>
What is the pairwise tensor product
$
\Bigl\{
\left[
\begin{smallmatrix}
1 \\ 0 \\ 0
\end{smallmatrix}
\right],
\left[
\begin{smallmatrix}
0 \\ 1 \\ 0
\end{smallmatrix}
\right],
\left[
\begin{smallmatrix}
0 \\ 0 \\ 1
\end{smallmatrix}
\right]
\Bigr\}
\otimes
\Bigl\{
\left[
\begin{smallmatrix}
1 \\ 0
\end{smallmatrix}
\right],
\left[
\begin{smallmatrix}
0 \\ 1
\end{smallmatrix}
\right]
\Bigr\}
$?
\note{in other words, distribute the tensor product between every pair of vectors.}
\vfill
\problem{}
What is the \textit{span} of the vectors we found in \ref{basistp}? \par
In other words, what is the set of vectors that can be written as linear combinations of the vectors above?
\vfill
Look through the above problems and convince yourself of the following fact: \par
If $a$ is a basis of $A$ and $b$ is a basis of $B$, $a \otimes b$ is a basis of $A \times B$. \par
\note{If you don't understand what this says, ask an instructor. \\ This is the reason we did the last few problems!}
\begin{instructornote}
\textbf{The idea here is as follows:}
If $a$ is in $\{\texttt{0}, \texttt{1}\}$ and $b$ is in $\{\texttt{0}, \texttt{1}\}$,
the values $ab$ can take are
$\{\texttt{0}, \texttt{1}\} \times \{\texttt{0}, \texttt{1}\} = \{\texttt{00}, \texttt{01}, \texttt{10}, \texttt{11}\}$.
\vspace{2mm}
The same is true of any other state set: if $a$ takes values in $A$ and $b$ takes values in $B$, \par
the compound state $(a,b)$ takes values in $A \times B$.
\vspace{2mm}
We would like to do the same with probabilistic bits. \par
Given bits $\ket{a}$ and $\ket{b}$, how should we represent the state of $\ket{ab}$?
\end{instructornote}
\pagebreak
\problem{}
Say $[x] = [\nicefrac{2}{3}, \nicefrac{1}{3}]$ and $[y] = [\nicefrac{3}{4}, \nicefrac{1}{4}]$. \par
What is $[x] \otimes [y]$? How does this relate to \ref{firstcompoundstate}?
\vfill
\problem{}
The compound state of two vector-form bits is their tensor product. \par
Compute the following. Is the result what we'd expect?
\begin{itemize}
\item $[0] \otimes [0]$
\item $[0] \otimes [1]$
\item $[1] \otimes [0]$
\item $[1] \otimes [1]$
\end{itemize}
\hint{
Remember that
$[0] = \left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right]$
and
$[1] = \left[\begin{smallmatrix} 0 \\ 1 \end{smallmatrix}\right]$.
}
\vfill
\problem{}<fivequant>
Of course, writing $[0] \otimes [1]$ is a bit excessive. We'll shorten this notation to $[01]$. \par
\vspace{2mm}
In fact, we could go further: if we wanted to write the set of bits $[1] \otimes [1] \otimes [0] \otimes [1]$, \par
we could write $[1101]$---but a shorter alternative is $[13]$, since $13$ is \texttt{1101} in binary.
\vspace{2mm}
Write $[5]$ as a three-bit probabilistic state. \par
\begin{solution}
$[5] = [101] = [1] \otimes [0] \otimes [1] = [0,0,0,0,0,1,0,0]^T$ \par
Notice how we're counting from the top, with $[000] = [1,0,...,0]$ and $[111] = [0, ..., 0, 1]$.
\end{solution}
\vfill
\problem{}
Write the three-bit states $[0]$ through $[7]$ as column vectors. \par
\hint{You do not need to compute every tensor product. Do a few and find the pattern.}
\vfill
\pagebreak
\section{Operations on Probabilistic Bits}
Now that we can write probabilistic bits as vectors, we can represent operations on these bits
with linear transformations---in other words, as matrices.
\definition{}
Consider the NOT gate, which operates as follows: \par
\begin{itemize}
\item $\text{NOT}[0] = [1]$
\item $\text{NOT}[1] = [0]$
\end{itemize}
What should NOT do to a probabilistic bit $[x_0, x_1]$? \par
If we return to our coin analogy, we can think of the NOT operation as
flipping a coin we have already tossed, without looking at its state.
Thus,
\begin{equation*}
\text{NOT} \begin{bmatrix}
x_0 \\ x_1
\end{bmatrix} = \begin{bmatrix}
x_1 \\ x_0
\end{bmatrix}
\end{equation*}
\begin{ORMCbox}{Review: Matrix Multiplication}{black!10!white}{black!65!white}
Matrix multiplication works as follows:
\begin{equation*}
AB =
\begin{bmatrix}
1 & 2 \\
3 & 4 \\
\end{bmatrix}
\begin{bmatrix}
a_0 & b_0 \\
a_1 & b_1 \\
\end{bmatrix}
=
\begin{bmatrix}
1a_0 + 2a_1 & 1b_0 + 2b_1 \\
3a_0 + 4a_1 & 3b_0 + 4b_1 \\
\end{bmatrix}
\end{equation*}
Note that this is very similar to multiplying each column of $B$ by $A$. \par
The product $AB$ is simply $Ac$ for every column $c$ in $B$:
\begin{equation*}
Ac_0 =
\begin{bmatrix}
1 & 2 \\
3 & 4 \\
\end{bmatrix}
\begin{bmatrix}
a_0 \\ a_1
\end{bmatrix}
=
\begin{bmatrix}
1a_0 + 2a_1 \\
3a_0 + 4a_1
\end{bmatrix}
\end{equation*}
This is exactly the first column of the matrix product. \par
Also, note that each element of $Ac_0$ is the dot product of a row in $A$ and a column in $c_0$.
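For example,
\begin{equation*}
\begin{bmatrix}
1 & 2 \\
3 & 4 \\
\end{bmatrix}
\begin{bmatrix}
5 \\ 6
\end{bmatrix}
=
\begin{bmatrix}
(1 \times 5) + (2 \times 6) \\
(3 \times 5) + (4 \times 6)
\end{bmatrix}
=
\begin{bmatrix}
17 \\ 39
\end{bmatrix}
\end{equation*}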
\end{ORMCbox}
\problem{}
Compute the following product:
\begin{equation*}
\begin{bmatrix}
1 & 0.5 \\ 0 & 1
\end{bmatrix}
\begin{bmatrix}
3 \\ 2
\end{bmatrix}
\end{equation*}
\vfill
\generic{Remark:}
Also, recall that every matrix is a linear map, and that every linear map may be written as a matrix. \par
We often use the terms \textit{matrix}, \textit{transformation}, and \textit{linear map} interchangeably.
\pagebreak
\problem{}
Find the matrix that represents the NOT operation on one probabilistic bit.
\begin{solution}
\begin{equation*}
\begin{bmatrix}
0 & 1 \\ 1 & 0
\end{bmatrix}
\end{equation*}
\end{solution}
\vfill
\problem{Extension by linearity}
Say we have an arbitrary operation $M$. \par
If we know how $M$ acts on $[1]$ and $[0]$, can we compute $M[x]$ for an arbitrary state $[x]$? \par
Say $[x] = [x_0, x_1]$.
\begin{itemize}
\item What is the probability we observe $0$ when we measure $x$?
\item What is the probability that we observe $M[0]$ when we measure $Mx$?
\end{itemize}
\vfill
\problem{}<linearextension>
Write $M[x_0, x_1]$ in terms of $M[0]$, $M[1]$, $x_0$, and $x_1$.
\begin{solution}
\begin{equation*}
M \begin{bmatrix}
x_0 \\ x_1
\end{bmatrix}
=
x_0 M \begin{bmatrix}
1 \\ 0
\end{bmatrix}
+
x_1 M \begin{bmatrix}
0 \\ 1
\end{bmatrix}
=
x_0 M [0] +
x_1 M [1]
\end{equation*}
\end{solution}
\problem{}
What is the size of $\mathbb{B}^n$?
\vfill
\pagebreak
% NOTE: this is time-travelled later in the handout.
% if you edit this, edit that too.
\generic{Remark:}
Consider a single classical bit. It takes states in $\{\texttt{0}, \texttt{1}\}$, picking one at a time. \par
We'll write the states \texttt{0} and \texttt{1} as orthogonal unit vectors, labeled $\vec{e}_0$ and $\vec{e}_1$:
Every matrix represents a \textit{linear} map, so the following is always true:
\begin{equation*}
A(px + qy) = pAx + qAy
\end{equation*}
\ref{linearextension} is just a special case of this fact.
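For instance, applying NOT to the probabilistic bit $[0.3, 0.7]$:
\begin{equation*}
\text{NOT}\bigl(0.3[0] + 0.7[1]\bigr)
= 0.3\,\text{NOT}[0] + 0.7\,\text{NOT}[1]
= 0.3[1] + 0.7[0]
= \begin{bmatrix} 0.7 \\ 0.3 \end{bmatrix}
\end{equation*}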
\begin{center}
\begin{tikzpicture}[scale=1.5]
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[->] (0, 0) -- (1.5, 0);
\node[right] at (1.5, 0) {$\vec{e}_0$ axis};
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below] at (1, 0) {\texttt{0}};
\draw[->] (0, 0) -- (0, 1.5);
\node[above] at (0, 1.5) {$\vec{e}_1$ axis};
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[left] at (0, 1) {\texttt{1}};
\end{tikzpicture}
\end{center}
The point marked $1$ is at $[0, 1]$. It is no parts $\vec{e}_0$, and all parts $\vec{e}_1$. \par
Of course, we can say something similar about the point marked $0$: \par
It is at $[1, 0] = (1 \times \vec{e}_0) + (0 \times \vec{e}_1)$, and is thus all $\vec{e}_0$ and no $\vec{e}_1$. \par
\note[Note]{$[0, 1]$ and $[1, 0]$ are coordinates in the basis $\{\vec{e}_0, \vec{e}_1\}$}
\vspace{2mm}
We could, of course, mark the point \texttt{x} at $[1, 1]$, which is equal parts $\vec{e}_0$ and $\vec{e}_1$: \par
\begin{center}
\begin{tikzpicture}[scale=1.5]
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[->] (0, 0) -- (1.5, 0);
\node[right] at (1.5, 0) {$\vec{e}_0$};
\draw[->] (0, 0) -- (0, 1.5);
\node[above] at (0, 1.5) {$\vec{e}_1$};
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below] at (1, 0) {\texttt{0}};
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[left] at (0, 1) {\texttt{1}};
\draw[dashed, color = gray, ->] (0, 0) -- (0.9, 0.9);
\fill[color = oblue] (1, 1) circle[radius=0.05];
\node[above right] at (1, 1) {\texttt{x}};
\end{tikzpicture}
\end{center}
\vspace{4mm}
But \texttt{x} isn't a member of $\mathbb{B}$---it's not a state that a classical bit can take. \par
By our current definitions, the \textit{only} valid states of a bit are $\texttt{0} = [1, 0]$ and $\texttt{1} = [0, 1]$.
\vfill
\pagebreak
\definition{Vectored Bits}
This brings us to what we'll call the \textit{vectored representation} of a bit. \par
Instead of writing our bits as just \texttt{0} and \texttt{1}, we'll break them into their $\vec{e}_0$ and $\vec{e}_1$ components: \par
\null\hfill
\begin{minipage}{0.48\textwidth}
\[ \ket{0} = \begin{bmatrix} 1 \\ 0 \end{bmatrix} = (1 \times \vec{e}_0) + (0 \times \vec{e}_1) \]
\end{minipage}
\hfill
\begin{minipage}{0.48\textwidth}
\[ \ket{1} = \begin{bmatrix} 0 \\ 1 \end{bmatrix} = (0 \times \vec{e}_0) + (1 \times \vec{e}_1) \]
\end{minipage}
\hfill\null
\vspace{2mm}
This may seem needlessly complex---and it is, for classical bits. \par
We'll see why this is useful soon enough.
\vspace{4mm}
The $\ket{~}$ you see in the two expressions above is called a \say{ket,} and denotes a column vector. \par
$\ket{0}$ is pronounced \say{ket zero,} and $\ket{1}$ is pronounced \say{ket one.} This is called bra-ket notation. \par
\note[Note]{$\bra{0}$ is called a \say{bra,} but we won't worry about that for now.}
\problem{}
Write \texttt{x} and \texttt{y} in the diagram below in terms of $\ket{0}$ and $\ket{1}$. \par
\begin{center}
\begin{tikzpicture}[scale=1.5]
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[->] (0, 0) -- (1.5, 0);
\node[right] at (1.5, 0) {$\vec{e}_0$ axis};
\draw[->] (0, 0) -- (0, 1.5);
\node[above] at (0, 1.5) {$\vec{e}_1$ axis};
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below] at (1, 0) {$\ket{0}$};
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[left] at (0, 1) {$\ket{1}$};
\draw[dashed, color = gray, ->] (0, 0) -- (0.9, 0.9);
\fill[color = ored] (1, 1) circle[radius=0.05];
\node[above right] at (1, 1) {\texttt{x}};
\draw[dashed, color = gray, ->] (0, 0) -- (-0.9, 0.9);
\fill[color = ored] (-1, 1) circle[radius=0.05];
\node[above right] at (-1, 1) {\texttt{y}};
\end{tikzpicture}
\end{center}
\vfill
\pagebreak

View File

@ -0,0 +1,347 @@
\section{One Qubit}
Quantum bits (or \textit{qubits}) are very similar to probabilistic bits, but have one major difference: \par
probabilities are replaced with \textit{amplitudes}.
\vspace{2mm}
Of course, a qubit can take the values \texttt{0} and \texttt{1}, which are denoted $\ket{0}$ and $\ket{1}$. \par
Like probabilistic bits, a quantum bit is written as a linear combination of $\ket{0}$ and $\ket{1}$:
\begin{equation*}
\ket{\psi} = \psi_0\ket{0} + \psi_1\ket{1}
\end{equation*}
Such linear combinations are called \textit{superpositions}.
\vspace{2mm}
The $\ket{~}$ you see in the expressions above is called a \say{ket,} and denotes a column vector. \par
$\ket{0}$ is pronounced \say{ket zero,} and $\ket{1}$ is pronounced \say{ket one.} This is called bra-ket notation. \par
\note[Note]{$\bra{0}$ is called a \say{bra,} but we won't worry about that for now.}
\vspace{2mm}
This is very similar to the \say{box} $[~]$ notation we used for probabilistic bits. \par
As before, we will write $\ket{0} = \left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right]$
and $\ket{1} = \left[\begin{smallmatrix} 0 \\ 1 \end{smallmatrix}\right]$.
\vspace{8mm}
Recall that probabilistic bits are subject to the restriction that $p_0 + p_1 = 1$. \par
Quantum bits have a similar condition: $\psi_0^2 + \psi_1^2 = 1$. \par
Note that this implies that $\psi_0$ and $\psi_1$ are both in $[-1, 1]$: \par
Quantum amplitudes may be negative, but probabilistic bit probabilities cannot.
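For example, $\frac{3}{5}\ket{0} + \frac{4}{5}\ket{1}$ and $\frac{3}{5}\ket{0} - \frac{4}{5}\ket{1}$ are both valid qubit states,
since $\left(\nicefrac{3}{5}\right)^2 + \left(\pm\nicefrac{4}{5}\right)^2 = 1$. \par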
\vspace{2mm}
If we plot the set of valid quantum states on our plane, we get a unit circle centered at the origin:
\begin{center}
\begin{tikzpicture}[scale=1.5]
\draw[dashed] (0,0) circle(1);
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[->] (0, 0) -- (1.2, 0);
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below right] at (1, 0) {$\ket{0}$};
\draw[->] (0, 0) -- (0, 1.2);
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[above left] at (0, 1) {$\ket{1}$};
\fill[color = ored] (0.87, 0.5) circle[radius=0.05];
\node[above right] at (0.87, 0.5) {$\ket{\psi}$};
\end{tikzpicture}
\end{center}
Recall that the set of probabilistic bits forms a line instead:
\begin{center}
\begin{tikzpicture}[scale = 1.5]
\fill[color = black] (0, 0) circle[radius=0.05];
\node[below left] at (0, 0) {$\left[\begin{smallmatrix} 0 \\ 0 \end{smallmatrix}\right]$};
\draw[ored, -, line width = 2] (0, 1) -- (1, 0);
\draw[->] (0, 0) -- (1.2, 0);
\node[right] at (1.2, 0) {$p_0$};
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below] at (1, 0) {$[0]$};
\draw[->] (0, 0) -- (0, 1.2);
\node[above] at (0, 1.2) {$p_1$};
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[left] at (0, 1) {$[1]$};
\end{tikzpicture}
\end{center}
\problem{}
In the above unit circle, the counterclockwise angle from $\ket{0}$ to $\ket{\psi}$ is $30^\circ$\hspace{-1ex}. \par
Write $\ket{\psi}$ as a linear combination of $\ket{0}$ and $\ket{1}$.
\vfill
\pagebreak
\definition{Measurement I}
Just like a probabilistic bit, a qubit must be observed as either $\ket{0}$ or $\ket{1}$ when we measure it. \par
If we were to measure $\ket{\psi} = \psi_0\ket{0} + \psi_1\ket{1}$, we'd observe either $\ket{0}$ or $\ket{1}$, \par
with the following probabilities:
\begin{itemize}[itemsep = 2mm, topsep = 2mm]
\item $\mathcal{P}(\ket{1}) = \psi_1^2$
\item $\mathcal{P}(\ket{0}) = \psi_0^2$
\end{itemize}
\note{Note that $\mathcal{P}(\ket{0}) + \mathcal{P}(\ket{1}) = 1$.}
\vspace{2mm}
As before, $\ket{\psi}$ \textit{collapses} when it is measured: its state becomes that which we observed in our measurement,
leaving no trace of the previous superposition. \par
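For example, if we measure $\frac{3}{5}\ket{0} + \frac{4}{5}\ket{1}$, we observe $\ket{0}$ with probability $\nicefrac{9}{25}$
and $\ket{1}$ with probability $\nicefrac{16}{25}$, and the qubit is left in whichever state we observed. \par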
\problem{}
\begin{itemize}
\item What is the probability we observe $\ket{0}$ when we measure $\ket{\psi}$? \par
\item What can we observe if we measure $\ket{\psi}$ a second time? \par
\item What are these probabilities for $\ket{\varphi}$?
\end{itemize}
\begin{center}
\begin{tikzpicture}[scale=1.5]
\draw[dashed] (0,0) circle(1);
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[->] (0, 0) -- (1.2, 0);
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below right] at (1, 0) {$\ket{0}$};
\draw[->] (0, 0) -- (0, 1.2);
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[above left] at (0, 1) {$\ket{1}$};
\draw[dotted] (0, 0) -- (0.87, 0.5);
\draw[color=gray,->] (0.5, 0.0) arc (0:30:0.5);
\node[right, color=gray] at (0.47, 0.12) {$30^\circ$};
\fill[color = ored] (0.87, 0.5) circle[radius=0.05];
\node[above right] at (0.87, 0.5) {$\ket{\psi}$};
\draw[dotted] (0, 0) -- (-0.707, -0.707);
\draw[color=gray,->] (0.25, 0.0) arc (0:-135:0.25);
\node[below, color=gray] at (0.2, -0.2) {$135^\circ$};
\fill[color = ored] (-0.707, -0.707) circle[radius=0.05];
\node[below left] at (-0.707, -0.707) {$\ket{\varphi}$};
\end{tikzpicture}
\end{center}
\vfill
As you may have noticed, we don't need two coordinates to fully define a qubit's state. \par
We can get by with one coordinate just as well.
Instead of referring to each state using its cartesian coordinates $\psi_0$ and $\psi_1$, \par
we can address it using its \textit{polar angle} $\theta$, measured from $\ket{0}$ counterclockwise:
\begin{center}
\begin{tikzpicture}[scale=1.5]
\draw[dashed] (0,0) circle(1);
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[dotted] (0, 0) -- (0.707, 0.707);
\draw[color=gray,->] (0.5, 0.0) arc (0:45:0.5);
\node[above right, color=gray] at (0.5, 0) {$\theta$};
\draw[->] (0, 0) -- (1.2, 0);
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below right] at (1, 0) {$\ket{0}$};
\draw[->] (0, 0) -- (0, 1.2);
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[above left] at (0, 1) {$\ket{1}$};
\fill[color = ored] (0.707, 0.707) circle[radius=0.05];
\node[above right] at (0.707, 0.707) {$\ket{\psi}$};
\end{tikzpicture}
\end{center}
\problem{}
Find $\psi_0$ and $\psi_1$ in terms of $\theta$ for an arbitrary qubit $\psi$.
\vfill
\pagebreak
\problem{}
Consider the following qubit states:
\null\hfill\begin{minipage}{0.48\textwidth}
\begin{equation*}
\ket{+} = \frac{\ket{0} + \ket{1}}{\sqrt{2}}
\end{equation*}
\end{minipage}\hfill\begin{minipage}{0.48\textwidth}
\begin{equation*}
\ket{-} = \frac{\ket{0} - \ket{1}}{\sqrt{2}}
\end{equation*}
\end{minipage}\hfill\null
\begin{itemize}
\item Where are these on the unit circle?
\item What are their polar angles?
\item What are the probabilities of observing $\ket{0}$ and $\ket{1}$ when measuring $\ket{+}$ and $\ket{-}$?
\end{itemize}
\vfill
\begin{center}
\begin{tikzpicture}[scale = 2.5]
\draw[dashed] (0,0) circle(1);
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[->] (0, 0) -- (1.2, 0);
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below right] at (1, 0) {$\ket{0}$};
\draw[->] (0, 0) -- (0, 1.2);
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[above left] at (0, 1) {$\ket{1}$};
\end{tikzpicture}
\end{center}
\vfill
\vfill
\pagebreak
\section{Operations on One Qubit}
We may apply transformations to qubits just as we apply transformations to probabilistic bits.
Again, we'll represent transformations as $2 \times 2$ matrices, since we want to map
one qubit state to another. \par
\note{In other words, we want to map elements of $\mathbb{R}^2$ to elements of $\mathbb{R}^2$.} \par
We will call such maps \textit{quantum gates,} since they are the quantum equivalent of classical logic gates.
\vspace{2mm}
There are two conditions a valid quantum gate $G$ must satisfy:
\begin{itemize}[itemsep = 1mm]
\item For any valid state $\ket{\psi}$, $G\ket{\psi}$ is a valid state. \par
Namely, $G$ must preserve the length of any vector it is applied to. \par
Recall that the set of valid quantum states is the set of unit vectors in $\mathbb{R}^2$.
\item Any quantum gate must be \textit{invertible}. \par
We'll skip this condition for now, and return to it later.
\end{itemize}
In short, a quantum gate is a linear map that maps the unit circle to itself. \par
There are only two kinds of linear maps that do this: reflections and rotations.
\problem{}
The $X$ gate is the quantum analog of the \texttt{not} gate, defined by the following table:
\begin{itemize}
\item $X\ket{0} = \ket{1}$
\item $X\ket{1} = \ket{0}$
\end{itemize}
Find the matrix $X$.
\begin{solution}
\begin{equation*}
\begin{bmatrix}
0 & 1 \\ 1 & 0
\end{bmatrix}
\end{equation*}
\end{solution}
\vfill
\problem{}
What are $X\ket{+}$ and $X\ket{-}$? \par
\hint{Remember that all matrices are linear maps. What does this mean?}
\begin{solution}
$X\ket{+} = \ket{+}$ and $X\ket{-} = -\ket{-}$ (that is, negative ket-minus). \par
Most notably, remember that $G(a\ket{0} + b\ket{1}) = aG\ket{0} + bG\ket{1}$
\end{solution}
\vfill
\problem{}
In terms of geometric transformations, what does $X$ do to the unit circle?
\begin{solution}
It is a reflection about the $45^\circ$ axis.
\end{solution}
\vfill
\pagebreak
\problem{}
Let $Z$ be a quantum gate defined by the following table: \par
\begin{itemize}
\item $Z\ket{0} = \ket{0}$,
\item $Z\ket{1} = -\ket{1}$.
\end{itemize}
What is the matrix $Z$? What are $Z\ket{+}$ and $Z\ket{-}$? \par
What is $Z$ as a geometric transformation?
\vfill
\problem{}
Is the map $B$ defined by the table below a valid quantum gate?
\begin{itemize}
\item $B\ket{0} = \ket{0}$
\item $B\ket{1} = \ket{+}$
\end{itemize}
\hint{Find a $\ket{\psi}$ so that $B\ket{\psi}$ is not a valid qubit state.}
\begin{solution}
$B\ket{+} = \frac{1 + \sqrt{2}}{2}\ket{0} + \frac{1}{2}\ket{1}$, which has a squared length of $\frac{\sqrt{2} + 1}{\sqrt{2}} \neq 1$, and is thus not a valid qubit state.
\end{solution}
\vfill
\problem{Rotation}
As we noted earlier, any rotation about the center is a valid quantum gate. \par
Let's derive all transformations of this form.
\begin{itemize}[itemsep = 1mm]
\item Let $U_\phi$ be the matrix that represents a counterclockwise rotation of $\phi$ degrees. \par
What are $U_\phi\ket{0}$ and $U_\phi\ket{1}$?
\item Find the matrix $U_\phi$ for an arbitrary $\phi$.
\end{itemize}
\vfill
\problem{}
Say we have a qubit that is either $\ket{+}$ or $\ket{-}$. We do not know which of the two states it is in. \par
Using one operation and one measurement, how can we find out, for certain, which qubit we received? \par
\vfill
\pagebreak

View File

@ -1,283 +0,0 @@
\section{Two Bits}
\problem{}<compoundclassicalbits>
As we already know, the set of states a single bit can take is $\mathbb{B} = \{\texttt{0}, \texttt{1}\}$. \par
What is the set of compound states \textit{two} bits can take? How about $n$ bits? \par
\hint{Cartesian product.}
\vspace{5cm}
Of course, \ref{compoundclassicalbits} is fairly easy: \par
If $a$ is in $\{\texttt{0}, \texttt{1}\}$ and $b$ is in $\{\texttt{0}, \texttt{1}\}$,
the values $ab$ can take are
$\{\texttt{0}, \texttt{1}\} \times \{\texttt{0}, \texttt{1}\} = \{\texttt{00}, \texttt{01}, \texttt{10}, \texttt{11}\}$.
\vspace{2mm}
The same is true of any other state set: if $a$ takes values in $A$ and $b$ takes values in $B$, \par
the compound state $(a,b)$ takes values in $A \times B$.
\vspace{2mm}
We would like to do the same in vector notation. Given bits $\ket{a}$ and $\ket{b}$,
how should we represent the state of $\ket{ab}$? We'll spend the rest of this section solving this problem.
\problem{}
When we have two bits, we have four orthogonal states:
$\overrightarrow{00}$, $\overrightarrow{01}$, $\overrightarrow{10}$, and $\overrightarrow{11}$. \par
\vspace{2mm}
Write $\ket{00}$, $\ket{01}$, $\ket{10}$, and $\ket{11}$ as column vectors \par
with respect to the orthonormal basis $\{\overrightarrow{00}, \overrightarrow{01}, \overrightarrow{10}, \overrightarrow{11}\}$.
\vfill
\pagebreak
\definition{Tensor Products}
The \textit{tensor product} of two vectors is defined as follows:
\begin{equation*}
\begin{bmatrix}
x_1 \\ x_2
\end{bmatrix}
\otimes
\begin{bmatrix}
y_1 \\ y_2
\end{bmatrix}
=
\begin{bmatrix}
x_1
\begin{bmatrix}
y_1 \\ y_2
\end{bmatrix}
\\[4mm]
x_2
\begin{bmatrix}
y_1 \\ y_2
\end{bmatrix}
\end{bmatrix}
=
\begin{bmatrix}
x_1y_1 \\[1mm]
x_1y_2 \\[1mm]
x_2y_1 \\[1mm]
x_2y_2 \\[0.5mm]
\end{bmatrix}
\end{equation*}
That is, we take our first vector, multiply the second
vector by each of its components, and stack the result.
You could think of this as a generalization of scalar
multiplication, where scalar multiplication is a
tensor product with a vector in $\mathbb{R}^1$:
\begin{equation*}
a
\begin{bmatrix}
x_1 \\ x_2
\end{bmatrix}
=
\begin{bmatrix}
a
\end{bmatrix}
\otimes
\begin{bmatrix}
x_1 \\ x_2
\end{bmatrix}
=
\begin{bmatrix}
a
\begin{bmatrix}
x_1 \\ x_2
\end{bmatrix}
\end{bmatrix}
=
\begin{bmatrix}
ax_1 \\[1mm]
ax_2
\end{bmatrix}
\end{equation*}
\vspace{2mm}
Also, note that the tensor product is very similar to the
Cartesian product: if we take $x$ and $y$ as sets, with
$x = \{x_1, x_2\}$ and $y = \{y_1, y_2\}$, the Cartesian product
contains the same elements as the tensor product---every possible
pairing of an element in $x$ with an element in $y$:
\begin{equation*}
x \times y = \{~(x_1,y_1), (x_1,y_2), (x_2,y_1), (x_2,y_2)~\}
\end{equation*}
In fact, these two operations are (in a sense) essentially identical. \par
Let's quickly demonstrate this.
\problem{}
Say $x \in \mathbb{R}^n$ and $y \in \mathbb{R}^m$. \par
What is the dimension of $x \otimes y$?
\vfill
\problem{}<basistp>
What is the pairwise tensor product
$
\Bigl\{
\left[
\begin{smallmatrix}
1 \\ 0 \\ 0
\end{smallmatrix}
\right],
\left[
\begin{smallmatrix}
0 \\ 1 \\ 0
\end{smallmatrix}
\right],
\left[
\begin{smallmatrix}
0 \\ 0 \\ 1
\end{smallmatrix}
\right]
\Bigr\}
\otimes
\Bigl\{
\left[
\begin{smallmatrix}
1 \\ 0
\end{smallmatrix}
\right],
\left[
\begin{smallmatrix}
0 \\ 1
\end{smallmatrix}
\right]
\Bigr\}
$?
\note{in other words, distribute the tensor product between every pair of vectors.}
\vfill
\problem{}
What is the \textit{span} of the vectors we found in \ref{basistp}? \par
In other words, what is the set of vectors that can be written as linear combinations of the vectors above?
\vfill
Look through the above problems and convince yourself of the following fact: \par
If $a$ is a basis of $A$ and $b$ is a basis of $B$, $a \otimes b$ is a basis of $A \times B$.
\pagebreak
\problem{}
The compound state of two vector-form bits is their tensor product. \par
Compute the following. Is the result what we'd expect?
\begin{itemize}
\item $\ket{0} \otimes \ket{0}$
\item $\ket{0} \otimes \ket{1}$
\item $\ket{1} \otimes \ket{0}$
\item $\ket{1} \otimes \ket{1}$
\end{itemize}
\hint{
Remember that the coordinates of
$\ket{0}$ are $\left[\begin{smallmatrix} 1 \\ 0 \end{smallmatrix}\right]$,
and the coordinates of
$\ket{1}$ are $\left[\begin{smallmatrix} 0 \\ 1 \end{smallmatrix}\right]$.
}
\vfill
\problem{}<fivequant>
Of course, writing $\ket{0} \otimes \ket{1}$ is a bit excessive. We'll shorten this notation to $\ket{01}$. \par
\vspace{2mm}
In fact, we could go further: if we wanted to write the set of bits $\ket{1} \otimes \ket{1} \otimes \ket{0} \otimes \ket{1}$, \par
we could write $\ket{1101}$---but a shorter alternative is $\ket{13}$, since $13$ is \texttt{1101} in binary.
\vspace{2mm}
Write $\ket{5}$ as a three-bit state vector. \par
\begin{solution}
$\ket{5} = \ket{101} = \ket{1} \otimes \ket{0} \otimes \ket{1} = [0,0,0,0,0,1,0,0]^T$ \par
Notice how we're counting from the top, with $\ket{000} = [1,0,...,0]$ and $\ket{111} = [0, ..., 0, 1]$.
\end{solution}
\vfill
\problem{}
Write the three-bit states $\ket{0}$ through $\ket{7}$ as column vectors. \par
\hint{You do not need to compute every tensor product. Do a few and find the pattern.}
\vfill
\pagebreak

View File

@ -1,254 +0,0 @@
\section{Half a Qubit}
\begin{tcolorbox}[
enhanced,
breakable,
colback=white,
colframe=ored,
boxrule=0.6mm,
arc=0mm,
outer arc=0mm,
]
\color{ored}
\begingroup
\large\centering
\textbf{Disclaimer:} \par
\endgroup
\vspace{1ex}
The \say{qubits} we're about to define aren't \textit{really} qubits. The proper definition is a bit more
complicated, but don't worry about that yet. For now, take what I say as truth---we'll get to
the complex definition soon enough.
\vspace{2mm}
The information provided in this handout does not, and is not intended to, constitute legal advice.
All information, content, and material in this document is for general informational purposes only.
\end{tcolorbox}
\generic{Remark:}
Just like a classical bit, a \textit{quantum bit} (or \textit{qubit}) can take the values $\ket{0}$ and $\ket{1}$. \par
However, \texttt{0} and \texttt{1} aren't the only states a qubit may have.
\vspace{2mm}
We'll make sense of quantum bits by extending the \say{vectored} bit representation we developed in the previous section.
First, let's look at a diagram we drew a few pages ago:
\begin{ORMCbox}{Time Travel (Page 5)}{black!10!white}{black!65!white}
A classical bit takes states in $\{\texttt{0}, \texttt{1}\}$, picking one at a time. \par
We'll represent \texttt{0} and \texttt{1} as perpendicular unit vectors $\ket{0}$ and $\ket{1}$,
shown below.
\begin{center}
\begin{tikzpicture}[scale=1.5]
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[->] (0, 0) -- (1.2, 0);
\node[right] at (1.2, 0) {$\ket{0}$};
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below] at (1, 0) {\texttt{0}};
\draw[->] (0, 0) -- (0, 1.2);
\node[above] at (0, 1.2) {$\ket{1}$};
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[left] at (0, 1) {\texttt{1}};
\end{tikzpicture}
\end{center}
The point marked $1$ is at $[0, 1]$. It is no parts $\ket{0}$, and all parts $\ket{1}$. \par
Of course, we can say something similar about the point marked $0$: \par
It is at $[1, 0] = (1 \times \ket{0}) + (0 \times \ket{1})$, and is thus all $\ket{0}$ and no $\ket{1}$. \par
\end{ORMCbox}
The diagram in the box above can also be used to describe the state of a qubit. \par
Like classical bits, qubits have the \textit{basis states} $\ket{0}$ and $\ket{1}$. \par
Unlike classical bits, qubits may take values that are some combination of both.
\vspace{2mm}
Namely, every possible state of a qubit is a \textit{normalized linear combination} of $\ket{0}$ and $\ket{1}$. \par
Such states are called \textit{superpositions} of $\ket{0}$ and $\ket{1}$, since they partially contain both states.
\vfill
\pagebreak
\definition{}
The state of a quantum bit is the column unit vector
$
\ket{\psi}
= \left[\begin{smallmatrix} a \\ b \end{smallmatrix}\right]
= a\ket{0} + b\ket{1}$ for $a, b \in \mathbb{R}
$. \par
Note that the length of $\ket{\psi}$ must always be $1$, which is the same as saying that $a^2 + b^2 = 1$.
\vspace{2mm}
If we plot the set of valid quantum states on our plane, we get a unit circle centered at the origin: \par
\begin{center}
\begin{tikzpicture}[scale=1.5]
\draw[dashed] (0,0) circle(1);
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[->] (0, 0) -- (1.2, 0);
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below right] at (1, 0) {$\ket{0}$};
\draw[->] (0, 0) -- (0, 1.2);
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[above left] at (0, 1) {$\ket{1}$};
\fill[color = ored] (0.87, 0.5) circle[radius=0.05];
\node[above right] at (0.87, 0.5) {$\ket{\psi}$};
\end{tikzpicture}
\end{center}
\problem{}
In the above diagram, the counterclockwise angle from $\ket{0}$ to $\ket{\psi}$ is $30^\circ$\hspace{-1ex}. \par
Write $\ket{\psi}$ as a linear combination of $\ket{0}$ and $\ket{1}$.
\vfill
\definition{Measurement I}
Although a qubit may have many states, it must be $\ket{0}$ or $\ket{1}$ when we measure it. \par
\vspace{2mm}
As a trivial example, say $\ket{\psi}$ = $\ket{0}$. \par
If we were to measure $\ket{\psi}$, we'd get $\ket{0}$, and the state of the qubit wouldn't change.
\vspace{2mm}
However, something interesting happens when $\ket{\psi} = a\ket{0} + b\ket{1}$. \par
Our measurement again returns either $\ket{0}$ or $\ket{1}$, with the following probabilities: \par
\begin{itemize}[itemsep = 2mm, topsep = 2mm]
\item $\mathcal{P}(\ket{1}) = b^2$
\item $\mathcal{P}(\ket{0}) = a^2$
\end{itemize}
\note{
Note that $\mathcal{P}(\ket{0}) + \mathcal{P}(\ket{1}) = 1$. \\
As you already know, this is true of any probability function.
}
\vspace{2mm}
In addition, $\ket{\psi}$ \textit{collapses} when it is measured: it instantly changes its state to the result of the measurement,
leaving no trace of its previous state. \par
If we measure $\ket{\psi}$ and get $\ket{1}$, $\ket{\psi}$ becomes $\ket{1}$---and
it will remain in that state until it is changed.
Quantum bits cannot be measured without their state collapsing. \par
\pagebreak
\problem{}
\begin{itemize}
\item What is the probability we get $\ket{0}$ when we measure $\ket{\psi_0}$? \par
\item What outcomes can we get if we measure it a second time? \par
\item What are these probabilities for $\ket{\psi_1}$?
\end{itemize}
\begin{center}
\begin{tikzpicture}[scale=1.5]
\draw[dashed] (0,0) circle(1);
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[->] (0, 0) -- (1.2, 0);
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below right] at (1, 0) {$\ket{0}$};
\draw[->] (0, 0) -- (0, 1.2);
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[above left] at (0, 1) {$\ket{1}$};
\draw[dotted] (0, 0) -- (0.87, 0.5);
\draw[color=gray,->] (0.5, 0.0) arc (0:30:0.5);
\node[right, color=gray] at (0.47, 0.12) {$30^\circ$};
\fill[color = ored] (0.87, 0.5) circle[radius=0.05];
\node[above right] at (0.87, 0.5) {$\ket{\psi_0}$};
\draw[dotted] (0, 0) -- (-0.707, -0.707);
\draw[color=gray,->] (0.25, 0.0) arc (0:-135:0.25);
\node[below, color=gray] at (0.2, -0.2) {$135^\circ$};
\fill[color = ored] (-0.707, -0.707) circle[radius=0.05];
\node[below left] at (-0.707, -0.707) {$\ket{\psi_1}$};
\end{tikzpicture}
\end{center}
\vfill
As you may have noticed, we don't need two coordinates to fully define a qubit's state. \par
We can get by with one coordinate just as well.
\vspace{2mm}
Instead of referring to each state using its cartesian coordinates $a$ and $b$, \par
we can address it using its \textit{polar angle} $\theta$, measured from $\ket{0}$ counterclockwise:
\begin{center}
\begin{tikzpicture}[scale=1.5]
\draw[dashed] (0,0) circle(1);
\fill[color = black] (0, 0) circle[radius=0.05];
\draw[dotted] (0, 0) -- (0.707, 0.707);
\draw[color=gray,->] (0.5, 0.0) arc (0:45:0.5);
\node[above right, color=gray] at (0.5, 0) {$\theta$};
\draw[->] (0, 0) -- (1.2, 0);
\fill[color = oblue] (1, 0) circle[radius=0.05];
\node[below right] at (1, 0) {$\ket{0}$};
\draw[->] (0, 0) -- (0, 1.2);
\fill[color = oblue] (0, 1) circle[radius=0.05];
\node[above left] at (0, 1) {$\ket{1}$};
\fill[color = ored] (0.707, 0.707) circle[radius=0.05];
\node[above right] at (0.707, 0.707) {$\ket{\psi}$};
\end{tikzpicture}
\end{center}
\problem{}
Find $a$ and $b$ in terms of $\theta$ for an arbitrary qubit.
\vfill
\pagebreak

View File

@ -1,4 +1,4 @@
\section{Two Halves of a Qubit}
\section{Two Qubits}
\definition{}
@ -59,7 +59,7 @@ $\ket{\psi} = \frac{1}{\sqrt{2}} \ket{00} + \frac{1}{2} \ket{01} + \frac{\sqrt{3
\problem{}
Again, consider the two-qubit state
$\ket{\psi} = \frac{1}{\sqrt{2}} \ket{00} + \frac{1}{2} \ket{01} + \frac{\sqrt{3}}{4} \ket{10} + \frac{1}{4} \ket{11}$ \par
If we measure the first qubit of $\ket{\psi}$ and get $\ket{0}$, what is the resulting state of $\ket{\phi}$? \par
If we measure the first qubit of $\ket{\psi}$ and get $\ket{0}$, what is the resulting state of $\ket{\psi}$? \par
What would the state be if we'd measured $\ket{1}$ instead?
\vfill

View File

@ -1,8 +1,7 @@
\section{Logic Gates}
Now that we know how to write vectored bits, let's look at the ways we can change them.
\definition{Matrices}
A few weeks ago, we talked about matrices. Recall that every linear map may be written as a matrix,
Throughout this handout, we've been using matrices. Again, recall that every linear map may be written as a matrix,
and that every matrix represents a linear map. For example, if $f: \mathbb{R}^2 \to \mathbb{R}^2$ is a linear
map, we can write it as follows:
\begin{equation*}
@ -26,7 +25,7 @@ map, we can write it as follows:
\definition{}
Before discussing quantum gates, we need to review classical logic. \par
Before discussing multi-qubit quantum gates, we need to review classical logic. \par
Of course, a classical logic gate is a linear map from $\mathbb{B}^m$ to $\mathbb{B}^n$
@ -290,7 +289,7 @@ We could draw the above transformation as a combination $X$ and $I$ (identity) g
\end{tikzpicture}
\end{center}
We can even omit the $I$ gate, since we now know that transforms affect the whole state: \par
We can even omit the $I$ gate, since we now know that transformations affect the whole state: \par
\begin{center}
\begin{tikzpicture}[scale=0.8]
\node[qubit] (a) at (0, 0) {$\ket{0}$};

View File

@ -38,7 +38,7 @@ the following holds: \par
G\bigl(a_0 \ket{0} + a_1\ket{1}\bigr) = a_0G\ket{0} + a_1G\ket{1}
\end{equation*}
\problem{}
\problem{}<cnot>
Consider the \textit{controlled not} (or \textit{cnot}) gate, defined by the following table: \par
\begin{itemize}
\item $\text{X}_\text{c}\ket{00} = \ket{00}$
@ -51,7 +51,7 @@ Find the matrix that applies the cnot gate.
\begin{solution}
\begin{equation*}
\text{CNOT} = \left[\begin{smallmatrix}
\text{X}_\text{c} = \left[\begin{smallmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1 \\
@ -127,43 +127,26 @@ If we measure the result of \ref{applycnot}, what are the probabilities of getti
\vfill
\generic{Remark:}
As we just saw, a quantum gate is fully defined by where it maps our basis states $\ket{0}$ and $\ket{1}$ \par
(or, $\ket{00...0}$ through $\ket{11...1}$ for multi-qubit gates). This directly follows from \ref{qgateislinear}.
\problem{}
Finally, modify the original cnot gate so that the roles of its bits are reversed: \par
$\text{X}_\text{c, flipped} \ket{ab}$ should invert $\ket{a}$ iff $\ket{b}$ is $\ket{1}$.
\begin{solution}
\begin{equation*}
\text{X}_\text{c, flipped} = \begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0 \\
0 & 1 & 0 & 0 \\
\end{bmatrix}
\end{equation*}
\end{solution}
\vfill
\pagebreak
%\problem{}
%Now, modify the CNOT gate so that it inverts $\ket{a}$ whenever it is applied.
%
%\begin{solution}
% \begin{equation*}
% \text{CNOT}_{\text{mod}} = \begin{bmatrix}
% 0 & 1 & 0 & 0 \\
% 1 & 0 & 0 & 0 \\
% 0 & 0 & 1 & 0 \\
% 0 & 0 & 0 & 1
% \end{bmatrix}
% \end{equation*}
%\end{solution}
%\problem{}
%Finally, modify the original CNOT gate so that the roles of its bits are reversed: \par
%$\text{CNOT}_{\text{flip}} \ket{ab}$ should invert $\ket{a}$ iff $\ket{b}$ is $\ket{1}$.
%
%
%\begin{solution}
% \begin{equation*}
% \text{CNOT}_{\text{flip}} = \begin{bmatrix}
% 1 & 0 & 0 & 0 \\
% 0 & 0 & 0 & 1 \\
% 0 & 0 & 1 & 0 \\
% 0 & 1 & 0 & 0 \\
% \end{bmatrix}
% \end{equation*}
%\end{solution}
%
%\vfill
@ -232,6 +215,12 @@ Using this result, find $H^{-1}$.
\vfill
\problem{}
What geometric transformation does $H$ apply to the unit circle? \par
\hint{Rotation or reflection? How much, or about which axis?}
\vfill
\problem{}
What are $H\ket{0}$ and $H\ket{1}$? \par
Are these states entangled?

View File

@ -0,0 +1,67 @@
% Copyright (C) 2023 <Mark (mark@betalupi.com)>
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% You may have received a copy of the GNU General Public License
% along with this program. If not, see <https://www.gnu.org/licenses/>.
%
%
%
% If you edit this, please give credit!
% Quality handouts take time to make.
% use the [nosolutions] flag to hide solutions,
% use the [solutions] flag to show solutions.
\documentclass[
solutions,
singlenumbering
]{../../resources/ormc_handout}
\usepackage{../../resources/macros}
\def\ket#1{\left|#1\right\rangle}
\def\bra#1{\left\langle#1\right|}
\usepackage{units}
\input{tikzset}
\uptitlel{Advanced 2}
\uptitler{Winter 2022}
\title{Intro to Quantum Computing I}
\subtitle{Prepared by \githref{Mark} on \today{}}
\begin{document}
\maketitle
\input{parts/01 bits}
\input{parts/02 qubit}
\input{parts/03 two qubits}
\input{parts/04 logic gates}
\input{parts/05 quantum gates}
\section{Bonus Problems (Putnam)}
\problem{}
Suppose $A$ is a real, square matrix that satisfies $A^3 = A + I$. \par
Show that $\text{det}(A)$ is positive.
\vfill
\problem{}
Suppose $A, B$ are $2 \times 2$ complex matrices satisfying $AB = BA$, \par
and assume $A$ is not of the form $aI$ for some complex $a$. \par
Show that $B = xA + yI$ for complex $x$ and $y$.
\vfill
\problem{}
Is there an infinite sequence of real numbers $a_1, a_2, ...$ so that \par
$a_1^m + a_2^m + ... = m$ for every positive integer $m$?
\vfill
\end{document}

View File

@ -17,7 +17,8 @@
% use the [solutions] flag to show solutions.
\documentclass[
solutions,
singlenumbering
singlenumbering,
shortwarning
]{../../resources/ormc_handout}
\usepackage{../../resources/macros}
@ -30,7 +31,7 @@
\uptitlel{Advanced 2}
\uptitler{Winter 2022}
\title{Intro to Quantum Computing I}
\title{Intro to Quantum Computing II}
\subtitle{Prepared by \githref{Mark} on \today{}}
@ -38,13 +39,10 @@
\maketitle
\input{parts/00 vectors}
\input{parts/01 bits}
\input{parts/02 two bits}
\input{parts/03 half a qubit}
\input{parts/04 two halves}
\input{parts/05 logic gates}
\input{parts/06 quantum gates}
\input{parts/04 logic gates}
\input{parts/05 quantum gates}
\input{parts/06 hxh}
%\section{Superdense Coding}
%TODO