[AD] Update summary to new version of helpers

2025-09-26 12:31:55 +02:00
parent 33f034fdd1
commit eecaab61fc
16 changed files with 110 additions and 109 deletions

View File

@@ -14,11 +14,11 @@
\end{itemize}
\item \textbf{Time Complexity:}
\begin{itemize}
-\item Search: \tct{\log n}.
-\item Insert: \tct{\log n}.
-\item Delete: \tct{\log n}.
+\item Search: $\tct{\log n}$.
+\item Insert: $\tct{\log n}$.
+\item Delete: $\tct{\log n}$.
\end{itemize}
-\item \textbf{Height:} The height of an AVL Tree with $n$ nodes is \tct{\log n}.
+\item \textbf{Height:} The height of an AVL Tree with $n$ nodes is $\tct{\log n}$.
\end{itemize}
\end{properties}
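For context on the height bound above, a short derivation sketch (the standard minimal-AVL-tree argument; not part of the committed summary). Let $N(h)$ be the minimum number of nodes in an AVL tree of height $h$:
\begin{align*}
N(h) &= 1 + N(h-1) + N(h-2), \qquad N(0) = 1,\ N(1) = 2, \\
N(h) &\geq \varphi^{h} \quad \text{for } \varphi = \tfrac{1+\sqrt{5}}{2}
\;\Rightarrow\; h \leq \log_{\varphi} n \approx 1.44 \log_2 n = \tco{\log n}.
\end{align*}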

View File

@@ -16,9 +16,9 @@
\end{itemize}
\item \textbf{Time Complexity (basic operations):}
\begin{itemize}
-\item Search: \tct{h}, where $h$ is the height of the tree.
-\item Insert: \tct{h}.
-\item Delete: \tct{h}.
+\item Search: $\tct{h}$, where $h$ is the height of the tree.
+\item Insert: $\tct{h}$.
+\item Delete: $\tct{h}$.
\end{itemize}
\item \textbf{Height:} The height of a binary tree is the length of the longest path from the root to a leaf node.
\end{itemize}

View File

@@ -55,10 +55,10 @@ The same disadvantages as for single linked lists apply here as well
\toprule
\textbf{Operation} & \textbf{Array} & \textbf{Single Linked List} & \textbf{Double Linked List} \\
\midrule
-$\textsc{insert}(k, L)$ & \tco{1} & \tco{1} & \tco{1} \\
-$\textsc{get}(i, L)$ & \tco{1} & \tco{l} & \tco{l} \\
-$\textsc{insertAfter}(k, k', L)$ & \tco{l} & \tco{1} & \tco{1} \\
-$\textsc{delete}(k, L)$ & \tco{l} & \tco{l} & \tco{1} \\
+$\textsc{insert}(k, L)$ & $\tco{1}$ & $\tco{1}$ & $\tco{1}$ \\
+$\textsc{get}(i, L)$ & $\tco{1}$ & $\tco{l}$ & $\tco{l}$ \\
+$\textsc{insertAfter}(k, k', L)$ & $\tco{l}$ & $\tco{1}$ & $\tco{1}$ \\
+$\textsc{delete}(k, L)$ & $\tco{l}$ & $\tco{l}$ & $\tco{1}$ \\
\bottomrule
\end{tabular}
\smallskip
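To illustrate the one entry where the double linked list wins, a minimal sketch of constant-time \textsc{delete} given a node handle (the field names $prev$/$next$ and the assumption of sentinel head/tail nodes are mine, not from the summary):
\begin{algorithm}
\caption{\textsc{delete}$(k, L)$ on a doubly linked list (sketch)}
\begin{algorithmic}[1]
\Procedure{Delete}{$k, L$} \Comment{$k$ is a pointer to the node itself, so no $\tco{l}$ search is needed}
\State $k.prev.next \gets k.next$ \Comment{Bypass $k$ in the forward direction}
\State $k.next.prev \gets k.prev$ \Comment{Needs the $prev$ pointer that a single linked list lacks}
\EndProcedure
\end{algorithmic}
\end{algorithm}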
@@ -115,32 +115,32 @@ A \textbf{dictionary} stores a collection of key-value pairs.
\item \textbf{Insertion (\texttt{insert(key,\ value)})}:
\begin{itemize}
\item Adds a new key-value pair to the dictionary. If the key already exists, it may update the existing value.
-\item Time Complexity: Average case \tct{1} (with hashing), worst case \tcl{n} (when all keys hash to the same bucket).
+\item Time Complexity: Average case $\tct{1}$ (with hashing), worst case $\tco{n}$ (when all keys hash to the same bucket).
\end{itemize}
\item \textbf{Deletion (\texttt{remove(key)})}:
\begin{itemize}
\item Removes a key-value pair from the dictionary by key.
-\item Time Complexity: Average case \tct{1}, worst case \tco{n}.
+\item Time Complexity: Average case $\tct{1}$, worst case $\tco{n}$.
\end{itemize}
\item \textbf{Search/Access (\texttt{get(key)} or \texttt{find(key)})}:
\begin{itemize}
\item Retrieves the value associated with a given key.
-\item Time Complexity: Average case \tct{1}, worst case \tco{n}.
+\item Time Complexity: Average case $\tct{1}$, worst case $\tco{n}$.
\end{itemize}
\item \textbf{Contains (\texttt{containsKey(key)})}:
\begin{itemize}
\item Checks if a key is present in the dictionary.
-\item Time Complexity: Average case \tct{1}, worst case \tco{n}.
+\item Time Complexity: Average case $\tct{1}$, worst case $\tco{n}$.
\end{itemize}
\item \textbf{Size/Length}:
\begin{itemize}
\item Returns the number of key-value pairs in the dictionary.
-\item Time Complexity: \tco{1} (stored separately).
+\item Time Complexity: $\tco{1}$ (if the size is stored separately).
\end{itemize}
\item \textbf{Clear}:
\begin{itemize}
\item Removes all key-value pairs from the dictionary.
-\item Time Complexity: \tco{n} (depends on implementation, some implementations might be faster).
+\item Time Complexity: $\tco{n}$ (depends on the implementation; some implementations might be faster).
\end{itemize}
\end{enumerate}
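To make the average-versus-worst-case gap concrete, a minimal lookup sketch for a chaining hash table (the bucket layout and names are assumptions, not taken from the summary):
\begin{algorithm}
\caption{\textsc{get}$(key)$ in a hash table with chaining (sketch)}
\begin{algorithmic}[1]
\Procedure{Get}{$key, T$}
\State $b \gets h(key) \bmod |T|$ \Comment{Expected $\tct{1}$ with a good hash function $h$}
\For{\textbf{each} $(k, v)$ \textbf{in} $T[b]$} \Comment{Bucket scan; degenerates to $\tco{n}$ if all keys collide}
\If{$k = key$}
\State \Return $v$
\EndIf
\EndFor
\State \Return \textsc{null}
\EndProcedure
\end{algorithmic}
\end{algorithm}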
@@ -152,19 +152,19 @@ to implement the dictionary.
\begin{enumerate}
\item \textbf{Hash Table:}
\begin{itemize}
-\item Average case: \tct{n}, where $n$ is the number of key-value pairs.
+\item Average case: $\tct{n}$, where $n$ is the number of key-value pairs.
\item Additional space is needed for maintaining the hash-table buckets, which may lead to higher constant factors but not to asymptotic growth in complexity.
\end{itemize}
\item \textbf{Balanced Binary Search Tree (e.g., AVL tree or Red-Black Tree):}
\begin{itemize}
-\item Space Complexity: \tco{n}.
+\item Space Complexity: $\tco{n}$.
\item This structure uses more space than a hash table because each node additionally stores balance information.
\end{itemize}
\item \textbf{Dynamic Array:}
\begin{itemize}
\item This is not a common implementation for dictionaries due to inefficiencies in search and insertion operations compared to hash tables or balanced trees.
-\item Space Complexity: \tco{n}.
-\item Time complexity for insertion, deletion, and access can degrade significantly to \tco{n} without additional optimizations.
+\item Space Complexity: $\tco{n}$.
+\item Time complexity for insertion, deletion, and access can degrade significantly to $\tco{n}$ without additional optimizations.
\end{itemize}
\end{enumerate}
@@ -177,9 +177,9 @@ to implement the dictionary.
\toprule
\textbf{Operation} & \textbf{Unsorted Arrays} & \textbf{Sorted Arrays} & \textbf{Doubly Linked Lists} & \textbf{Binary Trees} & \textbf{AVL Trees} \\
\midrule
-Insertion & \tco{1} & \tco{n} & \tco{1} & \tco{h} & \tco{\log n} \\
-Deletion & \tco{n} & \tco{(n)} & \tco{n} & \tco{h} & \tco{\log n} \\
-Search & \tco{n} & \tco{\log n} & \tco{n} & \tco{h} & \tco{\log n} \\
+Insertion & $\tco{1}$ & $\tco{n}$ & $\tco{1}$ & $\tco{h}$ & $\tco{\log n}$ \\
+Deletion & $\tco{n}$ & $\tco{n}$ & $\tco{n}$ & $\tco{h}$ & $\tco{\log n}$ \\
+Search & $\tco{n}$ & $\tco{\log n}$ & $\tco{n}$ & $\tco{h}$ & $\tco{\log n}$ \\
\bottomrule
\end{tabular}
\caption{Time Complexities of Dictionary Operations for Various Data Structures}

View File

@@ -43,13 +43,13 @@ Here, the concept is to either choose an element or not to do so, i.e. the recur
The same algorithm can also be adapted for minimum subarray sum, or other problems using the same idea.
-\timecomplexity \tct{n} (Polynomial)
+\timecomplexity $\tct{n}$ (Polynomial)
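As a concrete instance of the choose-or-not idea mentioned above, the maximum subarray sum recurrence can be written as follows (standard formulation; the name $R$ is mine):
\begin{align*}
R[1] = A[1], \qquad R[k] = \max\{A[k],\; R[k - 1] + A[k]\}, \qquad
\text{maximum subarray sum} = \max_{1 \leq k \leq n} R[k],
\end{align*}
where $R[k]$ is the best sum of a subarray ending exactly at position $k$; a single pass gives $\tct{n}$.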
\subsubsection{Jump Game}
We want to return the minimum number of jumps to get to a position $n$. Each field at an index $i$ has a number $A[i]$ that tells us how far we can jump at most.
-A somewhat efficient way to solve this problem is the recurrence relation $M[k] = \max\{i + A[i] | 1 \leq i \leq M[k - 1]\}$, but an even more efficient one is based on $M[k] = \max \{i + A[i] | M[k - 2] < i \leq M[k - 1]\}$, which essentially uses the fact that we only need to look at all $i$ that can be reached with \textit{exactly} $l - 1$ jumps, since $i \leq M[k - 2]$ can already be reached with $k - 2$ jumps. While the first one has time complexity \tco{n^2}, the new one has \tco{n}
+A somewhat efficient way to solve this problem is the recurrence relation $M[k] = \max\{i + A[i] \mid 1 \leq i \leq M[k - 1]\}$, but an even more efficient one is based on $M[k] = \max\{i + A[i] \mid M[k - 2] < i \leq M[k - 1]\}$, which uses the fact that we only need to look at the indices $i$ that are reachable with \textit{exactly} $k - 1$ jumps, since every $i \leq M[k - 2]$ can already be reached with $k - 2$ jumps. While the first recurrence has time complexity $\tco{n^2}$, the new one has $\tco{n}$.
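A pseudocode sketch of the faster recurrence, in the summary's algorithm style (the bookkeeping, including the convention $M[-1] = 0$, is mine):
\begin{algorithm}
\caption{\textsc{minJumps}$(A[1..n])$ (sketch of the $\tco{n}$ variant)}
\begin{algorithmic}[1]
\Procedure{MinJumps}{$A$}
\State $M[0] \gets 1$; $k \gets 0$ \Comment{With $0$ jumps only position $1$ is reachable; take $M[-1] = 0$}
\While{$M[k] < n$}
\State $k \gets k + 1$
\State $M[k] \gets \max\{i + A[i] \mid M[k - 2] < i \leq M[k - 1]\}$ \Comment{Each index $i$ is scanned in exactly one round, hence $\tco{n}$ overall}
\EndWhile
\State \Return $k$
\EndProcedure
\end{algorithmic}
\end{algorithm}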
\newpage
@@ -80,7 +80,7 @@ A somewhat efficient way to solve this problem is the recurrence relation $M[k]
To find the actual solution (in the sense of which letters are in the longest common subsequence), we need to use backtracking, i.e. finding which letters we picked.
-\timecomplexity \tct{n \cdot m} (Polynomial)
+\timecomplexity $\tct{n \cdot m}$ (Polynomial)
\subsubsection{Editing distance}
@@ -95,12 +95,12 @@ The recurrence relation is $ED(i, j) = \min \begin{cases}
\end{cases}
\end{cases}$
-\timecomplexity \tct{n \cdot m} (Polynomial)
+\timecomplexity $\tct{n \cdot m}$ (Polynomial)
\subsubsection{Subset sum}
We want to find a subset of a set $A[1], \ldots, A[n]$ whose elements sum to a given number $b$. Its recurrence relation is $T(i, s) = T(i - 1, s) \lor T(i - 1, s - A[i])$, where $i$ is the $i$-th entry in the array and $s$ the current sum. Base cases are $T(0, s) = \text{false}$ for $s \neq 0$ and $T(0, 0) = \text{true}$. In our DP-Table, we store whether the subset sum can be constructed up to this element. Therefore, the DP table is a boolean table, and the value $T(n, b)$ only tells us if we have a solution or not. To find the solution, we need to backtrack again.
-\timecomplexity \tct{n \cdot b} (Pseudopolynomial)
+\timecomplexity $\tct{n \cdot b}$ (Pseudopolynomial)
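A tiny worked instance of the recurrence (example values are mine): for $A = [3, 4, 5]$ and $b = 7$,
\begin{align*}
T(2, 7) &= T(1, 7) \lor T(1, 7 - A[2]) = T(1, 7) \lor T(1, 3) \\
&= \bigl(T(0, 7) \lor T(0, 4)\bigr) \lor \bigl(T(0, 3) \lor T(0, 0)\bigr) = \text{true},
\end{align*}
so the subset $\{3, 4\}$ attains the sum $7$, and backtracking through the true entries recovers it.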
\subsubsection{Knapsack problem}
@@ -111,7 +111,7 @@ The recurrence relation is $DP(i, w) = \begin{cases}
\max\{DP(i - 1, w), P[i] + DP(i - 1, w - W[i])\} & \text{else}
\end{cases}$. The solution can be found in $DP(n, W)$, where $W$ is the weight limit.
-\timecomplexity \tct{n \cdot W} (Pseudopolynomial)
+\timecomplexity $\tct{n \cdot W}$ (Pseudopolynomial)
\newpage
@@ -144,7 +144,7 @@ We can use approximation to solve the Knapsack problem in polynomial time. For t
\end{spacing}
\end{algorithm}
-\timecomplexity \tco{n \cdot \log(n)}
+\timecomplexity $\tco{n \cdot \log(n)}$
\subsection{Matrix chain multiplication}
@@ -156,4 +156,5 @@ The recurrence relation for this problem is $M(i, j) = \begin{cases}
\end{cases}$
-\timecomplexity \tco{n^3}
+\timecomplexity $\tco{n^3}$

View File

@@ -27,7 +27,7 @@ We can also use $n$-times dijkstra or any other shortest path algorithm, or any
\begin{properties}[]{Characteristics of Floyd-Warshall Algorithm}
\begin{itemize}
-\item \textbf{Time Complexity:} \tco{|V|^3}.
+\item \textbf{Time Complexity:} $\tco{|V|^3}$.
\item Works for graphs with negative edge weights but no negative weight cycles.
\item Computes shortest paths for all pairs in one execution.
\end{itemize}
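For reference, the triple loop behind the $\tco{|V|^3}$ bound, in the style of the summary's pseudocode (a textbook sketch; $D$ is assumed to be initialised with the edge weights):
\begin{algorithm}
\caption{\textsc{floydWarshall}$(D)$ (sketch)}
\begin{algorithmic}[1]
\Procedure{FloydWarshall}{$D[1..n][1..n]$} \Comment{$D[i][j]$ holds $w(i, j)$, or $\infty$ if no edge exists}
\For{$k \gets 1$ to $n$} \Comment{Intermediate vertices allowed so far}
\For{$i \gets 1$ to $n$}
\For{$j \gets 1$ to $n$}
\State $D[i][j] \gets \min\{D[i][j],\ D[i][k] + D[k][j]\}$
\EndFor
\EndFor
\EndFor
\EndProcedure
\end{algorithmic}
\end{algorithm}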
@@ -82,7 +82,7 @@ We can also use $n$-times dijkstra or any other shortest path algorithm, or any
\item Reweight edges to remove negative weights.
\item Use Dijkstra's algorithm for each vertex to find shortest paths.
\end{enumerate}
-\item \textbf{Time Complexity:} \tco{|V| \cdot (|E| + |V| \log |V|)}.
+\item \textbf{Time Complexity:} $\tco{|V| \cdot (|E| + |V| \log |V|)}$.
\item Efficient for sparse graphs compared to Floyd-Warshall.
\end{itemize}
\end{properties}
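The reweighting step uses Bellman-Ford potentials $h(\cdot)$; in the standard formulation (not spelled out in this summary):
\begin{align*}
\hat{w}(u, v) &= w(u, v) + h(u) - h(v) \geq 0, \\
d(u, v) &= \hat{d}(u, v) - h(u) + h(v),
\end{align*}
where $h(v)$ is the shortest distance from an auxiliary source connected to every vertex with weight $0$; non-negativity follows from the triangle inequality $h(v) \leq h(u) + w(u, v)$.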
@@ -140,8 +140,8 @@ We can also use $n$-times dijkstra or any other shortest path algorithm, or any
\toprule
\textbf{Algorithm} & \textbf{Primary Use} & \textbf{Time Complexity} & \textbf{Remarks} \\
\midrule
-Floyd-Warshall & AP-SP & \tco{|V|^3} & Handles negative weights \\
-Johnsons Algorithm & AP-SP (sparse graphs) & \tco{|V|(|E| + |V| \log |V|)} & Requires reweighting \\
+Floyd-Warshall & AP-SP & $\tco{|V|^3}$ & Handles negative weights \\
+Johnson's Algorithm & AP-SP (sparse graphs) & $\tco{|V|(|E| + |V| \log |V|)}$ & Requires reweighting \\
\bottomrule
\end{tabular}
\caption{Comparison of the All-Pairs Shortest Path (AP-SP) algorithms discussed in the lecture}

View File

@@ -43,7 +43,7 @@
\begin{properties}[]{Depth-First Search}
\begin{itemize}
\item Can be implemented recursively or iteratively (using a stack).
-\item Time complexity: \tco{|V| + |E|}, where $|V|$ is the number of vertices and $|E|$ is the number of edges.
+\item Time complexity: $\tco{|V| + |E|}$, where $|V|$ is the number of vertices and $|E|$ is the number of edges.
\item Used for:
\begin{itemize}
\item Detecting cycles in directed and undirected graphs.
@@ -81,7 +81,7 @@
\begin{properties}[]{Breadth-First Search}
\begin{itemize}
\item Implements a queue-based approach for level-order traversal.
-\item Time complexity: \tco{|V| + |E|}.
+\item Time complexity: $\tco{|V| + |E|}$.
\item Used for:
\begin{itemize}
\item Finding shortest paths in unweighted graphs.
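As a companion to the properties box, a minimal queue-based sketch of the traversal (identifier names are mine):
\begin{algorithm}
\caption{\textsc{bfs}$(G, s)$ (sketch)}
\begin{algorithmic}[1]
\Procedure{BFS}{$G, s$}
\State mark $s$ visited; $Q \gets$ queue containing $s$
\While{$Q$ is not empty}
\State $u \gets$ \Call{Dequeue}{$Q$} \Comment{Each vertex enters $Q$ at most once: $\tco{|V|}$}
\For{\textbf{each} neighbour $v$ of $u$} \Comment{Each adjacency list is scanned once: $\tco{|E|}$ in total}
\If{$v$ not visited}
\State mark $v$ visited; \Call{Enqueue}{$Q, v$}
\EndIf
\EndFor
\EndWhile
\EndProcedure
\end{algorithmic}
\end{algorithm}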

View File

@@ -7,8 +7,8 @@
\begin{properties}[]{Characteristics of Strassen's Algorithm}
\begin{itemize}
-\item \textbf{Standard Multiplication:} Requires \tco{n^3} time for two $n \times n$ matrices.
-\item \textbf{Strassens Approach:} Reduces the complexity to \tco{n^{\log_2 7}} (approximately \tco{n^{2.81}}).
+\item \textbf{Standard Multiplication:} Requires $\tco{n^3}$ time for two $n \times n$ matrices.
+\item \textbf{Strassen's Approach:} Reduces the complexity to $\tco{n^{\log_2 7}}$ (approximately $\tco{n^{2.81}}$).
\item \textbf{Idea:} Uses divide-and-conquer to reduce the number of scalar multiplications from $8$ to $7$ in each recursive step.
\item Useful for applications involving large matrix multiplications.
\end{itemize}
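For completeness, the seven products of one recursion step and how they recombine (the standard Strassen identities, not listed in the summary):
\begin{align*}
M_1 &= (A_{11} + A_{22})(B_{11} + B_{22}) & M_2 &= (A_{21} + A_{22})\,B_{11} \\
M_3 &= A_{11}\,(B_{12} - B_{22}) & M_4 &= A_{22}\,(B_{21} - B_{11}) \\
M_5 &= (A_{11} + A_{12})\,B_{22} & M_6 &= (A_{21} - A_{11})(B_{11} + B_{12}) \\
M_7 &= (A_{12} - A_{22})(B_{21} + B_{22}) & & \\
C_{11} &= M_1 + M_4 - M_5 + M_7 & C_{12} &= M_3 + M_5 \\
C_{21} &= M_2 + M_4 & C_{22} &= M_1 - M_2 + M_3 + M_6
\end{align*}
This yields the recurrence $T(n) = 7\,T(n/2) + \tco{n^2}$, which solves to $\tco{n^{\log_2 7}}$.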

View File

@@ -8,8 +8,8 @@
\begin{itemize}
\item \textbf{Graph Type:} Works on undirected, weighted graphs.
\item \textbf{Approach:} Greedy, component-centric.
-\item \textbf{Time Complexity:} \tct{(|V| + |E|) \log(|V|)}.
-\item \textbf{Space Complexity:} Depends on the graph representation, typically \tct{E + V}.
+\item \textbf{Time Complexity:} $\tct{(|V| + |E|) \log(|V|)}$.
+\item \textbf{Space Complexity:} Depends on the graph representation, typically $\tct{|E| + |V|}$.
\item \textbf{Limitations:} Efficient for parallel implementations but less commonly used in practice compared to Kruskal's and Prim's.
\end{itemize}
\end{properties}

View File

@@ -8,9 +8,9 @@
\begin{itemize}
\item \textbf{Graph Type:} Works on undirected, weighted graphs.
\item \textbf{Approach:} Greedy, edge-centric.
-\item \textbf{Time Complexity:} \tco{|E| \log (|E|)} (for sort), \tco{|V| \log(|V|)} (for union find data structure).\\
-\timecomplexity \tco{|E| \log(|E|) + |V| \log(|V|)}
-\item \textbf{Space Complexity:} Depends on the graph representation, typically \tct{E + V}.
+\item \textbf{Time Complexity:} $\tco{|E| \log (|E|)}$ (for sorting), $\tco{|V| \log(|V|)}$ (for the union-find data structure).\\
+\timecomplexity $\tco{|E| \log(|E|) + |V| \log(|V|)}$
+\item \textbf{Space Complexity:} Depends on the graph representation, typically $\tct{|E| + |V|}$.
\item \textbf{Limitations:} Requires sorting of edges, which can dominate runtime.
\end{itemize}
\end{properties}
@@ -127,8 +127,8 @@
\begin{properties}[]{Performance}
\begin{itemize}
-\item \textsc{make}$(V)$: Initialize data structure \tco{n}
-\item \textsc{same}$(u, v)$: Check if two components belong to the same set \tco{1} or \tco{n}, depending on if the representant is stored in an array or not
-\item \textsc{union}$(u, v)$: Combine two sets, \tco{\log(n)}, in Kruskal we call this \tco{n} times, so total number (amortised) is \tco{n \log(n)}
+\item \textsc{make}$(V)$: Initialize the data structure in $\tco{n}$.
+\item \textsc{same}$(u, v)$: Check if two components belong to the same set in $\tco{1}$ or $\tco{n}$, depending on whether the representative is stored in an array or not.
+\item \textsc{union}$(u, v)$: Combine two sets in $\tco{\log(n)}$ amortised; in Kruskal we call this $\tco{n}$ times, so the total cost (amortised) is $\tco{n \log(n)}$.
\end{itemize}
\end{properties}
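A minimal sketch of the array-based variant behind these bounds (the names $rep$ and $members$ are mine): relabelling the smaller set means every element changes its representative at most $\log_2 n$ times, which gives the amortised $\tco{n \log(n)}$ total.
\begin{algorithm}
\caption{Union-find with a representative array (sketch)}
\begin{algorithmic}[1]
\Procedure{Same}{$u, v$}
\State \Return $rep[u] = rep[v]$ \Comment{$\tco{1}$ array lookup}
\EndProcedure
\Procedure{Union}{$u, v$}
\If{$|members[rep[u]]| < |members[rep[v]]|$}
\State swap $u$ and $v$ \Comment{Always relabel the smaller set}
\EndIf
\For{\textbf{each} $w$ \textbf{in} $members[rep[v]]$}
\State $rep[w] \gets rep[u]$; add $w$ to $members[rep[u]]$
\EndFor
\EndProcedure
\end{algorithmic}
\end{algorithm}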

View File

@@ -12,10 +12,10 @@
\item \textbf{Approach:} Greedy, vertex-centric.
\item \textbf{Time Complexity:}
\begin{itemize}
-\item With an adjacency matrix: \tct{V^2}.
-\item With a priority queue and adjacency list: \tct{(|V| + |E|) \log(|V|)}.
+\item With an adjacency matrix: $\tct{|V|^2}$.
+\item With a priority queue and adjacency list: $\tct{(|V| + |E|) \log(|V|)}$.
\end{itemize}
-\item \textbf{Space Complexity:} Depends on the graph representation, typically \tct{E + V}.
+\item \textbf{Space Complexity:} Depends on the graph representation, typically $\tct{|E| + |V|}$.
\item \textbf{Limitations:} Less efficient than Kruskal's for sparse graphs using adjacency matrices.
\end{itemize}
\end{properties}

View File

@@ -29,8 +29,8 @@
\begin{itemize}
\item \textbf{Time Complexity:}
\begin{itemize}
-\item \tco{|V|^2} for a simple implementation.
-\item \tco{(|V| + |E|) \log |V|} using a priority queue.
+\item $\tco{|V|^2}$ for a simple implementation.
+\item $\tco{(|V| + |E|) \log |V|}$ using a priority queue.
\end{itemize}
\item Only works with non-negative edge weights.
\item Greedy algorithm that processes vertices in increasing order of distance.
@@ -112,7 +112,7 @@
\begin{properties}[]{Characteristics of Bellman-Ford Algorithm}
\begin{itemize}
-\item \textbf{Time Complexity:} \tco{|V| \cdot |E|}.
+\item \textbf{Time Complexity:} $\tco{|V| \cdot |E|}$.
\item Can handle graphs with negative edge weights but not graphs with negative weight cycles.
\item Used for:
\begin{itemize}
@@ -161,7 +161,7 @@
\midrule
Handles Negative Weights & No & Yes \\
Detects Negative Cycles & No & Yes \\
-Time Complexity & \tco{(|V| + |E|) \log |V|} & \tco{|V| \cdot |E|} \\
+Time Complexity & $\tco{(|V| + |E|) \log |V|}$ & $\tco{|V| \cdot |E|}$ \\
Algorithm Type & Greedy & Dynamic Programming \\
\bottomrule
\end{tabular}
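Behind the $\tco{|V| \cdot |E|}$ entry sits the relaxation loop; a textbook sketch (the distance array $d$ is my notation):
\begin{algorithm}
\caption{\textsc{bellmanFord}$(G, s)$ (sketch)}
\begin{algorithmic}[1]
\Procedure{BellmanFord}{$G, s$}
\State $d[v] \gets \infty$ for all $v$; $d[s] \gets 0$
\For{$i \gets 1$ to $|V| - 1$} \Comment{$|V| - 1$ rounds suffice for simple shortest paths}
\For{\textbf{each} edge $(u, v) \in E$}
\State $d[v] \gets \min\{d[v],\ d[u] + w(u, v)\}$ \Comment{Relaxation}
\EndFor
\EndFor
\For{\textbf{each} edge $(u, v) \in E$} \Comment{One extra round detects negative cycles}
\If{$d[u] + w(u, v) < d[v]$}
\State \Return ``negative cycle''
\EndIf
\EndFor
\EndProcedure
\end{algorithmic}
\end{algorithm}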

View File

@@ -16,8 +16,8 @@
\subsection{Asymptotic Growth}
$f$ grows asymptotically slower than $g$ if $\displaystyle\lim_{m \rightarrow \infty} \frac{f(m)}{g(m)} = 0$.
-We can remark that $f$ is upper-bounded by $g$, thus $f \leq$\tco{g} and we can say $g$ is lower bounded by $f$, thus $g \geq$ \tcl{f}.
-If two functions grow equally fast asymptotically, \tct{f} $= g$
+We can remark that $f$ is upper-bounded by $g$, thus $f \leq \tco{g}$, and that $g$ is lower-bounded by $f$, thus $g \geq \tcl{f}$.
+If two functions grow asymptotically equally fast, then $g = \tct{f}$ (and equally $f = \tct{g}$).
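A quick worked instance of the limit criterion (the example functions are mine):
\begin{align*}
f(m) = m,\ g(m) = m^2: \qquad
\lim_{m \rightarrow \infty} \frac{m}{m^2} = \lim_{m \rightarrow \infty} \frac{1}{m} = 0
\;\Rightarrow\; m \leq \tco{m^2} \text{ and } m^2 \geq \tcl{m}.
\end{align*}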
\subsection{Runtime evaluation}
@@ -89,5 +89,5 @@ Therefore, the summation evaluates to $15$.
\subsection{Specific examples}
\begin{align*}
-\frac{n}{\log(n)} \geq \Omega(\sqrt{n}) \Leftrightarrow \sqrt{n} \leq \text{\tco{\frac{n}{\log(n)}}}
+\frac{n}{\log(n)} \geq \Omega(\sqrt{n}) \Leftrightarrow \sqrt{n} \leq \tco{\frac{n}{\log(n)}}
\end{align*}

View File

@@ -54,7 +54,7 @@ The concept for this algorithm is selecting an element (that being the largest o
\end{spacing}
\end{algorithm}
-\tc{n^2} because we have runtime \tco{n} for the search of the maximal entry and run through the loop \tco{n} times, but we have saved some runtime elsewhere, which is not visible in the asymptotic time complexity compared to bubble sort.
+\tc{n^2}, because we need $\tco{n}$ time to find the maximal entry and run through the loop $\tco{n}$ times; compared to bubble sort we save some runtime elsewhere, but this is not visible in the asymptotic time complexity.
@@ -66,16 +66,16 @@ The concept for this algorithm is selecting an element (that being the largest o
\end{definition}
\begin{properties}[]{Characteristics and Performance}
-\begin{itemize}
-\item \textbf{Efficiency:} Works well for small datasets or nearly sorted arrays.
-\item \textbf{Time Complexity:}
-\begin{itemize}
-\item Best case (already sorted): \tcl{n\log(n)}
-\item Worst case (reversed order): \tco{n^2}
-\item Average case: \tct{n^2}
-\end{itemize}
-\item \textbf{Limitations:} Inefficient on large datasets due to its \tct{n^2} time complexity and requires additional effort for linked list implementations.
-\end{itemize}
+\begin{itemize}
+\item \textbf{Efficiency:} Works well for small datasets or nearly sorted arrays.
+\item \textbf{Time Complexity:}
+\begin{itemize}
+\item Best case (already sorted): $\tcl{n\log(n)}$
+\item Worst case (reversed order): $\tco{n^2}$
+\item Average case: $\tct{n^2}$
+\end{itemize}
+\item \textbf{Limitations:} Inefficient on large datasets due to its $\tct{n^2}$ time complexity and requires additional effort for linked list implementations.
+\end{itemize}
\end{properties}
\begin{algorithm}
@@ -83,15 +83,15 @@ The concept for this algorithm is selecting an element (that being the largest o
\caption{\textsc{insertionSort(A)}}
\begin{algorithmic}[1]
\Procedure{InsertionSort}{$A$}
\For{$i \gets 2$ to $n$} \Comment{Iterate over the array}
\State $key \gets A[i]$ \Comment{Element to be inserted}
\State $j \gets i - 1$
\While{$j > 0$ and $A[j] > key$}
\State $A[j+1] \gets A[j]$ \Comment{Shift elements}
\State $j \gets j - 1$
\EndWhile
\State $A[j+1] \gets key$ \Comment{Insert element}
\EndFor
\EndProcedure
\end{algorithmic}
\end{spacing}
@@ -104,21 +104,21 @@ The concept for this algorithm is selecting an element (that being the largest o
\newpage
\subsubsection{Merge Sort}
\begin{definition}[]{Definition of Merge Sort}
Merge Sort is a divide-and-conquer algorithm that splits the input array into two halves, recursively sorts each half, and then merges the two sorted halves into a single sorted array. This process continues until the base case of a single element or an empty array is reached, as these are inherently sorted.
\end{definition}
\begin{properties}[]{Characteristics and Performance of Merge Sort}
-\begin{itemize}
-\item \textbf{Efficiency:} Suitable for large datasets due to its predictable time complexity.
-\item \textbf{Time Complexity:}
-\begin{itemize}
-\item Best case: \tcl{n \log n}
-\item Worst case: \tco{n \log n}
-\item Average case: \tct{n \log n}
-\end{itemize}
-\item \textbf{Space Complexity:} Requires additional memory for temporary arrays, typically \tct{n}.
-\item \textbf{Limitations:} Not in-place, and memory overhead can be significant for large datasets.
-\end{itemize}
+\begin{itemize}
+\item \textbf{Efficiency:} Suitable for large datasets due to its predictable time complexity.
+\item \textbf{Time Complexity:}
+\begin{itemize}
+\item Best case: $\tcl{n \log n}$
+\item Worst case: $\tco{n \log n}$
+\item Average case: $\tct{n \log n}$
+\end{itemize}
+\item \textbf{Space Complexity:} Requires additional memory for temporary arrays, typically $\tct{n}$.
+\item \textbf{Limitations:} Not in-place, and memory overhead can be significant for large datasets.
+\end{itemize}
\end{properties}
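The identical best, worst, and average cases above follow from the divide-and-conquer recurrence (standard argument, sketched here):
\begin{align*}
T(n) = 2\,T\!\left(\tfrac{n}{2}\right) + \tco{n}
\;\Rightarrow\; T(n) = \tct{n \log n},
\end{align*}
since the recursion tree has $\log_2 n$ levels and each level does $\tct{n}$ work in \textsc{Merge}, regardless of the input order.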
\begin{algorithm}
@@ -135,7 +135,7 @@ Merge Sort is a divide-and-conquer algorithm that splits the input array into tw
\State \Call{Merge}{$A, l, m, r$}
\EndProcedure
-\Procedure{Merge}{$A[1..n], l, m, r$} \Comment{Runtime: \tco{n}}
+\Procedure{Merge}{$A[1..n], l, m, r$} \Comment{Runtime: $\tco{n}$}
\State $result \gets$ new array of size $r - l + 1$
\State $i \gets l$
\State $j \gets m + 1$
@@ -161,12 +161,12 @@ Merge Sort is a divide-and-conquer algorithm that splits the input array into tw
\centering
\begin{tabular}{lccccc}
\toprule
-\textbf{Algorithm} & \textbf{Comparisons} & \textbf{Operations} & \textbf{Space Complexity} & \textbf{Locality} & \textbf{Time complexity}\\
+\textbf{Algorithm} & \textbf{Comparisons} & \textbf{Operations} & \textbf{Space Complexity} & \textbf{Locality} & \textbf{Time complexity} \\
\midrule
-\textit{Bubble-Sort} & \tco{n^2} & \tco{n^2} & \tco{1} & good & \tco{n^2}\\
-\textit{Selection-Sort} & \tco{n^2} & \tco{n} & \tco{1} & good & \tco{n^2}\\
-\textit{Insertion-Sort} & \tco{n \cdot \log(n)} & \tco{n^2} & \tco{1} & good & \tco{n^2}\\
-\textit{Merge-Sort} & \tco{n\cdot \log(n)} & \tco{n \cdot \log(n)} & \tco{n} & good & \tco{n \cdot \log(n)}\\
+\textit{Bubble-Sort} & $\tco{n^2}$ & $\tco{n^2}$ & $\tco{1}$ & good & $\tco{n^2}$ \\
+\textit{Selection-Sort} & $\tco{n^2}$ & $\tco{n}$ & $\tco{1}$ & good & $\tco{n^2}$ \\
+\textit{Insertion-Sort} & $\tco{n \cdot \log(n)}$ & $\tco{n^2}$ & $\tco{1}$ & good & $\tco{n^2}$ \\
+\textit{Merge-Sort} & $\tco{n\cdot \log(n)}$ & $\tco{n \cdot \log(n)}$ & $\tco{n}$ & good & $\tco{n \cdot \log(n)}$ \\
\bottomrule
\end{tabular}
\caption{Comparison of four comparison-based sorting algorithms discussed in the lecture. \emph{Operations} denotes the number of write operations in RAM.}

View File

@@ -9,11 +9,11 @@
\item \textbf{Efficiency:} Excellent for in-place sorting with predictable performance.
\item \textbf{Time Complexity:}
\begin{itemize}
-\item Best case: \tcl{n \log n}
-\item Worst case: \tco{n \log n}
-\item Average case: \tct{n \log n}
+\item Best case: $\tcl{n \log n}$
+\item Worst case: $\tco{n \log n}$
+\item Average case: $\tct{n \log n}$
\end{itemize}
-\item \textbf{Space Complexity:} In-place sorting requires \tct{1} additional space.
+\item \textbf{Space Complexity:} In-place sorting requires $\tct{1}$ additional space.
\item \textbf{Limitations:} Inefficient compared to Quick Sort for most practical datasets.
\end{itemize}
\end{properties}
@@ -46,11 +46,11 @@ The lecture does not cover the implementation of a heap tree. See the specific s
\item \textbf{Efficiency:} Performs well for uniformly distributed datasets.
\item \textbf{Time Complexity:}
\begin{itemize}
-\item Best case: \tcl{n + k} (for uniform distribution and $k$ buckets)
-\item Worst case: \tco{n^2} (when all elements fall into a single bucket)
-\item Average case: \tct{n + k}
+\item Best case: $\tcl{n + k}$ (for uniform distribution and $k$ buckets)
+\item Worst case: $\tco{n^2}$ (when all elements fall into a single bucket)
+\item Average case: $\tct{n + k}$
\end{itemize}
-\item \textbf{Space Complexity:} Requires \tct{n + k} additional space.
+\item \textbf{Space Complexity:} Requires $\tct{n + k}$ additional space.
\item \textbf{Limitations:} Performance depends on the choice of bucket size and distribution of input elements.
\end{itemize}
\end{properties}
@@ -99,9 +99,9 @@ The lecture does not cover the implementation of a heap tree. See the specific s
\end{itemize}
\item \textbf{Time Complexity:}
\begin{itemize}
-\item Insert: \tct{\log n}.
-\item Extract Min/Max: \tct{\log n}.
-\item Build Heap: \tct{n}.
+\item Insert: $\tct{\log n}$.
+\item Extract Min/Max: $\tct{\log n}$.
+\item Build Heap: $\tct{n}$.
\end{itemize}
\end{itemize}
\end{properties}
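The $\tct{n}$ bound for Build Heap is the one non-obvious entry; the standard level-sum argument is:
\begin{align*}
\sum_{h=0}^{\lfloor \log_2 n \rfloor} \left\lceil \frac{n}{2^{h+1}} \right\rceil \cdot \tco{h}
= \tco{n \cdot \sum_{h \geq 0} \frac{h}{2^{h+1}}} = \tco{n},
\end{align*}
since at height $h$ there are at most $\lceil n/2^{h+1} \rceil$ nodes, each sifted down in $\tco{h}$, and the series $\sum_{h \geq 0} h/2^{h+1} = 1$ converges.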

View File

@@ -9,11 +9,11 @@
\item \textbf{Efficiency:} Performs well on average and for in-place sorting but can degrade on specific inputs.
\item \textbf{Time Complexity:}
\begin{itemize}
-\item Best case: \tcl{n \log n}
-\item Worst case: \tco{n^2} (when the pivot is poorly chosen)
-\item Average case: \tct{n \log n}
+\item Best case: $\tcl{n \log n}$
+\item Worst case: $\tco{n^2}$ (when the pivot is poorly chosen)
+\item Average case: $\tct{n \log n}$
\end{itemize}
-\item \textbf{Space Complexity:} In-place sorting typically requires \tct{\log n} additional space for recursion.
+\item \textbf{Space Complexity:} In-place sorting typically requires $\tct{\log n}$ additional space for recursion.
\item \textbf{Limitations:} Performance depends heavily on pivot selection.
\end{itemize}
\end{properties}
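A minimal Lomuto-style partition sketch, the step whose pivot choice causes the best/worst-case split above (the lecture may use a different partition variant; presentation is mine):
\begin{algorithm}
\caption{\textsc{partition}$(A, l, r)$ (Lomuto sketch)}
\begin{algorithmic}[1]
\Procedure{Partition}{$A, l, r$} \Comment{Pivot $p = A[r]$; a consistently poor pivot yields the $\tco{n^2}$ case}
\State $p \gets A[r]$; $i \gets l - 1$
\For{$j \gets l$ to $r - 1$}
\If{$A[j] \leq p$}
\State $i \gets i + 1$; swap $A[i]$ and $A[j]$ \Comment{Grow the $\leq p$ prefix}
\EndIf
\EndFor
\State swap $A[i+1]$ and $A[r]$
\State \Return $i + 1$ \Comment{Pivot's final position; Quick Sort recurses on both sides}
\EndProcedure
\end{algorithmic}
\end{algorithm}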