mirror of
https://github.com/janishutz/eth-summaries.git
synced 2026-01-11 19:48:27 +00:00
[TI] Cleaner structure
This commit is contained in:
BIN
semester3/ti/cheatsheet/res/title.png
Normal file
BIN
semester3/ti/cheatsheet/res/title.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 110 KiB |
BIN
semester3/ti/cheatsheet/ti-cheatsheet.pdf
Normal file
BIN
semester3/ti/cheatsheet/ti-cheatsheet.pdf
Normal file
Binary file not shown.
749
semester3/ti/cheatsheet/ti-cheatsheet.tex
Normal file
749
semester3/ti/cheatsheet/ti-cheatsheet.tex
Normal file
@@ -0,0 +1,749 @@
|
||||
\documentclass{article}
|
||||
|
||||
% TODO: extract chapters into separate files
|
||||
|
||||
% Language setting
|
||||
\usepackage[english]{babel}
|
||||
|
||||
% Set page size and margins
|
||||
\usepackage[a4paper,top=1cm,bottom=1.5cm,left=1cm,right=1cm,marginparwidth=1.75cm]{geometry}
|
||||
|
||||
% Useful packages
|
||||
\usepackage{amsmath}
|
||||
\usepackage{amsfonts} % contains 'mathbb' among others
|
||||
\usepackage{amssymb} % subsetneq, among others
|
||||
\usepackage{wasysym} % \cent for TMs
|
||||
\usepackage{graphicx}
|
||||
\usepackage[colorlinks=true, allcolors=black]{hyperref}
|
||||
\usepackage{multicol}
|
||||
\usepackage{parskip} % Disables new paragraph indent
|
||||
\usepackage[T1]{fontenc}
|
||||
\usepackage{lmodern}
|
||||
|
||||
% Automata Theory relation symbols
|
||||
\usepackage{turnstile}
|
||||
|
||||
% MIPS excerpts
|
||||
\usepackage{listings}
|
||||
\usepackage{xcolor}
|
||||
|
||||
\renewcommand{\familydefault}{\sfdefault}
|
||||
% Make bold text a different color
|
||||
% \NewCommandCopy\oldtextbf\textbf
|
||||
% \renewcommand{\textbf}[1]{\textcolor{orange}{\oldtextbf{#1}}}
|
||||
|
||||
% General
|
||||
\def\iffdef{\overset{\text{def}}{\iff}}
|
||||
\def\sep{\ |\ }
|
||||
% Languages
|
||||
\def\rev{\text{R}}
|
||||
\def\bool{\Sigma_{\text{bool}}}
|
||||
\def\bools{(\Sigma_\text{bool})^*}
|
||||
\def\comp{\text{C}}
|
||||
% Finite Automata
|
||||
\def\Kl{\text{Kl}}
|
||||
\def\Num{\text{Num}}
|
||||
% Reductions
|
||||
\def\leqee{\leq_\text{EE}}
|
||||
\def\leqr{\leq_\text{R}}
|
||||
\def\leqp{\leq_\text{p}}
|
||||
% Language classes
|
||||
\def\Lr{\mathcal{L}_\text{R}}
|
||||
\def\Lre{\mathcal{L}_\text{RE}}
|
||||
\def\Lreg{\mathcal{L}_{\text{EA}}}
|
||||
|
||||
% Colors for Def/Lemma/Theorem/Note
|
||||
\definecolor{bisque}{rgb}{1.0, 0.89, 0.77}
|
||||
\definecolor{lightpastelpurple}{rgb}{0.69, 0.61, 0.85}
|
||||
\definecolor{lightmauve}{rgb}{0.86, 0.82, 1.0}
|
||||
\definecolor{pastelgreen}{rgb}{0.47, 0.87, 0.47}
|
||||
\definecolor{pastelblue}{rgb}{0.68, 0.78, 0.81}
|
||||
\definecolor{pastelred}{rgb}{1.0, 0.41, 0.38}
|
||||
|
||||
% \newcommand{\intuition}{\fcolorbox{white}{bisque}{\textbf{\textcolor{black}{Intuition:}}} }
|
||||
\newcommand{\Def}{\fcolorbox{white}{pastelblue}{\textbf{\textcolor{black}{Def:}}} }
|
||||
\newcommand{\Note}{\fcolorbox{white}{lightmauve}{\textbf{\textcolor{black}{Note:}}} }
|
||||
\newcommand{\Lemma}{\fcolorbox{white}{pastelgreen}{\textbf{\textcolor{black}{Lemma:}}} }
|
||||
\newcommand{\Theorem}{\fcolorbox{white}{pastelred}{\textbf{\textcolor{black}{Theorem:}}} }
|
||||
|
||||
% Force empty
|
||||
\title{}
|
||||
\author{}
|
||||
\date{}
|
||||
% Custom TOC header text, if needed
|
||||
\addto\captionsenglish{ \renewcommand*\contentsname{Contents} }
|
||||
|
||||
\begin{document}
|
||||
|
||||
\begin{titlepage}
|
||||
\begin{center}
|
||||
\vspace*{1cm}
|
||||
|
||||
\Huge
|
||||
\textbf{Theoretische Informatik}
|
||||
|
||||
\vspace{0.5cm}
|
||||
\LARGE
|
||||
Cheatsheet
|
||||
|
||||
\vspace{1.5cm}
|
||||
|
||||
\includegraphics[width=0.6\linewidth]{res/title.png}
|
||||
|
||||
\vfill
|
||||
\small\begin{multicols}{2}
|
||||
\tableofcontents
|
||||
\end{multicols}\normalsize
|
||||
|
||||
\vspace{0.8cm}
|
||||
|
||||
\end{center}
|
||||
\textbf{Einleitung}\\
|
||||
Der Sinn dieses Dokuments ist, alle Resultate und Definitionen schnell auffindbar an einem Ort zu haben, z.B. für Hausaufgaben. Dieses Dokument ist keine Zusammenfassung, enthält aber einige Kommentare und intuitive Erläuterungen (\color{gray}Text in grau\color{black}).\\
|
||||
Gute ausführliche Zusammenfassungen existieren bereits: z.B. die von Nicolas Wehrli, auf Community Solutions.
|
||||
|
||||
Wie immer: Keine Garantie auf Komplettheit (primär Inhalt für HS$25$ Mid/Enterm) oder Korrektheit.
|
||||
\vspace{0.5cm}
|
||||
|
||||
Robin Bacher\\
|
||||
\hrule
|
||||
\Large
|
||||
ETH Zürich, HS25\\\\
|
||||
\small
|
||||
Basierend auf:\\
|
||||
Theoretische Informatik, J. Hromkovic\\
|
||||
TheoInf Summary, N. Wehrli
|
||||
\end{titlepage}
|
||||
|
||||
\section{Formale Sprachen}
|
||||
|
||||
Grundlage, nötig für die Formalisierung von Algorithmen.
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def \textbf{Alphabet} $\Sigma \iffdef \Sigma$ endlich und $\Sigma \neq \emptyset$ \\
|
||||
$x \in \Sigma$ heisst Buchstabe, Zeichen, Symbol.
|
||||
|
||||
\Def \textbf{Wort} $w$ über $\Sigma \iffdef w = (x_1, \ldots, x_n)$ endlich, $x_i \in \Sigma$.\\
|
||||
$\Sigma^* := \{ w\ |\ w \text{ ist Wort über } \Sigma\}$\\
|
||||
$\lambda := w$ s.d. $|w| = 0$ (Leeres Wort)\\
|
||||
$\Sigma^+ := \Sigma^* \setminus \{\lambda\}$
|
||||
|
||||
\begin{tabular}{llcl}
|
||||
\textbf{Teilwort} & $v$ von $w$ &$\iffdef$& $\exists x,y \in \Sigma^*: w = xvy$\\
|
||||
\textbf{Präfix} & $v$ von $w$ &$\iffdef$& $\exists y \in \Sigma^*: w = vy$\\
|
||||
\textbf{Suffix} & $v$ von $w$ &$\iffdef$& $\exists x \in \Sigma^*: w = xv$
|
||||
\end{tabular}
|
||||
|
||||
\Note Notation: $x_1x_2\ldots x_n = (x_1,x_2,\ldots,x_n)$
|
||||
|
||||
$\text{Nummer}(x) := \sum_{i=1}^{n} x_i \cdot 2^{n-i}$.\\
|
||||
Bin$(m) := $ kürzeste Binärkodierung von $m$ in $\Sigma_{\text{Bool}}$.\\
|
||||
Bin$(0) := 0$
|
||||
|
||||
\Def \textbf{Konkatenation} Kon$(x,y) = x \cdot y = xy$\\
|
||||
$\forall w: \lambda \cdot w = w$\\
|
||||
Kon ist assoziativ
|
||||
|
||||
\Def \textbf{Reversal} Für $a \in \Sigma^*$: $a^\rev := a_na_{n-1}\ldots a_1$
|
||||
|
||||
\Def \textbf{Iteration} Für $x \in \Sigma^*$: $x^0 := \lambda, x^1 := x, x^i := xx^{i-1}$
|
||||
|
||||
\Def \textbf{Vorkommen} von $a$ in $w \in \Sigma^*$: $|w|_a := |\{i \ |\ w_i = a\}|$
|
||||
|
||||
\end{multicols}
|
||||
|
||||
|
||||
\Def \textbf{Kanonische Ordnung} von $\Sigma^*$: Sei $<$ eine Ordnung über $\Sigma$. $u,v \in \Sigma^*$. $x, u', v' \in \Sigma^*$ und $i < j$.
|
||||
$$ u < v \iff |u| < |v| \quad \lor \quad |u| = |v| \land u = x \cdot s_i \cdot u' \land v = x \cdot s_j \cdot v'$$
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def \textbf{Sprache} $L$ über $\Sigma \iffdef L \subset \Sigma^*$
|
||||
|
||||
\Def \textbf{Komplement} $L^\comp := \Sigma^* \setminus L$
|
||||
|
||||
\Def \textbf{Konkatenation} von $L_1,L_2$: $L_1 \cdot L_2 := \{ vw \sep v \in L_1 \land w \in L_2 \}$,\quad $L^0 := L_\lambda,\ L^i := L^{i-1}\cdot L$
|
||||
|
||||
\Def \textbf{Kleene'scher Stern} $L^* := \underset{i \geq 0}{\bigcup} L^i, \quad L^+ := L^* \setminus \{\lambda\}$
|
||||
|
||||
\end{multicols}
|
||||
|
||||
\Lemma Die Konkatenation ist distributiv über $\cup$:
|
||||
$$L_1L_2 \cup L_1L_3 = L_1(L_2 \cup L_3)$$
|
||||
|
||||
\subsection{Algorithmische Probleme}
|
||||
TODO
|
||||
|
||||
\subsection{Kolmogorov Komplexität}
|
||||
Ziel: Komprimierung von Wörtern, Schliessen auf Informationsdichte basierend auf Komprimierbarkeits-schwierigkeit.
|
||||
|
||||
\Def \textbf{Kolmogorov Komplexität} $\forall x \in \bool^*: K(x) :=$ kürzestes \verb|Pascal|-Programm für $x$.\\
|
||||
\scriptsize\color{gray}
|
||||
Vermeidet die Festlegung auf einen spezifischen Komprimieralgorithmus. Buch beweist ebenfalls, dass \verb|Pascal| hier \textit{keine} Einschränkung ist.
|
||||
\normalsize\color{black}
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def \textbf{$K(x)$ von natürlichen Zahlen}: $K(n) := K(\text{Bin}(n))$
|
||||
|
||||
$\text{Bin}(|x|)$ hat Länge $\lceil \log_2(|x|+1) \rceil$
|
||||
\newcolumn
|
||||
|
||||
\Lemma $\exists d\ \forall x \in \bool^*: \quad K(x) \leq |x| + d$
|
||||
|
||||
\Lemma $\forall n \geq 1\ \exists w_n \in \bool^n:\quad K(w_n) \geq |w_n| = n$\\
|
||||
\footnotesize\color{gray}
|
||||
Intuitiv: Es existieren unkomprimierbare $w$ jeder Länge.
|
||||
\normalsize\color{black}
|
||||
|
||||
\end{multicols}
|
||||
|
||||
\subsection{Anwendungen der Kolmogorov Komplexität}
|
||||
|
||||
\Def \textbf{Zufällig} $\iffdef x \in \bool^*$ erfüllt $K(x) \geq |x|$\\
|
||||
$n \geq 0$ ist zufällig $\iffdef K(n) = K(\text{Bin}(n)) \geq \lceil \log_2(n+1) \rceil - 1$\\
|
||||
\scriptsize\color{gray}
|
||||
Diese Definition hat intuitiv nichts mit dem Zufallsbegriff aus der Wahrscheinlichkeit zu tun, hier geht es um den Informationsgehalt.
|
||||
\normalsize\color{black}
|
||||
|
||||
\Theorem \textbf{(2.2)} $\exists \text{ Programm } A_L \text{ welches } (\bool, L) \text{ löst } \implies \forall n \geq 1: K(z_n) \leq \lceil \log_2(n+1) \rceil + c$\\
|
||||
\footnotesize\color{gray}
|
||||
$L \subset \bool^*$. $z_n := n$-tes Wort bzgl. kan. Ordnung. $(\bool, L)$ ist ein Entscheidungsproblem.\\
|
||||
Vereinfacht häufig Beweise zur Kolmogorov Komplexität stark.
|
||||
\normalsize\color{black}
|
||||
|
||||
\Theorem $\underset{n \to \infty}{\lim} \frac{\text{Prim}(n)}{n / \ln(n)} = 1$\\
|
||||
\scriptsize\color{gray}
|
||||
Prim$(x) :=$ Anzahl Primzahlen kleiner $x$. Intuitiv: Anzahl Primzahlen wächst gleich schnell wie Anzahl Zahlen.
|
||||
\normalsize\color{black}
|
||||
|
||||
\newpage
|
||||
\section{Endliche Automaten (EA)}
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def \textbf{Endlicher Automat} $M := (Q,\Sigma, \delta, q_0, F)$
|
||||
|
||||
\begin{tabular}{llcl}
|
||||
\textbf{Zustände}& $Q$ \textit{(endlich)} \\
|
||||
\textbf{Eingabealphabet}& $\Sigma$ \textit{(Alphabet)} \\
|
||||
\textbf{Anfangszustand}& $q_0 \in Q$ \\
|
||||
\textbf{Akzeptierende Zustände}& $F \subseteq Q$\\
|
||||
\textbf{Übergangsfunktion}& $\delta: Q \times \Sigma \to Q$
|
||||
\end{tabular}
|
||||
|
||||
$\delta(q, a) = p \implies$ im Zustand $q$ bei Eingabe $a$, gehe zu $p$.
|
||||
|
||||
Darstellungsformen: \verb|goto|-Programm, gerichteter Graph
|
||||
|
||||
\Def \textbf{Konfiguration} von $M$: $(q, w) \in Q \times \Sigma^*$\\
|
||||
\footnotesize\color{gray}
|
||||
Intuitiv: M hat Zustand $q$ und liest noch den Suffix $w$
|
||||
\normalsize\color{black}
|
||||
|
||||
\newcolumn
|
||||
|
||||
$(q,w)$ Endkonfiguration $\iffdef (q,w) \in Q \times \{\lambda\}$
|
||||
|
||||
\Def \textbf{Schritt} $:=\ \sststile{M}{}\ \subseteq (Q \times \Sigma^*) \times (Q \times \Sigma^*)$\\
|
||||
wobei $(q,w)\ \sststile{M}{}\ (p,x) \iffdef w = ax \land a \in \Sigma \land \delta(q,a) = p$\\
|
||||
\footnotesize\color{gray}
|
||||
Intuitiv: Übergangsfunktion auf $M$ im Zustand $q$ anwenden, bei $a$.
|
||||
\normalsize\color{black}
|
||||
|
||||
\Def \textbf{Berechnung} $C := C_0,\ldots,C_n$ s.d. $C_i$ Konfiguration\\
|
||||
wobei $\forall i \leq n-1: C_i\ \sststile{M}{}\ C_{i+1}$ gilt
|
||||
|
||||
\end{multicols}
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def \textbf{Akzeptierte Sprache} $L(M) := \{w \in \Sigma^*\ |\ \hat{\delta}(q_0,w) \in F\}$\\
|
||||
\footnotesize\color{gray}
|
||||
Intuitiv: Menge aller Wörter, die $M$ akzeptiert
|
||||
\normalsize\color{black}
|
||||
|
||||
Klasse regulärer Sprachen: $\mathcal{L}_{EA} := \{L(M)\ |\ \text{M ist EA}\}$
|
||||
|
||||
\Def \textbf{Klasse} $\text{Kl}[p] := \{w \in \Sigma^* \ |\ \hat{\delta}(q_0,w) = p\}$\\
|
||||
\footnotesize\color{gray}
|
||||
Klassen bilden eine Partition von $\Sigma^*$. Ähnlich zu Äquivalenzklassen aus DM.
|
||||
\normalsize\color{black}
|
||||
|
||||
\Lemma $L(M) = \underset{p \in F}{\bigcup} \text{Kl}[p]$
|
||||
|
||||
\newcolumn
|
||||
|
||||
\Def \textbf{Relationen} bzgl. endlichen Automaten
|
||||
|
||||
$(q,w)\ \sststile{M}{*}\ (p,u) \iffdef \exists $ Berechnung in $M$ von $(q,w)$ zu $(p,u)$.\\
|
||||
\footnotesize\color{gray}
|
||||
Die formelle Definition ist sehr lang. (S.54)
|
||||
\normalsize\color{black}
|
||||
|
||||
$\hat{\delta}(q,w) = p \iffdef (q,w)\ \sststile{M}{*}\ (p, \lambda)$\\
|
||||
\footnotesize\color{gray}
|
||||
Intuitiv: Wenn $M$ in Zustand $q$ Wort $w$ liest, endet $M$ in $p$
|
||||
\normalsize\color{black}
|
||||
\\
|
||||
|
||||
\Lemma $\forall \odot \in \{\cup,\cap,- \}\ \exists M:\quad L(M) = L(M_1) \odot L(M_2)$\\
|
||||
\footnotesize
|
||||
Für alle EA $M_1,M_2$ über $\Sigma$\\
|
||||
\scriptsize\color{gray}
|
||||
Wegen diesem Lemma ist es möglich, EAs aus Teilautomaten zu bauen
|
||||
\normalsize\color{black}
|
||||
\newcolumn
|
||||
\end{multicols}
|
||||
|
||||
\subsection{Irregularität beweisen}
|
||||
Einige Ansätze um Aussagen der Art $L \notin \mathcal{L}_{\text{EA}}$ zu beweisen:
|
||||
|
||||
\Lemma \textbf{(Direkt via Zustände)} Sei $A = (Q, \Sigma, \delta_A, q_0, F)$, $x \neq y \in \Sigma^*$:
|
||||
\begin{align*}
|
||||
\exists p \in Q:\ \underbrace{(q_0, x) \ \sststile{A}{*}\ (p, \lambda)\ \land\ (q_0, y)\ \sststile{A}{*}\ (p, \lambda)}_{\hat{\delta}_A(q_0, x)\ =\ \hat{\delta}_A(q_0,y)\ =\ p \text{ und } x,y\ \in\ \Kl[p] } \implies \forall z \in \Sigma^*:\quad xz \in L(A) \iff yz \in L(A)
|
||||
\end{align*}
|
||||
\footnotesize\color{gray}
|
||||
Intuitiv: Wenn man für zwei (auch unterschiedliche!) Eingaben die selbe Konfiguration erreicht, ist der weitere Verlauf identisch. Dieses Lemma formalisiert die intuitiv klare "Gedächtnislosigkeit" von EAs, d.h. dass ein EA keinen Speicher (ausser dem aktuellen Zustand) besitzt.
|
||||
\normalsize\color{black}
|
||||
|
||||
\Lemma \textbf{(Pumping)} für $L \in \Lreg$:$\quad \exists n_0 \in \mathbb{N}$ s.d. $\forall w \in \Sigma^*$ mit $|w| \geq n_0: \ \exists x,y,z: w = yxz$ und:
|
||||
\begin{align*}
|
||||
(i)\ & |yx| \leq n_0 \quad\quad (ii)\ |x| \geq 1 \\
|
||||
(iii)\ & \{yx^kz \ |\ k \in \mathbb{N}\} \subseteq L \text{ oder } \{yx^kz \ |\ k \in \mathbb{N}\} \cap L = \emptyset
|
||||
\end{align*}
|
||||
\footnotesize\color{gray}
|
||||
Intuitiv: Alle Wörter länger als $n_0$ lassen sich als $w=yxz$ zerlegen: wenn $w$ (nicht) akzeptiert wird müssen alle anderen $w_k=yx^kz$ auch (nicht) akzeptiert werden. Ein $n_0$ welches diese Zerlegung erlaubt existiert immer, wenn $L$ regulär ist.
|
||||
\normalsize\color{black}
|
||||
|
||||
\Theorem \textbf{(Kolmogorov)} Sei $L \subseteq (\bool)^*$ regulär. Sei $L_x = \{y \in (\bool)^* \ |\ xy \in L \}$, $y_n$ das $n$-te Wort in $L_x$
|
||||
\begin{align*}
|
||||
\exists c\quad \forall x \in (\bool)^*\ \forall n \in \mathbb{N}: \quad K(y_n) \leq \lceil \log_2(n+1) \rceil + c
|
||||
\end{align*}
|
||||
\footnotesize\color{gray}
|
||||
Intuitiv: Suffixe von Wörtern einer regulären Sprache besitzen eine kleine Kolmogorov-Komplexität. Man versucht meistens eine unendliche Menge unterschiedlicher $y_1$ zu finden, was dann einen Widerspruch bildet zu diesem Satz.
|
||||
\normalsize\color{black}
|
||||
|
||||
\subsection{Nicht-deterministische endliche Automaten (NEA)}
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def \textbf{NEA} $M = (Q, \Sigma, \delta, q_0, F)$
|
||||
|
||||
\begin{tabular}{llcl}
|
||||
\textbf{Übergangsfunktion}& $\delta: Q \times \Sigma \to \mathcal{P}(Q)$
|
||||
\end{tabular}\\
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: $\delta$ gibt alle möglichen Zustände, statt nur Einen.
|
||||
\color{black}\normalsize
|
||||
|
||||
\Def \textbf{Relationen} bzgl. NEAs
|
||||
|
||||
$\hat{\delta}(q, \lambda) := \{q\}\quad\forall q \in Q$\\
|
||||
$\hat{\delta}(q, wa) := \{ p \in Q \sep \exists r \in \hat{\delta}(q, w): p \in \delta(r, a)\}$\\
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: $\hat{\delta}$ gibt alle möglichen Endzustände, statt nur Einen.
|
||||
\color{black}\normalsize
|
||||
|
||||
\newcolumn
|
||||
|
||||
\Def \textbf{Akzeptierte Sprache} in NEAs
|
||||
$$ L(M_{\text{NEA}}) := \{w \in \Sigma^* \sep \hat{\delta}(q_0, w) \cap F \neq \emptyset\} $$
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: Alle Wörter mit möglichen Berechnungsweg zu $q \in F$.\\
|
||||
D.h. Akzeptierte Wörter müssen nicht immer akzeptiert werden.
|
||||
\color{black}\normalsize
|
||||
|
||||
\Theorem \textbf{(3.2) Potenzmengen Konstruktion}
|
||||
$$\mathcal{L}_{\text{EA}} = \mathcal{L}_{\text{NEA}}$$
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: Für jeden NEA gibt es einen äquivalenten EA.
|
||||
\color{black}\normalsize
|
||||
|
||||
|
||||
\end{multicols}
|
||||
|
||||
|
||||
\newpage
|
||||
\section{Turing-Maschinen}
|
||||
Eine Formalisierung des Begriffs "Algorithmus".
|
||||
|
||||
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def \textbf{Turing Maschine} $ M := (Q, \Sigma, \Gamma, \delta, q_0, q_\text{accept}, q_\text{reject})$
|
||||
|
||||
\begin{tabular}{llcl}
|
||||
\textbf{Zustände}& $Q$ \textit{(endlich)} \\
|
||||
\textbf{Eingabealphabet}& $\Sigma$ \textit{(Alphabet)} \\
|
||||
\textbf{Arbeitsalphabet}& $\Gamma$ s.d. $\Sigma \subset \Gamma, \Gamma \cap Q = \emptyset$\\
|
||||
\textbf{Anfangszustand}& $q_0 \in Q$ \\
|
||||
\textbf{Akzeptierender/verwerfender Zustand}& $q_\text{accept}, q_\text{reject} \in Q$\\
|
||||
\textbf{Übergangsfunktion}& $\delta: (Q \setminus \{q_\text{accept}, q_\text{reject}\}) \times \Gamma \to Q \times \Gamma \times \{\text{L}, \text{R}, \text{N}\}$
|
||||
\end{tabular}
|
||||
|
||||
\newcolumn
|
||||
|
||||
\Def $\textbf{Konf}(M) := \{\cent\} \cdot \Gamma^* \cdot Q \cdot \Gamma^+ \cup Q \cdot \{\cent\} \cdot \Gamma^* $
|
||||
|
||||
\small
|
||||
\textbf{Beispiel:} $\cent w_1 qa w_2 \in \text{Konf}(M)$ heisst:\\
|
||||
$M$ in Zustand $q$, hat Kopf bei $|w_1|+1$ auf $a$. Bandinhalt: $\cent w_1 a w_2$
|
||||
\normalsize
|
||||
|
||||
\Def \textbf{Äquivalenz} TMs $A,B$ s.d. $\Sigma_A = \Sigma_B$:
|
||||
\begin{enumerate}
|
||||
\item $x \in L(A) \iff x \in L(B)$
|
||||
\item $A$ hält nicht auf $x \iff B$ hält nicht auf $x$
|
||||
\end{enumerate}
|
||||
\color{gray}\footnotesize
|
||||
D.h. $\lnot(L(A) = L(B) \implies A, B$ äquivalent$)$.
|
||||
\color{black}\normalsize
|
||||
|
||||
\end{multicols}
|
||||
|
||||
\Theorem \textbf{(Church'sche These)} Turing-Maschinen formalisieren tatsächlich das intuitive Konzept "Algorithmus".\\
|
||||
\color{gray}\footnotesize
|
||||
Paraphrasiert bedeutet dies, dass das Modell der TMs (vermutlich) alle möglichen Algorithmen abbildet.
|
||||
\color{black}\normalsize
|
||||
|
||||
|
||||
\subsection{Mehrband Turing-Maschinen}
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def \textbf{Mehrband TMs}
|
||||
\begin{enumerate}
|
||||
\item Endliche Kontroll-logik
|
||||
\item Endliches Eingabeband
|
||||
\item $k$ nach rechts unendliche Arbeitsbänder
|
||||
\end{enumerate}
|
||||
\color{gray}\footnotesize
|
||||
Die Formelle Definition im Skript ist sehr lang.\\
|
||||
Intuitiv bleiben alle Definitionen gleich, akzeptieren aber nun $k$ Bänder.
|
||||
\color{black}\normalsize
|
||||
|
||||
\newcolumn
|
||||
|
||||
\Lemma $\forall$ TM $A: \exists $ MTM $B$ s.d. $A,B$ äquivalent.\\
|
||||
\Lemma $\forall$ MTM $B: \exists$ TM $A$ s.d. $A, B$ äquivalent.
|
||||
|
||||
\Theorem TMs und MTMs sind äquivalente Modelle.\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. es existiert immer eine äquivalente Maschine im jeweils anderen Modell.
|
||||
\color{black}\normalsize
|
||||
|
||||
\end{multicols}
|
||||
|
||||
|
||||
\subsection{Nicht-deterministische TMs}
|
||||
|
||||
Definitionen analog zu TMs, $w \in L(\text{NTM})$ falls \textit{irgendeine} akzeptierende Berechnung existiert.
|
||||
|
||||
\Theorem $\forall $ NTM $M: \exists$ TM $A$ s.d.
|
||||
\begin{enumerate}
|
||||
\item $L(M) = L(A)$
|
||||
\item $A$ hält immer falls $M$ keine unendlichen Berechnungen hat
|
||||
\end{enumerate}
|
||||
\color{gray}\footnotesize
|
||||
D.h. auch NTMs sind konzeptuell äquivalent zu regulären TMs.
|
||||
\color{black}\normalsize
|
||||
|
||||
|
||||
\subsection{Sprach-Klassen}
|
||||
|
||||
Relevant für Kapitel $5$.
|
||||
|
||||
\Def \textbf{Rekursiv aufzählbar} $\Lre := \{ L(M) \sep M \text{ ist TM } \}$
|
||||
|
||||
\Def \textbf{Rekursiv entscheidbar} $\Lr := \{ L(M) \sep M \text{ ist TM, hält immer } \}$
|
||||
|
||||
\Lemma $L \in \Lre \land L^\comp \in \Lre \iff L \in \Lr$\\
|
||||
\color{gray}\footnotesize
|
||||
Sehr nützlich für Beweise der Form $L \notin \Lre$.
|
||||
\color{black}\normalsize
|
||||
|
||||
\newpage
|
||||
|
||||
\section{Berechenbarkeit}
|
||||
Methoden zur Klassifizierung Algorithmischer Lösbarkeit.
|
||||
|
||||
\subsection{Diagonalisierung}
|
||||
DM Repetition.
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
|
||||
\Def \textbf{Mächtigkeit}
|
||||
|
||||
\begin{tabular}{lcl}
|
||||
$|A| \leq |B|$ & $\iffdef$ & $\exists f : A \to B$ injektiv \\
|
||||
$|A| = |B|$ & $\iffdef$ & $|A| \leq |B| \land |B| \leq |A|$ \\
|
||||
$|A| < |B|$ & $\iffdef$ & $|A| \leq |B| \land \lnot |B| \leq |A|$
|
||||
\end{tabular}
|
||||
|
||||
\Lemma $A \subset B \implies |A| \leq |B|$
|
||||
|
||||
\Lemma $\leq$ ist Transitiv.
|
||||
|
||||
\Def \textbf{Abzählbarkeit} $\iffdef |A| = |\mathbb{N}| \lor A$ endlich
|
||||
|
||||
\Lemma $\forall \Sigma: \Sigma^*$ ist abzählbar\\
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: Da $\Sigma$ endlich ist.
|
||||
\color{black}\normalsize
|
||||
|
||||
Weitere abzählbare Mengen: $\mathbb{Z}, \mathbb{N}^k, \mathbb{Q}, \text{KodTM}$
|
||||
|
||||
Überabzählbare Mengen: $\mathbb{R}, [0,1], \mathcal{P}((\bool)^*)$
|
||||
|
||||
\Theorem $|\text{KodTM}| < |\mathcal{P}((\bool)^*)|$\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. es existieren unendlich viele nicht rekursiv aufzählbare Sprachen.
|
||||
\color{black}\normalsize
|
||||
|
||||
\end{multicols}
|
||||
|
||||
\subsection{Reduktion}
|
||||
Ansatz für Beweise von Aussagen der Form $L \in \Lr$ oder $L \notin \Lr$.
|
||||
|
||||
\Def \textbf{Rekursive Reduzierbarkeit}\\
|
||||
$L_1 \leqr L_2 \iffdef L_2 \in \mathcal{L}_\text{R} \implies L_1 \in \mathcal{L}_\text{R}$\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. $L_2$ zu lösen, bedeutet auch $L_1$ zu lösen.
|
||||
\color{black}\normalsize
|
||||
|
||||
\Def \textbf{Eingabe-zu-Eingabe Reduzierbarkeit}\\
|
||||
$L_1 \leqee L_2 \iffdef \exists M \text{ (TM)}: \exists f_M: \Sigma^*_1 \to \Sigma^*_2$ s.d. $x \in L_1 \iff f_M(x) \in L_2$\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. Es existiert eine TM $M$, die eine Abbildung $f_M$ darstellt, mit welcher man $L_1$ via $L_2$ direkt bestimmen kann.
|
||||
\color{black}\normalsize
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Lemma $L_1 \leqee L_2 \implies L_1 \leqr L_2$\\
|
||||
\color{gray}\footnotesize
|
||||
Gilt nicht umgekehrt!
|
||||
\color{black}\normalsize
|
||||
|
||||
\Lemma $\leqee$ ist Transitiv.
|
||||
|
||||
\Lemma $\forall L \subseteq \Sigma^*: L \leqr L^\comp\ \land\ L^\comp \leqr L$\\
|
||||
\color{gray}\footnotesize
|
||||
Also $L \in \Lr \iff L^\comp \in \Lr$
|
||||
\color{black}\normalsize
|
||||
|
||||
\Lemma $\mathcal{L}_\text{R} \subsetneq \mathcal{L}_\text{RE}$
|
||||
|
||||
\Def \textbf{Universelle Sprache}
|
||||
|
||||
$L_\text{U} := \{ \text{Kod}(M)\#w \sep w \in \bool^* \land w \in L(M) \}$
|
||||
|
||||
\Theorem $L_\text{U} \in \mathcal{L}_\text{RE}$ aber $L_\text{U} \notin \mathcal{L}_\text{R}$\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. man kann nicht (in endlicher Zeit) prüfen, ob $M$ ein Wort $w$ akzeptiert.
|
||||
\color{black}\normalsize
|
||||
|
||||
\newcolumn
|
||||
|
||||
\Def \textbf{Halteproblem}
|
||||
|
||||
$L_\text{H} := \{\text{Kod}(M)\#x \sep x \in \bool^* \land M \text{ hält auf } x\}$
|
||||
|
||||
\Theorem $L_\text{H} \notin \mathcal{L}_\text{R}$\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. man kann nie wissen, ob eine Turingmaschine anhalten wird.
|
||||
\color{black}\normalsize
|
||||
|
||||
\Def $L_\text{Empty} := \{ \text{Kod}(M) \sep L(M) = \emptyset \}$
|
||||
|
||||
\Theorem $(L_\text{Empty})^\comp \in \mathcal{L}_\text{RE}$ aber $(L_\text{Empty})^\comp \notin \mathcal{L}_\text{R}$
|
||||
|
||||
\Def \textbf{Äquivalenzproblem}
|
||||
|
||||
$L_\text{EQ} = \{ \text{Kod}(M)\#\text{Kod}(\overline{M}) \sep L(M) = L(\overline{M}) \}$
|
||||
|
||||
\Theorem $L_\text{EQ} \notin \mathcal{L}_\text{R}$\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. man kann nicht 2 TMs auf Äquivalenz prüfen, in endlicher Zeit.
|
||||
\color{black}\normalsize
|
||||
|
||||
TODO: reformat Def, Theorems as a table for languages
|
||||
|
||||
\end{multicols}
|
||||
|
||||
\subsection{Rice}
|
||||
Ansatz für Beweise von Aussagen der Form $L \notin \Lr$, für $L \subseteq \text{KodTM}$.
|
||||
|
||||
\Def \textbf{Semantisch nicht-triviales Entscheidungsproblem über TMs}
|
||||
$$L \subseteq \text{KodTM}: \underbrace{(\exists \text{ TM } M_1: \text{Kod}(M_1) \in L)}_{L \neq \emptyset} \land \underbrace{(\exists \text{ TM } M_2 : \text{Kod}(M_2) \notin L)}_{L \neq \text{KodTM}} \land \underbrace{(\forall \text{ TM } A,B: L(A) = L(B) \implies (A \in L \iff B \in L))}_{L \text{ behandelt semantisch gleiche TMs gleich}}$$
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def $L_{\text{H, } \lambda} := \{\text{Kod}(M) \ |\ M \text{ hält auf } \lambda \}$
|
||||
|
||||
\Theorem $L_{\text{H}, \lambda} \notin \mathcal{L}_\text{R}$
|
||||
|
||||
\end{multicols}
|
||||
|
||||
\Theorem \textbf{Satz von Rice}: \text{ Alle sem. nicht-triv. Probleme } $L$ \text{ sind unentscheidbar}. ($L \notin \Lr$)\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. es reicht aus zu zeigen, dass $L \subseteq \text{KodTM}$ die Bedingungen oben erfüllt, um $L \notin \Lr$ zu zeigen.
|
||||
\color{black}\normalsize
|
||||
|
||||
|
||||
\subsection{Kolmogorov}
|
||||
|
||||
\Theorem \textbf{Unlösbarkeit von Kolmogorov}: Das Problem, $\forall x \in \bools$ die Komplexität $K(x)$ zu berechnen, ist unlösbar.\\
|
||||
\color{gray}\footnotesize
|
||||
Ein Alternativer Ansatz um Unlösbarkeit zu zeigen, unabhängig von Diagonalisierung.
|
||||
\color{black}\normalsize
|
||||
\newpage
|
||||
\section{Komplexität}
|
||||
Eine Formalisierung der "Schwierigkeit" von Algorithmisch lösbaren Problemen.
|
||||
|
||||
\subsection{Zeit \& Speicher}
|
||||
\Def $\textbf{Time}_\textbf{M}(x) := k - 1$
|
||||
|
||||
\Def $\textbf{Time}_\textbf{M}(n) := \text{max}\{ \text{Time}_\text{M}(x) \sep x \in \Sigma^n \}$\\
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: Die Komplexität im Schlechtesten Fall einer Eingabe der Länge $n$
|
||||
\color{black}\normalsize
|
||||
|
||||
$\text{Wobei: } M \text{ immer hält, } x \in \Sigma^* \text{ und } D = C_1C_2\ldots C_k \text{ Die Berechnung von } M \text{ auf } x $
|
||||
|
||||
\Def $\textbf{Space}_\textbf{M}(C) := \text{max}\{ |\alpha_i| \sep i=1,\ldots,k \}$\\
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: Die Länge des längsten Arbeitsbandes in $M$, bei der Konfiguration $C$.
|
||||
\color{black}\normalsize
|
||||
|
||||
\Def $\textbf{Space}_\textbf{M}(x) := \text{max}\{ \text{Space}_M(C_i) \sep i = 1,\ldots,l \}$
|
||||
|
||||
\Def $\textbf{Space}_\textbf{M}(n) := \text{max}\{ \text{Space}_M(x) \sep x \in \Sigma^n \}$
|
||||
|
||||
Wobei $M$ eine $k$-Band MTM, $C=(q, x, i, \alpha_1, i_1\ldots, \alpha_k, i_k)$ eine Konfiguration.
|
||||
|
||||
\Lemma $\forall\ k$-MTM $A: \exists\ \text{äquivalente } 1$-MTM $B: \text{Space}_B(n) \leq \text{Space}_A(n)$
|
||||
|
||||
\Lemma $\forall\ k$-MTM $A: \exists \text{ äquivalente } k$-MTM $B: \text{Space}_B(n) \leq \frac{\text{Space}_A(n)}{2} + 2$\\
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: Die Speicherkomplexität von $M$ lässt sich für jedes $d \in \mathbb{N}$ um den Faktor $d$ verkleinern. Das selbe gilt für $\text{Time}_M(n)$.
|
||||
\color{black}\normalsize
|
||||
|
||||
\Theorem $\exists (L, \bool)\ \forall \text{ MTM } A$ s.d. $L(A) = L$: $\exists \text{ MTM } B$ s.d. $L(B) = L$ und $\text{Time}_B(n) \leq \log_2(\text{Time}_A(n))$\\
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: Es gibt Probleme, wobei wir einen Lösungsalgorithmus unendlich oft signifikant verbessern können. D.h. macht es keinen Sinn allgemein von einem "Besten Algorithmus" für ein Problem zu reden.
|
||||
\color{black}\normalsize
|
||||
|
||||
\subsection{O-Notation}
|
||||
|
||||
\Def $\mathcal{O}(f(n)) := \{ r: \mathbb{N} \to \mathbb{R}^+ \sep \exists n_0 \in \mathbb{N}, \exists c \in \mathbb{N} \text{ s.d. } \forall n \geq n_0:\quad r(n) \leq c \cdot f(n) \}$\\
|
||||
\Def $\Omega(f(n)) := \{ r: \mathbb{N} \to \mathbb{R}^+ \sep \exists n_0 \in \mathbb{N}, \exists c \in \mathbb{N} \text{ s.d. } \forall n \geq n_0:\quad r(n) \geq \frac{1}{c}\cdot f(n) \}$\\
|
||||
\Def $\Theta(f(n)) := \mathcal{O}(f(n))\ \cap\ \Omega(f(n))$\\
|
||||
\Def $\text{o}(f(n)) := \{ r: \mathbb{N} \to \mathbb{R}^+ \sep \underset{n \to \infty}{\lim} \frac{r(n)}{f(n)} = 0\}$\\
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: $f$ wächst asymptotisch schneller als $r$.
|
||||
\color{black}\normalsize
|
||||
|
||||
\subsection{Komplexitätsklassen}
|
||||
$f,g: \mathbb{N} \to \mathbb{R}^+$
|
||||
|
||||
\begin{multicols}{2}
|
||||
\Def $\textbf{TIME}(f) := \{ L(M) \sep M \text{ s.d. } \text{Time}_M(n) \in \mathcal{O}(f(n)) \}$\\
|
||||
\Def $\textbf{SPACE}(f) := \{ L(M) \sep M \text{ s.d } \text{Space}_M(n) \in \mathcal{O}(f(n))) \}$\\
|
||||
\Def $\textbf{P} := \underset{c \in \mathbb{N}}{\bigcup} \text{TIME}(n^c)$\\
|
||||
\Def $\textbf{PSPACE} := \underset{c \in \mathbb{N}}{\bigcup} \text{SPACE}(n^c)$\\
|
||||
\Def $\textbf{EXPTIME} := \underset{d \in \mathbb{N}}{\bigcup} \text{TIME}(2^{n^d})$
|
||||
\end{multicols}
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Lemma $\forall t: \mathbb{N} \to \mathbb{R}^+:\quad \text{TIME}(t(n)) \subseteq \text{SPACE}(t(n))$
|
||||
|
||||
\Lemma DLOG $\subseteq$ P $\subseteq$ PSPACE $\subseteq$ EXPTIME
|
||||
|
||||
\end{multicols}
|
||||
|
||||
TODO: Konstruierbarkeit
|
||||
|
||||
\subsection{Nicht-deterministische Komplexität}
|
||||
$M :=$ Nicht deterministische (M)TM. $C = C_1\ldots C_m$ ist eine akzeptierende Berechnung auf $x$.
|
||||
|
||||
\Def $\textbf{Time}_M(x) := $ Länge kürzester akzept. Berechnung für $x$.\\
|
||||
\Def $\textbf{Time}_M(n) := \max(\{ \text{Time}_M(x) \sep x \in L(M) \land |x|=n \} \cup \{0\})$
|
||||
|
||||
\Def $\textbf{Space}_M(C) := \max\{ \text{Space}_M(C_i) \sep i \leq m \}$\\
|
||||
\Def $\textbf{Space}_M(x) := \min\{ \text{Space}_M(C) \sep C \text{ akzeptiert } x \}$\\
|
||||
\Def $\textbf{Space}_M(n) := \max(\{ \text{Space}_M(x) \sep x \in L(M) \land |x|=n\} \cup \{0\})$
|
||||
|
||||
\Def \textbf{Komplexitätsklassen}: \textbf{NTIME}, \textbf{NSPACE}, \textbf{NLOG}, \textbf{NP}, \textbf{NPSPACE} analog zur deterministischen Definition.
|
||||
|
||||
\Theorem \textbf{NP} $=$ \textbf{VP} (Polynomielle Verifizierer)\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. ein polynomieller Verifizierer für $L$ beweist direkt, dass $L$ in \textbf{NP} ist.
|
||||
\color{black}\normalsize
|
||||
|
||||
\newpage
|
||||
\subsection{NP-Vollständigkeit}
|
||||
Unter der Annahme: $P \subsetneq NP$, kann man Beweise der Form $L \notin P$ machen.
|
||||
|
||||
\Def \textbf{Polynomielle Reduzierbarkeit}\\
|
||||
$L_1 \leqp L_2 \iffdef \exists \text{ polynomielle } M \text{ s.d. } \forall x \in (\Sigma_1)^*:\quad x \in L_1 \iff M(x) \in L_2$\\
|
||||
\color{gray}\footnotesize
|
||||
Intuitiv: EE-Reduktion, muss aber polynomielle Zeitkomplexität haben.
|
||||
\color{black}\normalsize
|
||||
|
||||
\begin{multicols}{2}
|
||||
|
||||
\Def \textbf{NP-Schwer} $L$ s.d. $\forall L' \in \text{NP}: L' \leqp L$\\
|
||||
\color{gray}\footnotesize
|
||||
$L$ NP-Schwer bedeutet \textit{nicht}, dass $L$ in NP ist.
|
||||
\color{black}\normalsize
|
||||
|
||||
\Def \textbf{NP-Vollständigkeit} $L$ s.d. $L \in \text{NP}$ und NP-Schwer
|
||||
|
||||
\end{multicols}
|
||||
|
||||
\Lemma $(\exists L:\ L \in \text{P} \land L \text{ NP-Schwer}) \implies \text{P} = \text{NP}$\\
|
||||
\color{gray}\footnotesize
|
||||
Ein NP-Schweres Problem polynomiell zu lösen bedeutet alle NP-Probleme polynomiell zu lösen.
|
||||
\color{black}\normalsize
|
||||
|
||||
\Lemma $L_1 \leqp L_2 \implies\quad ( L_1 \text{ NP-schwer } \implies L_2 \text{ NP-schwer} ) $\\
|
||||
\color{gray}\footnotesize
|
||||
D.h. Mit P-Reduktionen kann man beweisen, dass $L_2$ NP-Schwer ist.
|
||||
\color{black}\normalsize
|
||||
|
||||
\Def $\textbf{SAT} := \{ x \in (\Sigma_\text{logic})^* \sep x \text{ kodiert erfüllbare Formel in KNF} \}$
|
||||
|
||||
\Theorem \textbf{(Cook)} SAT ist NP-vollständig.\\
|
||||
\color{gray}\footnotesize
|
||||
Der Beweis ist sehr lang. Im Endeffekt bedeutet dies, Boole'sche Formeln sind enorm ausdrucksstark.
|
||||
\color{black}\normalsize
|
||||
|
||||
Weitere NP-Schwere Probleme: SAT-Variationen (3SAT, E3SAT), Clique, Vertex-Cover, Dominating Sets
|
||||
|
||||
\subsection{Klausel-Formeln}
|
||||
Nützliche Gleichungen für Beweise mit KNF-Formeln
|
||||
|
||||
\newpage
|
||||
\section{Grammatiken}
|
||||
Wurden in HS25 nur kurz angesprochen.
|
||||
|
||||
\Def \textbf{Grammatik} $G := (\Sigma_\text{N}, \Sigma_\text{T}, P , S)$
|
||||
|
||||
\begin{tabular}{ll}
|
||||
\textbf{Nicht-Terminale} & $\Sigma_\text{N}$ \\
|
||||
\textbf{Terminale} & $\Sigma_\text{T}$ \\
|
||||
\small
|
||||
s.d. $ \Sigma_\text{N} \cap \Sigma_\text{T} = \emptyset$ &\\
|
||||
\normalsize
|
||||
\textbf{Startsymbol} & $S \in \Sigma_\text{N}$ \\
|
||||
\textbf{Ableitungsregeln} & $P \subseteq \Sigma^*\Sigma_\text{N}\Sigma^* \times \Sigma^*$
|
||||
\end{tabular}
|
||||
|
||||
Wobei $\Sigma := \Sigma_\text{N} \cup \Sigma_\text{T}$
|
||||
|
||||
\end{document}
|
||||
83
semester3/ti/compact/parts/01_words-alphabets.tex
Normal file
83
semester3/ti/compact/parts/01_words-alphabets.tex
Normal file
@@ -0,0 +1,83 @@
|
||||
\newsection
|
||||
\section{Alphabets, Words, etc}
|
||||
\stepcounter{subsection}
|
||||
\subsection{Alphabets, Words, Languages}
|
||||
\fancydef{Alphabet} Set $\Sigma$.
|
||||
Important alphabets: $\alphabetbool$, $\alphabets{lat}$ (all latin chars), $\alphabets{Keyboard}$ (all chars on keyboard), $\Sigma_m$ ($m$-ary numbers)
|
||||
|
||||
\fancydef{Word} Possibly empty (denoted $\lambda$) sequences of characters from $\Sigma$. $|w|$ is the length,
|
||||
$\Sigma^*$ is the set of all words and $\Sigma^+ = \Sigma^* - \{ \lambda \}$
|
||||
|
||||
\fancydef{Konkatenation} $\text{Kon}(x, y) = xy$, (so like string concat). $(xy)^n$ is $n$-times repeated concat.
|
||||
|
||||
\fancydef{Reversal} $a^R$, simply read the word backwards.
|
||||
|
||||
\stepLabelNumber{definition}
|
||||
\fancydef{Prefix, Suffix, Subword} $v$ in $w = vy$; $s$ in $w = xs$; Subword $u$ in $w = xuy$; $x$, $y$ possibly $\lambda$
|
||||
|
||||
\fancydef{Appearance} $|x|_a$ is the number of times $a \in \Sigma$ appears in $x$
|
||||
|
||||
\fancydef{Canonical ordering} Ordered by length and then by first non-common letter:
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
u < v \Longleftrightarrow |u| < |v| \lor (|u| = |v| \land u = x \cdot s_i \cdot u' \land v = x \cdot s_j \cdot v') \text{ for some } x, u', v' \in \word \text{ and } i < j
|
||||
\end{align*}
|
||||
|
||||
\drmvspace
|
||||
\fancydef{Language} $L \subseteq \word$, and we define $L^C = \word - L$ as the complement, with $L_{\emptyset}$ being the empty language,
|
||||
whereas $L_\lambda$ is the language with just the empty word in it.
|
||||
|
||||
\bi{Concatenation}: $L_1 \cdot L_2 = \{ vw | v \in L_1 \land w \in L_2 \}$ and $L^{i + 1} = L^i \cdot L \ \forall i \in \N$.
|
||||
|
||||
\bi{Kleene Star}: $L^* = \bigcup_{i \in \N} L^i$ and $L^+ = L \cdot L^*$
|
||||
|
||||
Of note is that there are irregular languages whose Kleene Star is regular, most notably,
|
||||
the language $L = \{ w \in \{ 0 \}^* \divides |w| \text{ is prime} \}$'s Kleene Star is regular,
|
||||
due to the fact that every integer $n \geq 2$ is a sum of primes (of $2$s and $3$s), so $L^* = \{ \lambda \} \cup \{ 0^n \divides n \geq 2 \}$ is regular
|
||||
|
||||
|
||||
\inlinelemma $L_1L_2 \cup L_1 L_3 = L_1(L_2 \cup L_3)$
|
||||
\inlinelemma $L_1(L_2 \cap L_3) \subseteq L_1 L_2 \cap L_1 L_3$
|
||||
|
||||
For multiple choice questions, really think of how the sets would look to determine if they fulfill a requirement.
|
||||
|
||||
|
||||
\stepcounter{subsection}
|
||||
\subsection{Kolmogorov-Complexity}
|
||||
\setLabelNumber{definition}{17}
|
||||
\fancydef{Kolmogorov-Complexity} $K(x)$ for $x \in \wordbool$ is the minimum of all binary lengths of Pascal programs that output $x$,
|
||||
where the Program doesn't have to compile, i.e. we can describe processes informally
|
||||
|
||||
\stepLabelNumber{lemma}
|
||||
\inlinelemma For each word $x$ exists constant $d$ s.t. $K(x) \leq |x| + d$, for which we can use a program that simply includes a \texttt{write(x)} command
|
||||
|
||||
\fancydef{Of natural number} $K(n) = K(\text{Bin}(n))$ with $|\text{Bin}(n)| = \ceil{\log_2(n + 1)}$
|
||||
|
||||
\inlinelemma For each $n \in \N \exists w_n \in (\alphabetbool)^n$ s.t. $K(w_n) \geq |w_n| = n$, i.e. exists a non-compressible word.
|
||||
|
||||
\inlinetheorem Kolmogorov-Complexity doesn't depend on programming language. It only differs in constant
|
||||
|
||||
\fancydef{Randomness} $x \in \wordbool$ random if $K(x) \geq |x|$, thus for $n \in \N$, $K(n) \geq \ceil{\log_2(n + 1)} - 1$
|
||||
|
||||
\stepLabelNumber{theorem}
|
||||
\fancytheorem{Prime number} $\displaystyle \limni \frac{\text{Prime}(n)}{\frac{n}{\ln(n)}} = 1$ with $\text{Prime}(n)$ the number of prime numbers on $[0, n] \subseteq \N$
|
||||
|
||||
\fhlc{Cyan}{Proofs} Proofs in which we need to show a lower bound for Kolmogorov-Complexity (almost) always work as follows:
|
||||
Assume for contradiction that $K(w) \leq f$ for all $w \in W$, i.e. that no word in $W$ has Kolmogorov-Complexity greater than $f$.
|
||||
We count the number $m$ of words in $W$ and the number $n$ of programs of length $\leq f$ ($f$ being the given, lower bound).
|
||||
We will have $m - n > 0$, which means, there are more different words than there are Programs with Kolmogorov-Complexity $\leq f$,
|
||||
which is a contradiction to our assumption.
|
||||
|
||||
There are $\floor{\frac{n}{k}} + 1$ numbers divisible by $k$ in the set $\{ 0, 1, \ldots, n \}$.
|
||||
|
||||
\shade{Orange}{Laws of logarithm}
|
||||
\drmvspace
|
||||
\begin{multicols}{3}
|
||||
\begin{itemize}[noitemsep]
|
||||
\item $\log_a(x) + \log_a(y) = \log_a(x \cdot y)$
|
||||
\item $\log_a(x) - \log_a(y) = \log_a(x \div y)$
|
||||
\item $y \log_a(x) = \log_a(x^y)$
|
||||
\item $\log_a(x) = \frac{\ln(x)}{\ln(a)}$
|
||||
\item $\log_a(1) = 0$
|
||||
\end{itemize}
|
||||
\end{multicols}
|
||||
168
semester3/ti/compact/parts/02_finite-automata.tex
Normal file
168
semester3/ti/compact/parts/02_finite-automata.tex
Normal file
@@ -0,0 +1,168 @@
|
||||
\newsection
|
||||
\section{Finite Automata}
|
||||
\stepcounter{subsection}
|
||||
\subsection{Representation}
|
||||
We can note the automata using graphical notation similar to graphs or as a series of instructions like this:
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
\texttt{select } input & = a_1 \texttt{ goto } i_1 \\[-0.2cm]
|
||||
\vdots \\
|
||||
input & = a_k \texttt{ goto } i_k
|
||||
\end{align*}
|
||||
|
||||
\drmvspace
|
||||
\fancydef{Finite Automaton} $A = (Q, \Sigma, \delta, q_0, F)$ with
|
||||
\drmvspace
|
||||
\begin{multicols}{2}
|
||||
\begin{itemize}[noitemsep]
|
||||
\item $Q$ set of states
|
||||
\item $\Sigma$ input alphabet
|
||||
\item $\delta(q, a) = p$ transition from $q$ on reading $a$ to $p$
|
||||
\item $q_0$ initial state
|
||||
\item $F \subseteq Q$ accepting states
|
||||
\item $\cL_{EA}$ regular languages (accepted by FA)
|
||||
\end{itemize}
|
||||
\end{multicols}
|
||||
|
||||
\drmvspace
|
||||
$\hat{\delta}(q_0, w) = p$ is the end state reached when we process word $w$ from state $q_0$, and $(q, w) \bigvdash{M}{*} (p, \lambda)$ is the formal definition,
|
||||
with $\bigvdash{M}{*}$ representing any number of steps $\bigvdash{M}{}$ executed (transitive hull).
|
||||
|
||||
The class $\class[q_i]$ represents all possible words for which the FA is in this state.
|
||||
Be cautious when defining them, make sure that no extra words from other classes could appear in the current class, if this is not intended.
|
||||
|
||||
Sometimes, we need to combine two (or more) FA to form one larger one.
|
||||
We can do this easily with product automata. To create one from two automata $M_1$ (states $q_i$) and $M_2$ (states $p_j$) we do the following steps:
|
||||
\rmvspace
|
||||
\begin{enumerate}[noitemsep]
|
||||
\item Write down the states as tuples of the form $(q_i, p_j)$ (i.e. form a grid by writing down one of the automata vertically and the other horizontally)
|
||||
\item From each state, the automata on the horizontal axis decides for the input symbol if we move left or right,
|
||||
whereas the automata on the vertical axis decides if we move up or down.
|
||||
\end{enumerate}
|
||||
|
||||
\input{parts/02a_example-automata.tex}
|
||||
|
||||
|
||||
\stepcounter{subsection}
|
||||
\subsection{Proofs of nonexistence}
|
||||
We have three approaches to prove non-regularity of languages.
|
||||
Below is an informal guide as to how to do proofs using each of the methods and possible pitfalls.
|
||||
|
||||
For all of them start by assuming that $L$ is regular.
|
||||
|
||||
\fhlc{Cyan}{Lemma 3.3}
|
||||
\setLabelNumber{lemma}{3}
|
||||
\begin{lemma}[]{Regular words}
|
||||
Let $A$ be a FA over $\Sigma$ and let $x \neq y \in \Sigma^*$, such that $\hdelta_A (q_0, x) = \hdelta(q_0, y)$.
|
||||
Then for each $z \in \Sigma^*$ there exists an $r \in Q$, such that $xz, yz \in \class[r]$, and we thus have
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
xz \in L(A) \Longleftrightarrow yz \in L(A)
|
||||
\end{align*}
|
||||
\end{lemma}
|
||||
\begin{enumerate}[noitemsep]
|
||||
\item Pick a FA $A$ over $\Sigma$ and say that $L(A) = L$
|
||||
\item Pick $|Q| + 1$ words $x$ such that $xy = w \in L$ with $|y| > 0$.
|
||||
\item State that via pigeonhole principle there exists w.l.o.g $i < j \in \{ 1, \ldots, |Q| + 1 \}$, s.t. $\hdelta_A(q_0, x_i) = \hdelta_A(q_0, x_j)$.
|
||||
\item Build contradiction by picking $z$ such that $x_i z \in L$.
|
||||
\item Then, if $z$ was picked properly, since $i < j$, we have that $x_j z \notin L$, since the lengths do not match
|
||||
\end{enumerate}
|
||||
|
||||
\rmvspace
|
||||
That is a contradiction, which concludes our proof
|
||||
|
||||
|
||||
\fhlc{Cyan}{Pumping Lemma}
|
||||
\begin{lemma}[]{Pumping-Lemma für reguläre Sprachen}
|
||||
Let $L$ be regular. Then there exists a constant $n_0 \in \N$, such that each word $w \in \word$ with $|w| \geq n_0$ can be decomposed into $w = yxz$, with
|
||||
\drmvspace
|
||||
\begin{multicols}{2}
|
||||
\begin{enumerate}[label=\textit{(\roman*)}]
|
||||
\item $|yx| \leq n_0$
|
||||
\item $|x| \geq 1$
|
||||
\item For $X = \{ yx^kz \divides k\in \N \}$ \textit{either} $X \subseteq L$ or $X \cap L = \emptyset$ applies
|
||||
\end{enumerate}
|
||||
\end{multicols}
|
||||
\end{lemma}
|
||||
|
||||
\begin{enumerate}[noitemsep]
|
||||
\item State that according to Lemma 3.4 there exists a constant $n_0$ such that every word $w$ with $|w| \geq n_0$ can be decomposed as $w = yxz$ with the properties (i) -- (iii).
|
||||
\item Choose a word $w \in L$ that is sufficiently long to enable a sensible decomposition for the next step.
|
||||
\item Choose a decomposition, such that $|yx| = n_0$ (makes it quite easy later). Specify $y$ and $x$ in such a way that for $|y| = l$ and $|x| = m$ we have $l + m \leq n_0$
|
||||
\item According to Lemma 3.4 (ii), $m \geq 1$ and thus $|x| \geq 1$. Fix $z$ to be the suffix of $w = yxz$
|
||||
\item Then according to Lemma 3.4 (iii), fill in for $X = \{ yx^k z \divides k \in \N \}$ we have $X \subseteq L$.
|
||||
\item This will lead to a contradiction commonly when setting $k = 0$, as for a language like $0^n1^n$, we have $0^{(n_0 - m) + km}1^{n_0}$ as the word (with $n_0 - m = l$),
|
||||
which for $k = 0$ is $u= 0^{n_0 - m} 1^{n_0}$ and since $m \geq 1$, $u \notin L$ and thus by Lemma 3.4, $X \cap L = \emptyset$,
|
||||
but that is also not true, as the intersection is not empty (for $k = 1$)
|
||||
\end{enumerate}
|
||||
|
||||
|
||||
\fhlc{Cyan}{Kolmogorov Complexity}
|
||||
\begin{enumerate}[noitemsep]
|
||||
\item We first need to choose an $x$ such that $L_x = \{ y \divides xy \in L \}$.
|
||||
If not immediately apparent, choosing $x = a^{\alpha + 1}$ for $a \in \Sigma$
|
||||
and $\alpha$ being the exponent of the words in the language after a variable rename.
|
||||
For example, for $\{ 0^{n^2 + 2n} \divides n \in \N \}$, $\alpha(m) = m^2 + 2m$.
|
||||
Another common way to do this is for languages of the form $\{ a^n b^n \divides n \in \N \}$ to use $x = a^m$ and
|
||||
$L_{0^m} = \{ y \divides 0^m y \in L \} = \{ 0^j 1^{m + j} \divides j \in \N \}$.
|
||||
\item Find the first word $y_1 \in L_x$. In the first example, this word would be $y_1 = 0^{((m + 1)^2 + 2(m + 1)) - (m^2 + 2m) + 1}$,
|
||||
or in general $a^{\alpha(m + 1) - \alpha(m) + 1}$.
|
||||
For the second example, the word would be $y_1 = 1^m$, i.e. with $j = 0$
|
||||
\item According to Theorem 3.1, there exists constant $c$ such that $K(y_k) \leq \ceil{\log_2(k + 1)} + c$. We often choose $k = 1$,
|
||||
so we have $K(y_1) \leq \ceil{\log_2(1 + 1)} + c = 1 + c$ and with $d = 1 + c$, $K(y_1) \leq d$
|
||||
\item This however leads to a contradiction, since the number of programs with length $\leq d$ is at most $2^d$ and thus finite, but our set $L_x$ is infinite.
|
||||
\end{enumerate}
|
||||
|
||||
|
||||
\newpage
|
||||
\fhlc{Cyan}{Minimum number of states}
|
||||
|
||||
To show that a language needs \textit{at least} $n$ states, use Lemma 3.3 and $n$ words. We thus again do a proof by contradiction:
|
||||
\begin{enumerate}
|
||||
\item Assume that there exists FA with $|Q| < n$. We now choose $n$ words (as short as possible), as we would for non-regularity proofs using Lemma 3.3 (i.e. find some prefixes).
|
||||
It is usually beneficial to choose prefixes with $|w|$ small (consider just one letter, $\lambda$, then two and more letter words).
|
||||
An ``easy'' way to find the prefixes is to construct a finite automaton and then picking a prefix from each class
|
||||
\item Construct a table for the suffixes using the $n$ chosen words such that one of the words at entry $x_{ij}$ is in the language and the other is not. ($n \times n$ matrix, see below in example)
|
||||
\item Conclude that we have reached a contradiction as every field $x_{ij}$ contains a suffix such that one of the two words is in the language and the other one is not.
|
||||
\end{enumerate}
|
||||
\inlineex Let $L = \{ x1y \divides x \in \wordbool, y \in \{ 0, 1 \}^2 \}$. Show that any FA that accepts $L$ needs at least four states.
|
||||
|
||||
Assume for contradiction that there exists FA $A = (Q, \alphabetbool, \delta_A, q_0, F)$ with $|Q| < 4$.
|
||||
Let's take the $4$ words $00, 01, 10, 11$. Then according to Lemma 3.3, there needs to exist a $z$ such that $xz \in L(A) \Longleftrightarrow yz \in L(A)$
|
||||
with $\hdelta_A(q_0, x) = \hdelta_A(q_0, y)$ for $x, y \in \{ 00, 01, 10, 11 \}$.
|
||||
|
||||
This however is a contradiction, as we can find a $z$ for each of the pairs $(x, y)$, such that $xz \in L(A)$, but $yz \notin L(A)$.
|
||||
See for reference the below table (it contains suffixes $z$ fulfilling prior condition):
|
||||
|
||||
\begin{tables}{c|cccc}{ & $00$ & $01$ & $10$ & $11$}
|
||||
$00$ & - & $00$ & $0$ & $0$ \\
|
||||
$01$ & & - & $0$ & $0$ \\
|
||||
$10$ & & & - & $00$ \\
|
||||
$11$ & & & & - \\
|
||||
\end{tables}
|
||||
Thus, all four words have to lie in pairwise distinct states and we therefore need at least $4$ states to detect this language.
|
||||
|
||||
|
||||
|
||||
|
||||
\subsection{Non-determinism}
|
||||
The most notable differences between deterministic and non-deterministic FA is that the transition function is different: $\delta: Q \times \Sigma \rightarrow \cP(Q)$.
|
||||
I.e., there can be any number of transitions for one symbol of $\Sigma$ for each state.
|
||||
This is (in graphical notation) represented by arrows that have the same label going to different nodes.
|
||||
|
||||
It is also possible for there to not be a transition function for a certain element of the input alphabet.
|
||||
In that case, regardless of state, the NFA rejects, as it ``gets stuck'' in a state and can't finish processing.
|
||||
|
||||
Additionally, the NFA accepts $x$ if it has at least one accepting calculation on $x$.
|
||||
|
||||
\stepLabelNumber{theorem}
|
||||
\inlinetheorem For every NFA $M$ there exists a FA $A$ such that $L(M) = L(A)$. They are then called \bi{equivalent}
|
||||
|
||||
|
||||
\fhlc{Cyan}{Potenzmengenkonstruktion}
|
||||
States are now sets of states of the NFA, namely those the NFA could be in after processing the preceding input elements, and we have a special state called $q_{\text{trash}}$.
|
||||
|
||||
For each state, the set of states $P = \hdelta(q_0, z)$ for $|z| = n$ represents all possible states that the NFA could be in after doing the first $n$ calculations.
|
||||
|
||||
Correspondingly, we add new states if there is no other state that is in the same branch of the calculation tree $\cB_M(x)$.
|
||||
So, in other words, we execute BFS on the calculation tree.
|
||||
81
semester3/ti/compact/parts/02a_example-automata.tex
Normal file
81
semester3/ti/compact/parts/02a_example-automata.tex
Normal file
@@ -0,0 +1,81 @@
|
||||
\begin{center}
|
||||
\begin{tikzpicture}[node distance = 1cm and 2cm, >={Stealth[round]}]
|
||||
\node[state, initial left, accepting] (q0p0) {$q_0, p_0$};
|
||||
\node[state] (q0p1) [right=of q0p0] {$q_0, p_1$};
|
||||
\node[state] (q0p2) [right=of q0p1] {$q_0, p_2$};
|
||||
\node[state, accepting] (q1p0) [below=of q0p0] {$q_1, p_0$};
|
||||
\node[state] (q1p1) [right=of q1p0] {$q_1, p_1$};
|
||||
\node[state] (q1p2) [right=of q1p1] {$q_1, p_2$};
|
||||
\node[state, accepting] (q2p0) [below=of q1p0] {$q_2, p_0$};
|
||||
\node[state, accepting] (q2p1) [right=of q2p0] {$q_2, p_1$};
|
||||
\node[state, accepting] (q2p2) [right=of q2p1] {$q_2, p_2$};
|
||||
|
||||
\path[->]
|
||||
% Level 0
|
||||
(q0p0) edge node [above] {a} (q0p1)
|
||||
(q0p1) edge node [above] {a} (q0p2)
|
||||
(q0p2) edge [bend right] node [above] {a} (q0p0)
|
||||
% Level 0 to level 1
|
||||
(q0p0) edge node [right] {b} (q1p0)
|
||||
(q0p1) edge node [right] {b} (q1p1)
|
||||
(q0p2) edge node [right] {b} (q1p2)
|
||||
% Level 1 to level 2
|
||||
(q1p0) edge node [above] {a} (q2p1)
|
||||
(q1p1) edge node [above] {a} (q2p2)
|
||||
(q1p2) edge node [right, xshift=0.3cm] {a} (q2p0)
|
||||
% Level 2 to level 1
|
||||
(q2p0) edge node [right] {b} (q1p0)
|
||||
(q2p1) edge node [above left, yshift=0.1cm] {b} (q1p1)
|
||||
(q2p2) edge node [right] {b} (q1p2)
|
||||
% Level 2
|
||||
(q2p0) edge node [above] {a} (q2p1)
|
||||
(q2p1) edge node [above] {a} (q2p2)
|
||||
(q2p2) edge [bend left] node [below] {a} (q2p0)
|
||||
% ────────────────────────────────────────────────────────────────────
|
||||
% Loops on level 1
|
||||
(q1p0) edge [loop left] node {b} ()
|
||||
(q1p1) edge [loop left] node {b} ()
|
||||
(q1p2) edge [loop left] node {b} ();
|
||||
\end{tikzpicture}
|
||||
\end{center}
|
||||
|
||||
For the automata
|
||||
\begin{figure}[h!]
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\begin{center}
|
||||
\begin{tikzpicture}[node distance = 1cm, >={Stealth[round]}]
|
||||
\node[state, initial left, accepting] (p_0) {$p_0$};
|
||||
\node[state] (p_1) [right=of p_0] {$p_1$};
|
||||
\node[state] (p_2) [right=of p_1] {$p_2$};
|
||||
|
||||
\path[->]
|
||||
(p_0) edge node [above] {a} (p_1)
|
||||
(p_1) edge node [above] {a} (p_2)
|
||||
(p_2) edge [bend left] node [below] {a} (p_0)
|
||||
(p_0) edge [loop above] node {b} ()
|
||||
(p_1) edge [loop above] node {b} ()
|
||||
(p_2) edge [loop above] node {b} ();
|
||||
\end{tikzpicture}
|
||||
\end{center}
|
||||
\caption{Module to compute $|w|_b \equiv |w| \ (\text{mod } 3)$. States $p \in Q_a$}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\begin{center}
|
||||
\begin{tikzpicture}[node distance = 1cm, >={Stealth[round]}]
|
||||
\node[state, initial left] (q_0) {$q_0$};
|
||||
\node[state] (q_1) [right=of q_0] {$q_1$};
|
||||
\node[state, accepting] (q_2) [right=of q_1] {$q_2$};
|
||||
|
||||
\path[->]
|
||||
(q_0) edge node [above] {b} (q_1)
|
||||
(q_1) edge [bend left] node [above] {a} (q_2)
|
||||
(q_2) edge [bend left] node [below] {b} (q_1)
|
||||
(q_0) edge [loop above] node {a} ()
|
||||
(q_1) edge [loop above] node {b} ()
|
||||
(q_2) edge [loop above] node {a} ();
|
||||
\end{tikzpicture}
|
||||
\end{center}
|
||||
\caption{Module to compute $w$ contains sub. $ba$ and ends in $a$. States $p \in Q_b$}
|
||||
\end{subfigure}
|
||||
\caption{Graphical representation of the Finite Automaton of Task 9 in 2025}
|
||||
\end{figure}
|
||||
49
semester3/ti/compact/parts/03_turing-machines.tex
Normal file
49
semester3/ti/compact/parts/03_turing-machines.tex
Normal file
@@ -0,0 +1,49 @@
|
||||
\newsection
|
||||
\section{Turing Machines}
|
||||
\setcounter{subsection}{2}
|
||||
\subsection{Representation}
|
||||
Turing machines are much more capable than FA and NFA. A full definition of them can be found in the book on pages 96 - 98 (= pages 110 - 112 in the PDF).
|
||||
|
||||
For example, to detect a recursive language like $\{ 0^n 1^n \divides n \in \N \}$ we simply replace the left and rightmost symbol with a different one
|
||||
and repeat until we only have the new symbol, at which point we accept, or there are no more $0$s or $1$s, at which point we reject.
|
||||
|
||||
The Turing Machines have an accepting $\qacc$ and a rejecting state $\qrej$ and a configuration is an element of
|
||||
$\{ \{ \cent \}\cdot \Gamma^* \cdot Q \cdot \Gamma^+ \cup Q \cdot \{ \cent \} \cdot \Gamma^+ \}$ with $\cdot$ being the concatenation and $\cent$ the marker of the start of the band.
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
\cL_{RE} & = \{ L(M) \divides M \text{ is a TM} \}\\
|
||||
\cL_R & = \{ L(M) \divides M \text{ is a TM and it always halts} \}
|
||||
\end{align*}
|
||||
|
||||
|
||||
\subsection{Multi-tape TM and Church's Thesis}
|
||||
$k$-Tape Turing machines have $k$ extra tapes that can be written to and read from, called memory tapes. They \textit{cannot} write to the input tape.
|
||||
Initially the memory tapes are empty and we are in state $q_0$.
|
||||
All read/write-heads of the memory tapes can move in either direction, granted they have not reached the far left end, marked with $\cent$.
|
||||
|
||||
As with normal TMs, the Turing Machine $M$ accepts $w$ if and only if $M$ reaches the state $\qacc$ and rejects if it does not terminate or reaches the state $\qrej$
|
||||
|
||||
\inlinelemma There exists an equivalent $1$-Tape-TM for every TM.
|
||||
|
||||
\inlinelemma There exists an equivalent TM for each Multi-tape TM.
|
||||
|
||||
|
||||
Church's Thesis states that the Turing Machines are a formalization of the term ``Algorithm''.
|
||||
It is the only axiom specific to Computer Science.
|
||||
|
||||
All the words that can be accepted by a Turing Machine are elements of $\cL_{RE}$ and are called \bi{recursively enumerable}.
|
||||
|
||||
|
||||
\subsection{Non-Deterministic Turing Machines}
|
||||
The same ideas as with NFA apply here. The transition function also maps into the power set:
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
\delta : (Q - \{ \qacc, \qrej \}) \times \Gamma \rightarrow \cP(Q \times \Gamma \times \{ L, R, N \})
|
||||
\end{align*}
|
||||
|
||||
\drmvspace
|
||||
Again, when constructing a normal TM from a NTM (which is not required at the Midterm, or any other exam for that matter in this course),
|
||||
we again apply BFS to the NTM's calculation tree.
|
||||
|
||||
\stepLabelNumber{theorem}
|
||||
\inlinetheorem For an NTM $M$ exists a TM $A$ s.t. $L(M) = L(A)$ and if $M$ doesn't contain infinite calculations on words of $(L(M))^C$, then $A$ always stops.
|
||||
192
semester3/ti/compact/parts/04_computability.tex
Normal file
192
semester3/ti/compact/parts/04_computability.tex
Normal file
@@ -0,0 +1,192 @@
|
||||
\newsection
|
||||
\section{Computability}
|
||||
\stepcounter{subsection}
|
||||
\subsection{Diagonalization}
|
||||
The \bi{set of binary encodings of all TMs} is denoted $\text{KodTM}$ and $\text{KodTM} \subseteq \wordbool$ and the upper bound of the cardinality is $|\wordbool|$,
|
||||
as there are infinitely many TMs.
|
||||
|
||||
Below is a list of countable objects. They all have corresponding Lemmas in the script, but omitted here:
|
||||
\drmvspace
|
||||
\begin{multicols}{4}
|
||||
\begin{itemize}
|
||||
\item $\word$ for any $\Sigma$
|
||||
\item $\text{KodTM}$
|
||||
\item $\N \times \N$
|
||||
\item $\Q^+$
|
||||
\end{itemize}
|
||||
\end{multicols}
|
||||
|
||||
\rmvspace
|
||||
\drmvspace
|
||||
The following objects are uncountable: $[0, 1]$, $\R$, $\cP(\wordbool)$
|
||||
|
||||
\inlinecorollary $|\text{KodTM}| < |\cP(\wordbool)|$ and thus there exist infinitely many not recursively enumerable languages over $\alphabetbool$
|
||||
|
||||
\setLabelNumber{theorem}{3}
|
||||
\inlinetheorem $L_\text{diag} \notin \cL_{RE}$
|
||||
|
||||
\fhlc{Cyan}{Proof of $L$ (not) recursively enumerable}
|
||||
|
||||
Proving that a language \textit{is} recursively enumerable is as difficult as providing a Turing Machine that accepts it.
|
||||
|
||||
Proving that a language is \textit{not} recursively enumerable is likely easier. For it, let $d_{ij} = 1 \Longleftrightarrow M_i$ accepts $w_j$.
|
||||
|
||||
\inlineex Assume towards contradiction that $L_\text{diag} \in \cL_{RE}$. Let
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
L_{\text{diag}} & = \{ w \in \wordbool \divides w = w_i \text{ for an } i \in \N - \{ 0 \} \text{ and $M_i$ does not accept } w_i \} \\
|
||||
& = \{ w \in \wordbool \divides w = w_i \text{ for an } i \in \N - \{ 0 \} \text{ and } d_{ii} = 0\}
|
||||
\end{align*}
|
||||
Thus assume that, $L_\text{diag} = L(M)$ for a Turing Machine $M$.
|
||||
Since $M$ is a Turing Machine in the canonical ordering of all Turing Machines, so there exists an $i \in \N - \{ 0 \}$, such that $M = M_i$.
|
||||
|
||||
This however leads to a contradiction, as $w_i \in L_\text{diag} \Longleftrightarrow d_{ii} = 0 \Longleftrightarrow w_i \notin L(M_i)$.
|
||||
|
||||
In other words, $w_i$ is in $L_\text{diag}$ if and only if $w_i$ is not in $L(M_i)$, which contradicts our statement above, in which we assumed that $L_\text{diag} \in \cL_{RE}$.
|
||||
|
||||
In other, more different, words, $w_i$ being in $L_\text{diag}$ implies (from the definition) that $d_{ii} = 0$, which from its definition implies that $w_i \notin L(M_i)$.
|
||||
|
||||
\drmvspace
|
||||
\proven
|
||||
|
||||
|
||||
Another result (not formally proven in the script, but there is a proof by intimidation) that can come in useful, especially when trying to show $L \notin \cL_{RE}$ is:
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
L, L^C \in \cL_{RE} \Longleftrightarrow L \in \cL_R
|
||||
\end{align*}
|
||||
|
||||
\drmvspace
|
||||
Additionally, as a reminder, $\cL_{RE} = \{ L(M) \divides M \text{ is a TM} \}$, so to prove that a language $L \notin \cL_{RE}$,
|
||||
we only need to show that there exists no TM $M$, for which $L(M) \in \cL_{RE}$.
|
||||
|
||||
|
||||
|
||||
% ────────────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
\newpage
|
||||
\subsection{Reductions}
|
||||
\label{sec:reductions}
|
||||
This is the start of the topics that are explicitly part of the endterm.
|
||||
|
||||
For the reductions, it is important to get the order right.\\
|
||||
To show that a language $L_1$ is not part of e.g. $\cL_R$, show that there exists a reduction into a language $L_2 \notin \cL_R$, i.e. e.g. show $L_2 \leq_R L_1$.\\
|
||||
To show that a language $L_1$ is part of e.g. $\cL_R$, show that there exists a reduction into a language $L_2 \in \cL_R$, i.e. e.g. show $L_1 \leq_R L_2$.
|
||||
|
||||
For a language to be in $\cL_R$, in contrast to $L \in \cL_{RE}$, the TM has to halt also for \texttt{no} instances, i.e. it has to be an algorithm.
|
||||
In other words: A TM $A$ can enumerate all valid strings of a \textit{recursively enumerable language} ($L \in \cL_{RE}$),
|
||||
where for \textit{recursive languages}, it has to be able to definitively answer for both \texttt{yes} and \texttt{no} and thus halt in finite time for both.
|
||||
|
||||
First off, a list of important languages for this and the next section:
|
||||
\begin{itemize}
|
||||
\item $L_U = \{ \text{Kod}(M)\# w \divides w \in \wordbool \text{ and TM $M$ accepts } w \}$ ($\in \cL_{RE}$, but $\notin \cL_R$)
|
||||
\item $L_H = \{ \text{Kod}(M)\# x \divides x \in \wordbool \text{ and TM $M$ halts on } x \}$ ($\in \cL_{RE}$, but $\notin \cL_R$)
|
||||
\item $L_{\text{diag}} = \{ w \in \wordbool \divides w = w_i \text{ for an } i \in \N - \{ 0 \} \text{ and $M_i$ does not accept } w_i \}$ ($\notin \cL_{RE}$ and thus $\notin \cL_R$)
|
||||
\item $(L_{\text{diag}})^C$ ($\in \cL_{RE}$, but $\notin \cL_R$)
|
||||
\item $L_{EQ} = \{ \text{Kod}(M)\# \text{Kod}(\overline{M}) \divides L(M) = L(\overline{M}) \}$ ($\notin \cL_{RE}$, and thus $\notin \cL_R$)
|
||||
\item $(L_{EQ})^C = \{ \text{Kod}(M)\# \text{Kod}(\overline{M}) \divides L(M) \neq L(\overline{M}) \}$ ($\notin \cL_{RE}$, and thus $\notin \cL_R$)
|
||||
\item $\lempty = \{ \text{Kod}(M) \divides L(M) = \emptyset \}$ ($\notin \cL_{RE}$, and thus $\notin \cL_R$)
|
||||
\item $(\lempty)^C = \{ x \in \wordbool \divides x \notin \text{Kod}(\overline{M}) \forall \text{ TM } \overline{M} \text{ or } x = \text{Kod}(M) \text{ and } L(M) \neq \emptyset \}$
|
||||
($\in \cL_{RE}$, but $\notin \cL_R$)
|
||||
\item $L_{H, \lambda} = \{ \text{Kod}(M) \divides M \text{ halts on } \lambda \}$ ($\in \cL_{RE}$, but $\notin \cL_R$)
|
||||
\end{itemize}
|
||||
An important consequence of the fact that both $L_{EQ}$ and its complement are $\notin \cL_{RE}$ is that it is not guaranteed
|
||||
for a language's complement to \textit{necessarily} be in $\cL_{RE}$, if the language is not.
|
||||
|
||||
|
||||
\setLabelNumber{definition}{3}
|
||||
\fancydef{Recursively reducible languages} $L_1 \leq_R L_2$ ($L_1$ reducible into $L_2$), if $L_2 \in \cL_R \Rightarrow L_1 \in \cL_R$
|
||||
|
||||
\fancydef{$EE$-Reductions} $L_1 \leq_{EE} L_2$ if there exists a TM $M$ that computes a function $f_M : \word_1 \rightarrow \word_2$,
|
||||
for which we have $x \in L_1 \Leftrightarrow f_M(x) \in L_2$ for all $x \in \word_1$
|
||||
|
||||
\setLabelNumber{lemma}{3}
|
||||
\inlinelemma If $L_1 \leq_{EE} L_2$ then also $L_1 \leq_R L_2$
|
||||
|
||||
\inlinelemma For each language $L \subseteq \word$ we have: $L \leq_R L^C$ and $L^C \leq_R L$
|
||||
|
||||
\setLabelNumber{theorem}{6}
|
||||
\fancytheorem{Universal TM} A TM $U$, such that $L(U) = L_U$
|
||||
|
||||
|
||||
\fhlc{Cyan}{Showing reductions} First, a general guide to reductions and below what else we need to keep in mind for specific reductions:
|
||||
\begin{enumerate}
|
||||
\item We construct a TM $A$ that:
|
||||
\begin{enumerate}
|
||||
\item Checks if the input has the right form and if it does not, returns some output that is $\notin L_2$
|
||||
\item Applies the transformation to all remaining input
|
||||
\end{enumerate}
|
||||
\item We show $x \in L_1 \Leftrightarrow A(x) \in L_2$ by showing the implications:
|
||||
\begin{enumerate}
|
||||
\item For $\Rightarrow$, we show it directly, by assuming that $x \in L_1$ (obviously) and we can ignore the invalid input (as that $\notin L_1$ anyway)
|
||||
\item For $\Leftarrow$, we have two options (mention what happens to invalid input here):
|
||||
\begin{itemize}
|
||||
\item We show $A(x) \in L_2 \Rightarrow x \in L_1$ directly (usually harder)
|
||||
\item We show $x \notin L_1 \Rightarrow A(x) \notin L_2$ (contraposition)
|
||||
\end{itemize}
|
||||
\end{enumerate}
|
||||
\item Show that the TM always halts (for $P$, $EE$ and $R$ reductions at least)
|
||||
\end{enumerate}
|
||||
|
||||
\shade{Cyan}{$EE$-reductions} They follow the above scheme exactly
|
||||
|
||||
\shade{Cyan}{$R$-reductions} It is usually a good idea to draw the setup here. We have a TM $C$ that basically executes an $EE$-reduction and we have a TM $A$ that can check $L_2$.
|
||||
Then, we have a TM $B$ that wraps the whole thing: It first executes TM $C$, which will either output an transformation of $L_1$ for $L_2$ (i.e. execute an $EE$-reduction) or
|
||||
output some encoding for \textit{invalid word}.
|
||||
If it outputs the encoding for \textit{invalid word}, $B$ will output $x \notin L_1$.
|
||||
|
||||
If $C$ does not output an encoding for \textit{invalid word}, then $B$ will execute $A$ on the output of $C$ and then use the output of $A$ (either accepting or rejecting)
|
||||
to output the same (i.e. if $A$ accepts, then $B$ will output $x \in L_1$ and if $A$ rejects, $B$ outputs $x \notin L_1$)
|
||||
|
||||
\inlineintuition In $R$-reductions, we construct a full verifier for $L_1$ using the verifier for $L_2$, i.e. we can use TM $B$ directly to check if a given word is in $L_1$
|
||||
given that the transformed word is also in $L_2$.
|
||||
|
||||
|
||||
\shade{Cyan}{$P$-reductions} (Used in Chapter \ref{sec:complexity}). We need to also show that $A$ terminates in polynomial time.
|
||||
|
||||
\shade{orange}{Tips \& Tricks:}
|
||||
\begin{itemize}
|
||||
\item The TM $A$ has to terminate always
|
||||
\item Check the input for the correct form first
|
||||
\item For the correctness, show $x \in L_1 \Leftrightarrow A(x) \in L_2$
|
||||
\item The following tricks can be useful:
|
||||
\begin{itemize}
|
||||
\item Transitions into $\qacc$ and $\qrej$ can be redirected to $\qacc / \qrej$ or into an infinite loop
|
||||
\item Construct TM $M'$ that ignores input and does the same, regardless of input
|
||||
\end{itemize}
|
||||
\item Generate encoding of a $TM$ with special properties (e.g. accepts all input, never halts, \dots)
|
||||
\end{itemize}
|
||||
|
||||
|
||||
% ────────────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
\subsection{Rice's Theorem}
|
||||
\setLabelNumber{definition}{7}
|
||||
\inlinedef $L$ is called a \bi{semantically non-trivial decision problem}, if these conditions apply:
|
||||
\begin{enumerate}[label=\textit{(\roman*)}]
|
||||
\item There exists a TM $M_1$, such that $\text{Kod}(M_1) \in L$ (i.e. $L \neq \emptyset$)
|
||||
\item There exists a TM $M_2$, such that $\text{Kod}(M_2) \notin L$ (not all encodings are in $L$)
|
||||
\item For two TM $A$ and $B$: $L(A) = L(B) \Rightarrow \text{Kod}(A) \in L \Rightarrow \text{Kod}(B) \in L$
|
||||
\end{enumerate}
|
||||
|
||||
|
||||
\setLabelNumber{theorem}{9}
|
||||
\fancytheorem{Rice's Theorem} Every semantically non-trivial decision problem over TMs is undecidable
|
||||
|
||||
\fhlc{Cyan}{Using Rice's Theorem} We only need to show that a language is semantically non-trivial, which we do by checking the above conditions.
|
||||
For the third condition, intuitively, we only need to check if in the definition of $L$ only $L(M)$ appears and nowhere $M$ directly (except of course, to say that $M$ has to be a TM),
|
||||
or the condition can be restated such that only $L(M)$ is described by it.
|
||||
|
||||
For a more formal proof of that condition, simply show that the implication holds
|
||||
|
||||
|
||||
\stepcounter{subsection}
|
||||
\subsection{The method of the Kolmogorov-Complexity}
|
||||
\inlinetheorem The problem of computing the Kolmogorov-Complexity $K(x)$ for each $x$ is algorithmically unsolvable.
|
||||
|
||||
\inlinelemma If $L_H \in \cL_R$, then there exists an algorithm to compute the Kolmogorov-Complexity $K(x)$ for each $x \in \wordbool$
|
||||
|
||||
|
||||
As of HS2025, chapters 5.5 and 5.7 are not relevant for the Endterm or Session exam, so they are omitted here
|
||||
193
semester3/ti/compact/parts/05_complexity.tex
Normal file
193
semester3/ti/compact/parts/05_complexity.tex
Normal file
@@ -0,0 +1,193 @@
|
||||
\newsection
|
||||
\section{Complexity}
|
||||
\label{sec:complexity}
|
||||
\stepcounter{subsection}
|
||||
\subsection{Measurements of Complexity}
|
||||
\compactdef{Time complexity} For a computation $D = C_1, \ldots, C_k$ of $M$ on $x$ is defined by $\text{Time}_M(x) = k - 1$.
|
||||
For the TM $M$ itself, we have $\text{Time}_M(n) = \max\{ \text{Time}_M(x) \divides x \in \Sigma^n \}$
|
||||
|
||||
\begin{definition}[]{Space complexity}
|
||||
Let $C = (q, x, i, \alpha_1, i_1, \ldots, \alpha_k, i_k)$,
|
||||
with $0 \leq i \leq |x| + 1$ and $0 \leq i_j \leq |\alpha_j|$ for $j = 1, \ldots, k$ be a configuration.
|
||||
|
||||
The space complexity of configuration $C$ is $\text{Space}_M(C) = \max\{ |\alpha_i| \divides i = 1, \ldots, k \}$.
|
||||
|
||||
The space complexity of a calculation $D = C_1, \ldots, C_l$ on $x$ is $\text{Space}_M(x) = \max\{ \text{Space}_M(C_i) \divides i = 1, \ldots, l \}$
|
||||
|
||||
The space complexity of a TM $M$ is $\text{Space}_M(n) = \max\{ \text{Space}_M(x) \divides x \in \Sigma^n \}$
|
||||
\end{definition}
|
||||
|
||||
\inlinelemma For every $k$-tape-TM $A$, there exists an equivalent $1$-tape-TM $B$ such that $\text{Space}_B(n) \leq \text{Space}_A(n)$
|
||||
|
||||
\inlinelemma For every $k$-tape-TM $A$, $\exists$ a $k$-tape-TM such that $L(A) = L(B)$ and $\text{Space}_B(n) \leq \frac{\text{Space}_A(n)}{2} + 2$
|
||||
|
||||
\inlinedef The big-O-notation is defined as in A\&D, we however write $\text{Time}_A(n) \in \tco{g(n)}$, etc
|
||||
|
||||
\inlinetheorem There exists a decision problem $(\alphabetbool, L)$, such that for each MTM $A$ that decides it,
|
||||
there exists an MTM $B$ that also decides it and for which $\text{Time}_B(n) \leq \log_2(\text{Time}_A(n))$
|
||||
|
||||
\inlinedef An MTM $C$ is \bi{optimal} for $L$, if $\text{Time}_C(n) \in \tco{f(n)}$ and $\tcl(f(n))$ is a lower bound for the time complexity of $L$
|
||||
|
||||
|
||||
% ────────────────────────────────────────────────────────────────────
|
||||
|
||||
\subsection{Complexity classes}
|
||||
Below is a list of complexity classes
|
||||
\begin{definition}[]{Complexity classes}
|
||||
\drmvspace\rmvspace
|
||||
\begin{align*}
|
||||
\text{TIME}(f) & = \{ L(B) \divides B \text{ is an MTM with } \tc_B(n) \in \tco{f(n)} \} \\
|
||||
\text{SPACE}(g) & = \{ L(A) \divides A \text{ is an MTM with } \spc_A(n) \in \tco{g(n)} \} \\
|
||||
\text{DLOG} & = \text{SPACE}(\log_2(n)) \\
|
||||
\text{P} & = \bigcup_{c \in \N} \text{TIME}(n^c) \\
|
||||
\text{PSPACE} & = \bigcup_{c \in \N} \text{SPACE}(n^c) \\
|
||||
\text{EXPTIME} & = \bigcup_{d \in \N} \text{TIME}(2^{n^d})
|
||||
\end{align*}
|
||||
\end{definition}
|
||||
|
||||
\inlinelemma For any function $t : \N \rightarrow \R^+$, we have $\text{TIME}(t(n)) \subseteq \text{SPACE}(t(n))$.
|
||||
|
||||
A list of relationships for these classes:
|
||||
\rmvspace
|
||||
\begin{multicols}{2}
|
||||
\begin{itemize}
|
||||
\item $P \subseteq \text{PSPACE}$
|
||||
\item $\text{DLOG} \subseteq P$
|
||||
\item $\text{PSPACE} \subseteq \text{EXPTIME}$
|
||||
\item $\text{DLOG} \subseteq P \subseteq \text{PSPACE} \subseteq \text{EXPTIME}$
|
||||
\end{itemize}
|
||||
\end{multicols}
|
||||
|
||||
\drmvspace
|
||||
\begin{definition}[]{Space- and time-constructible}
|
||||
Let $s, t : \N \rightarrow \N$. $s$ is called \bi{space-constructible} if there exists a $1$-tape-TM $M$, such that
|
||||
\begin{enumerate}
|
||||
\item $\text{Space}_M(n) \leq s(n) \ \forall n \in \N$
|
||||
\item for each input $0^n$ with $n \in \N$, $M$ generates the word $0^{s(n)}$ on its memory tape and stops in $\qacc$
|
||||
\end{enumerate}
|
||||
|
||||
$t$ is called \bi{time-constructible}, if there exists an MTM $A$, such that
|
||||
\begin{enumerate}
|
||||
\item $\text{Time}_A(n) \in \tco{t(n)}$
|
||||
\item For each input $0^n$ with $n \in \N$, $A$ generates $0^{t(n)}$ on its first memory tape and stops in $\qacc$
|
||||
\end{enumerate}
|
||||
\end{definition}
|
||||
|
||||
\inlinelemma Let $s$ be space-constructible, $M$ an MTM with $\text{Space}_M(x) \leq s(|x|) \ \forall x \in L(M)$.
|
||||
Then there exists an MTM $A$ with $L(A) = L(M)$ and $\text{Space}_A(n) \leq s(n)$, i.e. we have
|
||||
$\text{Space}_A(y) \leq s(|y|) \ \forall y \in \Sigma_M$.
|
||||
|
||||
\inlinelemma Let $t$ be time-constructible, $M$ an MTM with $\text{Time}_M(x) \leq t(|x|) \ \forall x \in L(M)$.
|
||||
Then there exists an MTM $A$ with $L(A) = L(M)$ and $\text{Time}_A(n) \in \tco{t(n)}$
|
||||
|
||||
\inlinetheorem $\forall s : \N \rightarrow \N$ with $s(n) \geq \log_2(n)$, we have $\text{SPACE}(s(n)) \subseteq \bigcup_{c \in \N} \text{TIME}(c^{s(n)})$
|
||||
|
||||
\inlinetheorem Given $s_1, s_2: \N \rightarrow \N$ with properties $s_2(n) \geq \log_2(n)$, $s_2$ is space-constructible and $s_1(n) = o(s_2(n))$
|
||||
($s_2(n)$ grows asymptotically faster than $s_1$).
|
||||
Then we have $\text{SPACE}(s_1) \subsetneq \text{SPACE}(s_2)$
|
||||
|
||||
|
||||
\inlinetheorem Given $t_1, t_2: \N \rightarrow \N$ with the properties that $t_2$ is time-constructible and $t_1(n) \cdot \log_2(t_1(n)) = o(t_2(n))$.
|
||||
Then we have $\text{TIME}(t_1) \subsetneq \text{TIME}(t_2)$
|
||||
|
||||
|
||||
|
||||
% ────────────────────────────────────────────────────────────────────
|
||||
|
||||
\subsection{Non-deterministic measurements of complexity}
|
||||
\inlinedef For NMTM or NTM, the time complexity is the length of the shortest accepting calculation of $M$ on $x$ and the same applies to space complexity as well.
|
||||
The rest of the definition is equivalent to the one for deterministic TM and MTM.
|
||||
|
||||
\begin{definition}[]{Complexity classes}
|
||||
For all $f, g: \N \rightarrow \R^+$, we define
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
\text{NTIME}(f) & = \{ L(M) \divides M \text{ is an NMTM with } \tc_M(n) \in \tco{f(n)} \} \\
|
||||
\text{NSPACE}(g) & = \{ L(M) \divides M \text{ is an NMTM with } \spc_M(n) \in \tco{g(n)} \} \\
|
||||
\text{NLOG} & = \text{NSPACE}(\log_2(n)) \\
|
||||
\text{NP} & = \bigcup_{c \in \N} \text{NTIME}(n^c) \\
|
||||
\text{NPSPACE} & = \bigcup_{c \in \N} \text{NSPACE}(n^c)
|
||||
\end{align*}
|
||||
\end{definition}
|
||||
|
||||
\inlinelemma For all $t$ and $s$ with $s(n) \geq \log_2(n)$: $\text{NTIME}(t) \subseteq \text{NSPACE}(t)$ and
|
||||
$\text{NSPACE}(s) \subseteq \bigcup_{c \in \N} \text{NTIME}(c^{s(n)})$
|
||||
|
||||
For $t: \N \rightarrow \R^+$ and every space-constructible $s$ with $s(n) \geq \log_2(n)$, we have:
|
||||
\rmvspace
|
||||
\begin{multicols}{2}
|
||||
\begin{enumerate}
|
||||
\item $\text{TIME}(t) \subseteq \text{NTIME}(t)$
|
||||
\item $\text{SPACE}(t) \subseteq \text{NSPACE}(t)$
|
||||
\item $\text{NTIME}(s(n)) \subseteq \text{SPACE}(s(n)) \subseteq \bigcup_{c \in \N} \text{TIME}(c^{s(n)})$
|
||||
\item $\text{NP} \subseteq \text{PSPACE}$
|
||||
\item $\text{NSPACE}(s(n)) \subseteq \bigcup_{c \in \N} \text{TIME}(c^{s(n)})$
|
||||
\item $\text{NLOG} \subseteq P$
|
||||
\item $\text{NPSPACE} \subseteq \text{EXPTIME}$
|
||||
\item $\text{NSPACE}(s(n)) \subseteq \text{SPACE}(s(n)^2)$ (Savitch)
|
||||
\item $\text{PSPACE} = \text{NPSPACE}$
|
||||
\end{enumerate}
|
||||
\end{multicols}
|
||||
|
||||
\rmvspace
|
||||
If we combine some of the above results, we get:
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
\text{DLOG} \subseteq \text{NLOG} \subseteq P \subseteq NP \subseteq \text{PSPACE} \subseteq \text{EXPTIME}
|
||||
\end{align*}
|
||||
|
||||
|
||||
% ────────────────────────────────────────────────────────────────────
|
||||
|
||||
\subsection{Proof verification}
|
||||
\begin{definition}[]{$p$-Verifier}
|
||||
An MTM $A$ is a $p$-Verifier (for $p : \N \rightarrow \N$) and $V(A) = L$ for $L \subseteq \word$, if $A$ has the following properties and works on all inputs from $\word \times \wordbool$:
|
||||
\begin{enumerate}[label=\textit{(\roman*)}]
|
||||
\item $\text{Time}_A(w, x) \leq p(|w|)$ for each input $(w, x) \in \word \times \wordbool$
|
||||
\item $\forall w \in L, \exists x \in \wordbool$, such that $|x| \leq p(|w|)$ and $(w, x) \in L(A)$. $x$ is \bi{proof} of the claim $w \in L$
|
||||
\item $\forall y \notin L$ we have $(y, z) \notin L(A)$ for all $z \in \wordbool$
|
||||
\item If $p(n) \in \tco{n^k}, k \in \N$, then $A$ is a polynomial time verifier. The class of languages with polynomial time verifiers is
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
VP = \{ V(A) \divides A \text{ is a polynomial time verifier } \}
|
||||
\end{align*}
|
||||
\end{enumerate}
|
||||
\end{definition}
|
||||
|
||||
\inlinetheorem $VP = NP$
|
||||
|
||||
\subsection{NP-Completeness}
|
||||
\fancydef{Polynomial Reduction} $L_1 \leq_p L_2$, if there exists polynomial TM $A$ with $x \in L_1 \Leftrightarrow A(x) \in L_2$.
|
||||
$A$ is called a polynomial reduction of $L_1$ into $L_2$
|
||||
|
||||
\begin{definition}[]{$NP$-Hard and $NP$-Complete}
|
||||
A language $L$ is called $NP$-hard, if for all $L' \in NP$, we have $L' \leq_p L$
|
||||
|
||||
A language $L$ is called $NP$-complete, if it is $NP$-hard and $L \in NP$
|
||||
\end{definition}
|
||||
|
||||
\inlinelemma If $L \in P$ and $L$ is $NP$-hard, then $P = NP$
|
||||
|
||||
\fancytheorem{Cook's Theorem} $SAT$ is $NP$-complete
|
||||
|
||||
\inlinelemma If $L_1 \leq_p L_2$ and $L_1$ is $NP$-hard, then $L_2$ is $NP$-hard
|
||||
|
||||
A few languages commonly used to show $NP$-completeness:
|
||||
\begin{itemize}
|
||||
\item $SAT = \{ \Phi \divides \Phi \text{ is a satisfiable formula in CNF} \}$
|
||||
\item $3SAT = \{ \Phi \divides \Phi \text{ is a satisfiable formula in CNF with all clauses containing \textit{at most} three literals} \}$
|
||||
\item $\text{CLIQUE} = \{ (G, k) \divides G \text{ is an undirected graph that contains a $k$-clique } \}$
|
||||
\item $VC = \{ (G, k) \divides G \text{ is an undirected graph with a vertex cover of size $\leq k$ } \}$
|
||||
\item $SCP = \{ (X, \cS, k) \divides X \text{ has a set cover $\cC \subseteq \cS$ such that $|\cC| \leq k$ } \}$
|
||||
\item $DS = \{ (G, k) \divides G \text{ has a dominating set $D$ such that } |D| \leq k \}$
|
||||
\end{itemize}
|
||||
where a $k$-clique is a complete subgraph consisting of $k$ vertices in $G$, with $k \leq |V|$;
|
||||
where a subset $\cC \subseteq \cS$ is a \textit{set cover} of $X$ if $X = \bigcup_{S \in \cC} S$;
|
||||
where a \textit{dominating set} is a set $D \subseteq V$ such that for every vertex $v \in V$, $v \in D$ or there exists $w \in D$ such that $\{ v, w \} \in E$
|
||||
and where a vertex cover is any set $U \subseteq V$ where all edges $\{ u, v \} \in E$ have at least one endpoint $u, v \in U$
|
||||
|
||||
We have $SAT \leq_p \text{CLIQUE}$, $SAT \leq_p 3SAT$, $\text{CLIQUE} \leq_p VC$, $VC \leq_p SCP$ and $SCP \leq_p DS$.
|
||||
Logically, we also have $SAT \leq_p DS$, etc, since $\leq_p$ is transitive (in fact, all reductions that we covered are transitive)
|
||||
|
||||
Additionally, $\text{MAX-SAT}$ and $\text{MAX-CL}$, the problem to determine the maximum number of fulfillable clauses in a formula $\Phi$
|
||||
and the problem to determine the maximum clique, respectively, are $NP$-hard
|
||||
27
semester3/ti/compact/parts/06_grammars.tex
Normal file
27
semester3/ti/compact/parts/06_grammars.tex
Normal file
@@ -0,0 +1,27 @@
|
||||
\newsection
|
||||
\section{Grammars}
|
||||
\fancydef{Grammar} $G := (\Sigma_\text{N}, \Sigma_\text{T}, P, S)$
|
||||
|
||||
\begin{enumerate}[noitemsep]
|
||||
\item \bi{Non-Terminals} $\Sigma_\text{N}$ (Are used for the rules)
|
||||
\item \bi{Terminals} $\Sigma_\text{T}$ (The symbols at the end (i.e. only they can be remaining after the last derivation))
|
||||
\item \bi{Start symbol} $S \in \Sigma_\text{N}$
|
||||
\item \bi{Derivation rules} $P \subseteq \Sigma^*\Sigma_\text{N}\Sigma^* \times \Sigma^*$
|
||||
\end{enumerate}
|
||||
where $\Sigma_\text{N} \cap \Sigma_\text{T} = \emptyset$ and $\Sigma := \Sigma_\text{N} \cup \Sigma_\text{T}$
|
||||
|
||||
\begin{definition}[]{Types of grammars}
|
||||
\begin{enumerate}
|
||||
\item $G$ is a \bi{Type-0-Grammar} if it has no further restrictions.
|
||||
\item $G$ is a \bi{Type-1-Grammar} (or \bi{context-sensitive} (= Kontextsensitiv) Grammar) if no rule $\alpha \rightarrow \beta$ replaces a subword $\alpha$ with a shorter word $\beta$, i.e. $|\alpha| \leq |\beta|$ for all rules.
|
||||
\item $G$ is a \bi{Type-2-Grammar} (or \bi{context-free} (= Kontextfrei) Grammar) if all rules have the form $X \rightarrow \beta$ for a non-terminal $X$.
|
||||
\item $G$ is a \bi{Type-3-Grammar} (or \bi{regular} (= regulär) Grammar) if all rules have the form $X \rightarrow u$ or $X \rightarrow uY$
|
||||
\end{enumerate}
|
||||
\end{definition}
|
||||
A few examples to highlight what kind of derivation rules are allowed. The rules disallowed in $n$ are also disallowed in $n + 1$:
|
||||
\begin{enumerate}
|
||||
\item All kind of rules are allowed
|
||||
\item Rules like $X \rightarrow \lambda$ or $0Y1 \rightarrow 00$ are not allowed (they shorten the output)
|
||||
\item Rules like $aA \rightarrow Sb$ are not allowed, as they are not context-free (i.e. all rules have to be of form $X \rightarrow \ldots$)
|
||||
\item Rules like $S \rightarrow abbAB$ are not allowed, as two non-terminals appear
|
||||
\end{enumerate}
|
||||
BIN
semester3/ti/compact/ti-compact.pdf
Normal file
BIN
semester3/ti/compact/ti-compact.pdf
Normal file
Binary file not shown.
75
semester3/ti/compact/ti-compact.tex
Normal file
75
semester3/ti/compact/ti-compact.tex
Normal file
@@ -0,0 +1,75 @@
|
||||
\documentclass{article}
|
||||
|
||||
\input{~/projects/latex/dist/recommended.tex}
|
||||
|
||||
\usetikzlibrary{automata, positioning, arrows.meta}
|
||||
\newcommand{\hdelta}{\hat{\delta}}
|
||||
\newcommand{\qacc}{q_{\text{accept}}}
|
||||
\newcommand{\qrej}{q_{\text{reject}}}
|
||||
\newcommand{\ldiag}{L_{\text{diag}}}
|
||||
\newcommand{\lempty}{L_{\text{empty}}}
|
||||
\renewcommand{\tc}{\text{Time}}
|
||||
\newcommand{\spc}{\text{Space}}
|
||||
|
||||
\setup{Theoretical Computer Science - Compact}
|
||||
|
||||
\begin{document}
|
||||
\startDocument
|
||||
\usetcolorboxes
|
||||
|
||||
\vspace{2cm}
|
||||
\begin{Huge}
|
||||
\begin{center}
|
||||
TITLE PAGE COMING SOON
|
||||
\end{center}
|
||||
\end{Huge}
|
||||
|
||||
|
||||
\vspace{4cm}
|
||||
\begin{center}
|
||||
\begin{Large}
|
||||
``\textit{Sie können also alle C Programme in Kanonischer Ordnung aufzählen. Sollten Sie dies tun. Wahrscheinlich nicht. Was aber zählt ist, sie \textbf{können} es tun}''
|
||||
\end{Large}
|
||||
|
||||
\hspace{3cm} - Prof. Dr. Dennis Komm, 2025
|
||||
\end{center}
|
||||
|
||||
\vspace{3cm}
|
||||
\begin{center}
|
||||
HS2025, ETHZ\\[0.2cm]
|
||||
\begin{Large}
|
||||
Compact Summary of the book \color{MidnightBlue}\fbox{\href{https://link.springer.com/book/10.1007/978-3-658-06433-4}{Theoretische Informatik}}\color{black}
|
||||
\end{Large}\\[0.2cm]
|
||||
by Prof. Dr. Juraj Hromkovic
|
||||
\end{center}
|
||||
|
||||
|
||||
\newpage
|
||||
\printtoc{Orange}
|
||||
\section{Introduction}
|
||||
This summary aims to provide a simple, easy to understand and short overview over the topics covered, with approaches for proofs, important theorems and lemmas,
|
||||
as well as definitions.
|
||||
|
||||
It does not aim to serve as a full replacement for the book or my main summary, but as a supplement to both of them.
|
||||
|
||||
It also lacks some formalism and is only intended to give some intuition, six pages are really not enough for a formal and complete overview of the topic.
|
||||
|
||||
As general recommendations, try to substitute possibly ``weird'' definitions in multiple choice to see a definition from the book.
|
||||
|
||||
All content up to Chapter \ref{sec:reductions} is relevant for the midterm directly.
|
||||
|
||||
The content for the endterm exam as of HS2025 starts in Chapter \ref{sec:reductions}.
|
||||
All prior content is still relevant to the extent that you need an understanding of the concepts treated there
|
||||
|
||||
|
||||
|
||||
\input{parts/01_words-alphabets.tex}
|
||||
\input{parts/02_finite-automata.tex}
|
||||
\input{parts/03_turing-machines.tex}
|
||||
\input{parts/04_computability.tex}
|
||||
\input{parts/05_complexity.tex}
|
||||
\input{parts/06_grammars.tex}
|
||||
|
||||
|
||||
|
||||
\end{document}
|
||||
|
Before Width: | Height: | Size: 190 KiB After Width: | Height: | Size: 190 KiB |
|
Before Width: | Height: | Size: 442 KiB After Width: | Height: | Size: 442 KiB |
Reference in New Issue
Block a user