mirror of
https://github.com/janishutz/eth-summaries.git
synced 2026-01-11 19:48:27 +00:00
[NumCS] Finish quadrature, a few fixes
This commit is contained in:
@@ -1,33 +1,4 @@
|
||||
import numpy as np
|
||||
import scipy as sp
|
||||
|
||||
|
||||
def fastbroyd(x0, F, J, tol=1e-12, maxit=20):
    """Quasi-Newton (Broyden) solver for F(x) = 0 with a frozen Jacobian.

    The Jacobian J (evaluated at x0) is LU-factored exactly once; the
    rank-one Broyden updates are never formed explicitly but applied via
    the Sherman-Morrison-Woodbury formula, so every iteration costs one
    triangular solve plus O(k*n) vector work.

    Args:
        x0: initial guess (1D numpy array); the caller's array is not modified.
        F: callable returning the residual vector F(x).
        J: Jacobian matrix at x0.
        tol: terminate once the squared norm of the correction is <= tol.
        maxit: maximum number of iterations (also sizes the history buffers).

    Returns:
        Tuple (x, k): the final iterate and the number of iterations used.
    """
    x = x0.copy()                              # do not clobber the caller's x0
    lu_piv = sp.linalg.lu_factor(J)            # factor J once, reuse every step

    # First step is a plain Newton correction.
    corr = sp.linalg.lu_solve(lu_piv, F(x))
    corr_sq = np.dot(corr, corr)               # squared norm of the correction
    x -= corr
    residual = F(x)

    # History of all corrections and their squared norms; the SMW recursion
    # below needs the full sequence to apply the accumulated updates.
    corrections = np.zeros((maxit, len(x)))
    sq_norms = np.zeros(maxit)
    corrections[0] = corr
    sq_norms[0] = corr_sq
    k = 1                                      # number of the Broyden iteration

    while corr_sq > tol and k < maxit:
        # Solve with the frozen factorization, then fold in the stored
        # rank-one updates via Sherman-Morrison-Woodbury.
        w = sp.linalg.lu_solve(lu_piv, residual)
        for r in range(1, k):
            w += corrections[r] * (np.dot(corrections[r - 1], w)) / sq_norms[r - 1]

        z = np.dot(corr, w)
        corr = (1 + z / (corr_sq - z)) * w     # correction under the newest update
        corr_sq = np.dot(corr, corr)
        corrections[k] = corr
        sq_norms[k] = corr_sq

        x -= corr
        residual = F(x)
        k += 1

    return x, k                                # final iterate and iteration count
|
||||
def simpson(f, a, b, N):
    """Composite Simpson rule for the integral of f over [a, b].

    Uses N subintervals, i.e. 2N + 1 equidistant nodes. Exact for
    polynomials up to degree 3; error O(h^4) for smooth f.

    Args:
        f: vectorized callable (accepts numpy arrays and scalars).
        a, b: integration bounds.
        N: number of Simpson subintervals.

    Returns:
        The quadrature approximation of the integral.
    """
    x, h = np.linspace(a, b, 2 * int(N) + 1, retstep=True)
    # Weight pattern 1, 4, 2, 4, ..., 2, 4, 1: np.sum(f(x[::2])) counts every
    # even-index node (including both endpoints) once, so doubling it and
    # subtracting the endpoints gives weight 2 for interior even nodes and 1
    # at a and b. (The previous `+ f(x[0]) - f(x[-1])` double-counted f(a)
    # and dropped f(b) entirely.)
    I = h / 3.0 * (2.0 * np.sum(f(x[::2])) + 4.0 * np.sum(f(x[1::2])) - f(x[0]) - f(x[-1]))
    return I
|
||||
|
||||
Binary file not shown.
@@ -136,7 +136,9 @@ Moral of the story: Use descriptive variable names and do NOT use $t$, $tt$, $tt
|
||||
\newsection
|
||||
\section{Numerische Quadratur}
|
||||
\input{parts/02_quadrature/00_introduction.tex}
|
||||
\input{parts/02_quadrature/01_equidistant-nodes.tex}
|
||||
\input{parts/02_quadrature/01_equidistant-nodes/00_intro.tex}
|
||||
\input{parts/02_quadrature/01_equidistant-nodes/01_summed.tex}
|
||||
\input{parts/02_quadrature/01_equidistant-nodes/02_romberg-scheme.tex}
|
||||
\input{parts/02_quadrature/02_non-equidistant/00_gauss.tex}
|
||||
\input{parts/02_quadrature/02_non-equidistant/01_clenshaw-curtis.tex}
|
||||
\input{parts/02_quadrature/03_adaptive.tex}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
% │ AUTHOR: Janis Hutz<info@janishutz.com> │
|
||||
% └ ┘
|
||||
|
||||
\newsection
|
||||
\newsectionNoPB
|
||||
\subsection{Rechenaufwand}
|
||||
In NumCS wird die Anzahl elementarer Operationen wie Addition, Multiplikation, etc benutzt, um den Rechenaufwand zu beschreiben.
|
||||
Wie in Algorithmen und * ist auch hier wieder $\tco{\ldots}$ der Worst Case.
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
% ┌ ┐
|
||||
% │ Author: Robin Bacher │
|
||||
% └ ┘
|
||||
|
||||
\newsection
|
||||
\subsection{Äquidistante Punkte}
|
||||
\label{sec:equidistant-nodes}
|
||||
% TODO: If it is not in the lecture notes, include table from slide 41 of TA. Really handy
|
||||
@@ -0,0 +1,31 @@
|
||||
\newsection
|
||||
\subsection{Äquidistante Punkte}
|
||||
\label{sec:equidistant-nodes}
|
||||
Untenstehend eine Liste verschiedener Quadraturverfahren (Reminder: Ein Verfahren der Ordnung $2$ integriert konstante und lineare Funktionen exakt):
|
||||
|
||||
\begin{tables}{cccc}{Eigenschaft & Mittelpunkt & Trapez & Simpson}
|
||||
Knoten & 1 & 2 & 3 \\
|
||||
Ordnung & 2 & 2 & 4 \\
|
||||
Fehler & $\tco{h^2}$ & $\tco{h^2}$ & $\tco{h^4}$ \\
|
||||
Symmetrisch & Ja & Ja & Ja \\
|
||||
\end{tables}
|
||||
|
||||
\shade{gray}{Mittelpunkt-Regel} $\displaystyle Q^M(f; a, b) = (b - a) f\left( \frac{a + b}{2} \right)$.
|
||||
Gewicht: $\omega = b - a$
|
||||
|
||||
\shade{gray}{Trapez-Regel} $\displaystyle Q^T(f; a, b) = \frac{b - a}{2} (f(a) + f(b))$.
|
||||
Fehler: $\displaystyle E(n) = \left| -\frac{1}{12} (b - a)^3 f^{(2)}(\xi) \right| \text{ mit } \xi \in [a, b]$\\
|
||||
Fehlerabschätzung: $\displaystyle |E_n| \leq \frac{(b - a)^3}{12} ||f''||_\infty$.
|
||||
Gewichte: $\omega_1 = \omega_2 = \frac{b - a}{2}$
|
||||
|
||||
\shade{gray}{Simpson-Regel} $\displaystyle Q^S(f; a, b) = \frac{b - a}{6} \left( f(a) + 4f\left( \frac{a + b}{2} \right) + f(b) \right)$.
|
||||
Fehler: $\displaystyle E(n) = \left| -\frac{1}{90} \left( \frac{b - a}{2} \right)^5 f^{(4)}(\xi) \right| \text{ mit } \xi \in [a, b]$\\
|
||||
Fehlerabschätzung: $\displaystyle |E_n| \leq \frac{1}{90} \left( \frac{b - a}{2} \right)^5 ||f^{(4)}||_\infty$.
|
||||
Gewichte: $\frac{b - a}{6}, \frac{4(b - a)}{6}, \frac{b - a}{6}$
|
||||
|
||||
|
||||
\inlineremark Die Schranken für den Fehler erhält man aus den Lagrange-Polynomen vom Grad $n - 1$:
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
f \in \C^n([a, b]) \Rightarrow \left| \int_{a}^{b} f(t) \dx t - Q_n(f) \right| \leq \frac{1}{n!} (b - a)^{n + 1} ||f^{(n)}||_{L^\infty([a, b])}
|
||||
\end{align*}
|
||||
@@ -0,0 +1,43 @@
|
||||
\drmvspace
|
||||
\subsubsection{Summierte Quadratur}
|
||||
Mit Anwendung von Divide and Conquer kann die Präzision der Integration verbessert werden, man unterteilt dazu einfach das Integrationsintervall in viele kleine Intervalle:
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
\int_{a}^{b} f(x) \dx x = \sum_{i = 0}^{N - 1} \int_{x_i}^{x_{i + 1}} f(x) \dx x = \sum_{i = 0}^{N - 1} Q_n (f; x_i, x_{i + 1})
|
||||
\end{align*}
|
||||
|
||||
\drmvspace
|
||||
Im Folgenden ist $h = \frac{b - a}{N}$, $x_0 = a$, $x_i = x_0 + ih$ und $x_N = b$
|
||||
|
||||
\shade{gray}{Summierte Mittelpunkt-Regel}
|
||||
$\displaystyle I(f; a, b) \approx \sum_{i = 0}^{N - 1} h \cdot f \left( \frac{x_i + x_{i + 1}}{2} \right)$
|
||||
|
||||
\shade{gray}{Summierte Trapez-Regel}
|
||||
$\displaystyle I(f; a, b) \approx \sum_{i = 0}^{N - 1} \frac{h}{2} (f(x_i) + f(x_{i + 1})) = \frac{h}{2} \left( f(a) + 2 \sum_{i = 1}^{N - 1} f(x_i) + f(b) \right)$\\
|
||||
Fehler: $\displaystyle E(n) \leq \frac{h^2}{12} (b - a) \max_{x \in [a, b]} |f''(x)|$. Konvergiert exponentiell schnell (nicht nur mit $h^2$) bei periodischen, unendlich oft differenzierbaren Funktionen.
|
||||
Untenstehend eine Implementation der Trapez-Regel in Numpy
|
||||
|
||||
\rmvspace
|
||||
\begin{code}{python}
|
||||
def trapezoidal(f, a, b, N):
    # Composite trapezoidal rule on N equal subintervals of [a, b]:
    # h/2 * (f(a) + 2*sum of interior nodes + f(b)).
    nodes, step = np.linspace(a, b, int(N) + 1, retstep=True)
    interior = np.sum(f(nodes[1:-1]))
    return step / 2.0 * (f(nodes[0]) + 2.0 * interior + f(nodes[-1]))
|
||||
\end{code}
|
||||
|
||||
\newpage
|
||||
\shade{gray}{Summierte Simpson-Regel}
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
I(f; a, b) & \approx \frac{h}{6} \left( f(a) + 2 \sum_{i = 1}^{N - 1} f(x_i) + 4 \sum_{i = 1}^{N} f\left( \frac{x_{i - 1} + x_i}{2} \right) + f(b) \right) \\
|
||||
& = \frac{\tilde{h}}{3} \left( f(\tilde{x_0}) + 2 \sum_{i = 1}^{N - 1} f(\tilde{x}_{2i}) + 4 \sum_{i = 1}^{N} f(\tilde{x}_{2i - 1}) + f(\tilde{x}_{2N}) \right)
|
||||
\mediumhspace \text{ mit } \tilde{h} = \frac{h}{2} \text{, } \tilde{x}_{2i} = x_i \text{ und } \tilde{x}_{2i - 1} = \frac{x_{i - 1} + x_i}{2}
|
||||
\end{align*}
|
||||
|
||||
\drmvspace Untenstehend eine Implementation der Simpson-Regel
|
||||
\begin{code}{python}
|
||||
def simpson(f, a, b, N):
    """Composite Simpson rule with N subintervals (2N + 1 nodes)."""
    x, h = np.linspace(a, b, 2 * int(N) + 1, retstep=True)
    # Weights 1, 4, 2, 4, ..., 2, 4, 1: double the even-node sum and subtract
    # the endpoints once (the old `+ f(x[0]) - f(x[-1])` was incorrect).
    I = h / 3.0 * (2.0 * np.sum(f(x[::2])) + 4.0 * np.sum(f(x[1::2])) - f(x[0]) - f(x[-1]))
    return I
|
||||
\end{code}
|
||||
@@ -0,0 +1,45 @@
|
||||
\subsubsection{Romberg Schema}
|
||||
Für glatte Funktionen haben wir: $T (h) = I [f] + c_1 h^2 + c_2 h^4 + \ldots + c_p h^{2p} + \tco{h^{2p+2}}$
|
||||
|
||||
Die Idee des Romberg-Schemas ist es, die führenden Fehlerterme durch Linearkombinationen zu eliminieren
|
||||
|
||||
\bg{orange}{Schritt 1} Berechnung von $T(h)$ und $T(\frac{h}{2})$:
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
T(h) & = I + c_1 h^2 + c_2 h^4 + \ldots \\
|
||||
T\left( \frac{h}{2} \right) & = I + c_1 \frac{h^2}{4} + c_2 \frac{h^4}{16} + \ldots \\
|
||||
\end{align*}
|
||||
|
||||
\drmvspace
|
||||
\bg{orange}{Schritt 2} Linearkombination zur Elimination des $h^2$-Terms (Ordnung dann $4$):
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
R_{1, 1} = \frac{4 T(h / 2) - T(h)}{3} = I + c_2' h^4 + \ldots
|
||||
\end{align*}
|
||||
|
||||
\drmvspace
|
||||
\bg{orange}{Schritt 3} Wiederholen bis zur gewünschten Präzision mit der allgemeinen Rekursionsformel:
|
||||
\rmvspace
|
||||
\begin{align*}
|
||||
R_{l, k} = \frac{4^k R_{l, k - 1} - R_{l - 1, k - 1}}{4^k - 1}
|
||||
\end{align*}
|
||||
|
||||
\drmvspace
|
||||
Der Einfachheit halber können die Terme auch in das sogenannte ``Romberg-Tableau'' eingefüllt werden:
|
||||
\begin{table}[h!]
|
||||
\begin{center}
|
||||
\begin{tabular}[c]{c|cccc}
|
||||
k & 0 & 1 & 2 & 3\\
|
||||
\hline
|
||||
0 & $T(h)$ \\
1 & $T(h / 2)$ & $R_{1, 1}$ \\
2 & $T(h / 4)$ & $R_{2, 1}$ & $R_{2, 2}$ \\
3 & $T(h / 8)$ & $R_{3, 1}$ & $R_{3, 2}$ & $R_{3, 3}$ \\
|
||||
\end{tabular}
|
||||
\end{center}
|
||||
\end{table}
|
||||
Das Romberg-Schema konvergiert sehr schnell für glatte Funktionen.
|
||||
|
||||
|
||||
\subsubsection{Anwendung}
|
||||
In der Praxis verwendet man keine Newton-Cotes-Formeln höherer Ordnung mit äquidistanten Stützpunkten, da dort negative Gewichte auftreten und das Verfahren instabil wird.
|
||||
Reference in New Issue
Block a user