diff --git a/semester3/numcs/numcs-summary.pdf b/semester3/numcs/numcs-summary.pdf index 1e0ee4f..4a061b9 100644 Binary files a/semester3/numcs/numcs-summary.pdf and b/semester3/numcs/numcs-summary.pdf differ diff --git a/semester3/numcs/parts/03_zeros/06_newton-nd.tex b/semester3/numcs/parts/03_zeros/06_newton-nd.tex index 5edb47d..8598bb5 100644 --- a/semester3/numcs/parts/03_zeros/06_newton-nd.tex +++ b/semester3/numcs/parts/03_zeros/06_newton-nd.tex @@ -16,14 +16,13 @@ Wichtig ist dabei, dass wir \bi{niemals} das Inverse der Jacobi-Matrix (oder irg sondern immer das Gleichungssystem $As = b$ lösen sollten, da dies effizienter ist: \begin{code}{python} -def newton(x, F, DF, tol=1e-12, maxit=50): - x = np.atleast_2d(x) # ’solve’ erwartet x als 2-dimensionaler numpy array - # Newton Iteration - for _ in range(maxit): - s = np.linal.solve(DF(x), F(x)) +def newton_2d(x: np.ndarray, F, DF, tol=1e-12, maxIter=50): + """ Newton method in 2d using Jacobi Matrix of F""" + for i in range(maxIter): + s = np.linalg.solve(DF(x[0], x[1]), F(x[0], x[1])) x -= s - if np.linalg.norm(s) < tol * np.linalg.norm(x): - return x + if np.linalg.norm(s) < tol * np.linalg.norm(x): return x, i + return x, maxIter \end{code} Wollen wir aber garantiert einen Fehler kleiner als unsere Toleranz $\tau$ können wir das Abbruchkriterium diff --git a/semester3/numcs/parts/03_zeros/08_quasi-newton.tex b/semester3/numcs/parts/03_zeros/08_quasi-newton.tex index dba29ff..526e260 100644 --- a/semester3/numcs/parts/03_zeros/08_quasi-newton.tex +++ b/semester3/numcs/parts/03_zeros/08_quasi-newton.tex @@ -20,33 +20,36 @@ Die Implementierung erzielt man folgendermassen mit der \bi{Sherman-Morrison-Woo Das Broyden-Quasi-Newton-Verfahren konvergiert langsamer als das Newton-Verfahren, aber schneller als das vereinfachte Newton-Verfahren. 
(\texttt{sp} ist \texttt{Scipy} und \texttt{np} logischerweise \texttt{Numpy} im untenstehenden code) \begin{code}{python} -def fastbroyd(x0, F, J, tol=1e-12, maxit=20): - x = x0.copy() # make sure we do not change the iput - lup = sp.linalg.lu_factor(J) # LU decomposition of J - s = sp.linalg.lu_solve(lup, F(x)) # start with a Newton corection - sn = np.dot(s, s) # squared norm of the correction +def fast_broyden(x0: np.ndarray, F, J, tol=1e-12, maxIter=20): + x = x0.copy() + lup = lu_factor(J) + + s = lu_solve(lup, F(x)) + sn = np.dot(s, s) x -= s - f = F(x) # start with a full Newton step - dx = np.zeros((maxit, len(x))) # containers for storing corrections s and their sn: - dxn = np.zeros(maxit) - k = 0 - dx[k] = s - dxn[k] = sn - k += 1 # the number of the Broyden iteration - - # Broyden iteration - while sn > tol and k < maxit: - w = sp.linalg.lu_solve(lup, f) # f = F (actual Broyden iteration x) - # Using the Sherman-Morrison-Woodbury formel + + # Book keeping, for Broyden Update + dx = np.zeros((maxIter, len(x))) + dxn = np.zeros(maxIter) + dx[0] = s + dxn[0] = sn + k = 1 + + while sn > tol and k < maxIter: + w = lu_solve(lup, F(x)) # Simplified Newton Update + + # Apply Broyden correction (Sherman-Morrison-Woodbury formula) for r in range(1, k): - w += dx[r] * (np.dot(dx[r - 1], w)) / dxn[r - 1] - z = np.dot(s, w) - s = (1 + z / (sn - z)) * w - sn = np.dot(s, s) - dx[k] = s - dxn[k] = sn - x -= s - f = F(x) - k += 1 # update x and iteration number k - return x, k # return the final value and the numbers of iterations needed + w += dx[r] * np.dot(dx[r-1], w) / dxn[r-1] + z = np.dot(s, w) + s = (1 + z/(sn-z)) * w + x -= s # Apply the iteration + + # Book keeping again + sn = np.dot(s, s) + dx[k] = s + dxn[k] = sn + k += 1 + + return x, k \end{code}