mirror of
https://github.com/janishutz/eth-summaries.git
synced 2026-03-14 10:50:05 +01:00
[NumCS] Add code (Gauss-Newton)
This commit is contained in:
Binary file not shown.
@@ -20,60 +20,72 @@ Die Iterationsvorschrift ist gegeben durch:
|
|||||||
\end{align*}
|
\end{align*}
|
||||||
|
|
||||||
\begin{code}{python}
|
\begin{code}{python}
|
||||||
import numpy as np
|
def newton(x: np.ndarray, GF, HF, tol=1e-6, maxIter=50):
    """Newton's method for minimization; requires gradient GF and Hessian HF.

    Iterates x_{k+1} = x_k - HF(x_k)^{-1} GF(x_k) until the step is small
    relative to the iterate, or maxIter steps were taken.

    Parameters:
        x: start vector (not modified; a new array is returned)
        GF: callable returning the gradient at x
        HF: callable returning the Hessian matrix at x
        tol: relative step-size tolerance (was mistyped as 1e1-6 == 4.0,
             which effectively disabled the iteration loop)
        maxIter: iteration cap

    Returns:
        (x, k): final iterate and number of Newton steps performed.
    """
    # First step is taken unconditionally (do-while structure).
    s = np.linalg.solve(HF(x), GF(x))
    # Rebind instead of `x -= s` so the caller's array is left untouched
    # (consistent with gauss_newton below).
    x = x - s
    k = 1
    # Stop when the step is small relative to the current iterate.
    while np.linalg.norm(s) > tol * np.linalg.norm(x) and k < maxIter:
        s = np.linalg.solve(HF(x), GF(x))
        x = x - s
        k += 1
    return x, k
|
||||||
|
\end{code}
|
||||||
|
|
||||||
def gauss_newton(x: np.ndarray, F, DF, tol=1e-6, maxIter=50):
    """Gauss-Newton iteration for nonlinear least squares; needs 'only' the Jacobian.

    Each step solves the linearized least-squares problem DF(x) s = F(x)
    and updates x <- x - s, until the step is small relative to x or
    maxIter steps were taken. The start vector has to be chosen intelligently.

    Parameters:
        x: start vector (not modified; a new array is returned)
        F: callable returning the residual vector at x
        DF: callable returning the Jacobian matrix at x
        tol: relative step-size tolerance
        maxIter: iteration cap

    Returns:
        (x, k): final iterate and number of steps performed.
    """
    # rcond=None opts into the current NumPy default and silences the
    # FutureWarning that lstsq emits without it.
    s = np.linalg.lstsq(DF(x), F(x), rcond=None)[0]
    x = x - s
    k = 1
    # Every iteration updates x by the least-squares step s.
    while np.linalg.norm(s) > tol * np.linalg.norm(x) and k < maxIter:
        s = np.linalg.lstsq(DF(x), F(x), rcond=None)[0]
        x = x - s
        k += 1
    return x, k
|
||||||
\end{code}
|
\end{code}
|
||||||
|
|
||||||
Der Vorteil ist, dass die zweite Ableitung nicht benötigt wird, jedoch ist die Konvergenzordnung niedriger ($p \leq 2$)
|
||||||
|
|
||||||
\newpage
|
\newpage
|
||||||
\setLabelNumber{all}{3}
|
\setLabelNumber{all}{3}
|
||||||
\inlineex Wir haben zwei Modellfunktionen, $F_1(t) = a_1 + b_1 e^{-c_1 t}$ und $F_2(t) = a_2 - b_2 e^{-c_2 t}$. ($F_1$ ist ein Heizvorgang, $F_2$ ist ein Abkühlvorgang).
|
||||||
Untenstehender Code berechnet die Lösung des nichtlinearen Ausgleichsproblems
|
||||||
\begin{code}{python}
|
\begin{code}{python}
|
||||||
import numpy as np
|
# Data given
|
||||||
|
|
||||||
# Measurement times (every 5 time units) and the measured temperatures
# for the heating and cooling experiments.
t = np.arange(0, 30, 5); n = len(t)
heat = np.array([24.34, 18.93, 17.09, 16.27, 15.97, 15.91])
cool = np.array([9.66, 18.8, 22.36, 24.07, 24.59, 24.91])

# Residual functions to be minimized: model value minus measured data.
# a = (a_i, b_i, c_i) are the model parameters.
F_1 = lambda a: a[0] + a[1] * np.exp(-a[2] * t) - heat
F_2 = lambda a: a[0] - a[1] * np.exp(-a[2] * t) - cool
|
||||||
|
|
||||||
# define the corresponding Jacobi matrices
|
|
||||||
def J_1(a):
    """Jacobian of F_1 (heating model a0 + a1*exp(-a2*t)) w.r.t. a.

    Relies on the module-level measurement times ``t`` (length ``n``).
    Returns an (n, 3) matrix; vectorized instead of the row-by-row loop.
    """
    e = np.exp(-t * a[2])  # shared factor exp(-c*t), one entry per time point
    # Columns: dF/da0 = 1, dF/da1 = e, dF/da2 = -t*a1*e
    return np.column_stack((np.ones(n), e, -t * a[1] * e))
|
||||||
|
|
||||||
def J_2(a):
    """Jacobian of F_2 (cooling model a0 - a1*exp(-a2*t)) w.r.t. a.

    Relies on the module-level measurement times ``t`` (length ``n``).
    Returns an (n, 3) matrix; vectorized instead of the row-by-row loop.
    """
    e = np.exp(-t * a[2])  # shared factor exp(-c*t), one entry per time point
    # Columns: dF/da0 = 1, dF/da1 = -e, dF/da2 = +t*a1*e (sign flipped vs J_1)
    return np.column_stack((np.ones(n), -e, t * a[1] * e))
|
||||||
|
|
||||||
# Start vectors (guessed; Gauss-Newton only converges locally, so a poor
# guess can prevent convergence).
x_1 = np.array([10, 5, 0])
x_2 = np.array([30, 10, 0])

# Fit the coefficients of both models via Gauss-Newton (defined above).
a_1, it_1 = gauss_newton(x_1, F_1, J_1)
a_2, it_2 = gauss_newton(x_2, F_2, J_2)

# The final fitted model functions found via Gauss-Newton.
f_1 = lambda x: a_1[0] + a_1[1] * np.exp(-a_1[2] * x)
f_2 = lambda x: a_2[0] - a_2[1] * np.exp(-a_2[2] * x)
|
||||||
\end{code}
|
\end{code}
|
||||||
|
|||||||
Reference in New Issue
Block a user