Commit befb266d authored by c7021101

Change parameters

parent 5b9a3cb0
--- a/convergence.py
+++ b/convergence.py
@@ -3,8 +3,11 @@ from funcs import reg, tikhonov, critical_point
 import matplotlib.pyplot as plt
 import tqdm
+plt.ioff()
 
 
 def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name):
     # Calculate kernel of A
     U, sigma, Vh = np.linalg.svd(A)
     if A.shape[0] < A.shape[1]:
         sigma = np.concatenate((sigma, np.zeros(A.shape[1] - A.shape[0])))
@@ -12,7 +15,7 @@ def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name)
     kernel = Vh[idx, ...]
     DELTA = [1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11, 1e-12, 1e-13, 1e-14, 1e-16]
-    Q = [0.5, 1, 1.5]
+    Q = [1, 1.5]
     hist = {
         "delta": DELTA[:-1],
         "q": Q,
@@ -35,13 +38,16 @@ def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name)
             xk, errk, gradk = critical_point(x0, func=tik, niter_newton=niter_newton, niter_grad=niter_grad, nu=nu)
             X.append(xk)
+            plt.semilogy(gradk)
             temp[delta] = {
                 "x": xk,
                 "err": errk,
                 "grad": gradk,
                 "data": data
             }
+        plt.savefig(f"results/{name}_gradient_size_{q}.pdf")
+        plt.close()
         # Approximate limit of sequence
         if kernel.shape[0] > 0:
             xlim = X[-1].copy()
@@ -55,9 +61,9 @@ def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name)
         hist[q] = temp
     # Plot convergence in signal space
     plt.ioff()
     for q in Q:
-        plt.loglog(hist["delta"], hist[q]["dist"], label=fr'$\alpha^{ {q}}$')
+        plt.loglog(hist["delta"], hist[q]["dist"], label=fr'$\alpha(\delta) = \delta^{ {q}}$')
     plt.legend()
     plt.savefig(f"results/convergence_xerror_{name}.pdf", dpi=500)
     plt.close()
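
The `{ {q}}` spelling in the label line is easy to misread, so here is a minimal standalone check of what it produces (`q = 1.5` is just for illustration):

```python
# In an f-string, "{ {q}}" is one replacement field whose expression is the
# one-element set {q}; str({1.5}) == "{1.5}", which supplies the literal
# braces LaTeX needs around the exponent. "{{{q}}}" is the more
# conventional spelling of the same thing.
q = 1.5
label = fr'$\alpha(\delta) = \delta^{ {q}}$'
assert label == r'$\alpha(\delta) = \delta^{1.5}$'
```
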
@@ -79,7 +85,7 @@ def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name)
     # Calculate projection of gradient of regularizer at xlim onto kernel of A
     g = reg(xlim, grad=True)
-    plt.semilogy( np.abs(kernel @ g))
+    plt.semilogy(np.abs(kernel @ g))
     plt.savefig(f"results/{name}_gradient.pdf", dpi=500)
     plt.close()
......
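
A note on the last hunk above (a sketch of the standard first-order argument, assuming `xlim` really approximates a minimum-`reg` solution of the noise-free equation; `g` and `kernel` are the diff's own variables):

```latex
% Lagrange condition: if xlim minimizes reg(x) subject to A x = y, then
\[
    \nabla \mathrm{reg}(x_{\mathrm{lim}}) \in \mathcal{R}(A^{*}) = \ker(A)^{\perp},
\]
% so the coefficients of g in the ker(A) basis Vh[idx, ...] -- the vector
% kernel @ g plotted by the hunk above -- should be numerically zero.
```
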
--- a/funcs.py
+++ b/funcs.py
@@ -26,13 +26,14 @@ def newton(x0, func, niter=10):
     for _ in range(niter):
         g = func(temp, grad=True)
         H = func(temp, hess=True)
-        temp = temp - np.linalg.inv(H) @ g
+        v = np.linalg.solve(H, g)
+        temp = temp - v
         err.append(func(temp))
         grad.append(np.sum(func(temp, grad=True) ** 2))
     return temp, err, grad
 
 
-def gradient_descent(x0, func, niter=100, nu=1e-1, gamma=0.9):
+def gradient_descent(x0, func, niter=100, nu=1e-1, gamma=0.95):
     temp = x0.copy()
     v0 = np.zeros_like(temp)
     err = [func(temp)]
......
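
Two notes on this hunk. `np.linalg.solve(H, g)` replacing the explicit inverse is the standard fix: one LU-based solve does roughly a third of the flops of forming `H^{-1}` and loses less accuracy when `H` is ill-conditioned. The sketch below restates both variants; the heavy-ball update is an assumption inferred from the `v0 = np.zeros_like(temp)` buffer, since the loop body of `gradient_descent` is collapsed in this diff.

```python
import numpy as np

def newton_step(x, g, H):
    # New variant: solve H v = g directly (one LU factorization, ~2n^3/3
    # flops) instead of forming H^{-1} (~2n^3 flops) and multiplying.
    return x - np.linalg.solve(H, g)

def newton_step_inv(x, g, H):
    # Old variant, kept for comparison; numerically worse when H is
    # ill-conditioned, which Hessians of Tikhonov functionals with tiny
    # alpha tend to be.
    return x - np.linalg.inv(H) @ g

def momentum_step(x, v, grad, nu=1e-1, gamma=0.95):
    # Hypothetical heavy-ball update consistent with the v0 buffer above;
    # gamma is the coefficient this commit raises from 0.9 to 0.95.
    v = gamma * v + nu * grad
    return x - v, v
```
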
--- a/main.py
+++ b/main.py
@@ -3,18 +3,22 @@ import numpy as np
 from funcs import get_operator
 from stability import stability
 from convergence import convergence
+import matplotlib
+matplotlib.rc('font', **{'size': 15})
 
 
-def main(N=512, niter_grad=5000, niter_newton=50, nu=0.5):
+def main(N=512, niter_grad=10000, niter_newton=200, nu=1):
     problems = ["inpainting", "cumsum"]
     # Set up data
     ts = np.linspace(-1, 1, N)
     f = lambda t: np.exp(-t ** 2) * np.cos(t) * (t - 0.5) ** 2 + np.sin(t ** 2)
     xtrue = f(ts)
-    x0 = np.zeros(N)
+    x0 = np.ones(N) * 0
     for problem in problems:
+        nu = 2 if problem == "cumsum" else 1
         A = get_operator(N=N, problem=problem)
         ytrue = A @ xtrue
         ydelta = ytrue + np.random.normal(0, 1, ytrue.shape) * 0.001
......
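
`get_operator` is not part of this diff, so the following is only a hypothetical reconstruction of the two forward maps from their names and from the wide-matrix check `A.shape[0] < A.shape[1]` in convergence.py; `keep` and `seed` are made-up parameters:

```python
import numpy as np

def get_operator_sketch(N=512, problem="inpainting", keep=0.5, seed=0):
    """Hypothetical reconstruction of get_operator -- not the repo's code."""
    if problem == "inpainting":
        # Observe a random subset of samples: a row-subsampled identity,
        # making A underdetermined (ker(A) is nontrivial).
        rng = np.random.default_rng(seed)
        rows = np.sort(rng.choice(N, size=int(keep * N), replace=False))
        return np.eye(N)[rows]
    if problem == "cumsum":
        # Discrete cumulative sum: (A @ x)[i] = sum_{j <= i} x[j],
        # a lower-triangular matrix of ones (injective but ill-conditioned).
        return np.tril(np.ones((N, N)))
    raise ValueError(f"unknown problem: {problem}")
```
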
--- a/stability.py
+++ b/stability.py
@@ -3,6 +3,8 @@ from funcs import critical_point, tikhonov, reg
 import matplotlib.pyplot as plt
 import tqdm
+plt.ioff()
 
 
 def stability(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name):
     DELTA = [1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11, 1e-12, 1e-13, 1e-14, 0]
@@ -25,6 +27,8 @@ def stability(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name):
             xk, errk, gradk = critical_point(x0, func=tik, niter_newton=niter_newton, niter_grad=niter_grad, nu=nu)
             X.append(xk)
+            plt.semilogy(gradk)
             temp[delta] = {
                 "x": xk,
                 "err": errk,
@@ -38,8 +42,10 @@ def stability(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name):
         temp["dist"] = dist
         hist[alpha] = temp
+        plt.savefig(f"results/{name}_gradient_size_{alpha}.pdf")
+        plt.close()
     # Plot results
     plt.ioff()
     for al in ALPHA:
         plt.loglog(hist["delta"], hist[al]["dist"], label=fr'$\alpha = {al}$')
     plt.legend()
......
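
For contrast with convergence.py (same classical-theory caveat as above): here each `alpha` is held fixed while the noise level shrinks, so the loglog curves probe stability, i.e. continuous dependence on the data, rather than convergence to the exact solution:

```latex
% Stability of Tikhonov regularization: for fixed alpha,
\[
    \| x_{\alpha}^{\delta} - x_{\alpha} \| \to 0 \qquad (\delta \to 0),
\]
% which is presumably why DELTA here ends with the noise-free value 0,
% giving the reference point x_alpha, whereas convergence.py couples the
% parameter to the noise via alpha(delta) = delta^q.
```
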