Commit 5b9a3cb0 authored by c7021101's avatar c7021101
Browse files

Update plots

parent 9c281fc3
import numpy as np
from funcs import reg, tikhonov, get_operator, critical_point
from funcs import reg, tikhonov, critical_point
import matplotlib.pyplot as plt
import tqdm
def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name):
U, sigma, Vh = np.linalg.svd(A)
if A.shape[0] < A.shape[1]:
sigma = np.concatenate((sigma, np.zeros(A.shape[1] - A.shape[0])))
idx = np.where(sigma <= 1e-14)[0]
kernel = Vh[idx, ...]
......@@ -29,7 +31,7 @@ def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name)
noise = np.random.normal(0, 1, ytrue.shape)
noise /= np.sqrt(np.sum(noise ** 2))
data = ytrue + delta * noise
tik = tikhonov(data, A, alpha=alpha(delta))
tik = tikhonov(data, A, alpha=alpha(delta), reg=reg)
xk, errk, gradk = critical_point(x0, func=tik, niter_newton=niter_newton, niter_grad=niter_grad, nu=nu)
X.append(xk)
......@@ -43,9 +45,6 @@ def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name)
# Approximate limit of sequence
if kernel.shape[0] > 0:
xlim = X[-1].copy()
for _ in range(100):
grad = kernel.T @ kernel @ reg(xlim, grad=True)
xlim = xlim - 1e-3 * grad
else:
xlim = xtrue
......@@ -71,4 +70,28 @@ def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name)
plt.close()
np.save(f"results/convergence_{name}", hist)
pass
if kernel.shape[0] > 0:
plt.plot(xtrue)
plt.plot(xlim)
plt.savefig(f"results/{name}_example.pdf", dpi=500)
plt.close()
# Calculate projection of gradient of regularizer at xlim onto kernel of A
g = reg(xlim, grad=True)
plt.semilogy( np.abs(kernel @ g))
plt.savefig(f"results/{name}_gradient.pdf", dpi=500)
plt.close()
if name == "inpainting":
# Plot entry of inpainting problem solution to show that not necessarily an R-minimizing solution
i = idx[0]
t0 = xlim[i]
ts = np.linspace(-3, 3, 1024)
xs = [reg(t) for t in ts]
plt.semilogy(ts, xs)
plt.semilogy(t0, reg(t0), 'o')
plt.savefig(f"results/{name}_non_global.pdf")
plt.close()
return hist
......@@ -46,7 +46,7 @@ def gradient_descent(x0, func, niter=100, nu=1e-1, gamma=0.9):
return temp, err, grad
def tikhonov(y, A, alpha=1e-2):
def tikhonov(y, A, alpha=1e-2, reg=reg):
def func(x, grad=False, hess=False):
if grad:
return A.T @ (A @ x - y) + alpha * reg(x, grad=True)
......@@ -61,17 +61,16 @@ def tikhonov(y, A, alpha=1e-2):
def get_operator(N, problem="inpainting"):
    """Build the N-column forward operator A for a test inverse problem.

    Parameters
    ----------
    N : int
        Signal length (number of columns of A).
    problem : str
        One of "inpainting" (random diagonal mask), "cumsum"
        (scaled cumulative-sum / lower-triangular operator),
        "difference" (scaled circular forward-difference operator),
        "deblur" (2-tap averaging, maps N samples to N//2),
        or anything else for a dense Gaussian random matrix.

    Returns
    -------
    numpy.ndarray
        The operator matrix; shape is (N//2, N) for "deblur" and
        (N, N) otherwise.
    """
    # Fixed seed so every call yields the same operator (reproducible masks).
    np.random.seed(42)
    if problem == "inpainting":
        # Zero out each diagonal entry (drop that sample) with probability p.
        # (Removed dead assignment `p = 0.1` left over from a merge/diff.)
        p = 0.5
        A = np.eye(N)
        for i in range(N):
            if np.random.uniform() <= p:
                A[i, i] = 0
    elif problem == "cumsum":
        # Lower-triangular ones matrix = discrete cumulative sum, scaled by 1/N.
        A = np.tril(np.ones((N, N))) / N
    elif problem == "difference":
        # Forward difference x[i] - x[i+1]; the wrap-around entry makes it circular.
        A = np.eye(N) - np.eye(N, k=1)
        A[N-1, 0] = -1
        A /= N
    elif problem == "deblur":
        # Average adjacent pairs: maps length-N input to length-N//2 output.
        w = np.array([0.5, 0.5])
        A = np.kron(np.eye(N//2), w)
    else:
        # Fallback: dense i.i.d. standard-normal matrix.
        A = np.random.normal(0, 1, (N, N))
    return A
......@@ -82,10 +81,3 @@ def critical_point(x0, func, niter_grad=500, nu=0.5, niter_newton=20):
temp, err, grad = gradient_descent(temp, func, niter=niter_grad, nu=nu)
temp, errnewton, gradnewton = newton(temp, func, niter=niter_newton)
return temp, err + errnewton, grad + gradnewton
def fredholm_matrix(func, s, t):
    """Discretize a Fredholm kernel into a matrix.

    Evaluates ``func(s[i], t)`` for every collocation point in ``s`` and
    stacks the results row by row, yielding a matrix of shape
    ``(len(s), len(t))``.
    """
    matrix = np.zeros((len(s), len(t)))
    # Fill one row per s-sample; assignment broadcasts func's result
    # across the row, so scalar-valued kernels are handled too.
    for row, si in enumerate(s):
        matrix[row, ...] = func(si, t)
    return matrix
......@@ -15,29 +15,28 @@ def main(N=512, niter_grad=5000, niter_newton=50, nu=0.5):
x0 = np.zeros(N)
for problem in problems:
print(x0)
A = get_operator(N=N, problem=problem)
ytrue = A @ xtrue
ydelta = ytrue + np.random.normal(0, 1, N) * 0.001
ydelta = ytrue + np.random.normal(0, 1, ytrue.shape) * 0.001
stability(xtrue=xtrue,
ytrue=ytrue,
ydelta=ydelta,
x0=x0,
A=A,
niter_newton=niter_newton,
niter_grad=niter_grad,
nu=nu,
name=problem)
convergence(xtrue=xtrue,
ytrue=ytrue,
ydelta=ydelta,
x0=x0,
A=A,
niter_newton=niter_newton,
niter_grad=niter_grad,
nu=nu,
name=problem)
hist_stability = stability(xtrue=xtrue,
ytrue=ytrue,
ydelta=ydelta,
x0=x0,
A=A,
niter_newton=niter_newton,
niter_grad=niter_grad,
nu=nu,
name=problem)
hist_convergence = convergence(xtrue=xtrue,
ytrue=ytrue,
ydelta=ydelta,
x0=x0,
A=A,
niter_newton=niter_newton,
niter_grad=niter_grad,
nu=nu,
name=problem)
if __name__ == '__main__':
......
import numpy as np
from funcs import critical_point, tikhonov, get_operator
from funcs import critical_point, tikhonov, reg
import matplotlib.pyplot as plt
import tqdm
......@@ -21,7 +21,7 @@ def stability(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name):
noise = np.random.normal(0, 1, ytrue.shape)
noise /= np.sqrt(np.sum(noise ** 2))
data = ydelta + delta * noise
tik = tikhonov(data, A, alpha=alpha)
tik = tikhonov(data, A, alpha=alpha, reg=reg)
xk, errk, gradk = critical_point(x0, func=tik, niter_newton=niter_newton, niter_grad=niter_grad, nu=nu)
X.append(xk)
......@@ -47,4 +47,4 @@ def stability(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name):
plt.close()
np.save(f"results/stability_{name}", hist)
pass
return hist
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment