Commit 9c281fc3 authored by c7021101

Add main file

parent 4cd2e11b
convergence.py (as of this commit):

import numpy as np
from funcs import reg, tikhonov, get_operator, critical_point
import matplotlib.pyplot as plt
import tqdm
problem = "cumsum"
N = 512
niter_gradient = 5000
niter_newton = 50
nu = 1
A = get_operator(N, problem=problem)
U, sigma, Vh = np.linalg.svd(A)
idx = np.where(sigma <= 1e-14)[0]
kernel = Vh[idx, ...]
# Set up data
ts = np.linspace(-1, 1, N)
f = lambda t: np.exp(-t ** 2) * np.cos(t) * (t - 0.5) ** 2 + np.sin(t ** 2)
def convergence(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name):
U, sigma, Vh = np.linalg.svd(A)
idx = np.where(sigma <= 1e-14)[0]
kernel = Vh[idx, ...]
xtrue = f(ts)
ytrue = A @ xtrue
    # Noise levels; the smallest one (1e-16) is only used to approximate the limit point.
    DELTA = [1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11, 1e-12, 1e-13, 1e-14, 1e-16]
    # Exponents of the parameter-choice rule alpha(delta) = delta ** q.
    Q = [0.5, 1, 1.5]
    hist = {
        "delta": DELTA[:-1],
        "q": Q,
        "xtrue": xtrue,
        "ytrue": ytrue,
        "x0": x0
    }
    for q in tqdm.tqdm(Q):
        alpha = lambda delta: delta ** q
        temp = {}
        X = []
        for delta in DELTA:
            # Unit-norm noise scaled to noise level delta.
            noise = np.random.normal(0, 1, ytrue.shape)
            noise /= np.sqrt(np.sum(noise ** 2))
            data = ytrue + delta * noise
            # Minimise the Tikhonov functional for this noise level and alpha(delta).
            tik = tikhonov(data, A, alpha=alpha(delta))
            xk, errk, gradk = critical_point(x0, func=tik, niter_newton=niter_newton,
                                             niter_grad=niter_grad, nu=nu)
            X.append(xk)
            temp[delta] = {
                "x": xk,
                "err": errk,
                "grad": gradk,
                "data": data
            }
        # Approximate the limit of the sequence: starting from the reconstruction for the
        # smallest delta, descend the regulariser along the kernel of A.
        if kernel.shape[0] > 0:
            xlim = X[-1].copy()
            for _ in range(100):
                grad = kernel.T @ kernel @ reg(xlim, grad=True)
                xlim = xlim - 1e-3 * grad
        else:
            xlim = xtrue

        # Distance to the limit point and residual in data space, per noise level.
        temp["xlim"] = xlim
        temp["dist"] = [np.sqrt(np.sum((x - xlim) ** 2)) for x in X[:-1]]
        temp["dataerr"] = [np.sqrt(np.sum((A @ x - ytrue) ** 2)) for x in X[:-1]]
        hist[q] = temp
    # Plot convergence in signal space.
    plt.ioff()
    for q in Q:
        plt.loglog(hist["delta"], hist[q]["dist"], label=fr'$\alpha(\delta) = \delta^{{{q}}}$')
    plt.legend()
    plt.savefig(f"results/convergence_xerror_{name}.pdf", dpi=500)
    plt.close()

    # Plot convergence in data space.
    for q in Q:
        plt.loglog(hist["delta"], hist[q]["dataerr"], label=fr'$\alpha(\delta) = \delta^{{{q}}}$')
    plt.legend()
    plt.savefig(f"results/convergence_dataerror_{name}.pdf", dpi=500)
    plt.close()

    np.save(f"results/convergence_{name}", hist)
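
For reference: assuming tikhonov builds the usual Tikhonov functional with the regulariser R implemented by reg (funcs is not part of this diff, so the exact weighting is an assumption), the experiment above presumably tracks, under the a-priori parameter-choice rule coded as alpha = lambda delta: delta ** q,

    T_\alpha^\delta(x) = \|Ax - y^\delta\|_2^2 + \alpha R(x),
    \qquad \alpha(\delta) = \delta^q, \quad q \in \{0.5, 1, 1.5\},

where hist[q]["dist"] approximates the reconstruction error \|x_{\alpha(\delta)}^\delta - x^\dagger\|_2 against the estimated limit point x^\dagger, and hist[q]["dataerr"] the data-space residual \|A x_{\alpha(\delta)}^\delta - y\|_2.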
main.py (added in this commit):

import os

import numpy as np

from funcs import get_operator
from stability import stability
from convergence import convergence


def main(N=512, niter_grad=5000, niter_newton=50, nu=0.5):
    problems = ["inpainting", "cumsum"]

    # Set up the ground-truth signal.
    ts = np.linspace(-1, 1, N)
    f = lambda t: np.exp(-t ** 2) * np.cos(t) * (t - 0.5) ** 2 + np.sin(t ** 2)
    xtrue = f(ts)
    x0 = np.zeros(N)

    for problem in problems:
        A = get_operator(N=N, problem=problem)
        ytrue = A @ xtrue
        ydelta = ytrue + np.random.normal(0, 1, N) * 0.001
        stability(xtrue=xtrue, ytrue=ytrue, ydelta=ydelta, x0=x0, A=A,
                  niter_newton=niter_newton, niter_grad=niter_grad, nu=nu,
                  name=problem)
        convergence(xtrue=xtrue, ytrue=ytrue, ydelta=ydelta, x0=x0, A=A,
                    niter_newton=niter_newton, niter_grad=niter_grad, nu=nu,
                    name=problem)


if __name__ == '__main__':
    if not os.path.exists("results/"):
        os.mkdir("results")
    np.random.seed(42)
    main()
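
Running python main.py writes the plots and the raw histories to results/. The histories are plain dictionaries stored with np.save, so loading them back needs allow_pickle; a minimal snippet (the file name follows the problem names used above, and np.save appends ".npy"):

import numpy as np

# Load the convergence history for the "cumsum" problem.
hist = np.load("results/convergence_cumsum.npy", allow_pickle=True).item()
print(hist["delta"])      # noise levels delta that were plotted
print(hist[0.5]["dist"])  # distances to the limit point for alpha(delta) = delta**0.5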
stability.py (as of this commit):

import numpy as np
from funcs import critical_point, tikhonov, get_operator
import matplotlib.pyplot as plt
import tqdm


def stability(xtrue, ytrue, ydelta, x0, A, niter_newton, niter_grad, nu, name):
    # Additional noise levels; delta = 0 gives the unperturbed reference reconstruction.
    DELTA = [1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11, 1e-12, 1e-13, 1e-14, 0]
    ALPHA = [1e-2, 1e-3, 1e-4]
    hist = {
        "delta": DELTA[:-1],
        "alpha": ALPHA,
        "xtrue": xtrue,
        "ytrue": ytrue,
        "x0": x0
    }
    for alpha in tqdm.tqdm(ALPHA):
        temp = {}
        X = []
        for delta in DELTA:
            # Unit-norm noise scaled to the additional noise level delta, added on top of ydelta.
            noise = np.random.normal(0, 1, ytrue.shape)
            noise /= np.sqrt(np.sum(noise ** 2))
            data = ydelta + delta * noise
            tik = tikhonov(data, A, alpha=alpha)
            xk, errk, gradk = critical_point(x0, func=tik, niter_newton=niter_newton,
                                             niter_grad=niter_grad, nu=nu)
            X.append(xk)
            temp[delta] = {
                "x": xk,
                "err": errk,
                "grad": gradk,
                "data": data
            }
        # Distance of each perturbed reconstruction to the unperturbed one (delta = 0).
        xlim = X[-1]
        dist = [np.sqrt(np.sum((x - xlim) ** 2)) for x in X[:-1]]
        temp["xlim"] = xlim
        temp["dist"] = dist
        hist[alpha] = temp

    # Plot results.
    plt.ioff()
    for al in ALPHA:
        plt.loglog(hist["delta"], hist[al]["dist"], label=fr'$\alpha = {al}$')
    plt.legend()
    plt.savefig(f"results/stability_{name}.pdf", dpi=500)
    plt.close()

    np.save(f"results/stability_{name}", hist)
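
The helpers imported from funcs (get_operator, reg, tikhonov, critical_point) are not part of this diff. The sketch below only guesses their shape from the call sites above: an N x N operator for the "cumsum" and "inpainting" problems, a smooth regulariser whose gradient reg(x, grad=True) returns a vector, a tikhonov(data, A, alpha) factory whose result is passed to the solver, and a critical_point routine that runs niter_grad gradient steps with step size nu followed by niter_newton Newton steps and returns the iterate together with two convergence histories. All bodies, the quadratic regulariser, and the particular operators are assumptions, not the repository's actual implementation.

# funcs.py -- hypothetical sketch, NOT part of this commit; interfaces inferred from the call sites.
import numpy as np


def get_operator(N, problem="cumsum"):
    """Return an assumed N x N forward operator for the given problem name."""
    if problem == "cumsum":
        return np.tril(np.ones((N, N))) / N        # discrete integration operator
    if problem == "inpainting":
        mask = np.ones(N)
        mask[N // 4:N // 2] = 0                    # assumed: a block of missing samples
        return np.diag(mask)
    raise ValueError(f"unknown problem '{problem}'")


def reg(x, grad=False):
    """Assumed smooth quadratic regulariser R(x) = 0.5 * ||x||^2."""
    return x if grad else 0.5 * np.sum(x ** 2)


def tikhonov(data, A, alpha):
    """Bundle gradient and Hessian of T(x) = ||A x - data||^2 + alpha * R(x)."""
    def gradient(x):
        return 2 * A.T @ (A @ x - data) + alpha * reg(x, grad=True)

    def hessian(x):
        return 2 * A.T @ A + alpha * np.eye(x.size)

    return gradient, hessian


def critical_point(x0, func, niter_newton, niter_grad, nu):
    """Gradient descent followed by Newton refinement; returns iterate and histories."""
    gradient, hessian = func
    x = x0.copy()
    err, grd = [], []
    for _ in range(niter_grad):
        g = gradient(x)
        x = x - nu * g
        err.append(np.linalg.norm(g))              # placeholder: the real error metric is unknown
        grd.append(np.linalg.norm(g))
    for _ in range(niter_newton):
        g = gradient(x)
        x = x - np.linalg.solve(hessian(x), g)
        err.append(np.linalg.norm(g))
        grd.append(np.linalg.norm(g))
    return x, np.array(err), np.array(grd)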