Commit 4cd2e11b authored by c7021101

initial commit

parent 4bedb204
# Default ignored files
/shelf/
/workspace.xml
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>
\ No newline at end of file
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredPackages">
<value>
<list size="6">
<item index="0" class="java.lang.String" itemvalue="tqdm" />
<item index="1" class="java.lang.String" itemvalue="scipy" />
<item index="2" class="java.lang.String" itemvalue="tensorboard" />
<item index="3" class="java.lang.String" itemvalue="ipython" />
<item index="4" class="java.lang.String" itemvalue="torch" />
<item index="5" class="java.lang.String" itemvalue="torchvision" />
</list>
</value>
</option>
</inspection_tool>
<inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="W605" />
</list>
</option>
</inspection_tool>
</profile>
</component>
\ No newline at end of file
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (pytorch)" project-jdk-type="Python SDK" />
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/critical-point-regularization.iml" filepath="$PROJECT_DIR$/.idea/critical-point-regularization.iml" />
</modules>
</component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>
\ No newline at end of file
import numpy as np
from funcs import reg, tikhonov, get_operator, critical_point
import matplotlib.pyplot as plt
import tqdm

# Convergence experiment: solve the regularized problem for shrinking noise levels delta
# with the parameter choice alpha(delta) = delta ** q and track the distance to the limit point.
problem = "cumsum"
N = 512
niter_gradient = 5000
niter_newton = 50
nu = 1

A = get_operator(N, problem=problem)
U, sigma, Vh = np.linalg.svd(A)
idx = np.where(sigma <= 1e-14)[0]
kernel = Vh[idx, ...]  # orthonormal basis of the (numerical) null space of A

# Set up data
ts = np.linspace(-1, 1, N)
f = lambda t: np.exp(-t ** 2) * np.cos(t) * (t - 0.5) ** 2 + np.sin(t ** 2)
xtrue = f(ts)
ytrue = A @ xtrue

# Convergence
x0 = -1 * np.ones(N)
DELTA = [1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11, 1e-12, 1e-16]
Q = [0.5, 1, 1.5]
hist = {
    "delta": DELTA[:-1],
    "q": Q,
    "xtrue": xtrue,
    "ytrue": ytrue,
    "x0": x0
}

for q in tqdm.tqdm(Q):
    alpha = lambda delta: delta ** q
    temp = {}
    X = []
    ERR = []
    GRAD = []
    REG = []
    for delta in DELTA:
        noise = np.random.normal(0, 1, N)
        noise /= np.sqrt(np.sum(noise ** 2))  # normalize so that ||data - ytrue|| = delta
        data = ytrue + delta * noise
        tik = tikhonov(data, A, alpha=alpha(delta))
        xk, errk, gradk = critical_point(x0, func=tik, niter_newton=niter_newton, niter_grad=niter_gradient, nu=nu)
        X.append(xk)
        temp[delta] = {
            "x": xk,
            "err": errk,
            "grad": gradk,
            "data": data
        }

    # Approximate limit of sequence
    if kernel.shape[0] > 0:
        # Flow the solution for the smallest delta along the regularizer gradient projected onto ker(A).
        xlim = X[-1].copy()
        for _ in range(100):
            grad = kernel.T @ kernel @ reg(xlim, grad=True)
            xlim = xlim - 1e-3 * grad
    else:
        xlim = xtrue

    # Calculate distance to limit point and data error
    temp["xlim"] = xlim
    temp["dist"] = [np.sqrt(np.sum((x - xlim) ** 2)) for x in X[:-1]]
    temp["dataerr"] = [np.sqrt(np.sum((A @ x - ytrue) ** 2)) for x in X[:-1]]
    hist[q] = temp

# Plot convergence in signal space
plt.ioff()
for q in Q:
    plt.loglog(hist["delta"], hist[q]["dist"], label=fr'$\alpha^{{{q}}}$')
plt.legend()
plt.savefig(f"results/convergence_xerror_{problem}.pdf", dpi=500)
plt.close()

# Plot convergence in data space
for q in Q:
    plt.loglog(hist["delta"], hist[q]["dataerr"], label=fr'$\alpha^{{{q}}}$')
plt.legend()
plt.savefig(f"results/convergence_dataerror_{problem}.pdf", dpi=500)
plt.close()

np.save(f"results/convergence_{problem}", hist)
import numpy as np


def phi(t, rho=2, grad=False, hess=False):
    # Double-well penalty phi(t) = (t - rho)^2 * (t + rho/2)^2 with analytic first and second derivatives.
    if grad:
        return 2 * (t - rho) * (t + rho / 2) ** 2 + 2 * (t + rho / 2) * (t - rho) ** 2
    elif hess:
        return 8 * (t - rho) * (t + rho / 2) + 2 * (t - rho) ** 2 + 2 * (t + rho / 2) ** 2
    else:
        return (t - rho) ** 2 * (t + rho / 2) ** 2


def reg(x, grad=False, hess=False, beta=1e-1, **kwargs):
    # Non-convex regularizer R(x) = sum_i (beta/2 * x_i^2 + phi(x_i)), with gradient and Hessian.
    if grad:
        return beta * x + phi(x, grad=True, **kwargs)
    elif hess:
        return beta * np.eye(len(x)) + np.diag(phi(x, hess=True, **kwargs))
    else:
        return np.sum(beta / 2 * x ** 2 + phi(x, **kwargs))


def newton(x0, func, niter=10):
    # Newton's method x <- x - H(x)^{-1} g(x); tracks the objective and squared gradient norm.
    temp = x0.copy()
    err = [func(temp)]
    grad = [np.sum(func(temp, grad=True) ** 2)]
    for _ in range(niter):
        g = func(temp, grad=True)
        H = func(temp, hess=True)
        temp = temp - np.linalg.inv(H) @ g
        err.append(func(temp))
        grad.append(np.sum(func(temp, grad=True) ** 2))
    return temp, err, grad


def gradient_descent(x0, func, niter=100, nu=1e-1, gamma=0.9):
    # Gradient descent with Nesterov momentum (step size nu, momentum parameter gamma).
    temp = x0.copy()
    v0 = np.zeros_like(temp)
    err = [func(temp)]
    grad = [np.sum(func(temp, grad=True) ** 2)]
    for _ in range(niter):
        g = func(temp - gamma * v0, grad=True)
        v0 = gamma * v0 + nu * g
        temp = temp - v0
        err.append(func(temp))
        grad.append(np.sum(func(temp, grad=True) ** 2))
    return temp, err, grad


def tikhonov(y, A, alpha=1e-2):
    # Returns a closure evaluating the Tikhonov-type functional 0.5 * ||Ax - y||^2 + alpha * R(x),
    # its gradient, or its Hessian, depending on the flags.
    def func(x, grad=False, hess=False):
        if grad:
            return A.T @ (A @ x - y) + alpha * reg(x, grad=True)
        elif hess:
            return A.T @ A + alpha * reg(x, hess=True)
        else:
            return 0.5 * np.sum((A @ x - y) ** 2) + alpha * reg(x)
    return func


def get_operator(N, problem="inpainting"):
    # Forward operators: random inpainting mask, scaled cumulative sum, periodic difference, or a random matrix.
    np.random.seed(42)  # fixes the global RNG, so the mask and any later noise draws are reproducible
    if problem == "inpainting":
        p = 0.1
        A = np.eye(N)
        for i in range(N):
            if np.random.uniform() <= p:
                A[i, i] = 0
    elif problem == "cumsum":
        A = np.tril(np.ones((N, N))) / N
    elif problem == "difference":
        A = np.eye(N) - np.eye(N, k=1)
        A[N - 1, 0] = -1
        A /= N
    else:
        A = np.random.normal(0, 1, (N, N))
    return A


def critical_point(x0, func, niter_grad=500, nu=0.5, niter_newton=20):
    # Critical-point search: gradient descent to get into a basin, then Newton to refine.
    temp = x0.copy()
    temp, err, grad = gradient_descent(temp, func, niter=niter_grad, nu=nu)
    temp, errnewton, gradnewton = newton(temp, func, niter=niter_newton)
    return temp, err + errnewton, grad + gradnewton


def fredholm_matrix(func, s, t):
    # Discretize a Fredholm kernel func(s, t) into a matrix by evaluating it row by row.
    m = np.zeros((len(s), len(t)))
    for i in range(len(s)):
        m[i, ...] = func(s[i], t)
    return m
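For orientation, here is a minimal sketch of how the helpers above fit together on a small toy problem; the problem size, noise level, alpha, and iteration counts are illustrative choices, not values used by the experiments.

import numpy as np
from funcs import tikhonov, get_operator, critical_point

# Toy setup: a smooth signal observed through a random inpainting mask with a little noise.
N = 64
A = get_operator(N, problem="inpainting")
xtrue = np.sin(np.linspace(-1, 1, N))
data = A @ xtrue + 1e-4 * np.random.normal(0, 1, N)

# Build the regularized objective and search for a critical point (gradient descent, then Newton).
func = tikhonov(data, A, alpha=1e-3)
xk, err, grad = critical_point(np.zeros(N), func, niter_grad=1000, nu=1, niter_newton=10)
print("final objective:", err[-1])
print("final squared gradient norm:", grad[-1])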
import numpy as np
from funcs import critical_point, tikhonov, get_operator
import matplotlib.pyplot as plt
import tqdm

# Stability experiment: perturb a fixed noisy datum ydelta by decreasing amounts delta and
# measure how far the corresponding critical points move, for several fixed values of alpha.
N = 512
problem = "inpainting"
niter_gradient = 5000
niter_newton = 50
nu = 1

A = get_operator(N, problem=problem)

# Set up data
ts = np.linspace(-1, 1, N)
f = lambda t: np.exp(-t ** 2) * np.cos(t) * (t - 0.5) ** 2 + np.sin(t ** 2)
xtrue = f(ts)
ytrue = A @ xtrue
ydelta = ytrue + np.random.normal(0, 1, N) * 0.001

# Stability
x0 = np.zeros(N)
DELTA = [1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 0]
ALPHA = [1e-2, 1e-3, 1e-4]
hist = {
    "delta": DELTA[:-1],
    "alpha": ALPHA,
    "xtrue": xtrue,
    "ytrue": ytrue,
    "x0": x0
}

for alpha in tqdm.tqdm(ALPHA):
    temp = {}
    X = []
    for delta in DELTA:
        noise = np.random.normal(0, 1, N)
        noise /= np.sqrt(np.sum(noise ** 2))  # normalize so that ||data - ydelta|| = delta
        data = ydelta + delta * noise
        tik = tikhonov(data, A, alpha=alpha)
        xk, errk, gradk = critical_point(x0, func=tik, niter_newton=niter_newton, niter_grad=niter_gradient, nu=nu)
        X.append(xk)
        temp[delta] = {
            "x": xk,
            "err": errk,
            "grad": gradk,
            "data": data
        }

    # The last entry (delta = 0) is the reconstruction from the unperturbed ydelta; measure distances to it.
    xlim = X[-1]
    dist = [np.sqrt(np.sum((x - xlim) ** 2)) for x in X[:-1]]
    temp["xlim"] = xlim
    temp["dist"] = dist
    hist[alpha] = temp

# Plot results
plt.ioff()
for al in ALPHA:
    plt.loglog(hist["delta"], hist[al]["dist"], label=fr'$\alpha = {al}$')
plt.legend()
plt.savefig(f"results/stability_{problem}.pdf", dpi=500)
plt.close()

np.save(f"results/stability_{problem}", hist)