Commit ca0fd1fd authored by Elvira Clement's avatar Elvira Clement
Browse files

Adding some unit-test

parent 21453995
python -m unittest
\ No newline at end of file
# -*- coding: utf-8 -*-
\ No newline at end of file
# -*- coding: utf-8 -*-
import unittest
import numpy as np
from src.dictionaries import generate_dic
from src.solver.slope import primal_func, dual_func, slope_gp
from src.solver.parameters import SlopeParameters
from src.screening.singletest import GapSphereSingleTest
from src.screening.gap_ptest import GAP_Ptest
import src.utils as utils
class TestSolver(unittest.TestCase):

    def test_gp_cost_decrease(self):
        """Compare the single-sphere GAP test against the GAP p-test.

        Solves a slope problem, builds a dual-feasible point from the
        residual, computes the duality gap, and checks that every atom
        screened by the single sphere test is also screened by the
        p-test (element-wise ``out2 >= out1``).
        """
        # 1. Create a random Gaussian problem
        m = 20
        n = 50
        matA = generate_dic("gaussian", m, n, True)
        vecy = np.random.randn(m)
        vec_gammas = np.linspace(.5, 1, n)[::-1]

        # 2. Compute lambda_max and pick a working regularization
        lbd_max = utils.get_lambda_max(vecy, matA, vec_gammas)
        lbd = .6 * lbd_max

        # 3. Solve the slope problem at lbd
        algParameters = SlopeParameters()
        algParameters.gap_stopping = 1e-12
        algParameters.max_it = 100000
        algParameters.accelerated = False
        # BUG FIX: the solver was previously called with .5 * lbd_max while
        # the dual rescaling, the primal/dual values and the screening tests
        # below all use lbd = .6 * lbd_max — the gap was computed for a
        # different problem than the one solved. Use lbd consistently.
        out = slope_gp(vecy, matA, lbd, vec_gammas, algParameters)

        # Build a dual-feasible point: rescale the residual so that the
        # sorted-l1 dual constraint (cumulated form) is satisfied
        vecu = vecy - matA @ out["sol"]
        beta_dual = np.sort(np.abs(matA.T @ vecu))[::-1]
        beta_dual = np.cumsum(beta_dual) / np.cumsum(lbd * vec_gammas)
        vecu /= np.max(beta_dual)
        Atu = matA.T @ vecu

        # Duality gap at (sol, vecu)
        pval = primal_func(vecy, matA @ out["sol"], out["sol"], lbd, vec_gammas)
        dval = dual_func(vecy, np.linalg.norm(vecy, 2)**2, vecu)
        gap = np.abs(pval - dval)

        # 4. Run both screening tests and compare their outputs
        test1 = GapSphereSingleTest()
        test2 = GAP_Ptest(np.cumsum(vec_gammas))
        out1 = test1.apply_test(Atu, gap, lbd, vec_gammas)
        out2 = test2.apply_test(Atu, gap, lbd, vec_gammas)
        # The p-test must screen at least as much as the single test
        self.assertTrue((out2 >= out1).all())
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
\ No newline at end of file
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import src.utils as utils
from src.dictionaries import generate_dic
from src.solver.slope import slope_gp
from src.solver.parameters import SlopeParameters
class TestSolver(unittest.TestCase):

    def test_gp_cost_decrease(self):
        """Run a non accelerated proximal gradient algorithm and check
        that the cost function never increases between two iterations.
        """
        # Random Gaussian problem
        n_rows = 20
        n_cols = 50
        matA = generate_dic("gaussian", n_rows, n_cols, True)
        vecy = np.random.randn(n_rows)
        vec_gammas = np.linspace(0, 1, n_cols)[::-1]

        # Regularization: half of lambda_max
        lbd_max = utils.get_lambda_max(vecy, matA, vec_gammas)

        # Solve with the plain (non accelerated) proximal gradient
        params = SlopeParameters()
        params.max_it = 1000
        params.accelerated = False
        out = slope_gp(vecy, matA, .5 * lbd_max, vec_gammas, params)

        # Each successive difference must be non positive
        # (1e-14 slack due to machine precision error)
        vec_cost = out["cost_function"]
        for prev, curr in zip(vec_cost[:-1], vec_cost[1:]):
            self.assertLessEqual(curr - prev, 1e-14)
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
\ No newline at end of file
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import src.utils as utils
from src.dictionaries import generate_dic
from src.solver.slope import slope_gp
class TestUtilsModule(unittest.TestCase):
    """Unit tests for the helper routines of ``src.utils``
    (coherence computations and ``get_lambda_max``).
    """

    def test_coherence(self):
        """ Compute the coherence manually and compare it
        to the value output by the module
        """
        # 1. Create dictionary (normalized Gaussian, m x n)
        m = 20
        n = 50
        matA = generate_dic("gaussian", m, n, True)
        # 2. Compute coherence with module
        mu = utils.compute_coherence(matA)
        # 3. Compute coherence with brute force: largest off-diagonal
        # entry of the Gram matrix (subtracting the identity removes the
        # unit diagonal — assumes columns are normalized; TODO confirm)
        mu_prime = np.max(np.abs((matA.T @ matA) - np.eye(n)))
        # 4. Test
        self.assertAlmostEqual(mu, mu_prime, 14)

    # NOTE(review): "cound" in the name below looks like a typo
    # (presumably "compute" or "cumulated"); left unchanged so the test
    # id stays stable.
    def test_cound_coherence_function(self):
        """ Compute the coherence manually and compare it
        to the value output by the module
        """
        # 1. Create dictionary
        m = 20
        n = 50
        matA = generate_dic("gaussian", m, n, True)
        # 2. Compute coherence with module
        mu = utils.compute_coherence(matA)
        coherence_func = utils.compute_coherence_function(matA)
        # 3. Test: entry 1 equals the coherence, and the whole function
        # is dominated by mu * k (k = 0..n-1)
        self.assertAlmostEqual(mu, coherence_func[1], 14)
        self.assertTrue( (coherence_func <= mu * np.arange(n)).all() )

    def test_coherences_pulse(self):
        """
        test on analytic dictionary defined in tropp's paper "greed is good"
        (unnormalzied dictionary)
        """
        # 1. Create dictionary: lower-triangular decaying-pulse columns
        # A[i, j] = sqrt(1 - beta^2) * beta^(i - j) for i >= j
        m = 50
        n = 30
        matA = np.zeros((m, n))
        beta = .5
        for j in range(n):
            for i in range(j, m):
                matA[i, j] = np.sqrt(1 - beta**2) * beta**(i-j)
        # 2. Compute coherence with module
        mu = utils.compute_coherence(matA)
        # Analytic value: inner product of two adjacent columns
        mu_star = matA[:, 0] @ matA[:, 1]
        # 3. Test
        self.assertAlmostEqual(mu, mu_star, 14)

    def test_get_lambda_max(self):
        """At lbd = lambda_max the slope solution should be the zero vector.
        """
        # 1. Create problem
        m = 20
        n = 50
        matA = generate_dic("gaussian", m, n, True)
        vecy = np.random.randn(m)
        vec_gammas = np.linspace(0, 1, n)[::-1]
        # 2. Compute lambda_max
        lbd_max = utils.get_lambda_max(vecy, matA, vec_gammas)
        # 3. Eval solution of slope problem at lambda_max
        out = slope_gp(vecy, matA, lbd_max, vec_gammas)
        # 4. Assert that the zero vector is solution
        self.assertTrue( (out["sol"] == np.zeros(n)).all() )
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment