
Commit 8a997b7

joeljonsson committed
added gradient tests for APRNet layers
1 parent 24e0fd5 commit 8a997b7

File tree

5 files changed: +170 -1 lines changed


pyapr/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -30,4 +30,4 @@
 from .numerics import *
 #from .viewer import *

-__all__ = ['data_containers', 'io', 'nn', 'viewer', 'converter', 'numerics']
+__all__ = ['data_containers', 'io', 'nn', 'viewer', 'converter', 'numerics', 'tests']

pyapr/nn/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -1 +1,3 @@
 from .APRNet import *
+
+__all__ = ['APRNet', 'testing']

pyapr/nn/testing.py

Lines changed: 90 additions & 0 deletions
@@ -0,0 +1,90 @@
import torch
from torch.autograd.gradcheck import get_analytical_jacobian, get_numerical_jacobian, zero_gradients, make_jacobian


def get_analytical_jacobian_params(output, target):
    """
    Computes the analytical Jacobian with respect to all tensors in `target`, which can hold some or all of the
    parameters of a module used to compute `output`.

    output: torch.Tensor output from which to backpropagate the gradients
    target: torch.Tensor or iterable of torch.Tensor for which to compute the gradients
    """

    jacobian = make_jacobian(target, output.numel())
    grad_output = torch.zeros_like(output)
    flat_grad_output = grad_output.view(-1)

    for i in range(flat_grad_output.numel()):
        flat_grad_output.zero_()
        flat_grad_output[i] = 1

        zero_gradients(target)
        torch.autograd.backward(output, grad_output, retain_graph=True)

        for j in range(len(jacobian)):
            jacobian[j][:, i] = target[j].grad.clone().flatten()

    return jacobian


def gradcheck(m, input, eps=1e-6, atol=1e-5, rtol=1e-3, raise_exception=True):
    """
    Compare the analytical gradients of a module to numerical gradients computed via central finite differences.

    Disclaimer: this is a modified version of torch.autograd.gradcheck::gradcheck
    (https://pytorch.org/docs/stable/_modules/torch/autograd/gradcheck.html, 2019-06-04) distributed under the license
    https://github.com/pytorch/pytorch/blob/master/LICENSE

    :param m: module (callable) whose gradients are checked
    :param input: tuple of inputs passed to the module as m(*input)
    :param eps: perturbation used for the finite differences
    :param atol: absolute tolerance for the gradient comparison
    :param rtol: relative tolerance for the gradient comparison
    :param raise_exception: if True, raise a RuntimeError on failure instead of returning False
    :return: True if all gradient checks pass, otherwise False (or a RuntimeError is raised)
    """
    def fail_test(msg):
        if raise_exception:
            raise RuntimeError(msg)
        return False

    def fn(input):
        return m(*input)

    output = fn(input)

    for i, o in enumerate(output):
        if not o.requires_grad:
            continue

        # compare input gradients
        analytical, reentrant, correct_grad_sizes = get_analytical_jacobian(input, o)
        numerical = get_numerical_jacobian(fn, input, eps=eps)

        if not correct_grad_sizes:
            return fail_test('Analytical gradient has incorrect size')

        for j, (a, n) in enumerate(zip(analytical, numerical)):
            if a.numel() != 0 or n.numel() != 0:
                if not torch.allclose(a, n, rtol, atol):
                    return fail_test('Jacobian mismatch for output %d with respect to input %d,\n'
                                     'numerical:%s\nanalytical:%s\n' % (i, j, n, a))

        if not reentrant:
            return fail_test('Backward is not reentrant, i.e., running backward with same '
                             'input and grad_output multiple times gives different values, '
                             'although analytical gradient matches numerical gradient')

    # compare parameter gradients
    pars = [t for t in m.parameters()]

    if pars:
        numerical = get_numerical_jacobian(fn, input, target=pars)
        analytical = get_analytical_jacobian_params(output, pars)

        for j, (a, n) in enumerate(zip(analytical, numerical)):
            if a.numel() != 0 or n.numel() != 0:
                if not torch.allclose(a, n, rtol, atol):
                    return fail_test('Jacobian mismatch for output %d with respect to parameter %d,\n'
                                     'numerical:%s\nanalytical:%s\n' % (i, j, n, a))
    return True

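Note: get_numerical_jacobian approximates each derivative with a central difference, (f(x + eps) - f(x - eps)) / (2 * eps), which is why double-precision inputs and distinct pixel values matter in the tests below. The following is a hypothetical, minimal usage sketch of the modified gradcheck above; it is not part of the commit and uses a plain torch.nn.Linear stand-in with a single-tensor output and batch size 1, mirroring the single-APR batches used in the tests:

import torch
import torch.nn as nn
import pyapr.nn.testing as testing

class TinyNet(nn.Module):
    # toy module standing in for an APRNet layer; forward returns a single tensor
    def __init__(self):
        super(TinyNet, self).__init__()
        self.lin = nn.Linear(3, 2).double()  # double precision for accurate finite differences

    def forward(self, x):
        return self.lin(x)

x = torch.randn(1, 3, dtype=torch.float64, requires_grad=True)
m = TinyNet()
assert testing.gradcheck(m, (x,))  # checks gradients w.r.t. x and the Linear parameters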
pyapr/tests/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
from .test_nn_modules import *

pyapr/tests/test_nn_modules.py

Lines changed: 76 additions & 0 deletions
@@ -0,0 +1,76 @@
import pyapr
import pyapr.nn as aprnn
import numpy as np
from demo.io import read_tiff
import unittest
import pyapr.nn.testing as testing


class TestAPRNetModules(unittest.TestCase):

    def __init__(self, *args, **kwargs):
        super(TestAPRNetModules, self).__init__(*args, **kwargs)

        # Load in an image and extract a small tile
        img = read_tiff('../../LibAPR/test/files/Apr/sphere_120/sphere_original.tif')
        img = img[20:52, 40:72, 53].astype(np.float32)

        # Ensure that pixel values are distinct to avoid problems with finite difference of max pooling
        for i in range(32):
            for j in range(32):
                img[i, j] += 0.001 * (i*32 + j)

        # Initialize objects for APR conversion
        apr = pyapr.APR()
        parts = pyapr.FloatParticles()
        par = pyapr.APRParameters()
        converter = pyapr.converter.FloatConverter()

        # Set some parameters
        par.auto_parameters = False
        par.rel_error = 0.1
        par.Ip_th = 0
        par.gradient_smoothing = 2
        par.sigma_th = 50
        par.sigma_th_max = 20
        converter.set_parameters(par)
        converter.set_verbose(False)

        # Compute APR and sample particle values
        converter.get_apr(apr, img)
        apr.init_tree()
        parts.sample_image(apr, img)

        apr_arr = np.empty(1, dtype=object)
        parts_arr = np.empty(1, dtype=object)
        apr_arr[0] = apr
        parts_arr[0] = parts

        x, dlvl = aprnn.APRInputLayer()(apr_arr, parts_arr, dtype=np.float64)
        x.requires_grad = True

        self.aprs = apr_arr
        self.x = x
        self.dlvl = dlvl

    def test_gradients_conv1x1(self):
        m = aprnn.APRConv(1, 4, 1, 2)
        assert testing.gradcheck(m, (self.x, self.aprs, self.dlvl))

    def test_gradients_conv3x3(self):
        m = aprnn.APRConv(1, 4, 3, 2)
        assert testing.gradcheck(m, (self.x, self.aprs, self.dlvl))

    def test_gradients_conv5x5(self):
        m = aprnn.APRConv(1, 4, 5, 2)
        assert testing.gradcheck(m, (self.x, self.aprs, self.dlvl))

    def test_gradients_maxpool(self):
        m = aprnn.APRMaxPool(increment_level_delta=False)
        assert testing.gradcheck(m, (self.x, self.aprs, self.dlvl))


if __name__ == '__main__':
    unittest.main()

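The test fixture loads the TIFF via a relative path, so the working directory must be one from which '../../LibAPR/test/files/Apr/sphere_120/sphere_original.tif' resolves. Assuming that, a hypothetical way to run the new tests programmatically (equivalent to the unittest.main() entry point above; not part of the commit) is:

import unittest
from pyapr.tests import test_nn_modules

# build a suite from the new test module and run it with the standard text runner
suite = unittest.defaultTestLoader.loadTestsFromModule(test_nn_modules)
unittest.TextTestRunner(verbosity=2).run(suite)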