Commit dea6b39: implement basic tests for all optimization methods
1 parent 49715b2

2 files changed: +111, -58 lines

src/numericalnim/optimize.nim

Lines changed: 3 additions & 1 deletion
@@ -183,7 +183,7 @@ proc line_search*[U, T](alpha: var U, p: Tensor[T], x0: Tensor[U], f: proc(x: Te
 
 
 
-proc steepestDescent*[U; T: not Tensor](f: proc(x: Tensor[U]): T, x0: Tensor[U], alpha: U = U(0.1), tol: U = U(1e-6), fastMode: bool = false, criterion: LineSearchCriterion = None): Tensor[U] =
+proc steepestDescent*[U; T: not Tensor](f: proc(x: Tensor[U]): T, x0: Tensor[U], alpha: U = U(0.001), tol: U = U(1e-6), fastMode: bool = false, criterion: LineSearchCriterion = None): Tensor[U] =
   ## Minimize scalar-valued function f.
   var alpha = alpha
   var x = x0.clone()
@@ -443,6 +443,8 @@ proc levmarq*[U; T: not Tensor](f: proc(params: Tensor[U], x: U): T, params0: Te
     gradNorm = newGradNorm
     resNorm = vectorNorm(residuals)
     iters += 1
+    if iters == 10000:
+      echo "levmarq reached maximum number of iterations!"
   result = params
 
 
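Both hunks above are behavioural changes: steepestDescent's default step size drops from 0.1 to 0.001, and levmarq now prints a warning once it hits 10000 iterations rather than returning silently. A minimal sketch of what the new default means for callers, using an illustrative quadratic objective that is not part of the library:

import std/math
import arraymancer
import numericalnim

# Illustrative convex objective with its minimum at (0, 0).
proc f(x: Tensor[float]): float =
  x[0]^2 + x[1]^2

let x0 = [1.0, 1.0].toTensor()

# Picks up the new, more conservative default alpha = 0.001.
let solDefault = steepestDescent(f, x0)

# Code tuned against the old default can opt back in explicitly.
let solOld = steepestDescent(f, x0, alpha = 0.1)

echo solDefault, " ", solOld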
tests/test_optimize.nim

Lines changed: 108 additions & 57 deletions
@@ -1,61 +1,112 @@
-import unittest, math, sequtils, arraymancer
+import std/[unittest, math, sequtils, random]
+import arraymancer
 import numericalnim
 
-test "steepest_descent func":
-  proc df(x: float): float = 4 * x^3 - 9.0 * x^2
-  let start = 6.0
-  let gamma = 0.01
-  let precision = 0.00001
-  let max_iters = 10000
-  let correct = 2.24996
-  let value = steepest_descent(df, start, gamma, precision, max_iters)
-  check isClose(value, correct, tol = 1e-5)
-
-test "steepest_descent func starting at zero":
-  proc df(x: float): float = 4 * x^3 - 9.0 * x^2 + 4
-  let start = 0.0
-  let correct = -0.59301
-  let value = steepest_descent(df, start)
-  check isClose(value, correct, tol = 1e-5)
-
-#[
-test "conjugate_gradient func":
-  var A = toSeq([4.0, 1.0, 1.0, 3.0]).toTensor.reshape(2,2).astype(float64)
-  var x = toSeq([2.0, 1.0]).toTensor.reshape(2,1)
-  var b = toSeq([1.0,2.0]).toTensor.reshape(2,1)
-  let tol = 0.001
-  let correct = toSeq([0.090909, 0.636363]).toTensor.reshape(2,1).astype(float64)
-
-  let value = conjugate_gradient(A, b, x, tol)
-  check isClose(value, correct, tol = 1e-5)
-]#
-test "Newtons 1 dimension func":
-  proc f(x:float64): float64 = (1.0 / 3.0) * x ^ 3 - 2 * x ^ 2 + 3 * x
-  proc df(x:float64): float64 = x ^ 2 - 4 * x + 3
-  let x = 0.5
-  let correct = 0.0
-  let value = newtons(f, df, x, 0.000001, 1000)
-  check isClose(value, correct, tol=1e-5)
-
-test "Newtons 1 dimension func default args":
-  proc f(x:float64): float64 = (1.0 / 3.0) * x ^ 3 - 2 * x ^ 2 + 3 * x
-  proc df(x:float64): float64 = x ^ 2 - 4 * x + 3
-  let x = 0.5
-  let correct = 0.0
-  let value = newtons(f, df, x)
-  check isClose(value, correct, tol=1e-5)
-
-test "Newtons unable to find a root":
-  proc bad_f(x:float64): float64 = pow(E, x) + 1
-  proc bad_df(x:float64): float64 = pow(E, x)
-  expect(ArithmeticError):
-    discard newtons(bad_f, bad_df, 0, 0.000001, 1000)
-
-test "Secant 1 dimension func":
-  proc f(x:float64): float64 = (1.0 / 3.0) * x ^ 3 - 2 * x ^ 2 + 3 * x
-  let x = [1.0, 0.5]
-  let correct = 0.0
-  let value = secant(f, x, 1e-5, 10)
-  check isClose(value, correct, tol=1e-5)
+suite "1D":
+  test "steepest_descent func":
+    proc df(x: float): float = 4 * x^3 - 9.0 * x^2
+    let start = 6.0
+    let gamma = 0.01
+    let precision = 0.00001
+    let max_iters = 10000
+    let correct = 2.24996
+    let value = steepest_descent(df, start, gamma, precision, max_iters)
+    check isClose(value, correct, tol = 1e-5)
+
+  test "steepest_descent func starting at zero":
+    proc df(x: float): float = 4 * x^3 - 9.0 * x^2 + 4
+    let start = 0.0
+    let correct = -0.59301
+    let value = steepest_descent(df, start)
+    check isClose(value, correct, tol = 1e-5)
+
+  #[
+  test "conjugate_gradient func":
+    var A = toSeq([4.0, 1.0, 1.0, 3.0]).toTensor.reshape(2,2).astype(float64)
+    var x = toSeq([2.0, 1.0]).toTensor.reshape(2,1)
+    var b = toSeq([1.0,2.0]).toTensor.reshape(2,1)
+    let tol = 0.001
+    let correct = toSeq([0.090909, 0.636363]).toTensor.reshape(2,1).astype(float64)
+
+    let value = conjugate_gradient(A, b, x, tol)
+    check isClose(value, correct, tol = 1e-5)
+  ]#
+  test "Newtons 1 dimension func":
+    proc f(x:float64): float64 = (1.0 / 3.0) * x ^ 3 - 2 * x ^ 2 + 3 * x
+    proc df(x:float64): float64 = x ^ 2 - 4 * x + 3
+    let x = 0.5
+    let correct = 0.0
+    let value = newtons(f, df, x, 0.000001, 1000)
+    check isClose(value, correct, tol=1e-5)
+
+  test "Newtons 1 dimension func default args":
+    proc f(x:float64): float64 = (1.0 / 3.0) * x ^ 3 - 2 * x ^ 2 + 3 * x
+    proc df(x:float64): float64 = x ^ 2 - 4 * x + 3
+    let x = 0.5
+    let correct = 0.0
+    let value = newtons(f, df, x)
+    check isClose(value, correct, tol=1e-5)
+
+  test "Newtons unable to find a root":
+    proc bad_f(x:float64): float64 = pow(E, x) + 1
+    proc bad_df(x:float64): float64 = pow(E, x)
+    expect(ArithmeticError):
+      discard newtons(bad_f, bad_df, 0, 0.000001, 1000)
+
+  test "Secant 1 dimension func":
+    proc f(x:float64): float64 = (1.0 / 3.0) * x ^ 3 - 2 * x ^ 2 + 3 * x
+    let x = [1.0, 0.5]
+    let correct = 0.0
+    let value = secant(f, x, 1e-5, 10)
+    check isClose(value, correct, tol=1e-5)
+
+
+###############################
+## Multi-dimensional methods ##
+###############################
+
+suite "Multi-dim":
+  proc bananaFunc(x: Tensor[float]): float =
+    ## Function of 2 variables with minimum at (1, 1)
+    ## And it looks like a banana 🍌
+    result = (1 - x[0])^2 + 100*(x[1] - x[0]^2)^2
+
+  let x0 = [-1.0, -1.0].toTensor()
+  let correct = [1.0, 1.0].toTensor()
+
+  test "Steepest Gradient":
+    let xSol = steepestDescent(bananaFunc, x0.clone)
+    for x in abs(correct - xSol):
+      check x < 2e-2
+
+  test "Newton":
+    let xSol = newton(bananaFunc, x0.clone)
+    for x in abs(correct - xSol):
+      check x < 3e-10
+
+  test "BFGS":
+    let xSol = bfgs(bananaFunc, x0.clone)
+    for x in abs(correct - xSol):
+      check x < 3e-7
+
+  test "L-BFGS":
+    let xSol = lbfgs(bananaFunc, x0.clone)
+    for x in abs(correct - xSol):
+      check x < 7e-10
+
+  let correctParams = [10.4, -0.45].toTensor()
+  proc fitFunc(params: Tensor[float], x: float): float =
+    params[0] * exp(params[1] * x)
+
+  let xData = arraymancer.linspace(0.0, 10.0, 100)
+  randomize(1337)
+  let yData = correctParams[0] * exp(correctParams[1] * xData) + randomNormalTensor([100], 0.0, 1e-2)
+
+  let params0 = [0.0, 0.0].toTensor()
+
+  test "levmarq":
+    let paramsSol = levmarq(fitFunc, params0, xData, yData)
+    for x in abs(paramsSol - correctParams):
+      check x < 9e-4
 
 
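For reference, the bananaFunc exercised by the new multi-dim suite is the classic Rosenbrock function, a standard benchmark for gradient-based optimizers because its minimum sits at the end of a long, curved, nearly flat valley:

f(x_0, x_1) = (1 - x_0)^2 + 100 (x_1 - x_0^2)^2, with global minimum f(1, 1) = 0

All four unconstrained solvers (steepestDescent, newton, bfgs, lbfgs) are checked against the known minimizer (1, 1), with per-method tolerances that presumably reflect their expected accuracy, while the levmarq test fits params[0] * exp(params[1] * x) to synthetic data whose noise is seeded via randomize(1337), keeping the run reproducible.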