
Commit 14124d3

Restore total elapsed time, switch to time per iteration in iteration verbosity
1 parent: 7170ee6

3 files changed: +12 −7 lines changed
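All three solvers get the same change: t again accumulates the total elapsed time since t₀ (and is still what the shouldstop criterion receives), while the new told/Δt pair times each iteration individually for the verbosity >= 3 log line. A minimal standalone sketch of that timing pattern, with sleep standing in for the real per-iteration work (the name timing_demo and the loop body are illustrative, not OptimKit code):

    using Printf

    function timing_demo(niter)
        t₀ = time()         # start of the whole run, set once
        t = time() - t₀     # total elapsed time so far
        for numiter in 1:niter
            told = t             # total elapsed time when this iteration starts
            sleep(0.01)          # stand-in for the actual iteration work
            t = time() - t₀      # updated total elapsed time
            Δt = t - told        # time spent in this iteration alone
            @info @sprintf("demo: iter %4d, Δt %7.2f s, total %7.2f s", numiter, Δt, t)
        end
        return t
    end

    timing_demo(3)

Because shouldstop still receives the cumulative t, wall-time-based stopping criteria keep their original meaning; only the logged quantity switches to per-iteration time.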

src/cg.jl

Lines changed: 4 additions & 2 deletions
@@ -101,6 +101,7 @@ function optimize(fg, x, alg::ConjugateGradient;
         @info @sprintf("CG: initializing with f = %.12f, ‖∇f‖ = %.4e", f, normgrad)
     local xprev, gprev, Pgprev, ηprev
     while !(_hasconverged || _shouldstop)
+        told = t
         # compute new search direction
         if precondition === _precondition
             Pg = g
@@ -140,6 +141,7 @@ function optimize(fg, x, alg::ConjugateGradient;
         push!(fhistory, f)
         push!(normgradhistory, normgrad)
         t = time() - t₀
+        Δt = t - told
         _hasconverged = hasconverged(x, f, g, normgrad)
         _shouldstop = shouldstop(x, f, g, numfg, numiter, t)

@@ -148,8 +150,8 @@ function optimize(fg, x, alg::ConjugateGradient;
             break
         end
         verbosity >= 3 &&
-            @info @sprintf("CG: iter %4d, time %7.2f s: f = %.12f, ‖∇f‖ = %.4e, α = %.2e, β = %.2e, nfg = %d",
-                           numiter, t, f, normgrad, α, β, nfg)
+            @info @sprintf("CG: iter %4d, Δt %7.2f s: f = %.12f, ‖∇f‖ = %.4e, α = %.2e, β = %.2e, nfg = %d",
+                           numiter, Δt, f, normgrad, α, β, nfg)

         # transport gprev, ηprev and vectors in Hessian approximation to x
         gprev = transport!(gprev, xprev, ηprev, α, x)

src/gd.jl

Lines changed: 4 additions & 2 deletions
@@ -78,6 +78,7 @@ function optimize(fg, x, alg::GradientDescent;
     verbosity >= 2 &&
         @info @sprintf("GD: initializing with f = %.12f, ‖∇f‖ = %.4e", f, normgrad)
     while !(_hasconverged || _shouldstop)
+        told = t
         # compute new search direction
         Pg = precondition(x, deepcopy(g))
         η = scale!(Pg, -1) # we don't need g or Pg anymore, so we can overwrite it
@@ -97,6 +98,7 @@ function optimize(fg, x, alg::GradientDescent;
         push!(fhistory, f)
         push!(normgradhistory, normgrad)
         t = time() - t₀
+        Δt = t - told
         _hasconverged = hasconverged(x, f, g, normgrad)
         _shouldstop = shouldstop(x, f, g, numfg, numiter, t)

@@ -105,8 +107,8 @@ function optimize(fg, x, alg::GradientDescent;
             break
         end
         verbosity >= 3 &&
-            @info @sprintf("GD: iter %4d, time %7.2f s: f = %.12f, ‖∇f‖ = %.4e, α = %.2e, nfg = %d",
-                           numiter, t, f, normgrad, α, nfg)
+            @info @sprintf("GD: iter %4d, Δt %7.2f s: f = %.12f, ‖∇f‖ = %.4e, α = %.2e, nfg = %d",
+                           numiter, Δt, f, normgrad, α, nfg)

         # increase α for next step
         α = 2 * α

src/lbfgs.jl

Lines changed: 4 additions & 3 deletions
@@ -83,7 +83,7 @@ function optimize(fg, x, alg::LBFGS;
         @info @sprintf("LBFGS: initializing with f = %.12f, ‖∇f‖ = %.4e", f, normgrad)

     while !(_hasconverged || _shouldstop)
-        t₀ = time()
+        told = t
         # compute new search direction
         if length(H) > 0
             Hg = let x = x
@@ -118,6 +118,7 @@ function optimize(fg, x, alg::LBFGS;
         push!(fhistory, f)
         push!(normgradhistory, normgrad)
         t = time() - t₀
+        Δt = t - told
         _hasconverged = hasconverged(x, f, g, normgrad)
         _shouldstop = shouldstop(x, f, g, numfg, numiter, t)

@@ -126,8 +127,8 @@ function optimize(fg, x, alg::LBFGS;
             break
         end
         verbosity >= 3 &&
-            @info @sprintf("LBFGS: iter %4d, time %7.2f s: f = %.12f, ‖∇f‖ = %.4e, α = %.2e, m = %d, nfg = %d",
-                           numiter, t, f, normgrad, α, length(H), nfg)
+            @info @sprintf("LBFGS: iter %4d, Δt %7.2f s: f = %.12f, ‖∇f‖ = %.4e, α = %.2e, m = %d, nfg = %d",
+                           numiter, Δt, f, normgrad, α, length(H), nfg)

         # transport gprev, ηprev and vectors in Hessian approximation to x
         gprev = transport!(gprev, xprev, ηprev, α, x)
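A note on the extra deletion in lbfgs.jl (inferred from the diff rather than stated in the commit message): the old code reset t₀ = time() at the top of every iteration, so t = time() - t₀ measured only the current iteration, and that per-iteration value was also what shouldstop received. Dropping the reset restores t as the total elapsed time since the start of the optimization, consistent with cg.jl and gd.jl, while the new Δt = t - told carries the per-iteration timing.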
