# Invoke a function with the given arguments; lets `foreach(call, fs)` run a
# collection of stored update closures.
call(f, xs...) = f(xs...)

# note for optimisers: set to zero
# p.Δ at the end of the weights update
function optimiser(ps, fs...)
  ps = [Param(p) for p in ps]
  fs = map(ps) do p
    os = map(f -> f(p), fs)
    () -> foreach(call, os)
  end
  () -> foreach(call, fs)
end
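
# Sketch of how the composition works: each `f` in `fs` is a factory that, given
# a `Param`, returns a stateful zero-argument update closure (e.g. the `invdecay`
# and `descent` factories used below). The closures for one parameter are run in
# order, and the function returned above runs them for every parameter:
#
#     opt = optimiser(ps, p -> invdecay(p, decay), p -> descent(p, η))
#     opt()   # apply decay, then descent, to every parameter in `ps`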

"""
    SGD(params, η = 0.1; decay = 0)

Classic gradient descent optimiser with learning rate `η`.
For each parameter `p` and its gradient `δp`, this runs `p -= η*δp`.

Supports inverse decaying learning rate if the `decay` argument is provided.
"""
SGD(ps, η = 0.1; decay = 0) =
  optimiser(ps, p -> invdecay(p, decay), p -> descent(p, η))
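
# Usage sketch. `model`, `loss`, `data`, `params` and `back!` are assumed to
# come from the surrounding Flux code and are not defined in this file:
#
#     opt = SGD(params(model), 0.1, decay = 1e-4)
#     for (x, y) in data
#       back!(loss(model(x), y))  # accumulate gradients into each p.Δ
#       opt()                     # apply the update and zero p.Δ
#     end
#
# The same pattern applies to every constructor below.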

"""
    Momentum(params, η = 0.01; ρ = 0.9, decay = 0)

SGD with learning rate `η`, momentum `ρ` and optional learning rate inverse decay.
"""
Momentum(ps, η = 0.01; ρ = 0.9, decay = 0) =
  optimiser(ps, p -> invdecay(p, decay), p -> momentum(p, ρ, η), p -> descent(p, 1))
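
# Textbook form of the update composed above, for reference only; the actual
# kernel lives in the `momentum` factory and may differ in sign conventions or
# where the learning rate is applied:
#
#     v = ρ*v + η*δp
#     p = p - v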

"""
    Nesterov(params, η = 0.01; ρ = 0.9, decay = 0)

SGD with learning rate `η`, Nesterov momentum `ρ` and optional learning rate inverse decay.
"""
Nesterov(ps, η = 0.01; ρ = 0.9, decay = 0) =
  optimiser(ps, p -> invdecay(p, decay), p -> nesterov(p, ρ, η), p -> descent(p, 1))
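
# Nesterov momentum is the look-ahead variant of the rule above: the gradient
# is effectively taken as if the parameters had already moved by `ρ*v`. The
# `nesterov` factory may use an algebraically equivalent rearrangement in terms
# of the gradient at the current `p`.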

"""
    RMSProp(params, η = 0.001; ρ = 0.9, ϵ = 1e-8, decay = 0)

[RMSProp](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
optimiser. Parameters other than learning rate don't need tuning. Often a good
choice for recurrent networks.
"""
RMSProp(ps, η = 0.001; ρ = 0.9, ϵ = 1e-8, decay = 0) =
  optimiser(ps, p -> rmsprop(p; η = η, ρ = ρ, ϵ = ϵ), p -> invdecay(p, decay), p -> descent(p, 1))
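
# Textbook form of the RMSProp step, for reference; the running average is kept
# inside the `rmsprop` factory and details may differ slightly:
#
#     acc = ρ*acc + (1 - ρ)*δp^2
#     p   = p - η*δp / (√acc + ϵ)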

"""
    ADAM(params, η = 0.001; β1 = 0.9, β2 = 0.999, ϵ = 1e-08, decay = 0)

[ADAM](https://arxiv.org/abs/1412.6980v8) optimiser.
"""
ADAM(ps, η = 0.001; β1 = 0.9, β2 = 0.999, ϵ = 1e-08, decay = 0) =
  optimiser(ps, p -> adam(p; η = η, β1 = β1, β2 = β2, ϵ = ϵ), p -> invdecay(p, decay), p -> descent(p, 1))
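
# Textbook ADAM step at iteration `t`, for reference; the moment estimates and
# bias correction live in the `adam` factory:
#
#     m = β1*m + (1 - β1)*δp
#     v = β2*v + (1 - β2)*δp^2
#     p = p - η * (m / (1 - β1^t)) / (√(v / (1 - β2^t)) + ϵ)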

"""
    ADAGrad(params, η = 0.01; ϵ = 1e-8, decay = 0)

[ADAGrad](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) optimiser.
Parameters don't need tuning.
"""
ADAGrad(ps, η = 0.01; ϵ = 1e-8, decay = 0) =
  optimiser(ps, p -> adagrad(p; η = η, ϵ = ϵ), p -> invdecay(p, decay), p -> descent(p, 1))
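
# Reference update; the squared-gradient accumulator is kept by the `adagrad`
# factory:
#
#     acc = acc + δp^2
#     p   = p - η*δp / (√acc + ϵ)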

"""
    ADADelta(params; ρ = 0.9, ϵ = 1e-8, decay = 0)

[ADADelta](http://arxiv.org/abs/1212.5701) optimiser. Parameters don't need
tuning.
"""
ADADelta(ps; ρ = 0.9, ϵ = 1e-8, decay = 0) =
  optimiser(ps, p -> adadelta(p; ρ = ρ, ϵ = ϵ), p -> invdecay(p, decay), p -> descent(p, 1))
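
# Reference update; both running averages live in the `adadelta` factory:
#
#     acc  = ρ*acc + (1 - ρ)*δp^2
#     Δx   = √(Δacc + ϵ) / √(acc + ϵ) * δp
#     Δacc = ρ*Δacc + (1 - ρ)*Δx^2
#     p    = p - Δx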

"""
    AMSGrad(params, η = 0.001; β1 = 0.9, β2 = 0.999, ϵ = 1e-08, decay = 0)

[AMSGrad](https://openreview.net/forum?id=ryQu7f-RZ) optimiser. Parameters don't need
tuning.
"""
AMSGrad(ps, η = 0.001; β1 = 0.9, β2 = 0.999, ϵ = 1e-08, decay = 0) =
  optimiser(ps, p -> amsgrad(p; η = η, β1 = β1, β2 = β2, ϵ = ϵ), p -> invdecay(p, decay), p -> descent(p, 1))
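
# Reference update: like ADAM, but the second-moment estimate is kept monotone
# non-decreasing; the state lives in the `amsgrad` factory:
#
#     m  = β1*m + (1 - β1)*δp
#     v  = β2*v + (1 - β2)*δp^2
#     v̂  = max(v̂, v)
#     p  = p - η*m / (√v̂ + ϵ)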