From 07ad7cfa406b809d8c2f0222f97f2d50eb554e7f Mon Sep 17 00:00:00 2001
From: Mike J Innes
Date: Wed, 18 Oct 2017 17:07:49 +0100
Subject: [PATCH] learning rate as default arg

---
 src/optimise/interface.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/optimise/interface.jl b/src/optimise/interface.jl
index 19dd35b3..8e63bb26 100644
--- a/src/optimise/interface.jl
+++ b/src/optimise/interface.jl
@@ -43,7 +43,7 @@ Nesterov(ps, ρ; decay = 0) =
 optimiser. Parameters other than learning rate don't need tuning. Often a good
 choice for recurrent networks.
 """
-RMSProp(ps; η = 0.001, ρ = 0.9, ϵ = 1e-8, decay = 0) =
+RMSProp(ps, η = 0.001; ρ = 0.9, ϵ = 1e-8, decay = 0) =
   optimiser(ps, p -> rmsprop(p; η = η, ρ = ρ, ϵ = ϵ), p -> invdecay(p, decay),
             p -> descent(p, 1))
 """
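
Note (illustrative, not part of the patch): the effect of this one-line change is
that the learning rate η becomes an optional positional argument of RMSProp,
matching the other optimiser constructors such as Nesterov(ps, ρ; decay = 0)
visible in the hunk context, while ρ, ϵ, and decay remain keywords. A minimal
usage sketch in Julia, assuming the Flux API of this era (the Dense/params
setup here is hypothetical, included only to obtain a parameter collection):

    using Flux

    # Hypothetical model, just to get a parameter collection to optimise.
    m = Dense(10, 5)
    ps = params(m)

    # Old call form (pre-patch): RMSProp(ps; η = 0.01)
    # New call form (post-patch): η is an optional second positional argument.
    opt = RMSProp(ps, 0.01)

    # Omitting it uses the default η = 0.001; other hyperparameters stay keywords.
    opt2 = RMSProp(ps)
    opt3 = RMSProp(ps, 0.01; ρ = 0.95)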