From f19066ee29afaf064579f3b3cb330dc00812324a Mon Sep 17 00:00:00 2001 From: Dhairya Gandhi Date: Thu, 10 Oct 2019 16:48:12 +0530 Subject: [PATCH] more docstrings --- src/optimise/optimisers.jl | 225 ++++++++++++++++++++++++++----------- 1 file changed, 161 insertions(+), 64 deletions(-) diff --git a/src/optimise/optimisers.jl b/src/optimise/optimisers.jl index 14cc3fec..64eee42a 100644 --- a/src/optimise/optimisers.jl +++ b/src/optimise/optimisers.jl @@ -7,23 +7,20 @@ const ϵ = 1e-8 # TODO: should use weak refs """ -# Descent + Descent(η) ## Description Classic gradient descent optimiser with learning rate `η`. For each parameter `p` and its gradient `δp`, this runs `p -= η*δp` -## Constructors - - `Descent()`: Use the default learning rate (η), as described in the parameters section. - - - `Descent(η)`: Provide a custom learning rate (η) to the Descent optimiser. - ## Parameters - - Learning rate (η): The amount by which the gradients are discounted before updating the weights. Defaults to `0.1`. + - Learning Rate (η): The amount by which the gradients are discounted before updating the weights. Defaults to `0.1`. ## Example ```julia-repl -opt = Descent() +opt = Descent() # uses default η (0.1) + +opt = Descent(0.3) # use provided η ps = params(model) @@ -47,11 +44,18 @@ end """ Momentum(η, ρ) - Calls to `Momentum()` default to: - - learning rate (η): 0.01 - - decay (ρ): 0.9 - Gradient descent with learning rate `η` and momentum `ρ`. + +## Parameters + - Learning Rate (`η`): Amount by which gradients are discounted before updating the weights. Defaults to `0.01`. + - Momentum (`ρ`): Parameter that accelerates descent in the relevant direction and dampens oscillations. Defaults to `0.9`. 
+ +## Examples +```julia +opt = Momentum() # uses defaults of η = 0.01 and ρ = 0.9 + +opt = Momentum(0.01, 0.99) +``` """ mutable struct Momentum eta::Float64 @@ -71,11 +75,18 @@ end """ Nesterov(η, ρ) - Calls to `Nesterov()` default to: - - learning rate (η): 0.001 - - nesterov momentum (ρ): 0.9 - Gradient descent with learning rate `η` and Nesterov momentum `ρ`. + +## Parameters + - Learning Rate (η): Amount by which the gradients are discounted before updating the weights. Defaults to `0.001`. + - Nesterov Momentum (ρ): Parameter controlling the amount of Nesterov momentum to be applied. Defaults to `0.9`. + +## Examples +```julia +opt = Nesterov() # uses defaults η = 0.001 and ρ = 0.9 + +opt = Nesterov(0.003, 0.95) +``` """ mutable struct Nesterov eta::Float64 @@ -96,13 +107,21 @@ end """ RMSProp(η, ρ) - Calls to `RMSProp()` default to: - - learning rate (η): 0.001 - - rho (ρ): 0.9 +Implements the RMSProp algorithm. Often a good choice for recurrent networks. Parameters other than learning rate generally don't need tuning. +## Parameters + - Learning Rate (η): Defaults to `0.001`. + - Rho (ρ): Defaults to `0.9`. + +## Examples +```julia +opt = RMSProp() # uses default η = 0.001 and ρ = 0.9 + +opt = RMSProp(0.002, 0.95) +``` + +## References [RMSProp](https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) -optimiser. Parameters other than learning rate don't need tuning. Often a good -choice for recurrent networks. """ mutable struct RMSProp eta::Float64 @@ -122,10 +141,20 @@ end """ ADAM(η, β::Tuple) - Calls to `ADAM()` default to: - - learning rate (η): 0.001 - - (beta1, beta2) (β): (0.9, 0.999) +Implements the ADAM optimiser. +## Parameters + - Learning Rate (`η`): Defaults to `0.001`. + - Beta (`β::Tuple`): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`. 
+ +## Examples + +```julia +opt = ADAM() # uses the default η = 0.001 and β = (0.9, 0.999) + +opt = ADAM(0.001, (0.9, 0.8)) +``` +## References [ADAM](https://arxiv.org/abs/1412.6980v8) optimiser. """ mutable struct ADAM @@ -149,10 +178,21 @@ end """ RADAM(η, β::Tuple) - Calls to `RADAM()` default to: - - learning rate (η): 0.001 - - (beta1, beta2) (β): (0.9, 0.999) +Implements the rectified ADAM optimiser. +## Parameters + - Learning Rate (η): Defaults to `0.001` + - Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`. + +## Examples + +```julia +opt = RADAM() # uses the default η = 0.001 and β = (0.9, 0.999) + +opt = RADAM(0.001, (0.9, 0.8)) +``` + +## References [RADAM](https://arxiv.org/pdf/1908.03265v1.pdf) optimiser (Rectified ADAM). """ mutable struct RADAM @@ -183,12 +223,20 @@ end """ AdaMax(η, β::Tuple) - Calls to `AdaMax()` default to: - - learning rate (η): 0.001 - - (beta1, beta2) (β): (0.9, 0.999) +Variant of ADAM based on ∞-norm. -[AdaMax](https://arxiv.org/abs/1412.6980v9) optimiser. Variant of ADAM based on -the ∞-norm. +## Parameters + - Learning Rate (η): Defaults to `0.001` + - Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`. + +## Examples +```julia +opt = AdaMax() # uses default η and β + +opt = AdaMax(0.001, (0.9, 0.995)) +``` +## References +[AdaMax](https://arxiv.org/abs/1412.6980v9) optimiser. """ mutable struct AdaMax eta::Float64 @@ -211,9 +259,19 @@ end """ ADAGrad(η) - Calls to `AdaGrad()` default to: - - learning rate (η): 0.1 +Implements AdaGrad. It has parameter-specific learning rates based on how frequently each parameter is updated. +## Parameters + - Learning Rate (η): Defaults to `0.1` + +## Examples +```julia +opt = ADAGrad() # uses default η = 0.1 + +opt = ADAGrad(0.001) +``` + +## References [ADAGrad](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) optimiser. Parameters don't need tuning. 
""" @@ -234,11 +292,19 @@ end """ ADADelta(ρ) - Calls to `ADADelta()` default to: - rho (ρ): 0.9 +Version of ADAGrad that adapts learning rate based on a window of past gradient updates. Parameters don't need tuning. -[ADADelta](https://arxiv.org/abs/1212.5701) optimiser. Parameters don't need -tuning. +## Parameters + - Rho (ρ): Factor by which gradient is decayed at each time step. Defaults to `0.9`. + +## Examples +```julia +opt = ADADelta() # uses default ρ = 0.9 +opt = ADADelta(0.89) +``` + +## References +[ADADelta](https://arxiv.org/abs/1212.5701) optimiser. """ mutable struct ADADelta rho::Float64 @@ -259,12 +325,20 @@ end """ AMSGrad(η, β::Tuple) - Calls to `AMSGrad()` default to: - - learning rate (η): 0.001 - - (beta1, beta2) (β): (0.9, 0.999) +Implements AMSGrad version of the ADAM optimiser. Parameters don't need tuning. -[AMSGrad](https://openreview.net/forum?id=ryQu7f-RZ) optimiser. Parameters don't need -tuning. +## Parameters + - Learning Rate (η): Defaults to `0.001`. + - Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`. + +## Examples +```julia +opt = AMSGrad() # uses default η and β +opt = AMSGrad(0.001, (0.89, 0.995)) +``` + +## References +[AMSGrad](https://openreview.net/forum?id=ryQu7f-RZ) optimiser. """ mutable struct AMSGrad eta::Float64 @@ -286,12 +360,20 @@ end """ NADAM(η, β::Tuple) - Calls to `NADAM()` default to: - - learning rate (η): 0.001 - - (beta1, beta2) (β): (0.9, 0.999) +Nesterov variant of ADAM. Parameters don't need tuning. -[NADAM](http://cs229.stanford.edu/proj2015/054_report.pdf) optimiser. Parameters don't need -tuning. +## Parameters + - Learning Rate (η): Defaults to `0.001`. + - Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`. + +## Examples +```julia +opt = NADAM() # uses default η and β +opt = NADAM(0.002, (0.89, 0.995)) +``` + +## References +[NADAM](http://cs229.stanford.edu/proj2015/054_report.pdf) optimiser. 
""" mutable struct NADAM eta::Float64 @@ -314,11 +396,21 @@ end """ ADAMW(η, β::Tuple, decay) - Calls to `ADAMW()` default to: - - learning rate (η) 0.001 - - (beta1, beta2) (β): (0.9, 0.999) +Variant of ADAM defined by fixing weight decay regularization. -[ADAMW](https://arxiv.org/abs/1711.05101) fixing weight decay regularization in Adam. +## Parameters + - Learning Rate (η): Defaults to `0.001`. + - Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to (0.9, 0.999). + - decay: Decay applied to weights during optimisation. Defaults to 0. + +## Examples +```julia +opt = ADAMW() # uses default η, β and decay +opt = ADAMW(0.001, (0.89, 0.995), 0.1) +``` + +## References +[ADAMW](https://arxiv.org/abs/1711.05101) """ ADAMW(η = 0.001, β = (0.9, 0.999), decay = 0) = Optimiser(ADAM(η, β), WeightDecay(decay)) @@ -353,10 +445,12 @@ end """ InvDecay(γ) - Calls to `InvDecay()` default to: - - gamma (γ): 0.001 +Applies inverse time decay to an optimiser -Apply inverse time decay to an optimiser +## Parameters + - gamma (γ): Defaults to `0.001` + +## Example ```julia Optimiser(InvDecay(..), Opt(..)) ``` @@ -379,17 +473,20 @@ end """ ExpDecay(eta, decay, decay_step, clip) - Calls to `ExpDecay()` default to: - - learning rate (eta): 0.001 - - decay: 0.1 - - decay_step: 1000 - - clip: 1e-4 - Discount the learning rate `eta` by `decay` every `decay_step` till a minimum of `clip`. +## Parameters + - Learning Rate (eta): Defaults to `0.001`. + - decay: Factor by which the learning rate is discounted. Defaults to `0.1`. + - decay_step: Schedules decay operations by setting number of steps between two decay operations. Defaults to `1000`. + - clip: Minimum value of learning rate. Defaults to `1e-4`. 
+ +## Example To apply exponential decay to an optimiser: ```julia Optimiser(ExpDecay(..), Opt(..)) + + opt = Optimiser(ExpDecay(), ADAM()) ``` """ mutable struct ExpDecay @@ -415,10 +512,10 @@ end """ WeightDecay(wd) - Calls to `WeightDecay()` default to: - - weight decay (wd): 0 +Decays the weight by `wd`. -Decay the weight parameter by `wd` +## Parameters + - Weight Decay (wd): Defaults to `0`.