Minor fixes:

parent 7e83852862
commit c4f87ff15c
@@ -1,6 +1,6 @@
 """
-    testmode!(m, val=true)
+    testmode!(m)
+    testmode!(m, false)

 Put layers like [`Dropout`](@ref) and [`BatchNorm`](@ref) into testing mode
 (or back to training mode with `false`).
 """
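For context, a minimal usage sketch of the two call forms this hunk documents, against the Flux API of this commit's era (`Chain`, `Dense`, and `Dropout` are standard Flux layers; the model `m` is illustrative):

```julia
using Flux

m = Chain(Dense(28^2, 64, relu), Dropout(0.5))

testmode!(m)         # Dropout/BatchNorm layers now run in test mode
testmode!(m, false)  # back to training mode
```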
@@ -13,11 +13,9 @@ _testmode!(m, test) = nothing

 """
     Dropout(p)

 A Dropout layer. For each input, either sets that input to `0` (with probability
 `p`) or scales it by `1/(1-p)`. This is used as a regularisation, i.e. it
 reduces overfitting during training.

 Does nothing to the input once in [`testmode!`](@ref).
 """
 mutable struct Dropout{F}
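The docstring above describes inverted dropout: each input is zeroed with probability `p`, and survivors are scaled by `1/(1-p)` so the expected activation is unchanged, which is why the layer can be a no-op in test mode. A standalone sketch of that rule (illustrative only, not Flux's implementation; `dropout_sketch` is a made-up name):

```julia
function dropout_sketch(x, p)
  mask = rand(size(x)...) .> p   # keep each entry with probability 1 - p
  return x .* mask ./ (1 - p)    # rescale so the expectation matches x
end

dropout_sketch(ones(4, 4), 0.5)  # roughly half zeros, the rest equal to 2.0
```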
@@ -43,9 +41,7 @@ end
 _testmode!(a::Dropout, test) = (a.active = !test)

 """
-
     LayerNorm(h::Integer)

 A [normalisation layer](https://arxiv.org/pdf/1607.06450.pdf) designed to be
 used with recurrent hidden states of size `h`. Normalises the mean/stddev of
 each input before applying a per-neuron gain/bias.
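The LayerNorm docstring itself is untouched here; for reference, the normalisation it describes reduces, for a single hidden state `h`, to something like the following sketch (the per-neuron gain/bias it mentions, the layer's learnable parameters, would be applied to the result; the helper name is mine):

```julia
using Statistics

layernorm_sketch(h; ϵ = 1e-5) =
  (h .- mean(h)) ./ (std(h, corrected = false) + ϵ)
```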
@@ -69,23 +65,17 @@ end
     BatchNorm(channels::Integer, σ = identity;
               initβ = zeros, initγ = ones,
               ϵ = 1e-5, momentum = .1)

 Batch Normalization layer. The `channels` input should be the size of the
 channel dimension in your data (see below).

 Given an array with `N` dimensions, call the `N-1`th the channel dimension. (For
 a batch of feature vectors this is just the data dimension, for `WHCN` images
 it's the usual channel dimension.)

 `BatchNorm` computes the mean and variance for each `W×H×1×N` slice and
 shifts them to have a new mean and variance (corresponding to the learnable,
 per-channel `bias` and `scale` parameters).

 See [Batch Normalization: Accelerating Deep Network Training by Reducing
 Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf).

 Example:

 ```julia
 m = Chain(
   Dense(28^2, 64),
@@ -93,32 +83,23 @@ m = Chain(
   Dense(64, 10),
   BatchNorm(10),
   softmax)

-y = m(rand(28^2, 10))
 ```

-To use the layer at test time set [`testmode!(m, true)`](@ref).
 """
-mutable struct BatchNorm
-  λ  # activation function
-  β  # bias
-  γ  # scale
-  μ  # moving mean
-  σ²  # moving var
-  ϵ
-  momentum
+mutable struct BatchNorm{F,V,W,N}
+  λ::F  # activation function
+  β::V  # bias
+  γ::V  # scale
+  μ::W  # moving mean
+  σ²::W  # moving var
+  ϵ::N
+  momentum::N
   active::Bool
 end

-# NOTE: Keeping the ϵ smaller than 1e-5 is not supported by CUDNN
-function BatchNorm(chs::Integer, λ = identity;
-                   initβ = (i) -> zeros(i),
-                   initγ = (i) -> ones(i),
-                   ϵ = 1f-5,
-                   momentum = 0.1)
-  BatchNorm(λ, param(initβ(chs)), param(initγ(chs)),
-            zeros(Float32, chs), ones(Float32, chs), ϵ, momentum, true)
-end
+BatchNorm(chs::Integer, λ = identity;
+          initβ = (i) -> zeros(i), initγ = (i) -> ones(i), ϵ = 1e-5, momentum = .1) =
+  BatchNorm(λ, param(initβ(chs)), param(initγ(chs)),
+            zeros(chs), ones(chs), ϵ, momentum, true)

 function (BN::BatchNorm)(x)
   size(x, ndims(x)-1) == length(BN.β) ||
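Putting the restored one-line constructor together with the docstring's channel-dimension convention, a hedged usage sketch (the keyword names come straight from the diff; the shapes are illustrative):

```julia
bn = BatchNorm(64, relu; ϵ = 1e-5, momentum = 0.1)

x    = rand(64, 10)           # feature vectors: channel dim is N-1 = 1
imgs = rand(28, 28, 64, 10)   # WHCN images: channel dim is N-1 = 3
# In both cases size(·, ndims(·)-1) == 64 == length(bn.β), which is what
# the assertion at the top of the forward pass checks.
```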
@@ -132,7 +113,7 @@ function (BN::BatchNorm)(x)

   if !BN.active
     μ = reshape(BN.μ, affine_shape...)
-    σ = reshape(BN.σ, affine_shape...)
+    σ² = reshape(BN.σ², affine_shape...)
   else
     T = eltype(x)

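This hunk fixes the inactive (test-mode) branch to read the moving variance `σ²` rather than the stale `σ` field. What that branch computes, as a standalone sketch (assuming channels along dimension `ndims(x)-1`; the helper name is mine, not Flux's):

```julia
function bn_testmode_sketch(x, μ, σ², ϵ)
  # reshape the per-channel statistics so they broadcast over x
  shape = ntuple(i -> i == ndims(x) - 1 ? length(μ) : 1, ndims(x))
  (x .- reshape(μ, shape)) ./ sqrt.(reshape(σ², shape) .+ ϵ)
end
```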
@@ -143,8 +124,8 @@ function (BN::BatchNorm)(x)

     # update moving mean/var
     mtm = data(convert(T, BN.momentum))
-    BN.μ = (1 - mtm) .* BN.μ .+ mtm .* dropdims(data(μ), dims = (axes...,))
-    BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* dropdims(data(σ²), dims = (axes...)) .* m ./ (m - 1))
+    BN.μ = (1 - mtm) .* BN.μ .+ mtm .* dropdims(data(μ), dims = axes)
+    BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* dropdims(data(σ²), dims = axes) .* m ./ (m - 1))
   end

   let λ = BN.λ
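The corrected update is an exponential moving average over batch statistics; the batch variance is additionally rescaled by `m/(m-1)` (Bessel's correction), since it was computed with a `1/m` normaliser. Isolated as a sketch (the function name is mine):

```julia
function ema_update(μ_mov, σ²_mov, μ_batch, σ²_batch, m; mtm = 0.1)
  μ_mov  = (1 - mtm) .* μ_mov  .+ mtm .* μ_batch
  σ²_mov = (1 - mtm) .* σ²_mov .+ mtm .* σ²_batch .* m ./ (m - 1)
  return μ_mov, σ²_mov
end
```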
@@ -152,7 +133,11 @@
   end
 end

-@treelike BatchNorm
+children(BN::BatchNorm) =
+  (BN.λ, BN.β, BN.γ, BN.μ, BN.σ², BN.ϵ, BN.momentum, BN.active)
+
+mapchildren(f, BN::BatchNorm) = # e.g. mapchildren(cu, BN)
+  BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ²), BN.ϵ, BN.momentum, BN.active)

 _testmode!(BN::BatchNorm, test) = (BN.active = !test)
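Replacing `@treelike` with explicit `children`/`mapchildren` definitions makes the non-trainable state (`μ`, `σ²`, `ϵ`, `momentum`, `active`) part of the layer's child tuple, so conversions rebuild the whole layer rather than just its parameters. A hedged usage sketch following the code's own `mapchildren(cu, BN)` comment (assumes a CUDA array package providing `cu` is loaded, and `bn` is a `BatchNorm`):

```julia
# f is applied to the array children (β, γ, μ, σ²); λ, ϵ, momentum and
# active are carried over unchanged by the definition above.
bn_gpu = mapchildren(cu, bn)
```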