# Flux.jl/src/layers/normalise.jl

"""
    testmode!(m)
    testmode!(m, false)

Put layers like [`Dropout`](@ref) and [`BatchNorm`](@ref) into testing mode
(or back to training mode with `false`).
"""
function testmode!(m, val::Bool=true)
  prefor(x -> _testmode!(x, val), m)
  return m
end
_testmode!(m, test) = nothing
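
# Usage sketch (hypothetical model, not part of this file): `prefor` walks the
# model tree, so calling `testmode!` on a `Chain` flips every Dropout/BatchNorm
# layer it contains.
#
#   m = Chain(Dense(10, 5), Dropout(0.3))
#   testmode!(m)         # m[2].active == false
#   testmode!(m, false)  # back to training mode
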
"""
    Dropout(p)

A Dropout layer. For each input, either sets that input to `0` (with probability
`p`) or scales it by `1/(1-p)`. This is used as a regularisation, i.e. it
reduces overfitting during training.
Does nothing to the input once in [`testmode!`](@ref).
"""
mutable struct Dropout{F}
  p::F
  active::Bool
end

function Dropout(p)
  @assert 0 ≤ p ≤ 1
  Dropout{typeof(p)}(p, true)
end

# Maps a uniform draw `y` to the inverse keep-probability `1/q` (keep and
# rescale) or to zero (drop), preserving the input's expected value.
_dropout_kernel(y::T, p, q) where {T} = y > p ? T(1 / q) : T(0)

function (a::Dropout)(x)
  a.active || return x
  y = similar(x)
  rand!(y)
  y .= _dropout_kernel.(y, a.p, 1 - a.p)
  return x .* y
end
_testmode!(a::Dropout, test) = (a.active = !test)
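
# A minimal usage sketch (not part of the layer definitions): at train time the
# surviving entries are rescaled by 1/(1-p), so the expected activation is
# unchanged; after `testmode!` the layer passes data through untouched.
#
#   d = Dropout(0.5)
#   x = ones(100)
#   y = d(x)       # roughly half the entries are 0, the rest are 2.0
#   testmode!(d)
#   d(x) == x      # true: dropout is a no-op in test mode
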

"""
    LayerNorm(h::Integer)

A [normalisation layer](https://arxiv.org/pdf/1607.06450.pdf) designed to be
used with recurrent hidden states of size `h`. Normalises the mean/stddev of
each input before applying a per-neuron gain/bias.
"""
struct LayerNorm{T}
  diag::Diagonal{T}
end

LayerNorm(h::Integer) =
  LayerNorm(Diagonal(h))
treelike(LayerNorm)
(a::LayerNorm)(x) = a.diag(normalise(x))
function Base.show(io::IO, l::LayerNorm)
  print(io, "LayerNorm(", length(l.diag.α), ")")
end
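
# A minimal usage sketch (assumed sizes, not part of this file), taking
# `normalise` to standardise each column to zero mean / unit std before the
# per-neuron `Diagonal` gain and bias are applied:
#
#   ln = LayerNorm(5)
#   h  = randn(5, 10)   # hidden states for a batch of 10
#   ln(h)               # each column normalised, then scaled and shifted
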

"""
    BatchNorm(channels::Integer, λ = identity;
              initβ = zeros, initγ = ones,
              ϵ = 1e-5, momentum = .1)

Batch Normalization layer. The `channels` input should be the size of the
channel dimension in your data (see below).
Given an array with `N` dimensions, call the `N-1`th the channel dimension. (For
a batch of feature vectors this is just the data dimension, for `WHCN` images
it's the usual channel dimension.)
`BatchNorm` computes the mean and variance for each `W×H×1×N` slice and
shifts them to have a new mean and variance (corresponding to the learnable,
per-channel `bias` and `scale` parameters).
See [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf).
Example:
```julia
m = Chain(
  Dense(28^2, 64),
  BatchNorm(64, relu),
  Dense(64, 10),
  BatchNorm(10),
  softmax)
```
"""

mutable struct BatchNorm{F,V,W,N}
  λ::F  # activation function
  β::V  # bias
  γ::V  # scale
  μ::W  # moving mean
  σ::W  # moving std
  ϵ::N         # small constant added to the variance for numerical stability
  momentum::N  # weighting of the current batch in the moving statistics
  cache        # backend-specific cache; unused by the generic implementation below
  active::Bool
end

BatchNorm(chs::Integer, λ = identity;
          initβ = zeros, initγ = ones, ϵ = 1e-5, momentum = .1) =
  BatchNorm(λ, param(initβ(chs)), param(initγ(chs)),
            zeros(chs), ones(chs), ϵ, momentum, nothing, true)
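
# Field sketch (values follow directly from the constructor above): `BatchNorm(3)`
# starts with trainable β = zeros(3) and γ = ones(3), running statistics
# μ = zeros(3) and σ = ones(3), ϵ = 1e-5, momentum = 0.1, and active = true.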

function batchnorm(γ, β, x, μ, σ, momentum; cache = nothing, alpha = 1, beta = 0, eps = 1.0e-5, training = true)
  size(x, ndims(x)-1) == length(β) ||
    error("BatchNorm expected $(length(β)) channels, got $(size(x, ndims(x)-1))")
  dims = length(size(x))
  channels = size(x, dims-1)
  affine_shape = ones(Int, dims)
  affine_shape[end-1] = channels
  m = prod(size(x)[1:end-2]) * size(x)[end]

  if !training
    μ_curr = reshape(μ, affine_shape...)
    σ_curr = reshape(σ, affine_shape...)
  else
    T = eltype(x)
    eps = Flux.data(convert(T, eps))
    axes = [1:dims-2; dims] # axes to reduce along (all but channels axis)
    μ_curr = mean(x, axes)
    σ_curr = sqrt.(mean((x .- μ_curr).^2, axes) .+ eps)

    # update moving mean/std
    mtm = Flux.data(convert(T, momentum))
    μ .= (1 - mtm) .* μ .+ mtm .* squeeze(Flux.data(μ_curr), (axes...))
    σ .= (1 - mtm) .* σ .+ mtm .* squeeze(Flux.data(σ_curr), (axes...)) .* m ./ (m - 1)
  end

  reshape(γ, affine_shape...) .* ((x .- μ_curr) ./ σ_curr) .+ reshape(β, affine_shape...)
end
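
# Shape bookkeeping sketch (hypothetical sizes): for a WHCN batch x of size
# (28, 28, 3, 16), affine_shape is (1, 1, 3, 1), the statistics are reduced
# over dims [1, 2, 4], and m = 28*28*16 elements contribute to each channel's
# mean/variance.
#
#   x  = rand(28, 28, 3, 16)
#   bn = BatchNorm(3)
#   y  = bn(x)      # same size as x; each channel ≈ zero mean, unit variance
#   testmode!(bn)   # later calls use the stored moving statistics instead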
(BN::BatchNorm)(x) = BN.λ.(batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = BN.cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active))
Flux.treelike(BatchNorm)
# children(BN::BatchNorm) =
# (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active)
#
# mapchildren(f, BN::BatchNorm) = # e.g. mapchildren(cu, BN)
# BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ), BN.ϵ, BN.momentum, BN.active)
_testmode!(BN::BatchNorm, test) = (BN.active = !test)
function Base.show(io::IO, l::BatchNorm)
  print(io, "BatchNorm($(join(size(l.β), ", "))")
  (l.λ == identity) || print(io, ", λ = $(l.λ)")
  print(io, ")")
end