"""
    testmode!(m)
    testmode!(m, false)

Put layers like [`Dropout`](@ref) and [`BatchNorm`](@ref) into testing mode
(or back to training mode with `false`).
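
A quick sketch of typical usage (the surrounding `Chain`/`Dense`/`Dropout`
layers here are only illustrative):

```julia
m = Chain(Dense(10, 5, relu), Dropout(0.5), Dense(5, 2), softmax)
testmode!(m)         # evaluation mode: Dropout becomes a no-op, BatchNorm uses its moving statistics
testmode!(m, false)  # back to training behaviour
```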
"""
function testmode!(m, val::Bool=true)
  prefor(x -> _testmode!(x, val), m)
  return m
end

_testmode!(m, test) = nothing

"""
    Dropout(p)

A Dropout layer. For each input, either sets that input to `0` (with probability
`p`) or scales it by `1/(1-p)`. This is used as a regularisation, i.e. it
reduces overfitting during training.

Does nothing to the input once in [`testmode!`](@ref).
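
For example (an illustrative sketch; the exact output depends on the random
mask):

```julia
d = Dropout(0.5)
x = ones(4)
d(x)          # roughly half the entries zeroed, the rest scaled by 1/(1 - 0.5) = 2
testmode!(d)
d(x)          # returns x unchanged
```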
"""

mutable struct Dropout{F}
  p::F
  active::Bool
end

function Dropout(p)
  @assert 0 ≤ p ≤ 1
  Dropout{typeof(p)}(p, true)
end

function (a::Dropout)(x)
  a.active || return x
  # Build a random mask with entries 1/(1-p) (keep) or 0 (drop), then apply it.
  y = similar(x)
  rand!(y)
  q = 1 - a.p
  @inbounds for i = 1:length(y)
    y[i] = y[i] > a.p ? 1 / q : 0
  end
  return y .* x
end

_testmode!(a::Dropout, test) = (a.active = !test)

"""
    LayerNorm(h::Integer)

A [normalisation layer](https://arxiv.org/pdf/1607.06450.pdf) designed to be
used with recurrent hidden states of size `h`. Normalises the mean/stddev of
each input before applying a per-neuron gain/bias.
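
For example, to normalise the hidden state of a recurrent layer (an
illustrative sketch):

```julia
m = Chain(LSTM(10, 32), LayerNorm(32))
```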
"""

struct LayerNorm{T}
  diag::Diagonal{T}
end

LayerNorm(h::Integer) =
  LayerNorm(Diagonal(h))

treelike(LayerNorm)

(a::LayerNorm)(x) = a.diag(normalise(x))

function Base.show(io::IO, l::LayerNorm)
  print(io, "LayerNorm(", length(l.diag.α), ")")
end

"""
    BatchNorm(channels::Integer, λ = identity;
              initβ = zeros, initγ = ones,
              ϵ = 1e-8, momentum = .1)

Batch Normalization layer. The `channels` input should be the size of the
channel dimension in your data (see below).

Given an array with `N` dimensions, call the `N-1`th dimension the channel
dimension. (For a batch of feature vectors this is just the data dimension; for
`WHCN` images it's the usual channel dimension.)

`BatchNorm` computes the mean and variance for each `W×H×1×N` slice and
shifts them to have a new mean and variance (corresponding to the learnable,
per-channel `bias` and `scale` parameters).

See [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf).

Example:
```julia
m = Chain(
  Dense(28^2, 64),
  BatchNorm(64, relu),
  Dense(64, 10),
  BatchNorm(10),
  softmax)
```
"""

mutable struct BatchNorm{F,V,W,N}
  λ::F  # activation function
  β::V  # bias
  γ::V  # scale
  μ::W  # moving mean
  σ::W  # moving std
  ϵ::N
  momentum::N
  active::Bool
end

BatchNorm(chs::Integer, λ = identity;
          initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1) =
  BatchNorm(λ, param(initβ(chs)), param(initγ(chs)),
            zeros(chs), ones(chs), ϵ, momentum, true)

function (BN::BatchNorm)(x)
  size(x, ndims(x)-1) == length(BN.β) ||
    error("BatchNorm expected $(length(BN.β)) channels, got $(size(x, ndims(x)-1))")
  λ, γ, β = BN.λ, BN.γ, BN.β
  dims = length(size(x))
  channels = size(x, dims-1)
  affine_shape = ones(Int, dims)
  affine_shape[end-1] = channels  # all-ones shape except the channel dim, for broadcasting
  m = prod(size(x)[1:end-2]) * size(x)[end]  # number of entries per channel

  if !BN.active
    μ = reshape(BN.μ, affine_shape...)
    σ = reshape(BN.σ, affine_shape...)
  else
    T = eltype(x)

    ϵ = data(convert(T, BN.ϵ))
    axes = [1:dims-2; dims] # axes to reduce along (all but channels axis)
    μ = mean(x, axes)
    σ = sqrt.(mean((x .- μ).^2, axes) .+ ϵ)

    # update moving mean/std
    mtm = data(convert(T, BN.momentum))
    BN.μ = (1 - mtm) .* BN.μ .+ mtm .* squeeze(data(μ), (axes...))
    BN.σ = (1 - mtm) .* BN.σ .+ mtm .* squeeze(data(σ), (axes...)) .* m ./ (m - 1)
  end

  λ.(reshape(γ, affine_shape...) .* ((x .- μ) ./ σ) .+ reshape(β, affine_shape...))
end

children(BN::BatchNorm) =
  (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active)

mapchildren(f, BN::BatchNorm) =  # e.g. mapchildren(cu, BN)
  BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ), BN.ϵ, BN.momentum, BN.active)

_testmode!(BN::BatchNorm, test) = (BN.active = !test)

function Base.show(io::IO, l::BatchNorm)
  print(io, "BatchNorm($(join(size(l.β), ", "))")
  (l.λ == identity) || print(io, ", λ = $(l.λ)")
  print(io, ")")
end