""" testmode!(m) testmode!(m, false) Put layers like [`Dropout`](@ref) and [`BatchNorm`](@ref) into testing mode (or back to training mode with `false`). """ function testmode!(m, val::Bool=true) prefor(x -> _testmode!(x, val), m) return m end _testmode!(m, test) = nothing """ Dropout(p) A Dropout layer. For each input, either sets that input to `0` (with probability `p`) or scales it by `1/(1-p)`. This is used as a regularisation, i.e. it reduces overfitting during training. Does nothing to the input once in [`testmode!`](@ref). """ mutable struct Dropout{F} p::F active::Bool end function Dropout(p) @assert 0 ≤ p ≤ 1 Dropout{typeof(p)}(p, true) end _dropout_kernel(y::T, p, q) where {T} = y > p ? T(1 / q) : T(0) function (a::Dropout)(x) a.active || return x y = similar(x) rand!(y) y .= _dropout_kernel.(y, a.p, 1 - a.p) return x .* y end _testmode!(a::Dropout, test) = (a.active = !test) """ LayerNorm(h::Integer) A [normalisation layer](https://arxiv.org/pdf/1607.06450.pdf) designed to be used with recurrent hidden states of size `h`. Normalises the mean/stddev of each input before applying a per-neuron gain/bias. """ struct LayerNorm{T} diag::Diagonal{T} end LayerNorm(h::Integer) = LayerNorm(Diagonal(h)) treelike(LayerNorm) (a::LayerNorm)(x) = a.diag(normalise(x)) function Base.show(io::IO, l::LayerNorm) print(io, "LayerNorm(", length(l.diag.α), ")") end """ BatchNorm(channels::Integer, σ = identity; initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1) Batch Normalization layer. The `channels` input should be the size of the channel dimension in your data (see below). Given an array with `N` dimensions, call the `N-1`th the channel dimension. (For a batch of feature vectors this is just the data dimension, for `WHCN` images it's the usual channel dimension.) `BatchNorm` computes the mean and variance for each each `W×H×1×N` slice and shifts them to have a new mean and variance (corresponding to the learnable, per-channel `bias` and `scale` parameters). See [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf). Example: ```julia m = Chain( Dense(28^2, 64), BatchNorm(64, relu), Dense(64, 10), BatchNorm(10), softmax) ``` """ mutable struct BatchNorm{F,V,W,N} λ::F # activation function β::V # bias γ::V # scale μ::W # moving mean σ::W # moving std ϵ::N momentum::N cache active::Bool end BatchNorm(chs::Integer, λ = identity; initβ = zeros, initγ = ones, ϵ = 1e-5, momentum = .1) = BatchNorm(λ, param(initβ(chs)), param(initγ(chs)), zeros(chs), ones(chs), ϵ, momentum, nothing, true) function batchnorm(γ, β, x, μ, σ, momentum; cache = nothing, alpha = 1, beta = 0, eps = 1.0e-5, training = true) size(x, ndims(x)-1) == length(β) || error("BatchNorm expected $(length(BN.β)) channels, got $(size(x, ndims(x)-1))") dims = length(size(x)) channels = size(x, dims-1) affine_shape = ones(Int, dims) affine_shape[end-1] = channels m = prod(size(x)[1:end-2]) * size(x)[end] if !training μ_curr = reshape(μ, affine_shape...) σ_curr = reshape(σ, affine_shape...) else T = eltype(x) eps = Flux.data(convert(T, eps)) axes = [1:dims-2; dims] # axes to reduce along (all but channels axis) μ_curr = mean(x, axes) σ_curr = sqrt.(mean((x .- μ_curr).^2, axes) .+ eps) # update moving mean/std mtm = Flux.data(convert(T, momentum)) μ .= (1 - mtm) .* μ .+ mtm .* squeeze(Flux.data(μ_curr), (axes...)) σ .= (1 - mtm) .* σ .+ mtm .* squeeze(Flux.data(σ_curr), (axes...)) .* m ./ (m - 1) end reshape(γ, affine_shape...) .* ((x .- μ_curr) ./ σ_curr) .+ reshape(β, affine_shape...) end (BN::BatchNorm)(x) = BN.λ.(batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = BN.cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active)) Flux.treelike(BatchNorm) # children(BN::BatchNorm) = # (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active) # # mapchildren(f, BN::BatchNorm) = # e.g. mapchildren(cu, BN) # BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ), BN.ϵ, BN.momentum, BN.active) _testmode!(BN::BatchNorm, test) = (BN.active = !test) function Base.show(io::IO, l::BatchNorm) print(io, "BatchNorm($(join(size(l.β), ", "))") (l.λ == identity) || print(io, ", λ = $(l.λ)") print(io, ")") end