layer: implement BatchNorm layer
See [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf)
This commit is contained in:
parent e7a510da9a
commit 669273b008
@@ -7,7 +7,7 @@ module Flux
 using Juno, Requires
 using Lazy: @forward
 
-export Chain, Dense, RNN, LSTM, Dropout,
+export BatchNorm, Chain, Dense, RNN, LSTM, Dropout,
   SGD, ADAM, Momentum, Nesterov,
   param, params, mapleaves
 
@@ -43,3 +43,72 @@ function (a::Dropout)(x)
 end
 
 _testmode!(a::Dropout, test) = (a.active = !test)
+
"""
|
||||
BatchNorm(dims...; λ = identity,
|
||||
initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1)
|
||||
|
||||
Batch Normalization Layer
|
||||
|
||||
See [Batch Normalization: Accelerating Deep Network Training by Reducing
|
||||
Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf)
|
||||
|
||||
In the example of MNIST,
|
||||
in order to normalize the input of other layer,
|
||||
put the `BatchNorm` layer before activation function.
|
||||
|
||||
```julia
|
||||
julia> m = Chain(
|
||||
Dense(28^2, 64),
|
||||
BatchNorm(64, λ = relu),
|
||||
Dense(64, 10),
|
||||
BatchNorm(10),
|
||||
softmax)
|
||||
Chain(Dense(784, 64), BatchNorm(64, λ = NNlib.relu), Dense(64, 10), BatchNorm(10), NNlib.softmax)
|
||||
```
|
||||
"""
|
||||
+mutable struct BatchNorm{F,V}
+  λ::F  # activation function
+  β::V  # bias
+  γ::V  # scale
+  μ     # moving mean
+  σ     # moving std
+  ϵ::Float64
+  momentum::Float64
+  active::Bool
+end
+
+BatchNorm(dims::Integer...; λ = identity,
+          initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1) =
+  BatchNorm(λ, param(initβ(dims)), param(initγ(dims)), 0., 1., ϵ, momentum, true)
+
+function (BN::BatchNorm)(x)
+  if !BN.active
+    # test mode: normalize with the stored moving statistics
+    μ = BN.μ
+    σ = BN.σ
+  else
+    # train mode: per-feature statistics across the batch (dimension 2)
+    m = size(x, 2)  # batch size
+    μ = sum(x, 2) ./ m
+    σ = sqrt.(sum((x .- μ).^2, 2) ./ (m - 1) .+ BN.ϵ)
+
+    # update moving mean/std with an exponential moving average
+    mtm = BN.momentum
+    BN.μ = mtm .* μ.data .+ (1 - mtm) .* BN.μ
+    BN.σ = mtm .* σ.data .+ (1 - mtm) .* BN.σ
+  end
+
+  # scale and shift the normalized input, then apply the activation
+  BN.λ.(BN.γ .* ((x .- μ) ./ σ) .+ BN.β)
+end
+
||||
+children(BN::BatchNorm) =
+  (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active)
+
+mapchildren(f, BN::BatchNorm) =
+  BatchNorm(BN.λ, f(BN.β), f(BN.γ), BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active)
+
+_testmode!(BN::BatchNorm, test) = (BN.active = !test)
+
+function Base.show(io::IO, l::BatchNorm)
+  print(io, "BatchNorm($(join(size(l.β), ", "))")
+  (l.λ == identity) || print(io, ", λ = $(l.λ)")
+  print(io, ")")
+end
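
To make the statistics concrete: at train time the forward pass reduces over dimension 2, so each row (feature) is normalized by the mean and standard deviation of its own batch of column samples. A minimal sketch with plain arrays, using the same Julia 0.6-era syntax as the diff:

```julia
# Per-feature batch statistics, mirroring the forward pass above.
x = [1.0 2.0 3.0;
     4.0 6.0 8.0]       # 2 features × 3 samples (columns are samples)
m = size(x, 2)          # batch size: 3
μ = sum(x, 2) ./ m      # per-feature mean: [2.0; 6.0]
σ = sqrt.(sum((x .- μ).^2, 2) ./ (m - 1))  # per-feature std: [1.0; 2.0]
x̂ = (x .- μ) ./ σ       # each row now has zero mean and unit variance
```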
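The moving statistics are an exponential moving average: with `momentum = .1`, each batch pulls the stored value 10% of the way toward the current batch statistic. A small sketch of that update rule in isolation:

```julia
# The same update rule as `BN.μ = mtm .* μ.data .+ (1 - mtm) .* BN.μ`,
# run on scalars: three identical batches, each with mean 1.0.
mtm = 0.1
moving_μ = 0.0
for batch_μ in (1.0, 1.0, 1.0)
  moving_μ = mtm * batch_μ + (1 - mtm) * moving_μ
end
moving_μ   # 0.271 after three batches; keeps converging toward 1.0
```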
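Finally, the `_testmode!` hook is what lets the layer switch between the two branches of the forward pass. A hedged usage sketch, assuming Flux's `testmode!` helper dispatches to the `_testmode!` leaves, as it does for the existing `Dropout` layer:

```julia
# Hypothetical usage; `testmode!` is assumed to toggle `active` via the
# `_testmode!` hook, mirroring the existing Dropout behaviour.
using Flux

m = Chain(Dense(28^2, 64), BatchNorm(64, λ = relu), Dense(64, 10), softmax)

Flux.testmode!(m)          # active = false: forward pass uses BN.μ and BN.σ
y = m(rand(28^2, 100))
Flux.testmode!(m, false)   # back to training: batch stats + moving updates
```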