Added AlphaDropout, which is used in SNNs.
This commit is contained in:
parent d6cf116a74
commit 97f874abcf
@@ -43,6 +43,37 @@ end
_testmode!(a::Dropout, test) = (a.active = !test)
"""
|
||||
AlphaDropout(p)
|
||||
A dropout layer. It is used in Self-Normalizing Neural Networks.
|
||||
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)
|
||||
The AlphaDropout layer ensures that mean and variance of activations remains the same as before.
|
||||
"""
mutable struct AlphaDropout{F}
  p::F           # keep probability for each activation
  active::Bool   # true in training mode, false in test mode
end
function AlphaDropout(p)
  @assert 0 ≤ p ≤ 1
  AlphaDropout{typeof(p)}(p, true)
end
function (a::AlphaDropout)(x)
  a.active || return x
  α = -1.7580993408473766  # -λα, the saturation value of selu as x → -∞
  # Bernoulli mask from uniform noise: each unit is kept with probability a.p
  noise = rand(Float64, size(x.data))
  y = collect(x)
  # Dropped units are set to α instead of zero
  y .= y .* (noise .> (1 - a.p)) .+ α .* (noise .<= (1 - a.p))
  # Affine correction from the SNN paper, with keep probability q = a.p:
  # A = (q + α^2 q (1 - q))^(-1/2), B = -A α (1 - q)
  A = (a.p + a.p * (1 - a.p) * α^2)^(-0.5)
  B = -A * α * (1 - a.p)
  y .= A .* y .+ B
  return param(y)
end
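As a quick sanity check (not part of the commit), the docstring's mean/variance claim can be verified numerically. This sketch replicates the forward pass above on a plain Array, sidestepping x.data and param:

using Statistics

# Standalone replica of the AlphaDropout forward pass on a plain array
function alphadropout_sample(x::AbstractArray, p)
    α = -1.7580993408473766              # SELU saturation value -λα
    noise = rand(Float64, size(x))
    y = x .* (noise .> (1 - p)) .+ α .* (noise .<= (1 - p))
    A = (p + p * (1 - p) * α^2)^(-0.5)   # affine correction
    B = -A * α * (1 - p)
    return A .* y .+ B
end

x = randn(1_000_000)               # activations with mean ≈ 0, var ≈ 1
y = alphadropout_sample(x, 0.9)    # keep ≈ 90% of the activations
println((mean(y), var(y)))         # both should remain close to (0, 1)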
_testmode!(a::AlphaDropout, test) = (a.active = !test)
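For context (also not part of the diff), a minimal usage sketch, assuming AlphaDropout is exported from Flux alongside Dropout and that testmode! dispatches to the _testmode! method above:

using Flux

# SNN-style model: selu activations paired with AlphaDropout
m = Chain(
  Dense(784, 256, selu),
  AlphaDropout(0.9),   # under the forward pass above, ≈ 90% of units are kept
  Dense(256, 10),
  softmax)

y = m(rand(784))       # dropout is active by default (training mode)
Flux.testmode!(m)      # flips `active` via _testmode!, making the layer a no-op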
"""
|
||||
LayerNorm(h::Integer)