diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl
index 95599867..c3a144f4 100644
--- a/src/layers/normalise.jl
+++ b/src/layers/normalise.jl
@@ -26,7 +26,7 @@ _dropout_shape(s, dims) = tuple((i ∉ dims ? 1 : si for (i, si) ∈ enumerate(s
 
 _dropout_kernel(y::T, p, q) where {T} = y > p ? T(1 / q) : T(0)
 
-function (a::Dropout)(x)
+function dropout(x, p; dims = :)
   istraining() || return x
   y = similar(x)
   rand!(y)
@@ -34,6 +34,11 @@ function (a::Dropout)(x)
   return x .* y
 end
 
+function (a::Dropout)(x)
+  istraining() || return x
+  return dropout(x, a.p; dims = a.dims)
+end
+
 """
     AlphaDropout(p)
 A dropout layer. It is used in Self-Normalizing Neural Networks.
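
For reference, a self-contained sketch of the inverted-dropout computation that the new functional `dropout(x, p; dims = :)` form exposes. `dropout_sketch` is a hypothetical name and the body omits Flux internals such as `istraining()`, `_dropout_shape`, and `_dropout_kernel`; it only illustrates the masking and the 1 / (1 - p) rescaling along `dims`, not the implementation in the diff above.

# Illustrative sketch only, not the code added in this diff.
function dropout_sketch(x::AbstractArray{T}, p; dims = :) where {T}
  q = 1 - p
  # Mask shape: full size along `dims`, size 1 elsewhere, so the mask is
  # broadcast (shared) across the remaining dimensions.
  sz = dims === Colon() ? size(x) :
       ntuple(i -> i in dims ? size(x, i) : 1, ndims(x))
  y = rand(T, sz)
  # Keep each entry with probability 1 - p and rescale kept entries by 1 / (1 - p),
  # so the expected value of the output matches the input.
  return x .* ifelse.(y .> p, T(1 / q), T(0))
end

x = randn(Float32, 4, 3)
dropout_sketch(x, 0.5)            # independent mask entry per element
dropout_sketch(x, 0.5; dims = 2)  # one mask entry per column, shared down the rows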