From d4b066fdf96bd503c7c2bb9bd29bed2b0ab8787a Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 12 Jun 2018 17:49:21 +0530 Subject: [PATCH 01/57] Forward Pass for BatchNorm Added --- src/cuda/cudnn.jl | 92 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 90 insertions(+), 2 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index bcadcf4f..d517024e 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -1,5 +1,7 @@ -using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, libcudnn_handle, - cudnnDataType, TensorDesc, FilterDesc +using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, + cudnnBatchNormMode_t, cudnnHandle_t, libcudnn_handle, cudnnDataType, TensorDesc, FilterDesc +using CuArrays +using Flux mutable struct DropoutDesc ptr::Ptr{Void} @@ -22,6 +24,92 @@ function DropoutDesc(ρ::Real; seed::Integer=0) return desc end +CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} +CuBatchNorm{T} = Flux.BatchNorm{<:Union{typeof(identity),typeof(relu)}, + <:CuParam{T,1},<:CuArray{T,1}, + <:Union{Float32,Float64}} + +CuBatchNorm(chs::Integer, λ = identity; + initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1) = + BatchNorm(λ, param(cu(initβ(Float32,chs))), param(cu(initγ(Float32,chs))), + zeros(Float32,chs), ones(Float32,chs), ϵ, momentum, true) + +const BATCHNORM_SPATIAL = 1 +const BATCHNORM_ACTIVATION = 0 +const BATCHNORM_MIN_EPS = 1e-5 + +@inline _wsize(y) = ((1 for _=1:ndims(y)-2)..., size(y)[end-1], 1) + +mutable struct bncache + mean + ivar +end + +bncache() = bncache(nothing, nothing) + +(CuBN::CuBatchNorm)(x::CuArray{T}) where T<:Union{Float32, Float64} = + CuBN.λ.(cudnnBatchNormalizationForward(CuBN.γ, CuBN.β, x, CuBN.μ, CuBN.σ, CuBN.momentum, eps = CuBN.ϵ, training = CuBN.active)) + +function cudnnBatchNormalizationForward(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, + running_mean::CuArray{T}, running_var::CuArray{T}, + momentum::T; cache = nothing, + alpha = T(1), beta = T(0), + eps = T(1e-5), training = true) where T<:Union{Float32, Float64} + y = similar(x) + dims = _wsize(x) + + if(eps < BATCHNORM_MIN_EPS) + warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) + eps = BATCHNORM_MIN_EPS + end + + if(training) + + if(cache !== nothing) + mean = cu(zeros(T, dims...)) + ivar = cu(ones(T, dims...)) + else + mean = C_NULL + ivar = C_NULL + end + + @check ccall((:cudnnBatchNormalizationForwardTraining, libcudnn), cudnnStatus_t, + (cudnnHandle_t,cudnnBatchNormMode_t,Ptr{Void}, Ptr{Void}, + Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, + Ptr{Void},Ptr{Void},Ptr{Void}, + Cdouble,Ptr{Void},Ptr{Void}, + Cdouble,Ptr{Void},Ptr{Void}), + libcudnn_handle[], BATCHNORM_SPATIAL, + Ref(T(alpha)), Ref(T(beta)), + TensorDesc(x), x, + TensorDesc(y), y, + TensorDesc(g), g, b, + momentum, running_mean, running_var, + eps, mean, ivar) + + if(cache !== nothing) + cache.mean = mean + cache.invvar = ivar + end + else + + @check ccall((:cudnnBatchNormalizationForwardInference, libcudnn), cudnnStatus_t, + (cudnnHandle_t,cudnnBatchNormMode_t,Ptr{Void}, Ptr{Void}, + Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, + Ptr{Void},Ptr{Void},Ptr{Void}, + Ptr{Void},Ptr{Void}, + Cdouble), + libcudnn_handle[], BATCHNORM_SPATIAL, + Ref(T(alpha)), Ref(T(beta)), + TensorDesc(x), x, + TensorDesc(y), y, + TensorDesc(g), g, b, + running_mean, running_var, + eps) + end + y +end + const RNN_RELU = 0 # Stock RNN with ReLu activation const RNN_TANH = 1 # Stock RNN with tanh activation const LSTM = 2 # LSTM with no peephole connections From a83e5d696d534c2276add1e56d28656e66cce835 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 12 Jun 2018 17:51:52 +0530 Subject: [PATCH 02/57] Typo --- src/cuda/cudnn.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index d517024e..1ce16d55 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -89,7 +89,7 @@ function cudnnBatchNormalizationForward(g::CuArray{T}, b::CuArray{T}, x::CuArray if(cache !== nothing) cache.mean = mean - cache.invvar = ivar + cache.ivar = ivar end else From f12e367cab310a4966a7e7f22fc67f26547f2069 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 12 Jun 2018 18:26:09 +0530 Subject: [PATCH 03/57] Adding untested backward pass code --- src/cuda/cudnn.jl | 90 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 72 insertions(+), 18 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 1ce16d55..fd0dd7a6 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -47,15 +47,25 @@ end bncache() = bncache(nothing, nothing) -(CuBN::CuBatchNorm)(x::CuArray{T}) where T<:Union{Float32, Float64} = - CuBN.λ.(cudnnBatchNormalizationForward(CuBN.γ, CuBN.β, x, CuBN.μ, CuBN.σ, CuBN.momentum, eps = CuBN.ϵ, training = CuBN.active)) +(CuBN::CuBatchNorm)(x::CuArray{T}; cache = nothing) where T<:Union{Float32, Float64} = + CuBN.λ.(cudnnBNForward(CuBN.γ, CuBN.β, x, CuBN.μ, CuBN.σ, CuBN.momentum, cache = cache, eps = CuBN.ϵ, training = CuBN.active)) -function cudnnBatchNormalizationForward(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, - running_mean::CuArray{T}, running_var::CuArray{T}, - momentum::T; cache = nothing, - alpha = T(1), beta = T(0), - eps = T(1e-5), training = true) where T<:Union{Float32, Float64} - y = similar(x) +function cudnnBNForward(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, + running_mean::CuArray{T}, running_var::CuArray{T}, + momentum::T; cache = nothing, + alpha = T(1), beta = T(0), + eps = T(1e-5), training = true) where T<:Union{Float32, Float64} + y = similar(x) + cudnnBNForward!(y, g, b, x, running_mean, running_var, momentum, cache = cache + alpha = alpha, beta = beta, eps = eps, training = training) + y +end + +function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, + running_mean::CuArray{T}, running_var::CuArray{T}, + momentum::T; cache = nothing, + alpha = T(1), beta = T(0), + eps = T(1e-5), training = true) where T<:Union{Float32, Float64} dims = _wsize(x) if(eps < BATCHNORM_MIN_EPS) @@ -74,11 +84,13 @@ function cudnnBatchNormalizationForward(g::CuArray{T}, b::CuArray{T}, x::CuArray end @check ccall((:cudnnBatchNormalizationForwardTraining, libcudnn), cudnnStatus_t, - (cudnnHandle_t,cudnnBatchNormMode_t,Ptr{Void}, Ptr{Void}, - Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, - Ptr{Void},Ptr{Void},Ptr{Void}, - Cdouble,Ptr{Void},Ptr{Void}, - Cdouble,Ptr{Void},Ptr{Void}), + (cudnnHandle_t,cudnnBatchNormMode_t, + Ptr{T}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, Ptr{T}, + Cdouble, Ptr{T}, Ptr{T}, + Cdouble, Ptr{T}, Ptr{T}), libcudnn_handle[], BATCHNORM_SPATIAL, Ref(T(alpha)), Ref(T(beta)), TensorDesc(x), x, @@ -94,10 +106,12 @@ function cudnnBatchNormalizationForward(g::CuArray{T}, b::CuArray{T}, x::CuArray else @check ccall((:cudnnBatchNormalizationForwardInference, libcudnn), cudnnStatus_t, - (cudnnHandle_t,cudnnBatchNormMode_t,Ptr{Void}, Ptr{Void}, - Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Void}, - Ptr{Void},Ptr{Void},Ptr{Void}, - Ptr{Void},Ptr{Void}, + (cudnnHandle_t,cudnnBatchNormMode_t, + Ptr{T}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, Ptr{T}, + Ptr{T}, Ptr{T}, Cdouble), libcudnn_handle[], BATCHNORM_SPATIAL, Ref(T(alpha)), Ref(T(beta)), @@ -107,7 +121,47 @@ function cudnnBatchNormalizationForward(g::CuArray{T}, b::CuArray{T}, x::CuArray running_mean, running_var, eps) end - y +end + +function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, + dx::CuArray{T}, x::CuArray{T}, dy::CuArray{T}, + running_mean::CuArray{T}, running_var::CuArray{T} + momentum; training = true, + cache = nothing, eps = T(1e-5), + alpha = T(1), beta = T(0), + dalpha = T(1), dbeta = T(0)) where T<:Union{Float32, Float64} + if(training) + + if cache !== nothing + mean, ivar = cache.mean, cache.ivar + cache_verbose && info("mean and ivar are fetched from the cache") + else + mean, ivar = C_NULL, C_NULL + end + + @check ccall((:cudnnBatchNormalizationBackward, libcudnn), cudnnStatus_t, + (cudnnHandle_t,cudnnBatchNormMode_t, + Ptr{T}, Ptr{T}, + Ptr{T}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, Ptr{T}, Ptr{T}, + Cdouble, Ptr{T}, Ptr{T}), + libcudnn_handle[], BATCHNORM_SPATIAL, + Ref(T(alpha)), Ref(T(beta)), + Ref(T(dalpha)), Ref(T(dbeta)), + TensorDesc(x), x, + TensorDesc(dy), dy, + TensorDesc(dx), dx, + TensorDesc(g), g, dg, db, + eps, mean, ivar) + else + ivar = 1 ./ sqrt.(running_var .+ eps) + dx = dy .* g .* ivar + dg = sum(dy .* (x .- running_mean) .* ivar, _reddims(dy)) + db = sum(dy, _reddims(dy)) + end end const RNN_RELU = 0 # Stock RNN with ReLu activation From 24d13ac3262e6488227a63e93f971800d2fc756e Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 12 Jun 2018 21:32:56 +0530 Subject: [PATCH 04/57] Fix missing parenthesis --- src/cuda/cudnn.jl | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index fd0dd7a6..2c2be1d6 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -41,24 +41,24 @@ const BATCHNORM_MIN_EPS = 1e-5 @inline _wsize(y) = ((1 for _=1:ndims(y)-2)..., size(y)[end-1], 1) mutable struct bncache - mean - ivar + mean + ivar end bncache() = bncache(nothing, nothing) (CuBN::CuBatchNorm)(x::CuArray{T}; cache = nothing) where T<:Union{Float32, Float64} = - CuBN.λ.(cudnnBNForward(CuBN.γ, CuBN.β, x, CuBN.μ, CuBN.σ, CuBN.momentum, cache = cache, eps = CuBN.ϵ, training = CuBN.active)) + CuBN.λ.(cudnnBNForward(CuBN.γ, CuBN.β, x, CuBN.μ, CuBN.σ, CuBN.momentum, cache = cache, eps = CuBN.ϵ, training = CuBN.active)) function cudnnBNForward(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum::T; cache = nothing, alpha = T(1), beta = T(0), eps = T(1e-5), training = true) where T<:Union{Float32, Float64} - y = similar(x) - cudnnBNForward!(y, g, b, x, running_mean, running_var, momentum, cache = cache - alpha = alpha, beta = beta, eps = eps, training = training) - y + y = similar(x) + cudnnBNForward!(y, g, b, x, running_mean, running_var, momentum, cache = cache, + alpha = alpha, beta = beta, eps = eps, training = training) + y end function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, @@ -125,20 +125,20 @@ end function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, dx::CuArray{T}, x::CuArray{T}, dy::CuArray{T}, - running_mean::CuArray{T}, running_var::CuArray{T} - momentum; training = true, + running_mean::CuArray{T}, running_var::CuArray{T}, + momentum; training = true, cache = nothing, eps = T(1e-5), alpha = T(1), beta = T(0), dalpha = T(1), dbeta = T(0)) where T<:Union{Float32, Float64} if(training) - + if cache !== nothing mean, ivar = cache.mean, cache.ivar cache_verbose && info("mean and ivar are fetched from the cache") else mean, ivar = C_NULL, C_NULL end - + @check ccall((:cudnnBatchNormalizationBackward, libcudnn), cudnnStatus_t, (cudnnHandle_t,cudnnBatchNormMode_t, Ptr{T}, Ptr{T}, From c6dcf079ce30db70e3d398e40cb39f89a191420b Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sun, 17 Jun 2018 11:47:49 +0530 Subject: [PATCH 05/57] Update file structure and make function calls correct --- src/cuda/cudnn.jl | 376 +--------------------------------------------- src/cuda/curnn.jl | 351 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 359 insertions(+), 368 deletions(-) create mode 100644 src/cuda/curnn.jl diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 2c2be1d6..6faa8c95 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -24,16 +24,6 @@ function DropoutDesc(ρ::Real; seed::Integer=0) return desc end -CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} -CuBatchNorm{T} = Flux.BatchNorm{<:Union{typeof(identity),typeof(relu)}, - <:CuParam{T,1},<:CuArray{T,1}, - <:Union{Float32,Float64}} - -CuBatchNorm(chs::Integer, λ = identity; - initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1) = - BatchNorm(λ, param(cu(initβ(Float32,chs))), param(cu(initγ(Float32,chs))), - zeros(Float32,chs), ones(Float32,chs), ϵ, momentum, true) - const BATCHNORM_SPATIAL = 1 const BATCHNORM_ACTIVATION = 0 const BATCHNORM_MIN_EPS = 1e-5 @@ -47,23 +37,22 @@ end bncache() = bncache(nothing, nothing) -(CuBN::CuBatchNorm)(x::CuArray{T}; cache = nothing) where T<:Union{Float32, Float64} = - CuBN.λ.(cudnnBNForward(CuBN.γ, CuBN.β, x, CuBN.μ, CuBN.σ, CuBN.momentum, cache = cache, eps = CuBN.ϵ, training = CuBN.active)) +(BN::BatchNorm)(x::CuArray{T}; cache = nothing) where T<:Union{Float32, Float64} = + BN.λ.(cudnnBNForward(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum, cache = cache, eps = BN.ϵ, training = BN.active)) -function cudnnBNForward(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, - running_mean::CuArray{T}, running_var::CuArray{T}, - momentum::T; cache = nothing, - alpha = T(1), beta = T(0), +function cudnnBNForward(g, b, x, running_mean::CuArray{T}, + running_var::CuArray{T}, momentum; + cache = nothing, alpha = T(1), beta = T(0), eps = T(1e-5), training = true) where T<:Union{Float32, Float64} y = similar(x) - cudnnBNForward!(y, g, b, x, running_mean, running_var, momentum, cache = cache, - alpha = alpha, beta = beta, eps = eps, training = training) + cudnnBNForward!(y, data(g), data(b), data(x), running_mean, running_var, momentum, cache = cache, + alpha = alpha, beta = beta, eps = eps, training = training) y end function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, - momentum::T; cache = nothing, + momentum; cache = nothing, alpha = T(1), beta = T(0), eps = T(1e-5), training = true) where T<:Union{Float32, Float64} dims = _wsize(x) @@ -163,352 +152,3 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, db = sum(dy, _reddims(dy)) end end - -const RNN_RELU = 0 # Stock RNN with ReLu activation -const RNN_TANH = 1 # Stock RNN with tanh activation -const LSTM = 2 # LSTM with no peephole connections -const GRU = 3 # Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1) - -const LINEAR_INPUT = 0 -const SKIP_INPUT = 1 - -const UNIDIRECTIONAL = 0 -const BIDIRECTIONAL = 1 - -const RNN_ALGO_STANDARD = 0 -const RNN_ALGO_PERSIST_STATIC = 1 -const RNN_ALGO_PERSIST_DYNAMIC = 2 - -# param layout: -# RNN: [weight, bias] × [input, hidden] -# GRU: [weight, bias] × [input, hidden] × [reset, update, newmem] -# LSTM: [weight, bias] × [input, hidden] × [input, forget, newmem, output] - -function params(w::CuVector, input, hidden, n = 1) - slice(offset, shape) = reshape(w[offset+(1:prod(shape))], shape) - wx = slice(0, (input, hidden*n)) - wh = slice(length(wx), (hidden, hidden*n)) - bias = w[length(wx)+length(wh) + (1:hidden*n)] - (wx, wh), bias -end - -mutable struct RNNDesc{T} - mode::Int - input::Int - hidden::Int - params::CuVector{T} - weights::NTuple{2,CuMatrix{T}} - bias::CuVector{T} - ptr::Ptr{Void} -end - -Base.unsafe_convert(::Type{Ptr{Void}}, d::RNNDesc) = d.ptr - -function rnnParamSize(T, r, input) - size = Csize_t[0] - @check ccall((:cudnnGetRNNParamsSize, libcudnn), cudnnStatus_t, (Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Csize_t},Cint), - libcudnn_handle[], r, TensorDesc(T, (1,input,1)), size, cudnnDataType(T)) - return Int(size[])÷sizeof(T) -end - -ngates(mode) = [1, 1, 4, 3][mode+1] -ngates(r::RNNDesc) = ngates(r.mode) - -function RNNDesc{T}(mode::Int, input::Int, hidden::Int; layers = 1) where T - d = [C_NULL] - @check ccall((:cudnnCreateRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Ptr{Void}},),d) - - dropoutDesc = DropoutDesc(0) - inputMode = LINEAR_INPUT - direction = UNIDIRECTIONAL - algo = RNN_ALGO_STANDARD - @check ccall((:cudnnSetRNNDescriptor_v6,libcudnn), cudnnStatus_t, (Ptr{Void},Ptr{Void},Cint,Cint,Ptr{Void},Cint,Cint,Cint,Cint,Cint), - libcudnn_handle[],d[],hidden,layers,dropoutDesc,inputMode,direction,mode,algo,cudnnDataType(T)) - - w = cuzeros(T, rnnParamSize(T, d[], input)) - # TODO: avoid reserve allocation here - rd = RNNDesc{T}(mode, input, hidden, w, params(w, input, hidden, ngates(mode))..., d[]) - finalizer(rd, x -> - @check ccall((:cudnnDestroyRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Void},),x)) - return rd -end - -function rnnWorkspaceSize(r::RNNDesc, seqlen, xdesc) - size = Csize_t[0] - @check ccall((:cudnnGetRNNWorkspaceSize, libcudnn), cudnnStatus_t, (Ptr{Void},Ptr{Void},Cint,Ptr{Ptr{Void}},Ptr{Csize_t}), - libcudnn_handle[], r, seqlen, xdesc, size) - return Int(size[]) -end - -const workspace = [CuVector{UInt8}(1)] - -getworkspace(bytes) = - length(workspace[]) ≥ bytes ? - workspace[] : - (workspace[] = CuVector{UInt8}(bytes)) - -getworkspace(r::RNNDesc, seqlen, xdesc) = - getworkspace(rnnWorkspaceSize(r, seqlen, xdesc)) - -function rnnTrainingReserveSize(r::RNNDesc, seqlen, xdesc) - size = Csize_t[0] - @check ccall((:cudnnGetRNNTrainingReserveSize,libcudnn), cudnnStatus_t, (Ptr{Void}, Ptr{Void}, Cint, Ptr{Ptr{Void}}, Ptr{Csize_t}), - libcudnn_handle[], r, seqlen, xdesc, size) - return Int(size[]) -end - -function cudnnRNNForward(rnn::RNNDesc{T}, seqlen, xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, - workspace, reserve=nothing) where T - if reserve == nothing - @check ccall((:cudnnRNNForwardInference, libcudnn), cudnnStatus_t, - (Ptr{Void}, Ptr{Void}, Cint, - Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Csize_t), - libcudnn_handle[], rnn, seqlen, - xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, - workspace, length(workspace)) - else - @check ccall((:cudnnRNNForwardTraining, libcudnn), cudnnStatus_t, - (Ptr{Void}, Ptr{Void}, Cint, - Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Csize_t, Ptr{Void}, Csize_t), - libcudnn_handle[], rnn, seqlen, - xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, - workspace, length(workspace), reserve, length(reserve)) - end -end - -xDesc(x) = [TensorDesc(eltype(x), (1, size(x, 1), size(x, 2)))] - -hDesc(h::Void) = C_NULL, C_NULL -hDesc(x::Integer) = (@assert x == 0; hDesc(nothing)) -function hDesc(h::CuArray) - TensorDesc(eltype(h), (size(h, 1), size(h, 2), 1)), h -end - -# TODO: can we just manipulate strides here? -# TODO: should use repmat, but this isn't implemented. -hBatch(x::AbstractVector, h::CuVector) = h -hBatch(x::AbstractMatrix, h::CuVector) = h .* cuones(1, size(x, 2)) -hBatch(x::AbstractMatrix, h::CuMatrix) = h .* cuones(1, size(h,2) == 1 ? size(x,2) : 1) - -function forward(rnn::RNNDesc{T}, x::CuArray{T}, h_::CuArray{T}, c_ = nothing, train = Val{false}) where T - h = hBatch(x, h_) - c = c_ == nothing ? nothing : hBatch(x, c_) - @assert size(x, 1) == rnn.input - @assert size(h, 1) == rnn.hidden - @assert size(x, 2) == size(h, 2) - seqLength = 1 - xdesc = xDesc(x) - y = x isa AbstractVector ? similar(x, rnn.hidden) : similar(x, rnn.hidden, size(x, 2)) - ho = similar(h) - ydesc = xDesc(y) - workspace = getworkspace(rnn, seqLength, xdesc) - reserve = train == Val{true} ? - CuVector{UInt8}(rnnTrainingReserveSize(rnn, seqLength, xdesc)) : - nothing - co = c == nothing ? c : similar(c) - cudnnRNNForward(rnn, seqLength, - xdesc, x, - hDesc(h)..., - hDesc(c)..., - FilterDesc(T, (1, 1, length(rnn.params))), rnn.params, - ydesc, y, - hDesc(ho)..., - hDesc(co)..., - workspace, reserve) - result = c == nothing ? (y, ho) : (y, ho, co) - return train == Val{true} ? (reserve, result) : result -end - -forwardTrain(rnn::RNNDesc{T}, x::CuArray{T}, h::CuArray{T}, c = nothing) where T = - forward(rnn, x, h, c, Val{true}) - -function cudnnRNNBackwardData(rnn::RNNDesc{T}, seqlen, yd, y, dyd, dy, dhod, dho, dcod, dco, - wd, w, hd, h, cd, c, dxd, dx, dhd, dh, dcd, dc, ws, rs) where T - @check ccall((:cudnnRNNBackwardData,libcudnn),cudnnStatus_t, - (Ptr{Void}, Ptr{Void}, Cint, - Ptr{Ptr{Void}}, Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, - Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Csize_t, Ptr{Void}, Csize_t), - libcudnn_handle[], rnn, seqlen, yd, y, dyd, dy, dhod, dho, dcod, dco, - wd, w, hd, h, cd, c, dxd, dx, dhd, dh, dcd, dc, ws, length(ws), rs, length(rs)) -end - -function backwardData(rnn::RNNDesc{T}, y, dy_, dho, dco, h, c, reserve) where T - # Same as above, any more efficient way? - dy = dy_ isa Integer ? zeros(y) : dy_ - yd = xDesc(y) - dx = y isa AbstractVector ? similar(dy, rnn.input) : similar(dy, rnn.input, size(dy, 2)) - dh = similar(h) - dc = c == nothing ? nothing : similar(c) - cudnnRNNBackwardData(rnn, 1, - yd, y, yd, dy, hDesc(dho)..., hDesc(dco)..., - FilterDesc(T, (1, 1, length(rnn.params))), rnn.params, - hDesc(h)..., hDesc(c)..., xDesc(dx), dx, hDesc(dh)..., hDesc(dc)..., - workspace[], reserve) - return c == nothing ? (dx, dh) : (dx, dh, dc) -end - -backwardData(rnn, y, dy, dho, hx, reserve) = - backwardData(rnn, y, dy, dho, nothing, hx, nothing, reserve) - -function cudnnRNNBackwardWeights(rnn::RNNDesc{T}, seqlen, xd, x, hd, h, yd, y, dwd, dw, - workspace, reserve) where T - @check ccall((:cudnnRNNBackwardWeights,libcudnn), cudnnStatus_t, - (Ptr{Void}, Ptr{Void}, Cint, # handle, rnnDesc, seqLength - Ptr{Ptr{Void}}, Ptr{T}, #x - Ptr{Void}, Ptr{T}, #hx - Ptr{Ptr{Void}}, Ptr{T}, #y - Ptr{Void}, Csize_t, #ws - Ptr{Void}, Ptr{T}, #dw - Ptr{Void}, Csize_t), #rs - libcudnn_handle[], rnn, seqlen, xd, x, hd, h, yd, y, - workspace, length(workspace), dwd, dw, reserve, length(reserve)) -end - -function backwardWeights(rnn::RNNDesc{T}, x, h, y, reserve) where T - dw = zeros(rnn.params) - cudnnRNNBackwardWeights(rnn, 1, - xDesc(x), x, hDesc(h)..., xDesc(y), y, - FilterDesc(T, (1, 1, length(dw))), dw, - workspace[], reserve) - return params(dw, rnn.input, rnn.hidden, ngates(rnn)) -end - -# Interface - -import ..Flux: Flux, relu -import ..Tracker: TrackedArray -using CUDAnative -using CuArrays: @cuindex, cudims - -function copy_transpose!(dst::CuArray, src::CuArray) - function kernel(dst, src) - I = @cuindex dst - dst[I...] = src[reverse(I)...] - return - end - blk, thr = cudims(dst) - @cuda (blk, thr) kernel(dst, src) - return dst -end - -CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} -CuRNN{T} = Flux.RNNCell{<:Union{typeof(tanh),typeof(relu)},<:CuParam{T,2},<:CuParam{T,1}} -CuGRU{T} = Flux.GRUCell{<:CuParam{T,2},<:CuParam{T,1}} -CuLSTM{T} = Flux.LSTMCell{<:CuParam{T,2},<:CuParam{T,1}} -CuRNNs{T} = Union{CuRNN{T},CuGRU{T},CuLSTM{T}} - -function copyparams!(m::CuRNNs, d::RNNDesc) - Wi, Wh = d.weights - copy_transpose!(Wi, Flux.data(m.Wi)) - copy_transpose!(Wh, Flux.data(m.Wh)) - copy_transpose!(d.bias, Flux.data(m.b)) - return -end - -function RNNDesc(m::CuRNNs{T}) where T - h, i = length(m.h), size(m.Wi, 2) - mode = m isa CuRNN ? - (m.σ == tanh ? RNN_TANH : RNN_RELU) : - m isa CuGRU ? GRU : LSTM - r = RNNDesc{T}(mode, i, h) - return r -end - -const descs = WeakKeyDict() - -function desc(rnn) - d = haskey(descs, rnn) ? descs[rnn] : (descs[rnn] = RNNDesc(rnn)) - copyparams!(rnn, d) - return d -end - -import Flux.Tracker: data, isleaf, istracked, track, back_, @back, unbroadcast - -mutable struct RNNCall{R} - rnn::R - reserve::CuVector{UInt8} - RNNCall{R}(rnn::R) where R = new(rnn) -end - -RNNCall(rnn) = RNNCall{typeof(rnn)}(rnn) - -function (c::RNNCall)(args...) - rs, result = forwardTrain(desc(c.rnn), args...) - c.reserve = rs - return result -end - -istrain(m::CuRNNs, args...) = any(x -> x isa TrackedArray, (m.Wi, m.Wh, m.b, args...)) - -function (m::CuRNN{T})(h::CuParam{T}, x::CuParam{T}) where T <: Union{Float32,Float64} - result = istrain(m, h, x) ? - track(RNNCall(m), x, h) : - forward(desc(m), x, h) - return result[2], result[1] -end - -function (m::CuGRU{T})(h::CuParam{T}, x::CuParam{T}) where T <: Union{Float32,Float64} - result = istrain(m, h, x) ? - track(RNNCall(m), x, h) : - forward(desc(m), x, h) - return result[2], result[1] -end - -function (m::CuLSTM{T})(h::NTuple{2,CuParam{T}}, x::CuParam{T}) where T <: Union{Float32,Float64} - result = istrain(m, h, x) ? - track(RNNCall(m), x, h[1], h[2]) : - forward(desc(m), x, h[1], h[2]) - return (result[2], result[3]), result[1] -end - -(m::CuRNN{T})(h::CuParam{T}, x) where T <: Union{Float32,Float64} = m(h, CuArray{T}(x)) -(m::CuGRU{T})(h::CuParam{T}, x) where T <: Union{Float32,Float64} = m(h, CuArray{T}(x)) -(m::CuLSTM{T})(h::NTuple{2,CuParam{T}}, x) where T <: Union{Float32,Float64} = m(h, CuArray{T}(x)) - -function accum_transpose!(dst::CuArray, src::CuArray) - function kernel(dst, src) - I = @cuindex dst - dst[I...] += src[reverse(I)...] - return - end - blk, thr = cudims(dst) - @cuda (blk, thr) kernel(dst, src) - return dst -end - -function back_(m::RNNCall{<:Union{CuRNN,CuGRU}}, y_, Δ, x, h) - y, ho = y_ - dy, dho = Δ - h_ = hBatch(x, data(h)) - dx, dh = backwardData(descs[m.rnn], y, dy, dho, h_, m.reserve) - @back(x, dx) - @back(h, unbroadcast(h, dh)) - (dWi, dWh), db = backwardWeights(descs[m.rnn], data(x), h_, y, m.reserve) - # We don't have to make this assumption, it's just slightly more complex. - @assert all(isleaf.((m.rnn.Wi, m.rnn.Wh, m.rnn.b))) - istracked(m.rnn.Wi) && accum_transpose!(m.rnn.Wi.grad, dWi) - istracked(m.rnn.Wh) && accum_transpose!(m.rnn.Wh.grad, dWh) - istracked(m.rnn.b) && accum_transpose!(m.rnn.b.grad, db) -end - -function back_(m::RNNCall{<:CuLSTM}, y_, Δ, x, h, c) - y, ho, co = y_ - dy, dho, dco = Δ - h_ = hBatch(x, data(h)) - c_ = hBatch(x, data(c)) - dx, dh, dc = backwardData(descs[m.rnn], y, dy, dho, dco, h_, c_, m.reserve) - @back(x, dx) - @back(h, unbroadcast(h, dh)) - @back(c, unbroadcast(h, dc)) - (dWi, dWh), db = backwardWeights(descs[m.rnn], data(x), h_, y, m.reserve) - @assert all(isleaf.((m.rnn.Wi, m.rnn.Wh, m.rnn.b))) - istracked(m.rnn.Wi) && accum_transpose!(m.rnn.Wi.grad, dWi) - istracked(m.rnn.Wh) && accum_transpose!(m.rnn.Wh.grad, dWh) - istracked(m.rnn.b) && accum_transpose!(m.rnn.b.grad, db) -end diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl new file mode 100644 index 00000000..905b1ef4 --- /dev/null +++ b/src/cuda/curnn.jl @@ -0,0 +1,351 @@ +using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, + cudnnBatchNormMode_t, cudnnHandle_t, libcudnn_handle, cudnnDataType, TensorDesc, FilterDesc + +const RNN_RELU = 0 # Stock RNN with ReLu activation +const RNN_TANH = 1 # Stock RNN with tanh activation +const LSTM = 2 # LSTM with no peephole connections +const GRU = 3 # Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1) + +const LINEAR_INPUT = 0 +const SKIP_INPUT = 1 + +const UNIDIRECTIONAL = 0 +const BIDIRECTIONAL = 1 + +const RNN_ALGO_STANDARD = 0 +const RNN_ALGO_PERSIST_STATIC = 1 +const RNN_ALGO_PERSIST_DYNAMIC = 2 + +# param layout: +# RNN: [weight, bias] × [input, hidden] +# GRU: [weight, bias] × [input, hidden] × [reset, update, newmem] +# LSTM: [weight, bias] × [input, hidden] × [input, forget, newmem, output] + +function params(w::CuVector, input, hidden, n = 1) + slice(offset, shape) = reshape(w[offset+(1:prod(shape))], shape) + wx = slice(0, (input, hidden*n)) + wh = slice(length(wx), (hidden, hidden*n)) + bias = w[length(wx)+length(wh) + (1:hidden*n)] + (wx, wh), bias +end + +mutable struct RNNDesc{T} + mode::Int + input::Int + hidden::Int + params::CuVector{T} + weights::NTuple{2,CuMatrix{T}} + bias::CuVector{T} + ptr::Ptr{Void} +end + +Base.unsafe_convert(::Type{Ptr{Void}}, d::RNNDesc) = d.ptr + +function rnnParamSize(T, r, input) + size = Csize_t[0] + @check ccall((:cudnnGetRNNParamsSize, libcudnn), cudnnStatus_t, (Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Csize_t},Cint), + libcudnn_handle[], r, TensorDesc(T, (1,input,1)), size, cudnnDataType(T)) + return Int(size[])÷sizeof(T) +end + +ngates(mode) = [1, 1, 4, 3][mode+1] +ngates(r::RNNDesc) = ngates(r.mode) + +function RNNDesc{T}(mode::Int, input::Int, hidden::Int; layers = 1) where T + d = [C_NULL] + @check ccall((:cudnnCreateRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Ptr{Void}},),d) + + dropoutDesc = DropoutDesc(0) + inputMode = LINEAR_INPUT + direction = UNIDIRECTIONAL + algo = RNN_ALGO_STANDARD + @check ccall((:cudnnSetRNNDescriptor_v6,libcudnn), cudnnStatus_t, (Ptr{Void},Ptr{Void},Cint,Cint,Ptr{Void},Cint,Cint,Cint,Cint,Cint), + libcudnn_handle[],d[],hidden,layers,dropoutDesc,inputMode,direction,mode,algo,cudnnDataType(T)) + + w = cuzeros(T, rnnParamSize(T, d[], input)) + # TODO: avoid reserve allocation here + rd = RNNDesc{T}(mode, input, hidden, w, params(w, input, hidden, ngates(mode))..., d[]) + finalizer(rd, x -> + @check ccall((:cudnnDestroyRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Void},),x)) + return rd +end + +function rnnWorkspaceSize(r::RNNDesc, seqlen, xdesc) + size = Csize_t[0] + @check ccall((:cudnnGetRNNWorkspaceSize, libcudnn), cudnnStatus_t, (Ptr{Void},Ptr{Void},Cint,Ptr{Ptr{Void}},Ptr{Csize_t}), + libcudnn_handle[], r, seqlen, xdesc, size) + return Int(size[]) +end + +const workspace = [CuVector{UInt8}(1)] + +getworkspace(bytes) = + length(workspace[]) ≥ bytes ? + workspace[] : + (workspace[] = CuVector{UInt8}(bytes)) + +getworkspace(r::RNNDesc, seqlen, xdesc) = + getworkspace(rnnWorkspaceSize(r, seqlen, xdesc)) + +function rnnTrainingReserveSize(r::RNNDesc, seqlen, xdesc) + size = Csize_t[0] + @check ccall((:cudnnGetRNNTrainingReserveSize,libcudnn), cudnnStatus_t, (Ptr{Void}, Ptr{Void}, Cint, Ptr{Ptr{Void}}, Ptr{Csize_t}), + libcudnn_handle[], r, seqlen, xdesc, size) + return Int(size[]) +end + +function cudnnRNNForward(rnn::RNNDesc{T}, seqlen, xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, + workspace, reserve=nothing) where T + if reserve == nothing + @check ccall((:cudnnRNNForwardInference, libcudnn), cudnnStatus_t, + (Ptr{Void}, Ptr{Void}, Cint, + Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Csize_t), + libcudnn_handle[], rnn, seqlen, + xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, + workspace, length(workspace)) + else + @check ccall((:cudnnRNNForwardTraining, libcudnn), cudnnStatus_t, + (Ptr{Void}, Ptr{Void}, Cint, + Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, + Ptr{Void}, Csize_t, Ptr{Void}, Csize_t), + libcudnn_handle[], rnn, seqlen, + xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, + workspace, length(workspace), reserve, length(reserve)) + end +end + +xDesc(x) = [TensorDesc(eltype(x), (1, size(x, 1), size(x, 2)))] + +hDesc(h::Void) = C_NULL, C_NULL +hDesc(x::Integer) = (@assert x == 0; hDesc(nothing)) +function hDesc(h::CuArray) + TensorDesc(eltype(h), (size(h, 1), size(h, 2), 1)), h +end + +# TODO: can we just manipulate strides here? +# TODO: should use repmat, but this isn't implemented. +hBatch(x::AbstractVector, h::CuVector) = h +hBatch(x::AbstractMatrix, h::CuVector) = h .* cuones(1, size(x, 2)) +hBatch(x::AbstractMatrix, h::CuMatrix) = h .* cuones(1, size(h,2) == 1 ? size(x,2) : 1) + +function forward(rnn::RNNDesc{T}, x::CuArray{T}, h_::CuArray{T}, c_ = nothing, train = Val{false}) where T + h = hBatch(x, h_) + c = c_ == nothing ? nothing : hBatch(x, c_) + @assert size(x, 1) == rnn.input + @assert size(h, 1) == rnn.hidden + @assert size(x, 2) == size(h, 2) + seqLength = 1 + xdesc = xDesc(x) + y = x isa AbstractVector ? similar(x, rnn.hidden) : similar(x, rnn.hidden, size(x, 2)) + ho = similar(h) + ydesc = xDesc(y) + workspace = getworkspace(rnn, seqLength, xdesc) + reserve = train == Val{true} ? + CuVector{UInt8}(rnnTrainingReserveSize(rnn, seqLength, xdesc)) : + nothing + co = c == nothing ? c : similar(c) + cudnnRNNForward(rnn, seqLength, + xdesc, x, + hDesc(h)..., + hDesc(c)..., + FilterDesc(T, (1, 1, length(rnn.params))), rnn.params, + ydesc, y, + hDesc(ho)..., + hDesc(co)..., + workspace, reserve) + result = c == nothing ? (y, ho) : (y, ho, co) + return train == Val{true} ? (reserve, result) : result +end + +forwardTrain(rnn::RNNDesc{T}, x::CuArray{T}, h::CuArray{T}, c = nothing) where T = + forward(rnn, x, h, c, Val{true}) + +function cudnnRNNBackwardData(rnn::RNNDesc{T}, seqlen, yd, y, dyd, dy, dhod, dho, dcod, dco, + wd, w, hd, h, cd, c, dxd, dx, dhd, dh, dcd, dc, ws, rs) where T + @check ccall((:cudnnRNNBackwardData,libcudnn),cudnnStatus_t, + (Ptr{Void}, Ptr{Void}, Cint, + Ptr{Ptr{Void}}, Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, + Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, + Ptr{Void}, Csize_t, Ptr{Void}, Csize_t), + libcudnn_handle[], rnn, seqlen, yd, y, dyd, dy, dhod, dho, dcod, dco, + wd, w, hd, h, cd, c, dxd, dx, dhd, dh, dcd, dc, ws, length(ws), rs, length(rs)) +end + +function backwardData(rnn::RNNDesc{T}, y, dy_, dho, dco, h, c, reserve) where T + # Same as above, any more efficient way? + dy = dy_ isa Integer ? zeros(y) : dy_ + yd = xDesc(y) + dx = y isa AbstractVector ? similar(dy, rnn.input) : similar(dy, rnn.input, size(dy, 2)) + dh = similar(h) + dc = c == nothing ? nothing : similar(c) + cudnnRNNBackwardData(rnn, 1, + yd, y, yd, dy, hDesc(dho)..., hDesc(dco)..., + FilterDesc(T, (1, 1, length(rnn.params))), rnn.params, + hDesc(h)..., hDesc(c)..., xDesc(dx), dx, hDesc(dh)..., hDesc(dc)..., + workspace[], reserve) + return c == nothing ? (dx, dh) : (dx, dh, dc) +end + +backwardData(rnn, y, dy, dho, hx, reserve) = + backwardData(rnn, y, dy, dho, nothing, hx, nothing, reserve) + +function cudnnRNNBackwardWeights(rnn::RNNDesc{T}, seqlen, xd, x, hd, h, yd, y, dwd, dw, + workspace, reserve) where T + @check ccall((:cudnnRNNBackwardWeights,libcudnn), cudnnStatus_t, + (Ptr{Void}, Ptr{Void}, Cint, # handle, rnnDesc, seqLength + Ptr{Ptr{Void}}, Ptr{T}, #x + Ptr{Void}, Ptr{T}, #hx + Ptr{Ptr{Void}}, Ptr{T}, #y + Ptr{Void}, Csize_t, #ws + Ptr{Void}, Ptr{T}, #dw + Ptr{Void}, Csize_t), #rs + libcudnn_handle[], rnn, seqlen, xd, x, hd, h, yd, y, + workspace, length(workspace), dwd, dw, reserve, length(reserve)) +end + +function backwardWeights(rnn::RNNDesc{T}, x, h, y, reserve) where T + dw = zeros(rnn.params) + cudnnRNNBackwardWeights(rnn, 1, + xDesc(x), x, hDesc(h)..., xDesc(y), y, + FilterDesc(T, (1, 1, length(dw))), dw, + workspace[], reserve) + return params(dw, rnn.input, rnn.hidden, ngates(rnn)) +end + +# Interface + +import ..Flux: Flux, relu +import ..Tracker: TrackedArray +using CUDAnative +using CuArrays: @cuindex, cudims + +function copy_transpose!(dst::CuArray, src::CuArray) + function kernel(dst, src) + I = @cuindex dst + dst[I...] = src[reverse(I)...] + return + end + blk, thr = cudims(dst) + @cuda (blk, thr) kernel(dst, src) + return dst +end + +CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} +CuRNN{T} = Flux.RNNCell{<:Union{typeof(tanh),typeof(relu)},<:CuParam{T,2},<:CuParam{T,1}} +CuGRU{T} = Flux.GRUCell{<:CuParam{T,2},<:CuParam{T,1}} +CuLSTM{T} = Flux.LSTMCell{<:CuParam{T,2},<:CuParam{T,1}} +CuRNNs{T} = Union{CuRNN{T},CuGRU{T},CuLSTM{T}} + +function copyparams!(m::CuRNNs, d::RNNDesc) + Wi, Wh = d.weights + copy_transpose!(Wi, Flux.data(m.Wi)) + copy_transpose!(Wh, Flux.data(m.Wh)) + copy_transpose!(d.bias, Flux.data(m.b)) + return +end + +function RNNDesc(m::CuRNNs{T}) where T + h, i = length(m.h), size(m.Wi, 2) + mode = m isa CuRNN ? + (m.σ == tanh ? RNN_TANH : RNN_RELU) : + m isa CuGRU ? GRU : LSTM + r = RNNDesc{T}(mode, i, h) + return r +end + +const descs = WeakKeyDict() + +function desc(rnn) + d = haskey(descs, rnn) ? descs[rnn] : (descs[rnn] = RNNDesc(rnn)) + copyparams!(rnn, d) + return d +end + +import Flux.Tracker: data, isleaf, istracked, track, back_, @back, unbroadcast + +mutable struct RNNCall{R} + rnn::R + reserve::CuVector{UInt8} + RNNCall{R}(rnn::R) where R = new(rnn) +end + +RNNCall(rnn) = RNNCall{typeof(rnn)}(rnn) + +function (c::RNNCall)(args...) + rs, result = forwardTrain(desc(c.rnn), args...) + c.reserve = rs + return result +end + +istrain(m::CuRNNs, args...) = any(x -> x isa TrackedArray, (m.Wi, m.Wh, m.b, args...)) + +function (m::CuRNN{T})(h::CuParam{T}, x::CuParam{T}) where T <: Union{Float32,Float64} + result = istrain(m, h, x) ? + track(RNNCall(m), x, h) : + forward(desc(m), x, h) + return result[2], result[1] +end + +function (m::CuGRU{T})(h::CuParam{T}, x::CuParam{T}) where T <: Union{Float32,Float64} + result = istrain(m, h, x) ? + track(RNNCall(m), x, h) : + forward(desc(m), x, h) + return result[2], result[1] +end + +function (m::CuLSTM{T})(h::NTuple{2,CuParam{T}}, x::CuParam{T}) where T <: Union{Float32,Float64} + result = istrain(m, h, x) ? + track(RNNCall(m), x, h[1], h[2]) : + forward(desc(m), x, h[1], h[2]) + return (result[2], result[3]), result[1] +end + +(m::CuRNN{T})(h::CuParam{T}, x) where T <: Union{Float32,Float64} = m(h, CuArray{T}(x)) +(m::CuGRU{T})(h::CuParam{T}, x) where T <: Union{Float32,Float64} = m(h, CuArray{T}(x)) +(m::CuLSTM{T})(h::NTuple{2,CuParam{T}}, x) where T <: Union{Float32,Float64} = m(h, CuArray{T}(x)) + +function accum_transpose!(dst::CuArray, src::CuArray) + function kernel(dst, src) + I = @cuindex dst + dst[I...] += src[reverse(I)...] + return + end + blk, thr = cudims(dst) + @cuda (blk, thr) kernel(dst, src) + return dst +end + +function back_(m::RNNCall{<:Union{CuRNN,CuGRU}}, y_, Δ, x, h) + y, ho = y_ + dy, dho = Δ + h_ = hBatch(x, data(h)) + dx, dh = backwardData(descs[m.rnn], y, dy, dho, h_, m.reserve) + @back(x, dx) + @back(h, unbroadcast(h, dh)) + (dWi, dWh), db = backwardWeights(descs[m.rnn], data(x), h_, y, m.reserve) + # We don't have to make this assumption, it's just slightly more complex. + @assert all(isleaf.((m.rnn.Wi, m.rnn.Wh, m.rnn.b))) + istracked(m.rnn.Wi) && accum_transpose!(m.rnn.Wi.grad, dWi) + istracked(m.rnn.Wh) && accum_transpose!(m.rnn.Wh.grad, dWh) + istracked(m.rnn.b) && accum_transpose!(m.rnn.b.grad, db) +end + +function back_(m::RNNCall{<:CuLSTM}, y_, Δ, x, h, c) + y, ho, co = y_ + dy, dho, dco = Δ + h_ = hBatch(x, data(h)) + c_ = hBatch(x, data(c)) + dx, dh, dc = backwardData(descs[m.rnn], y, dy, dho, dco, h_, c_, m.reserve) + @back(x, dx) + @back(h, unbroadcast(h, dh)) + @back(c, unbroadcast(h, dc)) + (dWi, dWh), db = backwardWeights(descs[m.rnn], data(x), h_, y, m.reserve) + @assert all(isleaf.((m.rnn.Wi, m.rnn.Wh, m.rnn.b))) + istracked(m.rnn.Wi) && accum_transpose!(m.rnn.Wi.grad, dWi) + istracked(m.rnn.Wh) && accum_transpose!(m.rnn.Wh.grad, dWh) + istracked(m.rnn.b) && accum_transpose!(m.rnn.b.grad, db) +end From af5ab7f9ef5c222b98ddc8014ea98f2b141a7da4 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sun, 17 Jun 2018 12:28:02 +0530 Subject: [PATCH 06/57] Fix Tensor Descriptor Bug --- src/cuda/cudnn.jl | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 6faa8c95..bd0c2198 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -56,11 +56,13 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray alpha = T(1), beta = T(0), eps = T(1e-5), training = true) where T<:Union{Float32, Float64} dims = _wsize(x) - if(eps < BATCHNORM_MIN_EPS) warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) eps = BATCHNORM_MIN_EPS end + xd = TensorDesc(x) + yd = TensorDesc(y) + gd = TensorDesc(T, (1,1,length(g),1)) if(training) @@ -82,9 +84,9 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray Cdouble, Ptr{T}, Ptr{T}), libcudnn_handle[], BATCHNORM_SPATIAL, Ref(T(alpha)), Ref(T(beta)), - TensorDesc(x), x, - TensorDesc(y), y, - TensorDesc(g), g, b, + xd, x, + yd, y, + gd, g, b, momentum, running_mean, running_var, eps, mean, ivar) @@ -93,9 +95,8 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray cache.ivar = ivar end else - @check ccall((:cudnnBatchNormalizationForwardInference, libcudnn), cudnnStatus_t, - (cudnnHandle_t,cudnnBatchNormMode_t, + (Ptr{cudnnHandle_t},cudnnBatchNormMode_t, Ptr{T}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, @@ -104,9 +105,9 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray Cdouble), libcudnn_handle[], BATCHNORM_SPATIAL, Ref(T(alpha)), Ref(T(beta)), - TensorDesc(x), x, - TensorDesc(y), y, - TensorDesc(g), g, b, + xd, x, + yd, y, + gd, g, b, running_mean, running_var, eps) end From bc47d02b3f9ebe775b152e6ee14cdbc46a0e5607 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sun, 17 Jun 2018 12:40:01 +0530 Subject: [PATCH 07/57] Remove uncessary imports --- src/cuda/cudnn.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index bd0c2198..c8dc553a 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -1,7 +1,5 @@ using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, cudnnBatchNormMode_t, cudnnHandle_t, libcudnn_handle, cudnnDataType, TensorDesc, FilterDesc -using CuArrays -using Flux mutable struct DropoutDesc ptr::Ptr{Void} From 185f34d9fe0336c9372d6ec93e8aa9cd2360f24a Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Wed, 20 Jun 2018 12:09:54 +0530 Subject: [PATCH 08/57] Add working backward pass --- src/cuda/cudnn.jl | 152 ++++++++++++++++++++++++++-------------------- 1 file changed, 85 insertions(+), 67 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index c8dc553a..132e105f 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -73,20 +73,20 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray end @check ccall((:cudnnBatchNormalizationForwardTraining, libcudnn), cudnnStatus_t, - (cudnnHandle_t,cudnnBatchNormMode_t, - Ptr{T}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{T}, - Cdouble, Ptr{T}, Ptr{T}, - Cdouble, Ptr{T}, Ptr{T}), - libcudnn_handle[], BATCHNORM_SPATIAL, - Ref(T(alpha)), Ref(T(beta)), - xd, x, - yd, y, - gd, g, b, - momentum, running_mean, running_var, - eps, mean, ivar) + (cudnnHandle_t,cudnnBatchNormMode_t, + Ptr{T}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, Ptr{T}, + Cdouble, Ptr{T}, Ptr{T}, + Cdouble, Ptr{T}, Ptr{T}), + libcudnn_handle[], BATCHNORM_SPATIAL, + Ref(T(alpha)), Ref(T(beta)), + xd, x, + yd, y, + gd, g, b, + momentum, running_mean, running_var, + eps, mean, ivar) if(cache !== nothing) cache.mean = mean @@ -94,60 +94,78 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray end else @check ccall((:cudnnBatchNormalizationForwardInference, libcudnn), cudnnStatus_t, - (Ptr{cudnnHandle_t},cudnnBatchNormMode_t, - Ptr{T}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{T}, - Ptr{T}, Ptr{T}, - Cdouble), - libcudnn_handle[], BATCHNORM_SPATIAL, - Ref(T(alpha)), Ref(T(beta)), - xd, x, - yd, y, - gd, g, b, - running_mean, running_var, - eps) + (Ptr{cudnnHandle_t},cudnnBatchNormMode_t, + Ptr{T}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, Ptr{T}, + Ptr{T}, Ptr{T}, + Cdouble), + libcudnn_handle[], BATCHNORM_SPATIAL, + Ref(T(alpha)), Ref(T(beta)), + xd, x, + yd, y, + gd, g, b, + running_mean, running_var, + eps) end end -function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, - dx::CuArray{T}, x::CuArray{T}, dy::CuArray{T}, - running_mean::CuArray{T}, running_var::CuArray{T}, - momentum; training = true, - cache = nothing, eps = T(1e-5), - alpha = T(1), beta = T(0), - dalpha = T(1), dbeta = T(0)) where T<:Union{Float32, Float64} - if(training) - - if cache !== nothing - mean, ivar = cache.mean, cache.ivar - cache_verbose && info("mean and ivar are fetched from the cache") - else - mean, ivar = C_NULL, C_NULL - end - - @check ccall((:cudnnBatchNormalizationBackward, libcudnn), cudnnStatus_t, - (cudnnHandle_t,cudnnBatchNormMode_t, - Ptr{T}, Ptr{T}, - Ptr{T}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{T}, Ptr{T}, - Cdouble, Ptr{T}, Ptr{T}), - libcudnn_handle[], BATCHNORM_SPATIAL, - Ref(T(alpha)), Ref(T(beta)), - Ref(T(dalpha)), Ref(T(dbeta)), - TensorDesc(x), x, - TensorDesc(dy), dy, - TensorDesc(dx), dx, - TensorDesc(g), g, dg, db, - eps, mean, ivar) - else - ivar = 1 ./ sqrt.(running_var .+ eps) - dx = dy .* g .* ivar - dg = sum(dy .* (x .- running_mean) .* ivar, _reddims(dy)) - db = sum(dy, _reddims(dy)) - end +function cudnnBNBackward(g, b, x::CuArray{T}, dy::CuArray{T}, running_mean::CuArray{T}, + running_var::CuArray{T}, momentum; + training = true, cache = nothing, eps = T(1e-5), + alpha = T(1), beta = T(0)) where T<:Union{Float32, Float64} + dx = similar(x) + cudnnBNBackward!(g.grad, data(g), b.grad, dx, x, dy, running_mean, running_var, T(momentum), + training = training, cache = cache, eps = eps, alpha = alpha, beta = beta) + dx +end + +function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, + dx::CuArray{T}, x::CuArray{T}, dy::CuArray{T}, + running_mean::CuArray{T}, running_var::CuArray{T}, + momentum; training = true, + cache = nothing, eps = T(1e-5), + alpha = T(1), beta = T(0), + dalpha = T(1), dbeta = T(0)) where T<:Union{Float32, Float64} + if(training) + xd = TensorDesc(x) + dyd = TensorDesc(dy) + dxd = TensorDesc(dx) + gd = TensorDesc(T, (1,1,length(g),1)) + if cache !== nothing + mean, ivar = cache.mean, cache.ivar + info("mean and ivar are fetched from the cache") + else + mean, ivar = C_NULL, C_NULL + end + + if(eps < BATCHNORM_MIN_EPS) + warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) + eps = BATCHNORM_MIN_EPS + end + + @check ccall((:cudnnBatchNormalizationBackward, libcudnn), cudnnStatus_t, + (cudnnHandle_t,cudnnBatchNormMode_t, + Ptr{T}, Ptr{T}, + Ptr{T}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, + Ptr{Void}, Ptr{T}, Ptr{T}, Ptr{T}, + Cdouble, Ptr{T}, Ptr{T}), + libcudnn_handle[], BATCHNORM_SPATIAL, + Ref(T(alpha)), Ref(T(beta)), + Ref(T(dalpha)), Ref(T(dbeta)), + xd, x, + dyd, dy, + dxd, dx, + gd, g, dg, db, + eps, mean, ivar) + else + ivar = 1 ./ sqrt.(reshape(running_var, (1, 1, length(running_var), 1)) .+ eps) + dx .= dy .* reshape(g, (1, 1, length(g), 1)) .* ivar + dg .= squeeze(sum(dy .* (x .- reshape(running_mean, (1, 1, length(running_mean), 1))) .* ivar, _reddims(dy)), (1,2,4)) + db .= squeeze(sum(dy, _reddims(dy)), (1,2,4)) + end end From 714ca23aba24a7926ca37257d185176ba884edd3 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Wed, 20 Jun 2018 12:11:22 +0530 Subject: [PATCH 09/57] Change default value of epsilon to prevent CuDNN BatchNorm warnings --- src/layers/normalise.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 54f5eb56..5e363454 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -108,7 +108,7 @@ mutable struct BatchNorm{F,V,W,N} end BatchNorm(chs::Integer, λ = identity; - initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1) = + initβ = zeros, initγ = ones, ϵ = 1e-5, momentum = .1) = BatchNorm(λ, param(initβ(chs)), param(initγ(chs)), zeros(chs), ones(chs), ϵ, momentum, true) From 3339ad51812e56a9fc1f322b5340d0d6863ebb7f Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Wed, 20 Jun 2018 15:50:30 +0530 Subject: [PATCH 10/57] Integrate cudnn BatchNorm with Flux --- src/cuda/cudnn.jl | 61 +++++++++++++++++++++++++++++------------ src/layers/normalise.jl | 34 +++++++++++------------ 2 files changed, 61 insertions(+), 34 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 132e105f..c7d997b9 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -1,5 +1,6 @@ using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, cudnnBatchNormMode_t, cudnnHandle_t, libcudnn_handle, cudnnDataType, TensorDesc, FilterDesc +import Flux.data mutable struct DropoutDesc ptr::Ptr{Void} @@ -27,6 +28,7 @@ const BATCHNORM_ACTIVATION = 0 const BATCHNORM_MIN_EPS = 1e-5 @inline _wsize(y) = ((1 for _=1:ndims(y)-2)..., size(y)[end-1], 1) +@inline _reddims(y) = ((i for i=1:ndims(y)-2)..., ndims(y)) mutable struct bncache mean @@ -35,15 +37,12 @@ end bncache() = bncache(nothing, nothing) -(BN::BatchNorm)(x::CuArray{T}; cache = nothing) where T<:Union{Float32, Float64} = - BN.λ.(cudnnBNForward(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum, cache = cache, eps = BN.ϵ, training = BN.active)) - -function cudnnBNForward(g, b, x, running_mean::CuArray{T}, - running_var::CuArray{T}, momentum; - cache = nothing, alpha = T(1), beta = T(0), - eps = T(1e-5), training = true) where T<:Union{Float32, Float64} +function batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, + running_mean::CuArray{T}, running_var::CuArray{T}, momentum; + cache = nothing, alpha = T(1), beta = T(0), + eps = T(1e-5), training = true) where T<:Union{Float32, Float64} y = similar(x) - cudnnBNForward!(y, data(g), data(b), data(x), running_mean, running_var, momentum, cache = cache, + cudnnBNForward!(y, g, b, x, running_mean, running_var, momentum, cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) y end @@ -111,23 +110,24 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray end end -function cudnnBNBackward(g, b, x::CuArray{T}, dy::CuArray{T}, running_mean::CuArray{T}, - running_var::CuArray{T}, momentum; - training = true, cache = nothing, eps = T(1e-5), - alpha = T(1), beta = T(0)) where T<:Union{Float32, Float64} +function ∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, dy::CuArray{T}, + running_mean::CuArray{T}, running_var::CuArray{T}, momentum; + cache = nothing, eps = T(1e-5), alpha = T(1), + beta = T(0), training = true) where T<:Union{Float32, Float64} + dg = similar(g) + db = similar(b) dx = similar(x) - cudnnBNBackward!(g.grad, data(g), b.grad, dx, x, dy, running_mean, running_var, T(momentum), + cudnnBNBackward!(dg, g, db, dx, x, dy, running_mean, running_var, T(momentum), training = training, cache = cache, eps = eps, alpha = alpha, beta = beta) - dx + (dx, db, dx) end function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, dx::CuArray{T}, x::CuArray{T}, dy::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, - momentum; training = true, - cache = nothing, eps = T(1e-5), + momentum; cache = nothing, eps = T(1e-5), alpha = T(1), beta = T(0), - dalpha = T(1), dbeta = T(0)) where T<:Union{Float32, Float64} + dalpha = T(1), dbeta = T(0), training = true) where T<:Union{Float32, Float64} if(training) xd = TensorDesc(x) dyd = TensorDesc(dy) @@ -169,3 +169,30 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, db .= squeeze(sum(dy, _reddims(dy)), (1,2,4)) end end + +# Flux Interface + +import Flux.Tracker: track, back, @back, istracked + +_batchnorm(g, b, x, running_mean, running_var, momentum, + cache, alpha, beta, eps, training) = + batchnorm(g, b, x, running_mean, running_var, momentum, cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) + +batchnorm(g::TrackedArray, b::TrackedArray, x::TrackedArray, running_mean::CuArray{T}, + running_var::CuArray{T}, momentum; + cache = nothing, alpha = T(1), beta = T(0), + eps = T(1e-5), training = true) where T<:Union{Float32, Float64} = + track(_batchnorm, g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) + +batchnorm(g::TrackedArray, b::TrackedArray, x::CuArray{T}, running_mean::CuArray{T}, + running_var::CuArray{T}, momentum; cache = nothing, alpha = T(1), beta = T(0), + eps = T(1e-5), training = true) where T<:Union{Float32, Float64} = + track(_batchnorm, g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) + +function back(::typeof(_batchnorm), Δ, g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) + deriv_tup = ∇batchnorm(data(g), data(b), data(x), Δ, running_mean, running_var, momentum, + cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) + istracked(x) && @back(x, deriv_tup[1]) + @back(b, deriv_tup[2]) + @back(g, deriv_tup[3]) +end diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 5e363454..25832c07 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -104,46 +104,46 @@ mutable struct BatchNorm{F,V,W,N} σ::W # moving std ϵ::N momentum::N + cache active::Bool end BatchNorm(chs::Integer, λ = identity; initβ = zeros, initγ = ones, ϵ = 1e-5, momentum = .1) = BatchNorm(λ, param(initβ(chs)), param(initγ(chs)), - zeros(chs), ones(chs), ϵ, momentum, true) + zeros(chs), ones(chs), ϵ, momentum, nothing, true) -function (BN::BatchNorm)(x) - size(x, ndims(x)-1) == length(BN.β) || + +function batchnorm(γ, β, x, μ, σ, momentum; cache = nothing, alpha = 1, beta = 0, eps = 1.0e-5, training = true) + size(x, ndims(x)-1) == length(β) || error("BatchNorm expected $(length(BN.β)) channels, got $(size(x, ndims(x)-1))") - γ, β = BN.γ, BN.β dims = length(size(x)) channels = size(x, dims-1) affine_shape = ones(Int, dims) affine_shape[end-1] = channels m = prod(size(x)[1:end-2]) * size(x)[end] - if !BN.active - μ = reshape(BN.μ, affine_shape...) - σ = reshape(BN.σ, affine_shape...) + if !training + μ_curr = reshape(μ, affine_shape...) + σ_curr = reshape(σ, affine_shape...) else T = eltype(x) - ϵ = data(convert(T, BN.ϵ)) + eps = Flux.data(convert(T, eps)) axes = [1:dims-2; dims] # axes to reduce along (all but channels axis) - μ = mean(x, axes) - σ = sqrt.(mean((x .- μ).^2, axes) .+ ϵ) + μ_curr = mean(x, axes) + σ_curr = sqrt.(mean((x .- μ_curr).^2, axes) .+ eps) # update moving mean/std - mtm = data(convert(T, BN.momentum)) - BN.μ = (1 - mtm) .* BN.μ .+ mtm .* squeeze(data(μ), (axes...)) - BN.σ = (1 - mtm) .* BN.σ .+ mtm .* squeeze(data(σ), (axes...)) .* m ./ (m - 1) - end - - let λ = BN.λ - λ.(reshape(γ, affine_shape...) .* ((x .- μ) ./ σ) .+ reshape(β, affine_shape...)) + mtm = Flux.data(convert(T, momentum)) + μ .= (1 - mtm) .* μ .+ mtm .* squeeze(Flux.data(μ_curr), (axes...)) + σ .= (1 - mtm) .* σ .+ mtm .* squeeze(Flux.data(σ_curr), (axes...)) .* m ./ (m - 1) end + reshape(γ, affine_shape...) .* ((x .- μ_curr) ./ σ_curr) .+ reshape(β, affine_shape...) end +(BN::BatchNorm)(x) = BN.λ.(batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = BN.cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active)) + children(BN::BatchNorm) = (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active) From deb495026184b67e407e7e31adb164f3bd9f00cd Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Wed, 20 Jun 2018 15:54:38 +0530 Subject: [PATCH 11/57] Make cuDNN take only 4D arrays --- src/cuda/cudnn.jl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index c7d997b9..9948ef37 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -37,7 +37,10 @@ end bncache() = bncache(nothing, nothing) -function batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, +# CuDNN supports only 4D and 5D Tensors for BatchNorm Operations +# so use the native julia code when doing batchnorm on a 2D Array + +function batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T, 4}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; cache = nothing, alpha = T(1), beta = T(0), eps = T(1e-5), training = true) where T<:Union{Float32, Float64} From a4e35e9e91b967fca248d58092d7d0538cda5881 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Wed, 20 Jun 2018 16:22:25 +0530 Subject: [PATCH 12/57] Adjust atol in tests --- test/layers/normalisation.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/layers/normalisation.jl b/test/layers/normalisation.jl index 0fdb1021..20a6332a 100644 --- a/test/layers/normalisation.jl +++ b/test/layers/normalisation.jl @@ -57,13 +57,13 @@ end # 2×1 Array{Float64,2}: # 1.14495 # 1.14495 - @test m.σ ≈ .1 .* std(x.data, 2, corrected=false) .* (3 / 2).+ .9 .* [1., 1.] + @test isapprox(m.σ, .1 .* std(x.data, 2, corrected=false) .* (3 / 2).+ .9 .* [1., 1.], atol = 1.0e-6) testmode!(m) @test !m.active x′ = m(x).data - @test x′[1] ≈ (1 - 0.3) / 1.1449489742783179 + @test isapprox(x′[1], (1 - 0.3) / 1.1449489742783179, atol = 1.0e-6) end # with activation function @@ -75,7 +75,7 @@ end @test !m.active x′ = m(x).data - @test x′[1] ≈ σ((1 - 0.3) / 1.1449489742783179) + @test isapprox(x′[1], σ((1 - 0.3) / 1.1449489742783179), atol = 1.0e-7) end let m = BatchNorm(2), x = param(reshape(1:6, 3, 2, 1)) From 91850a8baf37bc1961e15943100a98f86300844d Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Wed, 20 Jun 2018 18:46:42 +0530 Subject: [PATCH 13/57] Add missing path to curnn.jl --- src/cuda/cuda.jl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cuda/cuda.jl b/src/cuda/cuda.jl index eaa3fe00..764bb96f 100644 --- a/src/cuda/cuda.jl +++ b/src/cuda/cuda.jl @@ -2,6 +2,9 @@ module CUDA using CuArrays -CuArrays.cudnn_available() && include("cudnn.jl") +if CuArrays.cudnn_available() + include("cudnn.jl") + include("curnn.jl") +end end From f29377123e8109a9ca20acda74f2fe0f44e49e05 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Fri, 22 Jun 2018 18:19:18 +0530 Subject: [PATCH 14/57] Add tests for CuDNN BatchNorm --- test/cuda/cuda.jl | 6 +++++- test/cuda/cudnn.jl | 52 ++++++---------------------------------------- test/cuda/curnn.jl | 46 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 47 deletions(-) create mode 100644 test/cuda/curnn.jl diff --git a/test/cuda/cuda.jl b/test/cuda/cuda.jl index d16ce8f2..159a12a2 100644 --- a/test/cuda/cuda.jl +++ b/test/cuda/cuda.jl @@ -32,4 +32,8 @@ cx = gpu(x) end -CuArrays.cudnn_available() && include("cudnn.jl") +if CuArrays.cudnn_available() + info("Testing Flux/CUDNN RNN") + include("cudnn.jl") + include("curnn.jl") +end diff --git a/test/cuda/cudnn.jl b/test/cuda/cudnn.jl index 91b5b972..4dbe116f 100644 --- a/test/cuda/cudnn.jl +++ b/test/cuda/cudnn.jl @@ -1,48 +1,8 @@ -using Flux, CuArrays, Base.Test +using Flux, Flux.Tracker, CuArrays, Base.Test +using Flux: gpu -info("Testing Flux/CUDNN") - -@testset "RNN" begin - @testset for R in [RNN, GRU, LSTM] - rnn = R(10, 5) - curnn = mapleaves(gpu, rnn) - @testset for batch_size in (1, 5) - Flux.reset!(rnn) - Flux.reset!(curnn) - x = batch_size == 1 ? - param(rand(10)) : - param(rand(10,batch_size)) - cux = gpu(x) - y = (rnn(x); rnn(x)) - cuy = (curnn(cux); curnn(cux)) - - @test y.data ≈ collect(cuy.data) - @test haskey(Flux.CUDA.descs, curnn.cell) - - Δ = randn(size(y)) - - Flux.back!(y, Δ) - Flux.back!(cuy, gpu(Δ)) - - @test x.grad ≈ collect(cux.grad) - @test rnn.cell.Wi.grad ≈ collect(curnn.cell.Wi.grad) - @test rnn.cell.Wh.grad ≈ collect(curnn.cell.Wh.grad) - @test rnn.cell.b.grad ≈ collect(curnn.cell.b.grad) - @test rnn.cell.h.grad ≈ collect(curnn.cell.h.grad) - if isdefined(rnn.cell, :c) - @test rnn.cell.c.grad ≈ collect(curnn.cell.c.grad) - end - - Flux.reset!(rnn) - Flux.reset!(curnn) - ohx = batch_size == 1 ? - Flux.onehot(rand(1:10), 1:10) : - Flux.onehotbatch(rand(1:10, batch_size), 1:10) - cuohx = gpu(ohx) - y = (rnn(ohx); rnn(ohx)) - cuy = (curnn(cuohx); curnn(cuohx)) - - @test y.data ≈ collect(cuy.data) - end - end +@testset "CUDNN BatchNorm" begin + x = gpu(rand(10, 10, 3, 1)) + m = gpu(BatchNorm(3)) + @test m(x) isa TrackedArray{Float32,4,CuArray{Float32,4}} end diff --git a/test/cuda/curnn.jl b/test/cuda/curnn.jl new file mode 100644 index 00000000..156b330d --- /dev/null +++ b/test/cuda/curnn.jl @@ -0,0 +1,46 @@ +using Flux, CuArrays, Base.Test + +@testset "RNN" begin + @testset for R in [RNN, GRU, LSTM] + rnn = R(10, 5) + curnn = mapleaves(gpu, rnn) + @testset for batch_size in (1, 5) + Flux.reset!(rnn) + Flux.reset!(curnn) + x = batch_size == 1 ? + param(rand(10)) : + param(rand(10,batch_size)) + cux = gpu(x) + y = (rnn(x); rnn(x)) + cuy = (curnn(cux); curnn(cux)) + + @test y.data ≈ collect(cuy.data) + @test haskey(Flux.CUDA.descs, curnn.cell) + + Δ = randn(size(y)) + + Flux.back!(y, Δ) + Flux.back!(cuy, gpu(Δ)) + + @test x.grad ≈ collect(cux.grad) + @test rnn.cell.Wi.grad ≈ collect(curnn.cell.Wi.grad) + @test rnn.cell.Wh.grad ≈ collect(curnn.cell.Wh.grad) + @test rnn.cell.b.grad ≈ collect(curnn.cell.b.grad) + @test rnn.cell.h.grad ≈ collect(curnn.cell.h.grad) + if isdefined(rnn.cell, :c) + @test rnn.cell.c.grad ≈ collect(curnn.cell.c.grad) + end + + Flux.reset!(rnn) + Flux.reset!(curnn) + ohx = batch_size == 1 ? + Flux.onehot(rand(1:10), 1:10) : + Flux.onehotbatch(rand(1:10, batch_size), 1:10) + cuohx = gpu(ohx) + y = (rnn(ohx); rnn(ohx)) + cuy = (curnn(cuohx); curnn(cuohx)) + + @test y.data ≈ collect(cuy.data) + end + end +end From 24ba1c4e6cb70b0200398866f03f9a0dc4d50b68 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 23 Jun 2018 11:02:41 +0530 Subject: [PATCH 15/57] Make changes as per the review --- src/cuda/cudnn.jl | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 9948ef37..dd1775ad 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -27,15 +27,16 @@ const BATCHNORM_SPATIAL = 1 const BATCHNORM_ACTIVATION = 0 const BATCHNORM_MIN_EPS = 1e-5 -@inline _wsize(y) = ((1 for _=1:ndims(y)-2)..., size(y)[end-1], 1) -@inline _reddims(y) = ((i for i=1:ndims(y)-2)..., ndims(y)) +@inline _wsize(y) = (map(_ -> 1, size(y)[1:end-2])..., size(y)[end-1], 1) -mutable struct bncache +@inline _reddims(y) = (collect(1:ndims(y)-2)..., ndims(y)) + +mutable struct BNCache mean ivar end -bncache() = bncache(nothing, nothing) +BNCache() = BNCache(nothing, nothing) # CuDNN supports only 4D and 5D Tensors for BatchNorm Operations # so use the native julia code when doing batchnorm on a 2D Array @@ -56,7 +57,7 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray alpha = T(1), beta = T(0), eps = T(1e-5), training = true) where T<:Union{Float32, Float64} dims = _wsize(x) - if(eps < BATCHNORM_MIN_EPS) + if eps < BATCHNORM_MIN_EPS warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) eps = BATCHNORM_MIN_EPS end @@ -64,11 +65,11 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray yd = TensorDesc(y) gd = TensorDesc(T, (1,1,length(g),1)) - if(training) + if training - if(cache !== nothing) - mean = cu(zeros(T, dims...)) - ivar = cu(ones(T, dims...)) + if cache !== nothing + mean = zeros(CuArray{T}, dims...) + ivar = ones(CuArray{T}, dims...) else mean = C_NULL ivar = C_NULL @@ -90,7 +91,7 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray momentum, running_mean, running_var, eps, mean, ivar) - if(cache !== nothing) + if cache !== nothing cache.mean = mean cache.ivar = ivar end @@ -131,7 +132,7 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, momentum; cache = nothing, eps = T(1e-5), alpha = T(1), beta = T(0), dalpha = T(1), dbeta = T(0), training = true) where T<:Union{Float32, Float64} - if(training) + if training xd = TensorDesc(x) dyd = TensorDesc(dy) dxd = TensorDesc(dx) @@ -143,7 +144,7 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, mean, ivar = C_NULL, C_NULL end - if(eps < BATCHNORM_MIN_EPS) + if eps < BATCHNORM_MIN_EPS warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) eps = BATCHNORM_MIN_EPS end @@ -175,7 +176,8 @@ end # Flux Interface -import Flux.Tracker: track, back, @back, istracked +import ..Flux: Flux +import ..Tracker: track, back, @back, istracked, TrackedArray _batchnorm(g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) = @@ -195,7 +197,7 @@ batchnorm(g::TrackedArray, b::TrackedArray, x::CuArray{T}, running_mean::CuArray function back(::typeof(_batchnorm), Δ, g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) deriv_tup = ∇batchnorm(data(g), data(b), data(x), Δ, running_mean, running_var, momentum, cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) - istracked(x) && @back(x, deriv_tup[1]) + @back(x, deriv_tup[1]) @back(b, deriv_tup[2]) @back(g, deriv_tup[3]) end From 9a168528de96105a9d6f98829ee1384b96daf911 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 23 Jun 2018 11:03:15 +0530 Subject: [PATCH 16/57] Add tests to make sure CPU and GPU versions have similar outputs --- test/cuda/cudnn.jl | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/test/cuda/cudnn.jl b/test/cuda/cudnn.jl index 4dbe116f..db4696c6 100644 --- a/test/cuda/cudnn.jl +++ b/test/cuda/cudnn.jl @@ -1,8 +1,24 @@ using Flux, Flux.Tracker, CuArrays, Base.Test +using Flux.Tracker: TrackedArray using Flux: gpu @testset "CUDNN BatchNorm" begin - x = gpu(rand(10, 10, 3, 1)) - m = gpu(BatchNorm(3)) - @test m(x) isa TrackedArray{Float32,4,CuArray{Float32,4}} + x = TrackedArray(rand(10, 10, 3, 1)) + m = BatchNorm(3) + cx = gpu(x) + cm = gpu(m) + + y = m(x) + cy = cm(cx) + + @test cy isa TrackedArray{Float32,4,CuArray{Float32,4}} + + @test cpu(cy) ≈ y + + Flux.back!(y, ones(y)) + Flux.back!(cy, ones(cy)) + + @test m.γ.grad ≈ cpu(cm.γ.grad) + @test m.β.grad ≈ cpu(cm.β.grad) + @test m.x.grad ≈ cpu(cm.x.grad) end From 4916c8e6da46d13078e4bfac6f10312a3fe44ce8 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Wed, 27 Jun 2018 14:54:49 +0530 Subject: [PATCH 17/57] Add treelike for now --- src/layers/normalise.jl | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 25832c07..e43c76b7 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -144,11 +144,13 @@ end (BN::BatchNorm)(x) = BN.λ.(batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = BN.cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active)) -children(BN::BatchNorm) = - (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active) +Flux.treelike(BatchNorm) -mapchildren(f, BN::BatchNorm) = # e.g. mapchildren(cu, BN) - BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ), BN.ϵ, BN.momentum, BN.active) +# children(BN::BatchNorm) = +# (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active) +# +# mapchildren(f, BN::BatchNorm) = # e.g. mapchildren(cu, BN) +# BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ), BN.ϵ, BN.momentum, BN.active) _testmode!(BN::BatchNorm, test) = (BN.active = !test) From 8f43258ab790aca0791c81725b0ba56d79ba47b3 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Thu, 28 Jun 2018 12:04:25 +0530 Subject: [PATCH 18/57] Get the batchnorm working without cache --- src/cuda/cudnn.jl | 19 +++++++++++++------ src/layers/normalise.jl | 40 +++++++++++++++++----------------------- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index dd1775ad..088876e4 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -1,6 +1,6 @@ using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, cudnnBatchNormMode_t, cudnnHandle_t, libcudnn_handle, cudnnDataType, TensorDesc, FilterDesc -import Flux.data +import ..Flux: data mutable struct DropoutDesc ptr::Ptr{Void} @@ -63,7 +63,7 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray end xd = TensorDesc(x) yd = TensorDesc(y) - gd = TensorDesc(T, (1,1,length(g),1)) + gd = TensorDesc(T, dims) if training @@ -136,7 +136,7 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, xd = TensorDesc(x) dyd = TensorDesc(dy) dxd = TensorDesc(dx) - gd = TensorDesc(T, (1,1,length(g),1)) + gd = TensorDesc(T, _wsize(x)) if cache !== nothing mean, ivar = cache.mean, cache.ivar info("mean and ivar are fetched from the cache") @@ -167,9 +167,9 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, gd, g, dg, db, eps, mean, ivar) else - ivar = 1 ./ sqrt.(reshape(running_var, (1, 1, length(running_var), 1)) .+ eps) - dx .= dy .* reshape(g, (1, 1, length(g), 1)) .* ivar - dg .= squeeze(sum(dy .* (x .- reshape(running_mean, (1, 1, length(running_mean), 1))) .* ivar, _reddims(dy)), (1,2,4)) + ivar = 1 ./ sqrt.(reshape(running_var, _wsize(x)) .+ eps) + dx .= dy .* reshape(g, _wsize(x)) .* ivar + dg .= squeeze(sum(dy .* (x .- reshape(running_mean, _wsize(x))) .* ivar, _reddims(dy)), (1,2,4)) db .= squeeze(sum(dy, _reddims(dy)), (1,2,4)) end end @@ -179,6 +179,13 @@ end import ..Flux: Flux import ..Tracker: track, back, @back, istracked, TrackedArray +CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} +CuParam45{T} = Union{CuParam{T,4},CuParam{T,5}} +CuBatchNorm{T} = Flux.BatchNorm{<:Union{typeof(identity),typeof(relu)},<:CuParam{T,1},<:CuParam{T,1},<:T} + +(BN::BatchNorm)(x::CuParam45{T}) = + batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = nothing, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active) + _batchnorm(g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) = batchnorm(g, b, x, running_mean, running_var, momentum, cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index e43c76b7..04082a73 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -104,7 +104,6 @@ mutable struct BatchNorm{F,V,W,N} σ::W # moving std ϵ::N momentum::N - cache active::Bool end @@ -113,44 +112,39 @@ BatchNorm(chs::Integer, λ = identity; BatchNorm(λ, param(initβ(chs)), param(initγ(chs)), zeros(chs), ones(chs), ϵ, momentum, nothing, true) - -function batchnorm(γ, β, x, μ, σ, momentum; cache = nothing, alpha = 1, beta = 0, eps = 1.0e-5, training = true) - size(x, ndims(x)-1) == length(β) || +function (BN::BatchNorm)(x) + size(x, ndims(x)-1) == length(BN.β) || error("BatchNorm expected $(length(BN.β)) channels, got $(size(x, ndims(x)-1))") + γ, β = BN.γ, BN.β dims = length(size(x)) channels = size(x, dims-1) affine_shape = ones(Int, dims) affine_shape[end-1] = channels m = prod(size(x)[1:end-2]) * size(x)[end] - if !training - μ_curr = reshape(μ, affine_shape...) - σ_curr = reshape(σ, affine_shape...) + if !BN.active + μ = reshape(BN.μ, affine_shape...) + σ = reshape(BN.σ, affine_shape...) else T = eltype(x) - eps = Flux.data(convert(T, eps)) + ϵ = data(convert(T, BN.ϵ)) axes = [1:dims-2; dims] # axes to reduce along (all but channels axis) - μ_curr = mean(x, axes) - σ_curr = sqrt.(mean((x .- μ_curr).^2, axes) .+ eps) + μ = mean(x, axes) + σ = sqrt.(mean((x .- μ).^2, axes) .+ ϵ) # update moving mean/std - mtm = Flux.data(convert(T, momentum)) - μ .= (1 - mtm) .* μ .+ mtm .* squeeze(Flux.data(μ_curr), (axes...)) - σ .= (1 - mtm) .* σ .+ mtm .* squeeze(Flux.data(σ_curr), (axes...)) .* m ./ (m - 1) + mtm = data(convert(T, BN.momentum)) + BN.μ = (1 - mtm) .* BN.μ .+ mtm .* squeeze(data(μ), (axes...)) + BN.σ = (1 - mtm) .* BN.σ .+ mtm .* squeeze(data(σ), (axes...)) .* m ./ (m - 1) + end + + let λ = BN.λ + λ.(reshape(γ, affine_shape...) .* ((x .- μ) ./ σ) .+ reshape(β, affine_shape...)) end - reshape(γ, affine_shape...) .* ((x .- μ_curr) ./ σ_curr) .+ reshape(β, affine_shape...) end -(BN::BatchNorm)(x) = BN.λ.(batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = BN.cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active)) - -Flux.treelike(BatchNorm) - -# children(BN::BatchNorm) = -# (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active) -# -# mapchildren(f, BN::BatchNorm) = # e.g. mapchildren(cu, BN) -# BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ), BN.ϵ, BN.momentum, BN.active) +treelike(BatchNorm) _testmode!(BN::BatchNorm, test) = (BN.active = !test) From 681d8c4dfcafe311024425d85ab846d6ed89c251 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Thu, 28 Jun 2018 12:11:32 +0530 Subject: [PATCH 19/57] Remove cache --- src/layers/normalise.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 04082a73..8d2c3ffd 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -110,7 +110,7 @@ end BatchNorm(chs::Integer, λ = identity; initβ = zeros, initγ = ones, ϵ = 1e-5, momentum = .1) = BatchNorm(λ, param(initβ(chs)), param(initγ(chs)), - zeros(chs), ones(chs), ϵ, momentum, nothing, true) + zeros(chs), ones(chs), ϵ, momentum, true) function (BN::BatchNorm)(x) size(x, ndims(x)-1) == length(BN.β) || From 5ccde88ce61e777b125a3638c0621fd4a80c0031 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Thu, 28 Jun 2018 14:21:17 +0530 Subject: [PATCH 20/57] Minor fix for 5D support --- src/cuda/cuda.jl | 3 ++- src/cuda/cudnn.jl | 8 ++------ src/cuda/curnn.jl | 1 - 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/src/cuda/cuda.jl b/src/cuda/cuda.jl index 764bb96f..d0e14bf4 100644 --- a/src/cuda/cuda.jl +++ b/src/cuda/cuda.jl @@ -3,8 +3,9 @@ module CUDA using CuArrays if CuArrays.cudnn_available() - include("cudnn.jl") + CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} include("curnn.jl") + include("cudnn.jl") end end diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 088876e4..100f9f4b 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -41,7 +41,7 @@ BNCache() = BNCache(nothing, nothing) # CuDNN supports only 4D and 5D Tensors for BatchNorm Operations # so use the native julia code when doing batchnorm on a 2D Array -function batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T, 4}, +function batchnorm(g::CuArray{T}, b::CuArray{T}, x::Union{CuArray{T, 4},CuArray{T,5}}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; cache = nothing, alpha = T(1), beta = T(0), eps = T(1e-5), training = true) where T<:Union{Float32, Float64} @@ -179,11 +179,7 @@ end import ..Flux: Flux import ..Tracker: track, back, @back, istracked, TrackedArray -CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} -CuParam45{T} = Union{CuParam{T,4},CuParam{T,5}} -CuBatchNorm{T} = Flux.BatchNorm{<:Union{typeof(identity),typeof(relu)},<:CuParam{T,1},<:CuParam{T,1},<:T} - -(BN::BatchNorm)(x::CuParam45{T}) = +(BN::BatchNorm)(x::Union{CuParam{T,4},CuParam{T,5}}) where T<:Union{Float32, Float64} = batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = nothing, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active) _batchnorm(g, b, x, running_mean, running_var, momentum, diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index 905b1ef4..94254f91 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -234,7 +234,6 @@ function copy_transpose!(dst::CuArray, src::CuArray) return dst end -CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} CuRNN{T} = Flux.RNNCell{<:Union{typeof(tanh),typeof(relu)},<:CuParam{T,2},<:CuParam{T,1}} CuGRU{T} = Flux.GRUCell{<:CuParam{T,2},<:CuParam{T,1}} CuLSTM{T} = Flux.LSTMCell{<:CuParam{T,2},<:CuParam{T,1}} From 7ac9e191cbd2d9fb235d48bd023178c70778f7e5 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Thu, 28 Jun 2018 14:25:22 +0530 Subject: [PATCH 21/57] Revert 1 change --- src/cuda/cuda.jl | 1 - src/cuda/curnn.jl | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cuda/cuda.jl b/src/cuda/cuda.jl index d0e14bf4..f2b05aca 100644 --- a/src/cuda/cuda.jl +++ b/src/cuda/cuda.jl @@ -3,7 +3,6 @@ module CUDA using CuArrays if CuArrays.cudnn_available() - CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} include("curnn.jl") include("cudnn.jl") end diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index 94254f91..905b1ef4 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -234,6 +234,7 @@ function copy_transpose!(dst::CuArray, src::CuArray) return dst end +CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} CuRNN{T} = Flux.RNNCell{<:Union{typeof(tanh),typeof(relu)},<:CuParam{T,2},<:CuParam{T,1}} CuGRU{T} = Flux.GRUCell{<:CuParam{T,2},<:CuParam{T,1}} CuLSTM{T} = Flux.LSTMCell{<:CuParam{T,2},<:CuParam{T,1}} From d0b79e71e2c9dd0a99a0e545c49bcfdfd405654a Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Thu, 28 Jun 2018 14:27:50 +0530 Subject: [PATCH 22/57] fix load error --- src/cuda/cudnn.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 100f9f4b..d5c2de09 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -179,7 +179,7 @@ end import ..Flux: Flux import ..Tracker: track, back, @back, istracked, TrackedArray -(BN::BatchNorm)(x::Union{CuParam{T,4},CuParam{T,5}}) where T<:Union{Float32, Float64} = +(BN::Flux.BatchNorm)(x::Union{CuParam{T,4},CuParam{T,5}}) where T<:Union{Float32, Float64} = batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = nothing, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active) _batchnorm(g, b, x, running_mean, running_var, momentum, From bcf094451c83e6b2ffbd91c009c2106c6d1d00db Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Thu, 28 Jun 2018 14:45:35 +0530 Subject: [PATCH 23/57] Fix typo --- src/cuda/cudnn.jl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index d5c2de09..6d15fa61 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -123,7 +123,7 @@ function ∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, dy::CuArray{T dx = similar(x) cudnnBNBackward!(dg, g, db, dx, x, dy, running_mean, running_var, T(momentum), training = training, cache = cache, eps = eps, alpha = alpha, beta = beta) - (dx, db, dx) + (dx, db, dg) end function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, @@ -184,7 +184,8 @@ import ..Tracker: track, back, @back, istracked, TrackedArray _batchnorm(g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) = - batchnorm(g, b, x, running_mean, running_var, momentum, cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) + batchnorm(g, b, x, running_mean, running_var, momentum, cache = cache, + alpha = alpha, beta = beta, eps = eps, training = training) batchnorm(g::TrackedArray, b::TrackedArray, x::TrackedArray, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; From e3b10691d25d82503847f6dfc8d33058d77a6a6f Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Thu, 28 Jun 2018 15:27:59 +0530 Subject: [PATCH 24/57] make cache optional param --- src/cuda/cudnn.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 6d15fa61..81c2bcb4 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -179,8 +179,8 @@ end import ..Flux: Flux import ..Tracker: track, back, @back, istracked, TrackedArray -(BN::Flux.BatchNorm)(x::Union{CuParam{T,4},CuParam{T,5}}) where T<:Union{Float32, Float64} = - batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = nothing, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active) +(BN::Flux.BatchNorm)(x::Union{CuParam{T,4},CuParam{T,5}}, cache = nothing) where T<:Union{Float32, Float64} = + batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active) _batchnorm(g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) = From b239fc684eccce02d49a5ad5b7ee38e1489e960c Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Wed, 4 Jul 2018 18:57:43 +0530 Subject: [PATCH 25/57] Update tests --- test/cuda/cuda.jl | 3 ++- test/cuda/cudnn.jl | 12 ++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/test/cuda/cuda.jl b/test/cuda/cuda.jl index 159a12a2..5c9ff964 100644 --- a/test/cuda/cuda.jl +++ b/test/cuda/cuda.jl @@ -33,7 +33,8 @@ cx = gpu(x) end if CuArrays.cudnn_available() - info("Testing Flux/CUDNN RNN") + info("Testing Flux/CUDNN BatchNorm") include("cudnn.jl") + info("Testing Flux/CUDNN RNN") include("curnn.jl") end diff --git a/test/cuda/cudnn.jl b/test/cuda/cudnn.jl index db4696c6..c2a70f9f 100644 --- a/test/cuda/cudnn.jl +++ b/test/cuda/cudnn.jl @@ -1,6 +1,5 @@ using Flux, Flux.Tracker, CuArrays, Base.Test -using Flux.Tracker: TrackedArray -using Flux: gpu +using Flux.Tracker: TrackedArray, data @testset "CUDNN BatchNorm" begin x = TrackedArray(rand(10, 10, 3, 1)) @@ -13,12 +12,13 @@ using Flux: gpu @test cy isa TrackedArray{Float32,4,CuArray{Float32,4}} - @test cpu(cy) ≈ y + @test cpu(data(cy)) ≈ data(y) - Flux.back!(y, ones(y)) - Flux.back!(cy, ones(cy)) + g = ones(size(y)...) + Flux.back!(y, g) + Flux.back!(cy, gpu(g))) @test m.γ.grad ≈ cpu(cm.γ.grad) @test m.β.grad ≈ cpu(cm.β.grad) - @test m.x.grad ≈ cpu(cm.x.grad) + @test x.grad ≈ cpu(x.grad) end From 185e9148b6f4f3338e2af2e61cb001a2015ea92f Mon Sep 17 00:00:00 2001 From: CarloLucibello Date: Sun, 15 Jul 2018 17:49:41 +0200 Subject: [PATCH 26/57] fix cpu batchnorm --- src/layers/normalise.jl | 54 +++++++++++++++++++----------------- test/layers/normalisation.jl | 23 ++++++++------- 2 files changed, 39 insertions(+), 38 deletions(-) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 54f5eb56..0d4296a6 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -96,56 +96,58 @@ m = Chain( softmax) ``` """ -mutable struct BatchNorm{F,V,W,N} - λ::F # activation function - β::V # bias - γ::V # scale - μ::W # moving mean - σ::W # moving std - ϵ::N - momentum::N +mutable struct BatchNorm + λ # activation function + β # bias + γ # scale + μ # moving mean + σ² # moving var + ϵ + momentum active::Bool end -BatchNorm(chs::Integer, λ = identity; - initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1) = +function BatchNorm(chs::Integer, λ = identity; + initβ = x->zeros(Float32,x), + initγ = x->ones(Float32,x), + ϵ = 1f-8, + momentum = 0.1f0) BatchNorm(λ, param(initβ(chs)), param(initγ(chs)), - zeros(chs), ones(chs), ϵ, momentum, true) + zeros(Float32, chs), ones(Float32, chs), ϵ, momentum, true) +end function (BN::BatchNorm)(x) size(x, ndims(x)-1) == length(BN.β) || error("BatchNorm expected $(length(BN.β)) channels, got $(size(x, ndims(x)-1))") γ, β = BN.γ, BN.β - dims = length(size(x)) - channels = size(x, dims-1) + dims = ndims(x) affine_shape = ones(Int, dims) - affine_shape[end-1] = channels - m = prod(size(x)[1:end-2]) * size(x)[end] + affine_shape[end-1] = size(x, dims-1) + T = eltype(x) if !BN.active μ = reshape(BN.μ, affine_shape...) - σ = reshape(BN.σ, affine_shape...) + σ² = reshape(BN.σ², affine_shape...) else - T = eltype(x) - ϵ = data(convert(T, BN.ϵ)) axes = [1:dims-2; dims] # axes to reduce along (all but channels axis) + m = prod(size(x, axes...)) μ = mean(x, axes) - σ = sqrt.(mean((x .- μ).^2, axes) .+ ϵ) + σ² = sum((x.-μ).^2, axes) ./ m # update moving mean/std - mtm = data(convert(T, BN.momentum)) - BN.μ = (1 - mtm) .* BN.μ .+ mtm .* squeeze(data(μ), (axes...)) - BN.σ = (1 - mtm) .* BN.σ .+ mtm .* squeeze(data(σ), (axes...)) .* m ./ (m - 1) + mtm = convert(T, BN.momentum) + + BN.μ = ((1 - mtm) .* BN.μ .+ mtm .* squeeze(data(μ), (axes...))) |> data + BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* squeeze(data(σ²), (axes...))*m/(m-1)) |> data end - let λ = BN.λ - λ.(reshape(γ, affine_shape...) .* ((x .- μ) ./ σ) .+ reshape(β, affine_shape...)) - end + ϵ = convert(T, BN.ϵ) + BN.λ.(reshape(γ, affine_shape...) .* ((x .- μ) ./ sqrt.(σ² .+ ϵ)) .+ reshape(β, affine_shape...)) end children(BN::BatchNorm) = - (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active) + (BN.λ, BN.β, BN.γ, BN.μ, BN.σ², BN.ϵ, BN.momentum, BN.active) mapchildren(f, BN::BatchNorm) = # e.g. mapchildren(cu, BN) BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ), BN.ϵ, BN.momentum, BN.active) diff --git a/test/layers/normalisation.jl b/test/layers/normalisation.jl index 0fdb1021..587686e8 100644 --- a/test/layers/normalisation.jl +++ b/test/layers/normalisation.jl @@ -1,4 +1,5 @@ using Flux: testmode! +using Flux.Tracker: data @testset "Dropout" begin x = [1.,2.,3.] @@ -28,7 +29,8 @@ using Flux: testmode! end @testset "BatchNorm" begin - let m = BatchNorm(2), x = param([1 2; 3 4; 5 6]') + let m = BatchNorm(2), x = param([1 3 5; + 2 4 6]) @test m.β.data == [0, 0] # initβ(2) @test m.γ.data == [1, 1] # initγ(2) @@ -53,29 +55,26 @@ end # .1 * 4 + 0 = .4 @test m.μ ≈ reshape([0.3, 0.4], 2, 1) - # julia> .1 .* std(x, 2, corrected=false) .* (3 / 2).+ .9 .* [1., 1.] - # 2×1 Array{Float64,2}: - # 1.14495 - # 1.14495 - @test m.σ ≈ .1 .* std(x.data, 2, corrected=false) .* (3 / 2).+ .9 .* [1., 1.] + @test m.σ² ≈ 0.1 .* var(x.data, 2, corrected=false)*3/2 + 0.9 .* [1., 1.] testmode!(m) @test !m.active - x′ = m(x).data - @test x′[1] ≈ (1 - 0.3) / 1.1449489742783179 + y = m(x).data + @test y ≈ data((x .- m.μ) ./ sqrt.(m.σ² .+ m.ϵ)) end # with activation function - let m = BatchNorm(2, σ), x = param([1 2; 3 4; 5 6]') + let m = BatchNorm(2, sigmoid), x = param([1 3 5; + 2 4 6]) @test m.active m(x) testmode!(m) @test !m.active - x′ = m(x).data - @test x′[1] ≈ σ((1 - 0.3) / 1.1449489742783179) + y = m(x).data + @test y ≈ data(sigmoid.((x .- m.μ) ./ sqrt.(m.σ² .+ m.ϵ))) end let m = BatchNorm(2), x = param(reshape(1:6, 3, 2, 1)) @@ -85,7 +84,7 @@ end end let m = BatchNorm(2), x = param(reshape(1:12, 2, 3, 2, 1)) - y = reshape(permutedims(x, [3, 1, 2, 4]), 2, :) + y = reshape(permutedims(x, [3, 1, 2, 4]), 2, :) y = permutedims(reshape(m(y), 2, 2, 3, 1), [2, 3, 1, 4]) @test m(x) == y end From 071dcdda879a74cfd3c1115ac2c92087b38d4ae9 Mon Sep 17 00:00:00 2001 From: CarloLucibello Date: Mon, 16 Jul 2018 07:32:13 +0200 Subject: [PATCH 27/57] update docs --- src/layers/normalise.jl | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 0d4296a6..1a40382b 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -1,6 +1,5 @@ """ - testmode!(m) - testmode!(m, false) + testmode!(m, val=true) Put layers like [`Dropout`](@ref) and [`BatchNorm`](@ref) into testing mode (or back to training mode with `false`). @@ -94,7 +93,11 @@ m = Chain( Dense(64, 10), BatchNorm(10), softmax) + +y = m(rand(28^2, 10)) ``` + +To use the layer at test time set [`testmode!(m, true)`](@ref). """ mutable struct BatchNorm λ # activation function From 0bb3eaa1f6cde340907532984764ee55191b6dbe Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 17 Jul 2018 09:40:20 +0530 Subject: [PATCH 28/57] Update CUDNN Batchnorm with new Flux AD --- src/cuda/cudnn.jl | 90 +++++------------------------------------------ src/cuda/curnn.jl | 82 ++++++++++++++---------------------------- 2 files changed, 36 insertions(+), 136 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 92debbdd..20130b1d 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -123,7 +123,7 @@ function ∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, dy::CuArray{T dx = similar(x) cudnnBNBackward!(dg, g, db, dx, x, dy, running_mean, running_var, T(momentum), training = training, cache = cache, eps = eps, alpha = alpha, beta = beta) - (dx, db, dg) + (dg, db, dx) end function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, @@ -176,94 +176,22 @@ end # Flux Interface -<<<<<<< HEAD import ..Flux: Flux import ..Tracker: track, back, @back, istracked, TrackedArray (BN::Flux.BatchNorm)(x::Union{CuParam{T,4},CuParam{T,5}}, cache = nothing) where T<:Union{Float32, Float64} = - batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ, BN.momentum; cache = cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active) -======= -function desc(rnn) - d = haskey(descs, rnn) ? descs[rnn] : (descs[rnn] = RNNDesc(rnn)) - copyparams!(rnn, d) - return d -end + batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ², BN.momentum; cache = cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active) -import Flux.Tracker -import Flux.Tracker: data, istracked, track, unbroadcast, @grad, nobacksies - -istrain(m::CuRNNs, args...) = any(x -> x isa TrackedArray, (m.Wi, m.Wh, m.b, args...)) - -function (m::CuRNN{T})(h::CuParam{T}, x::CuParam{T}) where T <: Union{Float32,Float64} - result = istrain(m, h, x) ? - track(m, x, h, m.Wi, m.Wh, m.b) : - forward(desc(m), x, h) - return result[2], result[1] -end - -function (m::CuGRU{T})(h::CuParam{T}, x::CuParam{T}) where T <: Union{Float32,Float64} - result = istrain(m, h, x) ? - track(m, x, h, m.Wi, m.Wh, m.b) : - forward(desc(m), x, h) - return result[2], result[1] -end - -function (m::CuLSTM{T})(h::NTuple{2,CuParam{T}}, x::CuParam{T}) where T <: Union{Float32,Float64} - result = istrain(m, h, x) ? - track(m, x, h[1], h[2], m.Wi, m.Wh, m.b) : - forward(desc(m), x, h[1], h[2]) - return (result[2], result[3]), result[1] -end ->>>>>>> 071dcdda879a74cfd3c1115ac2c92087b38d4ae9 - -_batchnorm(g, b, x, running_mean, running_var, momentum, - cache, alpha, beta, eps, training) = - batchnorm(g, b, x, running_mean, running_var, momentum, cache = cache, - alpha = alpha, beta = beta, eps = eps, training = training) - -<<<<<<< HEAD batchnorm(g::TrackedArray, b::TrackedArray, x::TrackedArray, running_mean::CuArray{T}, - running_var::CuArray{T}, momentum; - cache = nothing, alpha = T(1), beta = T(0), - eps = T(1e-5), training = true) where T<:Union{Float32, Float64} = - track(_batchnorm, g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) + running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::TrackedArray, b::TrackedArray, x::CuArray{T}, running_mean::CuArray{T}, - running_var::CuArray{T}, momentum; cache = nothing, alpha = T(1), beta = T(0), - eps = T(1e-5), training = true) where T<:Union{Float32, Float64} = - track(_batchnorm, g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) + running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = + track(_batchnorm, g, b, x, running_mean, running_var, momentum, kw...) -function back(::typeof(_batchnorm), Δ, g, b, x, running_mean, running_var, momentum, cache, alpha, beta, eps, training) +@grad function batchnorm(g, b, x, running_mean, running_var, momentum; kw...) + y = batchnorm(data(g), data(b), data(x), running_mean, running_var, momentum; kw...) deriv_tup = ∇batchnorm(data(g), data(b), data(x), Δ, running_mean, running_var, momentum, cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) - @back(x, deriv_tup[1]) - @back(b, deriv_tup[2]) - @back(g, deriv_tup[3]) -======= -@grad function (m::Union{CuRNN,CuGRU})(x, h, Wi, Wh, b) - reserve, result = forwardTrain(desc(m), data(x), data(h)) - result, function (Δ) - y, ho = result - dy, dho = Δ - h_ = hBatch(x, data(h)) - dx, dh = backwardData(descs[m], y, dy, dho, h_, reserve) - (dWi, dWh), db = backwardWeights(descs[m], data(x), h_, y, reserve) - nobacksies(:RNN, (dx, unbroadcast(size(h), dh), dWi.', dWh.', db)) - end -end - -@grad function (m::CuLSTM)(x, h, c, Wi, Wh, b) - reserve, result = forwardTrain(desc(m), data.((x, h, c))...) - result, function (Δ) - y, ho = result - dy, dho, dco = Δ - h_ = hBatch(x, data(h)) - c_ = hBatch(x, data(c)) - dx, dh, dc = backwardData(descs[m], y, dy, dho, dco, h_, c_, reserve) - (dWi, dWh), db = backwardWeights(descs[m], data(x), h_, y, reserve) - nobacksies(:RNN, - (dx, unbroadcast(size(h), dh), unbroadcast(size(c), dc), - dWi.', dWh.', db)) - end ->>>>>>> 071dcdda879a74cfd3c1115ac2c92087b38d4ae9 -end + y, Δ -> (nobacksies(:batchnorm, ∇batchnorm(data.(g, b, x, Δ), running_mean, running_var, momentum; kw...)), nothing, nothing, nothing) diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index 905b1ef4..ed65f5e7 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -265,41 +265,28 @@ function desc(rnn) return d end -import Flux.Tracker: data, isleaf, istracked, track, back_, @back, unbroadcast - -mutable struct RNNCall{R} - rnn::R - reserve::CuVector{UInt8} - RNNCall{R}(rnn::R) where R = new(rnn) -end - -RNNCall(rnn) = RNNCall{typeof(rnn)}(rnn) - -function (c::RNNCall)(args...) - rs, result = forwardTrain(desc(c.rnn), args...) - c.reserve = rs - return result -end +import Flux.Tracker +import Flux.Tracker: data, istracked, track, unbroadcast, @grad, nobacksies istrain(m::CuRNNs, args...) = any(x -> x isa TrackedArray, (m.Wi, m.Wh, m.b, args...)) function (m::CuRNN{T})(h::CuParam{T}, x::CuParam{T}) where T <: Union{Float32,Float64} result = istrain(m, h, x) ? - track(RNNCall(m), x, h) : + track(m, x, h, m.Wi, m.Wh, m.b) : forward(desc(m), x, h) return result[2], result[1] end function (m::CuGRU{T})(h::CuParam{T}, x::CuParam{T}) where T <: Union{Float32,Float64} result = istrain(m, h, x) ? - track(RNNCall(m), x, h) : + track(m, x, h, m.Wi, m.Wh, m.b) : forward(desc(m), x, h) return result[2], result[1] end function (m::CuLSTM{T})(h::NTuple{2,CuParam{T}}, x::CuParam{T}) where T <: Union{Float32,Float64} result = istrain(m, h, x) ? - track(RNNCall(m), x, h[1], h[2]) : + track(m, x, h[1], h[2], m.Wi, m.Wh, m.b) : forward(desc(m), x, h[1], h[2]) return (result[2], result[3]), result[1] end @@ -308,44 +295,29 @@ end (m::CuGRU{T})(h::CuParam{T}, x) where T <: Union{Float32,Float64} = m(h, CuArray{T}(x)) (m::CuLSTM{T})(h::NTuple{2,CuParam{T}}, x) where T <: Union{Float32,Float64} = m(h, CuArray{T}(x)) -function accum_transpose!(dst::CuArray, src::CuArray) - function kernel(dst, src) - I = @cuindex dst - dst[I...] += src[reverse(I)...] - return +@grad function (m::Union{CuRNN,CuGRU})(x, h, Wi, Wh, b) + reserve, result = forwardTrain(desc(m), data(x), data(h)) + result, function (Δ) + y, ho = result + dy, dho = Δ + h_ = hBatch(x, data(h)) + dx, dh = backwardData(descs[m], y, dy, dho, h_, reserve) + (dWi, dWh), db = backwardWeights(descs[m], data(x), h_, y, reserve) + nobacksies(:RNN, (dx, unbroadcast(size(h), dh), dWi.', dWh.', db)) end - blk, thr = cudims(dst) - @cuda (blk, thr) kernel(dst, src) - return dst end -function back_(m::RNNCall{<:Union{CuRNN,CuGRU}}, y_, Δ, x, h) - y, ho = y_ - dy, dho = Δ - h_ = hBatch(x, data(h)) - dx, dh = backwardData(descs[m.rnn], y, dy, dho, h_, m.reserve) - @back(x, dx) - @back(h, unbroadcast(h, dh)) - (dWi, dWh), db = backwardWeights(descs[m.rnn], data(x), h_, y, m.reserve) - # We don't have to make this assumption, it's just slightly more complex. - @assert all(isleaf.((m.rnn.Wi, m.rnn.Wh, m.rnn.b))) - istracked(m.rnn.Wi) && accum_transpose!(m.rnn.Wi.grad, dWi) - istracked(m.rnn.Wh) && accum_transpose!(m.rnn.Wh.grad, dWh) - istracked(m.rnn.b) && accum_transpose!(m.rnn.b.grad, db) -end - -function back_(m::RNNCall{<:CuLSTM}, y_, Δ, x, h, c) - y, ho, co = y_ - dy, dho, dco = Δ - h_ = hBatch(x, data(h)) - c_ = hBatch(x, data(c)) - dx, dh, dc = backwardData(descs[m.rnn], y, dy, dho, dco, h_, c_, m.reserve) - @back(x, dx) - @back(h, unbroadcast(h, dh)) - @back(c, unbroadcast(h, dc)) - (dWi, dWh), db = backwardWeights(descs[m.rnn], data(x), h_, y, m.reserve) - @assert all(isleaf.((m.rnn.Wi, m.rnn.Wh, m.rnn.b))) - istracked(m.rnn.Wi) && accum_transpose!(m.rnn.Wi.grad, dWi) - istracked(m.rnn.Wh) && accum_transpose!(m.rnn.Wh.grad, dWh) - istracked(m.rnn.b) && accum_transpose!(m.rnn.b.grad, db) +@grad function (m::CuLSTM)(x, h, c, Wi, Wh, b) + reserve, result = forwardTrain(desc(m), data.((x, h, c))...) + result, function (Δ) + y, ho = result + dy, dho, dco = Δ + h_ = hBatch(x, data(h)) + c_ = hBatch(x, data(c)) + dx, dh, dc = backwardData(descs[m], y, dy, dho, dco, h_, c_, reserve) + (dWi, dWh), db = backwardWeights(descs[m], data(x), h_, y, reserve) + nobacksies(:RNN, + (dx, unbroadcast(size(h), dh), unbroadcast(size(c), dc), + dWi.', dWh.', db)) + end end From da7fe93b313316a6bcb7c4b6a38de4f1067bbfa6 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 17 Jul 2018 09:47:45 +0530 Subject: [PATCH 29/57] Fix test --- test/layers/normalisation.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/layers/normalisation.jl b/test/layers/normalisation.jl index 21e040ec..10b2b37d 100644 --- a/test/layers/normalisation.jl +++ b/test/layers/normalisation.jl @@ -59,7 +59,7 @@ end # 2×1 Array{Float64,2}: # 1.14495 # 1.14495 - @test isapprox(m.σ², .1 .* std(x.data, 2, corrected=false) .* (3 / 2).+ .9 .* [1., 1.], atol = 1.0e-6) + @test m.σ² ≈ 0.1 .* var(x.data, 2, corrected=false)*3/2 + 0.9 .* [1., 1.] testmode!(m) @test !m.active From 8874d9cccd1e203e2b59694e782a916ea039f19f Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 17 Jul 2018 09:53:39 +0530 Subject: [PATCH 30/57] Fix GPU test --- test/cuda/cudnn.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/cuda/cudnn.jl b/test/cuda/cudnn.jl index c2a70f9f..722cbc5f 100644 --- a/test/cuda/cudnn.jl +++ b/test/cuda/cudnn.jl @@ -16,7 +16,7 @@ using Flux.Tracker: TrackedArray, data g = ones(size(y)...) Flux.back!(y, g) - Flux.back!(cy, gpu(g))) + Flux.back!(cy, gpu(g)) @test m.γ.grad ≈ cpu(cm.γ.grad) @test m.β.grad ≈ cpu(cm.β.grad) From 4035641f00f1d1acfc7bfeb313494f21acaf5623 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 17 Jul 2018 10:06:26 +0530 Subject: [PATCH 31/57] Remove imports --- src/cuda/cudnn.jl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 20130b1d..6b94a6a9 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -176,9 +176,6 @@ end # Flux Interface -import ..Flux: Flux -import ..Tracker: track, back, @back, istracked, TrackedArray - (BN::Flux.BatchNorm)(x::Union{CuParam{T,4},CuParam{T,5}}, cache = nothing) where T<:Union{Float32, Float64} = batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ², BN.momentum; cache = cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active) @@ -188,10 +185,11 @@ batchnorm(g::TrackedArray, b::TrackedArray, x::TrackedArray, running_mean::CuArr batchnorm(g::TrackedArray, b::TrackedArray, x::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track(_batchnorm, g, b, x, running_mean, running_var, momentum, kw...) + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) @grad function batchnorm(g, b, x, running_mean, running_var, momentum; kw...) y = batchnorm(data(g), data(b), data(x), running_mean, running_var, momentum; kw...) deriv_tup = ∇batchnorm(data(g), data(b), data(x), Δ, running_mean, running_var, momentum, cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) y, Δ -> (nobacksies(:batchnorm, ∇batchnorm(data.(g, b, x, Δ), running_mean, running_var, momentum; kw...)), nothing, nothing, nothing) +end From 531ecccd3860bc9f9793471ccd1b0a7987c4422d Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 17 Jul 2018 10:14:23 +0530 Subject: [PATCH 32/57] Error statement --- src/cuda/cudnn.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 6b94a6a9..6f1d8b9e 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -189,7 +189,5 @@ batchnorm(g::TrackedArray, b::TrackedArray, x::CuArray{T}, running_mean::CuArray @grad function batchnorm(g, b, x, running_mean, running_var, momentum; kw...) y = batchnorm(data(g), data(b), data(x), running_mean, running_var, momentum; kw...) - deriv_tup = ∇batchnorm(data(g), data(b), data(x), Δ, running_mean, running_var, momentum, - cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) y, Δ -> (nobacksies(:batchnorm, ∇batchnorm(data.(g, b, x, Δ), running_mean, running_var, momentum; kw...)), nothing, nothing, nothing) end From 7dd5ec16c9ff64ec266811e95b3dac36ffa9dd52 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 17 Jul 2018 11:22:12 +0530 Subject: [PATCH 33/57] Fix --- src/cuda/cudnn.jl | 6 ++---- src/layers/normalise.jl | 3 ++- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 6f1d8b9e..abcd6737 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -187,7 +187,5 @@ batchnorm(g::TrackedArray, b::TrackedArray, x::CuArray{T}, running_mean::CuArray running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) -@grad function batchnorm(g, b, x, running_mean, running_var, momentum; kw...) - y = batchnorm(data(g), data(b), data(x), running_mean, running_var, momentum; kw...) - y, Δ -> (nobacksies(:batchnorm, ∇batchnorm(data.(g, b, x, Δ), running_mean, running_var, momentum; kw...)), nothing, nothing, nothing) -end +@grad batchnorm(g, b, x, running_mean, running_var, momentum; kw...) = + batchnorm(data.((g, b, x))..., running_mean, running_var, momentum; kw...), Δ -> (nobacksies(:batchnorm, ∇batchnorm(data.((g, b, x, Δ))..., running_mean, running_var, momentum; kw...))..., nothing, nothing, nothing) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 40edaec6..44754815 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -110,10 +110,11 @@ mutable struct BatchNorm active::Bool end +# NOTE: Keeping the ϵ smaller than 1e-5 is not supported by CUDNN function BatchNorm(chs::Integer, λ = identity; initβ = x->zeros(Float32,x), initγ = x->ones(Float32,x), - ϵ = 1f-8, + ϵ = 1f-5, momentum = 0.1f0) BatchNorm(λ, param(initβ(chs)), param(initγ(chs)), zeros(Float32, chs), ones(Float32, chs), ϵ, momentum, true) From 2cc0f112f150d6857f0f799ae4c50fdb9d770d17 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Fri, 27 Jul 2018 20:12:49 +0530 Subject: [PATCH 34/57] Updates --- src/cuda/cudnn.jl | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index abcd6737..6e2c9e75 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -58,7 +58,7 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray eps = T(1e-5), training = true) where T<:Union{Float32, Float64} dims = _wsize(x) if eps < BATCHNORM_MIN_EPS - warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) + # warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) eps = BATCHNORM_MIN_EPS end xd = TensorDesc(x) @@ -145,7 +145,7 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, end if eps < BATCHNORM_MIN_EPS - warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) + # warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) eps = BATCHNORM_MIN_EPS end @@ -187,5 +187,25 @@ batchnorm(g::TrackedArray, b::TrackedArray, x::CuArray{T}, running_mean::CuArray running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) +batchnorm(g::TrackedArray, b::CuArray{T}, x::TrackedArray, running_mean::CuArray{T}, + running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + +batchnorm(g::CuArray{T}, b::TrackedArray, x::CuArray{T}, running_mean::CuArray{T}, + running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + +batchnorm(g::CuArray{T}, b::TrackedArray, x::TrackedArray, running_mean::CuArray{T}, + running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + +batchnorm(g::TrackedArray, b::CuArray{T}, x::CuArray{T}, running_mean::CuArray{T}, + running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + +batchnorm(g::CuArray{T}, b::CuArray{T}, x::TrackedArray, running_mean::CuArray{T}, + running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + @grad batchnorm(g, b, x, running_mean, running_var, momentum; kw...) = batchnorm(data.((g, b, x))..., running_mean, running_var, momentum; kw...), Δ -> (nobacksies(:batchnorm, ∇batchnorm(data.((g, b, x, Δ))..., running_mean, running_var, momentum; kw...))..., nothing, nothing, nothing) From 6a41f823c89ac238e9bcc2a2c08497eb41efa825 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Fri, 3 Aug 2018 19:06:05 +0530 Subject: [PATCH 35/57] Update track function --- src/cuda/cudnn.jl | 14 +++++++------- src/cuda/curnn.jl | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 6e2c9e75..302da233 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -181,31 +181,31 @@ end batchnorm(g::TrackedArray, b::TrackedArray, x::TrackedArray, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::TrackedArray, b::TrackedArray, x::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::TrackedArray, b::CuArray{T}, x::TrackedArray, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::CuArray{T}, b::TrackedArray, x::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::CuArray{T}, b::TrackedArray, x::TrackedArray, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::TrackedArray, b::CuArray{T}, x::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::CuArray{T}, b::CuArray{T}, x::TrackedArray, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) @grad batchnorm(g, b, x, running_mean, running_var, momentum; kw...) = batchnorm(data.((g, b, x))..., running_mean, running_var, momentum; kw...), Δ -> (nobacksies(:batchnorm, ∇batchnorm(data.((g, b, x, Δ))..., running_mean, running_var, momentum; kw...))..., nothing, nothing, nothing) diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index ed65f5e7..f58e3b05 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -266,7 +266,7 @@ function desc(rnn) end import Flux.Tracker -import Flux.Tracker: data, istracked, track, unbroadcast, @grad, nobacksies +import Flux.Tracker: data, istracked, track, unbroadcast, @grad, nobacksies, track_kw istrain(m::CuRNNs, args...) = any(x -> x isa TrackedArray, (m.Wi, m.Wh, m.b, args...)) From 3f6c0655230aa29e35eb0af7843dc11914916697 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Fri, 3 Aug 2018 19:32:21 +0530 Subject: [PATCH 36/57] Update test --- test/cuda/cudnn.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/cuda/cudnn.jl b/test/cuda/cudnn.jl index 722cbc5f..3a02ed79 100644 --- a/test/cuda/cudnn.jl +++ b/test/cuda/cudnn.jl @@ -14,7 +14,7 @@ using Flux.Tracker: TrackedArray, data @test cpu(data(cy)) ≈ data(y) - g = ones(size(y)...) + g = rand(size(y)) Flux.back!(y, g) Flux.back!(cy, gpu(g)) From 3affed8ef075a9bfa4b23a295ab16f5a056ad0d5 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Fri, 10 Aug 2018 03:21:05 +0530 Subject: [PATCH 37/57] Remove track_kw --- src/cuda/cudnn.jl | 14 +++++++------- src/cuda/curnn.jl | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 302da233..6e2c9e75 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -181,31 +181,31 @@ end batchnorm(g::TrackedArray, b::TrackedArray, x::TrackedArray, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::TrackedArray, b::TrackedArray, x::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::TrackedArray, b::CuArray{T}, x::TrackedArray, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::CuArray{T}, b::TrackedArray, x::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::CuArray{T}, b::TrackedArray, x::TrackedArray, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::TrackedArray, b::CuArray{T}, x::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) batchnorm(g::CuArray{T}, b::CuArray{T}, x::TrackedArray, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; kw...) where T<:Union{Float32, Float64} = - track_kw(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) + track(batchnorm, g, b, x, running_mean, running_var, momentum; kw...) @grad batchnorm(g, b, x, running_mean, running_var, momentum; kw...) = batchnorm(data.((g, b, x))..., running_mean, running_var, momentum; kw...), Δ -> (nobacksies(:batchnorm, ∇batchnorm(data.((g, b, x, Δ))..., running_mean, running_var, momentum; kw...))..., nothing, nothing, nothing) diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index f58e3b05..ed65f5e7 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -266,7 +266,7 @@ function desc(rnn) end import Flux.Tracker -import Flux.Tracker: data, istracked, track, unbroadcast, @grad, nobacksies, track_kw +import Flux.Tracker: data, istracked, track, unbroadcast, @grad, nobacksies istrain(m::CuRNNs, args...) = any(x -> x isa TrackedArray, (m.Wi, m.Wh, m.b, args...)) From 4bd13c448fada794ead55aea83941157805b1299 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 11 Aug 2018 15:23:40 +0530 Subject: [PATCH 38/57] Add updates for julia0.7 --- src/cuda/cudnn.jl | 24 ++++++++-------- src/cuda/curnn.jl | 62 ++++++++++++++++++++--------------------- src/layers/normalise.jl | 4 +-- 3 files changed, 45 insertions(+), 45 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 0682ac55..761b6d78 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -78,9 +78,9 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray @check ccall((:cudnnBatchNormalizationForwardTraining, libcudnn), cudnnStatus_t, (cudnnHandle_t,cudnnBatchNormMode_t, Ptr{T}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, Ptr{T}, Cdouble, Ptr{T}, Ptr{T}, Cdouble, Ptr{T}, Ptr{T}), libcudnn_handle[], BATCHNORM_SPATIAL, @@ -99,9 +99,9 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray @check ccall((:cudnnBatchNormalizationForwardInference, libcudnn), cudnnStatus_t, (Ptr{cudnnHandle_t},cudnnBatchNormMode_t, Ptr{T}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, Ptr{T}, Ptr{T}, Ptr{T}, Cdouble), libcudnn_handle[], BATCHNORM_SPATIAL, @@ -153,10 +153,10 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, (cudnnHandle_t,cudnnBatchNormMode_t, Ptr{T}, Ptr{T}, Ptr{T}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{T}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, Ptr{T}, Ptr{T}, Cdouble, Ptr{T}, Ptr{T}), libcudnn_handle[], BATCHNORM_SPATIAL, Ref(T(alpha)), Ref(T(beta)), @@ -169,8 +169,8 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, else ivar = 1 ./ sqrt.(reshape(running_var, _wsize(x)) .+ eps) dx .= dy .* reshape(g, _wsize(x)) .* ivar - dg .= squeeze(sum(dy .* (x .- reshape(running_mean, _wsize(x))) .* ivar, _reddims(dy)), (1,2,4)) - db .= squeeze(sum(dy, _reddims(dy)), (1,2,4)) + dg .= squeeze(sum(dy .* (x .- reshape(running_mean, _wsize(x))) .* ivar, _reddims(dy)), dims = (1,2,4)) + db .= squeeze(sum(dy, _reddims(dy)), dims = (1,2,4)) end end diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index ed65f5e7..6c094047 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -36,14 +36,14 @@ mutable struct RNNDesc{T} params::CuVector{T} weights::NTuple{2,CuMatrix{T}} bias::CuVector{T} - ptr::Ptr{Void} + ptr::Ptr{Nothing} end -Base.unsafe_convert(::Type{Ptr{Void}}, d::RNNDesc) = d.ptr +Base.unsafe_convert(::Type{Ptr{Nothing}}, d::RNNDesc) = d.ptr function rnnParamSize(T, r, input) size = Csize_t[0] - @check ccall((:cudnnGetRNNParamsSize, libcudnn), cudnnStatus_t, (Ptr{Void},Ptr{Void},Ptr{Void},Ptr{Csize_t},Cint), + @check ccall((:cudnnGetRNNParamsSize, libcudnn), cudnnStatus_t, (Ptr{Nothing},Ptr{Nothing},Ptr{Nothing},Ptr{Csize_t},Cint), libcudnn_handle[], r, TensorDesc(T, (1,input,1)), size, cudnnDataType(T)) return Int(size[])÷sizeof(T) end @@ -53,26 +53,26 @@ ngates(r::RNNDesc) = ngates(r.mode) function RNNDesc{T}(mode::Int, input::Int, hidden::Int; layers = 1) where T d = [C_NULL] - @check ccall((:cudnnCreateRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Ptr{Void}},),d) + @check ccall((:cudnnCreateRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Ptr{Nothing}},),d) dropoutDesc = DropoutDesc(0) inputMode = LINEAR_INPUT direction = UNIDIRECTIONAL algo = RNN_ALGO_STANDARD - @check ccall((:cudnnSetRNNDescriptor_v6,libcudnn), cudnnStatus_t, (Ptr{Void},Ptr{Void},Cint,Cint,Ptr{Void},Cint,Cint,Cint,Cint,Cint), + @check ccall((:cudnnSetRNNDescriptor_v6,libcudnn), cudnnStatus_t, (Ptr{Nothing},Ptr{Nothing},Cint,Cint,Ptr{Nothing},Cint,Cint,Cint,Cint,Cint), libcudnn_handle[],d[],hidden,layers,dropoutDesc,inputMode,direction,mode,algo,cudnnDataType(T)) w = cuzeros(T, rnnParamSize(T, d[], input)) # TODO: avoid reserve allocation here rd = RNNDesc{T}(mode, input, hidden, w, params(w, input, hidden, ngates(mode))..., d[]) finalizer(rd, x -> - @check ccall((:cudnnDestroyRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Void},),x)) + @check ccall((:cudnnDestroyRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Nothing},),x)) return rd end function rnnWorkspaceSize(r::RNNDesc, seqlen, xdesc) size = Csize_t[0] - @check ccall((:cudnnGetRNNWorkspaceSize, libcudnn), cudnnStatus_t, (Ptr{Void},Ptr{Void},Cint,Ptr{Ptr{Void}},Ptr{Csize_t}), + @check ccall((:cudnnGetRNNWorkspaceSize, libcudnn), cudnnStatus_t, (Ptr{Nothing},Ptr{Nothing},Cint,Ptr{Ptr{Nothing}},Ptr{Csize_t}), libcudnn_handle[], r, seqlen, xdesc, size) return Int(size[]) end @@ -89,7 +89,7 @@ getworkspace(r::RNNDesc, seqlen, xdesc) = function rnnTrainingReserveSize(r::RNNDesc, seqlen, xdesc) size = Csize_t[0] - @check ccall((:cudnnGetRNNTrainingReserveSize,libcudnn), cudnnStatus_t, (Ptr{Void}, Ptr{Void}, Cint, Ptr{Ptr{Void}}, Ptr{Csize_t}), + @check ccall((:cudnnGetRNNTrainingReserveSize,libcudnn), cudnnStatus_t, (Ptr{Nothing}, Ptr{Nothing}, Cint, Ptr{Ptr{Nothing}}, Ptr{Csize_t}), libcudnn_handle[], r, seqlen, xdesc, size) return Int(size[]) end @@ -98,19 +98,19 @@ function cudnnRNNForward(rnn::RNNDesc{T}, seqlen, xd, x, hd, h, cd, c, wd, w, yd workspace, reserve=nothing) where T if reserve == nothing @check ccall((:cudnnRNNForwardInference, libcudnn), cudnnStatus_t, - (Ptr{Void}, Ptr{Void}, Cint, - Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, - Ptr{Void}, Csize_t), + (Ptr{Nothing}, Ptr{Nothing}, Cint, + Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Csize_t), libcudnn_handle[], rnn, seqlen, xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, workspace, length(workspace)) else @check ccall((:cudnnRNNForwardTraining, libcudnn), cudnnStatus_t, - (Ptr{Void}, Ptr{Void}, Cint, - Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Csize_t, Ptr{Void}, Csize_t), + (Ptr{Nothing}, Ptr{Nothing}, Cint, + Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Csize_t, Ptr{Nothing}, Csize_t), libcudnn_handle[], rnn, seqlen, xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, workspace, length(workspace), reserve, length(reserve)) @@ -119,7 +119,7 @@ end xDesc(x) = [TensorDesc(eltype(x), (1, size(x, 1), size(x, 2)))] -hDesc(h::Void) = C_NULL, C_NULL +hDesc(h::Nothing) = C_NULL, C_NULL hDesc(x::Integer) = (@assert x == 0; hDesc(nothing)) function hDesc(h::CuArray) TensorDesc(eltype(h), (size(h, 1), size(h, 2), 1)), h @@ -166,18 +166,18 @@ forwardTrain(rnn::RNNDesc{T}, x::CuArray{T}, h::CuArray{T}, c = nothing) where T function cudnnRNNBackwardData(rnn::RNNDesc{T}, seqlen, yd, y, dyd, dy, dhod, dho, dcod, dco, wd, w, hd, h, cd, c, dxd, dx, dhd, dh, dcd, dc, ws, rs) where T @check ccall((:cudnnRNNBackwardData,libcudnn),cudnnStatus_t, - (Ptr{Void}, Ptr{Void}, Cint, - Ptr{Ptr{Void}}, Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, - Ptr{T}, Ptr{Ptr{Void}}, Ptr{T}, Ptr{Void}, Ptr{T}, Ptr{Void}, Ptr{T}, - Ptr{Void}, Csize_t, Ptr{Void}, Csize_t), + (Ptr{Nothing}, Ptr{Nothing}, Cint, + Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, + Ptr{T}, Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, + Ptr{Nothing}, Csize_t, Ptr{Nothing}, Csize_t), libcudnn_handle[], rnn, seqlen, yd, y, dyd, dy, dhod, dho, dcod, dco, wd, w, hd, h, cd, c, dxd, dx, dhd, dh, dcd, dc, ws, length(ws), rs, length(rs)) end function backwardData(rnn::RNNDesc{T}, y, dy_, dho, dco, h, c, reserve) where T # Same as above, any more efficient way? - dy = dy_ isa Integer ? zeros(y) : dy_ + dy = dy_ isa Integer ? zero(y) : dy_ yd = xDesc(y) dx = y isa AbstractVector ? similar(dy, rnn.input) : similar(dy, rnn.input, size(dy, 2)) dh = similar(h) @@ -196,19 +196,19 @@ backwardData(rnn, y, dy, dho, hx, reserve) = function cudnnRNNBackwardWeights(rnn::RNNDesc{T}, seqlen, xd, x, hd, h, yd, y, dwd, dw, workspace, reserve) where T @check ccall((:cudnnRNNBackwardWeights,libcudnn), cudnnStatus_t, - (Ptr{Void}, Ptr{Void}, Cint, # handle, rnnDesc, seqLength - Ptr{Ptr{Void}}, Ptr{T}, #x - Ptr{Void}, Ptr{T}, #hx - Ptr{Ptr{Void}}, Ptr{T}, #y - Ptr{Void}, Csize_t, #ws - Ptr{Void}, Ptr{T}, #dw - Ptr{Void}, Csize_t), #rs + (Ptr{Nothing}, Ptr{Nothing}, Cint, # handle, rnnDesc, seqLength + Ptr{Ptr{Nothing}}, Ptr{T}, #x + Ptr{Nothing}, Ptr{T}, #hx + Ptr{Ptr{Nothing}}, Ptr{T}, #y + Ptr{Nothing}, Csize_t, #ws + Ptr{Nothing}, Ptr{T}, #dw + Ptr{Nothing}, Csize_t), #rs libcudnn_handle[], rnn, seqlen, xd, x, hd, h, yd, y, workspace, length(workspace), dwd, dw, reserve, length(reserve)) end function backwardWeights(rnn::RNNDesc{T}, x, h, y, reserve) where T - dw = zeros(rnn.params) + dw = zero(rnn.params) cudnnRNNBackwardWeights(rnn, 1, xDesc(x), x, hDesc(h)..., xDesc(y), y, FilterDesc(T, (1, 1, length(dw))), dw, diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index e0d712bd..065187a1 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -141,8 +141,8 @@ function (BN::BatchNorm)(x) # update moving mean/std mtm = data(convert(T, BN.momentum)) - BN.μ = ((1 - mtm) .* BN.μ .+ mtm .* squeeze(data(μ), (axes...))) - BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* squeeze(data(σ²), (axes...)) .* m ./ (m - 1)) + BN.μ = ((1 - mtm) .* BN.μ .+ mtm .* squeeze(data(μ), dims = (axes...))) + BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* squeeze(data(σ²), dims = (axes...)) .* m ./ (m - 1)) end ϵ = convert(T, BN.ϵ) From 5fd8ffa47e2273fa38492beff3b8003b020478c4 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 11 Sep 2018 15:44:07 +0530 Subject: [PATCH 39/57] CuRNN updates --- src/cuda/curnn.jl | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index 6c094047..363670ff 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -22,10 +22,10 @@ const RNN_ALGO_PERSIST_DYNAMIC = 2 # LSTM: [weight, bias] × [input, hidden] × [input, forget, newmem, output] function params(w::CuVector, input, hidden, n = 1) - slice(offset, shape) = reshape(w[offset+(1:prod(shape))], shape) + slice(offset, shape) = reshape(w[offset.+(1:prod(shape))], shape) wx = slice(0, (input, hidden*n)) wh = slice(length(wx), (hidden, hidden*n)) - bias = w[length(wx)+length(wh) + (1:hidden*n)] + bias = w[length(wx)+length(wh) .+ (1:hidden*n)] (wx, wh), bias end @@ -65,8 +65,9 @@ function RNNDesc{T}(mode::Int, input::Int, hidden::Int; layers = 1) where T w = cuzeros(T, rnnParamSize(T, d[], input)) # TODO: avoid reserve allocation here rd = RNNDesc{T}(mode, input, hidden, w, params(w, input, hidden, ngates(mode))..., d[]) - finalizer(rd, x -> - @check ccall((:cudnnDestroyRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Nothing},),x)) + finalizer(rd) do x + @check ccall((:cudnnDestroyRNNDescriptor,libcudnn),cudnnStatus_t,(Ptr{Nothing},),x) + end return rd end @@ -220,17 +221,17 @@ end import ..Flux: Flux, relu import ..Tracker: TrackedArray -using CUDAnative -using CuArrays: @cuindex, cudims +using .CuArrays.CUDAnative +using .CuArrays: @cuindex, cudims -function copy_transpose!(dst::CuArray, src::CuArray) +function LinearAlgebra.copy_transpose!(dst::CuArray, src::CuArray) function kernel(dst, src) I = @cuindex dst dst[I...] = src[reverse(I)...] return end blk, thr = cudims(dst) - @cuda (blk, thr) kernel(dst, src) + @cuda blocks=blk threads=thr kernel(dst, src) return dst end @@ -303,7 +304,7 @@ end h_ = hBatch(x, data(h)) dx, dh = backwardData(descs[m], y, dy, dho, h_, reserve) (dWi, dWh), db = backwardWeights(descs[m], data(x), h_, y, reserve) - nobacksies(:RNN, (dx, unbroadcast(size(h), dh), dWi.', dWh.', db)) + nobacksies(:RNN, (dx, unbroadcast(size(h), dh), transpose(dWi), transpose(dWh), db)) end end @@ -318,6 +319,6 @@ end (dWi, dWh), db = backwardWeights(descs[m], data(x), h_, y, reserve) nobacksies(:RNN, (dx, unbroadcast(size(h), dh), unbroadcast(size(c), dc), - dWi.', dWh.', db)) + transpose(dWi), transpose(dWh), db)) end end From 7e83852862b029904be15098004d8d56b1dfee9c Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 11 Sep 2018 15:58:17 +0530 Subject: [PATCH 40/57] Fixes --- src/layers/normalise.jl | 25 ++++++++++++++----------- test/cuda/cudnn.jl | 2 -- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 3706f473..41252bc9 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -124,32 +124,35 @@ function (BN::BatchNorm)(x) size(x, ndims(x)-1) == length(BN.β) || error("BatchNorm expected $(length(BN.β)) channels, got $(size(x, ndims(x)-1))") γ, β = BN.γ, BN.β - dims = ndims(x) + dims = length(size(x)) + channels = size(x, dims-1) affine_shape = ones(Int, dims) - affine_shape[end-1] = size(x, dims-1) - T = eltype(x) + affine_shape[end-1] = channels + m = prod(size(x)[1:end-2]) * size(x)[end] if !BN.active μ = reshape(BN.μ, affine_shape...) - σ² = reshape(BN.σ², affine_shape...) + σ = reshape(BN.σ, affine_shape...) else + T = eltype(x) + ϵ = data(convert(T, BN.ϵ)) axes = [1:dims-2; dims] # axes to reduce along (all but channels axis) - m = prod(size(x, axes...)) - μ = mean(x, axes) - σ² = sum((x.-μ).^2, axes) ./ m + μ = mean(x, dims = axes) + σ² = sum((x .- μ) .^ 2, dims = axes) ./ m # update moving mean/std mtm = data(convert(T, BN.momentum)) - BN.μ = ((1 - mtm) .* BN.μ .+ mtm .* dropdims(data(μ), dims = (axes...))) + BN.μ = (1 - mtm) .* BN.μ .+ mtm .* dropdims(data(μ), dims = (axes...,)) BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* dropdims(data(σ²), dims = (axes...)) .* m ./ (m - 1)) end - ϵ = convert(T, BN.ϵ) - BN.λ.(reshape(γ, affine_shape...) .* ((x .- μ) ./ sqrt.(σ² .+ ϵ)) .+ reshape(β, affine_shape...)) + let λ = BN.λ + λ.(reshape(γ, affine_shape...) .* ((x .- μ) ./ sqrt.(σ² .+ ϵ)) .+ reshape(β, affine_shape...)) + end end -treelike(BatchNorm) +@treelike BatchNorm _testmode!(BN::BatchNorm, test) = (BN.active = !test) diff --git a/test/cuda/cudnn.jl b/test/cuda/cudnn.jl index 3404ef5d..57b258b5 100644 --- a/test/cuda/cudnn.jl +++ b/test/cuda/cudnn.jl @@ -1,8 +1,6 @@ using Flux, Flux.Tracker, CuArrays, Test using Flux.Tracker: TrackedArray, data -@info "Testing Flux CUDNN" - @testset "CUDNN BatchNorm" begin x = TrackedArray(rand(10, 10, 3, 1)) m = BatchNorm(3) From c4f87ff15c9f4114b06718d35d9ac3fd8bfa35a9 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 11 Sep 2018 16:21:55 +0530 Subject: [PATCH 41/57] Minor fixes: --- src/layers/normalise.jl | 57 +++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 36 deletions(-) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 41252bc9..1961fbe3 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -1,6 +1,6 @@ """ - testmode!(m, val=true) - + testmode!(m) + testmode!(m, false) Put layers like [`Dropout`](@ref) and [`BatchNorm`](@ref) into testing mode (or back to training mode with `false`). """ @@ -13,11 +13,9 @@ _testmode!(m, test) = nothing """ Dropout(p) - A Dropout layer. For each input, either sets that input to `0` (with probability `p`) or scales it by `1/(1-p)`. This is used as a regularisation, i.e. it reduces overfitting during training. - Does nothing to the input once in [`testmode!`](@ref). """ mutable struct Dropout{F} @@ -43,9 +41,7 @@ end _testmode!(a::Dropout, test) = (a.active = !test) """ - LayerNorm(h::Integer) - A [normalisation layer](https://arxiv.org/pdf/1607.06450.pdf) designed to be used with recurrent hidden states of size `h`. Normalises the mean/stddev of each input before applying a per-neuron gain/bias. @@ -69,23 +65,17 @@ end BatchNorm(channels::Integer, σ = identity; initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1) - Batch Normalization layer. The `channels` input should be the size of the channel dimension in your data (see below). - Given an array with `N` dimensions, call the `N-1`th the channel dimension. (For a batch of feature vectors this is just the data dimension, for `WHCN` images it's the usual channel dimension.) - `BatchNorm` computes the mean and variance for each each `W×H×1×N` slice and shifts them to have a new mean and variance (corresponding to the learnable, per-channel `bias` and `scale` parameters). - See [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf). - Example: - ```julia m = Chain( Dense(28^2, 64), @@ -93,32 +83,23 @@ m = Chain( Dense(64, 10), BatchNorm(10), softmax) - -y = m(rand(28^2, 10)) ``` - -To use the layer at test time set [`testmode!(m, true)`](@ref). """ -mutable struct BatchNorm - λ # activation function - β # bias - γ # scale - μ # moving mean - σ² # moving var - ϵ - momentum +mutable struct BatchNorm{F,V,W,N} + λ::F # activation function + β::V # bias + γ::V # scale + μ::W # moving mean + σ²::W # moving std + ϵ::N + momentum::N active::Bool end -# NOTE: Keeping the ϵ smaller than 1e-5 is not supported by CUDNN -function BatchNorm(chs::Integer, λ = identity; - initβ = (i) -> zeros(i), - initγ = (i) -> ones(i), - ϵ = 1f-5, - momentum = 0.1) +BatchNorm(chs::Integer, λ = identity; + initβ = (i) -> zeros(i), initγ = (i) -> ones(i), ϵ = 1e-5, momentum = .1) = BatchNorm(λ, param(initβ(chs)), param(initγ(chs)), - zeros(Float32, chs), ones(Float32, chs), ϵ, momentum, true) -end + zeros(chs), ones(chs), ϵ, momentum, true) function (BN::BatchNorm)(x) size(x, ndims(x)-1) == length(BN.β) || @@ -132,7 +113,7 @@ function (BN::BatchNorm)(x) if !BN.active μ = reshape(BN.μ, affine_shape...) - σ = reshape(BN.σ, affine_shape...) + σ² = reshape(BN.σ², affine_shape...) else T = eltype(x) @@ -143,8 +124,8 @@ function (BN::BatchNorm)(x) # update moving mean/std mtm = data(convert(T, BN.momentum)) - BN.μ = (1 - mtm) .* BN.μ .+ mtm .* dropdims(data(μ), dims = (axes...,)) - BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* dropdims(data(σ²), dims = (axes...)) .* m ./ (m - 1)) + BN.μ = (1 - mtm) .* BN.μ .+ mtm .* dropdims(data(μ), dims = axes) + BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* dropdims(data(σ²), dims = axes) .* m ./ (m - 1)) end let λ = BN.λ @@ -152,7 +133,11 @@ function (BN::BatchNorm)(x) end end -@treelike BatchNorm +children(BN::BatchNorm) = + (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active) + +mapchildren(f, BN::BatchNorm) = # e.g. mapchildren(cu, BN) + BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ), BN.ϵ, BN.momentum, BN.active) _testmode!(BN::BatchNorm, test) = (BN.active = !test) From 7e7a501efddf5d1cee1ee0e7dd49e43c9d827806 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 11 Sep 2018 16:32:14 +0530 Subject: [PATCH 42/57] Fix tests --- src/cuda/cudnn.jl | 2 -- src/cuda/curnn.jl | 2 ++ test/layers/normalisation.jl | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 9b5eace2..9a39005a 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -2,8 +2,6 @@ using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, cudnnBatchNormMode_t, cudnnHandle_t, libcudnn_handle, cudnnDataType, TensorDesc, FilterDesc import ..Flux: data -using LinearAlgebra - mutable struct DropoutDesc ptr::Ptr{Nothing} states::CuVector{UInt8} diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index 363670ff..c097d6fe 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -1,6 +1,8 @@ using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, cudnnBatchNormMode_t, cudnnHandle_t, libcudnn_handle, cudnnDataType, TensorDesc, FilterDesc +using LinearAlgebra + const RNN_RELU = 0 # Stock RNN with ReLu activation const RNN_TANH = 1 # Stock RNN with tanh activation const LSTM = 2 # LSTM with no peephole connections diff --git a/test/layers/normalisation.jl b/test/layers/normalisation.jl index d736af42..033890ff 100644 --- a/test/layers/normalisation.jl +++ b/test/layers/normalisation.jl @@ -55,11 +55,11 @@ end # .1 * 4 + 0 = .4 @test m.μ ≈ reshape([0.3, 0.4], 2, 1) - # julia> .1 .* std(x, dims = 2, corrected=false) .* (3 / 2).+ .9 .* [1., 1.] + # julia> .1 .* var(x, dims = 2, corrected=false) .* (3 / 2).+ .9 .* [1., 1.] # 2×1 Array{Float64,2}: - # 1.14495 - # 1.14495 - @test m.σ² ≈ .1 .* std(x.data, dims = 2, corrected=false) .* (3 / 2).+ .9 .* [1., 1.] + # 1.3 + # 1.3 + @test m.σ² ≈ .1 .* var(x.data, dims = 2, corrected=false) .* (3 / 2).+ .9 .* [1., 1.] testmode!(m) @test !m.active From 7d06f654f0edc9d37d7ba9af59147cb3036aa2cd Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 11 Sep 2018 16:58:05 +0530 Subject: [PATCH 43/57] Fix tests --- src/layers/normalise.jl | 10 +++--- test/cuda/cuda.jl | 72 ++++++++++++++++++++--------------------- test/cuda/curnn.jl | 2 +- test/runtests.jl | 16 ++++----- 4 files changed, 50 insertions(+), 50 deletions(-) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 1961fbe3..396f474c 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -124,20 +124,20 @@ function (BN::BatchNorm)(x) # update moving mean/std mtm = data(convert(T, BN.momentum)) - BN.μ = (1 - mtm) .* BN.μ .+ mtm .* dropdims(data(μ), dims = axes) - BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* dropdims(data(σ²), dims = axes) .* m ./ (m - 1)) + BN.μ = (1 - mtm) .* BN.μ .+ mtm .* reshape(data(μ), :) + BN.σ² = ((1 - mtm) .* BN.σ² .+ mtm .* reshape(data(σ²), :) .* m ./ (m - 1)) end let λ = BN.λ - λ.(reshape(γ, affine_shape...) .* ((x .- μ) ./ sqrt.(σ² .+ ϵ)) .+ reshape(β, affine_shape...)) + λ.(reshape(γ, affine_shape...) .* ((x .- μ) ./ sqrt.(σ² .+ BN.ϵ)) .+ reshape(β, affine_shape...)) end end children(BN::BatchNorm) = - (BN.λ, BN.β, BN.γ, BN.μ, BN.σ, BN.ϵ, BN.momentum, BN.active) + (BN.λ, BN.β, BN.γ, BN.μ, BN.σ², BN.ϵ, BN.momentum, BN.active) mapchildren(f, BN::BatchNorm) = # e.g. mapchildren(cu, BN) - BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ), BN.ϵ, BN.momentum, BN.active) + BatchNorm(BN.λ, f(BN.β), f(BN.γ), f(BN.μ), f(BN.σ²), BN.ϵ, BN.momentum, BN.active) _testmode!(BN::BatchNorm, test) = (BN.active = !test) diff --git a/test/cuda/cuda.jl b/test/cuda/cuda.jl index 01410313..ddc070f7 100644 --- a/test/cuda/cuda.jl +++ b/test/cuda/cuda.jl @@ -1,44 +1,44 @@ using Flux, Flux.Tracker, CuArrays, Test using Flux: gpu -@info "Testing GPU Support" - -@testset "CuArrays" begin - -CuArrays.allowscalar(false) - -x = param(randn(5, 5)) -cx = gpu(x) -@test cx isa TrackedArray && cx.data isa CuArray - -x = Flux.onehotbatch([1, 2, 3], 1:3) -cx = gpu(x) -@test cx isa Flux.OneHotMatrix && cx.data isa CuArray -@test (cx .+ 1) isa CuArray - -m = Chain(Dense(10, 5, tanh), Dense(5, 2), softmax) -cm = gpu(m) - -@test all(p isa TrackedArray && p.data isa CuArray for p in params(cm)) -@test cm(gpu(rand(10, 10))) isa TrackedArray{Float32,2,CuArray{Float32,2}} - -x = [1,2,3] -cx = gpu(x) -@test Flux.crossentropy(x,x) ≈ Flux.crossentropy(cx,cx) - -xs = param(rand(5,5)) -ys = Flux.onehotbatch(1:5,1:5) -@test collect(cu(xs) .+ cu(ys)) ≈ collect(xs .+ ys) - -c = gpu(Conv((2,2),3=>4)) -l = c(gpu(rand(10,10,3,2))) -Flux.back!(sum(l)) - -end +# @info "Testing GPU Support" +# +# @testset "CuArrays" begin +# +# CuArrays.allowscalar(false) +# +# x = param(randn(5, 5)) +# cx = gpu(x) +# @test cx isa TrackedArray && cx.data isa CuArray +# +# x = Flux.onehotbatch([1, 2, 3], 1:3) +# cx = gpu(x) +# @test cx isa Flux.OneHotMatrix && cx.data isa CuArray +# @test (cx .+ 1) isa CuArray +# +# m = Chain(Dense(10, 5, tanh), Dense(5, 2), softmax) +# cm = gpu(m) +# +# @test all(p isa TrackedArray && p.data isa CuArray for p in params(cm)) +# @test cm(gpu(rand(10, 10))) isa TrackedArray{Float32,2,CuArray{Float32,2}} +# +# x = [1,2,3] +# cx = gpu(x) +# @test Flux.crossentropy(x,x) ≈ Flux.crossentropy(cx,cx) +# +# xs = param(rand(5,5)) +# ys = Flux.onehotbatch(1:5,1:5) +# @test collect(cu(xs) .+ cu(ys)) ≈ collect(xs .+ ys) +# +# c = gpu(Conv((2,2),3=>4)) +# l = c(gpu(rand(10,10,3,2))) +# Flux.back!(sum(l)) +# +# end if CuArrays.cudnn_available() - info("Testing Flux/CUDNN BatchNorm") + @info "Testing Flux/CUDNN BatchNorm" include("cudnn.jl") - info("Testing Flux/CUDNN RNN") + @info "Testing Flux/CUDNN RNN" include("curnn.jl") end diff --git a/test/cuda/curnn.jl b/test/cuda/curnn.jl index 156b330d..3f5e1819 100644 --- a/test/cuda/curnn.jl +++ b/test/cuda/curnn.jl @@ -1,4 +1,4 @@ -using Flux, CuArrays, Base.Test +using Flux, CuArrays, Test @testset "RNN" begin @testset for R in [RNN, GRU, LSTM] diff --git a/test/runtests.jl b/test/runtests.jl index 7a55dca6..02bb6074 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -13,7 +13,7 @@ if Base.JLOptions().check_bounds == 1 exit() end -using Flux, Test, Random +using Flux, Test, Random, Statistics using Random Random.seed!(0) @@ -25,20 +25,20 @@ insert!(LOAD_PATH, 2, "@v#.#") @info "Testing Basics" -include("utils.jl") -include("onehot.jl") -include("optimise.jl") -include("data.jl") +# include("utils.jl") +# include("onehot.jl") +# include("optimise.jl") +# include("data.jl") @info "Testing Layers" include("layers/normalisation.jl") -include("layers/stateless.jl") -include("layers/conv.jl") +# include("layers/stateless.jl") +# include("layers/conv.jl") @info "Running Gradient Checks" -include("tracker.jl") +# include("tracker.jl") if Base.find_package("CuArrays") != nothing include("cuda/cuda.jl") From dd2fa77681bc589b6e01d6f0748ac6b4f5803dc5 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 11 Sep 2018 17:06:18 +0530 Subject: [PATCH 44/57] Fix tests --- test/cuda/cudnn.jl | 2 +- test/layers/normalisation.jl | 2 +- test/runtests.jl | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/test/cuda/cudnn.jl b/test/cuda/cudnn.jl index 57b258b5..5a51a0b6 100644 --- a/test/cuda/cudnn.jl +++ b/test/cuda/cudnn.jl @@ -14,7 +14,7 @@ using Flux.Tracker: TrackedArray, data @test cpu(data(cy)) ≈ data(y) - g = rand(size(y)) + g = rand(size(y)...) Flux.back!(y, g) Flux.back!(cy, gpu(g)) diff --git a/test/layers/normalisation.jl b/test/layers/normalisation.jl index 033890ff..e3b9e88c 100644 --- a/test/layers/normalisation.jl +++ b/test/layers/normalisation.jl @@ -65,7 +65,7 @@ end @test !m.active x′ = m(x).data - @test x′[1] ≈ (1 .- 0.3) / 1.1449489742783179 + @test x′[1] ≈ (1 .- 0.3) / sqrt(1.3) end # with activation function diff --git a/test/runtests.jl b/test/runtests.jl index 02bb6074..892b9ffb 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -25,20 +25,20 @@ insert!(LOAD_PATH, 2, "@v#.#") @info "Testing Basics" -# include("utils.jl") -# include("onehot.jl") -# include("optimise.jl") -# include("data.jl") +include("utils.jl") +include("onehot.jl") +include("optimise.jl") +include("data.jl") @info "Testing Layers" include("layers/normalisation.jl") -# include("layers/stateless.jl") -# include("layers/conv.jl") +include("layers/stateless.jl") +include("layers/conv.jl") @info "Running Gradient Checks" -# include("tracker.jl") +include("tracker.jl") if Base.find_package("CuArrays") != nothing include("cuda/cuda.jl") From cc812a8f89f36be8b0d3e04cc21eacc8e5a21963 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 11 Sep 2018 17:30:54 +0530 Subject: [PATCH 45/57] Fix tests --- test/layers/normalisation.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/layers/normalisation.jl b/test/layers/normalisation.jl index e3b9e88c..18276140 100644 --- a/test/layers/normalisation.jl +++ b/test/layers/normalisation.jl @@ -65,7 +65,7 @@ end @test !m.active x′ = m(x).data - @test x′[1] ≈ (1 .- 0.3) / sqrt(1.3) + @test isapprox(x′[1], (1 .- 0.3) / sqrt(1.3), atol = 1.0e-5) end # with activation function From 9bd2c4e0062b99be0605283d6d15377e19afd993 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 6 Oct 2018 00:00:46 +0530 Subject: [PATCH 46/57] Update curnn.jl --- src/cuda/curnn.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index c097d6fe..b57e81f8 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -306,7 +306,7 @@ end h_ = hBatch(x, data(h)) dx, dh = backwardData(descs[m], y, dy, dho, h_, reserve) (dWi, dWh), db = backwardWeights(descs[m], data(x), h_, y, reserve) - nobacksies(:RNN, (dx, unbroadcast(size(h), dh), transpose(dWi), transpose(dWh), db)) + nobacksies(:RNN, (dx, unbroadcast(h, dh), transpose(dWi), transpose(dWh), db)) end end @@ -320,7 +320,7 @@ end dx, dh, dc = backwardData(descs[m], y, dy, dho, dco, h_, c_, reserve) (dWi, dWh), db = backwardWeights(descs[m], data(x), h_, y, reserve) nobacksies(:RNN, - (dx, unbroadcast(size(h), dh), unbroadcast(size(c), dc), + (dx, unbroadcast(h, dh), unbroadcast(c, dc), transpose(dWi), transpose(dWh), db)) end end From b838c0bc040e399bba72fdd3c75643c543a61c75 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Fri, 26 Oct 2018 10:24:30 +0530 Subject: [PATCH 47/57] Update the libcudnn_handle --- src/cuda/cudnn.jl | 13 ++++++------- src/cuda/curnn.jl | 18 +++++++++--------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 9a39005a..04d937d8 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -1,5 +1,5 @@ using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, - cudnnBatchNormMode_t, cudnnHandle_t, libcudnn_handle, cudnnDataType, TensorDesc, FilterDesc + cudnnBatchNormMode_t, cudnnHandle_t, handle, cudnnDataType, TensorDesc, FilterDesc import ..Flux: data mutable struct DropoutDesc @@ -13,11 +13,11 @@ function DropoutDesc(ρ::Real; seed::Integer=0) d = [C_NULL] s = Csize_t[0] @check ccall((:cudnnCreateDropoutDescriptor,libcudnn), cudnnStatus_t, (Ptr{Ptr{Nothing}},), d) - @check ccall((:cudnnDropoutGetStatesSize,libcudnn),cudnnStatus_t,(Ptr{Nothing},Ptr{Csize_t}),libcudnn_handle[],s) + @check ccall((:cudnnDropoutGetStatesSize,libcudnn),cudnnStatus_t,(Ptr{Nothing},Ptr{Csize_t}),handle(),s) states = CuArray{UInt8}(s[]) # TODO: can we drop this when ρ=0? desc = DropoutDesc(d[], states) @check ccall((:cudnnSetDropoutDescriptor,libcudnn),cudnnStatus_t,(Ptr{Nothing},Ptr{Nothing},Cfloat,Ptr{Nothing},Csize_t,Culonglong), - desc,libcudnn_handle[],ρ,states,length(states),seed) + desc,handle(),ρ,states,length(states),seed) finalizer(desc) do x @check ccall((:cudnnDestroyDropoutDescriptor,libcudnn),cudnnStatus_t,(Ptr{Nothing},),x) end @@ -84,7 +84,7 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray Ptr{Nothing}, Ptr{T}, Ptr{T}, Cdouble, Ptr{T}, Ptr{T}, Cdouble, Ptr{T}, Ptr{T}), - libcudnn_handle[], BATCHNORM_SPATIAL, + handle(), BATCHNORM_SPATIAL, Ref(T(alpha)), Ref(T(beta)), xd, x, yd, y, @@ -105,7 +105,7 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray Ptr{Nothing}, Ptr{T}, Ptr{T}, Ptr{T}, Ptr{T}, Cdouble), - libcudnn_handle[], BATCHNORM_SPATIAL, + handle(), BATCHNORM_SPATIAL, Ref(T(alpha)), Ref(T(beta)), xd, x, yd, y, @@ -146,7 +146,6 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, end if eps < BATCHNORM_MIN_EPS - # warn("eps ",eps," is too small for CuDNN so eps has been assigned the value ", BATCHNORM_MIN_EPS) eps = BATCHNORM_MIN_EPS end @@ -159,7 +158,7 @@ function cudnnBNBackward!(dg::CuArray{T}, g::CuArray{T}, db::CuArray{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{T}, Ptr{T}, Cdouble, Ptr{T}, Ptr{T}), - libcudnn_handle[], BATCHNORM_SPATIAL, + handle(), BATCHNORM_SPATIAL, Ref(T(alpha)), Ref(T(beta)), Ref(T(dalpha)), Ref(T(dbeta)), xd, x, diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index da94a192..957c63b6 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -1,5 +1,5 @@ using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, - cudnnBatchNormMode_t, cudnnHandle_t, libcudnn_handle, cudnnDataType, TensorDesc, FilterDesc + cudnnBatchNormMode_t, cudnnHandle_t, handle, cudnnDataType, TensorDesc, FilterDesc using LinearAlgebra @@ -46,7 +46,7 @@ Base.unsafe_convert(::Type{Ptr{Nothing}}, d::RNNDesc) = d.ptr function rnnParamSize(T, r, input) size = Csize_t[0] @check ccall((:cudnnGetRNNParamsSize, libcudnn), cudnnStatus_t, (Ptr{Nothing},Ptr{Nothing},Ptr{Nothing},Ptr{Csize_t},Cint), - libcudnn_handle[], r, TensorDesc(T, (1,input,1)), size, cudnnDataType(T)) + handle(), r, TensorDesc(T, (1,input,1)), size, cudnnDataType(T)) return Int(size[])÷sizeof(T) end @@ -62,7 +62,7 @@ function RNNDesc{T}(mode::Int, input::Int, hidden::Int; layers = 1) where T direction = UNIDIRECTIONAL algo = RNN_ALGO_STANDARD @check ccall((:cudnnSetRNNDescriptor_v6,libcudnn), cudnnStatus_t, (Ptr{Nothing},Ptr{Nothing},Cint,Cint,Ptr{Nothing},Cint,Cint,Cint,Cint,Cint), - libcudnn_handle[],d[],hidden,layers,dropoutDesc,inputMode,direction,mode,algo,cudnnDataType(T)) + handle(),d[],hidden,layers,dropoutDesc,inputMode,direction,mode,algo,cudnnDataType(T)) w = cuzeros(T, rnnParamSize(T, d[], input)) # TODO: avoid reserve allocation here @@ -76,7 +76,7 @@ end function rnnWorkspaceSize(r::RNNDesc, seqlen, xdesc) size = Csize_t[0] @check ccall((:cudnnGetRNNWorkspaceSize, libcudnn), cudnnStatus_t, (Ptr{Nothing},Ptr{Nothing},Cint,Ptr{Ptr{Nothing}},Ptr{Csize_t}), - libcudnn_handle[], r, seqlen, xdesc, size) + handle(), r, seqlen, xdesc, size) return Int(size[]) end @@ -93,7 +93,7 @@ getworkspace(r::RNNDesc, seqlen, xdesc) = function rnnTrainingReserveSize(r::RNNDesc, seqlen, xdesc) size = Csize_t[0] @check ccall((:cudnnGetRNNTrainingReserveSize,libcudnn), cudnnStatus_t, (Ptr{Nothing}, Ptr{Nothing}, Cint, Ptr{Ptr{Nothing}}, Ptr{Csize_t}), - libcudnn_handle[], r, seqlen, xdesc, size) + handle(), r, seqlen, xdesc, size) return Int(size[]) end @@ -106,7 +106,7 @@ function cudnnRNNForward(rnn::RNNDesc{T}, seqlen, xd, x, hd, h, cd, c, wd, w, yd Ptr{Nothing}, Ptr{T}, Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Csize_t), - libcudnn_handle[], rnn, seqlen, + handle(), rnn, seqlen, xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, workspace, length(workspace)) else @@ -114,7 +114,7 @@ function cudnnRNNForward(rnn::RNNDesc{T}, seqlen, xd, x, hd, h, cd, c, wd, w, yd (Ptr{Nothing}, Ptr{Nothing}, Cint, Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Csize_t, Ptr{Nothing}, Csize_t), - libcudnn_handle[], rnn, seqlen, + handle(), rnn, seqlen, xd, x, hd, h, cd, c, wd, w, yd, y, hod, ho, cod, co, workspace, length(workspace), reserve, length(reserve)) end @@ -174,7 +174,7 @@ function cudnnRNNBackwardData(rnn::RNNDesc{T}, seqlen, yd, y, dyd, dy, dhod, dho Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Ptr{Nothing}}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Ptr{T}, Ptr{Nothing}, Csize_t, Ptr{Nothing}, Csize_t), - libcudnn_handle[], rnn, seqlen, yd, y, dyd, dy, dhod, dho, dcod, dco, + handle(), rnn, seqlen, yd, y, dyd, dy, dhod, dho, dcod, dco, wd, w, hd, h, cd, c, dxd, dx, dhd, dh, dcd, dc, ws, length(ws), rs, length(rs)) end @@ -206,7 +206,7 @@ function cudnnRNNBackwardWeights(rnn::RNNDesc{T}, seqlen, xd, x, hd, h, yd, y, d Ptr{Nothing}, Csize_t, #ws Ptr{Nothing}, Ptr{T}, #dw Ptr{Nothing}, Csize_t), #rs - libcudnn_handle[], rnn, seqlen, xd, x, hd, h, yd, y, + handle(), rnn, seqlen, xd, x, hd, h, yd, y, workspace, length(workspace), dwd, dw, reserve, length(reserve)) end From 02efc264e7a0657a007bde98686c2615c7bed432 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Thu, 8 Nov 2018 19:12:38 +0530 Subject: [PATCH 48/57] Fix unintentional change to spaces --- src/layers/normalise.jl | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/layers/normalise.jl b/src/layers/normalise.jl index 396f474c..9201e991 100644 --- a/src/layers/normalise.jl +++ b/src/layers/normalise.jl @@ -1,6 +1,7 @@ """ testmode!(m) testmode!(m, false) + Put layers like [`Dropout`](@ref) and [`BatchNorm`](@ref) into testing mode (or back to training mode with `false`). """ @@ -13,9 +14,11 @@ _testmode!(m, test) = nothing """ Dropout(p) + A Dropout layer. For each input, either sets that input to `0` (with probability `p`) or scales it by `1/(1-p)`. This is used as a regularisation, i.e. it reduces overfitting during training. + Does nothing to the input once in [`testmode!`](@ref). """ mutable struct Dropout{F} @@ -42,6 +45,7 @@ _testmode!(a::Dropout, test) = (a.active = !test) """ LayerNorm(h::Integer) + A [normalisation layer](https://arxiv.org/pdf/1607.06450.pdf) designed to be used with recurrent hidden states of size `h`. Normalises the mean/stddev of each input before applying a per-neuron gain/bias. @@ -65,16 +69,21 @@ end BatchNorm(channels::Integer, σ = identity; initβ = zeros, initγ = ones, ϵ = 1e-8, momentum = .1) + Batch Normalization layer. The `channels` input should be the size of the channel dimension in your data (see below). + Given an array with `N` dimensions, call the `N-1`th the channel dimension. (For a batch of feature vectors this is just the data dimension, for `WHCN` images it's the usual channel dimension.) + `BatchNorm` computes the mean and variance for each each `W×H×1×N` slice and shifts them to have a new mean and variance (corresponding to the learnable, per-channel `bias` and `scale` parameters). + See [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf). + Example: ```julia m = Chain( From 4d703b31a1ee458cd2599e7207555aedd8a2ba28 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Thu, 8 Nov 2018 19:23:07 +0530 Subject: [PATCH 49/57] Reshape 2D tensors to use cudnn batchnorm --- src/cuda/cudnn.jl | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 04d937d8..94424421 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -39,8 +39,14 @@ end BNCache() = BNCache(nothing, nothing) -# CuDNN supports only 4D and 5D Tensors for BatchNorm Operations -# so use the native julia code when doing batchnorm on a 2D Array +# NOTE: CuDNN supports only 4D and 5D Tensors for BatchNorm Operations +# so reshape a 2D Tensor into 4D +batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T, 2}, + running_mean::CuArray{T}, running_var::CuArray{T}, momentum; + cache = nothing, alpha = T(1), beta = T(0), + eps = T(1e-5), training = true) where T<:Union{Float32, Float64} = + batchnorm(g, b, reshape(x, 1, 1, size(x, 1), size(x, 2)), running_mean, running_var, momentum, + cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) function batchnorm(g::CuArray{T}, b::CuArray{T}, x::Union{CuArray{T, 4},CuArray{T,5}}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; @@ -115,6 +121,14 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray end end +∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T, 2}, dy::CuArray{T, 2}, + running_mean::CuArray{T}, running_var::CuArray{T}, momentum; + cache = nothing, eps = T(1e-5), alpha = T(1), + beta = T(0), training = true) where T<:Union{Float32, Float64} = + ∇batchnorm(g, b, reshape(x, 1, 1, size(x, 1), size(x, 2)), reshape(dy, 1, 1, size(dy, 1), size(dy, 2)), + running_mean, running_var, momentum, cache = cache, eps = eps, alpha = alpha, beta = beta, + training = training) + function ∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, dy::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; cache = nothing, eps = T(1e-5), alpha = T(1), @@ -176,7 +190,7 @@ end # Flux Interface -(BN::Flux.BatchNorm)(x::Union{CuParam{T,4},CuParam{T,5}}, cache = nothing) where T<:Union{Float32, Float64} = +(BN::Flux.BatchNorm)(x::Union{CuParam{T,2},CuParam{T,4},CuParam{T,5}}, cache = nothing) where T<:Union{Float32, Float64} = batchnorm(BN.γ, BN.β, x, BN.μ, BN.σ², BN.momentum; cache = cache, alpha = 1, beta = 0, eps = BN.ϵ, training = BN.active) batchnorm(g::TrackedArray, b::TrackedArray, x::TrackedArray, running_mean::CuArray{T}, From 3bc809f49e17e8463319920fbcef18725eea9d35 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 10 Nov 2018 11:25:37 +0530 Subject: [PATCH 50/57] dropdims to make the array 2d --- src/cuda/cudnn.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 94424421..5d661889 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -45,8 +45,8 @@ batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T, 2}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; cache = nothing, alpha = T(1), beta = T(0), eps = T(1e-5), training = true) where T<:Union{Float32, Float64} = - batchnorm(g, b, reshape(x, 1, 1, size(x, 1), size(x, 2)), running_mean, running_var, momentum, - cache = cache, alpha = alpha, beta = beta, eps = eps, training = training) + dropdims(batchnorm(g, b, reshape(x, 1, 1, size(x, 1), size(x, 2)), running_mean, running_var, momentum, + cache = cache, alpha = alpha, beta = beta, eps = eps, training = training), dims = (1, 2)) function batchnorm(g::CuArray{T}, b::CuArray{T}, x::Union{CuArray{T, 4},CuArray{T,5}}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; From e2ae8b4e8dfc00e47a72162aaa13c854095bd6db Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 10 Nov 2018 11:35:58 +0530 Subject: [PATCH 51/57] Fix dimensions --- src/cuda/cudnn.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 5d661889..f71742a8 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -121,11 +121,11 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray end end -∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T, 2}, dy::CuArray{T, 2}, +∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T, 2}, dy::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; cache = nothing, eps = T(1e-5), alpha = T(1), beta = T(0), training = true) where T<:Union{Float32, Float64} = - ∇batchnorm(g, b, reshape(x, 1, 1, size(x, 1), size(x, 2)), reshape(dy, 1, 1, size(dy, 1), size(dy, 2)), + ∇batchnorm(g, b, reshape(x, 1, 1, size(x, 1), size(x, 2)), dy, running_mean, running_var, momentum, cache = cache, eps = eps, alpha = alpha, beta = beta, training = training) From d6aacf413584b8d9dcde197972f246e8f7b56c3d Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 10 Nov 2018 11:43:49 +0530 Subject: [PATCH 52/57] Fix reshape --- src/cuda/cudnn.jl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index f71742a8..b14b1851 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -121,13 +121,15 @@ function cudnnBNForward!(y::CuArray{T}, g::CuArray{T}, b::CuArray{T}, x::CuArray end end -∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T, 2}, dy::CuArray{T}, +function ∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T, 2}, dy::CuArray{T, 2}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; cache = nothing, eps = T(1e-5), alpha = T(1), - beta = T(0), training = true) where T<:Union{Float32, Float64} = - ∇batchnorm(g, b, reshape(x, 1, 1, size(x, 1), size(x, 2)), dy, - running_mean, running_var, momentum, cache = cache, eps = eps, alpha = alpha, beta = beta, - training = training) + beta = T(0), training = true) where T<:Union{Float32, Float64} + dg, db, dx = ∇batchnorm(g, b, reshape(x, 1, 1, size(x, 1), size(x, 2)), reshape(dy, 1, 1, size(dy, 1), + size(dy, 2)), running_mean, running_var, momentum, cache = cache, eps = eps, + alpha = alpha, beta = beta, training = training) + (dg, db, dropdims(dx, dims = (1, 2))) +end function ∇batchnorm(g::CuArray{T}, b::CuArray{T}, x::CuArray{T}, dy::CuArray{T}, running_mean::CuArray{T}, running_var::CuArray{T}, momentum; From 4df9e1051628428b9fda3ae85be48046312c9682 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 10 Nov 2018 11:52:23 +0530 Subject: [PATCH 53/57] Add test for 2D inputs --- test/cuda/cudnn.jl | 52 +++++++++++++++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/test/cuda/cudnn.jl b/test/cuda/cudnn.jl index 5a51a0b6..c4cd60c7 100644 --- a/test/cuda/cudnn.jl +++ b/test/cuda/cudnn.jl @@ -2,23 +2,47 @@ using Flux, Flux.Tracker, CuArrays, Test using Flux.Tracker: TrackedArray, data @testset "CUDNN BatchNorm" begin - x = TrackedArray(rand(10, 10, 3, 1)) - m = BatchNorm(3) - cx = gpu(x) - cm = gpu(m) + @testset "4D Input" begin + x = TrackedArray(rand(10, 10, 3, 1)) + m = BatchNorm(3) + cx = gpu(x) + cm = gpu(m) - y = m(x) - cy = cm(cx) + y = m(x) + cy = cm(cx) - @test cy isa TrackedArray{Float32,4,CuArray{Float32,4}} + @test cy isa TrackedArray{Float32,4,CuArray{Float32,4}} - @test cpu(data(cy)) ≈ data(y) + @test cpu(data(cy)) ≈ data(y) - g = rand(size(y)...) - Flux.back!(y, g) - Flux.back!(cy, gpu(g)) + g = rand(size(y)...) + Flux.back!(y, g) + Flux.back!(cy, gpu(g)) - @test m.γ.grad ≈ cpu(cm.γ.grad) - @test m.β.grad ≈ cpu(cm.β.grad) - @test x.grad ≈ cpu(x.grad) + @test m.γ.grad ≈ cpu(cm.γ.grad) + @test m.β.grad ≈ cpu(cm.β.grad) + @test x.grad ≈ cpu(x.grad) + end + + @testset "2D Input" begin + x = TrackedArray(rand(3, 1)) + m = BatchNorm(3) + cx = gpu(x) + cm = gpu(m) + + y = m(x) + cy = cm(cx) + + @test cy isa TrackedArray{Float32,2,CuArray{Float32,2}} + + @test cpu(data(cy)) ≈ data(y) + + g = rand(size(y)...) + Flux.back!(y, g) + Flux.back!(cy, gpu(g)) + + @test m.γ.grad ≈ cpu(cm.γ.grad) + @test m.β.grad ≈ cpu(cm.β.grad) + @test x.grad ≈ cpu(x.grad) + end end From 9f12e8ec68e18af585183d508e2234b4cdca9924 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 10 Nov 2018 14:00:25 +0530 Subject: [PATCH 54/57] Make the test more reliable --- test/cuda/cudnn.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/cuda/cudnn.jl b/test/cuda/cudnn.jl index c4cd60c7..9a154961 100644 --- a/test/cuda/cudnn.jl +++ b/test/cuda/cudnn.jl @@ -3,7 +3,7 @@ using Flux.Tracker: TrackedArray, data @testset "CUDNN BatchNorm" begin @testset "4D Input" begin - x = TrackedArray(rand(10, 10, 3, 1)) + x = TrackedArray(Float64.(collect(reshape(1:12, 2, 2, 3, 1)))) m = BatchNorm(3) cx = gpu(x) cm = gpu(m) @@ -23,9 +23,9 @@ using Flux.Tracker: TrackedArray, data @test m.β.grad ≈ cpu(cm.β.grad) @test x.grad ≈ cpu(x.grad) end - + @testset "2D Input" begin - x = TrackedArray(rand(3, 1)) + x = TrackedArray(Float64.(collect(reshape(1:12, 3, 4)))) m = BatchNorm(3) cx = gpu(x) cm = gpu(m) From 1d5b3429eaa3c6ac4fce5dd74d9bc3b6b57fc091 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Tue, 20 Nov 2018 09:26:48 +0530 Subject: [PATCH 55/57] Missing brackets --- src/cuda/cuda.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cuda/cuda.jl b/src/cuda/cuda.jl index 9ffcef02..82982180 100644 --- a/src/cuda/cuda.jl +++ b/src/cuda/cuda.jl @@ -13,7 +13,7 @@ if CuArrays.libcudnn != nothing handle() = CuArrays.CUDNN.handle() end include("curnn.jl") - include("cudnn.jl" + include("cudnn.jl") else @warn("CUDNN is not installed, some functionality will not be available.") end From 7992de5cba171cc3b7b0b5f36bc16965d2435af6 Mon Sep 17 00:00:00 2001 From: Mike J Innes Date: Tue, 27 Nov 2018 18:31:05 -0500 Subject: [PATCH 56/57] update requires syntax --- src/cuda/cudnn.jl | 2 +- src/cuda/curnn.jl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cuda/cudnn.jl b/src/cuda/cudnn.jl index 57de9f01..8bd8135e 100644 --- a/src/cuda/cudnn.jl +++ b/src/cuda/cudnn.jl @@ -1,4 +1,4 @@ -using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, +using .CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, cudnnBatchNormMode_t, cudnnHandle_t, cudnnDataType, TensorDesc, FilterDesc import ..Flux: data using LinearAlgebra diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index e76437d7..a47947e0 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -1,4 +1,4 @@ -using CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, +using .CuArrays.CUDNN: @check, libcudnn, cudnnStatus_t, cudnnTensorDescriptor_t, cudnnBatchNormMode_t, cudnnHandle_t, cudnnDataType, TensorDesc, FilterDesc using LinearAlgebra From 1c36504768c3f7bde4ebb89dead1fe6ba4ade887 Mon Sep 17 00:00:00 2001 From: Mike J Innes Date: Tue, 27 Nov 2018 18:44:07 -0500 Subject: [PATCH 57/57] fixup --- src/cuda/curnn.jl | 3 +++ test/cuda/cuda.jl | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/cuda/curnn.jl b/src/cuda/curnn.jl index a47947e0..210ddd7c 100644 --- a/src/cuda/curnn.jl +++ b/src/cuda/curnn.jl @@ -231,6 +231,9 @@ function LinearAlgebra.copy_transpose!(dst::CuArray, src::CuArray) dst[I...] = src[reverse(I)...] return end + blk, thr = cudims(dst) + @cuda blocks=blk threads=thr kernel(dst, src) + return dst end CuParam{T,N} = Union{CuArray{T,N},TrackedArray{T,N,CuArray{T,N}}} diff --git a/test/cuda/cuda.jl b/test/cuda/cuda.jl index aa422dfd..e266a81b 100644 --- a/test/cuda/cuda.jl +++ b/test/cuda/cuda.jl @@ -37,8 +37,7 @@ Flux.back!(sum(l)) end if CuArrays.libcudnn != nothing - @info "Testing Flux/CUDNN BatchNorm" + @info "Testing Flux/CUDNN" include("cudnn.jl") - @info "Testing Flux/CUDNN RNN" include("curnn.jl") end