Flux.jl/test/cuda/cudnn.jl

using Flux, CuArrays, Test
using Zygote
# Run the layer through Zygote's `forward` so that BatchNorm takes its
# training-mode code path, and return only the primal output.
trainmode(f, x...) = forward(f, x...)[1]
@testset "CUDNN BatchNorm" begin
@testset "4D Input" begin
x = Float64.(collect(reshape(1:12, 2, 2, 3, 1)))
m = BatchNorm(3)
cx = gpu(x)
cm = gpu(m)
y = trainmode(m, x)
cy = trainmode(cm, cx)
@test cpu(data(cy)) data(y)

    g = gradient(()->sum(m(x)), params(m))
    cg = gradient(()->sum(cm(cx)), params(cm))

    @test g.grads[m.γ] ≈ cpu(cg.grads[cm.γ])
    @test g.grads[m.β] ≈ cpu(cg.grads[cm.β])
  end
@testset "2D Input" begin
x = Float64.(collect(reshape(1:12, 3, 4)))
m = BatchNorm(3)
cx = gpu(x)
cm = gpu(m)
y = trainmode(m, x)
cy = trainmode(cm, cx)
@test cy isa CuArray{Float32,2}
@test cpu(data(cy)) data(y)

    g = gradient(()->sum(m(x)), params(m))
    cg = gradient(()->sum(cm(cx)), params(cm))

    @test g.grads[m.γ] ≈ cpu(cg.grads[cm.γ])
    @test g.grads[m.β] ≈ cpu(cg.grads[cm.β])
  end
end