Check for CUDA availability at run time.

Tim Besard 2019-11-02 11:18:06 +01:00
parent 7104fd9332
commit 39ab740fb7
6 changed files with 26 additions and 52 deletions
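
The diff's common thread: whether CUDA is used is no longer a compile-time constant baked into Flux's precompile image, but a Ref{Bool} flipped in __init__ each time the package loads. A minimal sketch of that pattern outside Flux (module and helper names here are invented for illustration):

module RuntimeFlagDemo

# The Ref is a compile-time constant, but its value is not part of the
# precompiled image: __init__ runs on every `using`, after precompilation.
const cuda_usable = Ref(false)

probe_cuda() = false  # stand-in for the CUDAdrv/CuArrays checks in the diff

__init__() = (cuda_usable[] = probe_cuda())

end # module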

Project.toml

@@ -5,7 +5,7 @@ version = "0.9.0"
 [deps]
 AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
-CUDAapi = "3895d2a7-ec45-59b8-82bb-cfc6a382f9b3"
+CUDAdrv = "c5f51814-7f29-56b8-a69c-e4d8f6be1fde"
 CodecZlib = "944b1d66-785c-5afd-91f1-9de20f533193"
 Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
 CuArrays = "3a865a2d-5b23-5a0f-bc46-62713ec82fae"
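
Swapping CUDAapi for CUDAdrv is what makes a run-time answer possible: CUDAapi's has_cuda() inspects the local installation, while CUDAdrv can report whether the driver actually works right now. A sketch of the standalone probe, using the same CUDAdrv calls that appear in src/Flux.jl below (the log messages are illustrative):

using CUDAdrv

# The same checks the new __init__ performs, runnable on their own:
if CUDAdrv.functional() && length(CUDAdrv.devices()) > 0
    @info "CUDA is usable" ndevices=length(CUDAdrv.devices())
else
    @info "No working CUDA setup; staying on the CPU"
end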

src/Flux.jl

@@ -21,19 +21,9 @@ export SGD, Descent, ADAM, Momentum, Nesterov, RMSProp,
        ADAMW, RADAM, InvDecay, ExpDecay, WeightDecay

-allow_cuda() = parse(Bool, get(ENV, "FLUX_USE_CUDA", "true"))
-const consider_cuda = allow_cuda()
-using CUDAapi
-const use_cuda = consider_cuda && has_cuda()
-if use_cuda
-  try
-    using CuArrays
-  catch
-    @error "CUDA is installed, but CuArrays.jl fails to load. Please fix the issue, or load Flux with FLUX_USE_CUDA=false."
-    rethrow()
-  end
-end
+ENV["CUDA_INIT_SILENT"] = true
+using CUDAdrv, CuArrays
+const use_cuda = Ref(false)

 include("utils.jl")
 include("onehot.jl")
@@ -49,21 +39,19 @@ include("data/Data.jl")
 include("deprecations.jl")

-if use_cuda
-  include("cuda/cuda.jl")
-end
+include("cuda/cuda.jl")

 function __init__()
-  # check if the GPU usage conditions that are baked in the precompilation image
-  # match the current situation, and force a recompilation if not.
-  if (allow_cuda() != consider_cuda) || (consider_cuda && has_cuda() != use_cuda)
-    cachefile = if VERSION >= v"1.3-"
-      Base.compilecache_path(Base.PkgId(Flux))
-    else
-      abspath(DEPOT_PATH[1], Base.cache_file_entry(Base.PkgId(Flux)))
-    end
-    rm(cachefile)
-    error("Your set-up changed, and Flux.jl needs to be reconfigured. Please load the package again.")
+  if !CUDAdrv.functional()
+    @warn "CUDA available, but CUDAdrv.jl failed to load"
+  elseif length(devices()) == 0
+    @warn "CUDA available, but no GPU detected"
+  elseif !CuArrays.functional()
+    @warn "CUDA GPU available, but CuArrays.jl failed to load"
+  elseif !CuArrays.has_cudnn()
+    @warn "CUDA GPU available, but CuArrays.jl did not find libcudnn"
+  else
+    use_cuda[] = true
   end
 end
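
The effect: cuda/cuda.jl is always compiled in, and __init__ walks a chain of run-time checks (driver functional, a device present, CuArrays functional, CUDNN found) before enabling GPU paths. Downstream code reads the flag with a [] dereference; a small usage sketch (grounded in the new code, though the log messages are illustrative):

using Flux

if Flux.use_cuda[]
    @info "Flux found a usable GPU this session"
else
    @info "Flux is CPU-only this session"
end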

src/cuda/cuda.jl

@@ -2,12 +2,8 @@ module CUDA

 using ..CuArrays

-if CuArrays.libcudnn !== nothing # TODO: use CuArrays.has_cudnn()
-  using CuArrays: CUDNN
-  include("curnn.jl")
-  include("cudnn.jl")
-else
-  @warn "CUDNN is not installed, some functionality will not be available."
-end
+using CuArrays: CUDNN
+include("curnn.jl")
+include("cudnn.jl")

 end
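
The libcudnn !== nothing branch (and its TODO) disappears because the check moved to __init__: the CUDNN bindings now always compile, and a missing libcudnn is reported once at load time through CuArrays.has_cudnn(). A sketch of that run-time query on its own (the same call the new __init__ uses; the messages are illustrative):

using CuArrays

if CuArrays.has_cudnn()
    @info "CUDNN kernels available (RNNs, batchnorm, ...)"
else
    @warn "CuArrays did not find libcudnn; CUDNN-backed layers will fail"
end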

src/functor.jl

@@ -73,13 +73,7 @@
 cpu(m) = fmap(x -> adapt(Array, x), m)

-const gpu_adaptor = if use_cuda
-  CuArrays.cu
-else
-  identity
-end
-gpu(x) = fmap(gpu_adaptor, x)
+gpu(x) = use_cuda[] ? fmap(CuArrays.cu, x) : x

 # Precision
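
Behavioral note: gpu was previously fmap over an adaptor fixed at compile time (cu or identity); it is now a run-time branch, and on the CPU path it returns x untouched rather than mapping identity over it. A usage sketch (assumes a working Flux install; without CUDA every |> gpu is a no-op):

using Flux

m = Dense(10, 5) |> gpu    # parameters become CuArrays iff use_cuda[] is set
x = rand(Float32, 10) |> gpu
y = m(x) |> cpu            # cpu() always adapts back to plain Arrays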

src/onehot.jl

@@ -37,12 +37,10 @@ import Adapt: adapt, adapt_structure
 adapt_structure(T, xs::OneHotMatrix) = OneHotMatrix(xs.height, adapt(T, xs.data))

-if use_cuda
-  import .CuArrays: CuArray, cudaconvert
-  import Base.Broadcast: BroadcastStyle, ArrayStyle
-  BroadcastStyle(::Type{<:OneHotMatrix{<:CuArray}}) = ArrayStyle{CuArray}()
-  cudaconvert(x::OneHotMatrix{<:CuArray}) = OneHotMatrix(x.height, cudaconvert(x.data))
-end
+import .CuArrays: CuArray, cudaconvert
+import Base.Broadcast: BroadcastStyle, ArrayStyle
+BroadcastStyle(::Type{<:OneHotMatrix{<:CuArray}}) = ArrayStyle{CuArray}()
+cudaconvert(x::OneHotMatrix{<:CuArray}) = OneHotMatrix(x.height, cudaconvert(x.data))

 """
     onehot(l, labels[, unk])
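
These definitions exist so a OneHotMatrix whose storage is a CuArray broadcasts as a GPU array and passes through cudaconvert when captured by a kernel; the change is that they are now defined unconditionally and simply lie dormant without a GPU. A hedged usage sketch (assumes a functional GPU; onehotbatch is Flux's existing constructor):

using Flux
using Flux: onehotbatch

y = onehotbatch([:b, :a, :b], [:a, :b, :c]) |> gpu  # storage becomes a CuArray

# The BroadcastStyle method above routes this broadcast to the GPU,
# producing a CuArray instead of hitting a generic fallback:
z = y .* 1f0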

test/cuda/cuda.jl

@@ -53,8 +53,6 @@
 @test y[3,:] isa CuArray
 end

-if CuArrays.libcudnn != nothing
-  @info "Testing Flux/CUDNN"
-  include("cudnn.jl")
-  include("curnn.jl")
-end
+@info "Testing Flux/CUDNN"
+include("cudnn.jl")
+include("curnn.jl")