Merge remote-tracking branch 'upstream/master' into samepad

This commit is contained in:
DrChainsaw 2019-11-08 18:49:47 +01:00
commit 453ecd1f24
16 changed files with 431 additions and 182 deletions

View File

@ -1,51 +1,41 @@
before_script:
- export CI_DISABLE_CURNN_TEST=true
variables:
CI_IMAGE_TAG: 'cuda'
include:
- 'https://raw.githubusercontent.com/JuliaGPU/gitlab-ci/master/templates/v4/common.yml'
- 'https://raw.githubusercontent.com/JuliaGPU/gitlab-ci/master/templates/v6.yml'
.flux:
extends: .test
script:
- julia -e 'using InteractiveUtils;
versioninfo()'
- mkdir $JULIA_DEPOT_PATH # Pkg3.jl#325
- julia --project -e 'using Pkg;
Pkg.instantiate();
Pkg.build();
Pkg.test(; coverage=true);'
image: nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04
test:v1.0:
extends: .flux
variables:
CI_VERSION_TAG: 'v1.0'
test:v1.1:
extends: .flux
variables:
CI_VERSION_TAG: 'v1.1'
julia:1.0:
extends:
- .julia:1.0
- .test
tags:
- nvidia
test:v1.2:
extends: .flux
variables:
CI_VERSION_TAG: 'v1.2'
julia:1.1:
extends:
- .julia:1.1
- .test
tags:
- nvidia
test:v1.3:
extends: .flux
variables:
CI_VERSION_TAG: 'v1.3'
julia:1.2:
extends:
- .julia:1.2
- .test
tags:
- nvidia
test:v1.0:
extends: .flux
variables:
CI_VERSION_TAG: 'v1.0'
test:dev:
extends: .flux
variables:
CI_VERSION_TAG: 'dev'
julia:1.3:
extends:
- .julia:1.3
- .test
tags:
- nvidia
julia:nightly:
extends:
- .julia:nightly
- .test
tags:
- nvidia
allow_failure: true

View File

@ -6,7 +6,8 @@ os:
# - osx
julia:
- 1.1
- 1.2
- 1.3
- nightly
matrix:
@ -16,7 +17,7 @@ matrix:
jobs:
include:
- stage: "Documentation"
julia: 1.0
julia: 1.2
os: linux
script:
- julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd()));

View File

@ -28,10 +28,10 @@ uuid = "9e28174c-4ba2-5203-b857-d8d62c4213ee"
version = "0.8.10"
[[BinaryProvider]]
deps = ["Libdl", "Logging", "SHA"]
git-tree-sha1 = "c7361ce8a2129f20b0e05a89f7070820cfed6648"
deps = ["Libdl", "SHA"]
git-tree-sha1 = "5b08ed6036d9d3f0ee6369410b830f8873d4024c"
uuid = "b99e7846-7c00-51b0-8f62-c81ae34c0232"
version = "0.5.6"
version = "0.5.8"
[[CEnum]]
git-tree-sha1 = "62847acab40e6855a9b5905ccb99c2b5cf6b3ebb"
@ -40,9 +40,9 @@ version = "0.2.0"
[[CSTParser]]
deps = ["Tokenize"]
git-tree-sha1 = "c69698c3d4a7255bc1b4bc2afc09f59db910243b"
git-tree-sha1 = "99dda94f5af21a4565dc2b97edf6a95485f116c3"
uuid = "00ebfdb7-1f24-5e51-bd34-a7502290713f"
version = "0.6.2"
version = "1.0.0"
[[CUDAapi]]
deps = ["Libdl", "Logging"]
@ -51,16 +51,16 @@ uuid = "3895d2a7-ec45-59b8-82bb-cfc6a382f9b3"
version = "1.2.0"
[[CUDAdrv]]
deps = ["CUDAapi", "Libdl", "Printf"]
git-tree-sha1 = "9ce99b5732c70e06ed97c042187baed876fb1698"
deps = ["CEnum", "Printf"]
git-tree-sha1 = "96eabc95ebb83e361311330ffb574a3e2df73251"
uuid = "c5f51814-7f29-56b8-a69c-e4d8f6be1fde"
version = "3.1.0"
version = "4.0.2"
[[CUDAnative]]
deps = ["Adapt", "CUDAapi", "CUDAdrv", "DataStructures", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Printf", "TimerOutputs"]
git-tree-sha1 = "52ae1ce10ebfa686e227655c47b19add89308623"
deps = ["Adapt", "CEnum", "CUDAapi", "CUDAdrv", "DataStructures", "InteractiveUtils", "LLVM", "Libdl", "Printf", "TimerOutputs"]
git-tree-sha1 = "dd642afe5fd6633663a8c3d42f3b7638f2210b79"
uuid = "be33ccc6-a3ff-5ff2-a52e-74243cff1e17"
version = "2.3.1"
version = "2.5.3"
[[CodecZlib]]
deps = ["BinaryProvider", "Libdl", "TranscodingStreams"]
@ -88,9 +88,9 @@ version = "0.2.0"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "84aa74986c5b9b898b0d1acaf3258741ee64754f"
git-tree-sha1 = "ed2c4abadf84c53d9e58510b5fc48912c2336fbb"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "2.1.0"
version = "2.2.0"
[[Conda]]
deps = ["JSON", "VersionParsing"]
@ -105,23 +105,21 @@ uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
version = "4.0.0"
[[CuArrays]]
deps = ["AbstractFFTs", "Adapt", "CEnum", "CUDAapi", "CUDAdrv", "CUDAnative", "DataStructures", "GPUArrays", "LinearAlgebra", "MacroTools", "NNlib", "Printf", "Random", "Requires", "SparseArrays", "TimerOutputs"]
git-tree-sha1 = "45683305171430978c17f496969dc9b6d3094a51"
repo-rev = "master"
repo-url = "https://github.com/JuliaGPU/CuArrays.jl.git"
deps = ["AbstractFFTs", "Adapt", "CEnum", "CUDAapi", "CUDAdrv", "CUDAnative", "DataStructures", "GPUArrays", "Libdl", "LinearAlgebra", "MacroTools", "NNlib", "Printf", "Random", "Requires", "SparseArrays", "TimerOutputs"]
git-tree-sha1 = "bc94d6cb335d418088f12641751aab63ff56509d"
uuid = "3a865a2d-5b23-5a0f-bc46-62713ec82fae"
version = "1.3.0"
version = "1.4.2"
[[DataAPI]]
git-tree-sha1 = "8903f0219d3472543fc4b2f5ebaf675a07f817c0"
git-tree-sha1 = "674b67f344687a88310213ddfa8a2b3c76cc4252"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.0.1"
version = "1.1.0"
[[DataStructures]]
deps = ["InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "0809951a1774dc724da22d26e4289bbaab77809a"
git-tree-sha1 = "1fe8fad5fc84686dcbc674aa255bc867a64f8132"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.17.0"
version = "0.17.5"
[[Dates]]
deps = ["Printf"]
@ -155,9 +153,9 @@ version = "1.0.1"
[[FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays"]
git-tree-sha1 = "8fba6ddaf66b45dec830233cea0aae43eb1261ad"
git-tree-sha1 = "6827a8f73ff12707f209c920d204238a16892b55"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.6.4"
version = "0.8.0"
[[FixedPointNumbers]]
git-tree-sha1 = "d14a6fa5890ea3a7e5dcab6811114f132fec2b4b"
@ -165,22 +163,22 @@ uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.6.1"
[[ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "InteractiveUtils", "LinearAlgebra", "NaNMath", "Random", "SparseArrays", "SpecialFunctions", "StaticArrays", "Test"]
git-tree-sha1 = "4c4d727f1b7e0092134fabfab6396b8945c1ea5b"
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "NaNMath", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "adf88d6da1f0294058f38295becf8807986bb7d0"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.3"
version = "0.10.5"
[[GPUArrays]]
deps = ["Adapt", "FFTW", "FillArrays", "LinearAlgebra", "Printf", "Random", "Serialization", "StaticArrays", "Test"]
git-tree-sha1 = "77e27264276fe97a7e7fb928bf8999a145abc018"
deps = ["AbstractFFTs", "Adapt", "LinearAlgebra", "Printf", "Random", "Serialization"]
git-tree-sha1 = "a0a3b927b1a06e63fb8b91950cc7df340b7d912c"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "1.0.3"
version = "2.0.0"
[[IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "e23faa71b8f54c3fdc99b230b9c2906cafdddca5"
git-tree-sha1 = "72421971e60917b8cd7737f9577c4f0f87eab306"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.2.3"
version = "0.3.0"
[[InteractiveUtils]]
deps = ["Markdown"]
@ -200,9 +198,9 @@ version = "0.7.2"
[[LLVM]]
deps = ["CEnum", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "4a05f742837779a00bd8c9a18da6817367c4245d"
git-tree-sha1 = "74fe444b8b6d1ac01d639b2f9eaf395bcc2e24fc"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "1.3.0"
version = "1.3.2"
[[LibGit2]]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
@ -234,9 +232,10 @@ uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27"
version = "0.5.0"
[[Missings]]
git-tree-sha1 = "29858ce6c8ae629cf2d733bffa329619a1c843d0"
deps = ["DataAPI"]
git-tree-sha1 = "de0a5ce9e5289f27df672ffabef4d1e5861247d5"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "0.4.2"
version = "0.4.3"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
@ -261,12 +260,12 @@ version = "1.1.0"
[[Parsers]]
deps = ["Dates", "Test"]
git-tree-sha1 = "ef0af6c8601db18c282d092ccbd2f01f3f0cd70b"
git-tree-sha1 = "c56ecb484f286639f161e712b8311f5ab77e8d32"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "0.3.7"
version = "0.3.8"
[[Pkg]]
deps = ["Dates", "LibGit2", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"]
deps = ["Dates", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[Printf]]
@ -328,9 +327,9 @@ version = "0.8.0"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "db23bbf50064c582b6f2b9b043c8e7e98ea8c0c6"
git-tree-sha1 = "1e9c5d89cba8047d518f1ffef432906ef1a3e8bd"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "0.11.0"
version = "0.12.0"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
@ -390,16 +389,12 @@ version = "0.8.3"
[[Zygote]]
deps = ["DiffRules", "FFTW", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NNlib", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "38241b40ebd8748bcacad5e6c7ba3ab3cc7a15c9"
repo-rev = "master"
repo-url = "https://github.com/FluxML/Zygote.jl.git"
git-tree-sha1 = "b2e42a21dc3d1ecd3cbe8c83a454ca56fbf423c4"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.3.4"
version = "0.4.0"
[[ZygoteRules]]
deps = ["MacroTools"]
git-tree-sha1 = "c4c29b30b8ff3be13d4244e78be7df2a42bc54d0"
repo-rev = "master"
repo-url = "https://github.com/FluxML/ZygoteRules.jl.git"
git-tree-sha1 = "b3b4882cc9accf6731a08cc39543fbc6b669dca8"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.0"

View File

@ -5,7 +5,7 @@ version = "0.9.0"
[deps]
AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
CUDAapi = "3895d2a7-ec45-59b8-82bb-cfc6a382f9b3"
CUDAdrv = "c5f51814-7f29-56b8-a69c-e4d8f6be1fde"
CodecZlib = "944b1d66-785c-5afd-91f1-9de20f533193"
Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
CuArrays = "3a865a2d-5b23-5a0f-bc46-62713ec82fae"
@ -23,13 +23,12 @@ StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
ZipFile = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444"
[compat]
CUDAapi = "1.1"
CuArrays = "1.2"
CUDAdrv = "4.0.1"
CuArrays = "1.4.2"
NNlib = "0.6"
Zygote = "0.3"
Zygote = "0.4"
julia = "1"
[extras]

View File

@ -38,7 +38,7 @@ model = Chain(
loss(x, y) = crossentropy(model(x), y)
Flux.train!(loss, data, ADAM(...))
Flux.train!(loss, params(model), data, ADAM(...))
```
Yet you can easily strip away the layers, and directly write the mathematics for your problem. Flux will seamlessly take gradients of any Julia code, so your model looks just like the paper.
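A minimal sketch of that idea (the `predict` and `loss` names here are just for illustration, assuming Flux's exported `gradient` and `params`):
```julia
using Flux

W, b = randn(2, 5), randn(2)            # plain Julia arrays, no layer types
predict(x) = W*x .+ b
loss(x, y) = sum((predict(x) .- y).^2)

x, y = randn(5), randn(2)
gs = gradient(() -> loss(x, y), params(W, b))   # gradients with respect to W and b
gs[W], gs[b]
```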

View File

@ -113,6 +113,6 @@ You can even store optimiser state alongside the model, to resume training
exactly where you left off.
```julia
opt = ADAM(params(model))
opt = ADAM()
@save "model-$(now()).bson" model opt
```

View File

@ -58,3 +58,83 @@ AMSGrad
NADAM
ADAMW
```
## Optimiser Interface
Flux's optimisers are built around a `struct` that holds all the optimiser parameters along with a definition of how to apply the update rule associated with it. We do this via the `apply!` function, which takes the optimiser as the first argument, followed by the parameter and its corresponding gradient.
In this manner Flux also allows one to create custom optimisers that can be used seamlessly. Let's work through this with a simple example.
```julia
mutable struct Momentum
eta
rho
velocity
end
Momentum(eta::Real, rho::Real) = Momentum(eta, rho, IdDict())
```
The `Momentum` type will act as our optimiser in this case. Notice that we have added all the parameters as fields, along with the velocity which we will use as our state dictionary. Each parameter in our model will get an entry in it. We can now define the rule applied when this optimiser is invoked.
```julia
function apply!(o::Momentum, x, Δ)
η, ρ = o.eta, o.rho
v = get!(o.velocity, x, zero(x))::typeof(x)
@. v = ρ * v - η * Δ
@. Δ = -v
end
```
This is the basic definition of a Momentum update rule given by:
```math
v = ρ * v - η * Δ
w = w + v
```
The `apply!` function defines the update rule for an optimiser `opt`, given a parameter and its gradient. It returns the modified gradient, which is then subtracted from the parameter. Here, the velocity associated with each parameter `x` is looked up (or initialised) in the running state `o.velocity`, and the optimiser's state is updated in place.
Flux calls this function internally via `update!`, which shares the API of `apply!` but also handles collections of parameters gracefully.
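As a rough sketch of how `update!` ties into `apply!` (illustrative only; `my_update!` is a hypothetical name, and the real implementation in Flux may differ in detail):
```julia
using Flux
using Flux.Optimise: apply!

# Single parameter: `apply!` returns the modified gradient,
# and subtracting it performs the update step in place.
function my_update!(opt, x::AbstractArray, x̄)
  x .-= apply!(opt, x, x̄)
end

# Collection of parameters, e.g. `params(model)` with the grads from `gradient`.
function my_update!(opt, xs, gs)
  for x in xs
    gs[x] === nothing && continue   # skip parameters that received no gradient
    my_update!(opt, x, gs[x])
  end
end
```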
## Composing Optimisers
Flux defines a special kind of optimiser simply called `Optimiser`, which takes arbitrary optimisers as input. Its behaviour is similar to the usual optimisers, but it acts by calling the optimisers listed in it sequentially. Each optimiser produces a modified gradient
that will be fed into the next, and the resulting update will be applied to the parameter as usual. A classic use case is adding decays. Flux defines some basic decays including `ExpDecay`, `InvDecay`, etc.
```julia
opt = Optimiser(ExpDecay(0.001, 0.1, 1000, 1e-4), Descent())
```
Here we apply exponential decay to the `Descent` optimiser. The defaults of `ExpDecay` mean that its learning rate will be decayed every 1000 steps.
It is then applied like any other optimiser.
```julia
w = randn(10, 10)
w1 = randn(10,10)
ps = Params([w, w1])
loss(x) = Flux.mse(w * x, w1 * x)
loss(rand(10)) # around 9
for t = 1:10^5
θ = Params([w, w1])
θ̄ = gradient(() -> loss(rand(10)), θ)
Flux.Optimise.update!(opt, θ, θ̄)
end
loss(rand(10)) # around 0.9
```
In this manner it is possible to compose optimisers for some added flexibility.
## Decays
Similar to optimisers, Flux also defines some simple decays that can be used in conjunction with other optimisers, or standalone.
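For example, a decay is usually composed with another optimiser via `Optimiser` (a brief sketch; the hyperparameter values are illustrative):
```julia
using Flux

# Composing WeightDecay with ADAM mirrors how ADAMW is defined in Flux
# (Optimiser(ADAM(η, β), WeightDecay(decay))).
opt = Optimiser(ADAM(0.001), WeightDecay(1e-4))

# Learning-rate schedules work the same way:
opt_scheduled = Optimiser(ExpDecay(0.001, 0.1, 1000, 1e-4), Descent())
```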
```@docs
ExpDecay
InvDecay
WeightDecay
```

View File

@ -21,19 +21,9 @@ export SGD, Descent, ADAM, Momentum, Nesterov, RMSProp,
ADAMW, RADAM, InvDecay, ExpDecay, WeightDecay
allow_cuda() = parse(Bool, get(ENV, "FLUX_USE_CUDA", "true"))
const consider_cuda = allow_cuda()
using CUDAapi
const use_cuda = consider_cuda && has_cuda()
if use_cuda
try
using CuArrays
catch
@error "CUDA is installed, but CuArrays.jl fails to load. Please fix the issue, or load Flux with FLUX_USE_CUDA=false."
rethrow()
end
end
ENV["CUDA_INIT_SILENT"] = true
using CUDAdrv, CuArrays
const use_cuda = Ref(false)
include("utils.jl")
include("onehot.jl")
@ -49,21 +39,23 @@ include("data/Data.jl")
include("deprecations.jl")
if use_cuda
include("cuda/cuda.jl")
end
function __init__()
# check if the GPU usage conditions that are baked in the precompilation image
# match the current situation, and force a recompilation if not.
if (allow_cuda() != consider_cuda) || (consider_cuda && has_cuda() != use_cuda)
cachefile = if VERSION >= v"1.3-"
Base.compilecache_path(Base.PkgId(Flux))
else
abspath(DEPOT_PATH[1], Base.cache_file_entry(Base.PkgId(Flux)))
end
rm(cachefile)
error("Your set-up changed, and Flux.jl needs to be reconfigured. Please load the package again.")
if !CUDAdrv.functional()
@warn "CUDA available, but CUDAdrv.jl failed to load"
elseif length(devices()) == 0
@warn "CUDA available, but no GPU detected"
elseif !CuArrays.functional()
@warn "CUDA GPU available, but CuArrays.jl failed to load"
else
use_cuda[] = true
# FIXME: this functionality should be conditional at run time by checking `use_cuda`
# (or even better, get moved to CuArrays.jl as much as possible)
if CuArrays.has_cudnn()
include(joinpath(@__DIR__, "cuda/cuda.jl"))
else
@warn "CUDA GPU available, but CuArrays.jl did not find libcudnn. Some functionality will not be available."
end
end
end

View File

@ -2,12 +2,8 @@ module CUDA
using ..CuArrays
if CuArrays.libcudnn !== nothing # TODO: use CuArrays.has_cudnn()
using CuArrays: CUDNN
include("curnn.jl")
include("cudnn.jl")
else
@warn "CUDNN is not installed, some functionality will not be available."
end
using CuArrays: CUDNN
include("curnn.jl")
include("cudnn.jl")
end

View File

@ -73,13 +73,7 @@ end
cpu(m) = fmap(x -> adapt(Array, x), m)
const gpu_adaptor = if use_cuda
CuArrays.cu
else
identity
end
gpu(x) = fmap(gpu_adaptor, x)
gpu(x) = use_cuda[] ? fmap(CuArrays.cu, x) : x
# Precision

View File

@ -1,5 +1,5 @@
gate(h, n) = (1:h) .+ h*(n-1)
gate(x::AbstractVector, h, n) = x[gate(h,n)]
gate(x::AbstractVector, h, n) = @view x[gate(h,n)]
gate(x::AbstractMatrix, h, n) = x[gate(h,n),:]
# Stateful recurrence

View File

@ -4,10 +4,20 @@ using NNlib: logsoftmax, logσ
mse(ŷ, y) = sum((ŷ .- y).^2) * 1 // length(y)
function crossentropy(ŷ::AbstractVecOrMat, y::AbstractVecOrMat; weight = 1)
-sum(y .* log.(ŷ) .* weight) * 1 // size(y, 2)
function _crossentropy(ŷ::AbstractVecOrMat, y::AbstractVecOrMat, weight::Nothing)
return -sum(y .* log.(ŷ)) * 1 // size(y, 2)
end
function _crossentropy(ŷ::AbstractVecOrMat, y::AbstractVecOrMat, weight::Number)
return -sum(y .* log.(ŷ)) .* weight * 1 // size(y, 2)
end
function _crossentropy(ŷ::AbstractVecOrMat, y::AbstractVecOrMat, weight::AbstractVector)
return -sum(y .* log.(ŷ) .* weight) * 1 // size(y, 2)
end
crossentropy(ŷ::AbstractVecOrMat, y::AbstractVecOrMat; weight=nothing) = _crossentropy(ŷ, y, weight)
function logitcrossentropy(logŷ::AbstractVecOrMat, y::AbstractVecOrMat; weight = 1)
return -sum(y .* logsoftmax(logŷ) .* weight) * 1 // size(y, 2)
end
@ -42,7 +52,25 @@ logitbinarycrossentropy(logŷ, y) = (1 - y)*logŷ - logσ(logŷ)
"""
normalise(x::AbstractArray; dims=1)
Normalises x to mean 0 and standard deviation 1, across the dimensions given by dims. Defaults to normalising over columns.
Normalises `x` to mean 0 and standard deviation 1, across the dimensions given by `dims`. Defaults to normalising over columns.
julia> a = reshape(collect(1:9), 3, 3)
3×3 Array{Int64,2}:
1 4 7
2 5 8
3 6 9
julia> normalise(a)
3×3 Array{Float64,2}:
-1.22474 -1.22474 -1.22474
0.0 0.0 0.0
1.22474 1.22474 1.22474
julia> normalise(a, dims=2)
3×3 Array{Float64,2}:
-1.22474 0.0 1.22474
-1.22474 0.0 1.22474
-1.22474 0.0 1.22474
"""
function normalise(x::AbstractArray; dims=1)
μ′ = mean(x, dims = dims)

View File

@ -37,12 +37,10 @@ import Adapt: adapt, adapt_structure
adapt_structure(T, xs::OneHotMatrix) = OneHotMatrix(xs.height, adapt(T, xs.data))
if use_cuda
import .CuArrays: CuArray, cudaconvert
import Base.Broadcast: BroadcastStyle, ArrayStyle
BroadcastStyle(::Type{<:OneHotMatrix{<:CuArray}}) = ArrayStyle{CuArray}()
cudaconvert(x::OneHotMatrix{<:CuArray}) = OneHotMatrix(x.height, cudaconvert(x.data))
end
import .CuArrays: CuArray, cudaconvert
import Base.Broadcast: BroadcastStyle, ArrayStyle
BroadcastStyle(::Type{<:OneHotMatrix{<:CuArray}}) = ArrayStyle{CuArray}()
cudaconvert(x::OneHotMatrix{<:CuArray}) = OneHotMatrix(x.height, cudaconvert(x.data))
"""
onehot(l, labels[, unk])

View File

@ -7,10 +7,28 @@ const ϵ = 1e-8
# TODO: should use weak refs
"""
Descent(η)
Descent(η)
Classic gradient descent optimiser with learning rate `η`.
For each parameter `p` and its gradient `δp`, this runs `p -= η*δp`.
For each parameter `p` and its gradient `δp`, this runs `p -= η*δp`
## Parameters
- Learning Rate (η): The amount by which the gradients are discounted before updating the weights. Defaults to `0.1`.
## Example
```julia-repl
opt = Descent() # uses default η (0.1)
opt = Descent(0.3) # use provided η
ps = params(model)
gs = gradient(ps) do
loss(x, y)
end
Flux.Optimise.update!(opt, ps, gs)
```
"""
mutable struct Descent
eta::Float64
@ -23,9 +41,20 @@ function apply!(o::Descent, x, Δ)
end
"""
Momentum(η = 0.01; ρ = 0.9)
Momentum(η, ρ)
Gradient descent with learning rate `η` and momentum `ρ`.
## Parameters
- Learning Rate (`η`): Amount by which gradients are discounted before updating the weights. Defaults to `0.01`.
- Momentum (`ρ`): Parameter that accelerates descent in the relevant direction and dampens oscillations. Defaults to `0.9`.
## Examples
```julia
opt = Momentum() # uses defaults of η = 0.01 and ρ = 0.9
opt = Momentum(0.01, 0.99)
```
"""
mutable struct Momentum
eta::Float64
@ -43,9 +72,20 @@ function apply!(o::Momentum, x, Δ)
end
"""
Nesterov(eta, ρ = 0.9)
Nesterov(η, ρ)
Gradient descent with learning rate `η` and Nesterov momentum `ρ`.
## Parameters
- Learning Rate (η): Amount by which the gradients are discounted before updating the weights. Defaults to `0.001`.
- Nesterov Momentum (ρ): Parameter controlling the amount of Nesterov momentum to be applied. Defaults to `0.9`.
## Examples
```julia
opt = Nesterov() # uses defaults η = 0.001 and ρ = 0.9
opt = Nesterov(0.003, 0.95)
```
"""
mutable struct Nesterov
eta::Float64
@ -64,11 +104,23 @@ function apply!(o::Nesterov, x, Δ)
end
"""
RMSProp(η = 0.001, ρ = 0.9)
RMSProp(η, ρ)
Implements the RMSProp algorithm. Often a good choice for recurrent networks. Parameters other than the learning rate generally don't need tuning.
## Parameters
- Learning Rate (η): Defaults to `0.001`.
- Rho (ρ): Defaults to `0.9`.
## Examples
```julia
opt = RMSProp() # uses default η = 0.001 and ρ = 0.9
opt = RMSProp(0.002, 0.95)
```
## References
[RMSProp](https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
optimiser. Parameters other than learning rate don't need tuning. Often a good
choice for recurrent networks.
"""
mutable struct RMSProp
eta::Float64
@ -86,8 +138,22 @@ function apply!(o::RMSProp, x, Δ)
end
"""
ADAM(η = 0.001, β = (0.9, 0.999))
ADAM(η, β::Tuple)
Implements the ADAM optimiser.
## Parameters
- Learning Rate (`η`): Defaults to `0.001`.
- Beta (`β::Tuple`): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`.
## Examples
```julia
opt = ADAM() # uses the default η = 0.001 and β = (0.9, 0.999)
opt = ADAM(0.001, (0.9, 0.8))
```
## References
[ADAM](https://arxiv.org/abs/1412.6980v8) optimiser.
"""
mutable struct ADAM
@ -109,8 +175,23 @@ function apply!(o::ADAM, x, Δ)
end
"""
RADAM(η = 0.001, β = (0.9, 0.999))
RADAM(η, β::Tuple)
Implements the rectified ADAM optimiser.
## Parameters
- Learning Rate (η): Defaults to `0.001`
- Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`.
## Examples
```julia
opt = RADAM() # uses the default η = 0.001 and β = (0.9, 0.999)
opt = RADAM(0.001, (0.9, 0.8))
```
## References
[RADAM](https://arxiv.org/pdf/1908.03265v1.pdf) optimiser (Rectified ADAM).
"""
mutable struct RADAM
@ -139,10 +220,22 @@ function apply!(o::RADAM, x, Δ)
end
"""
AdaMax(params, η = 0.001; β1 = 0.9, β2 = 0.999, ϵ = 1e-08)
AdaMax(η, β::Tuple)
[AdaMax](https://arxiv.org/abs/1412.6980v9) optimiser. Variant of ADAM based on
the ∞-norm.
Variant of ADAM based on the ∞-norm.
## Parameters
- Learning Rate (η): Defaults to `0.001`
- Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`.
## Examples
```julia
opt = AdaMax() # uses default η and β
opt = AdaMax(0.001, (0.9, 0.995))
```
## References
[AdaMax](https://arxiv.org/abs/1412.6980v9) optimiser.
"""
mutable struct AdaMax
eta::Float64
@ -163,8 +256,21 @@ function apply!(o::AdaMax, x, Δ)
end
"""
ADAGrad(η = 0.1; ϵ = 1e-8)
ADAGrad(η)
Implements ADAGrad. It has parameter-specific learning rates based on how frequently each parameter is updated.
## Parameters
- Learning Rate (η): Defaults to `0.1`
## Examples
```julia
opt = ADAGrad() # uses default η = 0.1
opt = ADAGrad(0.001)
```
## References
[ADAGrad](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) optimiser.
Parameters don't need tuning.
"""
@ -183,10 +289,21 @@ function apply!(o::ADAGrad, x, Δ)
end
"""
ADADelta(ρ = 0.9, ϵ = 1e-8)
ADADelta(ρ)
[ADADelta](https://arxiv.org/abs/1212.5701) optimiser. Parameters don't need
tuning.
Version of ADAGrad that adapts learning rate based on a window of past gradient updates. Parameters don't need tuning.
## Parameters
- Rho (ρ): Factor by which gradient is decayed at each time step. Defaults to `0.9`.
## Examples
```julia
opt = ADADelta() # uses default ρ = 0.9
opt = ADADelta(0.89)
```
## References
[ADADelta](https://arxiv.org/abs/1212.5701) optimiser.
"""
mutable struct ADADelta
rho::Float64
@ -205,10 +322,22 @@ function apply!(o::ADADelta, x, Δ)
end
"""
AMSGrad(η = 0.001, β = (0.9, 0.999))
AMSGrad(η, β::Tuple)
[AMSGrad](https://openreview.net/forum?id=ryQu7f-RZ) optimiser. Parameters don't need
tuning.
Implements the AMSGrad version of the ADAM optimiser. Parameters don't need tuning.
## Parameters
- Learning Rate (η): Defaults to `0.001`.
- Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`.
## Examples
```julia
opt = AMSGrad() # uses default η and β
opt = AMSGrad(0.001, (0.89, 0.995))
```
## References
[AMSGrad](https://openreview.net/forum?id=ryQu7f-RZ) optimiser.
"""
mutable struct AMSGrad
eta::Float64
@ -228,10 +357,22 @@ function apply!(o::AMSGrad, x, Δ)
end
"""
NADAM(η = 0.001, β = (0.9, 0.999))
NADAM(η, β::Tuple)
[NADAM](http://cs229.stanford.edu/proj2015/054_report.pdf) optimiser. Parameters don't need
tuning.
Nesterov variant of ADAM. Parameters don't need tuning.
## Parameters
- Learning Rate (η): Defaults to `0.001`.
- Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to `(0.9, 0.999)`.
## Examples
```julia
opt = NADAM() # uses default η and β
opt = NADAM(0.002, (0.89, 0.995))
```
## References
[NADAM](http://cs229.stanford.edu/proj2015/054_report.pdf) optimiser.
"""
mutable struct NADAM
eta::Float64
@ -252,9 +393,23 @@ function apply!(o::NADAM, x, Δ)
end
"""
ADAMW((η = 0.001, β = (0.9, 0.999), decay = 0)
ADAMW(η, β::Tuple, decay)
[ADAMW](https://arxiv.org/abs/1711.05101) fixing weight decay regularization in Adam.
Variant of ADAM that fixes weight decay regularisation.
## Parameters
- Learning Rate (η): Defaults to `0.001`.
- Beta (β::Tuple): The first element refers to β1 and the second to β2. Defaults to (0.9, 0.999).
- decay: Decay applied to weights during optimisation. Defaults to 0.
## Examples
```julia
opt = ADAMW() # uses default η, β and decay
opt = ADAMW(0.001, (0.89, 0.995), 0.1)
```
## References
[ADAMW](https://arxiv.org/abs/1711.05101)
"""
ADAMW(η = 0.001, β = (0.9, 0.999), decay = 0) =
Optimiser(ADAM(η, β), WeightDecay(decay))
@ -287,9 +442,14 @@ function apply!(o::Optimiser, x, Δ)
end
"""
`InvDecay(γ)`
InvDecay(γ)
Apply inverse time decay to an optimiser
Applies inverse time decay to an optimiser
## Parameters
- gamma (γ): Defaults to `0.001`
## Example
```julia
Optimiser(InvDecay(..), Opt(..))
```
@ -310,13 +470,22 @@ function apply!(o::InvDecay, x, Δ)
end
"""
`ExpDecay(eta, decay, decay_step, clip)`
ExpDecay(eta, decay, decay_step, clip)
Schedule the learning rate `eta` by `decay` every `decay_step` till a minimum of `clip`.
Discounts the learning rate `eta` by a factor of `decay` every `decay_step` steps, down to a minimum of `clip`.
## Parameters
- Learning Rate (eta): Defaults to `0.001`.
- decay: Factor by which the learning rate is discounted. Defaults to `0.1`.
- decay_step: Number of steps between two decay operations. Defaults to `1000`.
- clip: Minimum value of learning rate. Defaults to `1e-4`.
## Example
To apply exponential decay to an optimiser:
```julia
Optimiser(ExpDecay(..), Opt(..))
opt = Optimiser(ExpDecay(), ADAM())
```
"""
mutable struct ExpDecay
@ -340,9 +509,12 @@ function apply!(o::ExpDecay, x, Δ)
end
"""
`WeightDecay(wd)`
WeightDecay(wd)
Decay the weight parameter by `wd`
Decays the weights by `wd`.
## Parameters
- Weight Decay (wd): Defaults to `0`.
"""
mutable struct WeightDecay
wd::Real

View File

@ -28,6 +28,8 @@ cm = gpu(m)
x = [1,2,3]
cx = gpu(x)
@test Flux.crossentropy(x,x) ≈ Flux.crossentropy(cx,cx)
@test Flux.crossentropy(x,x, weight=1.0) ≈ Flux.crossentropy(cx,cx, weight=1.0)
@test Flux.crossentropy(x,x, weight=[1.0;2.0;3.0]) ≈ Flux.crossentropy(cx,cx, weight=cu([1.0;2.0;3.0]))
xs = rand(5, 5)
ys = Flux.onehotbatch(1:5,1:5)
@ -51,8 +53,10 @@ end
@test y[3,:] isa CuArray
end
if CuArrays.libcudnn != nothing
if CuArrays.has_cudnn()
@info "Testing Flux/CUDNN"
include("cudnn.jl")
include("curnn.jl")
else
@warn "CUDNN unavailable, not testing GPU DNN support"
end

View File

@ -19,7 +19,7 @@ include("layers/normalisation.jl")
include("layers/stateless.jl")
include("layers/conv.jl")
if isdefined(Flux, :CUDA)
if Flux.use_cuda[]
include("cuda/cuda.jl")
else
@warn "CUDA unavailable, not testing GPU support"