From 263a3248f60706d5cfdfb718a8680f15f46bac42 Mon Sep 17 00:00:00 2001
From: David Ellison
Date: Mon, 11 Mar 2019 19:52:05 -0700
Subject: [PATCH 1/4] add Flux.stop to training docs

---
 docs/src/training/training.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/docs/src/training/training.md b/docs/src/training/training.md
index ae483783..679bbd0b 100644
--- a/docs/src/training/training.md
+++ b/docs/src/training/training.md
@@ -93,3 +93,11 @@ evalcb() = @show(loss(test_x, test_y))
 Flux.train!(objective, ps, data, opt,
             cb = throttle(evalcb, 5))
 ```
+
+Calling `Flux.stop()` in a callback will exit the training loop early.
+
+```julia
+cb = function ()
+  accuracy() > 0.9 && Flux.stop()
+end
+```

From f0cc4a328d8e8e1e79485505b72440f759f75d45 Mon Sep 17 00:00:00 2001
From: Lyndon White
Date: Mon, 25 Mar 2019 16:02:46 +0000
Subject: [PATCH 2/4] make Maxout trainable

---
 src/layers/basic.jl  | 2 ++
 test/layers/basic.jl | 6 ++++++
 2 files changed, 8 insertions(+)

diff --git a/src/layers/basic.jl b/src/layers/basic.jl
index b39a0de2..005915bb 100644
--- a/src/layers/basic.jl
+++ b/src/layers/basic.jl
@@ -167,6 +167,8 @@ function Maxout(f, n_alts)
   return Maxout(over)
 end
 
+@treelike Maxout
+
 function (mo::Maxout)(input::AbstractArray)
   mapreduce(f -> f(input), (acc, out) -> max.(acc, out), mo.over)
 end
diff --git a/test/layers/basic.jl b/test/layers/basic.jl
index 3a3b1695..3c5229f4 100644
--- a/test/layers/basic.jl
+++ b/test/layers/basic.jl
@@ -53,5 +53,11 @@ using Test, Random
       target = [0.5, 0.7].*input
       @test mo(input) == target
     end
+
+    @testset "params" begin
+      mo = Maxout(()->Dense(32, 64), 4)
+      ps = params(mo)
+      @test length(ps) == 8 #4 alts, each with weight and bias
+    end
   end
 end

From cd3926755a4b2e91eb72acbf487a7ef66f219929 Mon Sep 17 00:00:00 2001
From: Lyndon White
Date: Mon, 25 Mar 2019 16:13:11 +0000
Subject: [PATCH 3/4] add Maxout news item

---
 NEWS.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/NEWS.md b/NEWS.md
index 4cf755e7..681e701f 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,6 +1,7 @@
 # v0.8.0
 
 * New [ConvTranspose layer](https://github.com/FluxML/Flux.jl/pull/311).
+* New [Maxout layer](https://github.com/FluxML/Flux.jl/pull/647)
 * Datasets are now [hash verified on download](https://github.com/FluxML/Flux.jl/pull/585) to avoid corruption.
 * We now [zero the initial state for RNNs](https://github.com/FluxML/Flux.jl/pull/590/).
 * [Normalisation can now work on arbitrary `dims`.](https://github.com/FluxML/Flux.jl/pull/592)

From b5a6207350fc0be7526c741316050bb30486af0b Mon Sep 17 00:00:00 2001
From: Dhairya Gandhi
Date: Tue, 26 Mar 2019 18:49:23 +0530
Subject: [PATCH 4/4] add initial GPU CI conf

---
 .gitlab-ci.yml | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 .gitlab-ci.yml

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 00000000..bf16f8ed
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,10 @@
+variables:
+  CI_IMAGE_TAG: 'cuda'
+
+include:
+  - 'https://raw.githubusercontent.com/JuliaGPU/gitlab-ci/master/templates/v1/common.yml'
+  - 'https://raw.githubusercontent.com/JuliaGPU/gitlab-ci/master/templates/v1/test_v1.0.yml'
+  - 'https://raw.githubusercontent.com/JuliaGPU/gitlab-ci/master/templates/v1/test_dev.yml'
+
+test:dev:
+  allow_failure: true
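
Note on PATCH 1/4: a minimal self-contained sketch of the early-stopping pattern the new docs describe. The model, data, optimiser, and `accuracy` helper below are hypothetical stand-ins, not part of the patch; only `Flux.stop()` and the callback pattern come from the documented API.

```julia
using Flux
using Flux: throttle

# Hypothetical stand-ins for this sketch -- any model, data, and
# validation metric would do.
m = Dense(10, 2)
loss(x, y) = Flux.mse(m(x), y)
data = [(rand(10), rand(2)) for _ in 1:100]
opt = ADAM()
accuracy() = rand()  # replace with a real validation metric

# Per the patch: calling Flux.stop() inside a callback exits the
# training loop early, here once accuracy clears 0.9.
cb = function ()
  accuracy() > 0.9 && Flux.stop()
end

Flux.train!(loss, params(m), data, opt, cb = throttle(cb, 5))
```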
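Note on PATCH 2/4: what `@treelike Maxout` buys in practice is that `params` now recurses into each alternative, making the layer's weights visible to optimisers. A short sketch mirroring the new test, with illustrative dimensions:

```julia
using Flux

# Four Dense(32, 64) alternatives; Maxout takes the element-wise
# max over their outputs.
mo = Maxout(() -> Dense(32, 64), 4)

ps = Flux.params(mo)
@assert length(ps) == 8  # 4 alternatives, each with a weight and a bias

# Because the layer is now treelike, it trains like any other:
loss(x, y) = Flux.mse(mo(x), y)
Flux.train!(loss, ps, [(rand(32), rand(64))], ADAM())
```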