Merge branch 'master' of https://github.com/FluxML/Flux.jl
commit 35431e3da9
.gitlab-ci.yml  (new file, 10 additions)
@@ -0,0 +1,10 @@
+variables:
+  CI_IMAGE_TAG: 'cuda'
+
+include:
+  - 'https://raw.githubusercontent.com/JuliaGPU/gitlab-ci/master/templates/v1/common.yml'
+  - 'https://raw.githubusercontent.com/JuliaGPU/gitlab-ci/master/templates/v1/test_v1.0.yml'
+  - 'https://raw.githubusercontent.com/JuliaGPU/gitlab-ci/master/templates/v1/test_dev.yml'
+
+test:dev:
+  allow_failure: true
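Reading the new file: it pins the CI image to the CUDA-enabled tag and pulls in the shared JuliaGPU GitLab templates, so the package is tested on both Julia 1.0 and the development branch of Julia, with the dev-Julia job allowed to fail without breaking the pipeline.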
NEWS.md  (1 addition)
@@ -1,6 +1,7 @@
 # v0.8.0
 
 * New [ConvTranspose layer](https://github.com/FluxML/Flux.jl/pull/311).
+* New [Maxout layer](https://github.com/FluxML/Flux.jl/pull/647)
 * Datasets are now [hash verified on download](https://github.com/FluxML/Flux.jl/pull/585) to avoid corruption.
 * We now [zero the initial state for RNNs](https://github.com/FluxML/Flux.jl/pull/590/).
 * [Normalisation can now work on arbitrary `dims`.](https://github.com/FluxML/Flux.jl/pull/592)
docs/src/training/training.md  (8 additions)

@@ -93,3 +93,11 @@ evalcb() = @show(loss(test_x, test_y))
 Flux.train!(objective, ps, data, opt,
             cb = throttle(evalcb, 5))
 ```
+
+Calling `Flux.stop()` in a callback will exit the training loop early.
+
+```julia
+cb = function ()
+  accuracy() > 0.9 && Flux.stop()
+end
+```
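The added documentation snippet is not self-contained, so here is a minimal runnable sketch of the same early-stopping pattern against the Flux API of this era (v0.8); the model `m`, the `data` iterator, and the `accuracy` stand-in are hypothetical, introduced only for illustration:

```julia
using Flux
using Flux: throttle

m = Dense(10, 1)                           # hypothetical model
loss(x, y) = Flux.mse(m(x), y)
data = Iterators.repeated((rand(10, 5), rand(1, 5)), 100)
opt = Descent(0.1)

accuracy() = rand()                        # stand-in for a real validation metric

# Flux.stop() signals train! to break out of the training loop early.
cb = function ()
  accuracy() > 0.9 && Flux.stop()
end

Flux.train!(loss, params(m), data, opt, cb = throttle(cb, 5))
```

Wrapping the callback in `throttle` keeps the (possibly expensive) metric from being evaluated more than once every five seconds.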
src/layers/basic.jl  (2 additions)

@@ -167,6 +167,8 @@ function Maxout(f, n_alts)
   return Maxout(over)
 end
 
+@treelike Maxout
+
 function (mo::Maxout)(input::AbstractArray)
   mapreduce(f -> f(input), (acc, out) -> max.(acc, out), mo.over)
 end
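Since the diff shows only the tail of the implementation, a short usage sketch may help; the layer sizes here are illustrative:

```julia
using Flux

# Four Dense alternatives; the forward pass returns the element-wise
# maximum of their outputs (Goodfellow et al., 2013).
mo = Maxout(() -> Dense(32, 64), 4)

x = rand(32, 16)   # 32 features, batch of 16
y = mo(x)          # max over the 4 Dense outputs, element-wise
size(y)            # (64, 16)
```

The `@treelike` annotation added above is what lets Flux walk into the wrapped layers, so that `params`, `gpu`, and friends see the parameters of all four alternatives.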
test/layers/basic.jl  (6 additions)

@@ -53,5 +53,11 @@ using Test, Random
       target = [0.5, 0.7].*input
       @test mo(input) == target
     end
+
+    @testset "params" begin
+      mo = Maxout(()->Dense(32, 64), 4)
+      ps = params(mo)
+      @test length(ps) == 8 #4 alts, each with weight and bias
+    end
   end
 end
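The expected count of 8 in the new test follows from each `Dense(32, 64)` holding two parameter arrays (a weight matrix and a bias vector), so four alternatives contribute 4 × 2 = 8. A hypothetical REPL check:

```julia
julia> using Flux

julia> mo = Maxout(() -> Dense(32, 64), 4);

julia> length(params(mo))
8
```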