diff --git a/README.md b/README.md
index 94110087..4e06793e 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@

-[![Build Status](https://travis-ci.org/FluxML/Flux.jl.svg?branch=master)](https://travis-ci.org/FluxML/Flux.jl) [![](https://img.shields.io/badge/docs-stable-blue.svg)](https://fluxml.github.io/Flux.jl/stable/) [![](https://img.shields.io/badge/chat-on%20slack-yellow.svg)](https://slackinvite.julialang.org/) [![DOI](http://joss.theoj.org/papers/10.21105/joss.00602/status.svg)](https://doi.org/10.21105/joss.00602)
+[![Build Status](https://travis-ci.org/FluxML/Flux.jl.svg?branch=master)](https://travis-ci.org/FluxML/Flux.jl) [![](https://img.shields.io/badge/docs-stable-blue.svg)](https://fluxml.github.io/Flux.jl/stable/) [![](https://img.shields.io/badge/chat-on%20slack-yellow.svg)](https://slackinvite.julialang.org/) [![DOI](https://joss.theoj.org/papers/10.21105/joss.00602/status.svg)](https://doi.org/10.21105/joss.00602)
 
 Flux is an elegant approach to machine learning. It's a 100% pure-Julia stack, and provides lightweight abstractions on top of Julia's native GPU and AD support. Flux makes the easy things easy while remaining fully hackable.
 
@@ -10,7 +10,7 @@ Flux is an elegant approach to machine learning. It's a 100% pure-Julia stack, a
 julia> Pkg.add("Flux")
 ```
 
-See the [documentation](http://fluxml.github.io/Flux.jl/) or the [model zoo](https://github.com/FluxML/model-zoo/) for examples.
+See the [documentation](https://fluxml.github.io/Flux.jl/) or the [model zoo](https://github.com/FluxML/model-zoo/) for examples.
 
 If you use Flux in research, please cite the following paper:
 
diff --git a/paper/paper.bib b/paper/paper.bib
index c8e6dbd5..d1e2995a 100644
--- a/paper/paper.bib
+++ b/paper/paper.bib
@@ -14,7 +14,7 @@
   journal = {arXiv},
   volume = {abs/1712.03112},
   year = {2017},
-  url = {http://arxiv.org/abs/1712.03112},
+  url = {https://arxiv.org/abs/1712.03112},
 }
 
 @online{MLPL,
@@ -29,7 +29,7 @@
   author = {Mike Innes and others},
   title = {Generic GPU Kernels},
   year = 2017,
-  url = {http://mikeinnes.github.io/2017/08/24/cudanative.html},
+  url = {https://mikeinnes.github.io/2017/08/24/cudanative.html},
   urldate = {2018-02-16}
 }
 
diff --git a/src/data/cmudict.jl b/src/data/cmudict.jl
index f89ded4f..e6266540 100644
--- a/src/data/cmudict.jl
+++ b/src/data/cmudict.jl
@@ -19,7 +19,7 @@ function load()
   @info "Downloading CMUDict dataset"
   mkpath(deps("cmudict"))
   for (x, hash) in suffixes_and_hashes
-    download_and_verify("$cache_prefix/http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-$version$x",
+    download_and_verify("$cache_prefix/https://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-$version$x",
                         deps("cmudict", "cmudict$x"), hash)
   end
 end
diff --git a/src/data/iris.jl b/src/data/iris.jl
index 789ea696..3da90330 100644
--- a/src/data/iris.jl
+++ b/src/data/iris.jl
@@ -26,7 +26,7 @@ function load()
   isfile(deps("iris.data")) && return
 
   @info "Downloading iris dataset."
-  download_and_verify("$(cache_prefix)http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
+  download_and_verify("$(cache_prefix)https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
                       deps("iris.data"),
                       "6f608b71a7317216319b4d27b4d9bc84e6abd734eda7872b71a458569e2656c0")
 end
diff --git a/src/layers/recurrent.jl b/src/layers/recurrent.jl
index 4e23e9ee..61bbec4e 100644
--- a/src/layers/recurrent.jl
+++ b/src/layers/recurrent.jl
@@ -153,7 +153,7 @@ Base.show(io::IO, l::LSTMCell) =
 Long Short Term Memory recurrent layer. Behaves like an RNN but generally
 exhibits a longer memory span over sequences.
 
-See [this article](http://colah.github.io/posts/2015-08-Understanding-LSTMs/)
+See [this article](https://colah.github.io/posts/2015-08-Understanding-LSTMs/)
 for a good overview of the internals.
 """
 LSTM(a...; ka...) = Recur(LSTMCell(a...; ka...))
@@ -194,7 +194,7 @@ Base.show(io::IO, l::GRUCell) =
 Gated Recurrent Unit layer. Behaves like an RNN but generally
 exhibits a longer memory span over sequences.
 
-See [this article](http://colah.github.io/posts/2015-08-Understanding-LSTMs/)
+See [this article](https://colah.github.io/posts/2015-08-Understanding-LSTMs/)
 for a good overview of the internals.
 """
 GRU(a...; ka...) = Recur(GRUCell(a...; ka...))
diff --git a/src/optimise/optimisers.jl b/src/optimise/optimisers.jl
index 40b8fd33..ec7c8aa6 100644
--- a/src/optimise/optimisers.jl
+++ b/src/optimise/optimisers.jl
@@ -66,7 +66,7 @@ end
 """
     RMSProp(η = 0.001, ρ = 0.9)
 
-[RMSProp](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
+[RMSProp](https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
 optimiser. Parameters other than learning rate don't need tuning. Often a good
 choice for recurrent networks.
 """
@@ -155,7 +155,7 @@ end
 """
     ADADelta(ρ = 0.9, ϵ = 1e-8)
 
-[ADADelta](http://arxiv.org/abs/1212.5701) optimiser. Parameters don't need
+[ADADelta](https://arxiv.org/abs/1212.5701) optimiser. Parameters don't need
 tuning.
 """
 mutable struct ADADelta
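
This patch is a mechanical http-to-https migration of the links above; no code paths change. A quick, hypothetical smoke test (not part of the patch, and assuming network access) is to fetch each migrated endpoint with Base's `download`, which throws on failure:

```julia
# Hypothetical smoke test for the http -> https migration; not part of the
# patch. Base.download raises an error if an endpoint fails to resolve.
urls = [
    "https://fluxml.github.io/Flux.jl/stable/",
    "https://arxiv.org/abs/1712.03112",
    "https://colah.github.io/posts/2015-08-Understanding-LSTMs/",
    "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
]

for url in urls
    download(url, tempname())  # fetch to a throwaway file
    println("ok: $url")
end
```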
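
Both data loaders touched here (src/data/cmudict.jl and src/data/iris.jl) route downloads through Flux's internal `download_and_verify(url, path, hash)` helper, so flipping the URL scheme does not weaken integrity checking: each file is still pinned to a SHA-256 digest. For readers outside the codebase, here is a rough sketch of what such a helper does; `fetch_verified` is a hypothetical name, not Flux's implementation, and the checksum uses the SHA standard library:

```julia
using SHA  # standard library; provides sha256

# Hypothetical stand-in for Flux's internal download_and_verify helper:
# fetch `url` into `path`, then insist the file matches the expected
# SHA-256 hex digest, as the cmudict and iris loaders do.
function fetch_verified(url, path, hash)
    isfile(path) || download(url, path)
    digest = open(io -> bytes2hex(sha256(io)), path)
    digest == hash || error("checksum mismatch for $path: got $digest")
    return path
end
```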
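
The remaining hunks only retouch docstring links in src/layers/recurrent.jl and src/optimise/optimisers.jl. As those docstrings note, `LSTM` and `GRU` wrap a cell in `Recur`, so they keep hidden state between calls, and the optimisers construct with the defaults shown. A minimal usage sketch, assuming a Flux of this vintage is installed:

```julia
using Flux

lstm = LSTM(10, 5)   # Recur(LSTMCell(...)): stateful across calls
gru  = GRU(10, 5)    # Recur(GRUCell(...)): likewise stateful

xs = [rand(10) for _ in 1:4]    # a toy four-step input sequence
ys = [lstm(x) for x in xs]      # hidden state carries over between steps
Flux.reset!(lstm)               # clear state before the next sequence

opt = RMSProp()   # defaults η = 0.001, ρ = 0.9, per the docstring above
```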