From e57180301de312e61e3be931a892c6afa5a88edd Mon Sep 17 00:00:00 2001
From: autodocs
Date: Wed, 1 Mar 2017 01:37:12 +0000
Subject: [PATCH] build based on aa17017
---
latest/apis/backends.html | 2 +-
latest/apis/batching.html | 2 +-
latest/apis/storage.html | 2 +-
latest/contributing.html | 2 +-
latest/examples/char-rnn.html | 31 ++++++++++++++++++++++++++++++-
latest/examples/logreg.html | 2 +-
latest/index.html | 2 +-
latest/internals.html | 2 +-
latest/models/basics.html | 2 +-
latest/models/debugging.html | 2 +-
latest/models/recurrent.html | 2 +-
latest/models/templates.html | 2 +-
latest/search_index.js | 2 +-
13 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/latest/apis/backends.html b/latest/apis/backends.html
index a665c8da..5fd0fbef 100644
--- a/latest/apis/backends.html
+++ b/latest/apis/backends.html
@@ -150,7 +150,7 @@ Backends
-
+
diff --git a/latest/apis/batching.html b/latest/apis/batching.html
index 387e62e6..fb258c13 100644
--- a/latest/apis/batching.html
+++ b/latest/apis/batching.html
@@ -155,7 +155,7 @@ Batching
-
+
diff --git a/latest/apis/storage.html b/latest/apis/storage.html
index 0ae22814..42803f7d 100644
--- a/latest/apis/storage.html
+++ b/latest/apis/storage.html
@@ -139,7 +139,7 @@ Storing Models
-
+
diff --git a/latest/contributing.html b/latest/contributing.html
index db8d830e..750c452f 100644
--- a/latest/contributing.html
+++ b/latest/contributing.html
@@ -136,7 +136,7 @@ Contributing & Help
-
+
diff --git a/latest/examples/char-rnn.html b/latest/examples/char-rnn.html
index 8d53761a..982319ee 100644
--- a/latest/examples/char-rnn.html
+++ b/latest/examples/char-rnn.html
@@ -139,7 +139,7 @@ Char RNN
-
+
@@ -216,6 +216,35 @@ sample(model[1:end-1], 100)
sample then produces a string of Shakespeare-like text. This won't produce great results after only a single epoch (though they will be recognisably different from the untrained model). Going for 30 epochs or so produces good results.
+
+Trained on a dataset from base Julia, the network can produce code like:
+
+function show(io::IO, md::Githompty)
+ Buffer(jowerTriangular(inals[i], initabs_indices), characters, side, nextfloat(typeof(x)))
+ isnull(r) && return
+ start::I!
+ for j = 1:length(b,1)
+ a = s->cosvect(code)
+ return
+ end
+ indsERenv | maximum(func,lsg))
+ for i = 1:last(Abjelar) && fname (=== nothing)
+ throw(ArgumentError("read is declave non-fast-a/remaining of not descride method names"))
+ end
+ if e.ht === Int
+ # update file to a stroducative, but is decould.
+ # xna i -GB =# [unsafe_color <c *has may num 20<11E 16/s
+ tuple | Expr(:(UnitLowerTriangular(transpose,(repl.ptr)))
+ dims = pipe_read(s,Int(a)...)
+ ex,0 + y.uilid_func & find_finwprevend(msg,:2)
+ ex = stage(c)
+ # uvvalue begin
+ end
+end
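The same pipeline works for any text corpus: to reproduce the sample above, point the data-loading step at Julia source instead of shakespeare_input.txt. A minimal sketch, assuming a local checkout under the hypothetical path julia/base and the same Julia-0.5-era readstring API used elsewhere in this walkthrough:

# Hypothetical helper: concatenate every .jl file under dir into one
# training string, mirroring the Shakespeare setup.
function read_corpus(dir)
    files = filter(f -> endswith(f, ".jl"), readdir(dir))
    join([readstring(joinpath(dir, f)) for f in files], "\n")
end

input = read_corpus("julia/base")  # assumed path to a Julia checkout
alphabet = unique(input)           # then proceed exactly as above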
diff --git a/latest/examples/logreg.html b/latest/examples/logreg.html
index 6bdb5344..a74670a1 100644
--- a/latest/examples/logreg.html
+++ b/latest/examples/logreg.html
@@ -139,7 +139,7 @@ Logistic Regression
-
+
diff --git a/latest/index.html b/latest/index.html
index 6177bac5..14a9f357 100644
--- a/latest/index.html
+++ b/latest/index.html
@@ -147,7 +147,7 @@ Home
-
+
diff --git a/latest/internals.html b/latest/internals.html
index 8a00b02e..25f4329b 100644
--- a/latest/internals.html
+++ b/latest/internals.html
@@ -136,7 +136,7 @@ Internals
-
+
diff --git a/latest/models/basics.html b/latest/models/basics.html
index b22e7443..edb3d6bc 100644
--- a/latest/models/basics.html
+++ b/latest/models/basics.html
@@ -155,7 +155,7 @@ Model Building Basics
-
+
diff --git a/latest/models/debugging.html b/latest/models/debugging.html
index 711e815f..a3b2dff4 100644
--- a/latest/models/debugging.html
+++ b/latest/models/debugging.html
@@ -139,7 +139,7 @@ Debugging
-
+
diff --git a/latest/models/recurrent.html b/latest/models/recurrent.html
index a3d05658..2f4f1e0f 100644
--- a/latest/models/recurrent.html
+++ b/latest/models/recurrent.html
@@ -139,7 +139,7 @@ Recurrence
-
+
diff --git a/latest/models/templates.html b/latest/models/templates.html
index 71c3aab8..96489bab 100644
--- a/latest/models/templates.html
+++ b/latest/models/templates.html
@@ -155,7 +155,7 @@ Model Templates
-
+
diff --git a/latest/search_index.js b/latest/search_index.js
index 44d343ad..d2f12e98 100644
--- a/latest/search_index.js
+++ b/latest/search_index.js
@@ -261,7 +261,7 @@ var documenterSearchIndex = {"docs": [
"page": "Char RNN",
"title": "Char RNN",
"category": "section",
- "text": "This walkthrough will take you through a model like that used in Karpathy's 2015 blog post, which can learn to generate text in the style of Shakespeare (or whatever else you may use as input). shakespeare_input.txt is here.using Flux\nimport StatsBase: wsampleFirstly, we define up front how many steps we want to unroll the RNN, and the number of data points to batch together. Then we create some functions to prepare our data, using Flux's built-in utilities.nunroll = 50\nnbatch = 50\n\ngetseqs(chars, alphabet) = sequences((onehot(Float32, char, alphabet) for char in chars), nunroll)\ngetbatches(chars, alphabet) = batches((getseqs(part, alphabet) for part in chunk(chars, nbatch))...)Because we want the RNN to predict the next letter at each iteration, our target data is simply our input data offset by one. For example, if the input is \"The quick brown fox\", the target will be \"he quick brown fox \". Each letter is one-hot encoded and sequences are batched together to create the training data.input = readstring(\"shakespeare_input.txt\")\nalphabet = unique(input)\nN = length(alphabet)\n\nXs, Ys = getbatches(input, alphabet), getbatches(input[2:end], alphabet)Creating the model and training it is straightforward:model = Chain(\n Input(N),\n LSTM(N, 256),\n LSTM(256, 256),\n Affine(256, N),\n softmax)\n\nm = tf(unroll(model, nunroll))\n\n@time Flux.train!(m, Xs, Ys, η = 0.1, epoch = 1)Finally, we can sample the model. For sampling we remove the softmax from the end of the chain so that we can \"sharpen\" the resulting probabilities.function sample(model, n, temp = 1)\n s = [rand(alphabet)]\n m = tf(unroll(model, 1))\n for i = 1:n\n push!(s, wsample(alphabet, softmax(m(Seq((onehot(Float32, s[end], alphabet),)))[1]./temp)))\n end\n return string(s...)\nend\n\nsample(model[1:end-1], 100)sample then produces a string of Shakespeare-like text. This won't produce great results after only a single epoch (though they will be recognisably different from the untrained model). Going for 30 epochs or so produces good results."
+ "text": "This walkthrough will take you through a model like that used in Karpathy's 2015 blog post, which can learn to generate text in the style of Shakespeare (or whatever else you may use as input). shakespeare_input.txt is here.using Flux\nimport StatsBase: wsampleFirstly, we define up front how many steps we want to unroll the RNN, and the number of data points to batch together. Then we create some functions to prepare our data, using Flux's built-in utilities.nunroll = 50\nnbatch = 50\n\ngetseqs(chars, alphabet) = sequences((onehot(Float32, char, alphabet) for char in chars), nunroll)\ngetbatches(chars, alphabet) = batches((getseqs(part, alphabet) for part in chunk(chars, nbatch))...)Because we want the RNN to predict the next letter at each iteration, our target data is simply our input data offset by one. For example, if the input is \"The quick brown fox\", the target will be \"he quick brown fox \". Each letter is one-hot encoded and sequences are batched together to create the training data.input = readstring(\"shakespeare_input.txt\")\nalphabet = unique(input)\nN = length(alphabet)\n\nXs, Ys = getbatches(input, alphabet), getbatches(input[2:end], alphabet)Creating the model and training it is straightforward:model = Chain(\n Input(N),\n LSTM(N, 256),\n LSTM(256, 256),\n Affine(256, N),\n softmax)\n\nm = tf(unroll(model, nunroll))\n\n@time Flux.train!(m, Xs, Ys, η = 0.1, epoch = 1)Finally, we can sample the model. For sampling we remove the softmax from the end of the chain so that we can \"sharpen\" the resulting probabilities.function sample(model, n, temp = 1)\n s = [rand(alphabet)]\n m = tf(unroll(model, 1))\n for i = 1:n\n push!(s, wsample(alphabet, softmax(m(Seq((onehot(Float32, s[end], alphabet),)))[1]./temp)))\n end\n return string(s...)\nend\n\nsample(model[1:end-1], 100)sample then produces a string of Shakespeare-like text. This won't produce great results after only a single epoch (though they will be recognisably different from the untrained model). Going for 30 epochs or so produces good results.Trained on a dataset from base Julia, the network can produce code like:function show(io::IO, md::Githompty)\n Buffer(jowerTriangular(inals[i], initabs_indices), characters, side, nextfloat(typeof(x)))\n isnull(r) && return\n start::I!\n for j = 1:length(b,1)\n a = s->cosvect(code)\n return\n end\n indsERenv | maximum(func,lsg))\n for i = 1:last(Abjelar) && fname (=== nothing)\n throw(ArgumentError(\"read is declave non-fast-a/remaining of not descride method names\"))\n end\n if e.ht === Int\n # update file to a stroducative, but is decould.\n # xna i -GB =# [unsafe_color