From 1761e43bc460fead3f22ef7f6a166b8080441f1a Mon Sep 17 00:00:00 2001
From: Mike J Innes
Date: Sun, 30 Oct 2016 00:24:29 +0100
Subject: [PATCH] handle state in training

---
 src/backend/tensorflow/recurrent.jl | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/backend/tensorflow/recurrent.jl b/src/backend/tensorflow/recurrent.jl
index 269d4d00..61e3c23d 100644
--- a/src/backend/tensorflow/recurrent.jl
+++ b/src/backend/tensorflow/recurrent.jl
@@ -44,15 +44,17 @@ end
 function Flux.train!(m::SeqModel, train; epoch = 1, η = 0.1,
                      loss = (y, y′) -> reduce_sum((y - y′).^2)/2,
                      opt = TensorFlow.train.GradientDescentOptimizer(η))
+  state = batchone.(m.m.model.state)
   Y = placeholder(Float32)
   Loss = loss(m.m.output[end], Y)
   minimize_op = TensorFlow.train.minimize(opt, Loss)
   for e in 1:epoch
     info("Epoch $e\n")
     @progress for (x, y) in train
-      y, cur_loss, _ = run(m.m.session, vcat(m.m.output[end], Loss, minimize_op),
-                           merge(Dict(m.m.inputs[end]=>x, Y=>y),
-                                 Dict(zip(m.m.inputs[1:end-1], m.state))))
+      out = run(m.m.session, vcat(m.m.output..., Loss, minimize_op),
+                merge(Dict(m.m.inputs[end]=>batchone(x), Y=>batchone(y)),
+                      Dict(zip(m.m.inputs[1:end-1], state))))
+      state = out[1:length(state)]
     end
   end
 end
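
The core change is that the recurrent state returned by each training step is fed back in as the initial state for the next batch, rather than reusing a fixed `m.state` on every iteration. Below is a minimal, framework-free sketch of that pattern in plain Julia; `ToyRNN`, `step`, and `train_statefully!` are hypothetical stand-ins for illustration, not Flux's `SeqModel` or the TensorFlow backend.

```julia
# Toy RNN step: new_state = tanh.(W*x .+ U*state); the output is the new state.
struct ToyRNN
  W::Matrix{Float64}
  U::Matrix{Float64}
end

step(m::ToyRNN, state, x) = tanh.(m.W * x .+ m.U * state)

function train_statefully!(m::ToyRNN, data; state = zeros(size(m.U, 1)))
  for (x, y) in data
    out = step(m, state, x)           # run one step on the current batch
    loss = sum(abs2, out .- y) / 2    # same squared-error loss as in the patch
    # (a real implementation would also update m.W and m.U here)
    state = out                       # carry the state into the next batch
  end
  return state
end

# Usage: the state persists across the whole stream of batches.
m = ToyRNN(randn(3, 2), randn(3, 3))
data = [(randn(2), randn(3)) for _ in 1:5]
final_state = train_statefully!(m, data)
```

In the patch itself the same hand-off happens via the session: all of `m.m.output...` are fetched so the returned state tensors can be sliced off with `out[1:length(state)]` and fed back through `m.m.inputs[1:end-1]` on the next iteration.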