handle state in training

Mike J Innes 2016-10-30 00:24:29 +01:00
parent 605e3a9363
commit 1761e43bc4

@@ -44,15 +44,17 @@ end
 function Flux.train!(m::SeqModel, train; epoch = 1, η = 0.1,
                      loss = (y, ŷ) -> reduce_sum((y - ŷ).^2)/2,
                      opt = TensorFlow.train.GradientDescentOptimizer(η))
+  state = batchone.(m.m.model.state)
   Y = placeholder(Float32)
   Loss = loss(m.m.output[end], Y)
   minimize_op = TensorFlow.train.minimize(opt, Loss)
   for e in 1:epoch
     info("Epoch $e\n")
     @progress for (x, y) in train
-      y, cur_loss, _ = run(m.m.session, vcat(m.m.output[end], Loss, minimize_op),
-                           merge(Dict(m.m.inputs[end]=>x, Y=>y),
-                                 Dict(zip(m.m.inputs[1:end-1], m.state))))
+      out = run(m.m.session, vcat(m.m.output..., Loss, minimize_op),
+                merge(Dict(m.m.inputs[end]=>batchone(x), Y=>batchone(y)),
+                      Dict(zip(m.m.inputs[1:end-1], state))))
+      state = out[1:length(state)]
     end
   end
 end
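
The diff above keeps the recurrent hidden state live across training batches: the session run now fetches every output (including the next-state tensors), and the returned state slices are fed back in as inputs on the following iteration instead of reusing the fixed m.state. Below is a minimal Julia sketch of that state-threading pattern only; train_stateful! and step! are hypothetical names standing in for the TensorFlow session run, not part of the Flux API.

# Sketch of the pattern in the commit: carry the state returned by one
# training step into the next, rather than feeding a fixed initial state.
# `step!(state, x, y)` is assumed to return `(new_state, loss)`.
function train_stateful!(step!, init_state, data; epochs = 1)
  state = init_state
  for e in 1:epochs
    for (x, y) in data
      state, l = step!(state, x, y)   # feed updated state into the next batch
    end
  end
  return state
end

In the TensorFlow backend, step! would correspond to the run(m.m.session, ...) call: it feeds the current state dictionary alongside the batch, and the new state is recovered from the first length(state) fetched outputs, mirroring state = out[1:length(state)] above.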