Compare commits

...

1 Commit

Author          SHA1          Message                 Date
Mike J Innes    8205402b66    comparative examples    2018-02-14 14:58:28 +00:00
3 changed files with 252 additions and 0 deletions

examples/flux.jl (Normal file, 92 additions)

@@ -0,0 +1,92 @@
using Flux
# ------------------------------------------------------- #
# Logistic Regression from scratch #
# ------------------------------------------------------- #
W = param(zeros(10,784))
b = param(zeros(10))
pred(x) = softmax(W*x .+ b)
# Cross-entropy cost (negative log-likelihood)
cost(x, y) = -mean(sum(log.(pred(x)).*y, 1))
# See an example prediction
pred(rand(784))
# ------------------------------------------------------- #
# Custom Layer: Dense #
# ------------------------------------------------------- #
struct Dense
  σ
  W
  b
end
# `σ` defaults to the sigmoid activation function exported by Flux
function Dense(in::Integer, out::Integer, σ = σ)
  W = param(randn(out, in))
  b = param(zeros(out))
  return Dense(σ, W, b)
end
# Note that Julia compiles:
# * Specialised code for the forward pass (wrt activation function,
# number types, ...)
# * Including a single GPU/CPU kernel for the broadcast call
function (m::Dense)(x)
  σ, W, b = m.σ, m.W, m.b
  σ.(W*x .+ b)
end
d = Dense(10, 5, relu)
d(rand(10))
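# Illustrative extra call (not part of the original listing): applying the
# same layer to Float32 input compiles a fresh method specialised on the
# new element type, as noted above.
d(rand(Float32, 10))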
# ------------------------------------------------------- #
# RNN from scratch #
# ------------------------------------------------------- #
in = 10
out = 5
Wi = param(randn(out, in))
Wh = param(randn(out, out))
b = param(zeros(out))
function rnn(h, x)
  h = tanh.(Wi*x .+ Wh*h .+ b)
  return h, h
end
h = rand(out)
xs = [rand(in) for i = 1:13] # Sequence of length 13
ys = []
for x in xs
  h, y = rnn(h, x)
  push!(ys, y)
end
# Output hidden state and sequence
h, ys
# ------------------------------------------------------- #
# Recursive Net #
# ------------------------------------------------------- #
N = 10
# Generate dummy data
tree() = rand() < 0.5 ? rand(N) : (tree(), tree())
# Model
shrink = Dense(2N, N)
combine(a, b) = shrink([a; b])
model(x) = x
model(x::Tuple) = combine(model(x[1]), model(x[2]))
# The model is able to compress an arbitrary tree into
# a single length N representation.
model(tree())

examples/pytorch.py (Normal file, 73 additions)

@@ -0,0 +1,73 @@
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import Parameter
from torch.nn.functional import softmax
# ------------------------------------------------------- #
# Logistic Regression from scratch #
# ------------------------------------------------------- #
W = Variable(torch.zeros(784, 10))
b = Variable(torch.zeros(1, 10))
def pred(x):
    return softmax(torch.matmul(x, W) + b)

def cost(x, y):
    # Cross-entropy cost (negative log-likelihood)
    return -(pred(x).log() * y).sum(1).mean()
# See an example prediction
pred(Variable(torch.rand(1,784), requires_grad = False))
# ------------------------------------------------------- #
# Custom Layer: Dense #
# ------------------------------------------------------- #
class Dense(nn.Module):
    def __init__(self, input, out, act = torch.nn.functional.sigmoid):
        super(Dense, self).__init__()
        self.act = act
        self.W = Parameter(torch.randn(input, out))
        self.b = Parameter(torch.randn(1, out))

    def forward(self, x):
        return self.act(torch.matmul(x, self.W) + self.b)
d = Dense(10, 5, torch.nn.functional.relu)
x = Variable(torch.rand(1, 10), requires_grad = False)
d(x)
# ------------------------------------------------------- #
# RNN from scratch #
# ------------------------------------------------------- #
class RNN(nn.Module):
    def __init__(self, input, out):
        super(RNN, self).__init__()
        self.Wi = Parameter(torch.randn(input, out))
        self.Wh = Parameter(torch.randn(out, out))
        self.b = Parameter(torch.randn(1, out))

    def forward(self, h, x):
        Wi, Wh, b = self.Wi, self.Wh, self.b
        h = (torch.matmul(x, Wi) + torch.matmul(h, Wh) + b).tanh()
        return (h, h)
rnn = RNN(10, 5)
h = Variable(torch.rand(1, 5), requires_grad = False)
xs = [Variable(torch.rand(1, 10), requires_grad = False) for _ in range(13)] # Sequence of length 13, matching the Julia example
ys = []
for x in xs:
    (h, y) = rnn(h, x)
    ys.append(y)
# Output hidden state and sequence
h, ys
# ------------------------------------------------------- #
# Recursive Net #
# ------------------------------------------------------- #
# TODO: similar to Julia
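# A possible sketch mirroring examples/flux.jl above; `N`, `tree`, `combine`
# and `model` are illustrative names rather than part of the original example.
import random

N = 10

# Dummy data: a leaf is a length-N row vector, a node is a pair of subtrees
def tree():
    if random.random() < 0.5:
        return Variable(torch.rand(1, N), requires_grad = False)
    return (tree(), tree())

# Model: shrink a concatenated pair of length-N vectors back down to length N
shrink = Dense(2 * N, N)

def combine(a, b):
    return shrink(torch.cat([a, b], 1))

def model(x):
    if isinstance(x, tuple):
        return combine(model(x[0]), model(x[1]))
    return x

# Compresses an arbitrary tree into a single length-N representation
model(tree())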

examples/tf.py (Normal file, 87 additions)

@@ -0,0 +1,87 @@
import tensorflow as tf
import numpy as np
# ------------------------------------------------------- #
# Logistic Regression from scratch #
# ------------------------------------------------------- #
sess = tf.Session()
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
pred = tf.nn.softmax(tf.matmul(x, W) + b)
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
init = tf.global_variables_initializer()
sess.run(init)
# See an example prediction
sess.run(pred, feed_dict = {x: np.random.rand(1,784)})
sess.close()
# ------------------------------------------------------- #
# Custom Layer: Dense #
# ------------------------------------------------------- #
def dense_layer(x, input, out, act = tf.sigmoid):
    W = tf.get_variable("weights", [input, out],
                        initializer=tf.random_normal_initializer())
    b = tf.get_variable("bias", [1, out],
                        initializer=tf.random_normal_initializer())
    return act(tf.matmul(x, W) + b)
sess = tf.Session()
x = tf.placeholder(tf.float32, [None, 10])
with tf.variable_scope("layer1"):
    y = dense_layer(x, 10, 5)
init = tf.global_variables_initializer()
sess.run(init)
sess.run(y, feed_dict = {x: np.random.rand(1,10)})
sess.close()
# ------------------------------------------------------- #
# RNN from scratch #
# ------------------------------------------------------- #
input = 10
hidden = 5
length = 13
def step(hprev, x):
    # params
    Wi = tf.get_variable('W', shape=[input, hidden], initializer=tf.random_normal_initializer())
    Wh = tf.get_variable('U', shape=[hidden, hidden], initializer=tf.random_normal_initializer())
    b = tf.get_variable('b', shape=[hidden], initializer=tf.constant_initializer(0.))
    # current hidden state
    h = tf.tanh(tf.matmul(hprev, Wh) + tf.matmul(x, Wi) + b)
    return h
sess = tf.Session()
# (seqlength, batch, features)
xs = tf.placeholder(tf.float32, [length, 1, input])
h = tf.placeholder(tf.float32, [None, hidden])
states = tf.scan(step, xs, initializer=h)
init = tf.global_variables_initializer()
sess.run(init)
sess.run(states, feed_dict = {xs: np.random.rand(length,1,input), h: np.random.randn(1,hidden)})
sess.close()
# ------------------------------------------------------- #
# Recursive Net #
# ------------------------------------------------------- #
# Too long to repeat here, see https://github.com/erickrf/treernn