using NNlib: conv, ∇conv_data, depthwiseconv, DenseConvDims, DepthwiseConvDims, PoolDims, maxpool, meanpool

# Expand a scalar keyword argument to an N-tuple, passing tuples through unchanged.
expand(N, i::Tuple) = i
expand(N, i::Integer) = ntuple(_ -> i, N)
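
# For example (illustration, not part of the API): `expand(Val(2), 1) == (1, 1)`,
# while `expand(Val(2), (3, 4)) == (3, 4)`, which is how a keyword like
# `stride = 1` below is broadcast across every spatial dimension.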
"""
|
2018-02-26 22:43:07 +00:00
|
|
|
|
Conv(size, in=>out)
|
|
|
|
|
Conv(size, in=>out, relu)
|
2017-12-18 18:05:48 +00:00
|
|
|
|
|
|
|
|
|
Standard convolutional layer. `size` should be a tuple like `(2, 2)`.
|
|
|
|
|
`in` and `out` specify the number of input and output channels respectively.
|
|
|
|
|
|
2019-02-23 20:31:27 +00:00
|
|
|
|
Example: Applying Conv layer to a 1-channel input using a 2x2 window size,
|
|
|
|
|
giving us a 16-channel output. Output is activated with ReLU.
|
|
|
|
|
|
|
|
|
|
size = (2,2)
|
|
|
|
|
in = 1
|
2019-06-12 16:04:42 +00:00
|
|
|
|
out = 16
|
2019-02-23 20:31:27 +00:00
|
|
|
|
Conv((2, 2), 1=>16, relu)
|
|
|
|
|
|
2019-06-12 16:04:42 +00:00
|
|
|
|
Data should be stored in WHCN order (width, height, # channels, # batches).
|
|
|
|
|
In other words, a 100×100 RGB image would be a `100×100×3×1` array,
|
2019-02-23 20:31:27 +00:00
|
|
|
|
and a batch of 50 would be a `100×100×3×50` array.
|
2017-12-18 18:05:48 +00:00
|
|
|
|
|
2018-05-21 19:20:43 +00:00
|
|
|
|
Takes the keyword arguments `pad`, `stride` and `dilation`.
|
2017-12-18 18:05:48 +00:00
|
|
|
|
"""
struct Conv{N,M,F,A,V}
  σ::F
  weight::A
  bias::V
  stride::NTuple{N,Int}
  pad::NTuple{M,Int}      # M == 2N: lower/upper padding for each of the N spatial dims
  dilation::NTuple{N,Int}
end

function Conv(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identity;
              stride = 1, pad = 0, dilation = 1) where {T,N}
  stride = expand(Val(N-2), stride)
  pad = expand(Val(2*(N-2)), pad)
  dilation = expand(Val(N-2), dilation)
  return Conv(σ, w, b, stride, pad, dilation)
end

Conv(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity;
     init = glorot_uniform, stride = 1, pad = 0, dilation = 1) where N =
  Conv(init(k..., ch...), zeros(ch[2]), σ,
       stride = stride, pad = pad, dilation = dilation)

@functor Conv

function (c::Conv)(x::AbstractArray)
  # TODO: breaks gpu broadcast :(
  # ndims(x) == ndims(c.weight)-1 && return squeezebatch(c(reshape(x, size(x)..., 1)))
  # Reshape the bias so it broadcasts along the channel dimension: 1×…×1×out×1.
  σ, b = c.σ, reshape(c.bias, map(_->1, c.stride)..., :, 1)
  cdims = DenseConvDims(x, c.weight; stride=c.stride, padding=c.pad, dilation=c.dilation)
  σ.(conv(x, c.weight, cdims) .+ b)
end
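
# A minimal usage sketch (my own example, following the docstring's setup): with
# the default `stride = 1` and `pad = 0`, a 2×2 window shrinks each spatial
# dimension by one.
#
#   c = Conv((2, 2), 1=>16, relu)
#   x = rand(28, 28, 1, 8)   # WHCN: 28×28 images, 1 channel, batch of 8
#   size(c(x))               # (27, 27, 16, 8)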

function Base.show(io::IO, l::Conv)
  print(io, "Conv(", size(l.weight)[1:ndims(l.weight)-2])
  print(io, ", ", size(l.weight, ndims(l.weight)-1), "=>", size(l.weight, ndims(l.weight)))
  l.σ == identity || print(io, ", ", l.σ)
  print(io, ")")
end

# Promote non-float inputs to the weight's element type, and route floating-point
# inputs straight to the generic method above. Note that the weight array type is
# the *fourth* parameter of `Conv{N,M,F,A,V}`, so `W` needs three leading `<:Any`s;
# with only two, `W` would constrain the activation slot and these methods would
# never match.
(a::Conv{<:Any,<:Any,<:Any,W})(x::AbstractArray{T}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  invoke(a, Tuple{AbstractArray}, x)

(a::Conv{<:Any,<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  a(T.(x))
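
# With the corrected dispatch above, non-float input is promoted first, e.g.
# `Conv((2, 2), 1=>16)(rand(0:255, 10, 10, 1, 1))` converts the integer array via
# `T.(x)` (with `T` the weight's element type) before reaching `conv`.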

"""
    ConvTranspose(size, in=>out)
    ConvTranspose(size, in=>out, relu)

Standard convolutional transpose layer. `size` should be a tuple like `(2, 2)`.
`in` and `out` specify the number of input and output channels respectively.

Data should be stored in WHCN order. In other words, a 100×100 RGB image would
be a `100×100×3×1` array, and a batch of 50 would be a `100×100×3×50` array.

Takes the keyword arguments `pad`, `stride` and `dilation`.
"""
struct ConvTranspose{N,M,F,A,V}
  σ::F
  weight::A
  bias::V
  stride::NTuple{N,Int}
  pad::NTuple{M,Int}
  dilation::NTuple{N,Int}
end

function ConvTranspose(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identity;
                       stride = 1, pad = 0, dilation = 1) where {T,N}
  stride = expand(Val(N-2), stride)
  pad = expand(Val(2*(N-2)), pad)
  dilation = expand(Val(N-2), dilation)
  return ConvTranspose(σ, w, b, stride, pad, dilation)
end

# The weight is laid out as (k..., out, in), the reverse of `Conv`, since the
# forward pass is implemented with `∇conv_data`.
ConvTranspose(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity;
              init = glorot_uniform, stride = 1, pad = 0, dilation = 1) where N =
  ConvTranspose(init(k..., reverse(ch)...), zeros(ch[2]), σ,
                stride = stride, pad = pad, dilation = dilation)

@functor ConvTranspose

function conv_transpose_dims(c::ConvTranspose, x::AbstractArray)
  # Calculate size of "input", from ∇conv_data()'s perspective...
  combined_pad = (c.pad[1:2:end] .+ c.pad[2:2:end])
  I = (size(x)[1:end-2] .- 1).*c.stride .+ 1 .+ (size(c.weight)[1:end-2] .- 1).*c.dilation .- combined_pad
  C_in = size(c.weight)[end-1]
  batch_size = size(x)[end]
  # Create DenseConvDims() that looks like the corresponding conv()
  return DenseConvDims((I..., C_in, batch_size), size(c.weight);
                       stride=c.stride,
                       padding=c.pad,
                       dilation=c.dilation,
  )
end
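
# Worked example of the size formula above (my own numbers): with a 4×4 input,
# a 3×3 kernel, `stride = 2`, no padding and no dilation, each spatial dim is
# (4 - 1)*2 + 1 + (3 - 1)*1 - 0 = 9, so `ConvTranspose((3, 3), 1=>1, stride = 2)`
# maps a 4×4×1×1 array to a 9×9×1×1 array.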

function (c::ConvTranspose)(x::AbstractArray)
  # ndims(x) == ndims(c.weight)-1 && return squeezebatch(c(reshape(x, size(x)..., 1)))
  σ, b = c.σ, reshape(c.bias, map(_->1, c.stride)..., :, 1)
  cdims = conv_transpose_dims(c, x)
  return σ.(∇conv_data(x, c.weight, cdims) .+ b)
end

function Base.show(io::IO, l::ConvTranspose)
  print(io, "ConvTranspose(", size(l.weight)[1:ndims(l.weight)-2])
  print(io, ", ", size(l.weight, ndims(l.weight)), "=>", size(l.weight, ndims(l.weight)-1))
  l.σ == identity || print(io, ", ", l.σ)
  print(io, ")")
end

# As for `Conv`: the weight array type is the fourth parameter of the struct.
(a::ConvTranspose{<:Any,<:Any,<:Any,W})(x::AbstractArray{T}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  invoke(a, Tuple{AbstractArray}, x)

(a::ConvTranspose{<:Any,<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  a(T.(x))

"""
    DepthwiseConv(size, in=>out)
    DepthwiseConv(size, in=>out, relu)

Depthwise convolutional layer. `size` should be a tuple like `(2, 2)`.
`in` and `out` specify the number of input and output channels respectively.
Note that `out` must be an integer multiple of `in`.

Data should be stored in WHCN order. In other words, a 100×100 RGB image would
be a `100×100×3×1` array, and a batch of 50 would be a `100×100×3×50` array.

Takes the keyword arguments `pad`, `stride` and `dilation`.
"""
struct DepthwiseConv{N,M,F,A,V}
  σ::F
  weight::A
  bias::V
  stride::NTuple{N,Int}
  pad::NTuple{M,Int}
  dilation::NTuple{N,Int}
end

function DepthwiseConv(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identity;
                       stride = 1, pad = 0, dilation = 1) where {T,N}
  stride = expand(Val(N-2), stride)
  pad = expand(Val(2*(N-2)), pad)
  dilation = expand(Val(N-2), dilation)
  return DepthwiseConv(σ, w, b, stride, pad, dilation)
end

function DepthwiseConv(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity;
                       init = glorot_uniform, stride = 1, pad = 0, dilation = 1) where N
  @assert ch[2] % ch[1] == 0 "Output channels must be an integer multiple of input channels"
  # The weight is laid out as (k..., channel multiplier, in).
  return DepthwiseConv(
    init(k..., div(ch[2], ch[1]), ch[1]),
    zeros(ch[2]),
    σ;
    stride = stride,
    pad = pad,
    dilation = dilation
  )
end

@functor DepthwiseConv

function (c::DepthwiseConv)(x)
  σ, b = c.σ, reshape(c.bias, map(_->1, c.stride)..., :, 1)
  cdims = DepthwiseConvDims(x, c.weight; stride=c.stride, padding=c.pad, dilation=c.dilation)
  σ.(depthwiseconv(x, c.weight, cdims) .+ b)
end
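
# Shape sketch (my own example): `DepthwiseConv((3, 3), 3=>6)` stores a weight of
# size (3, 3, 2, 3), i.e. a multiplier of 6 ÷ 3 = 2 filters per input channel, and
# maps a W×H×3×B input to a (W-2)×(H-2)×6×B output at the default stride and pad.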

function Base.show(io::IO, l::DepthwiseConv)
  print(io, "DepthwiseConv(", size(l.weight)[1:end-2])
  print(io, ", ", size(l.weight)[end], "=>", prod(size(l.weight)[end-1:end]))
  l.σ == identity || print(io, ", ", l.σ)
  print(io, ")")
end

(a::DepthwiseConv{<:Any,<:Any,<:Any,W})(x::AbstractArray{T}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  invoke(a, Tuple{AbstractArray}, x)

(a::DepthwiseConv{<:Any,<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  a(T.(x))

"""
    CrossCor(size, in=>out)
    CrossCor(size, in=>out, relu)

Standard cross-correlation layer. `size` should be a tuple like `(2, 2)`.
`in` and `out` specify the number of input and output channels respectively.
Unlike `Conv`, the kernel is applied without being flipped, i.e. this computes
a cross-correlation rather than a true convolution.

Example: applying a `CrossCor` layer to a 1-channel input with a 2×2 window,
giving a 16-channel output activated with `relu`:

    size = (2,2)
    in = 1
    out = 16
    CrossCor((2, 2), 1=>16, relu)

Data should be stored in WHCN order (width, height, # channels, # batches).
In other words, a 100×100 RGB image would be a `100×100×3×1` array,
and a batch of 50 would be a `100×100×3×50` array.

Takes the keyword arguments `pad`, `stride` and `dilation`.
"""
struct CrossCor{N,M,F,A,V}
  σ::F
  weight::A
  bias::V
  stride::NTuple{N,Int}
  pad::NTuple{M,Int}
  dilation::NTuple{N,Int}
end

function CrossCor(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identity;
                  stride = 1, pad = 0, dilation = 1) where {T,N}
  stride = expand(Val(N-2), stride)
  pad = expand(Val(2*(N-2)), pad)
  dilation = expand(Val(N-2), dilation)
  return CrossCor(σ, w, b, stride, pad, dilation)
end

CrossCor(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity;
         init = glorot_uniform, stride = 1, pad = 0, dilation = 1) where N =
  CrossCor(init(k..., ch...), zeros(ch[2]), σ,
           stride = stride, pad = pad, dilation = dilation)

@functor CrossCor

function crosscor(x, w, ddims::DenseConvDims)
  # `F = true` (flipkernel) makes NNlib slide the kernel as-is instead of
  # flipping it, turning the convolution into a cross-correlation.
  ddims = DenseConvDims(ddims, F=true)
  return conv(x, w, ddims)
end
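
# Conceptual 1-D illustration of the difference (not runnable through NNlib's
# N-D API): for x = [1, 2, 3] and kernel w = [1, 0, -1],
#   convolution       uses reverse(w): 1*(-1) + 2*0 + 3*1    =  2
#   cross-correlation uses w itself:   1*1    + 2*0 + 3*(-1) = -2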

function (c::CrossCor)(x::AbstractArray)
  # TODO: breaks gpu broadcast :(
  # ndims(x) == ndims(c.weight)-1 && return squeezebatch(c(reshape(x, size(x)..., 1)))
  σ, b = c.σ, reshape(c.bias, map(_->1, c.stride)..., :, 1)
  cdims = DenseConvDims(x, c.weight; stride=c.stride, padding=c.pad, dilation=c.dilation)
  σ.(crosscor(x, c.weight, cdims) .+ b)
end

function Base.show(io::IO, l::CrossCor)
  print(io, "CrossCor(", size(l.weight)[1:ndims(l.weight)-2])
  print(io, ", ", size(l.weight, ndims(l.weight)-1), "=>", size(l.weight, ndims(l.weight)))
  l.σ == identity || print(io, ", ", l.σ)
  print(io, ")")
end

(a::CrossCor{<:Any,<:Any,<:Any,W})(x::AbstractArray{T}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  invoke(a, Tuple{AbstractArray}, x)

(a::CrossCor{<:Any,<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  a(T.(x))

"""
    MaxPool(k)

Max pooling layer. `k` stands for the size of the window for each dimension of the input.

Takes the keyword arguments `pad` and `stride`.
"""
struct MaxPool{N,M}
  k::NTuple{N,Int}
  pad::NTuple{M,Int}
  stride::NTuple{N,Int}
end

function MaxPool(k::NTuple{N,Integer}; pad = 0, stride = k) where N
  stride = expand(Val(N), stride)
  pad = expand(Val(2*N), pad)
  return MaxPool(k, pad, stride)
end

function (m::MaxPool)(x)
  pdims = PoolDims(x, m.k; padding=m.pad, stride=m.stride)
  return maxpool(x, pdims)
end
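
# Usage sketch (my own example): `MaxPool((2, 2))` defaults to `stride = k`, so it
# halves each spatial dimension: a 28×28×16×8 input becomes a 14×14×16×8 output.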

function Base.show(io::IO, m::MaxPool)
  print(io, "MaxPool(", m.k, ", pad = ", m.pad, ", stride = ", m.stride, ")")
end

"""
    MeanPool(k)

Mean pooling layer. `k` stands for the size of the window for each dimension of the input.

Takes the keyword arguments `pad` and `stride`.
"""
struct MeanPool{N,M}
  k::NTuple{N,Int}
  pad::NTuple{M,Int}
  stride::NTuple{N,Int}
end

function MeanPool(k::NTuple{N,Integer}; pad = 0, stride = k) where N
  stride = expand(Val(N), stride)
  pad = expand(Val(2*N), pad)
  return MeanPool(k, pad, stride)
end

function (m::MeanPool)(x)
  pdims = PoolDims(x, m.k; padding=m.pad, stride=m.stride)
  return meanpool(x, pdims)
end
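
# Same shape behaviour as `MaxPool` (my own example), but each window is averaged
# rather than maximised: `MeanPool((2, 2))` also maps 28×28×16×8 to 14×14×16×8.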

function Base.show(io::IO, m::MeanPool)
  print(io, "MeanPool(", m.k, ", pad = ", m.pad, ", stride = ", m.stride, ")")
end