# Flux.jl/src/layers/conv.jl

using NNlib: conv, ∇conv_data, depthwiseconv, maxpool, meanpool

# The last two dimensions of a convolution weight array are the input and
# output channels, so for an N-dimensional weight the spatial rank is N-2.
@generated sub2(::Val{N}) where N = :(Val($(N-2)))

# Expand a scalar keyword argument (e.g. `stride = 1`) into an N-tuple;
# tuples are passed through unchanged.
expand(N, i::Tuple) = i
expand(N, i::Integer) = ntuple(_ -> i, N)

"""
2018-02-26 22:43:07 +00:00
Conv(size, in=>out)
Conv(size, in=>out, relu)
2017-12-18 18:05:48 +00:00
Standard convolutional layer. `size` should be a tuple like `(2, 2)`.
`in` and `out` specify the number of input and output channels respectively.
2018-02-16 00:06:15 +00:00
Data should be stored in WHCN order. In other words, a 100×100 RGB image would
2017-12-18 18:05:48 +00:00
be a `100×100×3` array, and a batch of 50 would be a `100×100×3×50` array.
Takes the keyword arguments `pad`, `stride` and `dilation`.
2017-12-18 18:05:48 +00:00
"""
2018-02-26 22:43:07 +00:00
struct Conv{N,F,A,V}
  σ::F
  weight::A
  bias::V
  stride::NTuple{N,Int}
  pad::NTuple{N,Int}
  dilation::NTuple{N,Int}
end

Conv(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identity;
     stride = 1, pad = 0, dilation = 1) where {T,N} =
  Conv(σ, w, b, expand.(sub2(Val(N)), (stride, pad, dilation))...)

Conv(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity;
     init = glorot_uniform, stride = 1, pad = 0, dilation = 1) where N =
  Conv(param(init(k..., ch...)), param(zeros(ch[2])), σ,
       stride = stride, pad = pad, dilation = dilation)

@treelike Conv

function (c::Conv)(x::AbstractArray)
  # TODO: breaks gpu broadcast :(
  # ndims(x) == ndims(c.weight)-1 && return squeezebatch(c(reshape(x, size(x)..., 1)))
  # Reshape the bias to a 1×…×1×out×1 array so it broadcasts over the spatial
  # and batch dimensions of the convolution output.
  σ, b = c.σ, reshape(c.bias, map(_->1, c.stride)..., :, 1)
  σ.(conv(x, c.weight, stride = c.stride, pad = c.pad, dilation = c.dilation) .+ b)
end

function Base.show(io::IO, l::Conv)
  print(io, "Conv(", size(l.weight)[1:ndims(l.weight)-2])
  print(io, ", ", size(l.weight, ndims(l.weight)-1), "=>", size(l.weight, ndims(l.weight)))
  l.σ == identity || print(io, ", ", l.σ)
  print(io, ")")
end

# Convert real-valued input to the weight element type before convolving;
# input already matching the weight eltype is dispatched to the plain method.
(a::Conv{<:Any,<:Any,W})(x::AbstractArray{T}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  invoke(a, Tuple{AbstractArray}, x)

(a::Conv{<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  a(T.(x))
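
# Usage sketch (illustrative, not part of the library code): constructing a Conv
# layer and applying it to a WHCN batch, assuming `using Flux` so `Conv`, `relu`
# and friends are in scope. The array sizes below are made-up example values.
#
#   layer = Conv((3, 3), 3=>16, relu, pad = 1)   # 3×3 kernel, 3 → 16 channels
#   x = rand(100, 100, 3, 50)                    # 50 images, 100×100, 3 channels
#   y = layer(x)                                 # size(y) == (100, 100, 16, 50)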

"""
    ConvTranspose(size, in=>out)
    ConvTranspose(size, in=>out, relu)

Standard convolutional transpose layer. `size` should be a tuple like `(2, 2)`.
`in` and `out` specify the number of input and output channels respectively.

Data should be stored in WHCN order. In other words, a 100×100 RGB image would
be a `100×100×3` array, and a batch of 50 would be a `100×100×3×50` array.

Takes the keyword arguments `pad`, `stride` and `dilation`.
"""
struct ConvTranspose{N,F,A,V}
  σ::F
  weight::A
  bias::V
  stride::NTuple{N,Int}
  pad::NTuple{N,Int}
  dilation::NTuple{N,Int}
end

ConvTranspose(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identity;
              stride = 1, pad = 0, dilation = 1) where {T,N} =
  ConvTranspose(σ, w, b, expand.(sub2(Val(N)), (stride, pad, dilation))...)

# The weight is stored with the channel dimensions reversed relative to `Conv`,
# i.e. `init(k..., out, in)`.
ConvTranspose(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity;
              init = glorot_uniform, stride = 1, pad = 0, dilation = 1) where N =
  ConvTranspose(param(init(k..., reverse(ch)...)), param(zeros(ch[2])), σ,
                stride = stride, pad = pad, dilation = dilation)

@treelike ConvTranspose

function (c::ConvTranspose)(x::AbstractArray)
  # ndims(x) == ndims(c.weight)-1 && return squeezebatch(c(reshape(x, size(x)..., 1)))
  # A transposed convolution is the gradient of `conv` with respect to its
  # input, so the forward pass reuses NNlib's `∇conv_data`.
  σ, b = c.σ, reshape(c.bias, map(_->1, c.stride)..., :, 1)
  σ.(∇conv_data(x, c.weight, stride = c.stride, pad = c.pad, dilation = c.dilation) .+ b)
end

function Base.show(io::IO, l::ConvTranspose)
  print(io, "ConvTranspose(", size(l.weight)[1:ndims(l.weight)-2])
  print(io, ", ", size(l.weight, ndims(l.weight)), "=>", size(l.weight, ndims(l.weight)-1))
  l.σ == identity || print(io, ", ", l.σ)
  print(io, ")")
end

(a::ConvTranspose{<:Any,<:Any,W})(x::AbstractArray{T}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  invoke(a, Tuple{AbstractArray}, x)

(a::ConvTranspose{<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  a(T.(x))
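
# Usage sketch (illustrative only): a ConvTranspose layer is commonly used for
# learned upsampling. The sizes below are example values; each spatial output
# dimension follows (i - 1)*stride + size - 2*pad.
#
#   up = ConvTranspose((2, 2), 16=>8, stride = 2)
#   x = rand(50, 50, 16, 1)
#   y = up(x)                # size(y) == (100, 100, 8, 1)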
"""
DepthwiseConv(size, in)
DepthwiseConv(size, in=>mul)
DepthwiseConv(size, in=>mul, relu)
Depthwise convolutional layer. `size` should be a tuple like `(2, 2)`.
`in` and `mul` specify the number of input channels and channel multiplier respectively.
In case the `mul` is not specified it is taken as 1.
Data should be stored in WHCN order. In other words, a 100×100 RGB image would
be a `100×100×3` array, and a batch of 50 would be a `100×100×3×50` array.
Takes the keyword arguments `pad` and `stride`.
"""
struct DepthwiseConv{N,F,A,V}
  σ::F
  weight::A
  bias::V
  stride::NTuple{N,Int}
  pad::NTuple{N,Int}
end

DepthwiseConv(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identity;
              stride = 1, pad = 0) where {T,N} =
  DepthwiseConv(σ, w, b, expand.(sub2(Val(N)), (stride, pad))...)

DepthwiseConv(k::NTuple{N,Integer}, ch::Integer, σ = identity; init = glorot_uniform,
              stride = 1, pad = 0) where N =
  DepthwiseConv(param(init(k..., 1, ch)), param(zeros(ch)), σ,
                stride = stride, pad = pad)

DepthwiseConv(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity; init = glorot_uniform,
              stride::NTuple{N,Integer} = map(_->1,k),
              pad::NTuple{N,Integer} = map(_->0,k)) where N =
  DepthwiseConv(param(init(k..., ch[2], ch[1])), param(zeros(ch[2]*ch[1])), σ,
                stride = stride, pad = pad)

@treelike DepthwiseConv

function (c::DepthwiseConv)(x)
  σ, b = c.σ, reshape(c.bias, map(_->1, c.stride)..., :, 1)
  σ.(depthwiseconv(x, c.weight, stride = c.stride, pad = c.pad) .+ b)
end

function Base.show(io::IO, l::DepthwiseConv)
  print(io, "DepthwiseConv(", size(l.weight)[1:ndims(l.weight)-2])
  print(io, ", ", size(l.weight, ndims(l.weight)), "=>", size(l.weight, ndims(l.weight)-1))
  l.σ == identity || print(io, ", ", l.σ)
  print(io, ")")
end
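
# Usage sketch (illustrative only): a depthwise convolution applies `mul`
# separate filters to each input channel, giving in*mul output channels.
# The layer and array sizes below are example values.
#
#   dw = DepthwiseConv((3, 3), 3=>2)
#   x = rand(32, 32, 3, 4)
#   y = dw(x)                # size(y) == (30, 30, 6, 4): 3*2 = 6 channels, no padding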

"""
    MaxPool(k)

Max pooling layer. `k` is the size of the window in each spatial dimension of the input.

Takes the keyword arguments `pad` and `stride`.
"""
struct MaxPool{N}
  k::NTuple{N,Int}
  pad::NTuple{N,Int}
  stride::NTuple{N,Int}
end

MaxPool(k::NTuple{N,Integer}; pad = 0, stride = k) where N =
  MaxPool(k, expand(Val(N), pad), expand(Val(N), stride))

(m::MaxPool)(x) = maxpool(x, m.k; pad = m.pad, stride = m.stride)

function Base.show(io::IO, m::MaxPool)
  print(io, "MaxPool(", m.k, ", pad = ", m.pad, ", stride = ", m.stride, ")")
end
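
# Usage sketch (illustrative only): by default the stride equals the window, so
# a 2×2 max pool halves each spatial dimension. The sizes below are made up.
#
#   pool = MaxPool((2, 2))
#   x = rand(100, 100, 16, 50)
#   y = pool(x)              # size(y) == (50, 50, 16, 50)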
"""
2018-08-24 02:31:13 +00:00
MeanPool(k)
2018-07-31 16:10:53 +00:00
2018-09-04 13:30:02 +00:00
Mean pooling layer. `k` stands for the size of the window for each dimension of the input.
2018-07-31 16:10:53 +00:00
Takes the keyword arguments `pad` and `stride`.
"""
2018-08-24 02:31:13 +00:00
struct MeanPool{N}
  k::NTuple{N,Int}
  pad::NTuple{N,Int}
  stride::NTuple{N,Int}
end

MeanPool(k::NTuple{N,Integer}; pad = 0, stride = k) where N =
  MeanPool(k, expand(Val(N), pad), expand(Val(N), stride))

(m::MeanPool)(x) = meanpool(x, m.k; pad = m.pad, stride = m.stride)

function Base.show(io::IO, m::MeanPool)
  print(io, "MeanPool(", m.k, ", pad = ", m.pad, ", stride = ", m.stride, ")")
end
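
# Usage sketch (illustrative only): MeanPool mirrors MaxPool but averages each
# window instead of taking its maximum. The sizes below are made up.
#
#   pool = MeanPool((2, 2))
#   x = rand(100, 100, 16, 50)
#   y = pool(x)              # size(y) == (50, 50, 16, 50)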