merge conflict resolved
commit 95e490a2c5
@@ -57,6 +57,47 @@ end
 (a::Conv{<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
   a(T.(x))
 
+"""
+    ConvTranspose(size, in=>out)
+    ConvTranspose(size, in=>out, relu)
+
+Standard convolutional transpose layer. `size` should be a tuple like `(2, 2)`.
+`in` and `out` specify the number of input and output channels respectively.
+Data should be stored in WHCN order. In other words, a 100×100 RGB image would
+be a `100×100×3` array, and a batch of 50 would be a `100×100×3×50` array.
+Takes the keyword arguments `pad`, `stride` and `dilation`.
+"""
+struct ConvTranspose{N,F,A,V}
+  σ::F
+  weight::A
+  bias::V
+  stride::NTuple{N,Int}
+  pad::NTuple{N,Int}
+  dilation::NTuple{N,Int}
+end
+
+ConvTranspose(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identity;
+              stride = 1, pad = 0, dilation = 1) where {T,N} =
+  ConvTranspose(σ, w, b, expand.(sub2(Val(N)), (stride, pad, dilation))...)
+
+ConvTranspose(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity; init = initn,
+              stride = 1, pad = 0, dilation = 1) where N =
+  ConvTranspose(param(init(k..., reverse(ch)...)), param(zeros(ch[2])), σ,
+                stride = stride, pad = pad, dilation = dilation)
+
+@treelike ConvTranspose
+
+function (c::ConvTranspose)(x)
+  # ndims(x) == ndims(c.weight)-1 && return squeezebatch(c(reshape(x, size(x)..., 1)))
+  σ, b = c.σ, reshape(c.bias, map(_->1, c.stride)..., :, 1)
+  σ.(∇conv_data(x, c.weight, stride = c.stride, pad = c.pad, dilation = c.dilation) .+ b)
+end
+
+function Base.show(io::IO, l::ConvTranspose)
+  print(io, "ConvTranspose(", size(l.weight)[1:ndims(l.weight)-2])
+end
+
+
 """
     DepthwiseConv(size, in)
     DepthwiseConv(size, in=>mul)
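Note on the hunk above: the two context lines at the top are the `Conv` fast path that converts any real-valued input to the weights' element type via `a(T.(x))`. The new `ConvTranspose` forward pass reuses NNlib's `∇conv_data`, since the gradient of `conv` with respect to its input is exactly a transposed convolution, so no new kernel is needed. As committed, the `Base.show` method prints only the kernel size and never closes the parenthesis. A quick Julia usage sketch, not part of the commit, following the docstring and the `(k, ch::Pair, σ)` constructor; the output shape assumes the default `stride = 1`, `pad = 0`:

    using Flux

    # 2×2 kernel, 3 input channels -> 8 output channels, relu activation
    layer = ConvTranspose((2, 2), 3 => 8, relu)

    # WHCN order: a batch of five 10×10 "RGB images"
    x = rand(10, 10, 3, 5)
    y = layer(x)

    # With stride 1 and no padding, each spatial dim grows by k - 1 (10 -> 11):
    size(y) == (11, 11, 8, 5)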
@@ -356,7 +356,12 @@ x::TrackedVector * y::TrackedVector = track(*, x, y)
 # NNlib
 
 using NNlib
+<<<<<<< HEAD:src/tracker/lib/array.jl
 import NNlib: softmax, ∇softmax, logsoftmax, ∇logsoftmax, conv, ∇conv_data, depthwiseconv, maxpool, meanpool
+=======
+import NNlib: softmax, ∇softmax, logsoftmax, ∇logsoftmax,
+  conv, ∇conv_data, depthwiseconv, maxpool, meanpool
+>>>>>>> a657c287d0590fdd9e49bb68c35bf96febe45e6d:src/tracker/array.jl
 
 softmax(xs::TrackedArray) = track(softmax, xs)
 
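Note on the hunk above: both sides of the recorded conflict import the same NNlib names; they differ only in line wrapping (HEAD keeps the `import` on one line, the incoming branch a657c287 wraps it). The `import` rather than `using` matters here because the tracker adds `TrackedArray` methods to these functions, as in the `softmax` context line. A minimal, self-contained Julia illustration of that rule, unrelated to Flux itself:

    # Extending a function from another module requires `import`
    # (or a module-qualified name); `using` alone only lets you call it.
    module M
    f(x::Int) = x + 1
    end

    import .M: f             # brings M.f in as extendable
    f(x::String) = x * "!"   # adds a new method to M.f

    @assert f(1) == 2        # original method still works
    @assert f("hi") == "hi!" # new method dispatches on String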
@@ -1,6 +1,10 @@
 using Flux
 using Flux.Tracker, Test, NNlib
+<<<<<<< HEAD
 using Flux.Tracker: TrackedReal, gradcheck, grad, checkpoint
+=======
+using Flux.Tracker: TrackedReal, gradcheck, grad, derivative, checkpoint
+>>>>>>> a657c287d0590fdd9e49bb68c35bf96febe45e6d
 using NNlib: conv, ∇conv_data, depthwiseconv
 using Printf: @sprintf
 using LinearAlgebra: diagm, dot, LowerTriangular, norm
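Note on the hunk above: the test preamble keeps `checkpoint` on the HEAD side and additionally pulls in `derivative` on the incoming side. The `gradcheck`/`gradtest` helpers used below compare Tracker's gradients against numeric ones. A rough Julia sketch of that check, with `ngradient` as a local stand-in for the finite-difference helper the test file defines (assumption: central differences, as in the real helper):

    # Central finite-difference gradient of a scalar function f at x.
    function ngradient(f, x::AbstractVector; δ = 1e-5)
      g = zero(x)
      for i in eachindex(x)
        tmp = x[i]
        x[i] = tmp + δ/2; y1 = f(x)
        x[i] = tmp - δ/2; y2 = f(x)
        x[i] = tmp                 # restore the perturbed entry
        g[i] = (y1 - y2) / δ
      end
      return g
    end

    f(x) = sum(abs2, x)            # toy function with known gradient 2x
    x = rand(5)
    @assert ngradient(f, x) ≈ 2x   # gradtest does this against the AD gradient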
@@ -186,6 +190,10 @@ end
 @test gradtest(conv, rand(10, 10, 3, 2), randn(Float64, 2, 2, 3, 2))
 @test gradtest(conv, rand(10, 10, 10, 3, 2), randn(Float64, 2, 2, 2, 3, 2))
 
+@test gradtest(∇conv_data, rand(10, 3, 2), randn(Float64, 2, 2, 3))
+@test gradtest(∇conv_data, rand(10, 10, 3, 2), randn(Float64,2, 2, 2, 3))
+@test gradtest(∇conv_data, rand(10, 10, 10, 3, 2), randn(Float64,2, 2, 2, 2, 3))
+
 @test gradtest(depthwiseconv, rand(10,10,3,2), randn(2, 2, 2, 3))
 
 @test gradtest(∇conv_data, rand(10, 3, 2), randn(Float64, 2, 2, 3))
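Note on the hunk above: the new tests exercise gradients of `∇conv_data` itself, in 1-D, 2-D, and 3-D. That matters because the `ConvTranspose` forward pass in the first hunk calls `∇conv_data`, so training the layer means differentiating through it. As committed, the 1-D `∇conv_data` test now appears twice: the trailing context line repeats the first added line. The shape arithmetic the argument sizes encode, as a tiny Julia check:

    # For kernel size k, stride 1, no padding: conv maps n -> n - k + 1,
    # and ∇conv_data maps it back. E.g. the 2-D case above: n = 10, k = 2.
    n, k = 10, 2
    n_out = n - k + 1           # conv:       10 -> 9
    @assert n_out + k - 1 == n  # ∇conv_data: 9 -> 10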