Added loss functions

Adarsh Kumar 2020-02-05 22:29:15 +05:30 committed by GitHub
parent 60043fa2aa
commit 7ac647a7ac
1 changed file with 116 additions and 1 deletion


@@ -2,9 +2,91 @@ using CuArrays
using NNlib: logsoftmax, logσ
# Cost functions
"""
mae(ŷ, y)
L1 loss function. Computes the mean of the absolute errors between the predictions ŷ and the true values y.
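For example (a quick sketch with illustrative values):
```julia
julia> mae([1.5, 0.5], [1.0, 1.0])
0.5
```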
"""
mae(ŷ, y) = sum(abs.(ŷ .- y)) * 1 // length(y)
"""
mse(ŷ, y)
L2 loss function. Computes the mean of the squared errors between the predictions ŷ and the true values y.
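For example (same illustrative values as for `mae`):
```julia
julia> mse([1.5, 0.5], [1.0, 1.0])
0.25
```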
"""
mse(ŷ, y) = sum((ŷ .- y).^2) * 1 // length(y)
"""
mean_squared_logarithmic_error(ŷ, y; ϵ1=eps.(Float64.(ŷ)), ϵ2=eps.(Float64.(y)))
L2 loss function. Returns the mean of the squared logarithmic errors between the predictions ŷ and the true values y. The ϵ1 and ϵ2 terms provide numerical stability.
(Computes the mean of (log(ŷ) - log(y))^2.) This error penalizes an under-predicted estimate more heavily than an over-predicted one.
```julia
julia> y_=[14726,327378,74734]
3-element Array{Int64,1}:
14726
327378
74734
julia> y = [12466.1,16353.95,16367.98]
3-element Array{Float64,1}:
12466.1
16353.95
16367.98
julia> mean_squared_logarithmic_error(y,y_)
3.771271382334686
```
Alias:
msle(ŷ, y; ϵ1=eps.(Float64.(ŷ)), ϵ2=eps.(Float64.(y)))
"""
mean_squared_logarithmic_error(ŷ, y; ϵ1=eps.(Float64.(ŷ)), ϵ2=eps.(Float64.(y))) = sum((log.(ŷ .+ ϵ1) .- log.(y .+ ϵ2)).^2) * 1 // length(y)
# Alias
msle(ŷ, y; ϵ1=eps.(Float64.(ŷ)), ϵ2=eps.(Float64.(y))) = sum((log.(ŷ .+ ϵ1) .- log.(y .+ ϵ2)).^2) * 1 // length(y)
"""
huber_loss(ŷ, y, delta=1.0)
Computes the mean of the Huber loss between the predictions ŷ and the true values y. By default, delta is set to 1.0.
[Huber Loss](https://en.wikipedia.org/wiki/Huber_loss).
```julia
julia> y = [1.2636,1.25,1.73]
3-element Array{Float64,1}:
1.2636
1.25
1.73
julia> y_= [-1.376,0,3.37]
3-element Array{Float64,1}:
-1.376
0.0
3.37
julia> huber_loss(y, y_)
1.3432
```
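Both branches can be checked with values chosen to hit each region (a sketch; with the default delta=1.0, the error 0.5 falls in the quadratic region and 2.0 in the linear one):
```julia
julia> huber_loss([1.5, 0.0], [1.0, 2.0])
0.8125
```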
"""
function huber_loss(ŷ, y, delta=1.0)
    abs_error = abs.(ŷ .- y)
    hub_loss = 0
    for i in 1:length(y)
        if abs_error[i] <= delta
            # quadratic region: small errors are squared
            hub_loss += abs_error[i]^2 * 0.5
        else
            # linear region: large errors grow only linearly
            hub_loss += delta * (abs_error[i] - 0.5 * delta)
        end
    end
    # average over all elements
    return hub_loss * 1 // length(y)
end
function _crossentropy(ŷ::AbstractVecOrMat, y::AbstractVecOrMat, weight::Nothing)
    return -sum(y .* log.(ŷ)) * 1 // size(y, 2)
end
@@ -17,8 +99,32 @@ function _crossentropy(ŷ::AbstractVecOrMat, y::AbstractVecOrMat, weight::Abstr
    return -sum(y .* log.(ŷ) .* weight) * 1 // size(y, 2)
end
"""
crossentropy(ŷ, y; weight=nothing)
Computes the cross-entropy loss between the predictions ŷ and the true labels y (expected to be `onehot` encoded). The `weight` parameter allows setting class weights while calculating the loss.
It can be a number or a vector of class weights. By default, `weight` is set to `nothing`.
```julia
julia> ŷ = [0.33 0.11 0.98; 0.11 0.34 0.11]
2×3 Array{Float64,2}:
0.33 0.11 0.98
0.11 0.34 0.11
julia> y = [1 0 0;0 1 0]
2×3 Array{Int64,2}:
1 0 0
0 1 0
julia> crossentropy(ŷ, y)
0.7291574286311803
```
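The `weight` keyword scales each class's contribution; a quick sketch reusing the arrays above (one weight per class, i.e. per row):
```julia
crossentropy(ŷ, y; weight = [1.0, 2.0])  # ≈ 1.0888, the second class counting double
```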
Note: If there are only two classes, it is better to use the binarycrossentropy(ŷ, y) function.
"""
crossentropy(ŷ::AbstractVecOrMat, y::AbstractVecOrMat; weight=nothing) = _crossentropy(ŷ, y, weight)
function logitcrossentropy(logŷ::AbstractVecOrMat, y::AbstractVecOrMat; weight = 1)
    return -sum(y .* logsoftmax(logŷ) .* weight) * 1 // size(y, 2)
end
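# Numerically, logitcrossentropy(logŷ, y) ≈ crossentropy(softmax(logŷ), y), but it is
# more stable, since logsoftmax avoids taking the log of near-zero probabilities.
# A quick sketch of the equivalence (assumes `using NNlib: softmax`):
#     logŷ = [2.0 0.5; 1.0 1.5]; y = [1 0; 0 1]
#     logitcrossentropy(logŷ, y) ≈ crossentropy(softmax(logŷ), y)   # true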
@@ -106,7 +212,16 @@ poisson(ŷ, y) = sum(ŷ .- y .* log.(ŷ)) *1 // size(y,2)
"""
hinge(ŷ, y)
L1 loss function. Measures the hinge loss given the prediction ŷ and the true labels y (containing 1 or -1).
[Hinge Loss](https://en.wikipedia.org/wiki/Hinge_loss).
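For example (a sketch; columns are samples):
```julia
julia> hinge([0.5 -0.25], [1 -1])
0.625
```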
"""
hinge(ŷ, y) = sum(max.(0, 1 .- ŷ .* y)) * 1 // size(y, 2)
"""
squared_hinge(ŷ, y)
L2 loss function. Computes the squared hinge loss over the predictions ŷ and the true labels y (containing 1 or -1).
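For example (the squared counterpart of the `hinge` sketch):
```julia
julia> squared_hinge([0.5 -0.25], [1 -1])
0.40625
```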
"""
squared_hinge(ŷ, y) = sum((max.(0, 1 .- ŷ .* y)).^2) * 1 // size(y, 2)