Added new documentation folders for v0.0.4

Eduardo Cueto Mendoza 2020-06-27 10:46:44 -06:00
parent 278af852f4
commit 6021828772
9 changed files with 1197 additions and 2 deletions

.gitignore

@@ -2,3 +2,4 @@
*.jl.cov
*.jl.mem
/docs/build/
.vscode/*

Manifest.toml

@@ -1,2 +1,412 @@
# This file is machine-generated - editing it directly is not advised
[[AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "051c95d6836228d120f5f4b984dd5aba1624f716"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "0.5.0"
[[AbstractTrees]]
deps = ["Markdown"]
git-tree-sha1 = "33e450545eaf7699da1a6e755f9ea65f14077a45"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.3.3"
[[Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "fd04049c7dd78cfef0b06cdc1f0f181467655712"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "1.1.0"
[[ArrayLayouts]]
deps = ["FillArrays", "LinearAlgebra"]
git-tree-sha1 = "a3254b3780a3544838ca0b7e23b1e9b06eb71bd8"
uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a"
version = "0.3.5"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[BinaryProvider]]
deps = ["Libdl", "Logging", "SHA"]
git-tree-sha1 = "ecdec412a9abc8db54c0efc5548c64dfce072058"
uuid = "b99e7846-7c00-51b0-8f62-c81ae34c0232"
version = "0.5.10"
[[CEnum]]
git-tree-sha1 = "1b77a77c3b28e0b3f413f7567c9bb8dd9bdccd14"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.3.0"
[[CUDAapi]]
deps = ["Libdl", "Logging"]
git-tree-sha1 = "831b825d10104bd29e28f6da93312a976830717b"
pinned = true
uuid = "3895d2a7-ec45-59b8-82bb-cfc6a382f9b3"
version = "4.0.0"
[[CUDAdrv]]
deps = ["CEnum", "CUDAapi", "Printf"]
git-tree-sha1 = "f56bbf18c86bcff7a961a32a4947a5abb2963a29"
uuid = "c5f51814-7f29-56b8-a69c-e4d8f6be1fde"
version = "6.3.0"
[[CUDAnative]]
deps = ["Adapt", "BinaryProvider", "CEnum", "CUDAapi", "CUDAdrv", "ExprTools", "GPUCompiler", "LLVM", "Libdl", "Pkg", "Printf"]
git-tree-sha1 = "ac86db2b05fdfec96b011e25a504ffe7476e8a68"
uuid = "be33ccc6-a3ff-5ff2-a52e-74243cff1e17"
version = "3.1.0"
[[ChainRules]]
deps = ["ChainRulesCore", "LinearAlgebra", "Reexport", "Requires", "Statistics"]
git-tree-sha1 = "76cd719cb7ab57bd2687dcb3b186c4f99820a79d"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "0.6.5"
[[ChainRulesCore]]
deps = ["MuladdMacro"]
git-tree-sha1 = "c384e0e4fe6bfeb6bec0d41f71cc5e391cd110ba"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "0.8.1"
[[CodeTracking]]
deps = ["InteractiveUtils", "UUIDs"]
git-tree-sha1 = "9c173f62af93cce8af2bd3527d160b6ddd6eaf81"
uuid = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
version = "1.0.0"
[[CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "27eb374570946a02aa184ef5b403dabaa7380693"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.10.4"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "InteractiveUtils", "Reexport"]
git-tree-sha1 = "1e9bba7984e78aa8cdeea7f9f7cc984ad4e4b1c7"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.2"
[[CommonSubexpressions]]
deps = ["Test"]
git-tree-sha1 = "efdaf19ab11c7889334ca247ff4c9f7c322817b0"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.2.0"
[[CompilerSupportLibraries_jll]]
deps = ["Libdl", "Pkg"]
git-tree-sha1 = "7c4f882c41faa72118841185afc58a2eb00ef612"
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "0.3.3+0"
[[Cthulhu]]
deps = ["CodeTracking", "FoldingTrees", "InteractiveUtils", "REPL", "UUIDs", "Unicode"]
git-tree-sha1 = "34bf82a46f0600727673e3cbed9a5908efff3c0c"
uuid = "f68482b8-f384-11e8-15f7-abe071a5a75f"
version = "1.2.0"
[[CuArrays]]
deps = ["AbstractFFTs", "Adapt", "CEnum", "CUDAapi", "CUDAdrv", "CUDAnative", "DataStructures", "GPUArrays", "Libdl", "LinearAlgebra", "MacroTools", "NNlib", "Pkg", "Printf", "Random", "Reexport", "Requires", "SparseArrays", "Statistics", "TimerOutputs"]
git-tree-sha1 = "55ab24cc3fcf96f74246cf7e14bc9c2f818facbc"
uuid = "3a865a2d-5b23-5a0f-bc46-62713ec82fae"
version = "2.2.2"
[[DataAPI]]
git-tree-sha1 = "176e23402d80e7743fc26c19c681bfb11246af32"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.3.0"
[[DataStructures]]
deps = ["InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "edad9434967fdc0a2631a65d902228400642120c"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.17.19"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "da24935df8e0c6cf28de340b958f6aac88eaa0cc"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.2"
[[DiffRules]]
deps = ["NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "eb0c34204c8410888844ada5359ac8b96292cfd1"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.0.1"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[ExprTools]]
git-tree-sha1 = "6f0517056812fd6aa3af23d4b70d5325a2ae4e95"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.1"
[[FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays"]
git-tree-sha1 = "bf726ba7ce99e00d10bf63c031285fb9ab3676ae"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.8.11"
[[FixedPointNumbers]]
git-tree-sha1 = "8fb797c37a3b7ced4327a05ac4ca0dd6a4f1ba92"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.1"
[[Flux]]
deps = ["AbstractTrees", "Adapt", "CodecZlib", "Colors", "CuArrays", "DelimitedFiles", "Juno", "MacroTools", "NNlib", "Pkg", "Printf", "Random", "Reexport", "SHA", "Statistics", "StatsBase", "Test", "ZipFile", "Zygote"]
git-tree-sha1 = "eb5801eea6294851dc2b16b20669f91776e79a3b"
pinned = true
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.10.4"
[[FoldingTrees]]
deps = ["AbstractTrees", "REPL"]
git-tree-sha1 = "9a1e497cba7c33e5951aaffd9a421fa713acba0b"
uuid = "1eca21be-9b9b-4ed8-839a-6d8ae26b1781"
version = "1.0.0"
[[ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "NaNMath", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "869540e4367122fbffaace383a5bdc34d6e5e5ac"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.10"
[[Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[GPUArrays]]
deps = ["AbstractFFTs", "Adapt", "LinearAlgebra", "Printf", "Random", "Serialization"]
git-tree-sha1 = "d887693eb1bd5e1fd573262a978745481895ec7d"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "3.4.1"
[[GPUCompiler]]
deps = ["Cthulhu", "DataStructures", "InteractiveUtils", "LLVM", "Libdl", "TimerOutputs"]
git-tree-sha1 = "5275aa268ecd09640b32560e1eae90c78816e4d1"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.2.0"
[[IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "6875ae3cfcb9a50af80553d5cc825f406e8d13bc"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.0"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[Juno]]
deps = ["Base64", "Logging", "Media", "Profile"]
git-tree-sha1 = "a686b0cf235fa3e491b79b4783c2d2382292b436"
uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d"
version = "0.8.2"
[[LLVM]]
deps = ["CEnum", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "d9c6e1efcaa6c2fcd043da812a62b3e489a109a3"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "1.7.0"
[[LibGit2]]
deps = ["Printf"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "f7d2e3f654af75f01ec49be82c231c382214223a"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.5"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[Media]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58"
uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27"
version = "0.5.0"
[[Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "de0a5ce9e5289f27df672ffabef4d1e5861247d5"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "0.4.3"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MuladdMacro]]
git-tree-sha1 = "c6190f9a7fc5d9d5915ab29f2134421b12d24a68"
uuid = "46d2c3a1-f734-5fdb-9937-b9b9aeba4221"
version = "0.2.2"
[[NNlib]]
deps = ["BinaryProvider", "Libdl", "LinearAlgebra", "Requires", "Statistics"]
git-tree-sha1 = "d9f196d911f55aeaff11b11f681b135980783824"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.6.6"
[[NaNMath]]
git-tree-sha1 = "928b8ca9b2791081dc71a51c55347c27c618760f"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.3"
[[OpenSpecFun_jll]]
deps = ["CompilerSupportLibraries_jll", "Libdl", "Pkg"]
git-tree-sha1 = "d51c416559217d974a1113522d5919235ae67a87"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.3+3"
[[OrderedCollections]]
git-tree-sha1 = "12ce190210d278e12644bcadf5b21cbdcf225cd3"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.2.0"
[[Pkg]]
deps = ["Dates", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Reexport]]
deps = ["Pkg"]
git-tree-sha1 = "7b1d07f411bc8ddb7977ec7f377b97b158514fe0"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "0.2.0"
[[Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "d37400976e98018ee840e0ca4f9d20baa231dc6b"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.0.1"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SortingAlgorithms]]
deps = ["DataStructures", "Random", "Test"]
git-tree-sha1 = "03f5898c9959f8115e30bc7226ada7d0df554ddd"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "0.3.1"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[SpecialFunctions]]
deps = ["OpenSpecFun_jll"]
git-tree-sha1 = "d8d8b8a9f4119829410ecd706da4cc8594a1e020"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "0.10.3"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "5c06c0aeb81bef54aed4b3f446847905eb6cbda0"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "0.12.3"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics"]
git-tree-sha1 = "a6102b1f364befdb05746f386b67c6b7e3262c45"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.0"
[[Test]]
deps = ["Distributed", "InteractiveUtils", "Logging", "Random"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[TimerOutputs]]
deps = ["Printf"]
git-tree-sha1 = "f458ca23ff80e46a630922c555d838303e4b9603"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.6"
[[TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "7c53c35547de1c5b9d46a4797cf6d8253807108c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.5"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[ZipFile]]
deps = ["Libdl", "Printf", "Zlib_jll"]
git-tree-sha1 = "254975fef2fc526583bb9b7c9420fe66ffe09f2f"
uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
version = "0.9.2"
[[Zlib_jll]]
deps = ["Libdl", "Pkg"]
git-tree-sha1 = "622d8b6dc0c7e8029f17127703de9819134d1b71"
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.11+14"
[[Zygote]]
deps = ["AbstractFFTs", "ArrayLayouts", "ChainRules", "FillArrays", "ForwardDiff", "Future", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NNlib", "Random", "Requires", "Statistics", "ZygoteRules"]
git-tree-sha1 = "2e2c82549fb0414df10469082fd001e2ede8547c"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.4.22"
[[ZygoteRules]]
deps = ["MacroTools"]
git-tree-sha1 = "b3b4882cc9accf6731a08cc39543fbc6b669dca8"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.0"

Project.toml

@@ -1,7 +1,12 @@
name = "GreenFlux"
uuid = "ccad5352-7643-4eb2-b711-e9c298e87bf0"
authors = ["Eduardo Cueto Mendoza"]
version = "0.1.0"
version = "0.0.4"
[deps]
CUDAapi = "3895d2a7-ec45-59b8-82bb-cfc6a382f9b3"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[compat]
julia = "1"

src/GreenFlux.jl

@@ -1,5 +1,24 @@
module GreenFlux
# Write your package code here.
using Flux: Chain, Recur, Dense, Conv, MaxPool, GlobalMaxPool, MeanPool, GlobalMeanPool,
DepthwiseConv, ConvTranspose, CrossCor, GRUCell, LSTMCell, RNNCell, Maxout
using Flux: params, outdims, sigmoid, rrelu, elu, celu, softsign, softplus, tanh, gelu,
hardsigmoid, logsigmoid, swish, selu, softmax, logsoftmax, hardtanh,
leakyrelu, relu6, lisht, tanhshrink, logcosh, mish, relu, trelu,
softshrink, identity
using Statistics: mean
using CUDAapi: has_cuda_gpu
export avgpowerdraw, modelflops
include("power/powerdraw.jl")
include("neflops/measureutils.jl")
include("neflops/layerflops.jl")
include("neflops/gradientflops.jl")
include("neflops/modelflops.jl")
function __init__()
@info "Finished loading GreenFlux..."
end
end # module
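For orientation, a minimal usage sketch of the two exported entry points (the model and numbers below are illustrative, not part of this commit):

```julia
using Flux: Chain, Conv, MaxPool, relu
using GreenFlux

model = Chain(Conv((2, 2), 1=>16, relu), MaxPool((2, 2)))

# Approximate FLOPs for 60 000 samples at batch size 32
# (both are Float64 in the modelflops signature).
modelflops(model, (28, 28), 60000.0, 32.0)

# Average energy cost in kWh; samples the machine for a minute or
# two and needs the Linux nvidia-smi/powerstat/free utilities.
avgpowerdraw()
```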

src/neflops/gradientflops.jl Normal file

@@ -0,0 +1,206 @@
"""
gradientflops(layer, input)::Float64
Calculates the number of non-embedding Floating Point Operations `neFLOPs` for most layers.
`layer` should be any of the [Flux](https://github.com/FluxML/Flux.jl) model layers except
`GlobalMaxPool` and `GlobalMeanPool`.
# Example
```julia
layer = Conv((2, 2), 1=>16, relu)
input = (4,4)
layerflops(Conv((2, 2), 1=>16, relu),(4,4))
```
"""
function gradientflops(layer::Dense, input::Tuple)
x = 0; y = 0; Fm = 1
if length(input) == 3
x,y,Fm = input
elseif length(input) == 2
x,y = input
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
# outdims on a Dense layer returns a one-element tuple; pad it as layerflops does
out = outdims(layer,(x,y))
if length(out) < 2
out = (out[1],1)
end
xo, yo = out
bi = length(layer.b)
noofoper = gradientoperations(layer)
return convert(Float64, ((xo*yo+bi)*noofoper)*Fm), (xo,yo)
end
function gradientflops(layer::Maxout,input::Tuple)
x = 0; y = 0; Fm = 1
if length(input) == 3
x,y,Fm = input
xo, yo = outdims(layer,(x,y))
elseif length(input) == 2
x,y = input
xo, yo = outdims(layer,(x,y))
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
a,b = layer.over
return convert(Float64,xo*yo*Fm), (x,1)
end
function gradientflops(layer::Conv, input::Tuple)
_,_,_,outputfeaturemaps = size(layer.weight)
x = 0; y = 0; Fm = 1
if length(input) == 3
x,y,Fm = input
xo, yo = outdims(layer,(x,y))
elseif length(input) == 2
x,y = input
xo, yo = outdims(layer,(x,y))
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
noofoper = gradientoperations(layer)
if size(layer.bias) == ()
return convert(Float64,xo*yo*noofoper*Fm), (xo,yo,outputfeaturemaps)
else
return convert(Float64,((xo*yo+length(layer.bias))*noofoper)*Fm), (xo,yo,outputfeaturemaps)
end
end
function gradientflops(layer::MaxPool, input::Tuple)
kernelsizex,kernelsizey = layer.k
x = 0; y = 0; Fm = 1
if length(input) == 3
x,y,Fm = input
xo, yo = outdims(layer,(x,y))
elseif length(input) == 2
x,y = input
xo, yo = outdims(layer,(x,y))
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
outx, outy = outdims(layer,(x,y))
return convert(Float64,outx*outy*Fm), (outx,outy)
end
gradientflops(layer::GlobalMaxPool, input::Tuple) = error("Must be implemented in a future release")
function gradientflops(layer::MeanPool, input::Tuple)
kernelsizex,kernelsizey = layer.k
x = 0; y = 0; Fm = 1
if length(input) == 3
x,y,Fm = input
xo, yo = outdims(layer,(x,y))
elseif length(input) == 2
x,y = input
xo, yo = outdims(layer,(x,y))
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
outx, outy = outdims(layer,(x,y))
return convert(Float64,outx*outy*Fm), (outx,outy)
end
gradientflops(layer::GlobalMeanPool, input::Tuple) = error("Must be implemented in a future release")
function gradientflops(layer::DepthwiseConv, input::Tuple)
kernelsizex,kernelsizey,inputfeaturemaps,outputfeaturemaps = size(layer.weight)
x = 0; y = 0; Fm = 1
if length(input) == 3
x,y,Fm = input
xo, yo = outdims(layer,(x,y))
elseif length(input) == 2
x,y = input
xo, yo = outdims(layer,(x,y))
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
outx, outy = outdims(layer,(x,y))
noofoper = gradientoperations(layer)
if size(layer.bias) == ()
return convert(Float64,kernelsizex*kernelsizey*noofoper*Fm), (outx,outy)
else
return convert(Float64,kernelsizex*kernelsizey*noofoper*Fm + (length(layer.bias)-1)), (outx,outy)
end
end
function gradientflops(layer::ConvTranspose, input::Tuple)
kernelsizex,kernelsizey,inputfeaturemaps,outputfeaturemaps = size(layer.weight)
x = 0; y = 0; Fm = 1
if length(input) == 3
x,y,Fm = input
xo, yo = outdims(layer,(x,y))
elseif length(input) == 2
x,y = input
xo, yo = outdims(layer,(x,y))
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
outx, outy = outdims(layer,(x,y))
noofoper = gradientoperations(layer)
if size(layer.bias) == ()
return convert(Float64,kernelsizex*kernelsizey*noofoper*Fm), (outx,outy)
else
return convert(Float64,kernelsizex*kernelsizey*noofoper*Fm + (length(layer.bias)-1)), (outx,outy)
end
end
function gradientflops(layer::CrossCor, input::Tuple)
kernelsizex,kernelsizey,inputfeaturemaps,outputfeaturemaps = size(layer.weight)
x = 0; y = 0; Fm = 1
if length(input) == 3
x,y,Fm = input
xo, yo = outdims(layer,(x,y))
elseif length(input) == 2
x,y = input
xo, yo = outdims(layer,(x,y))
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
outx, outy = outdims(layer,(x,y))
noofoper = gradientoperations(layer)
if size(layer.bias) == ()
return convert(Float64,kernelsizex*kernelsizey*noofoper*Fm), (outx,outy)
else
return convert(Float64,kernelsizex*kernelsizey*noofoper*Fm + (length(layer.bias)-1)), (outx,outy)
end
end
function gradientflops(layer::Recur{T}, input::Tuple) where {T <: RNNCell}
inM,inN = input
WhM,WhN = size(layer.cell.Wh)
WiM,WiN = size(layer.cell.Wi)
hM = length(layer.cell.h)
noofoper = activationoperations(layer)
if size(layer.cell.b) == ()
return convert(Float64,((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM)*noofoper)), (hM,1)
else
bM = length(layer.cell.b)
return convert(Float64,((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + bM)*noofoper)), (bM,1)
end
end
function gradientflops(layer::Recur{T}, input::Tuple) where {T <: LSTMCell}
inM,inN = input
WhM,WhN = size(layer.cell.Wh)
WiM,WiN = size(layer.cell.Wi)
hM = length(layer.cell.h)
noofoper = 3
if size(layer.cell.b) == ()
return convert(Float64,((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+(3*hM)+((hM*noofoper)+hM)), (hM,1)
else
bM = length(layer.cell.b)
return convert(Float64,((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+(3*bM)+((bM*noofoper)+bM)), (bM,1)
end
end
function gradientflops(layer::Recur{T}, input::Tuple) where {T <: GRUCell}
inM,inN = input
WhM,WhN = size(layer.cell.Wh)
WiM,WiN = size(layer.cell.Wi)
hM = length(layer.cell.h)
noofoper = 3
if size(layer.cell.b) == ()
return convert(Float64,((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+(3*hM)+((hM*noofoper)+hM)), (hM,1)
else
bM = length(layer.cell.b)
return convert(Float64,((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*(WiN+hM)-inM + 2*bM)*noofoper)+(4bM)), (bM,1)
end
end
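Each `gradientflops` method returns a `(flops, outputsize)` pair so estimates can be threaded through a stack of layers, mirroring `layerflops`; a short sketch (sizes illustrative):

```julia
using Flux: Conv, relu

layer = Conv((2, 2), 1=>16, relu)

# Estimated backward-pass FLOPs together with the output size that
# would seed the next layer's estimate.
flops, outsize = gradientflops(layer, (4, 4))
```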

src/neflops/layerflops.jl Normal file

@@ -0,0 +1,223 @@
"""
layerflops(layer, input)::Float64
Calculates the number of non-embedding Floating Point Operations `neFLOPs` for most layers.
`layer` should be any of the [Flux](https://github.com/FluxML/Flux.jl) model layers except
`GlobalMaxPool` and `GlobalMeanPool`.
# Example
```julia
layer = Conv((2, 2), 1=>16, relu)
input = (4,4)
layerflops(Conv((2, 2), 1=>16, relu),(4,4))
```
"""
function layerflops(layer::Dense,input::Tuple)
N,M = size(layer.W)
Mi = 0; Ni = 0; Fm = 1; out = 0
if length(input) == 3
Mi,Ni,Fm = input
out = outdims(layer,(Mi,Ni))
elseif length(input) == 2
Mi,Ni = input
Fm = 1
out = outdims(layer,input)
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
bi = length(layer.b)
if length(out) < 2
out = (out[1],1)
end
noofopers = activationoperations(layer)
return convert(Float64,((2*Mi*N - M)+bi)*noofopers*Fm), out
end
function layerflops(layer::Maxout,input::Tuple)
i = 0; j = 0; Fm = 1
if length(input) == 3
i,j,Fm = input
elseif length(input) == 2
i,j = input
Fm = 1
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
a,b = layer.over
return convert(Float64,a*b*j*i*Fm), (j,1)
end
function layerflops(layer::Conv, input::Tuple)
kernelsizex,kernelsizey,inputfeaturemaps,outputfeaturemaps = size(layer.weight)
x = 0; y = 0
if length(input) == 3
x, y, _ = input
elseif length(input) == 2
x, y = input
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
outx, outy = outdims(layer,(x,y))
noofoper = activationoperations(layer)
if size(layer.bias) == ()
return convert(Float64,inputfeaturemaps*(kernelsizex*kernelsizey+(kernelsizex*kernelsizey-1))*outx*outy + outx*outy*noofoper*outputfeaturemaps), (outx,outy,outputfeaturemaps)
else
return convert(Float64,(2*kernelsizex*kernelsizey)*inputfeaturemaps*outx*outy + outx*outy*noofoper*outputfeaturemaps), (outx,outy,outputfeaturemaps)
end
end
function layerflops(layer::MaxPool, input::Tuple)
Fm = 1
if length(input) == 3
_,_,Fm = input
elseif length(input) == 2
_,_ = input
Fm = 1
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
kernelsizex,kernelsizey = layer.k
outx, outy = outdims(layer,input)
return convert(Float64,kernelsizex*kernelsizey*outx*outy*Fm), (outx,outy)
end
layerflops(layer::GlobalMaxPool, input::Tuple) = error("Must be implemented in a future release")
function layerflops(layer::MeanPool, input::Tuple)
Fm = 1
if length(input) == 3
_,_,Fm = input
elseif length(input) == 2
_,_ = input
Fm = 1
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
kernelsizex,kernelsizey = layer.k
outx, outy = outdims(layer,input)
return convert(Float64,kernelsizex*kernelsizey*outx*outy*Fm), (outx,outy)
end
layerflops(layer::GlobalMeanPool, input::Tuple) = error("Must be implemented in a future release")
function layerflops(layer::DepthwiseConv, input::Tuple)
kernelsizex,kernelsizey,inputfeaturemaps,outputfeaturemaps = size(layer.weight)
x = 0; y = 0
if length(input) == 3
x, y, _ = input
elseif length(input) == 2
x, y = input
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
outx, outy = outdims(layer,(x,y))
noofoper = activationoperations(layer)
if size(layer.bias) == ()
return convert(Float64,inputfeaturemaps*(kernelsizex*kernelsizey+(kernelsizex*kernelsizey-1))*outx*outy + outx*outy*noofoper*outputfeaturemaps), (outx,outy,outputfeaturemaps)
else
return convert(Float64,(2*kernelsizex*kernelsizey)*inputfeaturemaps*outx*outy + outx*outy*noofoper*outputfeaturemaps), (outx,outy,outputfeaturemaps)
end
end
function layerflops(layer::ConvTranspose, input::Tuple)
kernelsizex,kernelsizey,inputfeaturemaps,outputfeaturemaps = size(layer.weight)
x = 0; y = 0
if length(input) == 3
x, y, _ = input
elseif length(input) == 2
x, y = input
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
outx, outy = outdims(layer,(x,y))
noofoper = activationoperations(layer)
if size(layer.bias) == ()
return convert(Float64,inputfeaturemaps*(kernelsizex*kernelsizey+(kernelsizex*kernelsizey-1))*outx*outy + outx*outy*noofoper*outputfeaturemaps), (outx,outy,outputfeaturemaps)
else
return convert(Float64,(2*kernelsizex*kernelsizey)*inputfeaturemaps*outx*outy + outx*outy*noofoper*outputfeaturemaps), (outx,outy,outputfeaturemaps)
end
end
function layerflops(layer::CrossCor, input::Tuple)
kernelsizex,kernelsizey,inputfeaturemaps,outputfeaturemaps = size(layer.weight)
x = 0; y = 0
if length(input) == 3
x, y, _ = input
elseif length(input) == 2
x, y = input
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
outx, outy = outdims(layer,(x,y))
noofoper = activationoperations(layer)
if size(layer.bias) == ()
return convert(Float64,inputfeaturemaps*(kernelsizex*kernelsizey+(kernelsizex*kernelsizey-1))*outx*outy + outx*outy*noofoper*outputfeaturemaps), (outx,outy,outputfeaturemaps)
else
return convert(Float64,(2*kernelsizex*kernelsizey)*inputfeaturemaps*outx*outy + outx*outy*noofoper*outputfeaturemaps), (outx,outy,outputfeaturemaps)
end
end
function layerflops(layer::Recur{T}, input::Tuple) where {T <: RNNCell}
inM = 0; inN = 0; Fm = 1
if length(input) == 3
inM,inN, Fm = input
elseif length(input) == 2
inM,inN = input
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
WhM,WhN = size(layer.cell.Wh)
WiM,WiN = size(layer.cell.Wi)
hM = length(layer.cell.h)
noofoper = activationoperations(layer)
if size(layer.cell.b) == ()
return convert(Float64,(((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM)*noofoper))*Fm), (hM,1)
else
bM = length(layer.cell.b)
return convert(Float64,(((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + bM)*noofoper))*Fm), (bM,1)
end
end
function layerflops(layer::Recur{T}, input::Tuple) where {T <: LSTMCell}
inM = 0; inN = 0; Fm = 1
if length(input) == 3
inM,inN, Fm = input
elseif length(input) == 2
inM,inN = input
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
WhM,WhN = size(layer.cell.Wh)
WiM,WiN = size(layer.cell.Wi)
hM = length(layer.cell.h)
noofoper = 3
if size(layer.cell.b) == ()
return convert(Float64,(((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+(3*hM)+((hM*noofoper)+hM))*Fm), (hM,1)
else
bM = length(layer.cell.b)
return convert(Float64,(((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+(3*bM)+((bM*noofoper)+bM))*Fm), (bM,1)
end
end
function layerflops(layer::Recur{T}, input::Tuple) where {T <: GRUCell}
inM = 0; inN = 0; Fm = 1
if length(input) == 3
inM,inN, Fm = input
elseif length(input) == 2
inM,inN = input
else
error("Not a valid Input size, expected (::Int,::Int) or (::Int,::Int,::Int)")
end
WhM,WhN = size(layer.cell.Wh)
WiM,WiN = size(layer.cell.Wi)
hM = length(layer.cell.h)
noofoper = 3
if size(layer.cell.b) == ()
return convert(Float64,(((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM)*noofoper)+(3*hM)+((hM*noofoper)+hM))*Fm), (hM,1)
else
bM = length(layer.cell.b)
return convert(Float64,(((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*WiN-inM + 2*bM)*noofoper)+((2*WhN*WhM-hM + 2*WiM*(WiN+hM)-inM + 2*bM)*noofoper)+(4bM))*Fm), (bM,1)
end
end
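`layerflops` follows the same `(flops, outputsize)` convention, which is what lets `modelflops` fold a whole `Chain`; a worked sketch for the `Dense` path above (sizes illustrative):

```julia
using Flux: Dense, sigmoid

# For a (4,1) input to Dense(4, 2, sigmoid): W is 2×4, so
# ((2*4*2 - 4) + 2) multiply-accumulates and bias adds, times the
# 4 operations charged per sigmoid, gives 56.0 FLOPs and output (2, 1).
flops, outsize = layerflops(Dense(4, 2, sigmoid), (4, 1))
```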

src/neflops/measureutils.jl Normal file

@@ -0,0 +1,149 @@
"""
activationoperations(layer)::Int
Outputs the approximate mumber of operations for a Flux activation function.
```julia
layer = Conv(weight = weight,
σ = sigmoid)
activationoperations(Conv(weight = weight,
σ = sigmoid))
```
"""
function activationoperations(layer::Recur)
layer = recurrentunpack(layer)
activation = layer.σ
if activation == sigmoid || activation == rrelu || activation == elu || activation == celu ||
activation == softsign || activation == softplus || activation == tanh
noofoper = 3
elseif activation == gelu
noofoper = 8
elseif activation == hardsigmoid || activation == logsigmoid || activation == swish ||
activation == selu || activation == softmax || activation == logsoftmax
noofoper = 4
elseif activation == hardtanh || activation == leakyrelu || activation == relu6 ||
activation == lisht || activation == tanhshrink
noofoper = 2
elseif activation == logcosh || activation == mish
noofoper = 5
elseif activation == relu || activation == trelu || activation == softshrink
noofoper = 1
elseif activation == identity
noofoper = 0
else
@info "Unkown activation type defaulting to identity"
return 0
end
return noofoper
end
function activationoperations(layer)
activation = layer.σ
if activation == sigmoid || activation == rrelu || activation == elu || activation == celu ||
activation == softsign || activation == softplus || activation == tanh
noofoper = 4
elseif activation == gelu
noofoper = 9
elseif activation == hardsigmoid || activation == logsigmoid || activation == swish ||
activation == selu || activation == softmax || activation == logsoftmax
noofoper = 5
elseif activation == hardtanh || activation == leakyrelu || activation == relu6 ||
activation == lisht || activation == tanhshrink
noofoper = 3
elseif activation == logcosh || activation == mish
noofoper = 6
elseif activation == relu || activation == trelu || activation == softshrink
noofoper = 2
elseif activation == identity
noofoper = 1
else
@info "Unkown activation type defaulting to identity"
return 1
end
return noofoper
end
"""
gradientoperations(layer)::Int
Outputs the approximate mumber of operations for the gradient of a Flux activation
function.
```julia
layer = Conv(weight = weight,
σ = sigmoid)
gradientoperations(Conv(weight = weight,
σ = sigmoid))
```
"""
function gradientoperations(layer::Recur)
layer = recurrentunpack(layer)
activation = layer.σ
if activation == sigmoid || activation == rrelu || activation == elu || activation == celu ||
activation == softsign || activation == softplus || activation == tanh
noofoper = 4
elseif activation == gelu
noofoper = 8
elseif activation == hardsigmoid || activation == logsigmoid || activation == swish ||
activation == selu || activation == softmax || activation == logsoftmax
noofoper = 4
elseif activation == hardtanh || activation == leakyrelu || activation == relu6 ||
activation == lisht || activation == tanhshrink
noofoper = 2
elseif activation == logcosh || activation == mish
noofoper = 5
elseif activation == relu || activation == trelu || activation == softshrink
noofoper = 1
elseif activation == identity
noofoper = 0
else
@info "Unkown activation type defaulting to identity"
return 1
end
return noofoper
end
function gradientoperations(layer)
activation = layer.σ
if activation == sigmoid || activation == rrelu || activation == elu || activation == celu ||
activation == softsign || activation == softplus || activation == tanh
noofoper = 4
elseif activation == gelu
noofoper = 8
elseif activation == hardsigmoid || activation == logsigmoid || activation == swish ||
activation == selu || activation == softmax || activation == logsoftmax
noofoper = 4
elseif activation == hardtanh || activation == leakyrelu || activation == relu6 ||
activation == lisht || activation == tanhshrink
noofoper = 2
elseif activation == logcosh || activation == mish
noofoper = 5
elseif activation == relu || activation == trelu || activation == softshrink
noofoper = 1
elseif activation == identity
noofoper = 0
else
@info "Unkown activation type defaulting to identity"
return 1
end
return noofoper
end
function recurrentunpack(layer::Recur)
return layer.cell
end
islayer(::Any) = false
islayer(::Recur) = true
islayer(::Dense) = true
islayer(::Conv) = true
islayer(::MaxPool) = true
islayer(::MeanPool) = true
islayer(::DepthwiseConv) = true
islayer(::ConvTranspose) = true
islayer(::CrossCor) = true
islayer(::Maxout) = true
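The counts above are fixed approximations keyed off the activation function; a quick sketch of how the helpers behave:

```julia
using Flux: Dense, relu, sigmoid

activationoperations(Dense(4, 2, sigmoid))  # 4: forward cost of sigmoid
gradientoperations(Dense(4, 2, relu))       # 1: cost of relu's gradient
islayer(Dense(4, 2))                        # true
islayer(sum)                                # false: skipped by modelflops
```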

src/neflops/modelflops.jl Normal file

@@ -0,0 +1,44 @@
"""
modelflops(model::Chain)::Float64
Calculates the approximate number of Floating Point Operations that the model will require
```julia
weight = rand(Float64, 3, 3, 5)
bias = zeros(Float64, 5)
Conv(weight = weight,
bias = bias,
σ = sigmoid)
```
"""
function modelflops(model::Chain,inputsize::Tuple,samplesize::Float64,batchsize::Float64)
# Keep only the layers that have a layerflops method, then thread each
# layer's output size into the next while accumulating the estimates.
nelayers = Any[]
for ml in collect(model)
if islayer(ml)
push!(nelayers,ml)
end
end
layeroutput = Float64[]
output = inputsize
lossandgradient = 1 # fixed per-batch cost of the loss and gradient update
for ml in nelayers
noflops, output = layerflops(ml,output)
push!(layeroutput, noflops)
end
return sum(layeroutput) * samplesize + lossandgradient*batchsize
end
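For intuition, the loop above amounts to folding `layerflops` over the supported layers; a hand-rolled sketch (sizes illustrative, and assuming the installed Flux `outdims` accepts the threaded size tuples):

```julia
using Flux: Conv, MaxPool, relu

layers = [Conv((2, 2), 1=>16, relu), MaxPool((2, 2))]

# Thread each layer's output size into the next while summing the
# estimates; this matches sum(layeroutput) before scaling.
let total = 0.0, insize = (28, 28)
    for l in layers
        f, insize = layerflops(l, insize)
        total += f
    end
    total
end
```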

src/power/powerdraw.jl Normal file

@@ -0,0 +1,138 @@
"""
gpupowerdraw()::Float64
The function uses Linux `nvidia-smi` package to sample and get the average electricity
draw of the GPUs.
"""
function gpupowerdraw()
if has_cuda_gpu()
gpucommand = `nvidia-smi`
usage = Array{Any}(undef,60)
cap = Array{Any}(undef,60)
nogpus = 0
for count in 1:60
smis = Array{Any}[]
smiss = Array{Any}[]
gpus = Array{Any}[]
powerdraw = Array{Float64}[]
powercap = Array{Float64}[]
smi = read(gpucommand, String);
smi = split(smi, "\n")
for s in smi
push!(smis,split(s, " "))
end
for s in smis
push!(smiss,filter(x->x≠"",s))
end
for strings in smiss
if length(strings) > 5 && strings[6] == "/" && strings[10] == "/"
push!(gpus,strings)
end
end
nogpus = length(gpus)
for g in gpus
usagestr = ""
capstr = ""
if g[5] == "N/A"
usagestr = "0.0"
else
usagestr = usagestr * g[5]
end
if g[7] == "N/A"
capstr = "0.0"
else
capstr = capstr * g[7]
end
powerdraw = vcat(powerdraw, parse(Float64,usagestr))
powercap = vcat(powercap, parse(Float64,capstr))
end
usage[count] = mean(powerdraw)
cap[count] = mean(powercap)
sleep(1)
end
return nogpus, mean(usage), mean(cap)
else
@info "This computer does not have acces to a GPU passing to CPU and RAM computations"
end
end
"""
cpupowerdraw()::Float64
This function uses the Linux `powerstat` utility to get the average CPU energy cost.
"""
function cpupowerdraw()
cpucommand = `powerstat -R -n -d0`
try
cpu = read(cpucommand, String);
cpu = split(cpu,"\n")
cpu = cpu[66][60:64]
return parse(Float64,cpu)
catch e
@info "powerstat is not installed on this computer"
return 0.0
end
end
#TODO: further fine-tune the model
"""
rampowerdraw()::Float64
[Approximate RAM Power Draw](https://www.jedec.org/) the values are provided by the JEDEC we just take the
ratio of activated memory against the unactivated for the maximum power value and convert it
to hours.
"""
function rampowerdraw()
ramcommand = `free`
powerused = Array{Float64}(undef,60)
for count in 1:60
ram = read(ramcommand, String);
ram = split(ram,"\n")
ram = split(ram[2]," ")
filter!(x->x≠"",ram)
usedram = parse(Float64,ram[3])
totalram = parse(Float64,ram[2])
powerused[count] = ((usedram*1.575)/totalram)*1.904
sleep(1)
end
return mean(powerused)
end
#TODO: modify the code to work on Windows.
"""
avgpowerdraw()::Float64
[Average Power Draw](https://arxiv.org/abs/1906.02243) where `pc` is the average power
draw (in watts) from all CPU sockets during training, let `pr` be the average power draw from all
DRAM (main memory) sockets, let `pg` be the average power draw of the GPUs during training and `g`
the number of available gpus.
`apd = 1.58*t*(pc + pr + g*pg)/1000`
returns the average power consumption in kWh.
"""
function avgpowerdraw()
starttime = time() # needed in both branches
if has_cuda_gpu()
g, pg, _ = gpupowerdraw()
pc = cpupowerdraw()
pr = rampowerdraw()
endtime = time()
elapsedtime = (endtime - starttime)/3600
return 1.58*elapsedtime*(pc + pr + g*pg)/1000
else
pc = cpupowerdraw()
pr = rampowerdraw()
endtime = time()
elapsedtime = (endtime - starttime)/3600
return 1.58*elapsedtime*(pc + pr)/1000
end
end
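To make the `apd` formula concrete, a worked sketch with illustrative numbers (the wattages are assumptions, not measurements):

```julia
t  = 2.0    # training time in hours
pc = 95.0   # average CPU draw in watts
pr = 4.0    # average DRAM draw in watts
pg = 250.0  # average draw per GPU in watts
g  = 1      # number of GPUs

apd = 1.58 * t * (pc + pr + g * pg) / 1000  # ≈ 1.10 kWh
```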