Initial commit

Eduardo Cueto-Mendoza 2024-04-25 14:14:19 +01:00
parent 51f5fcae5f
commit cc92f4ce04
6 changed files with 274 additions and 1 deletion

.gitignore vendored Normal file (+2)

@@ -0,0 +1,2 @@
__pycache__/
*.pkl

LICENSE (+1, -1)

@@ -1,4 +1,4 @@
-Copyright (c) 2024 TastyPancakes
+Copyright (c) 2024 Eduardo Cueto-Mendoza
 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:


@@ -0,0 +1,84 @@
import copy

import functions as aux
# import statistics as st
# import pandas as pd

# Short keys used in the pickled results, and the readable names the
# final output is keyed by
data_types = ['mni', 'cif']
model_types = ['fre', 'bay']
o_data_types = ['MNIST', 'CIFAR']
o_model_types = ['LeNet', 'BCNN']

gpu_ene_data = aux.load_pickle("energy_gpu.pkl")
cpu_ene_data = aux.load_pickle("energy_cpu.pkl")
gpu_exp_data = aux.load_pickle("result_gpu.pkl")

# Split the raw GPU and CPU energy readings into 100 per-epoch chunks
for data in data_types:
    for model in model_types:
        for size in range(1, 8):
            gpu_ene_data[data][model][size] = \
                list(aux.split(gpu_ene_data[data][model][size], 100))
            cpu_ene_data[data][model][size] = \
                list(aux.split(cpu_ene_data[data][model][size], 100))

# Total energy per epoch: the GPU and CPU readings for that epoch, summed.
# deepcopy keeps the nested lists of gpu_ene_data from being aliased
spl_ene_data = copy.deepcopy(gpu_ene_data)
for data in data_types:
    for model in model_types:
        for size in range(1, 8):
            for i in range(0, 100):
                spl_ene_data[data][model][size][i] = sum(
                    gpu_ene_data[data][model][size][i] +
                    cpu_ene_data[data][model][size][i]
                )

# Accumulate over epochs: entry i becomes the total energy consumed up
# to and including epoch i
for data in data_types:
    for model in model_types:
        for size in range(1, 8):
            temp = []
            for i in range(0, 100):
                temp.append(
                    # st.mean(spl_ene_data[data][model][size][0:i+1])
                    sum(spl_ene_data[data][model][size][0:i+1])
                )
            spl_ene_data[data][model][size] = temp

# Efficiency: accuracy at epoch i per unit of accumulated energy
eff_data = copy.deepcopy(spl_ene_data)
for data in data_types:
    for model in model_types:
        for size in range(1, 8):
            for i in range(0, 100):
                eff_data[data][model][size][i] = \
                    (gpu_exp_data[data][model][size]['acc'][i] /
                     spl_ene_data[data][model][size][i]) * 100

# Re-key the output with the human-readable dataset and model names
for data, o_data in zip(data_types, o_data_types):
    eff_data[o_data] = eff_data.pop(data)
for o_data in o_data_types:
    for model, o_model in zip(model_types, o_model_types):
        eff_data[o_data][o_model] = eff_data[o_data].pop(model)

# mul = pd.MultiIndex.from_product([[1, 2, 3, 4, 5, 6, 7],
#                                   ['bay', 'fre'], ['cif', 'mni']])
# eff_data = pd.DataFrame(eff_data)
aux.save_pickle("efficiency_data.pkl", eff_data)

functions.py Normal file (+87)

@@ -0,0 +1,87 @@
import pickle

import torch
import torch.linalg as alg


def square_matrix(tensor):
    """Zero-pad a tensor so its trailing two dimensions are square."""
    tensor_size = tensor.size()
    if len(tensor_size) == 1:
        # Promote a length-n vector to an n x n matrix whose last row
        # holds the vector
        temp = torch.zeros([tensor_size[0], tensor_size[0]-1])
        return torch.cat((temp.T,
                          tensor.reshape(1, tensor_size[0])))
    elif len(tensor_size) == 2:
        if tensor_size[0] > tensor_size[1]:
            # Tall matrix: pad zero columns on the left (padding rows
            # here would make the concatenated shapes incompatible)
            temp = torch.zeros([tensor_size[0],
                                tensor_size[0]-tensor_size[1]])
            return torch.cat((temp, tensor), dim=1)
        elif tensor_size[0] < tensor_size[1]:
            # Wide matrix: pad zero rows on top
            temp = torch.zeros([tensor_size[1],
                                tensor_size[1]-tensor_size[0]])
            return torch.cat((temp.T, tensor))
        else:
            return tensor
    elif len(tensor_size) > 2:
        # Batched tensor (e.g. conv weights): square every trailing 2-D
        # slice; the result goes into a fresh tensor because the squared
        # slices may not fit back into the original shape
        rows, cols = tensor_size[-2], tensor_size[-1]
        n = max(rows, cols)
        flat = tensor.detach().reshape(-1, rows, cols)
        out = torch.zeros(flat.size(0), n, n, dtype=tensor.dtype)
        out[:, n-rows:, n-cols:] = flat
        return out.reshape(*tensor_size[:-2], n, n)


def neumann_entropy(tensor):
    """Von Neumann entropy -sum(e * log(e)) over the (real parts of the)
    eigenvalues of a square matrix; batched inputs are summed over all
    trailing matrices."""
    tensor_size = tensor.size()
    if len(tensor_size) == 1:
        return 0
    elif len(tensor_size) == 2:
        e = alg.eigvals(tensor)
        # temp_abs = torch.abs(e)
        temp_abs = e.real
        temp = torch.log(temp_abs)
        # Zero the Inf/NaN terms produced by zero or negative eigenvalues
        temp = torch.nan_to_num(temp, nan=0.0, posinf=0.0, neginf=0.0)
        return -1 * torch.sum(temp_abs * temp)
    elif len(tensor_size) > 2:
        # Accumulate the entropy of every trailing matrix
        flat = tensor.reshape(-1, tensor_size[-2], tensor_size[-1])
        total = 0
        for t in flat:
            e = alg.eigvals(t)
            temp_abs = e.real
            temp = torch.log(temp_abs)
            temp = torch.nan_to_num(temp, nan=0.0, posinf=0.0, neginf=0.0)
            total = total + -1 * torch.sum(temp_abs * temp)
        return total


def load_pickle(fpath):
    """Load a pickled object from fpath."""
    with open(fpath, "rb") as f:
        data = pickle.load(f)
    return data


def save_pickle(pickle_name, data_dump):
    """Pickle data_dump to pickle_name."""
    with open(pickle_name, 'wb') as f:
        pickle.dump(data_dump, f)


def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]


def split(lst, n):
    """Split lst into n contiguous chunks of near-equal length."""
    k, m = divmod(len(lst), n)
    return (lst[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
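As a quick sanity check of the two tensor helpers above, a sketch assuming a recent PyTorch (the shapes and values are illustrative):

import torch
import functions as aux

w = torch.randn(3, 5)            # wide "weight" matrix
sq = aux.square_matrix(w)        # zero rows padded on top -> 5 x 5
print(sq.shape)                  # torch.Size([5, 5])

h = aux.neumann_entropy(sq)      # scalar tensor
print(float(h))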

general_plots.py Normal file (+32)

@@ -0,0 +1,32 @@
import matplotlib.pyplot as plt

import functions as aux

eff_df = aux.load_pickle("efficiency_data.pkl")
bayes_cifar_entropy = aux.load_pickle("bayes_data_cifar_ne.pkl")
bayes_mnist_entropy = aux.load_pickle("bayes_data_mnist_ne.pkl")

# Parameter names stored per epoch for the Bayesian CNN and for LeNet
bayes_keys = ['conv1.W_mu', 'conv1.W_rho', 'conv1.bias_mu', 'conv1.bias_rho',
              'conv2.W_mu', 'conv2.W_rho', 'conv2.bias_mu', 'conv2.bias_rho',
              'fc1.W_mu', 'fc1.W_rho', 'fc1.bias_mu', 'fc1.bias_rho',
              'fc2.W_mu', 'fc2.W_rho', 'fc2.bias_mu', 'fc2.bias_rho',
              'fc3.W_mu', 'fc3.W_rho', 'fc3.bias_mu', 'fc3.bias_rho']
lenet_keys = ['conv1.weight', 'conv1.bias', 'conv2.weight', 'conv2.bias',
              'fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias',
              'fc3.weight', 'fc3.bias']

# Disabled: training efficiency of the CIFAR BCNN for each model size
"""
for size in range(1, 8):
    if size != 3:
        plt.plot(eff_df['CIFAR']['BCNN'][size], label='Size {}'.format(size))
plt.legend(loc='upper right')
plt.show()
"""

# Entropy of the conv2 weight means over training, for model size 1
temp = []
for epoch in range(0, 100):
    temp.append(bayes_cifar_entropy[1][epoch]['conv2.W_mu'])
plt.plot(temp)
plt.show()

get_entropy.py Normal file (+68)

@@ -0,0 +1,68 @@
import functions as aux

# Parameter names stored per epoch for the Bayesian CNN and for LeNet
bayes_keys = ['conv1.W_mu', 'conv1.W_rho', 'conv1.bias_mu', 'conv1.bias_rho',
              'conv2.W_mu', 'conv2.W_rho', 'conv2.bias_mu', 'conv2.bias_rho',
              'fc1.W_mu', 'fc1.W_rho', 'fc1.bias_mu', 'fc1.bias_rho',
              'fc2.W_mu', 'fc2.W_rho', 'fc2.bias_mu', 'fc2.bias_rho',
              'fc3.W_mu', 'fc3.W_rho', 'fc3.bias_mu', 'fc3.bias_rho']
lenet_keys = ['conv1.weight', 'conv1.bias', 'conv2.weight', 'conv2.bias',
              'fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias',
              'fc3.weight', 'fc3.bias']


def to_entropy(models, keys):
    """Replace every stored parameter tensor with the von Neumann entropy
    of its zero-padded square form, in place."""
    for model_size in range(1, 8):
        for epoch in range(0, 100):
            for k in keys:
                models[model_size][epoch][k] = \
                    aux.neumann_entropy(
                        aux.square_matrix(models[model_size][epoch][k])
                    )


# Each pickle is loaded, converted, saved, and then deleted, so only one
# model set is held in memory at a time
models_bayes_cifar = aux.load_pickle("bayes_data_cifar.pkl")
to_entropy(models_bayes_cifar, bayes_keys)
aux.save_pickle("bayes_data_cifar_ne.pkl", models_bayes_cifar)
del models_bayes_cifar

models_bayes_mnist = aux.load_pickle("bayes_data_mnist.pkl")
to_entropy(models_bayes_mnist, bayes_keys)
aux.save_pickle("bayes_data_mnist_ne.pkl", models_bayes_mnist)
del models_bayes_mnist

models_lenet_cifar = aux.load_pickle("lenet_data_cifar.pkl")
to_entropy(models_lenet_cifar, lenet_keys)
aux.save_pickle("lenet_data_cifar_ne.pkl", models_lenet_cifar)
del models_lenet_cifar

models_lenet_mnist = aux.load_pickle("lenet_data_mnist.pkl")
to_entropy(models_lenet_mnist, lenet_keys)
aux.save_pickle("lenet_data_mnist_ne.pkl", models_lenet_mnist)
del models_lenet_mnist
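The *_ne.pkl files written above are what general_plots.py reads back; a minimal sketch of the expected structure (model size -> epoch -> parameter name -> scalar entropy):

import functions as aux

ne = aux.load_pickle("bayes_data_cifar_ne.pkl")
# Entropy of the conv1 weight means for model size 1 at the last epoch
print(ne[1][99]['conv1.W_mu'])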