diff --git a/efficiency_computations.py b/efficiency_computations.py
index 53087c6..61530ce 100644
--- a/efficiency_computations.py
+++ b/efficiency_computations.py
@@ -2,6 +2,9 @@ import functions as aux
 import statistics as st
 # import pandas as pd
 
+max_epoch = 30  # number of training epochs per run
+max_size = 8    # exclusive upper bound: model sizes 1..7
+
 data_types = ['mni', 'cif']
 model_types = ['fre', 'bay']
 
@@ -15,41 +18,41 @@ gpu_exp_data = aux.load_pickle("result_gpu.pkl")
 
 for data in data_types:
     for model in model_types:
-        for size in range(1, 8):
+        for size in range(1, max_size):
             gpu_ene_data[data][model][size] = \
                 list(
-                    aux.split(gpu_ene_data[data][model][size], 100)
+                    aux.split(gpu_ene_data[data][model][size], max_epoch)
                 )
 
 for data in data_types:
     for model in model_types:
-        for size in range(1, 8):
+        for size in range(1, max_size):
             cpu_ene_data[data][model][size] = \
                 list(
-                    aux.split(cpu_ene_data[data][model][size], 100)
+                    aux.split(cpu_ene_data[data][model][size], max_epoch)
                 )
 
 spl_ene_data = dict(gpu_ene_data)
 for data in data_types:
     for model in model_types:
-        for size in range(1, 8):
-            for i in range(0, 100):
+        for size in range(1, max_size):
+            for i in range(0, max_epoch):
                 spl_ene_data[data][model][size][i] = \
                     gpu_ene_data[data][model][size][i] +\
                     cpu_ene_data[data][model][size][i]
 
 for data in data_types:
     for model in model_types:
-        for size in range(1, 8):
-            for i in range(0, 100):
+        for size in range(1, max_size):
+            for i in range(0, max_epoch):
                 spl_ene_data[data][model][size][i] = \
                     sum(spl_ene_data[data][model][size][i])
 
 for data in data_types:
     for model in model_types:
-        for size in range(1, 8):
+        for size in range(1, max_size):
             temp = []
-            for i in range(0, 100):
+            for i in range(0, max_epoch):
                 temp.append(
                     # st.mean(spl_ene_data[data][model][size][0:i+1])
                     sum(spl_ene_data[data][model][size][0:i+1])
@@ -59,8 +62,8 @@ for data in data_types:
 eff_data = dict(gpu_ene_data)
 for data in data_types:
     for model in model_types:
-        for size in range(1, 8):
-            for i in range(0, 100):
+        for size in range(1, max_size):
+            for i in range(0, max_epoch):
                 eff_data[data][model][size][i] = \
                     (gpu_exp_data[data][model][size]['acc'][i]
                      / spl_ene_data[data][model][size][i]) * 100
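Note on the two `dict(gpu_ene_data)` copies above: `dict()` is a shallow copy, so the nested per-model dicts and per-size lists stay shared between `gpu_ene_data`, `spl_ene_data`, and `eff_data`, and every in-place assignment mutates all of them at once. The loops only work because each index is read before it is overwritten. A minimal sketch of the safer alternative, assuming the nested dict-of-lists layout used in this file:

    import copy

    # Independent deep copies: writing into spl_ene_data or eff_data
    # no longer clobbers the original gpu_ene_data readings.
    spl_ene_data = copy.deepcopy(gpu_ene_data)
    eff_data = copy.deepcopy(gpu_ene_data)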
diff --git a/functions.py b/functions.py
index 685c000..ffda401 100644
--- a/functions.py
+++ b/functions.py
@@ -5,6 +5,8 @@ import torch
 
 def square_matrix(tensor):
     tensor_size = tensor.size()
+    if len(tensor_size) == 0:  # 0-dim (scalar) tensor: nothing to square
+        return tensor
     if len(tensor_size) == 1:
         temp = torch.zeros([tensor_size[0], tensor_size[0]-1])
 
@@ -43,6 +45,8 @@ def square_matrix(tensor):
 
 def neumann_entropy(tensor):
     tensor_size = tensor.size()
+    if len(tensor_size) == 0:  # 0-dim (scalar) tensor: return unchanged
+        return tensor
     if len(tensor_size) == 1:
         return 0
     elif len(tensor_size) == 2:
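The new 0-dim guards in both functions cover scalar tensors, whose `size()` is empty, so neither the 1-D nor the 2-D branch below would match them. The behaviour being handled, in plain PyTorch:

    import torch

    scalar = torch.tensor(3.14)       # 0-dim (scalar) tensor
    print(scalar.size())              # torch.Size([])
    print(len(scalar.size()) == 0)    # True: both functions now return it unchanged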
diff --git a/general_plots.py b/general_plots.py
index 32c1b87..b10db41 100644
--- a/general_plots.py
+++ b/general_plots.py
@@ -18,10 +18,10 @@ lenet_keys = ['conv1.weight', 'conv1.bias', 'conv2.weight', 'conv2.bias',
               'fc3.bias']
 
 for size in range(1, 8):
-    # if size != 8:
-    plt.plot(eff_df['MNIST']['BCNN'][size],
+    # if size != 3:
+    plt.plot(eff_df['MNIST']['LeNet'][size],
              label='Efficiency size {}'.format(size))
-    plt.plot(entropy_data['MNIST']['BCNN'][size],
+    plt.plot(entropy_data['MNIST']['LeNet'][size],
              label='Entropy size {}'.format(size))
 
 plt.legend(loc='upper right')
diff --git a/general_plots_noisy.py b/general_plots_noisy.py
new file mode 100644
index 0000000..ff17bd7
--- /dev/null
+++ b/general_plots_noisy.py
@@ -0,0 +1,36 @@
+import matplotlib.pyplot as plt
+import functions as aux
+
+model_type = 'BCNN'  # BCNN or LeNet
+dataset = 'MNIST'  # MNIST or CIFAR
+
+eff_df = aux.load_pickle("efficiency_data.pkl")
+
+entropy_data_noise = aux.load_pickle("entropy_data_noisy.pkl")
+entropy_data = aux.load_pickle("entropy_data.pkl")
+
+bayes_keys = ['conv1.W_mu', 'conv1.W_rho', 'conv1.bias_mu', 'conv1.bias_rho',
+              'conv2.W_mu', 'conv2.W_rho', 'conv2.bias_mu', 'conv2.bias_rho',
+              'fc1.W_mu', 'fc1.W_rho', 'fc1.bias_mu', 'fc1.bias_rho',
+              'fc2.W_mu', 'fc2.W_rho', 'fc2.bias_mu', 'fc2.bias_rho',
+              'fc3.W_mu', 'fc3.W_rho', 'fc3.bias_mu', 'fc3.bias_rho']
+
+lenet_keys = ['conv1.weight', 'conv1.bias', 'conv2.weight', 'conv2.bias',
+              'fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias', 'fc3.weight',
+              'fc3.bias']
+
+all_noises = [0.1, 0.25, 0.5, 0.75, 0.99]
+
+for size in range(1, 2):  # only the size-1 model is plotted here
+    plt.plot(eff_df[dataset][model_type][size],
+             label='Efficiency')
+    plt.plot(entropy_data[dataset][model_type][size],
+             label='Entropy at noise 0.0')
+
+for noise in all_noises:
+    plt.plot(entropy_data_noise[dataset][model_type][noise],
+             label='Entropy at noise {}'.format(noise))
+
+plt.legend(loc='upper right')
+# plt.legend(loc='lower right')
+plt.show()
diff --git a/get_entropy.py b/get_entropy.py
index 4b00a2e..073ee17 100644
--- a/get_entropy.py
+++ b/get_entropy.py
@@ -1,7 +1,9 @@
 import functions as aux
 import statistics as st
 
-alpha = 100000
+alpha = 10000
+max_epoch = 30
+max_size = 8
 
 models_bayes_cifar = aux.load_pickle("bayes_data_cifar.pkl")
 models_bayes_mnist = aux.load_pickle("bayes_data_mnist.pkl")
@@ -55,8 +57,8 @@ bayes_keys = ['conv1.W_mu',
 lenet_keys = ['conv1.weight', 'conv2.weight',
               'fc1.weight', 'fc2.weight', 'fc3.weight']
 
-for model_size in range(1, 8):
-    for epoch in range(0, 100):
+for model_size in range(1, max_size):
+    for epoch in range(0, max_epoch):
         for k in bayes_keys:
             models_bayes_cifar[model_size][epoch][k] = \
                 aux.neumann_entropy(
@@ -65,9 +67,9 @@ for model_size in range(1, 8):
                 )
             )
 
-for size in range(1, 8):
+for size in range(1, max_size):
     temp_epoch = []
-    for epoch in range(0, 100):
+    for epoch in range(0, max_epoch):
         temp_mean = []
         for layer in bayes_keys:
             temp_mean.append(
@@ -82,8 +84,8 @@ for size in range(1, 8):
 # aux.save_pickle("bayes_data_cifar_ne.pkl", models_bayes_cifar)
 del models_bayes_cifar
 
-for model_size in range(1, 8):
-    for epoch in range(0, 100):
+for model_size in range(1, max_size):
+    for epoch in range(0, max_epoch):
         for k in bayes_keys:
             models_bayes_mnist[model_size][epoch][k] = \
                 aux.neumann_entropy(
@@ -92,9 +94,9 @@ for model_size in range(1, 8):
                 )
             )
 
-for size in range(1, 8):
+for size in range(1, max_size):
     temp_epoch = []
-    for epoch in range(0, 100):
+    for epoch in range(0, max_epoch):
         temp_mean = []
         for layer in bayes_keys:
             temp_mean.append(
@@ -109,8 +111,8 @@ for size in range(1, 8):
 # aux.save_pickle("bayes_data_mnist_ne.pkl", models_bayes_mnist)
 del models_bayes_mnist
 
-for model_size in range(1, 8):
-    for epoch in range(0, 100):
+for model_size in range(1, max_size):
+    for epoch in range(0, max_epoch):
         for k in lenet_keys:
             models_lenet_cifar[model_size][epoch][k] = \
                 aux.neumann_entropy(
@@ -119,9 +121,9 @@ for model_size in range(1, 8):
                 )
             )
 
-for size in range(1, 8):
+for size in range(1, max_size):
     temp_epoch = []
-    for epoch in range(0, 100):
+    for epoch in range(0, max_epoch):
         temp_mean = []
         for layer in lenet_keys:
             temp_mean.append(
@@ -136,8 +138,8 @@ for size in range(1, 8):
 # aux.save_pickle("lenet_data_cifar_ne.pkl", models_lenet_cifar)
 del models_lenet_cifar
 
-for model_size in range(1, 8):
-    for epoch in range(0, 100):
+for model_size in range(1, max_size):
+    for epoch in range(0, max_epoch):
         for k in lenet_keys:
             models_lenet_mnist[model_size][epoch][k] = \
                 aux.neumann_entropy(
@@ -146,9 +148,9 @@ for model_size in range(1, 8):
                 )
             )
 
-for size in range(1, 8):
+for size in range(1, max_size):
     temp_epoch = []
-    for epoch in range(0, 100):
+    for epoch in range(0, max_epoch):
         temp_mean = []
         for layer in lenet_keys:
             temp_mean.append(
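The four dataset/model blocks above, and the four noise-level blocks in get_entropy_noisy.py below, all repeat the same reduce-then-average pattern: entropy per layer, mean over layers per epoch, scaled by 1/alpha. A possible consolidation, sketched under the assumptions visible in this diff (the `models[index][epoch][key]` layout and the `aux` helpers); `layer_entropy_curve` is a hypothetical name, not part of the patch:

    import statistics as st

    import functions as aux

    def layer_entropy_curve(models, keys, indices, max_epoch, alpha):
        """Mean layer entropy per epoch, one curve per index."""
        curves = {}
        for idx in indices:  # model sizes here; noise levels in the noisy script
            per_epoch = []
            for epoch in range(max_epoch):
                entropies = [
                    aux.neumann_entropy(
                        aux.square_matrix(models[idx][epoch][k])
                    ).item()
                    for k in keys
                ]
                per_epoch.append(st.mean(entropies) / alpha)
            curves[idx] = per_epoch
        return curves

For example, `entropy_data['CIFAR']['BCNN'] = layer_entropy_curve(models_bayes_cifar, bayes_keys, range(1, max_size), max_epoch, alpha)` could replace the first pair of loops.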
diff --git a/get_entropy_noisy.py b/get_entropy_noisy.py
new file mode 100644
index 0000000..e60398c
--- /dev/null
+++ b/get_entropy_noisy.py
@@ -0,0 +1,162 @@
+import functions as aux
+import statistics as st
+
+alpha = 10000  # entropy curves are scaled by 1/alpha before saving
+max_epoch = 30
+
+models_bayes_cifar = aux.load_pickle("bayes_data_cifar_noisy.pkl")
+models_bayes_mnist = aux.load_pickle("bayes_data_mnist_noisy.pkl")
+models_lenet_cifar = aux.load_pickle("lenet_data_cifar_noisy.pkl")
+models_lenet_mnist = aux.load_pickle("lenet_data_mnist_noisy.pkl")
+
+entropy_data = {'CIFAR':
+                {'BCNN':
+                 {0.1: None, 0.25: None,
+                  0.5: None, 0.75: None, 0.99: None},
+                 'LeNet':
+                 {0.1: None, 0.25: None,
+                  0.5: None, 0.75: None, 0.99: None},
+                 },
+                'MNIST':
+                {'BCNN':
+                 {0.1: None, 0.25: None,
+                  0.5: None, 0.75: None, 0.99: None},
+                 'LeNet':
+                 {0.1: None, 0.25: None,
+                  0.5: None, 0.75: None, 0.99: None},
+                 },
+                }
+
+"""
+bayes_keys = ['conv1.W_mu', 'conv1.W_rho', 'conv1.bias_mu', 'conv1.bias_rho',
+              'conv2.W_mu', 'conv2.W_rho', 'conv2.bias_mu', 'conv2.bias_rho',
+              'fc1.W_mu', 'fc1.W_rho', 'fc1.bias_mu', 'fc1.bias_rho',
+              'fc2.W_mu', 'fc2.W_rho', 'fc2.bias_mu', 'fc2.bias_rho',
+              'fc3.W_mu', 'fc3.W_rho', 'fc3.bias_mu', 'fc3.bias_rho']
+
+lenet_keys = ['conv1.weight', 'conv1.bias', 'conv2.weight', 'conv2.bias',
+              'fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias', 'fc3.weight',
+              'fc3.bias']
+
+bayes_keys = ['conv1.W_mu', 'conv1.W_rho',
+              'conv2.W_mu', 'conv2.W_rho',
+              'fc1.W_mu', 'fc1.W_rho',
+              'fc2.W_mu', 'fc2.W_rho',
+              'fc3.W_mu', 'fc3.W_rho']
+
+"""
+
+noise_levels = [0.1, 0.25, 0.5, 0.75, 0.99]
+
+bayes_keys = ['conv1.W_mu',
+              'conv2.W_mu',
+              'fc1.W_mu',
+              'fc2.W_mu',
+              'fc3.W_mu']
+
+
+lenet_keys = ['conv1.weight', 'conv2.weight',
+              'fc1.weight', 'fc2.weight', 'fc3.weight']
+
+for noise in noise_levels:
+    for epoch in range(0, max_epoch):
+        for k in bayes_keys:
+            models_bayes_cifar[noise][epoch][k] = \
+                aux.neumann_entropy(
+                    aux.square_matrix(
+                        models_bayes_cifar[noise][epoch][k]
+                    )
+                )
+
+for noise in noise_levels:
+    temp_epoch = []
+    for epoch in range(0, max_epoch):
+        temp_mean = []
+        for layer in bayes_keys:
+            temp_mean.append(
+                models_bayes_cifar[noise][epoch][layer].item()
+            )
+        temp_mean = st.mean(temp_mean)
+        temp_epoch.append(temp_mean)
+    entropy_data['CIFAR']['BCNN'][noise] = [x / alpha for x in temp_epoch]
+
+# aux.save_pickle("bayes_data_cifar_ne.pkl", models_bayes_cifar)
+del models_bayes_cifar
+
+for noise in noise_levels:
+    for epoch in range(0, max_epoch):
+        for k in bayes_keys:
+            models_bayes_mnist[noise][epoch][k] = \
+                aux.neumann_entropy(
+                    aux.square_matrix(
+                        models_bayes_mnist[noise][epoch][k]
+                    )
+                )
+
+for noise in noise_levels:
+    temp_epoch = []
+    for epoch in range(0, max_epoch):
+        temp_mean = []
+        for layer in bayes_keys:
+            temp_mean.append(
+                models_bayes_mnist[noise][epoch][layer].item()
+            )
+        temp_mean = st.mean(temp_mean)
+        temp_epoch.append(temp_mean)
+    entropy_data['MNIST']['BCNN'][noise] = [x / alpha for x in temp_epoch]
+
+# aux.save_pickle("bayes_data_mnist_ne.pkl", models_bayes_mnist)
+del models_bayes_mnist
+
+for noise in noise_levels:
+    for epoch in range(0, max_epoch):
+        for k in lenet_keys:
+            models_lenet_cifar[noise][epoch][k] = \
+                aux.neumann_entropy(
+                    aux.square_matrix(
+                        models_lenet_cifar[noise][epoch][k]
+                    )
+                )
+
+for noise in noise_levels:
+    temp_epoch = []
+    for epoch in range(0, max_epoch):
+        temp_mean = []
+        for layer in lenet_keys:
+            temp_mean.append(
+                models_lenet_cifar[noise][epoch][layer].item()
+            )
+        temp_mean = st.mean(temp_mean)
+        temp_epoch.append(temp_mean)
+    entropy_data['CIFAR']['LeNet'][noise] = [x / alpha for x in temp_epoch]
+
+# aux.save_pickle("lenet_data_cifar_ne.pkl", models_lenet_cifar)
+del models_lenet_cifar
+
+for noise in noise_levels:
+    for epoch in range(0, max_epoch):
+        for k in lenet_keys:
+            models_lenet_mnist[noise][epoch][k] = \
+                aux.neumann_entropy(
+                    aux.square_matrix(
+                        models_lenet_mnist[noise][epoch][k]
+                    )
+                )
+
+for noise in noise_levels:
+    temp_epoch = []
+    for epoch in range(0, max_epoch):
+        temp_mean = []
+        for layer in lenet_keys:
+            temp_mean.append(
+                models_lenet_mnist[noise][epoch][layer].item()
+            )
+        temp_mean = st.mean(temp_mean)
+        temp_epoch.append(temp_mean)
+    entropy_data['MNIST']['LeNet'][noise] = [x / alpha for x in temp_epoch]
+
+
+# aux.save_pickle("lenet_data_mnist_ne.pkl", models_lenet_mnist)
+del models_lenet_mnist
+
+aux.save_pickle("entropy_data_noisy.pkl", entropy_data)
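With the patch applied, the new scripts form a short pipeline: get_entropy_noisy.py reduces the noisy checkpoints to per-noise entropy curves and writes entropy_data_noisy.pkl, which general_plots_noisy.py then overlays on the noise-free curves. A quick sanity check of the saved structure, assuming the keys used above:

    import functions as aux

    entropy = aux.load_pickle("entropy_data_noisy.pkl")
    curve = entropy['MNIST']['LeNet'][0.5]  # one scaled entropy per epoch
    print(len(curve))   # 30 (max_epoch)
    print(curve[:3])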