Did some Python formatting

Eddie Cueto 2023-06-30 11:09:54 +01:00
parent 0f8ee842a3
commit 3c928cc350
6 changed files with 150 additions and 143 deletions


@@ -9,50 +9,21 @@ with (open("configuration.pkl", "rb")) as file:
        except EOFError:
            break
-#with open("frq", "r") as file:
-#    frq = int(file.read())
-#with open("bay", "r") as file:
-#    bay = int(file.read())
-#if frq == 1:
-#    model_t = "freq"
-#    with open("tmp", "r") as file:
-#        size = float(file.read())
-#if bay == 1:
-#    model_t = "bayes"
-#    with open("tmp", "r") as file:
-#        size = int(file.read())
-#pickle_name = "{}_wattdata_{}.pkl".format(model_t,size)
-#print("GPU energy file config: {}".format(pickle_name))
-#print(cfg)
+# pickle_name = "{}_wattdata_{}.pkl".format(model_t,size)
+# print("GPU energy file config: {}".format(pickle_name))
+# print(cfg)
 
 if __name__ == '__main__':
     dataDump = []
-    #var = True
-    #pickling_on = open("wattdata.pickle","wb")
     while True:
         try:
             dataDump.append(get_sample_of_gpu())
             with open(cfg["pickle_path"], 'wb') as f:
                 pickle.dump(dataDump, f)
         except EOFError:
             warn('Pickle ran out of space')
-            size += 0.01
         finally:
             f.close()
-        #if retcode == 0:
-            #break
-    #pickle.dump(dataDump, pickling_on)
-    #pickling_on.close()
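
Note: even after this cleanup, the retained finally: f.close() references a handle that the with block has already closed, and f is unbound if get_sample_of_gpu() raises before the first open. A minimal sketch of the same loop without the redundant close (get_sample_of_gpu, cfg, pickle, and warn are taken from the surrounding code; this is an illustration, not the committed fix):

    dataDump = []
    while True:
        try:
            dataDump.append(get_sample_of_gpu())
            # the with-statement closes the file on success and on error,
            # so no finally clause is needed
            with open(cfg["pickle_path"], "wb") as f:
                pickle.dump(dataDump, f)
        except EOFError:
            warn("Pickle ran out of space")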


@@ -7,16 +7,21 @@ all_args = argparse.ArgumentParser()
 def makeArguments(arguments: ArgumentParser) -> dict:
     all_args.add_argument("-b", "--Bayesian", action="store", dest="b",
-                          type=int, choices=range(1,8), help="Bayesian model of size x")
+                          type=int, choices=range(1, 8),
+                          help="Bayesian model of size x")
     all_args.add_argument("-f", "--Frequentist", action="store", dest="f",
-                          type=int, choices=range(1,8), help="Frequentist model of size x")
+                          type=int, choices=range(1, 8),
+                          help="Frequentist model of size x")
     all_args.add_argument("-E", "--EarlyStopping", action="store_true",
                           help="Early Stopping criteria")
     all_args.add_argument("-e", "--EnergyBound", action="store_true",
                           help="Energy Bound criteria")
     all_args.add_argument("-a", "--AccuracyBound", action="store_true",
                           help="Accuracy Bound criteria")
-    all_args.add_argument("-s", "--Save", action="store_true", help="Save model")
-    all_args.add_argument('--net_type', default='lenet', type=str, help='model = [lenet/AlexNet/3Conv3FC]')
-    all_args.add_argument('--dataset', default='CIFAR10', type=str, help='dataset = [MNIST/CIFAR10/CIFAR100]')
+    all_args.add_argument("-s", "--Save", action="store_true",
+                          help="Save model")
+    all_args.add_argument('--net_type', default='lenet', type=str,
+                          help='model = [lenet/AlexNet/3Conv3FC]')
+    all_args.add_argument('--dataset', default='CIFAR10', type=str,
+                          help='dataset = [MNIST/CIFAR10/CIFAR100]')
     return vars(all_args.parse_args())
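
Note: makeArguments never uses its arguments parameter; it parses sys.argv through the module-level all_args. For a quick sanity check of the flags above, parse_args also accepts an explicit list; a small sketch (the printed dict is illustrative):

    args = all_args.parse_args(["-b", "3", "-E", "--dataset", "CIFAR10"])
    print(vars(args))
    # e.g. {'b': 3, 'f': None, 'EarlyStopping': True, 'EnergyBound': False,
    #       'AccuracyBound': False, 'Save': False, 'net_type': 'lenet',
    #       'dataset': 'CIFAR10'}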

gpu_power_func.py Normal file → Executable file

@@ -10,7 +10,6 @@ import numpy as np
 from datetime import datetime
 from torch.nn import functional as F
 from torch.optim import Adam, lr_scheduler
-from gpu_power_func import total_watt_consumed
 from models.BayesianModels.BayesianLeNet import BBBLeNet
 from models.BayesianModels.BayesianAlexNet import BBBAlexNet
 from models.BayesianModels.Bayesian3Conv3FC import BBB3Conv3FC
@@ -27,18 +26,23 @@ with (open("configuration.pkl", "rb")) as file:
 # CUDA settings
 device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
 
+
 def getModel(net_type, inputs, outputs, priors, layer_type, activation_type):
     if (net_type == 'lenet'):
-        return BBBLeNet(outputs, inputs, priors, layer_type, activation_type,wide=cfg["model"]["size"])
+        return BBBLeNet(outputs, inputs, priors, layer_type, activation_type,
+                        wide=cfg["model"]["size"])
     elif (net_type == 'alexnet'):
         return BBBAlexNet(outputs, inputs, priors, layer_type, activation_type)
     elif (net_type == '3conv3fc'):
-        return BBB3Conv3FC(outputs, inputs, priors, layer_type, activation_type)
+        return BBB3Conv3FC(outputs, inputs, priors, layer_type,
+                           activation_type)
     else:
-        raise ValueError('Network should be either [LeNet / AlexNet / 3Conv3FC')
+        raise ValueError('Network should be either [LeNet / AlexNet\
+        / 3Conv3FC')
 
-def train_model(net, optimizer, criterion, trainloader, num_ens=1, beta_type=0.1, epoch=None, num_epochs=None):
+
+def train_model(net, optimizer, criterion, trainloader, num_ens=1,
+                beta_type=0.1, epoch=None, num_epochs=None):
     net.train()
     training_loss = 0.0
     accs = []
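
Note: the reflowed ValueError message uses a backslash continuation inside the string literal, which splices the next line's leading spaces into the message (and the bracket is still unbalanced). A sketch of the conventional alternative, implicit concatenation of adjacent literals:

        raise ValueError('Network should be either '
                         '[LeNet / AlexNet / 3Conv3FC]')
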
@@ -48,7 +52,8 @@ def train_model(net, optimizer, criterion, trainloader, num_ens=1, beta_type=0.1
         optimizer.zero_grad()
 
         inputs, labels = inputs.to(device), labels.to(device)
-        outputs = torch.zeros(inputs.shape[0], net.num_classes, num_ens).to(device)
+        outputs = torch.zeros(inputs.shape[0], net.num_classes,
+                              num_ens).to(device)
 
         kl = 0.0
         for j in range(num_ens):
@@ -60,7 +65,8 @@ def train_model(net, optimizer, criterion, trainloader, num_ens=1, beta_type=0.1
             kl_list.append(kl.item())
         log_outputs = utils.logmeanexp(outputs, dim=2)
 
-        beta = metrics.get_beta(i-1, len(trainloader), beta_type, epoch, num_epochs)
+        beta = metrics.get_beta(i-1, len(trainloader), beta_type,
+                                epoch, num_epochs)
         loss = criterion(log_outputs, labels, kl, beta)
         loss.backward()
         optimizer.step()
@@ -70,7 +76,8 @@ def train_model(net, optimizer, criterion, trainloader, num_ens=1, beta_type=0.1
     return training_loss/len(trainloader), np.mean(accs), np.mean(kl_list)
 
 
-def validate_model(net, criterion, validloader, num_ens=1, beta_type=0.1, epoch=None, num_epochs=None):
+def validate_model(net, criterion, validloader, num_ens=1, beta_type=0.1,
+                   epoch=None, num_epochs=None):
     """Calculate ensemble accuracy and NLL Loss"""
     net.train()
     valid_loss = 0.0
@@ -78,7 +85,8 @@ def validate_model(net, criterion, validloader, num_ens=1, beta_type=0.1, epoch=
     for i, (inputs, labels) in enumerate(validloader):
         inputs, labels = inputs.to(device), labels.to(device)
-        outputs = torch.zeros(inputs.shape[0], net.num_classes, num_ens).to(device)
+        outputs = torch.zeros(inputs.shape[0], net.num_classes,
+                              num_ens).to(device)
         kl = 0.0
         for j in range(num_ens):
             net_out, _kl = net(inputs)
@@ -87,7 +95,8 @@ def validate_model(net, criterion, validloader, num_ens=1, beta_type=0.1, epoch=
         log_outputs = utils.logmeanexp(outputs, dim=2)
 
-        beta = metrics.get_beta(i-1, len(validloader), beta_type, epoch, num_epochs)
+        beta = metrics.get_beta(i-1, len(validloader), beta_type,
+                                epoch, num_epochs)
         valid_loss += criterion(log_outputs, labels, kl, beta).item()
         accs.append(metrics.acc(log_outputs, labels))
@@ -113,10 +122,12 @@ def run(dataset, net_type):
     trainset, testset, inputs, outputs = data.getDataset(dataset)
     train_loader, valid_loader, test_loader = data.getDataloader(
         trainset, testset, valid_size, batch_size, num_workers)
-    net = getModel(net_type, inputs, outputs, priors, layer_type, activation_type).to(device)
+    net = getModel(net_type, inputs, outputs, priors, layer_type,
+                   activation_type).to(device)
 
     ckpt_dir = f'checkpoints/{dataset}/bayesian'
-    ckpt_name = f'checkpoints/{dataset}/bayesian/model_{net_type}_{layer_type}_{activation_type}_{cfg["model"]["size"]}.pt'
+    ckpt_name = f'checkpoints/{dataset}/bayesian/model_{net_type}_{layer_type}\
+        _{activation_type}_{cfg["model"]["size"]}.pt'
 
     if not os.path.exists(ckpt_dir):
         os.makedirs(ckpt_dir, exist_ok=True)
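
Note: the same backslash-continuation issue applies to this f-string: the wrapped ckpt_name now embeds the continuation line's leading spaces in the checkpoint path. A sketch using implicitly concatenated f-strings, which keeps the path intact:

    ckpt_name = (f'checkpoints/{dataset}/bayesian/model_{net_type}'
                 f'_{layer_type}_{activation_type}_{cfg["model"]["size"]}.pt')
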
@@ -126,25 +137,40 @@ def run(dataset, net_type):
     criterion = metrics.ELBO(len(trainset)).to(device)
     optimizer = Adam(net.parameters(), lr=lr_start)
-    lr_sched = lr_scheduler.ReduceLROnPlateau(optimizer, patience=6, verbose=True)
-    #valid_loss_max = np.Inf
-    #if stp == 2:
+    lr_sched = lr_scheduler.ReduceLROnPlateau(optimizer, patience=6,
+                                              verbose=True)
+    # valid_loss_max = np.Inf
+    # if stp == 2:
     early_stop = []
     train_data = []
     for epoch in range(n_epochs):  # loop over the dataset multiple times
-        train_loss, train_acc, train_kl = train_model(net, optimizer, criterion, train_loader, num_ens=train_ens, beta_type=beta_type, epoch=epoch, num_epochs=n_epochs)
-        valid_loss, valid_acc = validate_model(net, criterion, valid_loader, num_ens=valid_ens, beta_type=beta_type, epoch=epoch, num_epochs=n_epochs)
+        train_loss, train_acc, train_kl = train_model(net, optimizer,
+                                                      criterion,
+                                                      train_loader,
+                                                      num_ens=train_ens,
+                                                      beta_type=beta_type,
+                                                      epoch=epoch,
+                                                      num_epochs=n_epochs)
+        valid_loss, valid_acc = validate_model(net, criterion, valid_loader,
+                                               num_ens=valid_ens,
+                                               beta_type=beta_type,
+                                               epoch=epoch,
+                                               num_epochs=n_epochs)
         lr_sched.step(valid_loss)
-
-        train_data.append([epoch,train_loss,train_acc,valid_loss,valid_acc])
-        print('Epoch: {} \tTraining Loss: {:.4f} \tTraining Accuracy: {:.4f} \tValidation Loss: {:.4f} \tValidation Accuracy: {:.4f} \ttrain_kl_div: {:.4f}'.format(
-            epoch, train_loss, train_acc, valid_loss, valid_acc, train_kl))
+        train_data.append([epoch, train_loss, train_acc, valid_loss,
+                           valid_acc])
+        print('Epoch: {} \tTraining Loss: {:.4f} \tTraining Accuracy:\
+        {:.4f} \tValidation Loss: {:.4f} \tValidation Accuracy:\
+        {:.4f} \ttrain_kl_div: {:.4f}'.format(epoch, train_loss,
+                                              train_acc, valid_loss,
+                                              valid_acc, train_kl))
 
         if stp == 2:
             print('Using early stopping')
-            if earlyStopping(early_stop,valid_acc,epoch,cfg["model"]["sens"]) == 1:
+            if earlyStopping(early_stop, valid_acc, epoch,
+                             cfg["model"]["sens"]) == 1:
                 break
         elif stp == 3:
             print('Using energy bound')
@@ -152,7 +178,7 @@ def run(dataset, net_type):
                 break
         elif stp == 4:
             print('Using accuracy bound')
-            if accuracyBound(train_acc,cfg.acc_thrs) == 1:
+            if accuracyBound(train_acc, cfg.acc_thrs) == 1:
                 break
         else:
             print('Training for {} epochs'.format(cfg["model"]["n_epochs"]))
@@ -162,9 +188,9 @@ def run(dataset, net_type):
         if epoch == cfg.n_epochs-1:
             torch.save(net.state_dict(), ckpt_name)
 
     with open("bayes_exp_data_"+str(cfg["model"]["size"])+".pkl", 'wb') as f:
-            pickle.dump(train_data, f)
+        pickle.dump(train_data, f)
 
 
 if __name__ == '__main__':
     now = datetime.now()
@@ -174,4 +200,3 @@ if __name__ == '__main__':
     now = datetime.now()
     current_time = now.strftime("%H:%M:%S")
     print("Final Time =", current_time)
-


@@ -2,15 +2,12 @@ from __future__ import print_function
 import os
 import data
 import torch
-#import utils
 import pickle
 import metrics
-import argparse
 import numpy as np
 import torch.nn as nn
 from datetime import datetime
 from torch.optim import Adam, lr_scheduler
-from gpu_power_func import total_watt_consumed
 from models.NonBayesianModels.LeNet import LeNet
 from models.NonBayesianModels.AlexNet import AlexNet
 from stopping_crit import earlyStopping, energyBound, accuracyBound
@@ -27,23 +24,24 @@ with (open("configuration.pkl", "rb")) as file:
 device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
 
-def getModel(net_type, inputs, outputs,wide=cfg["model"]["size"]):
+
+def getModel(net_type, inputs, outputs, wide=cfg["model"]["size"]):
     if (net_type == 'lenet'):
-        return LeNet(outputs, inputs,wide)
+        return LeNet(outputs, inputs, wide)
     elif (net_type == 'alexnet'):
         return AlexNet(outputs, inputs)
     elif (net_type == '3conv3fc'):
         return ThreeConvThreeFC(outputs, inputs)
     else:
-        raise ValueError('Network should be either [LeNet / AlexNet / 3Conv3FC')
+        raise ValueError('Network should be either [LeNet / AlexNet / \
+        3Conv3FC')
 
 def train_model(net, optimizer, criterion, train_loader):
     train_loss = 0.0
     net.train()
     accs = []
-    for data, target in train_loader:
-        data, target = data.to(device), target.to(device)
+    for datas, target in train_loader:
+        data, target = datas.to(device), target.to(device)
         optimizer.zero_grad()
         output = net(data)
         loss = criterion(output, target)
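
Note: renaming the loop variable to datas stops the for-loop from clobbering the data module imported at the top of this file. The body still rebinds data to the device tensor, though, so within these functions the module remains shadowed; a fuller fix would rename the tensor as well, for example:

    for inputs, target in train_loader:
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()
        output = net(inputs)
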
@@ -58,8 +56,8 @@ def validate_model(net, criterion, valid_loader):
     valid_loss = 0.0
     net.eval()
     accs = []
-    for data, target in valid_loader:
-        data, target = data.to(device), target.to(device)
+    for datas, target in valid_loader:
+        data, target = datas.to(device), target.to(device)
         output = net(data)
         loss = criterion(output, target)
         valid_loss += loss.item()*data.size(0)
@@ -82,7 +80,8 @@ def run(dataset, net_type):
     net = getModel(net_type, inputs, outputs).to(device)
 
     ckpt_dir = f'checkpoints/{dataset}/frequentist'
-    ckpt_name = f'checkpoints/{dataset}/frequentist/model_{net_type}_{cfg["model"]["size"]}.pt'
+    ckpt_name = f'checkpoints/{dataset}/frequentist/model\
+        _{net_type}_{cfg["model"]["size"]}.pt'
 
     if not os.path.exists(ckpt_dir):
         os.makedirs(ckpt_dir, exist_ok=True)
@@ -92,35 +91,41 @@ def run(dataset, net_type):
     criterion = nn.CrossEntropyLoss()
     optimizer = Adam(net.parameters(), lr=lr)
-    lr_sched = lr_scheduler.ReduceLROnPlateau(optimizer, patience=6, verbose=True)
-    #valid_loss_min = np.Inf
-    #if stp == 2:
+    lr_sched = lr_scheduler.ReduceLROnPlateau(optimizer, patience=6,
+                                              verbose=True)
+    # valid_loss_min = np.Inf
+    # if stp == 2:
     early_stop = []
     train_data = []
     for epoch in range(1, n_epochs+1):
-        train_loss, train_acc = train_model(net, optimizer, criterion, train_loader)
+        train_loss, train_acc = train_model(net, optimizer, criterion,
+                                            train_loader)
         valid_loss, valid_acc = validate_model(net, criterion, valid_loader)
         lr_sched.step(valid_loss)
 
         train_loss = train_loss/len(train_loader.dataset)
         valid_loss = valid_loss/len(valid_loader.dataset)
 
-        train_data.append([epoch,train_loss,train_acc,valid_loss,valid_acc])
-        print('Epoch: {} \tTraining Loss: {:.4f} \tTraining Accuracy: {:.4f} \tValidation Loss: {:.4f} \tValidation Accuracy: {:.4f}'.format(
-            epoch, train_loss, train_acc, valid_loss, valid_acc))
+        train_data.append([epoch, train_loss, train_acc, valid_loss,
+                           valid_acc])
+        print('Epoch: {} \tTraining Loss: {: .4f} \tTraining Accuracy: {: .4f}\
+        \tValidation Loss: {: .4f} \tValidation Accuracy: {: .4f}\
+        '.format(epoch, train_loss, train_acc, valid_loss, valid_acc))
 
         if stp == 2:
-            #print('Using early stopping')
-            if earlyStopping(early_stop,valid_acc,epoch,cfg["model"]["sens"]) == 1:
+            # print('Using early stopping')
+            if earlyStopping(early_stop, valid_acc, epoch,
+                             cfg["model"]["sens"]) == 1:
                 break
         elif stp == 3:
-            #print('Using energy bound')
+            # print('Using energy bound')
             if energyBound(cfg["model"]["energy_thrs"]) == 1:
                 break
         elif stp == 4:
-            #print('Using accuracy bound')
-            if accuracyBound(train_acc,cfg["model"]["acc_thrs"]) == 1:
+            # print('Using accuracy bound')
+            if accuracyBound(train_acc,
+                             cfg["model"]["acc_thrs"]) == 1:
                 break
         else:
             print('Training for {} epochs'.format(cfg["model"]["n_epochs"]))
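
Note: the rewrapped format specs here changed from {:.4f} to {: .4f}; the space is a sign flag, so positive values now print with a leading blank. A quick check:

    print('{:.4f}'.format(0.9312))   # 0.9312
    print('{: .4f}'.format(0.9312))  #  0.9312 (space reserved for a sign)
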
@@ -131,7 +136,7 @@ def run(dataset, net_type):
             torch.save(net.state_dict(), ckpt_name)
 
     with open("freq_exp_data_"+str(cfg["model"]["size"])+".pkl", 'wb') as f:
-            pickle.dump(train_data, f)
+        pickle.dump(train_data, f)
 
 
 if __name__ == '__main__':
@@ -142,4 +147,3 @@ if __name__ == '__main__':
     now = datetime.now()
     current_time = now.strftime("%H:%M:%S")
     print("Final Time =", current_time)
-


@@ -12,30 +12,32 @@ def kill(proc_pid):
         proc.kill()
     process.kill()
 
+
 cfg = {
-    "model": {"net_type": None, "type": None, "size": None, "layer_type": "lrt",
-              "activation_type": "softplus", "priors": {
+    "model": {"net_type": None, "type": None, "size": None, "layer_type":
+              "lrt", "activation_type": "softplus", "priors": {
                   'prior_mu': 0,
                   'prior_sigma': 0.1,
-                  'posterior_mu_initial': (0, 0.1),  # (mean, std) normal_
-                  'posterior_rho_initial': (-5, 0.1),  # (mean, std) normal_
+                  'posterior_mu_initial': (0, 0.1),  # (mean,std) normal_
+                  'posterior_rho_initial': (-5, 0.1),  # (mean,std) normal_
               },
-              "n_epochs": 3,
+              "n_epochs": 100,
               "sens": 1e-9,
               "energy_thrs": 10000,
              "acc_thrs": 0.99,
               "lr": 0.001,
               "num_workers": 4,
               "valid_size": 0.2,
               "batch_size": 256,
               "train_ens": 1,
               "valid_ens": 1,
-              "beta_type": 0.1,  # 'Blundell', 'Standard', etc. Use float for const value
+              "beta_type": 0.1,  # 'Blundell','Standard',etc.
+              # Use float for const value
               },
     "data": None,
     "stopping_crit": None,
     "save": None,
     "pickle_path": None,
 }
 
 args = makeArguments(arguments.all_args)
@@ -46,10 +48,10 @@ if all(v is None for v in check):
 elif None in check:
     if args['f'] is not None:
         cmd = ["python", "main_frequentist.py"]
-        cfg["model"]["type"] = "frequentist"
+        cfg["model"]["type"] = "freq"
     elif args['b'] is not None:
         cmd = ["python", "main_bayesian.py"]
-        cfg["model"]["type"] = "bayesian"
+        cfg["model"]["type"] = "bayes"
     else:
         raise Exception("Only one argument allowed")
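
Note: the shortened type strings line the watt-data pickle names up with the "{}_wattdata_{}.pkl" pattern commented in the sampler at the top of this commit, e.g.:

    cfg["model"]["type"], cfg["model"]["size"] = "bayes", 3
    print("{}_wattdata_{}.pkl".format(cfg["model"]["type"],
                                      cfg["model"]["size"]))
    # bayes_wattdata_3.pkl
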
@@ -76,13 +78,14 @@ else:
     cfg["save"] = 0
 
-cfg["pickle_path"] = "{}_wattdata_{}.pkl".format(cfg["model"]["type"],cfg["model"]["size"])
+cfg["pickle_path"] = "{}_wattdata_{}.pkl".format(cfg["model"]["type"],
+                                                 cfg["model"]["size"])
 
 with open("configuration.pkl", "wb") as f:
     pickle.dump(cfg, f)
 
-#print(args)
-#print(cfg)
+# print(args)
+# print(cfg)
 
 sleep(3)
@@ -104,12 +107,12 @@ path = path.replace('\n', '')
 startWattCounter = 'python ' + path + '/amd_sample_draw.py'
 
 p1 = sub.Popen(cmd)
-p2 = sub.Popen(startWattCounter.split(),stdin=sub.PIPE,stdout=sub.PIPE, stderr=sub.PIPE)
-p3 = sub.Popen(cmd2,stdin=sub.PIPE,stdout=sub.PIPE, stderr=sub.PIPE)
-p4 = sub.Popen(cmd3,stdin=sub.PIPE,stdout=sub.PIPE, stderr=sub.PIPE)
-p5 = sub.Popen(cmd4,stdin=sub.PIPE,stdout=sub.PIPE, stderr=sub.PIPE)
+p2 = sub.Popen(startWattCounter.split(), stdin=sub.PIPE, stdout=sub.PIPE,
+               stderr=sub.PIPE)
+p3 = sub.Popen(cmd2, stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE)
+p4 = sub.Popen(cmd3, stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE)
+p5 = sub.Popen(cmd4, stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE)
 
 retcode = p1.wait()
 print("Return code: {}".format(retcode))
@@ -119,4 +122,3 @@ kill(p2.pid)
 kill(p3.pid)
 kill(p4.pid)
 kill(p5.pid)
-
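
For context, the kill(proc_pid) helper whose tail appears in the first hunk of this file follows the usual psutil pattern for taking down a process tree; a sketch of the presumed full definition (the psutil import is inferred, not shown in this diff):

    import psutil

    def kill(proc_pid):
        # terminate the process and all of its children, children first
        process = psutil.Process(proc_pid)
        for proc in process.children(recursive=True):
            proc.kill()
        process.kill()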