diff --git a/.data/cifar/cifar-10-batches-py/batches.meta b/.data/cifar/cifar-10-batches-py/batches.meta
new file mode 100644
index 0000000..4467a6e
Binary files /dev/null and b/.data/cifar/cifar-10-batches-py/batches.meta differ
diff --git a/.data/cifar/cifar-10-batches-py/data_batch_1 b/.data/cifar/cifar-10-batches-py/data_batch_1
new file mode 100644
index 0000000..ab404a5
Binary files /dev/null and b/.data/cifar/cifar-10-batches-py/data_batch_1 differ
diff --git a/.data/cifar/cifar-10-batches-py/data_batch_2 b/.data/cifar/cifar-10-batches-py/data_batch_2
new file mode 100644
index 0000000..6bf1369
Binary files /dev/null and b/.data/cifar/cifar-10-batches-py/data_batch_2 differ
diff --git a/.data/cifar/cifar-10-batches-py/data_batch_3 b/.data/cifar/cifar-10-batches-py/data_batch_3
new file mode 100644
index 0000000..66a0d63
Binary files /dev/null and b/.data/cifar/cifar-10-batches-py/data_batch_3 differ
diff --git a/.data/cifar/cifar-10-batches-py/data_batch_4 b/.data/cifar/cifar-10-batches-py/data_batch_4
new file mode 100644
index 0000000..cf8d03d
Binary files /dev/null and b/.data/cifar/cifar-10-batches-py/data_batch_4 differ
diff --git a/.data/cifar/cifar-10-batches-py/data_batch_5 b/.data/cifar/cifar-10-batches-py/data_batch_5
new file mode 100644
index 0000000..468b2aa
Binary files /dev/null and b/.data/cifar/cifar-10-batches-py/data_batch_5 differ
diff --git a/.data/cifar/cifar-10-batches-py/readme.html b/.data/cifar/cifar-10-batches-py/readme.html
new file mode 100644
index 0000000..e377ade
--- /dev/null
+++ b/.data/cifar/cifar-10-batches-py/readme.html
@@ -0,0 +1 @@
+
diff --git a/.data/cifar/cifar-10-batches-py/test_batch b/.data/cifar/cifar-10-batches-py/test_batch
new file mode 100644
index 0000000..3e03f1f
Binary files /dev/null and b/.data/cifar/cifar-10-batches-py/test_batch differ
diff --git a/.data/cifar/cifar-10-python.tar.gz b/.data/cifar/cifar-10-python.tar.gz
new file mode 100644
index 0000000..90c5365
Binary files /dev/null and b/.data/cifar/cifar-10-python.tar.gz differ
diff --git a/.data/mnist/MNIST/raw/t10k-images-idx3-ubyte b/.data/mnist/MNIST/raw/t10k-images-idx3-ubyte
new file mode 100644
index 0000000..1170b2c
Binary files /dev/null and b/.data/mnist/MNIST/raw/t10k-images-idx3-ubyte differ
diff --git a/.data/mnist/MNIST/raw/t10k-images-idx3-ubyte.gz b/.data/mnist/MNIST/raw/t10k-images-idx3-ubyte.gz
new file mode 100644
index 0000000..5ace8ea
Binary files /dev/null and b/.data/mnist/MNIST/raw/t10k-images-idx3-ubyte.gz differ
diff --git a/.data/mnist/MNIST/raw/t10k-labels-idx1-ubyte b/.data/mnist/MNIST/raw/t10k-labels-idx1-ubyte
new file mode 100644
index 0000000..d1c3a97
Binary files /dev/null and b/.data/mnist/MNIST/raw/t10k-labels-idx1-ubyte differ
diff --git a/.data/mnist/MNIST/raw/t10k-labels-idx1-ubyte.gz b/.data/mnist/MNIST/raw/t10k-labels-idx1-ubyte.gz
new file mode 100644
index 0000000..a7e1415
Binary files /dev/null and b/.data/mnist/MNIST/raw/t10k-labels-idx1-ubyte.gz differ
diff --git a/.data/mnist/MNIST/raw/train-images-idx3-ubyte b/.data/mnist/MNIST/raw/train-images-idx3-ubyte
new file mode 100644
index 0000000..bbce276
Binary files /dev/null and b/.data/mnist/MNIST/raw/train-images-idx3-ubyte differ
diff --git a/.data/mnist/MNIST/raw/train-images-idx3-ubyte.gz b/.data/mnist/MNIST/raw/train-images-idx3-ubyte.gz
new file mode 100644
index 0000000..b50e4b6
Binary files /dev/null and b/.data/mnist/MNIST/raw/train-images-idx3-ubyte.gz differ
diff --git a/.data/mnist/MNIST/raw/train-labels-idx1-ubyte b/.data/mnist/MNIST/raw/train-labels-idx1-ubyte
new file mode 100644
index 0000000..d6b4c5d
Binary files /dev/null and b/.data/mnist/MNIST/raw/train-labels-idx1-ubyte differ
diff --git a/.data/mnist/MNIST/raw/train-labels-idx1-ubyte.gz b/.data/mnist/MNIST/raw/train-labels-idx1-ubyte.gz
new file mode 100644
index 0000000..707a576
Binary files /dev/null and b/.data/mnist/MNIST/raw/train-labels-idx1-ubyte.gz differ
diff --git a/making_noise.py b/making_noise.py
index 9f7dff6..75cea3a 100644
--- a/making_noise.py
+++ b/making_noise.py
@@ -1,4 +1,6 @@
 import torch
+import random
+import numpy as np
 from tqdm import tqdm
 import matplotlib.pyplot as plt
 from torchvision import datasets
@@ -6,6 +8,8 @@ from torch.utils.data import Dataset
 from torch.utils.data import DataLoader
 import torchvision.transforms as transforms
 
+torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732
+
 
 class AddGaussianNoise(object):
     def __init__(self, mean=0., std=1.):
@@ -19,12 +23,102 @@ class AddGaussianNoise(object):
         return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
 
 
+class AddRaleighNoise(object):
+    def __init__(self, a=0.0, b=0.0):
+        self.std = np.sqrt(b * (4 - np.pi) / 4)  # std, not variance: Rayleigh variance is b*(4-pi)/4
+        self.mean = a + np.sqrt((np.pi * b) / 4)
+
+    def __call__(self, tensor):
+        return tensor + torch.randn(tensor.size()) * self.std + self.mean
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
+
+
+class AddErlangNoise(object):
+    def __init__(self, a=0.0, b=0.0):
+        if a == 0.0:
+            self.std = 0.0
+            self.mean = 0.0
+        else:
+            self.std = np.sqrt(b) / a  # Erlang variance is b/a**2
+            self.mean = b / a  # Erlang mean is b/a
+
+    def __call__(self, tensor):
+        if self.mean == 0.0:
+            return tensor * self.mean
+        else:
+            return tensor + torch.randn(tensor.size()) * self.std + self.mean
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
+
+
+class AddExponentialNoise(object):
+    def __init__(self, a=0.0):
+        if a == 0.0:
+            self.std = self.mean = 0.0
+        else:
+            self.std = 1 / a  # exponential std equals the mean, 1/a
+            self.mean = 1 / a
+
+    def __call__(self, tensor):
+        if self.mean == 0.0:
+            return tensor * self.mean
+        else:
+            return tensor + torch.randn(tensor.size()) * self.std + self.mean
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
+
+
+class AddUniformNoise(object):
+    def __init__(self, a=0.0, b=0.0):
+        if a == 0.0:
+            self.std = 0.0
+            self.mean = 0.0
+        else:
+            self.std = (b - a) / np.sqrt(12)  # std, not variance: uniform variance is (b-a)**2/12
+            self.mean = (b + a) / 2
+
+    def __call__(self, tensor):
+        if self.mean == 0.0:
+            return tensor * self.mean
+        else:
+            print('(mean={0}, std={1})'.format(self.mean, self.std))
+            return tensor + (torch.randn(tensor.size()) * self.std + self.mean)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
+
+
+class AddInpulseNoise(object):
+    def __init__(self, a=0.0):
+        self.value = a
+
+    def __call__(self, tensor):
+        if random.gauss(0, 1) > 0:
+            return tensor * self.value
+        elif random.gauss(0, 1) < 0:
+            return tensor * (-1 * self.value)
+        else:
+            return tensor * 0.0
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(a={0})'.format(self.value)
+
+
+
 def get_mnist_loaders(batch_size=128, test_batch_size=1000, perc=1.0):
     transform_train = transforms.Compose([
         transforms.RandomCrop(28, padding=4),
         transforms.ToTensor(),
         transforms.Normalize((0.5,), (0.5,)),
-        AddGaussianNoise(0., 0.99),
+        # AddGaussianNoise(0., 0.0),
+        # AddRaleighNoise(1, 1),
+        # AddErlangNoise(0.0001, 0.0001),
+        # AddExponentialNoise(2),
+        # AddUniformNoise(2, 1),
+        AddInpulseNoise(0.5),
     ])
 
     transform_test = transforms.Compose([
@@ -33,8 +127,8 @@ def get_mnist_loaders(batch_size=128, test_batch_size=1000, perc=1.0):
     ])
 
     train_loader = DataLoader(
-        datasets.MNIST(root='.data/mnist', train=True, download=True, transform=transform_train), batch_size=batch_size,
-        shuffle=True, num_workers=2, drop_last=True
+        datasets.MNIST(root='.data/mnist', train=True, download=True, transform=transform_train),
+        batch_size=batch_size, shuffle=True, num_workers=2, drop_last=True
     )
 
     train_eval_loader = DataLoader(
@@ -52,10 +146,15 @@ def get_mnist_loaders(batch_size=128, test_batch_size=1000, perc=1.0):
 
 def get_cifar_loaders(batch_size=128, test_batch_size=1000, perc=1.0):
     transform_train = transforms.Compose([
-        #transforms.RandomCrop(32, padding=4),
         transforms.ToTensor(),
-        transforms.Normalize((0.5,), (0.5,)),
-        AddGaussianNoise(0., 0.99),
+        transforms.RandomCrop(32, padding=0),
+        # transforms.Normalize((0.5,), (0.5,)),
+        # AddGaussianNoise(0., 0.25),
+        # AddRaleighNoise(1, 2),  # Not working for CIFAR
+        # AddErlangNoise(0.0001, 0.0001),
+        # AddExponentialNoise(2),
+        AddUniformNoise(2, 1),  # Not working for CIFAR
+        # AddInpulseNoise(0.5),
     ])
 
     transform_test = transforms.Compose([
@@ -64,8 +163,8 @@ def get_cifar_loaders(batch_size=128, test_batch_size=1000, perc=1.0):
     ])
 
     train_loader = DataLoader(
-        datasets.CIFAR10(root='.data/cifar', train=True, download=True, transform=transform_train), batch_size=batch_size,
-        shuffle=True, num_workers=2, drop_last=True
+        datasets.CIFAR10(root='.data/cifar', train=True, download=True, transform=transform_train),
+        batch_size=batch_size, shuffle=True, num_workers=2, drop_last=True
     )
 
     train_eval_loader = DataLoader(
@@ -91,7 +190,7 @@ if __name__ == '__main__':
 
     # # print(data)
     images, labels = next(iter(train_loader))
-    #plt.imshow(images[0].reshape(3,32,32).transpose(0,2,3,1))
-    plt.imshow(images[0])
+    plt.imshow(images[0].permute(1, 2, 0))
+    # plt.imshow(images[0].reshape(28, 28), cmap='gray')
     plt.show()
     #print(images[0].shape)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..f387fe7
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,35 @@
+certifi
+charset-normalizer
+cmake
+contourpy
+cycler
+filelock
+fonttools
+idna
+Jinja2
+kiwisolver
+lit
+MarkupSafe
+matplotlib
+mpmath
+networkx
+numpy
+packaging
+pandas
+Pillow
+psutil
+pyparsing
+python-dateutil
+pytorch-triton-rocm
+pytz
+requests
+seaborn
+six
+sympy
+torch
+torchaudio
+torchvision
+tqdm
+typing_extensions
+tzdata
+urllib3
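
A note on the new transforms (not part of the patch): AddRaleighNoise, AddErlangNoise, AddExponentialNoise and AddUniformNoise all draw from torch.randn, so what they add is Gaussian noise whose mean and standard deviation are matched to the named distribution, not samples from that distribution itself. If exact sampling were wanted, a minimal sketch along the following lines could replace the Gaussian draw; the class name is hypothetical, and the (a, b) parameterization follows the same convention as AddRaleighNoise above (mean a + sqrt(pi*b/4), variance b*(4-pi)/4).

    # Sketch (assumption, not in the patch): additive Rayleigh noise via inverse-CDF sampling.
    import torch


    class AddExactRayleighNoise(object):
        """Hypothetical alternative to AddRaleighNoise that samples the Rayleigh
        distribution directly instead of a moment-matched Gaussian.
        pdf: (2/b)(z - a) exp(-(z - a)^2 / b) for z >= a,
        so a sample is a + sqrt(-b * ln(U)) with U ~ Uniform(0, 1]."""

        def __init__(self, a=0.0, b=1.0):
            self.a = a
            self.b = b

        def __call__(self, tensor):
            # torch.rand lies in [0, 1); clamp away from 0 before taking the log
            u = torch.rand(tensor.size()).clamp_min(1e-12)
            noise = self.a + torch.sqrt(-self.b * torch.log(u))
            return tensor + noise

        def __repr__(self):
            return self.__class__.__name__ + '(a={0}, b={1})'.format(self.a, self.b)

For the Erlang and exponential cases, torch.distributions.Gamma(concentration=b, rate=a) and torch.distributions.Exponential(rate=a) could play the same role via their sample() methods.

AddInpulseNoise, the transform left active for MNIST, scales the whole image by a or -a based on a Gaussian draw (and calls random.gauss twice, so the two branches test different draws). Classic salt-and-pepper impulse noise corrupts individual pixels instead; a per-pixel sketch under that assumption follows, with the class name and defaults hypothetical, and low/high chosen to match the post-Normalize range (e.g. -1.0 and 1.0 here).

    # Sketch (assumption, not in the patch): per-pixel salt-and-pepper impulse noise.
    import torch


    class AddSaltPepperNoise(object):
        """Hypothetical per-pixel impulse noise: each pixel independently becomes
        high with probability p/2, low with probability p/2, else unchanged."""

        def __init__(self, p=0.05, low=0.0, high=1.0):
            self.p = p
            self.low = low
            self.high = high

        def __call__(self, tensor):
            u = torch.rand(tensor.size())
            out = tensor.clone()
            out[u < self.p / 2] = self.low                      # pepper
            out[(u >= self.p / 2) & (u < self.p)] = self.high   # salt
            return out

        def __repr__(self):
            return self.__class__.__name__ + '(p={0})'.format(self.p)

Either sketch would drop into the existing transforms.Compose lists exactly like the current classes.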