{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"False"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import torch\n",
"import torchvision\n",
"torch.cuda.is_available()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([1., 1., 1., 1., 1.])\n",
"[1. 1. 1. 1. 1.]\n",
"tensor([2., 2., 2., 2., 2.])\n",
"[2. 2. 2. 2. 2.]\n"
]
}
],
"source": [
"# the NumPy array returned by .numpy() shares memory with the torch tensor,\n",
"# so in-place updates to the tensor are visible in the array as well\n",
"a = torch.ones(5)\n",
"print(a)\n",
"b = a.numpy()\n",
"print(b)\n",
"a.add_(1)\n",
"print(a)\n",
"print(b)"
]
},
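{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch for contrast: calling `clone()` before `.numpy()` copies the data, so the resulting array no longer shares memory with the tensor."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# clone() copies the data, so d stays independent of c\n",
"c = torch.ones(5)\n",
"d = c.clone().numpy()\n",
"c.add_(1)\n",
"print(c)  # tensor([2., 2., 2., 2., 2.])\n",
"print(d)  # still all ones, because d was built from a copy"
]
},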
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2. 2. 2. 2. 2.]\n",
"tensor([2., 2., 2., 2., 2.], dtype=torch.float64)\n"
]
}
],
"source": [
"# torch.from_numpy() shares memory with the source NumPy array as well\n",
"a = np.ones(5)\n",
"b = torch.from_numpy(a)\n",
"np.add(a,1,out=a)\n",
"print(a)\n",
"print(b)"
]
},
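{
"cell_type": "markdown",
"metadata": {},
"source": [
"Another small sketch: `torch.tensor()` copies the NumPy data rather than sharing it, so later in-place NumPy updates do not show up in the tensor."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# torch.tensor() copies, unlike torch.from_numpy()\n",
"e = np.ones(5)\n",
"f = torch.tensor(e)\n",
"np.add(e, 1, out=e)\n",
"print(e)  # updated in place\n",
"print(f)  # unchanged copy (float64, inherited from the array)"
]
},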
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"r2 = torch.randn(4,4)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"r = torch.rand(4,4)\n",
"\n",
"add_result = torch.add(r,r2)"
]
},
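{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sketch: the `+` operator computes the same element-wise sum as the out-of-place `torch.add()` call above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# r + r2 and torch.add(r, r2) produce identical results\n",
"same_result = r + r2\n",
"print(torch.equal(same_result, add_result))  # expected: True"
]
},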
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[-0.5124, -0.0635, -1.0141, 0.2371],\n",
" [ 0.7809, -1.0184, 1.1734, -0.4778],\n",
" [ 1.6340, 0.0201, 0.4821, -0.1986],\n",
" [ 1.7512, -0.1389, 1.4546, -0.2230]], device='cuda:0')\n"
]
}
],
"source": [
"# Move tensor to GPU\n",
"r2 = r2.cuda()\n",
"print(r2)"
]
},
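{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small sketch: `.numpy()` only works on CPU tensors, so a CUDA tensor has to be moved back with `.cpu()` first. The guard keeps the cell runnable on a machine without a GPU."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# .numpy() requires a CPU tensor; .cpu() copies the data back from the GPU\n",
"if torch.cuda.is_available():\n",
"    r2_np = r2.cpu().numpy()\n",
"    print(r2_np)"
]
},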
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"True\n",
"tensor([[ 0.0573, 0.7549, -0.1884, 0.6424],\n",
" [ 0.9001, -0.9460, 2.1332, 0.0502],\n",
" [ 1.7473, 0.6463, 0.5804, 0.2859],\n",
" [ 2.0273, 0.6695, 2.4453, 0.4559]], device='cuda:0')\n"
]
}
],
"source": [
"CUDA = torch.cuda.is_available()\n",
"print(CUDA)\n",
"\n",
"if CUDA:\n",
" add_result = add_result.cuda()\n",
" print(add_result)"
]
},
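{
"cell_type": "markdown",
"metadata": {},
"source": [
"A sketch of the device-agnostic idiom: pick a `torch.device` once and move tensors with `.to()`, which works whether or not a GPU is present."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# .to(device) is a portable alternative to calling .cuda() directly\n",
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
"add_result = add_result.to(device)\n",
"print(add_result.device)"
]
},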
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[-0.4551, 0.6915, -1.2024, 0.8795],\n",
" [ 1.6810, -1.9643, 3.3066, -0.4276],\n",
" [ 3.3813, 0.6664, 1.0625, 0.0872],\n",
" [ 3.7784, 0.5306, 3.8999, 0.2329]], device='cuda:0')"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# add() is out-of-place: it returns a new tensor and leaves add_result unchanged\n",
"add_result.add(r2)"
]
},
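{
"cell_type": "markdown",
"metadata": {},
"source": [
"A sketch of the in-place counterpart: operations with a trailing underscore, such as `add_()`, modify the tensor itself instead of returning a new one."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# add_() changes add_result in place, unlike the add() call above\n",
"before = add_result.clone()\n",
"add_result.add_(r2)\n",
"print(torch.equal(before, add_result))  # False here, since r2 is non-zero"
]
},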
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2, 3, 4, 1]\n",
"tensor([2, 3, 4, 1]) torch.int64\n"
]
}
],
"source": [
"a = [2,3,4,1]\n",
"print(a)\n",
"# integer lists are inferred as torch.int64 by default\n",
"to_list = torch.tensor(a)\n",
"print(to_list, to_list.dtype)"
]
},
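{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch: the dtype can also be set explicitly when building a tensor from a Python list, instead of relying on the inferred `torch.int64`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# passing dtype= overrides the inferred integer type\n",
"to_float = torch.tensor([2,3,4,1], dtype=torch.float32)\n",
"print(to_float, to_float.dtype)"
]
},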
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[1, 2],\n",
" [3, 4],\n",
" [5, 6],\n",
" [7, 8]]) torch.int64\n"
]
}
],
"source": [
"data = [[1,2],[3,4],[5,6],[7,8]]\n",
"T = torch.tensor(data)\n",
"print(T,T.dtype)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Tensor Concatenation"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[ 0.7607, 0.2490, -1.5199, 0.4037, 0.0063],\n",
" [ 0.2223, -1.0452, -1.6327, -0.1692, -0.8291]])\n",
"tensor([[-0.2285, 0.7535, -1.4712, 1.1518, 0.4560],\n",
" [-0.4817, -0.6983, -0.9611, -1.5915, -1.7998],\n",
" [-0.3149, 0.4309, 1.4270, 0.1497, -0.4793]])\n",
"\n",
"\n",
"tensor([[ 0.7607, 0.2490, -1.5199, 0.4037, 0.0063],\n",
" [ 0.2223, -1.0452, -1.6327, -0.1692, -0.8291],\n",
" [-0.2285, 0.7535, -1.4712, 1.1518, 0.4560],\n",
" [-0.4817, -0.6983, -0.9611, -1.5915, -1.7998],\n",
" [-0.3149, 0.4309, 1.4270, 0.1497, -0.4793]])\n"
]
}
],
"source": [
"first_1 = torch.randn(2,5)\n",
"print(first_1)\n",
"second_1 = torch.randn(3,5)\n",
"print(second_1)\n",
"# Concatenate along dimension 0 (row-wise)\n",
"con_1 = torch.cat([first_1,second_1])\n",
"print(\"\\n\")\n",
"print(con_1)\n",
"\n"
]
},
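{
"cell_type": "markdown",
"metadata": {},
"source": [
"A related sketch: `torch.stack()` differs from `torch.cat()` in that it adds a new dimension, so its inputs must have exactly the same shape."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# stack creates a new leading dimension instead of extending an existing one\n",
"s1 = torch.randn(2,5)\n",
"s2 = torch.randn(2,5)\n",
"stacked = torch.stack([s1,s2])\n",
"print(stacked.shape)  # torch.Size([2, 2, 5])"
]
},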
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[-0.6601, 1.8097, 0.6295],\n",
" [-1.8868, -2.2800, 1.7137]])\n",
"tensor([[ 1.2576, -0.5680, 1.2772, -0.2566, -2.1952],\n",
" [-0.4767, -0.5083, -0.0795, -1.5576, 0.6238]])\n",
"\n",
"\n",
"tensor([[-0.6601, 1.8097, 0.6295, 1.2576, -0.5680, 1.2772, -0.2566, -2.1952],\n",
" [-1.8868, -2.2800, 1.7137, -0.4767, -0.5083, -0.0795, -1.5576, 0.6238]])\n"
]
}
],
"source": [
"first_2 = torch.randn(2,3)\n",
"print(first_2)\n",
"second_2 = torch.randn(2,5)\n",
"print(second_2)\n",
"# Concatenate along dimension 1 (column-wise)\n",
"con_2 = torch.cat([first_2,second_2],1)\n",
"print(\"\\n\")\n",
"print(con_2)\n"
]
},
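{
"cell_type": "markdown",
"metadata": {},
"source": [
"A sketch of the failure mode: concatenating along dimension 1 requires the other dimensions to match, so a tensor with a different number of rows raises a `RuntimeError` (caught here so the notebook keeps running)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# first_2 has 2 rows, so a 3-row tensor cannot be concatenated column-wise\n",
"mismatched = torch.randn(3,2)\n",
"try:\n",
"    torch.cat([first_2, mismatched], 1)\n",
"except RuntimeError as err:\n",
"    print(err)"
]
},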
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Adding dimensions to tensors"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[2, 3, 4, 5]])\n",
"torch.Size([1, 4])\n",
"torch.Size([4])\n"
]
}
],
"source": [
"tensor_1 = torch.tensor([2,3,4,5])\n",
"# unsqueeze inserts a new dimension of size 1 at position 0\n",
"tensor_a = torch.unsqueeze(tensor_1,0)\n",
"print(tensor_a)\n",
"print(tensor_a.shape)\n",
"print(tensor_1.shape)"
]
},
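{
"cell_type": "markdown",
"metadata": {},
"source": [
"A closing sketch: `torch.squeeze()` is the inverse operation, removing the size-1 dimension that `unsqueeze()` added."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# squeeze(tensor_a, 0) removes the size-1 dimension added above\n",
"tensor_b = torch.squeeze(tensor_a, 0)\n",
"print(tensor_b.shape)  # back to torch.Size([4])"
]
},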
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}