# -- Environment setup --------------------------------------------------------
# Bring in PyTorch and report which version this kernel is running.
import torch
import torch.nn as nn
import os

print(torch.__version__)

# Configure CPU threading.
# On a shared cluster the scheduler exports NSLOTS with the number of cores
# assigned to this job; default to a single core when it is unset.
# If you also use parallel data loaders, keep threads + loader workers <= cores.
n_cores = int(os.environ.get('NSLOTS', 1))
torch.set_num_threads(n_cores)
## Training data: six (x, y) pairs sampled from the line y = 2x - 1.
x = torch.tensor([[-1.0], [0.0], [1.0], [2.0], [3.0], [4.0]], dtype=torch.float)
y = torch.tensor([[-3.0], [-1.0], [1.0], [3.0], [5.0], [7.0]], dtype=torch.float)

## Shape check: 6 samples, 1 feature each.
x.size()

## A single linear layer (there is no hidden layer): y_hat = w*x + b.
## NOTE: the bias term is essential here — the target y = 2x - 1 has a
## nonzero intercept. With bias=False the best achievable fit is y = (53/31)x,
## which is why the loss previously plateaued at ~0.5645 and the prediction
## at x=10 came out as 17.0968 instead of ~19.
layer1 = nn.Linear(1, 1)  # bias=True is the default, so the intercept is learnable
model = nn.Sequential(layer1)

## Loss function: mean squared error over the batch.
criterion = nn.MSELoss()

## Optimizer: full-batch stochastic gradient descent, step size 0.01.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
## Training loop: 150 full-batch gradient-descent epochs.
for epoch in range(150):
    model.train()  # training mode (no-op for a plain Linear layer, but good habit)

    # forward pass and loss
    loss = criterion(model(x), y)

    # clear accumulated gradients, backprop, and take one SGD step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    model.eval()  # back to evaluation mode
    print('Epoch: %d | Loss: %.4f' %(epoch, loss.detach().item()))
## Query the trained model at an unseen input, x = 10
## (the underlying line y = 2x - 1 gives 19 there).
sample = torch.tensor([10.0], dtype=torch.float)
predicted = model(sample)
print(predicted.detach().item())