From 9a74ec7536016941350e11336454bf6d5e0c9810 Mon Sep 17 00:00:00 2001
From: NazeerY <96047334+NazeerY@users.noreply.github.com>
Date: Mon, 17 Apr 2023 15:51:27 +0530
Subject: [PATCH 1/2] Created using Colaboratory
---
colabs/intro/Intro_to_Weights_&_Biases.ipynb | 451 +++++++++++++++++++
1 file changed, 451 insertions(+)
create mode 100644 colabs/intro/Intro_to_Weights_&_Biases.ipynb
diff --git a/colabs/intro/Intro_to_Weights_&_Biases.ipynb b/colabs/intro/Intro_to_Weights_&_Biases.ipynb
new file mode 100644
index 00000000..b5283647
--- /dev/null
+++ b/colabs/intro/Intro_to_Weights_&_Biases.ipynb
@@ -0,0 +1,451 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+    "<a href=\"https://colab.research.google.com/github/wandb/examples/blob/master/colabs/intro/Intro_to_Weights_%26_Biases.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "0aEjwBJuuy9e"
+ },
+ "source": [
+    "<img src=\"https://wandb.me/logo-im-png\" width=\"400\" alt=\"Weights & Biases\" />\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "KW8YttmNuy9r"
+ },
+ "source": [
+    "<img src=\"https://wandb.me/mini-diagram\" width=\"650\" alt=\"Weights & Biases\" />\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "pAizNs-wuy9t"
+ },
+ "source": [
+    "# 🏃‍♀️ Quickstart\n",
+    "Use **[Weights & Biases](https://wandb.ai/site?utm_source=intro_colab&utm_medium=code&utm_campaign=intro)** for machine learning experiment tracking, model checkpointing, and collaboration with your team. See the full Weights & Biases documentation **[here](https://docs.wandb.ai/)**."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "TGGzch5Luy9w"
+ },
+ "source": [
+    "## 🤩 A shared dashboard for your experiments\n",
+ "\n",
+ "With just a few lines of code,\n",
+    "you'll get rich, interactive, shareable dashboards, [an example of which you can see here](https://wandb.ai/wandb/wandb_example)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "atzqkq2Zuy9y"
+ },
+ "source": [
+ "\n",
+    "## 🔒 Data & Privacy\n",
+ "\n",
+    "We take security very seriously, and our cloud-hosted dashboard uses industry-standard best practices for encryption. If you're working with datasets that cannot leave your enterprise cluster, we have [on-prem](https://docs.wandb.com/self-hosted) installations available.\n",
+ "\n",
+ "It's also easy to download all your data and export it to other tools β like custom analysis in a Jupyter notebook. Here's [more on our API](https://docs.wandb.com/library/api).\n",
+ "\n",
+ "---"
+ ]
+ },
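+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick illustration of that export path, here is a minimal sketch using the public `wandb.Api`. The project path `\"my-team/my-project\"` is a placeholder; substitute your own entity and project.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Minimal sketch: pull run data back out of W&B for custom analysis.\n",
+    "# \"my-team/my-project\" is a placeholder path, not a real project.\n",
+    "import wandb\n",
+    "\n",
+    "api = wandb.Api()\n",
+    "for run in api.runs(\"my-team/my-project\"):\n",
+    "    print(run.name, run.config)  # hyperparameters logged at init\n",
+    "    history = run.history()     # per-step metrics as a pandas DataFrame\n",
+    "    print(history.tail())"
+   ]
+  },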
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "9fs7b--Yuy9z"
+ },
+ "source": [
+    "## 🪄 Install `wandb` library and login\n",
+ "\n",
+ "\n",
+ "Start by installing the library and logging in to your free account.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "5jyUB2Gzuy91"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install wandb -qU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "vJVylARPuy98"
+ },
+ "outputs": [],
+ "source": [
+ "# Log in to your W&B account\n",
+ "import wandb\n",
+ "wandb.login()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "r-E5UVPUuy9-"
+ },
+ "source": [
+    "## 🚀 Run an experiment\n",
+    "1️⃣. **Start a new run** and pass in hyperparameters to track\n",
+    "\n",
+    "2️⃣. **Log metrics** from training or evaluation\n",
+    "\n",
+    "3️⃣. **Visualize results** in the dashboard"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "t0DxkWWYuy-A"
+ },
+ "outputs": [],
+ "source": [
+ "import random\n",
+ "\n",
+ "# Launch 5 simulated experiments\n",
+ "total_runs = 5\n",
+ "for run in range(total_runs):\n",
+    "    # 🐝 1️⃣ Start a new run to track this script\n",
+ " wandb.init(\n",
+ " # Set the project where this run will be logged\n",
+ " project=\"basic-intro\", \n",
+    "        # We pass a run name (otherwise it'll be randomly assigned, like sunshine-lollypop-10)\n",
+ " name=f\"experiment_{run}\", \n",
+ " # Track hyperparameters and run metadata\n",
+ " config={\n",
+ " \"learning_rate\": 0.02,\n",
+ " \"architecture\": \"CNN\",\n",
+ " \"dataset\": \"CIFAR-100\",\n",
+ " \"epochs\": 10,\n",
+ " })\n",
+ " \n",
+ " # This simple block simulates a training loop logging metrics\n",
+ " epochs = 10\n",
+ " offset = random.random() / 5\n",
+ " for epoch in range(2, epochs):\n",
+ " acc = 1 - 2 ** -epoch - random.random() / epoch - offset\n",
+ " loss = 2 ** -epoch + random.random() / epoch + offset\n",
+ " \n",
+    "        # 🐝 2️⃣ Log metrics from your script to W&B\n",
+ " wandb.log({\"acc\": acc, \"loss\": loss})\n",
+ " \n",
+ " # Mark the run as finished\n",
+ " wandb.finish()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "t3RvkBzruy-C"
+ },
+ "source": [
+    "3️⃣ You can find your interactive dashboard by clicking any of the 👆 wandb links above."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "QVLxbNgQuy-D"
+ },
+ "source": [
+    "# 🔥 Simple PyTorch Neural Network\n",
+ "\n",
+    "💪 Run this model to train a simple MNIST classifier, and click on the project page link to see your results stream in live to a W&B project.\n",
+ "\n",
+ "\n",
+ "Any run in `wandb` automatically logs [metrics](https://docs.wandb.ai/ref/app/pages/run-page#charts-tab),\n",
+ "[system information](https://docs.wandb.ai/ref/app/pages/run-page#system-tab),\n",
+ "[hyperparameters](https://docs.wandb.ai/ref/app/pages/run-page#overview-tab),\n",
+ "[terminal output](https://docs.wandb.ai/ref/app/pages/run-page#logs-tab) and\n",
+ "you'll see an [interactive table](https://docs.wandb.ai/guides/data-vis)\n",
+ "with model inputs and outputs."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "WgYW8l76uy-F"
+ },
+ "source": [
+ "## Set up Dataloader"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "18LZt6jRuy-F"
+ },
+ "outputs": [],
+ "source": [
+ "#@title\n",
+ "import wandb\n",
+ "import math\n",
+ "import random\n",
+ "import torch, torchvision\n",
+ "import torch.nn as nn\n",
+ "import torchvision.transforms as T\n",
+ "\n",
+ "device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n",
+ "\n",
+ "def get_dataloader(is_train, batch_size, slice=5):\n",
+    "    \"Get a training or validation dataloader\"\n",
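+    "    # slice=5 keeps every 5th example, shrinking MNIST so the demo runs quickly\n",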
+ " full_dataset = torchvision.datasets.MNIST(root=\".\", train=is_train, transform=T.ToTensor(), download=True)\n",
+ " sub_dataset = torch.utils.data.Subset(full_dataset, indices=range(0, len(full_dataset), slice))\n",
+ " loader = torch.utils.data.DataLoader(dataset=sub_dataset, \n",
+ " batch_size=batch_size, \n",
+    "                                         shuffle=is_train, \n",
+ " pin_memory=True, num_workers=2)\n",
+ " return loader\n",
+ "\n",
+ "def get_model(dropout):\n",
+ " \"A simple model\"\n",
+ " model = nn.Sequential(nn.Flatten(),\n",
+ " nn.Linear(28*28, 256),\n",
+ " nn.BatchNorm1d(256),\n",
+ " nn.ReLU(),\n",
+ " nn.Dropout(dropout),\n",
+ " nn.Linear(256,10)).to(device)\n",
+ " return model\n",
+ "\n",
+ "def validate_model(model, valid_dl, loss_func, log_images=False, batch_idx=0):\n",
+ " \"Compute performance of the model on the validation dataset and log a wandb.Table\"\n",
+ " model.eval()\n",
+ " val_loss = 0.\n",
+ " with torch.inference_mode():\n",
+ " correct = 0\n",
+ " for i, (images, labels) in enumerate(valid_dl):\n",
+ " images, labels = images.to(device), labels.to(device)\n",
+ "\n",
+    "            # Forward pass ➡\n",
+ " outputs = model(images)\n",
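+    "            # Accumulate the batch loss, weighted by batch size, so the final value is a per-example average\n",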
+ " val_loss += loss_func(outputs, labels)*labels.size(0)\n",
+ "\n",
+ " # Compute accuracy and accumulate\n",
+ " _, predicted = torch.max(outputs.data, 1)\n",
+ " correct += (predicted == labels).sum().item()\n",
+ "\n",
+ " # Log one batch of images to the dashboard, always same batch_idx.\n",
+ " if i==batch_idx and log_images:\n",
+ " log_image_table(images, predicted, labels, outputs.softmax(dim=1))\n",
+ " return val_loss / len(valid_dl.dataset), correct / len(valid_dl.dataset)\n",
+ "\n",
+ "def log_image_table(images, predicted, labels, probs):\n",
+ " \"Log a wandb.Table with (img, pred, target, scores)\"\n",
+    "    # 🐝 Create a wandb Table to log images, labels and predictions to\n",
+ " table = wandb.Table(columns=[\"image\", \"pred\", \"target\"]+[f\"score_{i}\" for i in range(10)])\n",
+ " for img, pred, targ, prob in zip(images.to(\"cpu\"), predicted.to(\"cpu\"), labels.to(\"cpu\"), probs.to(\"cpu\")):\n",
+ " table.add_data(wandb.Image(img[0].numpy()*255), pred, targ, *prob.numpy())\n",
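+    "    # commit=False attaches the table to the current step instead of advancing it;\n",
+    "    # it is written out when the next wandb.log call commits the step\n",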
+ " wandb.log({\"predictions_table\":table}, commit=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "spin810Nuy-I"
+ },
+ "source": [
+ "## Train Your Model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "jzPs3BTKuy-J"
+ },
+ "outputs": [],
+ "source": [
+ "# Launch 5 experiments, trying different dropout rates\n",
+ "for _ in range(5):\n",
+    "    # 🐝 initialise a wandb run\n",
+ " wandb.init(\n",
+ " project=\"pytorch-intro\",\n",
+ " config={\n",
+ " \"epochs\": 10,\n",
+ " \"batch_size\": 128,\n",
+ " \"lr\": 1e-3,\n",
+ " \"dropout\": random.uniform(0.01, 0.80),\n",
+ " })\n",
+ " \n",
+ " # Copy your config \n",
+ " config = wandb.config\n",
+ "\n",
+ " # Get the data\n",
+ " train_dl = get_dataloader(is_train=True, batch_size=config.batch_size)\n",
+ " valid_dl = get_dataloader(is_train=False, batch_size=2*config.batch_size)\n",
+ " n_steps_per_epoch = math.ceil(len(train_dl.dataset) / config.batch_size)\n",
+ " \n",
+ " # A simple MLP model\n",
+ " model = get_model(config.dropout)\n",
+ "\n",
+ " # Make the loss and optimizer\n",
+ " loss_func = nn.CrossEntropyLoss()\n",
+ " optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)\n",
+ "\n",
+ " # Training\n",
+ " example_ct = 0\n",
+ " step_ct = 0\n",
+ " for epoch in range(config.epochs):\n",
+ " model.train()\n",
+ " for step, (images, labels) in enumerate(train_dl):\n",
+ " images, labels = images.to(device), labels.to(device)\n",
+ "\n",
+ " outputs = model(images)\n",
+ " train_loss = loss_func(outputs, labels)\n",
+ " optimizer.zero_grad()\n",
+ " train_loss.backward()\n",
+ " optimizer.step()\n",
+ " \n",
+ " example_ct += len(images)\n",
+ " metrics = {\"train/train_loss\": train_loss, \n",
+ " \"train/epoch\": (step + 1 + (n_steps_per_epoch * epoch)) / n_steps_per_epoch, \n",
+ " \"train/example_ct\": example_ct}\n",
+ " \n",
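+    "            # Log every step except the last of the epoch; the final step's\n",
+    "            # metrics are logged below together with the validation metrics\n",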
+ " if step + 1 < n_steps_per_epoch:\n",
+    "                # 🐝 Log train metrics to wandb\n",
+ " wandb.log(metrics)\n",
+ " \n",
+ " step_ct += 1\n",
+ "\n",
+ " val_loss, accuracy = validate_model(model, valid_dl, loss_func, log_images=(epoch==(config.epochs-1)))\n",
+ "\n",
+    "        # 🐝 Log train and validation metrics to wandb\n",
+ " val_metrics = {\"val/val_loss\": val_loss, \n",
+ " \"val/val_accuracy\": accuracy}\n",
+ " wandb.log({**metrics, **val_metrics})\n",
+ " \n",
+    "        print(f\"Train Loss: {train_loss:.3f}, Valid Loss: {val_loss:.3f}, Accuracy: {accuracy:.2f}\")\n",
+ "\n",
+ " # If you had a test set, this is how you could log it as a Summary metric\n",
+ " wandb.summary['test_accuracy'] = 0.8\n",
+ "\n",
+    "    # 🐝 Close your wandb run\n",
+ " wandb.finish()"
+ ]
+ },
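+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The Quickstart above also mentioned model checkpointing. Below is a minimal sketch of that using [W&B Artifacts](https://docs.wandb.ai/guides/artifacts); it assumes the `model` from the training cell and uses an arbitrary local filename, `model.pth`.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A minimal checkpointing sketch, assuming `model` from the cell above\n",
+    "run = wandb.init(project=\"pytorch-intro\", job_type=\"checkpoint\")\n",
+    "\n",
+    "# Save the weights locally, then log them to W&B as a versioned artifact\n",
+    "torch.save(model.state_dict(), \"model.pth\")  # \"model.pth\" is an arbitrary filename\n",
+    "artifact = wandb.Artifact(\"mnist-classifier\", type=\"model\")\n",
+    "artifact.add_file(\"model.pth\")\n",
+    "run.log_artifact(artifact)\n",
+    "\n",
+    "wandb.finish()"
+   ]
+  },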
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "krhux9zzuy-L"
+ },
+ "source": [
+    "You have now trained your first model using wandb! 🎉 Click on the wandb link above to see your metrics."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "rPBveFsKuy-M"
+ },
+ "source": [
+    "# 🔔 Try W&B Alerts\n",
+ "\n",
+    "**[W&B Alerts](https://docs.wandb.ai/guides/track/alert)** lets you send alerts, triggered from your Python code, to your Slack or email. There are two steps to follow the first time you'd like to send an alert from your code:\n",
+ "\n",
+ "1) Turn on Alerts in your W&B [User Settings](https://wandb.ai/settings)\n",
+ "\n",
+ "2) Add `wandb.alert()` to your code:\n",
+ "\n",
+ "```python\n",
+ "wandb.alert(\n",
+ " title=\"Low accuracy\", \n",
+    "    text=\"Accuracy is below the acceptable threshold\"\n",
+ ")\n",
+ "```\n",
+ "\n",
+    "The minimal example below shows how to use `wandb.alert`. You can find the full docs for **[W&B Alerts here](https://docs.wandb.ai/guides/track/alert)**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "0osuneuLuy-N"
+ },
+ "outputs": [],
+ "source": [
+ "# Start a wandb run\n",
+ "wandb.init(project=\"pytorch-intro\")\n",
+ "\n",
+ "# Simulating a model training loop\n",
+ "acc_threshold = 0.3\n",
+ "for training_step in range(1000):\n",
+ "\n",
+ " # Generate a random number for accuracy\n",
+ " accuracy = round(random.random() + random.random(), 3)\n",
+    "    print(f'Accuracy is: {accuracy} (threshold: {acc_threshold})')\n",
+ " \n",
+    "    # 🐝 Log accuracy to wandb\n",
+ " wandb.log({\"Accuracy\": accuracy})\n",
+ "\n",
+    "    # 🔔 If the accuracy is below the threshold, fire a W&B Alert and stop the run\n",
+ " if accuracy <= acc_threshold:\n",
+    "        # 🐝 Send the wandb Alert\n",
+ " wandb.alert(\n",
+ " title='Low Accuracy',\n",
+    "            text=f'Accuracy {accuracy} at step {training_step} is below the acceptable threshold, {acc_threshold}',\n",
+ " )\n",
+ " print('Alert triggered')\n",
+ " break\n",
+ "\n",
+ "# Mark the run as finished (useful in Jupyter notebooks)\n",
+ "wandb.finish()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ytwesdjDuy-P"
+ },
+ "source": [
+ "\n",
+    "# What's next 🚀?\n",
+    "In the next tutorial you will learn how to do hyperparameter optimization using W&B Sweeps:\n",
+    "## 👉 [Hyperparameter sweeps using PyTorch](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/pytorch/Organizing_Hyperparameter_Sweeps_in_PyTorch_with_W%26B.ipynb)"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "provenance": [],
+ "toc_visible": true,
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
\ No newline at end of file
From 97cd9dd736a95e4c05559bc6e25ae1a076723235 Mon Sep 17 00:00:00 2001
From: NazeerY <96047334+NazeerY@users.noreply.github.com>
Date: Tue, 22 Jul 2025 09:37:55 +0400
Subject: [PATCH 2/2] Updated .gitignore
---
.gitignore | 1 +
1 file changed, 1 insertion(+)
diff --git a/.gitignore b/.gitignore
index 9a8b30fb..476c9ba9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@ venv
env
__pycache__
.vscode
+.env