Commit 2443fa8c authored by William Falcon

updated seed

parent d2de2224
Showing 184 additions and 21 deletions
-pytorch-lightning
\ No newline at end of file
+pytorch-lightning >= 0.7.5
\ No newline at end of file
@@ -2,7 +2,7 @@
 from setuptools import setup, find_packages

-setup(name='research_seed',
+setup(name='src',
       version='0.0.1',
       description='Describe Your Cool Project',
       author='',
...
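With this setup.py in place, the template can be installed in editable mode via standard setuptools usage (not part of the commit itself):

```bash
pip install -e .
```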
File moved
File moved
File moved
File moved
## MNIST
In this README, give instructions on how to run your code.
In this case, the datasets are removed from the LightningModule, as you
might want to use the same model with many datasets (see the sketch below).
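A minimal sketch of that decoupled-data pattern, mirroring mnist_trainer.py in this commit (the `srv.mnist.mnist` import path and the hyperparameter defaults are taken from the files shown below):

```python
# Sketch: DataLoaders are built outside the LightningModule and passed to
# trainer.fit(), so the same model can be reused with other datasets.
import os
from argparse import Namespace
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
from pytorch_lightning import Trainer
from srv.mnist.mnist import CoolSystem  # import path as used in mnist_trainer.py

# defaults taken from CoolSystem.add_model_specific_args
hparams = Namespace(learning_rate=0.02, batch_size=32, max_nb_epochs=2)
model = CoolSystem(hparams=hparams)

train_loader = DataLoader(
    MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()),
    batch_size=hparams.batch_size,
)
trainer = Trainer()
trainer.fit(model, train_loader)
```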
#### CPU
```bash
python mnist_trainer.py
```
#### Multiple-GPUs
```bash
python mnist_trainer.py --gpus 4
```
or specific GPUs
```bash
python mnist_trainer.py --gpus '0,3'
```
#### On multiple nodes
```bash
python mnist_trainer.py --gpus 4 --nodes 4 --precision 16
```
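Multi-node runs on Lightning of this vintage (0.7.x) typically also need a distributed backend selected. Assuming the `--distributed_backend` flag generated by `Trainer.add_argparse_args` from the Trainer's `distributed_backend` argument, a full invocation might look like:

```bash
python mnist_trainer.py --gpus 4 --nodes 4 --precision 16 --distributed_backend ddp
```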
File moved
"""
This file defines the core research contribution
"""
import os
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
from argparse import ArgumentParser
import pytorch_lightning as pl
class CoolSystem(pl.LightningModule):
def __init__(self, hparams):
super(CoolSystem, self).__init__()
# not the best model...
self.hparams = hparams
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_idx):
# REQUIRED
x, y = batch
y_hat = self.forward(x)
loss = F.cross_entropy(y_hat, y)
tensorboard_logs = {'train_loss': loss}
return {'loss': loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx):
# OPTIONAL
x, y = batch
y_hat = self.forward(x)
return {'val_loss': F.cross_entropy(y_hat, y)}
def validation_epoch_end(self, outputs):
# OPTIONAL
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'avg_val_loss': avg_loss}
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def configure_optimizers(self):
# REQUIRED
# can return multiple optimizers and learning_rate schedulers
return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
@staticmethod
def add_model_specific_args(parent_parser):
"""
Specify the hyperparams for this LightningModule
"""
# MODEL specific
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--learning_rate', default=0.02, type=float)
parser.add_argument('--batch_size', default=32, type=int)
# training specific (for this model)
parser.add_argument('--max_nb_epochs', default=2, type=int)
return parser
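The comment in configure_optimizers above notes that multiple optimizers and learning-rate schedulers can be returned. A minimal sketch of that variant, as a drop-in replacement for the method in CoolSystem (not part of this commit; the StepLR settings are purely illustrative):

```python
# Hypothetical variant, not in this commit: Lightning 0.7.x also accepts
# a tuple of (optimizer list, scheduler list) from configure_optimizers.
def configure_optimizers(self):
    optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
    return [optimizer], [scheduler]
```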
"""
This file runs the main training/val loop, etc... using Lightning Trainer
"""
import os
from argparse import ArgumentParser
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
from pytorch_lightning import Trainer, seed_everything
from srv.mnist.mnist import CoolSystem
# sets seeds for numpy, torch, etc...
# must do for DDP to work well
seed_everything(123)
def main(args):
# init module
model = CoolSystem(hparams=args)
    train_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
    # note: this toy example reuses the MNIST training split for validation
    val_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
# makes all flags available to trainer from cli
trainer = Trainer.from_argparse_args(args)
trainer.fit(model, train_loader, val_loader)
if __name__ == '__main__':
parser = ArgumentParser(add_help=False)
# add args from trainer
parser = Trainer.add_argparse_args(parser)
# give the module a chance to add own params
    # good practice to define LightningModule specific params in the module
parser = CoolSystem.add_model_specific_args(parser)
# parse params
args = parser.parse_args()
main(args)
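Because `Trainer.add_argparse_args` and `add_model_specific_args` extend the same parser, trainer flags and model hyperparameters mix on a single command line. For example, using the flags defined above (exact trainer flag names depend on the installed Lightning version):

```bash
python mnist_trainer.py --gpus 2 --max_nb_epochs 5 --learning_rate 0.01 --batch_size 64
```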
@@ -8,10 +8,15 @@ python mnist_trainer.py
 #### Multiple-GPUs
 ```bash
-python mnist_trainer.py --gpus '0,1,2,3'
+python mnist_trainer.py --gpus 4
+```
+or specific GPUs
+```bash
+python mnist_trainer.py --gpus '0,3'
 ```
 #### On multiple nodes
 ```bash
-python mnist_trainer.py --gpus '0,1,2,3' --nodes 4
+python mnist_trainer.py --gpus 4 --nodes 4 --precision 16
 ```
@@ -27,7 +27,11 @@ class CoolSystem(pl.LightningModule):
         # REQUIRED
         x, y = batch
         y_hat = self.forward(x)
-        return {'loss': F.cross_entropy(y_hat, y)}
+        loss = F.cross_entropy(y_hat, y)
+        tensorboard_logs = {'train_loss': loss}
+        return {'loss': loss, 'log': tensorboard_logs}

     def validation_step(self, batch, batch_idx):
         # OPTIONAL
@@ -35,10 +39,12 @@ class CoolSystem(pl.LightningModule):
         y_hat = self.forward(x)
         return {'val_loss': F.cross_entropy(y_hat, y)}

-    def validation_end(self, outputs):
+    def validation_epoch_end(self, outputs):
         # OPTIONAL
         avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-        return {'avg_val_loss': avg_loss}
+        tensorboard_logs = {'avg_val_loss': avg_loss}
+        return {'val_loss': avg_loss, 'log': tensorboard_logs}

     def configure_optimizers(self):
         # REQUIRED
@@ -63,7 +69,7 @@ class CoolSystem(pl.LightningModule):
         Specify the hyperparams for this LightningModule
         """
         # MODEL specific
-        parser = ArgumentParser(parents=[parent_parser])
+        parser = ArgumentParser(parents=[parent_parser], add_help=False)
         parser.add_argument('--learning_rate', default=0.02, type=float)
         parser.add_argument('--batch_size', default=32, type=int)
...
""" """
This file runs the main training/val loop, etc... using Lightning Trainer This file runs the main training/val loop, etc... using Lightning Trainer
""" """
from pytorch_lightning import Trainer from pytorch_lightning import Trainer, seed_everything
from argparse import ArgumentParser from argparse import ArgumentParser
from research_seed.mnist.mnist import CoolSystem from srv.mnist.mnist import CoolSystem
# sets seeds for numpy, torch, etc...
# must do for DDP to work well
seed_everything(123)
def main(hparams): def main(args):
# init module # init module
model = CoolSystem(hparams) model = CoolSystem(hparams=args)
# most basic trainer, uses good defaults # most basic trainer, uses good defaults
trainer = Trainer( trainer = Trainer.from_argparse_args(args)
max_nb_epochs=hparams.max_nb_epochs,
gpus=hparams.gpus,
nb_gpu_nodes=hparams.nodes,
)
trainer.fit(model) trainer.fit(model)
if __name__ == '__main__': if __name__ == '__main__':
parser = ArgumentParser(add_help=False) parser = ArgumentParser(add_help=False)
parser.add_argument('--gpus', type=str, default=None)
parser.add_argument('--nodes', type=int, default=1) # add args from trainer
parser = Trainer.add_argparse_args(parser)
# give the module a chance to add own params # give the module a chance to add own params
# good practice to define LightningModule speficic params in the module # good practice to define LightningModule speficic params in the module
parser = CoolSystem.add_model_specific_args(parser) parser = CoolSystem.add_model_specific_args(parser)
# parse params # parse params
hparams = parser.parse_args() args = parser.parse_args()
main(hparams) main(args)
## MNIST
In this README, give instructions on how to run your code.
#### CPU
```bash
python mnist_trainer.py
```
#### Multiple-GPUs
```bash
python mnist_trainer.py --gpus 4
```
or specific GPUs
```bash
python mnist_trainer.py --gpus '0,3'
```
#### On multiple nodes
```bash
python mnist_trainer.py --gpus 4 --nodes 4 --precision 16
```