Unverified commit 1b3d8291, authored by Jirka Borovec, committed by GitHub

prune packages (#10)

parent 2343f347
@@ -86,4 +86,4 @@ trainer.fit(model)
 ### Trainer
 It's recommended that you have a single trainer per lightning module. However, you can also use a single trainer for all your LightningModules.
-Check out the [MNIST example](https://github.com/williamFalcon/pytorch-lightning-conference-seed/tree/master/research_seed/mnist).
+Check out the [MNIST example](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/research_seed/mnist).
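A minimal sketch of that recommendation, reusing the `CoolSystem` module that appears further down in this commit (`args` is assumed to be a parsed namespace with a `learning_rate`; `AnotherCoolSystem` is a hypothetical second module, not part of this repo):

```python
# sketch: one Trainer dedicated to one LightningModule (the recommended pattern)
from pytorch_lightning import Trainer

model = CoolSystem(hparams=args)
trainer = Trainer(max_epochs=2)
trainer.fit(model)

# a single Trainer can also be reused across several LightningModules
shared_trainer = Trainer(max_epochs=2)
shared_trainer.fit(CoolSystem(hparams=args))
shared_trainer.fit(AnotherCoolSystem(hparams=args))  # hypothetical second module
```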
@@ -3,13 +3,15 @@ Use this seed to refactor your PyTorch research code for:
 - a paper submission
 - a new research project.
-[Read the usage instructions here](https://github.com/williamFalcon/pytorch-lightning-conference-seed/blob/master/HOWTO.md)
+[Read the usage instructions here](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/blob/master/HOWTO.md)
 #### Goals
 The goal of this seed is to structure ML paper code in one standard way, so that work can easily be extended and replicated.
-###### DELETE EVERYTHING ABOVE FOR YOUR PROJECT
+### DELETE EVERYTHING ABOVE FOR YOUR PROJECT
 ---
 <div align="center">
 # Your Project Name
@@ -47,7 +49,7 @@ pip install -r requirements.txt
 Next, navigate to [Your Main Contribution (MNIST here)] and run it.
 ```bash
 # module folder
-cd src/
+cd research_mnist/
 # run module (example: mnist as your main contribution)
 python simplest_mnist.py
@@ -55,12 +57,11 @@ python simplest_mnist.py
 ## Main Contribution
 List your modules here. Each module contains all the code for a full system, including instructions on how to run it.
-- [Production MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/production_mnist)
-- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist)
+- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/research_mnist)
 ## Baselines
 List your baselines here.
-- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist)
+- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/research_mnist)
 ### Citation
 ```
 …
 ```
-## MNIST
+## Research Seed Folder
 Create a folder for each contribution (e.g. MNIST, BERT, etc.).
 Each folder will have (see the layout sketch below this readme):
 ##### contribution_name_trainer.py
 Runs your LightningModule. Abstracts the training loop, distributed training, etc.
 ##### contribution_name.py
 Holds your main contribution.
 ## Example
 The folder here gives an example for MNIST.
 ### MNIST
 In this readme, give instructions on how to run your code.
 In this case, we remove the datasets from the LightningModule, as you might want to use the same model with many datasets.
 #### CPU
 ```bash
 …
 ```
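For concreteness, the MNIST example in this commit follows that convention roughly as below. This is a sketch: `mnist.py`, `mnist_trainer.py`, `mnist_data_module.py`, and `simplest_mnist.py` are all visible in this diff, while the `__init__.py` re-export is only implied by the trainer imports.

```
research_mnist/
├── __init__.py             # re-exports CoolSystem (implied by `from research_mnist import CoolSystem`)
├── mnist.py                # contribution_name.py: holds the LightningModule
├── mnist_data_module.py    # dataset/dataloader wiring
├── mnist_trainer.py        # contribution_name_trainer.py: runs the module
└── simplest_mnist.py       # self-contained minimal variant
```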
File moved
@@ -3,8 +3,8 @@ This file runs the main training/val loop, etc... using Lightning Trainer
 """
 from pytorch_lightning import Trainer, seed_everything
 from argparse import ArgumentParser
-from src.research_mnist.mnist import CoolSystem
-from src.research_mnist.mnist_data_module import MNISTDataModule
+from research_mnist import CoolSystem
+from research_mnist.mnist_data_module import MNISTDataModule
 # sets seeds for numpy, torch, etc...
 # must do for DDP to work well
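The new imports reflect the decoupling the readme mentions: the data lives outside the LightningModule and is injected at fit time. A minimal sketch of how these pieces fit together, assuming `MNISTDataModule` takes no required constructor arguments and `args` is the parsed CLI namespace:

```python
# sketch: the model stays data-agnostic; the datamodule is passed to fit()
from pytorch_lightning import Trainer
from research_mnist import CoolSystem
from research_mnist.mnist_data_module import MNISTDataModule

model = CoolSystem(hparams=args)            # args: parsed CLI namespace
trainer = Trainer.from_argparse_args(args)  # all Trainer flags come from the CLI
trainer.fit(model, datamodule=MNISTDataModule())
```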
File moved
@@ -2,12 +2,12 @@
 from setuptools import setup, find_packages
-setup(name='src',
-      version='0.0.1',
+setup(name='research_mnist',
+      version='0.0.0',
       description='Describe Your Cool Project',
       author='',
       author_email='',
-      url='https://github.com/williamFalcon/pytorch-lightning-conference-seed',  # REPLACE WITH YOUR OWN GITHUB PROJECT LINK
+      url='https://github.com/PyTorchLightning/pytorch-lightning-conference-seed',  # REPLACE WITH YOUR OWN GITHUB PROJECT LINK
       install_requires=[
           'pytorch-lightning'
       ],
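The package rename from `src` to `research_mnist` is what makes the bare `from research_mnist import ...` imports above resolve; presumably the package is installed in editable mode first (`pip install -e .`), as is usual for seeds structured this way.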
"""
This file defines the core research contribution
"""
import os
import torch
from torch.nn import functional as F
from argparse import ArgumentParser
import pytorch_lightning as pl
class CoolSystem(pl.LightningModule):
def __init__(self, hparams):
super(CoolSystem, self).__init__()
# not the best model...
self.hparams = hparams
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_idx):
# REQUIRED
x, y = batch
y_hat = self.forward(x)
loss = F.cross_entropy(y_hat, y)
tensorboard_logs = {'train_loss': loss}
return {'loss': loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx):
# OPTIONAL
x, y = batch
y_hat = self.forward(x)
return {'val_loss': F.cross_entropy(y_hat, y)}
def validation_epoch_end(self, outputs):
# OPTIONAL
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'avg_val_loss': avg_loss}
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def configure_optimizers(self):
# REQUIRED
# can return multiple optimizers and learning_rate schedulers
return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
@staticmethod
def add_model_specific_args(parent_parser):
"""
Specify the hyperparams for this LightningModule
"""
# MODEL specific
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--learning_rate', default=0.02, type=float)
parser.add_argument('--batch_size', default=32, type=int)
# training specific (for this model)
parser.add_argument('--max_nb_epochs', default=2, type=int)
return parser
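The comment in `configure_optimizers` notes that Lightning also accepts multiple optimizers and learning-rate schedulers, returned as two lists. A minimal sketch of that variant (an illustration, not part of this file):

```python
# sketch: an optimizer plus an LR scheduler, returned as ([optimizers], [schedulers])
def configure_optimizers(self):
    optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
    return [optimizer], [scheduler]
```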
"""
This file runs the main training/val loop, etc... using Lightning Trainer
"""
import os
from pytorch_lightning import Trainer, seed_everything
from argparse import ArgumentParser
from src.production_mnist.mnist import CoolSystem
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
# sets seeds for numpy, torch, etc...
# must do for DDP to work well
seed_everything(123)
def main(args):
# init module
model = CoolSystem(hparams=args)
train_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
val_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
# makes all flags available to trainer from cli
trainer = Trainer.from_argparse_args(args)
trainer.fit(model, train_loader, val_loader)
if __name__ == '__main__':
parser = ArgumentParser(add_help=False)
# add args from trainer
parser = Trainer.add_argparse_args(parser)
# give the module a chance to add own params
# good practice to define LightningModule speficic params in the module
parser = CoolSystem.add_model_specific_args(parser)
# parse params
args = parser.parse_args()
main(args)
## MNIST
In this readme, give instructions on how to run your code.
#### CPU
```bash
python mnist_trainer.py
```
#### Multiple GPUs
```bash
python mnist_trainer.py --gpus 4
```
or specific GPUs
```bash
python mnist_trainer.py --gpus '0,3'
```
#### On multiple nodes
```bash
python mnist_trainer.py --gpus 4 --num_nodes 4 --precision 16
```
"""
This file defines the core research contribution
"""
import os
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
from argparse import ArgumentParser
import pytorch_lightning as pl
pl.seed_everything(123)
class CoolSystem(pl.LightningModule):
def __init__(self, hparams):
super(CoolSystem, self).__init__()
self.hparams = hparams
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
tensorboard_logs = {'train_loss': loss}
return {'loss': loss, 'log': tensorboard_logs}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
if __name__ == '__main__':
train_data = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
parser = ArgumentParser(add_help=False)
parser.add_argument('--learning_rate', default=0.02, type=float)
# add args from trainer
parser = pl.Trainer.add_argparse_args(parser)
# parse params
args = parser.parse_args()
# init module
model = CoolSystem(hparams=args)
# most basic trainer, uses good defaults
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, train_data)
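Since `pl.Trainer.add_argparse_args(parser)` exposes the Trainer's own arguments on the command line, the script above should also accept flags such as `--max_epochs` or `--gpus` alongside `--learning_rate` (the exact flag set depends on the installed Lightning version).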