From 2443fa8cce0862a58810e6f341439428daf6ec78 Mon Sep 17 00:00:00 2001
From: William Falcon <waf2107@columbia.edu>
Date: Thu, 14 May 2020 12:05:27 -0400
Subject: [PATCH] updated seed

---
 requirements.txt                              |   2 +-
 setup.py                                      |   2 +-
 {research_seed => src}/README.md              |   0
 {research_seed => src}/__init__.py            |   0
 {research_seed => src}/baselines/README.md    |   0
 {research_seed => src}/baselines/__init__.py  |   0
 .../baselines/mnist_baseline/README.md        |   0
 .../baselines/mnist_baseline/__init__.py      |   0
 .../mnist_baseline/mnist_baseline.py          |   0
 .../mnist_baseline/mnist_baseline_trainer.py  |   0
 src/production_mnist/README.md                |  24 ++++
 .../production_mnist}/__init__.py             |   0
 src/production_mnist/mnist.py                 |  69 ++++++++++++
 src/production_mnist/mnist_trainer.py         |  37 +++++++
 .../mnist => src/research_mnist}/README.md    |   9 +-
 src/research_mnist/__init__.py                |   0
 .../mnist => src/research_mnist}/mnist.py     |  14 ++-
 .../research_mnist}/mnist_trainer.py          |  26 ++---
 src/single_file_mnist/README.md               |  22 ++++
 src/single_file_mnist/__init__.py             |   0
 src/single_file_mnist/mnist.py                | 104 ++++++++++++++++++
 src/single_file_mnist/mnist_trainer.py        |  34 ++++++
 22 files changed, 322 insertions(+), 21 deletions(-)
 rename {research_seed => src}/README.md (100%)
 rename {research_seed => src}/__init__.py (100%)
 rename {research_seed => src}/baselines/README.md (100%)
 rename {research_seed => src}/baselines/__init__.py (100%)
 rename {research_seed => src}/baselines/mnist_baseline/README.md (100%)
 rename {research_seed => src}/baselines/mnist_baseline/__init__.py (100%)
 rename {research_seed => src}/baselines/mnist_baseline/mnist_baseline.py (100%)
 rename {research_seed => src}/baselines/mnist_baseline/mnist_baseline_trainer.py (100%)
 create mode 100644 src/production_mnist/README.md
 rename {research_seed/mnist => src/production_mnist}/__init__.py (100%)
 create mode 100644 src/production_mnist/mnist.py
 create mode 100644 src/production_mnist/mnist_trainer.py
 rename {research_seed/mnist => src/research_mnist}/README.md (56%)
 create mode 100644 src/research_mnist/__init__.py
 rename {research_seed/mnist => src/research_mnist}/mnist.py (85%)
 rename {research_seed/mnist => src/research_mnist}/mnist_trainer.py (52%)
 create mode 100644 src/single_file_mnist/README.md
 create mode 100644 src/single_file_mnist/__init__.py
 create mode 100644 src/single_file_mnist/mnist.py
 create mode 100644 src/single_file_mnist/mnist_trainer.py

diff --git a/requirements.txt b/requirements.txt
index 8ba9e64..2992487 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1 @@
-pytorch-lightning
\ No newline at end of file
+pytorch-lightning >= 0.7.5
\ No newline at end of file
diff --git a/setup.py b/setup.py
index cc30dd4..7eb0124 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
 
 from setuptools import setup, find_packages
 
-setup(name='research_seed',
+setup(name='src',
       version='0.0.1',
       description='Describe Your Cool Project',
       author='',
diff --git a/research_seed/README.md b/src/README.md
similarity index 100%
rename from research_seed/README.md
rename to src/README.md
diff --git a/research_seed/__init__.py b/src/__init__.py
similarity index 100%
rename from research_seed/__init__.py
rename to src/__init__.py
diff --git a/research_seed/baselines/README.md b/src/baselines/README.md
similarity index 100%
rename from research_seed/baselines/README.md
rename to src/baselines/README.md
diff --git a/research_seed/baselines/__init__.py b/src/baselines/__init__.py
similarity index 100%
rename from research_seed/baselines/__init__.py
rename to src/baselines/__init__.py
diff --git a/research_seed/baselines/mnist_baseline/README.md b/src/baselines/mnist_baseline/README.md
similarity index 100%
rename from research_seed/baselines/mnist_baseline/README.md
rename to src/baselines/mnist_baseline/README.md
diff --git a/research_seed/baselines/mnist_baseline/__init__.py b/src/baselines/mnist_baseline/__init__.py
similarity index 100%
rename from research_seed/baselines/mnist_baseline/__init__.py
rename to src/baselines/mnist_baseline/__init__.py
diff --git a/research_seed/baselines/mnist_baseline/mnist_baseline.py b/src/baselines/mnist_baseline/mnist_baseline.py
similarity index 100%
rename from research_seed/baselines/mnist_baseline/mnist_baseline.py
rename to src/baselines/mnist_baseline/mnist_baseline.py
diff --git a/research_seed/baselines/mnist_baseline/mnist_baseline_trainer.py b/src/baselines/mnist_baseline/mnist_baseline_trainer.py
similarity index 100%
rename from research_seed/baselines/mnist_baseline/mnist_baseline_trainer.py
rename to src/baselines/mnist_baseline/mnist_baseline_trainer.py
diff --git a/src/production_mnist/README.md b/src/production_mnist/README.md
new file mode 100644
index 0000000..12df4d8
--- /dev/null
+++ b/src/production_mnist/README.md
@@ -0,0 +1,24 @@
+## MNIST    
+In this readme, give instructions on how to run your code.   
+In this case, we remove the datasets from the lightningModule as you
+might want to use the same model with many datasets
+
+#### CPU   
+```bash   
+python mnist_trainer.py     
+```
+
+#### Multiple-GPUs   
+```bash   
+python mnist_trainer.py --gpus 4
+```   
+
+or specific GPUs
+```bash   
+python mnist_trainer.py --gpus '0,3'
+```   
+
+#### On multiple nodes   
+```bash  
+python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
+```   
diff --git a/research_seed/mnist/__init__.py b/src/production_mnist/__init__.py
similarity index 100%
rename from research_seed/mnist/__init__.py
rename to src/production_mnist/__init__.py
diff --git a/src/production_mnist/mnist.py b/src/production_mnist/mnist.py
new file mode 100644
index 0000000..1e2b135
--- /dev/null
+++ b/src/production_mnist/mnist.py
@@ -0,0 +1,69 @@
+"""
+This file defines the core research contribution   
+"""
+import os
+import torch
+from torch.nn import functional as F
+from torch.utils.data import DataLoader
+from torchvision.datasets import MNIST
+import torchvision.transforms as transforms
+from argparse import ArgumentParser
+
+import pytorch_lightning as pl
+
+
+class CoolSystem(pl.LightningModule):
+
+    def __init__(self, hparams):
+        super(CoolSystem, self).__init__()
+        # not the best model...
+        self.hparams = hparams
+        self.l1 = torch.nn.Linear(28 * 28, 10)
+
+    def forward(self, x):
+        return torch.relu(self.l1(x.view(x.size(0), -1)))
+
+    def training_step(self, batch, batch_idx):
+        # REQUIRED
+        x, y = batch
+        y_hat = self.forward(x)
+        loss = F.cross_entropy(y_hat, y)
+
+        tensorboard_logs = {'train_loss': loss}
+
+        return {'loss': loss, 'log': tensorboard_logs}
+
+    def validation_step(self, batch, batch_idx):
+        # OPTIONAL
+        x, y = batch
+        y_hat = self.forward(x)
+        return {'val_loss': F.cross_entropy(y_hat, y)}
+
+    def validation_epoch_end(self, outputs):
+        # OPTIONAL
+        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
+
+        tensorboard_logs = {'avg_val_loss': avg_loss}
+        return {'val_loss': avg_loss, 'log': tensorboard_logs}
+
+    def configure_optimizers(self):
+        # REQUIRED
+        # can return multiple optimizers and learning_rate schedulers
+        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+
+
+    @staticmethod
+    def add_model_specific_args(parent_parser):
+        """
+        Specify the hyperparams for this LightningModule
+        """
+        # MODEL specific
+        parser = ArgumentParser(parents=[parent_parser], add_help=False)
+        parser.add_argument('--learning_rate', default=0.02, type=float)
+        parser.add_argument('--batch_size', default=32, type=int)
+
+        # training specific (for this model)
+        parser.add_argument('--max_nb_epochs', default=2, type=int)
+
+        return parser
+
diff --git a/src/production_mnist/mnist_trainer.py b/src/production_mnist/mnist_trainer.py
new file mode 100644
index 0000000..cd3e376
--- /dev/null
+++ b/src/production_mnist/mnist_trainer.py
@@ -0,0 +1,37 @@
+"""
+This file runs the main training/val loop, etc... using Lightning Trainer    
+"""
+import os
+from argparse import ArgumentParser
+from torch.utils.data import DataLoader
+from torchvision.datasets import MNIST
+import torchvision.transforms as transforms
+from pytorch_lightning import Trainer, seed_everything
+from src.production_mnist.mnist import CoolSystem
+seed_everything(123)
+def main(args):
+    # init module
+    model = CoolSystem(hparams=args)
+
+    train_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
+    val_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
+
+    # makes all flags available to trainer from cli
+    trainer = Trainer.from_argparse_args(args)
+    trainer.fit(model, train_loader, val_loader)
+
+
+if __name__ == '__main__':
+    parser = ArgumentParser(add_help=False)
+
+    # add args from trainer
+    parser = Trainer.add_argparse_args(parser)
+
+    # give the module a chance to add own params
+    # good practice to define LightningModule speficic params in the module
+    parser = CoolSystem.add_model_specific_args(parser)
+
+    # parse params
+    args = parser.parse_args()
+
+    main(args)
diff --git a/research_seed/mnist/README.md b/src/research_mnist/README.md
similarity index 56%
rename from research_seed/mnist/README.md
rename to src/research_mnist/README.md
index ebaaeee..4f7b00d 100644
--- a/research_seed/mnist/README.md
+++ b/src/research_mnist/README.md
@@ -8,10 +8,15 @@ python mnist_trainer.py
 
 #### Multiple-GPUs   
 ```bash   
-python mnist_trainer.py --gpus '0,1,2,3'  
+python mnist_trainer.py --gpus 4
+```   
+
+or specific GPUs
+```bash   
+python mnist_trainer.py --gpus '0,3'
 ```   
 
 #### On multiple nodes   
 ```bash  
-python mnist_trainer.py --gpus '0,1,2,3' --nodes 4  
+python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
 ```   
diff --git a/src/research_mnist/__init__.py b/src/research_mnist/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/research_seed/mnist/mnist.py b/src/research_mnist/mnist.py
similarity index 85%
rename from research_seed/mnist/mnist.py
rename to src/research_mnist/mnist.py
index e1d54bd..00ac111 100644
--- a/research_seed/mnist/mnist.py
+++ b/src/research_mnist/mnist.py
@@ -27,7 +27,11 @@ class CoolSystem(pl.LightningModule):
         # REQUIRED
         x, y = batch
         y_hat = self.forward(x)
-        return {'loss': F.cross_entropy(y_hat, y)}
+        loss = F.cross_entropy(y_hat, y)
+
+        tensorboard_logs = {'train_loss': loss}
+
+        return {'loss': loss, 'log': tensorboard_logs}
 
     def validation_step(self, batch, batch_idx):
         # OPTIONAL
@@ -35,10 +39,12 @@ class CoolSystem(pl.LightningModule):
         y_hat = self.forward(x)
         return {'val_loss': F.cross_entropy(y_hat, y)}
 
-    def validation_end(self, outputs):
+    def validation_epoch_end(self, outputs):
         # OPTIONAL
         avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-        return {'avg_val_loss': avg_loss}
+
+        tensorboard_logs = {'avg_val_loss': avg_loss}
+        return {'val_loss': avg_loss, 'log': tensorboard_logs}
 
     def configure_optimizers(self):
         # REQUIRED
@@ -63,7 +69,7 @@ class CoolSystem(pl.LightningModule):
         Specify the hyperparams for this LightningModule
         """
         # MODEL specific
-        parser = ArgumentParser(parents=[parent_parser])
+        parser = ArgumentParser(parents=[parent_parser], add_help=False)
         parser.add_argument('--learning_rate', default=0.02, type=float)
         parser.add_argument('--batch_size', default=32, type=int)
 
diff --git a/research_seed/mnist/mnist_trainer.py b/src/research_mnist/mnist_trainer.py
similarity index 52%
rename from research_seed/mnist/mnist_trainer.py
rename to src/research_mnist/mnist_trainer.py
index 152ae4d..ac2719b 100644
--- a/research_seed/mnist/mnist_trainer.py
+++ b/src/research_mnist/mnist_trainer.py
@@ -1,34 +1,34 @@
 """
 This file runs the main training/val loop, etc... using Lightning Trainer    
 """
-from pytorch_lightning import Trainer
+from pytorch_lightning import Trainer, seed_everything
 from argparse import ArgumentParser
-from research_seed.mnist.mnist import CoolSystem
+from src.research_mnist.mnist import CoolSystem
 
+# sets seeds for numpy, torch, etc...
+# must do for DDP to work well
+seed_everything(123)
 
-def main(hparams):
+def main(args):
     # init module
-    model = CoolSystem(hparams)
+    model = CoolSystem(hparams=args)
 
     # most basic trainer, uses good defaults
-    trainer = Trainer(
-        max_nb_epochs=hparams.max_nb_epochs,
-        gpus=hparams.gpus,
-        nb_gpu_nodes=hparams.nodes,
-    )
+    trainer = Trainer.from_argparse_args(args)
     trainer.fit(model)
 
 
 if __name__ == '__main__':
     parser = ArgumentParser(add_help=False)
-    parser.add_argument('--gpus', type=str, default=None)
-    parser.add_argument('--nodes', type=int, default=1)
+
+    # add args from trainer
+    parser = Trainer.add_argparse_args(parser)
 
     # give the module a chance to add own params
     # good practice to define LightningModule speficic params in the module
     parser = CoolSystem.add_model_specific_args(parser)
 
     # parse params
-    hparams = parser.parse_args()
+    args = parser.parse_args()
 
-    main(hparams)
+    main(args)
diff --git a/src/single_file_mnist/README.md b/src/single_file_mnist/README.md
new file mode 100644
index 0000000..4f7b00d
--- /dev/null
+++ b/src/single_file_mnist/README.md
@@ -0,0 +1,22 @@
+## MNIST    
+In this readme, give instructions on how to run your code.   
+
+#### CPU   
+```bash   
+python mnist_trainer.py     
+```
+
+#### Multiple-GPUs   
+```bash   
+python mnist_trainer.py --gpus 4
+```   
+
+or specific GPUs
+```bash   
+python mnist_trainer.py --gpus '0,3'
+```   
+
+#### On multiple nodes   
+```bash  
+python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
+```   
diff --git a/src/single_file_mnist/__init__.py b/src/single_file_mnist/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/single_file_mnist/mnist.py b/src/single_file_mnist/mnist.py
new file mode 100644
index 0000000..07d77f2
--- /dev/null
+++ b/src/single_file_mnist/mnist.py
@@ -0,0 +1,104 @@
+"""
+This file defines the core research contribution   
+"""
+import os
+import torch
+from torch.nn import functional as F
+from torch.utils.data import DataLoader
+from torchvision.datasets import MNIST
+import torchvision.transforms as transforms
+from argparse import ArgumentParser
+
+import pytorch_lightning as pl
+
+pl.seed_everything(123)
+
+
+class CoolSystem(pl.LightningModule):
+
+    def __init__(self, hparams):
+        super(CoolSystem, self).__init__()
+        # not the best model...
+        self.hparams = hparams
+        self.l1 = torch.nn.Linear(28 * 28, 10)
+
+    def forward(self, x):
+        return torch.relu(self.l1(x.view(x.size(0), -1)))
+
+    def training_step(self, batch, batch_idx):
+        # REQUIRED
+        x, y = batch
+        y_hat = self.forward(x)
+        loss = F.cross_entropy(y_hat, y)
+
+        tensorboard_logs = {'train_loss': loss}
+
+        return {'loss': loss, 'log': tensorboard_logs}
+
+    def validation_step(self, batch, batch_idx):
+        # OPTIONAL
+        x, y = batch
+        y_hat = self.forward(x)
+        return {'val_loss': F.cross_entropy(y_hat, y)}
+
+    def validation_epoch_end(self, outputs):
+        # OPTIONAL
+        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
+
+        tensorboard_logs = {'avg_val_loss': avg_loss}
+        return {'val_loss': avg_loss, 'log': tensorboard_logs}
+
+    def configure_optimizers(self):
+        # REQUIRED
+        # can return multiple optimizers and learning_rate schedulers
+        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+
+    def train_dataloader(self):
+        # REQUIRED
+        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
+
+    def val_dataloader(self):
+        # OPTIONAL
+        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
+
+    def test_dataloader(self):
+        # OPTIONAL
+        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
+
+    @staticmethod
+    def add_model_specific_args(parent_parser):
+        """
+        Specify the hyperparams for this LightningModule
+        """
+        # MODEL specific
+        parser = ArgumentParser(parents=[parent_parser], add_help=False)
+        parser.add_argument('--learning_rate', default=0.02, type=float)
+        parser.add_argument('--batch_size', default=32, type=int)
+
+        # training specific (for this model)
+        parser.add_argument('--max_nb_epochs', default=2, type=int)
+
+        return parser
+
+
+if __name__ == '__main__':
+    parser = ArgumentParser(add_help=False)
+
+    # add args from trainer
+    parser = pl.Trainer.add_argparse_args(parser)
+
+    # give the module a chance to add own params
+    # good practice to define LightningModule speficic params in the module
+    parser = CoolSystem.add_model_specific_args(parser)
+
+    # parse params
+    args = parser.parse_args()
+
+    # init module
+    model = CoolSystem(hparams=args)
+
+    # most basic trainer, uses good defaults
+    trainer = pl.Trainer.from_argparse_args(args)
+    trainer.fit(model)
+
+
diff --git a/src/single_file_mnist/mnist_trainer.py b/src/single_file_mnist/mnist_trainer.py
new file mode 100644
index 0000000..ac2719b
--- /dev/null
+++ b/src/single_file_mnist/mnist_trainer.py
@@ -0,0 +1,34 @@
+"""
+This file runs the main training/val loop, etc... using Lightning Trainer    
+"""
+from pytorch_lightning import Trainer, seed_everything
+from argparse import ArgumentParser
+from src.single_file_mnist.mnist import CoolSystem
+
+# sets seeds for numpy, torch, etc...
+# must do for DDP to work well
+seed_everything(123)
+
+def main(args):
+    # init module
+    model = CoolSystem(hparams=args)
+
+    # most basic trainer, uses good defaults
+    trainer = Trainer.from_argparse_args(args)
+    trainer.fit(model)
+
+
+if __name__ == '__main__':
+    parser = ArgumentParser(add_help=False)
+
+    # add args from trainer
+    parser = Trainer.add_argparse_args(parser)
+
+    # give the module a chance to add own params
+    # good practice to define LightningModule speficic params in the module
+    parser = CoolSystem.add_model_specific_args(parser)
+
+    # parse params
+    args = parser.parse_args()
+
+    main(args)
-- 
GitLab