From 1b3d8291b508321c32027341a45a41159aab0487 Mon Sep 17 00:00:00 2001
From: Jirka Borovec <Borda@users.noreply.github.com>
Date: Sun, 30 Aug 2020 23:15:14 +0200
Subject: [PATCH] prune packages (#10)

---
 HOWTO.md                                      |  2 +-
 README.md                                     | 15 +++--
 research_mnist/README.md                      | 35 ++++++++++
 {src => research_mnist}/__init__.py           |  0
 .../mnist_data_module.py                      |  0
 .../mnist_trainer.py                          |  4 +-
 .../simplest_mnist.py                         |  0
 setup.py                                      |  6 +-
 src/README.md                                 | 12 ----
 src/production_mnist/README.md                | 24 -------
 src/production_mnist/__init__.py              |  0
 src/production_mnist/mnist.py                 | 66 -------------------
 src/production_mnist/mnist_trainer.py         | 41 ------------
 src/research_mnist/README.md                  | 22 -------
 src/research_mnist/__init__.py                |  0
 src/simplest_mnist.py                         | 56 ----------------
 16 files changed, 49 insertions(+), 234 deletions(-)
 create mode 100644 research_mnist/README.md
 rename {src => research_mnist}/__init__.py (100%)
 rename {src/research_mnist => research_mnist}/mnist_data_module.py (100%)
 rename {src/research_mnist => research_mnist}/mnist_trainer.py (89%)
 rename src/research_mnist/mnist.py => research_mnist/simplest_mnist.py (100%)
 delete mode 100644 src/README.md
 delete mode 100644 src/production_mnist/README.md
 delete mode 100644 src/production_mnist/__init__.py
 delete mode 100644 src/production_mnist/mnist.py
 delete mode 100644 src/production_mnist/mnist_trainer.py
 delete mode 100644 src/research_mnist/README.md
 delete mode 100644 src/research_mnist/__init__.py
 delete mode 100644 src/simplest_mnist.py

diff --git a/HOWTO.md b/HOWTO.md
index 7f53852..59f6b4a 100644
--- a/HOWTO.md
+++ b/HOWTO.md
@@ -86,4 +86,4 @@ trainer.fit(model)
 ### Trainer   
 It's recommended that you have a single trainer per LightningModule. However, you can also use a single trainer for all your LightningModules.    
 
-Check out the [MNIST example](https://github.com/williamFalcon/pytorch-lightning-conference-seed/tree/master/research_seed/mnist).  
+Check out the [MNIST example](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/research_seed/mnist).  
diff --git a/README.md b/README.md
index 734a5ad..107ad71 100644
--- a/README.md
+++ b/README.md
@@ -3,13 +3,15 @@ Use this seed to refactor your PyTorch research code for:
 - a paper submission  
 - a new research project.     
 
-[Read the usage instructions here](https://github.com/williamFalcon/pytorch-lightning-conference-seed/blob/master/HOWTO.md)
+[Read the usage instructions here](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/blob/master/HOWTO.md)
 
 #### Goals  
 The goal of this seed is to structure ML paper code the same way, so that work can easily be extended and replicated.   
 
-###### DELETE EVERYTHING ABOVE FOR YOUR PROJECT   
----   
+### DELETE EVERYTHING ABOVE FOR YOUR PROJECT  
+ 
+---
+
 <div align="center">    
  
 # Your Project Name     
@@ -47,7 +49,7 @@ pip install -r requirements.txt
  Next, navigate to [Your Main Contribution (MNIST here)] and run it.   
  ```bash
 # module folder
-cd src/    
+cd research_mnist/    
 
 # run module (example: mnist as your main contribution)   
 python simplest_mnist.py    
@@ -55,12 +57,11 @@ python simplest_mnist.py
 
 ## Main Contribution      
 List your modules here. Each module contains all code for a full system, including instructions on how to run it.   
-- [Production MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/production_mnist)    
-- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist)  
+- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/research_mnist)  
 
 ## Baselines    
 List your baselines here.   
-- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist) 
+- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/research_mnist) 
 
 ### Citation   
 ```
diff --git a/research_mnist/README.md b/research_mnist/README.md
new file mode 100644
index 0000000..029c097
--- /dev/null
+++ b/research_mnist/README.md
@@ -0,0 +1,35 @@
+## Research Seed Folder   
+Create a folder for each contribution (e.g., MNIST, BERT, etc.).   
+Each folder will have:
+
+##### contribution_name_trainer.py    
+Runs your LightningModule. Abstracts the training loop, distributed training, etc.   
+
+##### contribution_name.py  
+Holds your main contribution.   
+
+## Example  
+The folder here gives an example for MNIST.   
+
+### MNIST    
+In this readme, give instructions on how to run your code.   
+
+#### CPU   
+```bash   
+python mnist_trainer.py     
+```
+
+#### Multiple GPUs   
+```bash   
+python mnist_trainer.py --gpus 4
+```   
+
+or specific GPUs
+```bash   
+python mnist_trainer.py --gpus '0,3'
+```   
+
+#### On multiple nodes   
+```bash  
+python mnist_trainer.py --gpus 4 --num_nodes 4 --precision 16
+```   
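For orientation, the trainer convention this README describes looks roughly like the following, a minimal sketch based on research_mnist/mnist_trainer.py as it stands after this patch; the MNISTDataModule constructor is assumed to take no arguments, since the diff renames that file without showing its contents:

```python
"""Sketch of the contribution_name_trainer.py pattern (based on
research_mnist/mnist_trainer.py in this patch; MNISTDataModule's
constructor signature is an assumption, not shown in the diff)."""
from argparse import ArgumentParser

from pytorch_lightning import Trainer, seed_everything

from research_mnist.simplest_mnist import CoolSystem
from research_mnist.mnist_data_module import MNISTDataModule

# seed numpy, torch, etc. so DDP replicas start from identical state
seed_everything(123)


def main(args):
    model = CoolSystem(hparams=args)            # the core contribution
    dm = MNISTDataModule()                      # datasets live outside the module
    trainer = Trainer.from_argparse_args(args)  # all Trainer flags come from the CLI
    trainer.fit(model, datamodule=dm)


if __name__ == '__main__':
    parser = ArgumentParser(add_help=False)
    parser = Trainer.add_argparse_args(parser)           # --gpus, --num_nodes, ...
    parser = CoolSystem.add_model_specific_args(parser)  # module-specific hyperparams
    main(parser.parse_args())
```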
diff --git a/src/__init__.py b/research_mnist/__init__.py
similarity index 100%
rename from src/__init__.py
rename to research_mnist/__init__.py
diff --git a/src/research_mnist/mnist_data_module.py b/research_mnist/mnist_data_module.py
similarity index 100%
rename from src/research_mnist/mnist_data_module.py
rename to research_mnist/mnist_data_module.py
diff --git a/src/research_mnist/mnist_trainer.py b/research_mnist/mnist_trainer.py
similarity index 89%
rename from src/research_mnist/mnist_trainer.py
rename to research_mnist/mnist_trainer.py
index 6e05f05..f3caa6b 100644
--- a/src/research_mnist/mnist_trainer.py
+++ b/research_mnist/mnist_trainer.py
@@ -3,8 +3,8 @@ This file runs the main training/val loop, etc... using Lightning Trainer
 """
 from pytorch_lightning import Trainer, seed_everything
 from argparse import ArgumentParser
-from src.research_mnist.mnist import CoolSystem
-from src.research_mnist.mnist_data_module import MNISTDataModule
+from research_mnist.simplest_mnist import CoolSystem
+from research_mnist.mnist_data_module import MNISTDataModule
 
 # sets seeds for numpy, torch, etc...
 # must do for DDP to work well
diff --git a/src/research_mnist/mnist.py b/research_mnist/simplest_mnist.py
similarity index 100%
rename from src/research_mnist/mnist.py
rename to research_mnist/simplest_mnist.py
diff --git a/setup.py b/setup.py
index 7eb0124..f224a16 100644
--- a/setup.py
+++ b/setup.py
@@ -2,12 +2,12 @@
 
 from setuptools import setup, find_packages
 
-setup(name='src',
-      version='0.0.1',
+setup(name='research_mnist',
+      version='0.0.0',
       description='Describe Your Cool Project',
       author='',
       author_email='',
-      url='https://github.com/williamFalcon/pytorch-lightning-conference-seed',  # REPLACE WITH YOUR OWN GITHUB PROJECT LINK
+      url='https://github.com/PyTorchLightning/pytorch-lightning-conference-seed',  # REPLACE WITH YOUR OWN GITHUB PROJECT LINK
       install_requires=[
             'pytorch-lightning'
       ],
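Since setup.py imports find_packages, the renamed top-level research_mnist/ package should be discovered automatically (the packages= line falls outside this hunk). A quick sanity-check sketch, assuming the project has been installed, for example with `pip install -e .` from the repo root:

```python
# Sanity check (a sketch): verify the renamed package is importable after
# installing the project; the path printed depends on your checkout location.
import research_mnist
from research_mnist.mnist_data_module import MNISTDataModule  # noqa: F401

print(research_mnist.__file__)  # should point into the top-level research_mnist/ folder
```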
diff --git a/src/README.md b/src/README.md
deleted file mode 100644
index d29a9c3..0000000
--- a/src/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-## Research Seed Folder   
-Create a folder for each contribution (ie: MNIST, BERT, etc...).   
-Each folder will have:
-
-##### contribution_name_trainer.py    
-Runs your LightningModule. Abstracts training loop, distributed training, etc...   
-
-##### contribution_name.py  
-Holds your main contribution   
-
-## Example  
-The folder here gives an example for mnist.   
diff --git a/src/production_mnist/README.md b/src/production_mnist/README.md
deleted file mode 100644
index 12df4d8..0000000
--- a/src/production_mnist/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-## MNIST    
-In this readme, give instructions on how to run your code.   
-In this case, we remove the datasets from the lightningModule as you
-might want to use the same model with many datasets
-
-#### CPU   
-```bash   
-python mnist_trainer.py     
-```
-
-#### Multiple-GPUs   
-```bash   
-python mnist_trainer.py --gpus 4
-```   
-
-or specific GPUs
-```bash   
-python mnist_trainer.py --gpus '0,3'
-```   
-
-#### On multiple nodes   
-```bash  
-python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
-```   
diff --git a/src/production_mnist/__init__.py b/src/production_mnist/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/production_mnist/mnist.py b/src/production_mnist/mnist.py
deleted file mode 100644
index 609b3ba..0000000
--- a/src/production_mnist/mnist.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-This file defines the core research contribution   
-"""
-import os
-import torch
-from torch.nn import functional as F
-from argparse import ArgumentParser
-
-import pytorch_lightning as pl
-
-
-class CoolSystem(pl.LightningModule):
-
-    def __init__(self, hparams):
-        super(CoolSystem, self).__init__()
-        # not the best model...
-        self.hparams = hparams
-        self.l1 = torch.nn.Linear(28 * 28, 10)
-
-    def forward(self, x):
-        return torch.relu(self.l1(x.view(x.size(0), -1)))
-
-    def training_step(self, batch, batch_idx):
-        # REQUIRED
-        x, y = batch
-        y_hat = self.forward(x)
-        loss = F.cross_entropy(y_hat, y)
-
-        tensorboard_logs = {'train_loss': loss}
-
-        return {'loss': loss, 'log': tensorboard_logs}
-
-    def validation_step(self, batch, batch_idx):
-        # OPTIONAL
-        x, y = batch
-        y_hat = self.forward(x)
-        return {'val_loss': F.cross_entropy(y_hat, y)}
-
-    def validation_epoch_end(self, outputs):
-        # OPTIONAL
-        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-
-        tensorboard_logs = {'avg_val_loss': avg_loss}
-        return {'val_loss': avg_loss, 'log': tensorboard_logs}
-
-    def configure_optimizers(self):
-        # REQUIRED
-        # can return multiple optimizers and learning_rate schedulers
-        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
-
-
-    @staticmethod
-    def add_model_specific_args(parent_parser):
-        """
-        Specify the hyperparams for this LightningModule
-        """
-        # MODEL specific
-        parser = ArgumentParser(parents=[parent_parser], add_help=False)
-        parser.add_argument('--learning_rate', default=0.02, type=float)
-        parser.add_argument('--batch_size', default=32, type=int)
-
-        # training specific (for this model)
-        parser.add_argument('--max_nb_epochs', default=2, type=int)
-
-        return parser
-
diff --git a/src/production_mnist/mnist_trainer.py b/src/production_mnist/mnist_trainer.py
deleted file mode 100644
index 2dae87b..0000000
--- a/src/production_mnist/mnist_trainer.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""
-This file runs the main training/val loop, etc... using Lightning Trainer    
-"""
-import os
-from pytorch_lightning import Trainer, seed_everything
-from argparse import ArgumentParser
-from src.production_mnist.mnist import CoolSystem
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-import torchvision.transforms as transforms
-
-# sets seeds for numpy, torch, etc...
-# must do for DDP to work well
-seed_everything(123)
-
-def main(args):
-    # init module
-    model = CoolSystem(hparams=args)
-
-    train_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
-    val_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
-
-    # makes all flags available to trainer from cli
-    trainer = Trainer.from_argparse_args(args)
-    trainer.fit(model, train_loader, val_loader)
-
-
-if __name__ == '__main__':
-    parser = ArgumentParser(add_help=False)
-
-    # add args from trainer
-    parser = Trainer.add_argparse_args(parser)
-
-    # give the module a chance to add own params
-    # good practice to define LightningModule specific params in the module
-    parser = CoolSystem.add_model_specific_args(parser)
-
-    # parse params
-    args = parser.parse_args()
-
-    main(args)
diff --git a/src/research_mnist/README.md b/src/research_mnist/README.md
deleted file mode 100644
index 4f7b00d..0000000
--- a/src/research_mnist/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-## MNIST    
-In this readme, give instructions on how to run your code.   
-
-#### CPU   
-```bash   
-python mnist_trainer.py     
-```
-
-#### Multiple-GPUs   
-```bash   
-python mnist_trainer.py --gpus 4
-```   
-
-or specific GPUs
-```bash   
-python mnist_trainer.py --gpus '0,3'
-```   
-
-#### On multiple nodes   
-```bash  
-python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
-```   
diff --git a/src/research_mnist/__init__.py b/src/research_mnist/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/simplest_mnist.py b/src/simplest_mnist.py
deleted file mode 100644
index 8ba4f8d..0000000
--- a/src/simplest_mnist.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-This file defines the core research contribution   
-"""
-import os
-import torch
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-import torchvision.transforms as transforms
-from argparse import ArgumentParser
-import pytorch_lightning as pl
-
-pl.seed_everything(123)
-
-
-class CoolSystem(pl.LightningModule):
-
-    def __init__(self, hparams):
-        super(CoolSystem, self).__init__()
-        self.hparams = hparams
-        self.l1 = torch.nn.Linear(28 * 28, 10)
-
-    def forward(self, x):
-        return torch.relu(self.l1(x.view(x.size(0), -1)))
-
-    def training_step(self, batch, batch_idx):
-        x, y = batch
-        y_hat = self(x)
-        loss = F.cross_entropy(y_hat, y)
-        tensorboard_logs = {'train_loss': loss}
-        return {'loss': loss, 'log': tensorboard_logs}
-
-    def configure_optimizers(self):
-        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
-
-
-if __name__ == '__main__':
-    train_data = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
-
-    parser = ArgumentParser(add_help=False)
-    parser.add_argument('--learning_rate', default=0.02, type=float)
-
-    # add args from trainer
-    parser = pl.Trainer.add_argparse_args(parser)
-
-    # parse params
-    args = parser.parse_args()
-
-    # init module
-    model = CoolSystem(hparams=args)
-
-    # most basic trainer, uses good defaults
-    trainer = pl.Trainer.from_argparse_args(args)
-    trainer.fit(model, train_data)
-
-
-- 
GitLab