diff --git a/requirements.txt b/requirements.txt
index 8ba9e64c715529ef1c1bddb9263954f2a1104e35..299248718de256390ef419166ee3f51be197d3c0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1 @@
-pytorch-lightning
\ No newline at end of file
+pytorch-lightning >= 0.7.5
\ No newline at end of file
diff --git a/setup.py b/setup.py
index cc30dd48b3872f271836879e8e19a18c8f1c922e..7eb01249a3e457117396ac3905c174d3ce42e46a 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
 
 from setuptools import setup, find_packages
 
-setup(name='research_seed',
+setup(name='src',
       version='0.0.1',
       description='Describe Your Cool Project',
       author='',
diff --git a/research_seed/README.md b/src/README.md
similarity index 100%
rename from research_seed/README.md
rename to src/README.md
diff --git a/research_seed/__init__.py b/src/__init__.py
similarity index 100%
rename from research_seed/__init__.py
rename to src/__init__.py
diff --git a/research_seed/baselines/README.md b/src/baselines/README.md
similarity index 100%
rename from research_seed/baselines/README.md
rename to src/baselines/README.md
diff --git a/research_seed/baselines/__init__.py b/src/baselines/__init__.py
similarity index 100%
rename from research_seed/baselines/__init__.py
rename to src/baselines/__init__.py
diff --git a/research_seed/baselines/mnist_baseline/README.md b/src/baselines/mnist_baseline/README.md
similarity index 100%
rename from research_seed/baselines/mnist_baseline/README.md
rename to src/baselines/mnist_baseline/README.md
diff --git a/research_seed/baselines/mnist_baseline/__init__.py b/src/baselines/mnist_baseline/__init__.py
similarity index 100%
rename from research_seed/baselines/mnist_baseline/__init__.py
rename to src/baselines/mnist_baseline/__init__.py
diff --git a/research_seed/baselines/mnist_baseline/mnist_baseline.py b/src/baselines/mnist_baseline/mnist_baseline.py
similarity index 100%
rename from research_seed/baselines/mnist_baseline/mnist_baseline.py
rename to src/baselines/mnist_baseline/mnist_baseline.py
diff --git a/research_seed/baselines/mnist_baseline/mnist_baseline_trainer.py b/src/baselines/mnist_baseline/mnist_baseline_trainer.py
similarity index 100%
rename from research_seed/baselines/mnist_baseline/mnist_baseline_trainer.py
rename to src/baselines/mnist_baseline/mnist_baseline_trainer.py
diff --git a/src/production_mnist/README.md b/src/production_mnist/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..12df4d8a85281292b6eb99e9030300c566949944
--- /dev/null
+++ b/src/production_mnist/README.md
@@ -0,0 +1,24 @@
+## MNIST    
+In this readme, give instructions on how to run your code.   
+In this case, we remove the datasets from the lightningModule as you
+might want to use the same model with many datasets
+
+#### CPU   
+```bash   
+python mnist_trainer.py     
+```
+
+#### Multiple-GPUs   
+```bash   
+python mnist_trainer.py --gpus 4
+```   
+
+or specific GPUs
+```bash   
+python mnist_trainer.py --gpus '0,3'
+```   
+
+#### On multiple nodes   
+```bash  
+python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
+```   
diff --git a/research_seed/mnist/__init__.py b/src/production_mnist/__init__.py
similarity index 100%
rename from research_seed/mnist/__init__.py
rename to src/production_mnist/__init__.py
diff --git a/src/production_mnist/mnist.py b/src/production_mnist/mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e2b13575c6ad20d8e7f237a7617d6a558c1637e
--- /dev/null
+++ b/src/production_mnist/mnist.py
@@ -0,0 +1,69 @@
+"""
+This file defines the core research contribution   
+"""
+import os
+import torch
+from torch.nn import functional as F
+from torch.utils.data import DataLoader
+from torchvision.datasets import MNIST
+import torchvision.transforms as transforms
+from argparse import ArgumentParser
+
+import pytorch_lightning as pl
+
+
+class CoolSystem(pl.LightningModule):
+
+    def __init__(self, hparams):
+        super(CoolSystem, self).__init__()
+        # not the best model...
+        self.hparams = hparams
+        self.l1 = torch.nn.Linear(28 * 28, 10)
+
+    def forward(self, x):
+        return torch.relu(self.l1(x.view(x.size(0), -1)))
+
+    def training_step(self, batch, batch_idx):
+        # REQUIRED
+        x, y = batch
+        y_hat = self.forward(x)
+        loss = F.cross_entropy(y_hat, y)
+
+        tensorboard_logs = {'train_loss': loss}
+
+        return {'loss': loss, 'log': tensorboard_logs}
+
+    def validation_step(self, batch, batch_idx):
+        # OPTIONAL
+        x, y = batch
+        y_hat = self.forward(x)
+        return {'val_loss': F.cross_entropy(y_hat, y)}
+
+    def validation_epoch_end(self, outputs):
+        # OPTIONAL
+        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
+
+        tensorboard_logs = {'avg_val_loss': avg_loss}
+        return {'val_loss': avg_loss, 'log': tensorboard_logs}
+
+    def configure_optimizers(self):
+        # REQUIRED
+        # can return multiple optimizers and learning_rate schedulers
+        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+
+
+    @staticmethod
+    def add_model_specific_args(parent_parser):
+        """
+        Specify the hyperparams for this LightningModule
+        """
+        # MODEL specific
+        parser = ArgumentParser(parents=[parent_parser], add_help=False)
+        parser.add_argument('--learning_rate', default=0.02, type=float)
+        parser.add_argument('--batch_size', default=32, type=int)
+
+        # training specific (for this model)
+        parser.add_argument('--max_nb_epochs', default=2, type=int)
+
+        return parser
+
diff --git a/src/production_mnist/mnist_trainer.py b/src/production_mnist/mnist_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd3e376034f38d4f79f2a578be971430ed91dfd4
--- /dev/null
+++ b/src/production_mnist/mnist_trainer.py
@@ -0,0 +1,42 @@
+"""
+This file runs the main training/val loop, etc... using Lightning Trainer    
+"""
+import os
+
+from torch.utils.data import DataLoader
+from torchvision.datasets import MNIST
+import torchvision.transforms as transforms
+from pytorch_lightning import Trainer, seed_everything
+from argparse import ArgumentParser
+from src.production_mnist.mnist import CoolSystem
+
+# sets seeds for numpy, torch, etc...
+# must do for DDP to work well
+seed_everything(123)
+
+def main(args):
+    # init module
+    model = CoolSystem(hparams=args)
+
+    train_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
+    val_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
+
+    # makes all flags available to trainer from cli
+    trainer = Trainer.from_argparse_args(args)
+    trainer.fit(model, train_loader, val_loader)
+
+
+if __name__ == '__main__':
+    parser = ArgumentParser(add_help=False)
+
+    # add args from trainer
+    parser = Trainer.add_argparse_args(parser)
+
+    # give the module a chance to add own params
+    # good practice to define LightningModule specific params in the module
+    parser = CoolSystem.add_model_specific_args(parser)
+
+    # parse params
+    args = parser.parse_args()
+
+    main(args)
diff --git a/research_seed/mnist/README.md b/src/research_mnist/README.md
similarity index 56%
rename from research_seed/mnist/README.md
rename to src/research_mnist/README.md
index ebaaeeeeb92e2b269c274ecf27aec3d4bb9dd2d4..4f7b00df1bbc8bc435e771fb10564eeacd0fb5d5 100644
--- a/research_seed/mnist/README.md
+++ b/src/research_mnist/README.md
@@ -8,10 +8,15 @@ python mnist_trainer.py
 
 #### Multiple-GPUs   
 ```bash   
-python mnist_trainer.py --gpus '0,1,2,3'  
+python mnist_trainer.py --gpus 4
+```   
+
+or specific GPUs
+```bash   
+python mnist_trainer.py --gpus '0,3'
 ```   
 
 #### On multiple nodes   
 ```bash  
-python mnist_trainer.py --gpus '0,1,2,3' --nodes 4  
+python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
 ```   
diff --git a/src/research_mnist/__init__.py b/src/research_mnist/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research_seed/mnist/mnist.py b/src/research_mnist/mnist.py
similarity index 85%
rename from research_seed/mnist/mnist.py
rename to src/research_mnist/mnist.py
index e1d54bda640669e01b026046b3b8f00fbf12a0d9..00ac111aae204b64dbb7d04800c0afa6b55e69e2 100644
--- a/research_seed/mnist/mnist.py
+++ b/src/research_mnist/mnist.py
@@ -27,7 +27,11 @@ class CoolSystem(pl.LightningModule):
         # REQUIRED
         x, y = batch
         y_hat = self.forward(x)
-        return {'loss': F.cross_entropy(y_hat, y)}
+        loss = F.cross_entropy(y_hat, y)
+
+        tensorboard_logs = {'train_loss': loss}
+
+        return {'loss': loss, 'log': tensorboard_logs}
 
     def validation_step(self, batch, batch_idx):
         # OPTIONAL
@@ -35,10 +39,12 @@ class CoolSystem(pl.LightningModule):
         y_hat = self.forward(x)
         return {'val_loss': F.cross_entropy(y_hat, y)}
 
-    def validation_end(self, outputs):
+    def validation_epoch_end(self, outputs):
         # OPTIONAL
         avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-        return {'avg_val_loss': avg_loss}
+
+        tensorboard_logs = {'avg_val_loss': avg_loss}
+        return {'val_loss': avg_loss, 'log': tensorboard_logs}
 
     def configure_optimizers(self):
         # REQUIRED
@@ -63,7 +69,7 @@ class CoolSystem(pl.LightningModule):
         Specify the hyperparams for this LightningModule
         """
         # MODEL specific
-        parser = ArgumentParser(parents=[parent_parser])
+        parser = ArgumentParser(parents=[parent_parser], add_help=False)
         parser.add_argument('--learning_rate', default=0.02, type=float)
         parser.add_argument('--batch_size', default=32, type=int)
 
diff --git a/research_seed/mnist/mnist_trainer.py b/src/research_mnist/mnist_trainer.py
similarity index 52%
rename from research_seed/mnist/mnist_trainer.py
rename to src/research_mnist/mnist_trainer.py
index 152ae4d1caaf6c7bee5fc6e496baa17917518290..ac2719b140fb105711162d6a7371df03b365a2c7 100644
--- a/research_seed/mnist/mnist_trainer.py
+++ b/src/research_mnist/mnist_trainer.py
@@ -1,34 +1,34 @@
 """
 This file runs the main training/val loop, etc... using Lightning Trainer    
 """
-from pytorch_lightning import Trainer
+from pytorch_lightning import Trainer, seed_everything
 from argparse import ArgumentParser
-from research_seed.mnist.mnist import CoolSystem
+from src.research_mnist.mnist import CoolSystem
 
+# sets seeds for numpy, torch, etc...
+# must do for DDP to work well
+seed_everything(123)
 
-def main(hparams):
+def main(args):
     # init module
-    model = CoolSystem(hparams)
+    model = CoolSystem(hparams=args)
 
     # most basic trainer, uses good defaults
-    trainer = Trainer(
-        max_nb_epochs=hparams.max_nb_epochs,
-        gpus=hparams.gpus,
-        nb_gpu_nodes=hparams.nodes,
-    )
+    trainer = Trainer.from_argparse_args(args)
     trainer.fit(model)
 
 
 if __name__ == '__main__':
     parser = ArgumentParser(add_help=False)
-    parser.add_argument('--gpus', type=str, default=None)
-    parser.add_argument('--nodes', type=int, default=1)
+
+    # add args from trainer
+    parser = Trainer.add_argparse_args(parser)
 
     # give the module a chance to add own params
     # good practice to define LightningModule speficic params in the module
     parser = CoolSystem.add_model_specific_args(parser)
 
     # parse params
-    hparams = parser.parse_args()
+    args = parser.parse_args()
 
-    main(hparams)
+    main(args)
diff --git a/src/single_file_mnist/README.md b/src/single_file_mnist/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4f7b00df1bbc8bc435e771fb10564eeacd0fb5d5
--- /dev/null
+++ b/src/single_file_mnist/README.md
@@ -0,0 +1,22 @@
+## MNIST    
+In this readme, give instructions on how to run your code.   
+
+#### CPU   
+```bash   
+python mnist_trainer.py     
+```
+
+#### Multiple-GPUs   
+```bash   
+python mnist_trainer.py --gpus 4
+```   
+
+or specific GPUs
+```bash   
+python mnist_trainer.py --gpus '0,3'
+```   
+
+#### On multiple nodes   
+```bash  
+python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
+```   
diff --git a/src/single_file_mnist/__init__.py b/src/single_file_mnist/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/single_file_mnist/mnist.py b/src/single_file_mnist/mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d77f2660508ec8ee156ecceb6276af12e22df9
--- /dev/null
+++ b/src/single_file_mnist/mnist.py
@@ -0,0 +1,104 @@
+"""
+This file defines the core research contribution   
+"""
+import os
+import torch
+from torch.nn import functional as F
+from torch.utils.data import DataLoader
+from torchvision.datasets import MNIST
+import torchvision.transforms as transforms
+from argparse import ArgumentParser
+
+import pytorch_lightning as pl
+
+pl.seed_everything(123)
+
+
+class CoolSystem(pl.LightningModule):
+
+    def __init__(self, hparams):
+        super(CoolSystem, self).__init__()
+        # not the best model...
+        self.hparams = hparams
+        self.l1 = torch.nn.Linear(28 * 28, 10)
+
+    def forward(self, x):
+        return torch.relu(self.l1(x.view(x.size(0), -1)))
+
+    def training_step(self, batch, batch_idx):
+        # REQUIRED
+        x, y = batch
+        y_hat = self.forward(x)
+        loss = F.cross_entropy(y_hat, y)
+
+        tensorboard_logs = {'train_loss': loss}
+
+        return {'loss': loss, 'log': tensorboard_logs}
+
+    def validation_step(self, batch, batch_idx):
+        # OPTIONAL
+        x, y = batch
+        y_hat = self.forward(x)
+        return {'val_loss': F.cross_entropy(y_hat, y)}
+
+    def validation_epoch_end(self, outputs):
+        # OPTIONAL
+        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
+
+        tensorboard_logs = {'avg_val_loss': avg_loss}
+        return {'val_loss': avg_loss, 'log': tensorboard_logs}
+
+    def configure_optimizers(self):
+        # REQUIRED
+        # can return multiple optimizers and learning_rate schedulers
+        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+
+    def train_dataloader(self):
+        # REQUIRED
+        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
+
+    def val_dataloader(self):
+        # OPTIONAL
+        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
+
+    def test_dataloader(self):
+        # OPTIONAL
+        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
+
+    @staticmethod
+    def add_model_specific_args(parent_parser):
+        """
+        Specify the hyperparams for this LightningModule
+        """
+        # MODEL specific
+        parser = ArgumentParser(parents=[parent_parser], add_help=False)
+        parser.add_argument('--learning_rate', default=0.02, type=float)
+        parser.add_argument('--batch_size', default=32, type=int)
+
+        # training specific (for this model)
+        parser.add_argument('--max_nb_epochs', default=2, type=int)
+
+        return parser
+
+
+if __name__ == '__main__':
+    parser = ArgumentParser(add_help=False)
+
+    # add args from trainer
+    parser = pl.Trainer.add_argparse_args(parser)
+
+    # give the module a chance to add own params
+    # good practice to define LightningModule specific params in the module
+    parser = CoolSystem.add_model_specific_args(parser)
+
+    # parse params
+    args = parser.parse_args()
+
+    # init module
+    model = CoolSystem(hparams=args)
+
+    # most basic trainer, uses good defaults
+    trainer = pl.Trainer.from_argparse_args(args)
+    trainer.fit(model)
+
+
diff --git a/src/single_file_mnist/mnist_trainer.py b/src/single_file_mnist/mnist_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac2719b140fb105711162d6a7371df03b365a2c7
--- /dev/null
+++ b/src/single_file_mnist/mnist_trainer.py
@@ -0,0 +1,34 @@
+"""
+This file runs the main training/val loop, etc... using Lightning Trainer    
+"""
+from pytorch_lightning import Trainer, seed_everything
+from argparse import ArgumentParser
+from src.single_file_mnist.mnist import CoolSystem
+
+# sets seeds for numpy, torch, etc...
+# must do for DDP to work well
+seed_everything(123)
+
+def main(args):
+    # init module
+    model = CoolSystem(hparams=args)
+
+    # most basic trainer, uses good defaults
+    trainer = Trainer.from_argparse_args(args)
+    trainer.fit(model)
+
+
+if __name__ == '__main__':
+    parser = ArgumentParser(add_help=False)
+
+    # add args from trainer
+    parser = Trainer.add_argparse_args(parser)
+
+    # give the module a chance to add own params
+    # good practice to define LightningModule speficic params in the module
+    parser = CoolSystem.add_model_specific_args(parser)
+
+    # parse params
+    args = parser.parse_args()
+
+    main(args)