diff --git a/.gitignore b/.gitignore
index c7e1583db610bb00fd67605c59f040a5b395d6aa..73c615f495e411a8b0f83d29460d4279dce974d0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -120,3 +120,8 @@ venv.bak/
 
 # IDEs
 .idea
+
+# seed project
+lightning_logs/
+MNIST
+.DS_Store
diff --git a/README.md b/README.md
index f0466bbd2ba11dc92c8bc880fd3f6b0c004a46ca..fe7bf4977dd18264db595b49789a05aae058e406 100644
--- a/README.md
+++ b/README.md
@@ -47,19 +47,23 @@ pip install -r requirements.txt
  Next, navigate to [Your Main Contribution (MNIST here)] and run it.   
  ```bash
 # module folder
-cd research_seed/mnist/   
+cd src/    
 
 # run module (example: mnist as your main contribution)   
-python mnist_trainer.py    
+python simplest_mnist.py    
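+
+# (optional) inspect training curves; Lightning writes TensorBoard logs to lightning_logs/ by default
+tensorboard --logdir lightning_logs/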
 ```
 
 ## Main Contribution      
 List your modules here. Each module contains all code for a full system including how to run instructions.   
-- [MNIST](https://github.com/williamFalcon/pytorch-lightning-conference-seed/tree/master/research_seed/mnist)  
+- [Production MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/production_mnist)  
+- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist)  
 
 ## Baselines    
 List your baselines here.   
-- [MNIST_baseline](https://github.com/williamFalcon/pytorch-lightning-conference-seed/tree/master/research_seed/baselines/mnist_baseline)  
+- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist) 
 
 ### Citation   
 ```
diff --git a/src/baselines/README.md b/src/baselines/README.md
deleted file mode 100644
index 89a1e9a4049edf23f49415f83f9c94baf0254981..0000000000000000000000000000000000000000
--- a/src/baselines/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Baselines    
-Set up a folder for each baseline. Each baseline needs a trainer and LightningModule   
-
diff --git a/src/baselines/__init__.py b/src/baselines/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/baselines/mnist_baseline/README.md b/src/baselines/mnist_baseline/README.md
deleted file mode 100644
index 3945ca712e2957ca82258411870c0133e7407616..0000000000000000000000000000000000000000
--- a/src/baselines/mnist_baseline/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-## MNIST Baseline    
-In this readme, give instructions on how to run your code.   
-
-#### CPU   
-```bash   
-python mnist_baseline_trainer.py     
-```
-
-#### Multiple-GPUs   
-```bash   
-python mnist_baseline_trainer.py --gpus '0,1,2,3'  
-```   
-
-#### On multiple nodes   
-```bash  
-python mnist_baseline_trainer.py --gpus '0,1,2,3' --nodes 4  
-```   
diff --git a/src/baselines/mnist_baseline/__init__.py b/src/baselines/mnist_baseline/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/baselines/mnist_baseline/mnist_baseline.py b/src/baselines/mnist_baseline/mnist_baseline.py
deleted file mode 100644
index e1d54bda640669e01b026046b3b8f00fbf12a0d9..0000000000000000000000000000000000000000
--- a/src/baselines/mnist_baseline/mnist_baseline.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-This file defines the core research contribution   
-"""
-import os
-import torch
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-import torchvision.transforms as transforms
-from argparse import ArgumentParser
-
-import pytorch_lightning as pl
-
-
-class CoolSystem(pl.LightningModule):
-
-    def __init__(self, hparams):
-        super(CoolSystem, self).__init__()
-        # not the best model...
-        self.hparams = hparams
-        self.l1 = torch.nn.Linear(28 * 28, 10)
-
-    def forward(self, x):
-        return torch.relu(self.l1(x.view(x.size(0), -1)))
-
-    def training_step(self, batch, batch_idx):
-        # REQUIRED
-        x, y = batch
-        y_hat = self.forward(x)
-        return {'loss': F.cross_entropy(y_hat, y)}
-
-    def validation_step(self, batch, batch_idx):
-        # OPTIONAL
-        x, y = batch
-        y_hat = self.forward(x)
-        return {'val_loss': F.cross_entropy(y_hat, y)}
-
-    def validation_end(self, outputs):
-        # OPTIONAL
-        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-        return {'avg_val_loss': avg_loss}
-
-    def configure_optimizers(self):
-        # REQUIRED
-        # can return multiple optimizers and learning_rate schedulers
-        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
-
-    def train_dataloader(self):
-        # REQUIRED
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
-
-    def val_dataloader(self):
-        # OPTIONAL
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
-
-    def test_dataloader(self):
-        # OPTIONAL
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
-
-    @staticmethod
-    def add_model_specific_args(parent_parser):
-        """
-        Specify the hyperparams for this LightningModule
-        """
-        # MODEL specific
-        parser = ArgumentParser(parents=[parent_parser])
-        parser.add_argument('--learning_rate', default=0.02, type=float)
-        parser.add_argument('--batch_size', default=32, type=int)
-
-        # training specific (for this model)
-        parser.add_argument('--max_nb_epochs', default=2, type=int)
-
-        return parser
-
diff --git a/src/baselines/mnist_baseline/mnist_baseline_trainer.py b/src/baselines/mnist_baseline/mnist_baseline_trainer.py
deleted file mode 100644
index 152ae4d1caaf6c7bee5fc6e496baa17917518290..0000000000000000000000000000000000000000
--- a/src/baselines/mnist_baseline/mnist_baseline_trainer.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-This file runs the main training/val loop, etc... using Lightning Trainer    
-"""
-from pytorch_lightning import Trainer
-from argparse import ArgumentParser
-from research_seed.mnist.mnist import CoolSystem
-
-
-def main(hparams):
-    # init module
-    model = CoolSystem(hparams)
-
-    # most basic trainer, uses good defaults
-    trainer = Trainer(
-        max_nb_epochs=hparams.max_nb_epochs,
-        gpus=hparams.gpus,
-        nb_gpu_nodes=hparams.nodes,
-    )
-    trainer.fit(model)
-
-
-if __name__ == '__main__':
-    parser = ArgumentParser(add_help=False)
-    parser.add_argument('--gpus', type=str, default=None)
-    parser.add_argument('--nodes', type=int, default=1)
-
-    # give the module a chance to add own params
-    # good practice to define LightningModule speficic params in the module
-    parser = CoolSystem.add_model_specific_args(parser)
-
-    # parse params
-    hparams = parser.parse_args()
-
-    main(hparams)
diff --git a/src/research_mnist/mnist.py b/src/research_mnist/mnist.py
index 00ac111aae204b64dbb7d04800c0afa6b55e69e2..d441101d3af99b6910c5965ae2f8abd1faadba9c 100644
--- a/src/research_mnist/mnist.py
+++ b/src/research_mnist/mnist.py
@@ -46,6 +46,20 @@ class CoolSystem(pl.LightningModule):
         tensorboard_logs = {'avg_val_loss': avg_loss}
         return {'val_loss': avg_loss, 'log': tensorboard_logs}
 
+    def test_step(self, batch, batch_idx):
+        # OPTIONAL
+        x, y = batch
+        y_hat = self.forward(x)
+        return {'test_loss': F.cross_entropy(y_hat, y)}
+
+    def test_epoch_end(self, outputs):
+        # OPTIONAL
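+        # average the per-batch 'test_loss' values returned by test_step above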
+        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
+
+        tensorboard_logs = {'avg_test_loss': avg_loss}
+        return {'test_loss': avg_loss, 'log': tensorboard_logs}
+
     def configure_optimizers(self):
         # REQUIRED
         # can return multiple optimizers and learning_rate schedulers
diff --git a/src/research_mnist/mnist_trainer.py b/src/research_mnist/mnist_trainer.py
index ac2719b140fb105711162d6a7371df03b365a2c7..74396b93af5ba4ca8f3d0c0b2ccc14c6420ddaa8 100644
--- a/src/research_mnist/mnist_trainer.py
+++ b/src/research_mnist/mnist_trainer.py
@@ -3,7 +3,7 @@ This file runs the main training/val loop, etc... using Lightning Trainer
 """
 from pytorch_lightning import Trainer, seed_everything
 from argparse import ArgumentParser
-from srv.mnist.mnist import CoolSystem
+from src.research_mnist.mnist import CoolSystem
 
 # sets seeds for numpy, torch, etc...
 # must do for DDP to work well
@@ -17,6 +17,9 @@ def main(args):
     trainer = Trainer.from_argparse_args(args)
     trainer.fit(model)
 
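+    # run the test loop; uses the test_step / test_epoch_end hooks defined in CoolSystem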
+    trainer.test()
+
 
 if __name__ == '__main__':
     parser = ArgumentParser(add_help=False)
diff --git a/src/simplest_mnist.py b/src/simplest_mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..34becdf2e4cb9fbe87b24d3a9e5cd5552b564bbf
--- /dev/null
+++ b/src/simplest_mnist.py
@@ -0,0 +1,59 @@
+"""
+This file defines the core research contribution   
+"""
+import os
+import torch
+from torch.nn import functional as F
+from torch.utils.data import DataLoader
+from torchvision.datasets import MNIST
+import torchvision.transforms as transforms
+from argparse import ArgumentParser
+import pytorch_lightning as pl
+
+pl.seed_everything(123)
+
+
+class CoolSystem(pl.LightningModule):
+
+    def __init__(self, hparams):
+        super(CoolSystem, self).__init__()
+        self.hparams = hparams
+        self.l1 = torch.nn.Linear(28 * 28, 10)
+
+    def forward(self, x):
+        return torch.relu(self.l1(x.view(x.size(0), -1)))
+
+    def training_step(self, batch, batch_idx):
+        x, y = batch
+        y_hat = self(x)
+        loss = F.cross_entropy(y_hat, y)
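+        # values under the 'log' key are forwarded to the logger (TensorBoard by default)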
+        tensorboard_logs = {'train_loss': loss}
+        return {'loss': loss, 'log': tensorboard_logs}
+
+    def configure_optimizers(self):
+        # REQUIRED
+        # can return multiple optimizers and learning_rate schedulers
+        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+
+
+if __name__ == '__main__':
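+    # MNIST downloads into the current working directory (the MNIST/ folder is gitignored)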
+    train_data = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
+
+    parser = ArgumentParser(add_help=False)
+    parser.add_argument('--learning_rate', default=0.02, type=float)
+
+    # add args from trainer
+    parser = pl.Trainer.add_argparse_args(parser)
+
+    # parse params
+    args = parser.parse_args()
+
+    # init module
+    model = CoolSystem(hparams=args)
+
+    # most basic trainer, uses good defaults
+    trainer = pl.Trainer.from_argparse_args(args)
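+    # pass the DataLoader straight to fit() instead of defining train_dataloader() on the module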
+    trainer.fit(model, train_data)
diff --git a/src/single_file_mnist/mnist.py b/src/single_file_mnist/mnist.py
deleted file mode 100644
index 07d77f2660508ec8ee156ecceb6276af12e22df9..0000000000000000000000000000000000000000
--- a/src/single_file_mnist/mnist.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""
-This file defines the core research contribution   
-"""
-import os
-import torch
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-import torchvision.transforms as transforms
-from argparse import ArgumentParser
-
-import pytorch_lightning as pl
-
-pl.seed_everything(123)
-
-
-class CoolSystem(pl.LightningModule):
-
-    def __init__(self, hparams):
-        super(CoolSystem, self).__init__()
-        # not the best model...
-        self.hparams = hparams
-        self.l1 = torch.nn.Linear(28 * 28, 10)
-
-    def forward(self, x):
-        return torch.relu(self.l1(x.view(x.size(0), -1)))
-
-    def training_step(self, batch, batch_idx):
-        # REQUIRED
-        x, y = batch
-        y_hat = self.forward(x)
-        loss = F.cross_entropy(y_hat, y)
-
-        tensorboard_logs = {'train_loss': loss}
-
-        return {'loss': loss, 'log': tensorboard_logs}
-
-    def validation_step(self, batch, batch_idx):
-        # OPTIONAL
-        x, y = batch
-        y_hat = self.forward(x)
-        return {'val_loss': F.cross_entropy(y_hat, y)}
-
-    def validation_epoch_end(self, outputs):
-        # OPTIONAL
-        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-
-        tensorboard_logs = {'avg_val_loss': avg_loss}
-        return {'val_loss': avg_loss, 'log': tensorboard_logs}
-
-    def configure_optimizers(self):
-        # REQUIRED
-        # can return multiple optimizers and learning_rate schedulers
-        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
-
-    def train_dataloader(self):
-        # REQUIRED
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
-
-    def val_dataloader(self):
-        # OPTIONAL
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
-
-    def test_dataloader(self):
-        # OPTIONAL
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=self.hparams.batch_size)
-
-    @staticmethod
-    def add_model_specific_args(parent_parser):
-        """
-        Specify the hyperparams for this LightningModule
-        """
-        # MODEL specific
-        parser = ArgumentParser(parents=[parent_parser], add_help=False)
-        parser.add_argument('--learning_rate', default=0.02, type=float)
-        parser.add_argument('--batch_size', default=32, type=int)
-
-        # training specific (for this model)
-        parser.add_argument('--max_nb_epochs', default=2, type=int)
-
-        return parser
-
-
-if __name__ == '__main__':
-    parser = ArgumentParser(add_help=False)
-
-    # add args from trainer
-    parser = Trainer.add_argparse_args(parser)
-
-    # give the module a chance to add own params
-    # good practice to define LightningModule speficic params in the module
-    parser = CoolSystem.add_model_specific_args(parser)
-
-    # parse params
-    args = parser.parse_args()
-
-    # init module
-    model = CoolSystem(hparams=args)
-
-    # most basic trainer, uses good defaults
-    trainer = pl.Trainer.from_argparse_args(args)
-    trainer.fit(model)
-
-
diff --git a/src/single_file_mnist/mnist_trainer.py b/src/single_file_mnist/simplest_mnist.py
similarity index 100%
rename from src/single_file_mnist/mnist_trainer.py
rename to src/single_file_mnist/simplest_mnist.py