diff --git a/HOWTO.md b/HOWTO.md
index 7f538523bbfed18cc8a6cfed2cdd7fe7bfe28fa8..59f6b4a1e751cfd08cd3155bb8a8b5a186f12393 100644
--- a/HOWTO.md
+++ b/HOWTO.md
@@ -86,4 +86,14 @@ trainer.fit(model)
 ### Trainer   
 It's recommended that you have a single trainer per lightning module. However, you can also use a single trainer for all your LightningModules.    
 
-Check out the [MNIST example](https://github.com/williamFalcon/pytorch-lightning-conference-seed/tree/master/research_seed/mnist).  
+Check out the [MNIST example](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/research_seed/mnist).  
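+
+A minimal sketch of the one-Trainer-per-module pattern (assuming `CoolSystem` is your LightningModule and `hparams` holds its hyperparameters):
+
+```python
+from pytorch_lightning import Trainer
+
+model = CoolSystem(hparams)
+trainer = Trainer(max_epochs=2)  # one Trainer dedicated to this module
+trainer.fit(model)
+```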
diff --git a/README.md b/README.md
index 734a5ad82aa170a1e166d0703010c77ef5abaefd..107ad71acd68461118f0ae88a3183094f3edaf42 100644
--- a/README.md
+++ b/README.md
@@ -3,13 +3,15 @@ Use this seed to refactor your PyTorch research code for:
 - a paper submission  
 - a new research project.     
 
-[Read the usage instructions here](https://github.com/williamFalcon/pytorch-lightning-conference-seed/blob/master/HOWTO.md)
+[Read the usage instructions here](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/blob/master/HOWTO.md)
 
 #### Goals  
 The goal of this seed is to structure ML paper-code the same so that work can easily be extended and replicated.   
 
-###### DELETE EVERYTHING ABOVE FOR YOUR PROJECT   
----   
+### DELETE EVERYTHING ABOVE FOR YOUR PROJECT  
+ 
+---
+
 <div align="center">    
  
 # Your Project Name     
@@ -47,7 +49,7 @@ pip install -r requirements.txt
  Next, navigate to [Your Main Contribution (MNIST here)] and run it.   
  ```bash
 # module folder
-cd src/    
+cd research_mnist/    
 
 # run module (example: mnist as your main contribution)   
 python simplest_mnist.py    
@@ -55,12 +57,11 @@ python simplest_mnist.py
 
 ## Main Contribution      
 List your modules here. Each module contains all code for a full system including how to run instructions.   
-- [Production MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/production_mnist)    
-- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist)  
+- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/research_mnist)  
 
 ## Baselines    
 List your baselines here.   
-- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist) 
+- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/research_mnist) 
 
 ### Citation   
 ```
diff --git a/research_mnist/README.md b/research_mnist/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..029c0970b8c66b4ca7120485c8722a6256bc7e4c
--- /dev/null
+++ b/research_mnist/README.md
@@ -0,0 +1,64 @@
+## Research Seed Folder   
+Create a folder for each contribution (e.g., MNIST, BERT).   
+Each folder will have:
+
+##### contribution_name_trainer.py    
+Runs your LightningModule and abstracts the training loop, distributed training, etc.   
+
+##### contribution_name.py  
+Holds your main contribution.  
+
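+A minimal sketch of what `contribution_name.py` might hold (illustrative names; `simplest_mnist.py` and `mnist_trainer.py` in this folder are the real examples):
+
+```python
+# contribution_name.py -- defines the core research contribution
+import torch
+from torch.nn import functional as F
+import pytorch_lightning as pl
+
+
+class CoolSystem(pl.LightningModule):
+    def __init__(self, hparams):
+        super().__init__()
+        self.hparams = hparams
+        # not the best model...
+        self.l1 = torch.nn.Linear(28 * 28, 10)
+
+    def forward(self, x):
+        return torch.relu(self.l1(x.view(x.size(0), -1)))
+
+    def training_step(self, batch, batch_idx):
+        x, y = batch
+        return {'loss': F.cross_entropy(self(x), y)}
+
+    def configure_optimizers(self):
+        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+```
+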
+## Example  
+The folder here gives an example for MNIST.   
+
+### MNIST    
+In this README, give instructions on how to run your code.   
+
+#### CPU   
+```bash   
+python mnist_trainer.py     
+```
+
+#### Multiple GPUs   
+```bash   
+python mnist_trainer.py --gpus 4
+```   
+
+Or select specific GPUs:
+```bash   
+python mnist_trainer.py --gpus '0,3'
+```   
+
+#### On multiple nodes   
+```bash  
+python mnist_trainer.py --gpus 4 --num_nodes 4 --precision 16
+```   
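+
+These flags assume the trainer script exposes the standard Lightning `Trainer` arguments on its CLI (e.g., via `Trainer.add_argparse_args` and `Trainer.from_argparse_args`); `--precision 16` enables 16-bit mixed precision, which can reduce memory use and speed up training on supported GPUs.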
diff --git a/src/__init__.py b/research_mnist/__init__.py
similarity index 100%
rename from src/__init__.py
rename to research_mnist/__init__.py
diff --git a/src/research_mnist/mnist_data_module.py b/research_mnist/mnist_data_module.py
similarity index 100%
rename from src/research_mnist/mnist_data_module.py
rename to research_mnist/mnist_data_module.py
diff --git a/src/research_mnist/mnist_trainer.py b/research_mnist/mnist_trainer.py
similarity index 89%
rename from src/research_mnist/mnist_trainer.py
rename to research_mnist/mnist_trainer.py
index 6e05f059cad4305de31998bfb39f5316379b9ad3..f3caa6b175867d56963363e72f480ae6b6895712 100644
--- a/src/research_mnist/mnist_trainer.py
+++ b/research_mnist/mnist_trainer.py
@@ -3,8 +3,8 @@ This file runs the main training/val loop, etc... using Lightning Trainer
 """
 from pytorch_lightning import Trainer, seed_everything
 from argparse import ArgumentParser
-from src.research_mnist.mnist import CoolSystem
-from src.research_mnist.mnist_data_module import MNISTDataModule
+from research_mnist.simplest_mnist import CoolSystem
+from research_mnist.mnist_data_module import MNISTDataModule
 
 # sets seeds for numpy, torch, etc...
 # must do for DDP to work well
diff --git a/src/research_mnist/mnist.py b/research_mnist/simplest_mnist.py
similarity index 100%
rename from src/research_mnist/mnist.py
rename to research_mnist/simplest_mnist.py
diff --git a/setup.py b/setup.py
index 7eb01249a3e457117396ac3905c174d3ce42e46a..f224a1678b28c2e8bc3f4b9999877787e68405b9 100644
--- a/setup.py
+++ b/setup.py
@@ -2,12 +2,13 @@
 
 from setuptools import setup, find_packages
 
-setup(name='src',
-      version='0.0.1',
+setup(name='research_mnist',
+      version='0.0.0',
       description='Describe Your Cool Project',
       author='',
       author_email='',
-      url='https://github.com/williamFalcon/pytorch-lightning-conference-seed',  # REPLACE WITH YOUR OWN GITHUB PROJECT LINK
+      url='https://github.com/PyTorchLightning/pytorch-lightning-conference-seed',  # REPLACE WITH YOUR OWN GITHUB PROJECT LINK
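+      # running `pip install -e .` from the repo root should make the `research_mnist` package importable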
       install_requires=[
             'pytorch-lightning'
       ],
diff --git a/src/README.md b/src/README.md
deleted file mode 100644
index d29a9c3c7f528ef6ed10165eb4cdb2751658ac01..0000000000000000000000000000000000000000
--- a/src/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-## Research Seed Folder   
-Create a folder for each contribution (ie: MNIST, BERT, etc...).   
-Each folder will have:
-
-##### contribution_name_trainer.py    
-Runs your LightningModule. Abstracts training loop, distributed training, etc...   
-
-##### contribution_name.py  
-Holds your main contribution   
-
-## Example  
-The folder here gives an example for mnist.   
diff --git a/src/production_mnist/README.md b/src/production_mnist/README.md
deleted file mode 100644
index 12df4d8a85281292b6eb99e9030300c566949944..0000000000000000000000000000000000000000
--- a/src/production_mnist/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-## MNIST    
-In this readme, give instructions on how to run your code.   
-In this case, we remove the datasets from the lightningModule as you
-might want to use the same model with many datasets
-
-#### CPU   
-```bash   
-python mnist_trainer.py     
-```
-
-#### Multiple-GPUs   
-```bash   
-python mnist_trainer.py --gpus 4
-```   
-
-or specific GPUs
-```bash   
-python mnist_trainer.py --gpus '0,3'
-```   
-
-#### On multiple nodes   
-```bash  
-python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
-```   
diff --git a/src/production_mnist/__init__.py b/src/production_mnist/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/production_mnist/mnist.py b/src/production_mnist/mnist.py
deleted file mode 100644
index 609b3bad7e637834d7856b1da000765bdb9a4a1e..0000000000000000000000000000000000000000
--- a/src/production_mnist/mnist.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-This file defines the core research contribution   
-"""
-import os
-import torch
-from torch.nn import functional as F
-from argparse import ArgumentParser
-
-import pytorch_lightning as pl
-
-
-class CoolSystem(pl.LightningModule):
-
-    def __init__(self, hparams):
-        super(CoolSystem, self).__init__()
-        # not the best model...
-        self.hparams = hparams
-        self.l1 = torch.nn.Linear(28 * 28, 10)
-
-    def forward(self, x):
-        return torch.relu(self.l1(x.view(x.size(0), -1)))
-
-    def training_step(self, batch, batch_idx):
-        # REQUIRED
-        x, y = batch
-        y_hat = self.forward(x)
-        loss = F.cross_entropy(y_hat, y)
-
-        tensorboard_logs = {'train_loss': loss}
-
-        return {'loss': loss, 'log': tensorboard_logs}
-
-    def validation_step(self, batch, batch_idx):
-        # OPTIONAL
-        x, y = batch
-        y_hat = self.forward(x)
-        return {'val_loss': F.cross_entropy(y_hat, y)}
-
-    def validation_epoch_end(self, outputs):
-        # OPTIONAL
-        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-
-        tensorboard_logs = {'avg_val_loss': avg_loss}
-        return {'val_loss': avg_loss, 'log': tensorboard_logs}
-
-    def configure_optimizers(self):
-        # REQUIRED
-        # can return multiple optimizers and learning_rate schedulers
-        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
-
-
-    @staticmethod
-    def add_model_specific_args(parent_parser):
-        """
-        Specify the hyperparams for this LightningModule
-        """
-        # MODEL specific
-        parser = ArgumentParser(parents=[parent_parser], add_help=False)
-        parser.add_argument('--learning_rate', default=0.02, type=float)
-        parser.add_argument('--batch_size', default=32, type=int)
-
-        # training specific (for this model)
-        parser.add_argument('--max_nb_epochs', default=2, type=int)
-
-        return parser
-
diff --git a/src/production_mnist/mnist_trainer.py b/src/production_mnist/mnist_trainer.py
deleted file mode 100644
index 2dae87bd47b8372afdc0bfaf5d9bb80b72e0c394..0000000000000000000000000000000000000000
--- a/src/production_mnist/mnist_trainer.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""
-This file runs the main training/val loop, etc... using Lightning Trainer    
-"""
-import os
-from pytorch_lightning import Trainer, seed_everything
-from argparse import ArgumentParser
-from src.production_mnist.mnist import CoolSystem
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-import torchvision.transforms as transforms
-
-# sets seeds for numpy, torch, etc...
-# must do for DDP to work well
-seed_everything(123)
-
-def main(args):
-    # init module
-    model = CoolSystem(hparams=args)
-
-    train_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
-    val_loader = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=args.batch_size)
-
-    # makes all flags available to trainer from cli
-    trainer = Trainer.from_argparse_args(args)
-    trainer.fit(model, train_loader, val_loader)
-
-
-if __name__ == '__main__':
-    parser = ArgumentParser(add_help=False)
-
-    # add args from trainer
-    parser = Trainer.add_argparse_args(parser)
-
-    # give the module a chance to add own params
-    # good practice to define LightningModule speficic params in the module
-    parser = CoolSystem.add_model_specific_args(parser)
-
-    # parse params
-    args = parser.parse_args()
-
-    main(args)
diff --git a/src/research_mnist/README.md b/src/research_mnist/README.md
deleted file mode 100644
index 4f7b00df1bbc8bc435e771fb10564eeacd0fb5d5..0000000000000000000000000000000000000000
--- a/src/research_mnist/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-## MNIST    
-In this readme, give instructions on how to run your code.   
-
-#### CPU   
-```bash   
-python mnist_trainer.py     
-```
-
-#### Multiple-GPUs   
-```bash   
-python mnist_trainer.py --gpus 4
-```   
-
-or specific GPUs
-```bash   
-python mnist_trainer.py --gpus '0,3'
-```   
-
-#### On multiple nodes   
-```bash  
-python mnist_trainer.py --gpus 4 --nodes 4  --precision 16
-```   
diff --git a/src/research_mnist/__init__.py b/src/research_mnist/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/simplest_mnist.py b/src/simplest_mnist.py
deleted file mode 100644
index 8ba4f8d875c4827a497c26a58267ede5b0d25472..0000000000000000000000000000000000000000
--- a/src/simplest_mnist.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-This file defines the core research contribution   
-"""
-import os
-import torch
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-import torchvision.transforms as transforms
-from argparse import ArgumentParser
-import pytorch_lightning as pl
-
-pl.seed_everything(123)
-
-
-class CoolSystem(pl.LightningModule):
-
-    def __init__(self, hparams):
-        super(CoolSystem, self).__init__()
-        self.hparams = hparams
-        self.l1 = torch.nn.Linear(28 * 28, 10)
-
-    def forward(self, x):
-        return torch.relu(self.l1(x.view(x.size(0), -1)))
-
-    def training_step(self, batch, batch_idx):
-        x, y = batch
-        y_hat = self(x)
-        loss = F.cross_entropy(y_hat, y)
-        tensorboard_logs = {'train_loss': loss}
-        return {'loss': loss, 'log': tensorboard_logs}
-
-    def configure_optimizers(self):
-        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
-
-
-if __name__ == '__main__':
-    train_data = DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
-
-    parser = ArgumentParser(add_help=False)
-    parser.add_argument('--learning_rate', default=0.02, type=float)
-
-    # add args from trainer
-    parser = pl.Trainer.add_argparse_args(parser)
-
-    # parse params
-    args = parser.parse_args()
-
-    # init module
-    model = CoolSystem(hparams=args)
-
-    # most basic trainer, uses good defaults
-    trainer = pl.Trainer.from_argparse_args(args)
-    trainer.fit(model, train_data)
-
-