diff --git a/AdditionalPDFs.py b/AdditionalPDFs.py
new file mode 100644
index 0000000000000000000000000000000000000000..2934d4c22f3f5e9b9d77c92d44216b9c2be0f08b
--- /dev/null
+++ b/AdditionalPDFs.py
@@ -0,0 +1,127 @@
+import numpy as np
+from scipy.stats import rv_discrete, rv_continuous, uniform
+import scipy.special as sc
+import matplotlib.pyplot as plt
+
+
+from scipy.stats._distn_infrastructure import get_distribution_names
+from scipy._lib._util import _lazywhere
+
+
+class gpd_gen(rv_discrete):
+    def _argcheck(self, mu, lbda):
+        # elementwise checks so array-valued shape parameters are handled correctly
+        return (mu >= 0.0) & (lbda >= 0.0) & (lbda <= 1.0)
+
+    def _rvs(self, mu, lbda):
+        population = np.asarray(
+            self._random_state.poisson(mu, self._size)
+        )
+        if population.shape == ():
+            population = population.reshape(-1)
+        offspring = population.copy()
+        # branching process: each discharge spawns Poisson(lbda) offspring,
+        # iterated until extinction (generalized Poisson sampling)
+        while np.any(offspring > 0):
+            # scipy probability distributions are not ufuncs, so sample each
+            # Poisson(lbda * offspring) branch individually
+            offspring[:] = [
+                self._random_state.poisson(m)
+                for m in lbda*offspring
+            ]
+            population += offspring
+        return population
+
+    def _pmf(self, k, mu, lbda):
+        return np.exp(self._logpmf(k, mu, lbda))
+
+    def _logpmf(self, k, mu, lbda):
+        mu_pls_klmb = mu + lbda*k
+        return np.log(mu) + sc.xlogy(k-1, mu_pls_klmb) - mu_pls_klmb - sc.gammaln(k+1)
+
+    def _munp(self, n, mu, lbda):
+        if n == 1:
+            return mu/(1-lbda)
+        elif n == 2:
+            return (mu/(1-lbda))**2+mu/(1-lbda)**3
+
+
+gpoisson = gpd_gen(name='gpoisson')
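+
+# Minimal usage sketch (assumes the standard scipy.stats rv_discrete interface;
+# the parameter values below are illustrative only):
+#
+#     samples = gpoisson.rvs(mu=2.0, lbda=0.3, size=1000)
+#     p0 = gpoisson.pmf(0, mu=2.0, lbda=0.3)   # equals exp(-mu) for k = 0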
+
+    
+class borel_gen(rv_discrete):
+    def _argcheck(self, mu):
+        return ((mu > 0) & (mu<1))
+
+    def _logpmf(self, k, mu):
+        n = k+1
+        Pk = sc.xlogy(n-1, mu*n) - sc.gammaln(n + 1) - mu*n
+        return Pk
+
+    def _pmf(self, k, mu):
+        return np.exp(self._logpmf(k, mu))
+
+#     def _rvs(self, mu, size=None, random_state=None):
+#         u = uniform.rvs(loc=0, scale = 1, size=size)
+#         cum = np.cumsum([self._pmf(_k, mu) for _k in range(0, 100)])
+#         print(cum)
+#         rnd = [ np.argmax( cum>=_u ) for _u in u ]
+#         return rnd
+    
+    def _rvs(self, mu, size=None, random_state=None, epsilon=1e-4):
+        # inverse-transform sampling: draw u in [0, 1-epsilon), accumulate the
+        # pmf until the running CDF covers max(u), then invert via argmax
+        _u = uniform.rvs(loc=0, scale=1-epsilon, size=size)
+        _sum = 0
+        _k=0
+        _elem = []
+        _max_u = np.max(_u)
+        
+        while(_sum<_max_u):
+            _pmf = self._pmf(_k, mu)
+            _elem.append(_pmf)
+            _sum+=_pmf
+            _k+=1
+            
+        _cum = np.cumsum(_elem)
+        _rnd = [ np.argmax( _cum>=__u ) for __u in _u ]
+        
+        return _rnd
+
+
+    def _stats(self, mu):
+        _mu = 1/(1-mu)
+        _var = mu/(1-mu)**3
+        tmp = np.asarray(mu)
+        mu_nonzero = ((tmp > 0) & (tmp < 1))
+        # g1 and g2: Lagrangian Probability Distributions, ISBN 978-0-8176-4365-2, page 159
+        g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: (1 + 2*x)/np.sqrt(x*(1 - x)), np.inf)
+        g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 3 + (1 + 8*x + 6*x**2)/(x*(1 - x)), np.inf)
+        return _mu, _var, g1, g2
+
+
+borel = borel_gen(name='borel')
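+
+# Minimal usage sketch (assumes the standard scipy.stats rv_discrete interface;
+# mu is the Borel branching parameter, 0 < mu < 1):
+#
+#     xt_counts = borel.rvs(mu=0.2, size=1000)
+#     p0 = borel.pmf(0, mu=0.2)   # equals exp(-mu) for k = 0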
+    
+  
+    
+class erlang_gen(rv_continuous):
+    # continuous Erlang/gamma density with shape parameter a:
+    # pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
+    def _pdf(self, x, a):
+        return np.exp(self._logpdf(x, a))
+
+    def _logpdf(self, x, a):
+        return sc.xlogy(a - 1.0, x) - x - sc.gammaln(a)
+
+   
+    
+    
+  
+
+#     def _rvs(self, mu, nu, size=None, random_state=None):
+#         u = scipy.stats.uniform.rvs(loc=0, scale = 1, size=size)
+#         cum = np.cumsum([self._pmf(_k, mu, nu) for _k in range(0, 100)])
+#         rnd = [ np.argmax( cum>=_u ) for _u in u ]
+#         return rnd
+
+pairs = list(globals().items())
+_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
+
+__all__ = _distn_names + _distn_gen_names
diff --git a/Example.png b/Example.png
new file mode 100644
index 0000000000000000000000000000000000000000..f6e165d7f9a83aeb4e4875c1243cb9c9d6fc1f98
Binary files /dev/null and b/Example.png differ
diff --git a/Example.py b/Example.py
new file mode 100644
index 0000000000000000000000000000000000000000..36621728347ebff3459d791b1c1fe14cb9c700ba
--- /dev/null
+++ b/Example.py
@@ -0,0 +1,93 @@
+from LightSimtastic import SiPMSimulation
+import matplotlib.pyplot as plt
+
+
+#################################
+# THE SIMULATION TOOL TAKES A DICTIONARY AS AN INPUT, WITH EACH OF THE VARIABLES AS KEYS.
+# FOR EACH ELEMENT YOU ADD TO THE LISTS, ANOTHER SIMULATION WILL BE PERFORMED. 
+# THIS ALLOWS SCANS THROUGH PARAMETERS (SEE THE COMMENTED SCAN SKETCH AFTER THE DICTIONARY).
+#################################
+
+variables={
+"Name_Sim":[0], #THIS CAN BE ANYTHING YOU LIKE
+"mu":[7], # MEAN NUMBER OF GEIGER DISCHARGES
+"ppXT":[0.0], # PROBABILITY OF PROMPT CROSS-TALK 
+"pdXT":[0.2], #PROBABILITY OF DELAYED CROSS-TALK
+"taudXT":[25], #TIME CONSTANT FOR DELAYED CROSS-TALK (NS)
+"rdXT":[0.5], #FRACTION OF DELAYED CROSS-TALK FROM ADJACENT CELLS
+"pAp":[0.15], #PROBABILITY OF AFTERPULSE
+"tauAp":[7.5], #TIME CONSTANT OF AFTERPULSE
+"taur":[20], # SIPM RECOVERY TIME CONSTANT
+"Sig0":[0.075], #WIDTH OF PEDESTAL [NORMALISED ADC]
+"Sig1":[0.02], # INCREASE IN PEAK WIDTH PER ADDITIONAL DISCHARGE [NORMALISED ADC]
+"DCR":[0.0], # DARK COUNT RATE [GHZ]
+"Sigt":[0.02], # ELECTRONICS NOISE FOR CURRENT TRANSIENT (FOR TRANSIENT)
+"GSig":[0.75], # WIDTH OF ELECTRONICS NOISE TRANSFER FUNCTION (FOR TRANSIENT)
+"tslow":[20], # TIME CONSTANT FOR SLOW PART OF PULSE
+"tfast":[1.5], # TIME CONSTANT FOR FAST PART OF PULSE
+"rfast":[0.2], # PROPORTION OF SLOW/FAST
+"start_tgate":[-5], # GATE START TIME [NS]
+"len_tgate":[100], # LENGTH OF GATE
+"t0":[100,], # STARTING POINT OF GATE
+"tl0":[0], #GAUSSIAN MEAN OF PRIMARY GEIGER DICHARGE TIME DISTRIBUTION
+"tl1":[0.1], #GAUSSIAN WIDTH OF PRIMARY GEIGER DICHARGE TIME DISTRIBUTION
+"tl2":[0], #FREE PARAMETER
+"Gen_mu":["Poisson"], # NUMBER PRIMARY GEIGER DISCHARGE PDF
+"Gen_tmu":["Gauss"], # TIME OF PRIMARY GEIGER DISCHARGE PDF
+"Gen_gain":["Gauss"],  # GAIN PDF (FOR TRANSIENT)
+"Gen_npXT":["Binomial"], #NUMBER PROMPT X-TALK DISCHARGE DISTRIBUTION
+"Gen_ndXT":["Binomial"],  #NUMBER PROMPT X-TALK DISCHARGE DISTRIBUTION
+"Gen_tdXT":["Exp"], #TIME DELAYED X-TALK DISTRIBUTION
+"Gen_nAP":["Binom"],  #NUMBER AFTER DISTRIBUTION
+"Gen_tAP":["Exp"],  #AFTERPULSE TIME DISTRIBUTION
+"Gen_noise":["Gauss"] #ELECTRONIC NOISE DISTRIBUTION (FOR TRANSIENT)
+}
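+
+#################################
+# PARAMETER SCAN SKETCH (ASSUMED USAGE, NOT RUN HERE): TO SCAN mu OVER THREE
+# VALUES, GIVE EVERY KEY ONE ENTRY PER SIMULATION, E.G.
+#
+# variables_scan = {key: value*3 for key, value in variables.items()}
+# variables_scan["mu"] = [3, 5, 7]
+# variables_scan["Name_Sim"] = [0, 1, 2]
+#################################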
+
+#################################
+# CREATE A SIPM SIMULATION CLASS OBJECT
+#################################
+        
+s = SiPMSimulation()
+
+#################################
+# ADD VARIABLES
+#################################
+
+s.AddVariables(variables)
+
+n_events = int(1e5)
+
+
+#################################
+# SIMULATE
+#################################
+
+s.Simulate(n_events, transients=False)
+
+#################################
+# EXTRACT SIMULATION DATAFRAME
+#################################
+
+df = s.pars
+
+#################################
+# EXTRACT CHARGE SPECTRUM
+#################################
+
+Qs = df.iloc[0]["ChargeSpectrum"]
+
+#################################
+# PLOT
+#################################
+
+plt.figure(figsize=(15.5,10.5))
+plt.hist(Qs, label="Simulated Charge Spectrum", bins=1000)
+plt.yscale("log")
+plt.xticks(fontsize=25)
+plt.yticks(fontsize=25)
+plt.xlabel("# GD", fontsize=25)
+plt.legend(fontsize=25)      # call legend before saving so it appears in the output file
+plt.savefig("./Example.png")
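+
+#################################
+# OPTIONAL: TRANSIENTS (SKETCH OF ASSUMED USAGE, NOT RUN ABOVE).
+# SIMULATING WITH transients=True ALSO FILLS THE "Transients" COLUMN, WHICH
+# STACKS THE SIMULATED WAVEFORMS TOGETHER WITH THEIR TIME AXIS.
+#################################
+#
+# s.Simulate(n_events, transients=True)
+# transients = s.pars.iloc[0]["Transients"]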
diff --git a/LightSimtastic.py b/LightSimtastic.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbf2310b675c2f78056f5459a2ff3ec5a0984d5f
--- /dev/null
+++ b/LightSimtastic.py
@@ -0,0 +1,777 @@
+import time
+import os
+import glob
+import numpy as np
+import pandas as pd
+import matplotlib
+import matplotlib.pyplot as plt  
+import scipy
+import scipy.stats as stats
+import scipy.signal as signal
+from tqdm import tqdm, notebook
+from itertools import repeat, chain, product, zip_longest
+from operator import add
+from types import SimpleNamespace 
+import json
+import re
+import codecs
+import random
+from AdditionalPDFs import borel
+
+np.set_printoptions(suppress=True)
+
+                    
+class SiPMSimulation:
+    
+    
+
+
+    def __init__(self):
+        
+        '"Name_Sim"', '"mu"', '"ppXT"', '"pdXT"', '"taudXT"', '"rdXT"', '"pAp"', '"tauAp"', '"taur"', '"Sig0"', '"Sig1"', '"DCR"', '"Sigt"', '"GSig"', '"tslow"', '"tfast"', '"rfast"', '"start_tgate"', '"len_tgate"', '"t0"', '"tl0"', '"tl1"', '"tl2"', 
+        
+        
+    
+        self.VariableDictionary ={
+            "Name_Sim": {
+                "Description": "Name for the simulation.",
+                "Unit": None,  
+            },
+            
+            "mu": {
+                "Description": "Mean Number of Primary Geiger discharges in SiPM Cell",
+                "Unit": "Units of Charge, Normalized to Gain [Q/G]"
+            },
+            "ppXT": {
+                "Description": "Characteristic Probability of Prompt Cross Talk in SiPM Cell",
+                "Unit": None
+            },
+            "pdXT": {
+                "Description": "Characteristic Probability of Delayed Cross Talk in SiPM Cell",
+                "Unit": None
+            },
+            "taudXT":{
+                "Description": "Time Constant of Delayed Cross Talk in SiPM Cell",
+                "Unit": "Nanoseconds [ns]"
+            },
+            "rdXT":{
+                "Description": " Fraction of Delayed Cross Talk from Adjacent SiPM Cells",
+                "Unit": None
+            },
+            "pAp":{
+                "Description": " Probability of Afterpulse occurrence in SiPM Cell",
+                "Unit": None
+            },
+            "tauAp":{
+                "Description": " Time delay constant of Afterpulsing in SiPM Cell",
+                "Unit": None
+            },
+            "taur":{
+                "Description": " PDE Recovery constant in SiPM Cell",
+                "Unit": None
+            },
+            "Sig0":{
+                "Description": "Fluctuation in Gain of Geiger Discharge",
+                "Unit": "Units of Charge, Normalized to Gain [Q/G]"
+            },
+            "Sig1":{
+                "Description": " Pedestal width due to Noise from Electronic Components",
+                "Unit": "Units of Charge, Normalized to Gain [Q/G]"
+            },
+            "DCR":{
+                "Description": "Dark Count Rate",
+                "Unit": "Gigahertz [GHz]"
+            },
+            "Sigt":{
+                "Description": "Electronics Noise for Current Transient",
+                "Unit": "Nanoseconds [ns]"
+            },
+            "GSig":{
+                "Description": "Width of Electronics Transfer Function",
+                "Unit": "Nanoseconds [ns]"
+            },
+            "tslow":{
+                "Description": "Time constant of Slow Component of SiPM pulse shape.",
+                "Unit": "Nanoseconds [ns]"
+                
+            },
+            "tfast":{
+                "Description": "Time constant of Fast Component of SiPM pulse shape.",
+                "Unit": "Nanoseconds [ns]"
+                
+            },
+            "rfast":{
+                "Description": "Fraction of Fast Component contribution to SiPM pulse shape.",
+                "Unit": None
+                
+            },
+            "start_tgate":{
+                "Description": "Start time of SiPM Integration Window",
+                "Unit": "Nanoseconds [ns]"
+                
+            },
+            "len_tgate":{
+                "Description": "Duration of SiPM Integration Window",
+                "Unit": "Nanoseconds [ns]"
+                
+            },
+            "t0":{
+                "Description": "Duration before Dark Count Evaluation",
+                "Unit": "Nanoseconds [ns]"
+                
+            },
+            "tl0":{
+                "Description": "Free Parameter 0 for Light Pulse Distribution",
+                "Unit": "Nanoseconds [ns]"
+                
+            },
+            
+            "tl1":{
+                "Description": "Free Parameter 1 for Light Pulse Distribution",
+                "Unit": "Nanoseconds [ns]"
+                
+            },
+            "tl2":{
+                "Description": "Free Parameter 2 for Light Pulse Distribution",
+                "Unit": "Nanoseconds [ns]"
+                
+            },
+            
+            "Gen_mu":{
+                "Description": "Function for the random sampling of the number of Primary Gieger Discharges",
+                "Unit": None,
+                "Options": ["Poisson", "Gauss", "Fixed"]
+                
+            },
+            
+            "Gen_tmu":{
+                "Description":"Function for the random sampling of the time of primary Gieger discharges",
+                "Unit": None,
+                "Options": ["Gauss", "Exp"]
+                
+            },
+            
+            "Gen_gain":{
+                "Description":"Function for the random sampling of the SiPM Gain",
+                "Unit": None,
+                "Options": ["Gauss"]
+                
+            },
+            
+            "Gen_npXT":{
+                "Description":"Function for the random sampling of the prompt cross-talk",
+                "Unit": None,
+                "Options": ["Poisson", "Binom", "Borel"]
+                
+            },
+            
+            "Gen_ndXT":{
+                "Description":"Function for the random sampling of the number delayed cross-talk discharges",
+                "Unit": None,
+                "Options": ["Poisson", "Binom", "Borel"]
+                
+            },
+            
+            "Gen_tdXT":{
+                "Description":"Function for the random sampling of the time of delayed cross-talk discharges",
+                "Unit": None,
+                "Options": ["Exp"]
+                
+            }
+
+        }
+        
+    
+
+        self.pb_ag_text=["Sampling Primary Geiger Distributions...", 
+                     "Sampling SiPM Noise Distributions...",
+                     "Calculating Prompt Cross Talk...", 
+                     "Calculating Delayed Cross Talk...",
+                     "Calculating Afterpulses...",
+                     "Combining SiPM Noise...",
+                     "All Geiger Discharges Calculated.",
+                    ]
+        
+        self.pb_Q_text=["Sampling Charge Distributions...", 
+                     "Calculating Charge from Geiger Discharges...", 
+                     "Charge Spectrum Calculated."
+                        
+                    ]
+        
+        self.pb_I_text=["Sampling Current Distributions...", 
+                     "Calculating Noise from Geiger Discharges...", 
+                     "Convolving Electronic Noise Function..."
+                     "Transient Calculated."
+                    ]
+
+
+        self.out_columns = ["GeigerArray", 
+                            "ChargeSpectrum", 
+                            "Transients", 
+                            "TimeElapsed_GeigerArray",
+                            "TimeElapsed_ChargeSpectrum",
+                            "TimeElapsed_Transients",
+                            "NSim"
+                           ]
+      
+        self.pars = pd.DataFrame(columns = list(self.VariableDictionary.keys())+ self.out_columns)
+       
+        
+        self.pars["GeigerArray"] = self.pars["GeigerArray"].astype(object)
+        self.pars["ChargeSpectrum"] = self.pars["ChargeSpectrum"].astype(object)
+        self.pars["Transients"] = self.pars["Transients"].astype(object)
+        
+
+        
+   
+    def ReadMathCad(self, inputfile):
+        variables = {}
+        with codecs.open(inputfile, encoding='utf-8') as file:
+            def _SetFloating(string):
+                try:
+                    string=float(string)
+                    return string
+                except ValueError:
+                    return string
+                
+            def _ToFloats(list_strings):
+                return [_SetFloating(string) for string in list_strings]
+    
+            def _RemoveNonAscii(string): 
+                return "".join(i for i in string if ord(i)<126 and ord(i)>31)
+    
+            lines = file.read()
+   
+            for line in lines.splitlines(True):
+                line = _RemoveNonAscii(line)
+                elems = line.replace('"', "").split(",")
+                elems = _ToFloats(elems)
+                
+                if elems[0] in self.VariableDictionary:
+                    variables[elems[0]] = elems[1::]
+        
+        self.AddVariables(variables)
+
+
+
+            
+    def ClearVariables(self):
+        self.pars = self.pars.iloc[0:0]
+        
+    def AddVariables(self, variables):
+        if not isinstance(variables, dict):
+            raise Exception("Expected a dictionary of parameters. Please enter a dictionary with all of the following elements: {0}.".format(
+                ", ".join(list(self.VariableDictionary.keys()))))
+                
+
+        missing = np.setdiff1d(list(self.VariableDictionary.keys()),list(variables.keys()))
+        
+        if (len(missing)>0):
+            raise Exception("Variables are missing from the entered dictionary. Please add following elements: {0}.".format(
+                ", ".join(list(missing)))) 
+        
+        for key, value in variables.items():
+            if not isinstance(value, list):
+                raise Exception("Expected a list of simulation parameters, but type {0} was found for variable {1}. Please enter your simulation parameters in a list.".format(type(value), key))
+        
+        self.ClearVariables()
+        
+        for key, value in variables.items():
+            self.pars[key] = value
+               
+    def GetInformation(self, key):
+        try:
+            temp = pd.DataFrame.from_dict(self.VariableDictionary[key],orient='index')
+            print(temp)
+        except KeyError:
+            raise Exception("Parameter not found. Available parameters are: {0}".format(
+                ", ".join(list(self.VariableDictionary.keys())))) 
+            
+    def PrintVariables(self, latex=False):
+        
+        if (latex):
+            df_var = pd.DataFrame.from_dict(self.VariableDictionary)
+            print(df_var.to_latex())
+        else:
+            print(self.pars)
+    
+    def dXTtSplit(self, dXT_nt, dXT_n):
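+        # Split the flat array of delayed cross-talk times (dXT_nt) into the
+        # same nested per-event, per-primary-discharge structure as the counts (dXT_n).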
+        output = []
+        partialSum = 0
+        for _i in dXT_n:
+            temp = []
+            for _ii in _i:
+                temp.append(dXT_nt[partialSum:partialSum+_ii])
+                partialSum += _ii
+            output.append(temp)
+        return output
+
+
+    
+    def AllGeiger(self, ns):
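+        # Per event: sample the number of primary Geiger discharges from light
+        # (dist_flight) and dark counts (dist_fDC); for each primary discharge,
+        # sample prompt cross-talk, delayed cross-talk and afterpulse counts and
+        # times; then concatenate everything into per-event amplitude (ag_d) and
+        # time (ag_t) arrays stored on the namespace `ns`.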
+        
+        pbar = tqdm(range(len(self.pb_ag_text)-1))
+        pbar.set_description("{0}".format(self.pb_ag_text[0]))
+        
+        nlight = np.asarray(ns.dist_flight.rvs(size=ns.n_sim))
+        nDC = np.asarray(ns.dist_fDC.rvs(size=ns.n_sim))
+        
+        if(len(nlight)>len(nDC)):
+            nDC.resize(nlight.shape)
+        elif (len(nDC)>len(nlight)):
+            nlight.resize(nDC.shape)
+            
+        nPG = nDC + nlight
+        
+        pbar.update(1)
+        pbar.set_description("{0}".format(self.pb_ag_text[1]))
+        
+        nlight_sum = np.sum(nlight)
+        nDC_sum = np.sum(nDC)
+        nPG_sum = nlight_sum + nDC_sum
+ 
+        npXT = np.array(ns.dist_fpXT.rvs(size=nPG_sum))
+        ndXT = np.array(ns.dist_fdXT.rvs(size=nPG_sum))
+        tAp = np.array(ns.dist_ftAp.rvs(size=nPG_sum))
+        nAp = np.array(stats.binom.rvs(
+                        n=np.ones(nPG_sum).astype(int), 
+                        p=(ns.pAp*(1 - np.exp(-tAp/ns.taur))), 
+                        size=nPG_sum
+                    )
+             )
+ 
+        pg_l_d = np.ones(nlight_sum)
+        pg_l_t = np.asarray(ns.dist_ftlight.rvs(size=nlight_sum))
+        pg_dc_d = np.ones(nDC_sum)
+        pg_dc_t = np.asarray(ns.dist_ftDC.rvs(size=nDC_sum))
+        
+        nlight_cumsum = np.cumsum(nlight)
+        nDC_cumsum = np.cumsum(nDC)
+        nPG_cumsum = np.cumsum(nPG)
+        ndXT_cumsum = np.cumsum(ndXT)
+        
+        pg_l_d = np.split(pg_l_d, nlight_cumsum)[:-1]
+        pg_l_t = np.split(pg_l_t, nlight_cumsum)[:-1]
+        pg_dc_d = np.split(pg_dc_d, nDC_cumsum)[:-1]
+        pg_dc_t = np.split(pg_dc_t, nDC_cumsum)[:-1]
+        
+        pXT_n = np.split(npXT, nPG_cumsum)[:-1]
+        dXT_n = np.split(ndXT, nPG_cumsum)[:-1]
+        dXT_nt =ns.dist_ftdXT.rvs(size=sum(map(sum, dXT_n)))
+        dXT_nt = self.dXTtSplit(dXT_nt, dXT_n)  
+ 
+        Ap_n = np.split(nAp, nPG_cumsum)[:-1]
+        Ap_nt = np.split(tAp, nPG_cumsum)[:-1]
+        
+        pg_d = [np.concatenate(elem) for elem in zip(pg_l_d, pg_dc_d)]
+        pg_t = [np.concatenate(elem) for elem in zip(pg_l_t, pg_dc_t)]
+        
+        pbar.update(1)
+        pbar.set_description("{0}".format(self.pb_ag_text[2]))
+        
+        pXT_d = [
+            np.array(
+                list(
+                    chain.from_iterable(
+                        [
+                            [elem_pg_d]*elem_pXT_n
+                            if(elem_pXT_n > 0 and elem_pg_d >0)
+                            else []
+                            for elem_pg_d, elem_pXT_n in zip(list_pg_d, list_pXT_n)
+                        ]
+                    )
+                )
+            )
+           for list_pg_d, list_pXT_n in zip(pg_d, pXT_n)
+        ]
+        
+       
+        pXT_t = [
+            np.array(
+                list(
+                    chain.from_iterable(
+                        [
+                            [elem_pg_t]*elem_pXT_n
+                            if(elem_pXT_n > 0 and elem_pg_d >0)
+                            else []
+                            for elem_pg_d, elem_pg_t, elem_pXT_n in zip(list_pg_d, list_pg_t, list_pXT_n)
+                        ]
+                    )
+                )
+            )
+            for list_pg_d, list_pg_t, list_pXT_n in zip(pg_d, pg_t, pXT_n)
+        ]
+        
+        
+        pbar.update(1)
+        pbar.set_description("{0}".format(self.pb_ag_text[3]))
+        
+
+        dXT_d = [
+            np.array(
+                list(chain.from_iterable(
+                        [
+                            [elem_pg_d]*elem_dXT_n
+                            if(elem_dXT_n > 0 and elem_pg_d >0)
+                            else []
+                            for elem_pg_d, elem_dXT_n in zip(list_pg_d, list_dXT_n)
+                        ]
+                    )
+                )
+            )
+            for list_pg_d, list_dXT_n in zip(pg_d, dXT_n)
+        ]
+        
+        
+        dXT_t = [
+            np.array(
+                list(
+                    chain.from_iterable(
+                        [
+                            list(map(add, [elem_pg_t]*elem_dXT_n, elem_dXT_nt))
+                            if(elem_dXT_n > 0 and elem_pg_d >0)
+                            else []
+                            for elem_pg_d, elem_pg_t, elem_dXT_n, elem_dXT_nt in zip(list_pg_d, list_pg_t, list_dXT_n, list_dXT_nt)
+                        ]
+                    )
+                )
+            )
+            for list_pg_d, list_pg_t, list_dXT_n, list_dXT_nt in zip(pg_d, pg_t, dXT_n, dXT_nt)
+        ]
+        
+        
+        pbar.update(1)
+        pbar.set_description("{0}".format(self.pb_ag_text[4]))
+        
+        Ap_d = [
+            np.array(
+                list(chain.from_iterable(
+                        [
+                            [elem_Ap_n]*elem_Ap_n
+                            if(elem_Ap_n > 0 and elem_pg_d >0)
+                            else []
+                            for elem_pg_d, elem_pg_t, elem_Ap_n, elem_Ap_t in zip(list_pg_d, list_pg_t, list_Ap_n, list_Ap_t)
+                        ]
+                    )
+                )
+                
+            )
+            for list_pg_d, list_pg_t, list_Ap_n, list_Ap_t, in zip(pg_d, pg_t, Ap_n, Ap_nt)
+        ]
+        
+        
+        Ap_t = [
+            np.array(
+                list(chain.from_iterable(
+                    [
+                        [elem_pg_t + elem_Ap_t]*elem_Ap_n
+                        if(elem_Ap_n > 0 and elem_pg_d >0)
+                        else []
+                        for elem_pg_d, elem_pg_t, elem_Ap_n, elem_Ap_t in zip(list_pg_d, list_pg_t, list_Ap_n, list_Ap_t)
+                    ]
+                    )
+                )
+            )
+            
+            for list_pg_d, list_pg_t, list_Ap_n, list_Ap_t, in zip(pg_d, pg_t, Ap_n, Ap_nt)
+        ]
+        
+        pbar.update(1)
+        pbar.set_description("{0}".format(self.pb_ag_text[5]))
+        
+        ag_d = [
+            np.concatenate( (list_pg_d, list_pXT_d, list_dXT_d, list_Ap_d))
+            for list_pg_d, list_pXT_d, list_dXT_d, list_Ap_d, in zip(pg_d, pXT_d, dXT_d, Ap_d)  
+        ]
+        
+        ag_t = [
+            np.concatenate( (list_pg_t, list_pXT_t, list_dXT_t, list_Ap_t))
+            for list_pg_t, list_pXT_t, list_dXT_t, list_Ap_t, in zip(pg_t, pXT_t, dXT_t, Ap_t)  
+        ]
+        
+        ns.ag_d = ag_d
+        ns.ag_t = ag_t
+        
+        pbar.update(1)
+        pbar.set_description("{0}".format(self.pb_ag_text[6]))
+        pbar.close()
+    
+    
+    def QsG(self, ns, t):
+               
+        tslow = ns.tslow
+        tfast = ns.tfast
+        rfast = ns.rfast
+        tstart = ns.start_tgate
+        tend = ns.start_tgate + ns.len_tgate
+        
+        
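+        # Fraction of a single Geiger discharge's charge collected in the gate
+        # [tstart, tend] for a discharge at time t. The pulse is the sum of a
+        # slow and a fast exponential weighted by (1 - rfast) and rfast; the
+        # three branches below cover discharges before the gate, inside the
+        # gate, and after the gate (which contribute nothing).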
+        if(t<tstart):
+            QsG_slow = (1 - rfast)*(np.exp(-(tstart - t)/tslow) - np.exp(-(tend - t)/tslow)) 
+            QsG_fast = (rfast)*(np.exp(-(tstart - t)/tfast) - np.exp(-(tend - t)/tfast)) 
+            QsG = QsG_slow + QsG_fast
+        
+        elif(t>=tstart and t<tend):
+            QsG_slow = (1 - rfast)*np.exp(-(tend - t)/tslow)
+            QsG_fast = (rfast)*np.exp(-(tend - t)/tfast)
+            QsG = 1 - QsG_slow - QsG_fast
+            
+        else:
+            QsG = 0
+            
+        return QsG  
+    
+    
+    def IsG(self, ns, t):
+        Islow= ((1 - ns.rfast)/ns.tslow)*(np.exp(-t/ns.tslow)) 
+        Ifast = (ns.rfast/ns.tfast)*(np.exp(-t/ns.tfast))
+        IsG = Islow + Ifast
+        return IsG
+    
+    
+    def QSiPM(self, ns):
+        
+        
+        
+        pbar = tqdm(range(len(self.pb_Q_text)-1))
+        
+        pbar.set_description("{0}".format(self.pb_Q_text[0]))
+        
+        Q = np.array(ns.dist_Q.rvs(ns.n_sim))
+        
+
+        n_ag_d = np.asarray([len(list_ag_d) for list_ag_d in ns.ag_d])
+        Sig1 = ns.dist_Sig1.rvs(np.sum(n_ag_d))
+        n_ag_d_cumsum = np.cumsum(n_ag_d)[:-1]
+        ns.Sig1 = np.split(Sig1, n_ag_d_cumsum)
+        
+        pbar.update(1)
+        pbar.set_description("{0}".format(self.pb_Q_text[1]))
+        
+        Q_SiPM = np.array([
+                    np.sum(
+                        np.asarray(
+                            [
+                                elem_Sig1*elem_ag_d*self.QsG(ns, elem_ag_t)
+                                if(elem_ag_d>0)
+                                else 0
+                                for elem_Sig1, elem_ag_d, elem_ag_t in zip(list_Sig1, list_ag_d, list_ag_t)
+                            ]
+                        )
+                    )
+                    
+                    for list_Sig1, list_ag_d, list_ag_t in zip(ns.Sig1, ns.ag_d, ns.ag_t)
+                    
+                ])
+        
+        Q += Q_SiPM
+        
+        
+        pbar.update(1)
+        pbar.set_description("{0}".format(self.pb_Q_text[2]))
+        pbar.close()
+        
+        return Q
+                     
+                     
+
+    def ISiPM(self, ns, ntim=2048, convolve=True, n_transients=20):
+        
+        
+       
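+        # Build n_transients current waveforms on a time grid of ntim samples:
+        # electronics noise drawn from dist_I plus one IsG pulse per Geiger
+        # discharge (scaled by its gain fluctuation and only contributing after
+        # the discharge time), each waveform finally convolved with the
+        # Gaussian transfer function dist_Gt.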
+        _range = [-ns.t0, ns.start_tgate+ns.len_tgate]
+       
+        dtim = int(np.ceil((_range[1] - _range[0])/ntim-1))
+        tim = np.linspace(_range[0], _range[1], ntim)
+        I = ns.dist_I.rvs(ns.n_sim*len(tim))
+        I = np.array(np.split(I, len(tim))).T
+        Gt = ns.dist_Gt.pdf(tim)
+        
+        
+        n_ag_d = np.asarray([len(list_ag_d) for list_ag_d in ns.ag_d])
+        Sig1 = ns.dist_Sig1.rvs(np.sum(n_ag_d))
+        n_ag_d_cumsum = np.cumsum(n_ag_d)[:-1]
+        Sig1 = np.split(Sig1, n_ag_d_cumsum)
+        ns.Sig1 = Sig1
+        
+       
+        I_temp = I[0:int(n_transients)]
+        ag_d_temp = ns.ag_d[0:int(n_transients)]
+        ag_t_temp = ns.ag_t[0:int(n_transients)]
+        Sig1_temp = ns.Sig1[0:int(n_transients)]
+        
+    
+ 
+        I_SiPM = np.array([
+                    np.sum(
+                        np.asarray(
+                            [
+                                elem_Sig1*elem_ag_d*self.IsG(ns, elem_tim - elem_ag_t)
+                                if(elem_ag_d>0 and elem_ag_t<elem_tim)
+                                else 0
+                                for (elem_Sig1, elem_ag_d, elem_ag_t), elem_tim in product(
+                                    zip(list_Sig1, 
+                                        list_ag_d, 
+                                        list_ag_t), tim)
+                            ]
+                        ).reshape(len(list_ag_d), ntim), axis=0
+                    )
+                    
+                    for list_Sig1, list_ag_d, list_ag_t in zip(tqdm(Sig1_temp), 
+                                                               ag_d_temp, 
+                                                               ag_t_temp)
+                    
+                ])
+            
+        I_temp+=I_SiPM
+        
+        I = np.apply_along_axis(signal.fftconvolve, 1, I_temp, Gt, 'same')
+        
+        return I, tim
+        
+     
+    
+        
+
+    def Simulate(self,
+                 n_sim,
+                 transients=False,
+                 n_transients=None,
+                 verbose=False
+                ):
+        
+        
+        
+        n_sim = int(n_sim)
+            
+        for index, row in self.pars.iterrows():
+            
+            
+            dictionary = row.to_dict()
+
+            if(verbose):
+                print("\n-------------------\n")
+                print("\nSIMULATION {0}\n  - {1}".format(index, row["Name_Sim"]))
+                print("\n-------------------\n")
+                for key, value in dictionary.items():
+                    if(key!="Name_Sim" and key not in self.out_columns):
+                        print("{0} \t\t {1} {2}".format(key, value, self.VariableDictionary[key]["Unit"]))
+                print("\n-------------------\n")
+            
+            ns =  SimpleNamespace(**dictionary)
+            
+
+            if(ns.Gen_mu == "Poisson"):
+                ns.dist_flight = scipy.stats.poisson(mu=ns.mu)
+            elif(ns.Gen_mu == "Fixed"):
+                ns.dist_flight = scipy.stats.uniform(loc = ns.mu, scale=0)
+                
+            if(ns.Gen_tmu == "Gauss"):
+                ns.dist_ftlight = scipy.stats.norm(loc=ns.tl0, scale=ns.tl1)
+            elif(ns.Gen_tmu == "Exp"):
+                ns.dist_ftlight = scipy.stats.expon(loc=ns.tl0, scale=ns.tl1)
+            else:
+                raise Exception("The Time for Primary Geiger Discharge distribution mode that has been selected is invalid. Please choose from the following options: {0}".format(
+                ", ".join(self.VariableDictionary["Gen_tmu"]["options"])))
+                
+            ns.dist_fDC = scipy.stats.poisson(mu=(ns.t0 + ns.start_tgate + ns.len_tgate)*ns.DCR)
+            ns.dist_ftDC = scipy.stats.uniform(loc=-ns.t0, scale=ns.t0+ns.start_tgate+ns.len_tgate)
+            
+            if(ns.Gen_npXT == "Poisson"):
+                ns.dist_fpXT = scipy.stats.poisson(mu = ns.ppXT)
+            elif(ns.Gen_npXT == "Binomial"): 
+                ns.dist_fpXT = scipy.stats.binom(n=1, p = ns.ppXT)
+            elif(ns.Gen_npXT == "Borel"):
+                ns.dist_fpXT = borel(mu=ns.ppXT)   # borel is imported from AdditionalPDFs
+            else:
+                raise Exception("The Prompt Cross-talk Discharge distribution mode that has been selected is invalid. Please choose from the following options: {0}".format(
+                ", ".join(self.VariableDictionary["Gen_npXT"]["options"])))
+            
+            if(ns.Gen_ndXT == "Poisson"):
+                ns.dist_fdXT = scipy.stats.poisson(mu = ns.pdXT)
+            elif(ns.Gen_ndXT== "Binomial"): 
+                ns.dist_fdXT = scipy.stats.binom(n=1, p = ns.pdXT)
+            elif(ns.Gen_ndXT == "Borel"):
+                ns.dist_fdXT = borel(mu=ns.pdXT)   # borel is imported from AdditionalPDFs
+            else:
+                raise Exception("The Delayed Cross-talk Discharge mode that has been selected is invalid. Please choose from the following options: {0}".format(
+                ", ".join(self.VariableDictionary["Gen_ndXT"]["options"])))
+
+            if(ns.Gen_tdXT == "Exp"):
+                ns.dist_ftdXT = scipy.stats.expon(scale=(ns.taudXT))
+            else:
+                raise Exception("The Time of Delayed Cross-talk Discharge mode that has been selected is invalid. Please choose from the following options: {0}".format(
+                ", ".join(self.VariableDictionary["Gen_tdXT"]["options"])))
+            
+                
+            ns.dist_ftAp = scipy.stats.expon(scale=(ns.tauAp))
+            ns.dist_Q = scipy.stats.norm(loc=0, scale=ns.Sig0)
+            ns.dist_Sig1 = scipy.stats.norm(loc=1, scale=ns.Sig1)
+            ns.dist_I = scipy.stats.norm(loc=0, scale=ns.Sigt)
+            ns.dist_Gt = scipy.stats.norm(loc=0, scale=ns.GSig)
+            ns.n_sim = n_sim
+            
+            
+            self.pars.at[index, "NSim"] =  n_sim
+            
+            start_time = time.time()
+            self.AllGeiger(ns)
+            end_time = time.time()
+            
+            self.pars.at[index, "GeigerArray"] =  np.vstack([ns.ag_d, ns.ag_t])
+            self.pars.at[index, "TimeElapsed_GeigerArray"] = end_time-start_time
+
+            start_time = time.time()
+            Qs = self.QSiPM(ns)
+            end_time = time.time()
+
+            self.pars.at[index, "ChargeSpectrum"] =  Qs
+            self.pars.at[index, "TimeElapsed_ChargeSpectrum"] = end_time-start_time
+
+            if(transients):
+                start_time = time.time()
+                I, t = self.ISiPM(ns)
+                end_time = time.time()
+                Trans = np.vstack([I,t])
+                self.pars.at[index, "Transients"] =  Trans
+                self.pars.at[index, "TimeElapsed_Transients"] = end_time-start_time
+               
+
+            
+    def WriteDataFrame(self, name, directory, ext=None):
+        
+          
+        if (self.pars.empty):
+            raise Exception("The simulation data is empty. Please simulate an SiPM before writing a file.")
+       
+        if(ext is None):
+            outdir = directory + name + ".pkl"
+            self.pars.to_pickle(outdir)
+        else:
+            outdir = directory + name + ext
+            if(ext == ".pkl"):
+                self.pars.to_pickle(outdir)
+            elif(ext == ".hdf"):
+                self.pars.to_hdf(outdir, key="pars")   # to_hdf requires a key for the stored object
+            elif(ext == ".csv"):
+                self.pars.to_csv(outdir)
+            elif(ext == ".json"):
+                self.pars.to_json(outdir)
+            else:
+                raise Exception("The file format was not recognized. Please choose an output file extension .pkl, .hdf, .csv or .json " ) 
+
+    def WriteASCII(self, name, directory):
+        
+        if (self.pars.empty):
+            raise Exception("The simulation data is empty. Please simulate an SiPM before writing a file.")
+        
+        variables = self.pars.drop(['ChargeSpectrum'], axis=1)
+        Qs = self.pars["ChargeSpectrum"]
+       
+        with open(directory + name + ".head", 'a') as f:
+            f.write(variables.to_string(header = True, index = False))
+        
+        with open(directory + name + ".dat", 'a') as f:
+            for _elem in zip_longest(*Qs):
+                formatted = (["{:.3f}".format(__elem) if __elem is not None else " " for __elem in _elem])
+                formatted = ','.join(formatted)
+                f.write(formatted + '\n')
\ No newline at end of file
diff --git a/__pycache__/AdditionalPDFs.cpython-36.pyc b/__pycache__/AdditionalPDFs.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3a9c2233096a3e8c53df840ff7de2202f912ea1
Binary files /dev/null and b/__pycache__/AdditionalPDFs.cpython-36.pyc differ
diff --git a/__pycache__/LightSimtastic.cpython-36.pyc b/__pycache__/LightSimtastic.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f28c81b2d49b775f4378b3a3104db5ecf63c13ea
Binary files /dev/null and b/__pycache__/LightSimtastic.cpython-36.pyc differ
diff --git a/__pycache__/SiPMSimulation.cpython-36.pyc b/__pycache__/SiPMSimulation.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a202d1aaed5270b83942bf4a9e314b262640eb48
Binary files /dev/null and b/__pycache__/SiPMSimulation.cpython-36.pyc differ