diff --git a/PeakOTron.py b/PeakOTron.py
index 31c9e978bf766a15a603fc45c5b365aceda8298c..c93cfa9d8e87e7cd0ff1533d13ed2dad88ef9d8a 100644
--- a/PeakOTron.py
+++ b/PeakOTron.py
@@ -27,7 +27,6 @@ import matplotlib.pyplot as plt
 #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
 
-
 class PeakOTron:
     
     
@@ -49,13 +48,13 @@ class PeakOTron:
     
     def __init__(self,
                  verbose=False,
-                 n_call_minuit=30000,
-                 n_iterations_minuit = 50,
-                 n_bootstrap=100,
+                 n_call_minuit=None,
+                 n_iterations_minuit = 5,
                  plot_figsize=(10,10),
                  plot_fontsize=25,
                  plot_legendfontsize=18,
-                 plot_cmap = 'turbo'
+                 plot_cmap = 'turbo',
+                 eps = 1e-6
                 ):
         
         self._default_hist_data={
@@ -65,7 +64,6 @@ class PeakOTron:
             "bin_centres":None,
             "counts_bg":None,
             "counts_bgsub":None,
-            "f_dcr_interpeak":None,
             "bw":None,
             "peaks":{
                 "peak_position":None,
@@ -95,12 +93,11 @@ class PeakOTron:
             "len_pad":None
         }
         
- 
+        self._eps = eps
         self._plot_figsize= plot_figsize
         self._plot_fontsize= plot_fontsize
         self._plot_legendfontsize= plot_legendfontsize
         self._cmap = cm.get_cmap(plot_cmap)
-        self._n_bootstrap=n_bootstrap
         self._len_pad = int(100)
         self._verbose=verbose
         self._n_call_minuit = n_call_minuit
@@ -109,8 +106,7 @@ class PeakOTron:
         
         self._max_n_peaks = None
         self._max_n_peaks_dcr = 6
-        self._max_tau_Ap = 30.0
-        
+        self._min_n_events_peak=100
         
         self._m_DRM = None
         self._time_prefit = None
@@ -126,9 +122,12 @@ class PeakOTron:
                 "t_gate":None,
                 "t_gate_err":None,
                 "bw":None,
-                "truncate_nG":None,
+                "bin_0":None,
+                "truncate_nsigma0_up":None,
+                "truncate_nsigma0_do":None,
+                "truncate_nsigma0_chi2scanlim":None,
+                "min_n_events_peak":None,
                 "prefit_only":False,
-                "peak_dist_nG":0.8,
                 "bin_method":"knuth"
         }
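
A minimal usage sketch of Fit() with the keyword defaults introduced above (which replace the former truncate_nG/peak_dist_nG options). The toy data, parameter values and import path are illustrative assumptions, not part of the patch:

# Hypothetical usage sketch of the new Fit() keywords (all values illustrative).
import numpy as np
from PeakOTron import PeakOTron   # assumed import path

rng = np.random.default_rng(1)
n_pe = rng.poisson(2.0, size=50000)                          # toy photoelectron counts
data = 25.0*n_pe + rng.normal(0.0, 3.0, size=n_pe.size)      # toy charge spectrum

f = PeakOTron(verbose=True)
f.Fit(data,
      tau=20.0,                  # slow component of the SiPM pulse [ns]
      tau_R=25.0,                # SiPM recovery time [ns]
      t_0=100.0,                 # integration region before the gate [ns]
      t_gate=100.0,              # integration gate length [ns]
      truncate_nsigma0_do=2.0,   # drop bins more than 2 sigma_0 below the pedestal
      min_n_events_peak=100)     # events required to fit a Gaussian to a peak
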
     
@@ -152,6 +151,13 @@ class PeakOTron:
         self._prefit_errors = self._default_fit_dict.copy()
         self._fit_values= self._default_fit_dict.copy()
         self._fit_errors = self._default_fit_dict.copy()
+        
+        self._prefit_values_bin = self._default_fit_dict.copy()
+        self._prefit_errors_bin = self._default_fit_dict.copy()
+        self._fit_values_bin= self._default_fit_dict.copy()
+        self._fit_errors_bin = self._default_fit_dict.copy()
+        
+        
         self._hist_data = self._default_hist_data.copy()
     
 
@@ -197,27 +203,6 @@ class PeakOTron:
         if(self._verbose):
             print("Set maximum number of dark peaks to {:d}.".format(self._max_n_peaks_dcr))
             
-            
-    ########################################################################################################
-    ## SetMaxNPeaksDCR(max_n_peaks)
-    ########################################################################################################
-    ##
-    ## Typically, it is sufficient to determine the appropriate number of peaks to fit from mu. 
-    ##  However, to make sure we don't accidentally create a fit with a large number of peaks, 
-    ##  the default threshold of 20 peaks may be set with this function
-    ##
-    ########################################################################################################
-    
-    def SetMaxTauAp(self, max_tau_Ap, override=False):
-        max_tau_Ap = float(max_tau_Ap)
-        if(max_tau_Ap<1.0):
-            raise Exception("Maximum tau_Ap must exceed 1 ns.")
-        
-
-        self._max_tau_Ap =  max_tau_Ap
-        if(self._verbose):
-            print("Set maximum tau_Ap to {:3.3f} ns".format(self._max_tau_Ap))
- 
 
                 
 
@@ -288,44 +273,39 @@ class PeakOTron:
     
     
     
-    def GetFitResults(self, debug=True):
+    def GetFitResults(self, debug=True, bin_units=True):
         
 
         
         if(self._m_DRM is None):
             raise Exception ("Fit has not yet been performed. Please first perform a fit.")
         else:
-            if(not debug):
+                            
+            if(bin_units):
+                temp_values = deepcopy(self._fit_values_bin)
+                temp_errors = deepcopy(self._fit_errors_bin)
+                
+            else:
                 temp_values = deepcopy(self._fit_values)
                 temp_errors = deepcopy(self._fit_errors)
-                
+            
+            if(not debug):
+
                 temp_values["p_Ap"] = P_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"])
                 temp_errors["p_Ap"] = dP_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"], self._fit_errors["tau_Ap"], self._fit_errors["p_Ap"])
                 
-                t_fit = self.GetFitTime(prefit=False)
-                t_prefit = self.GetFitTime(prefit=True)
-                
-                temp_values["fit_time"] = t_fit
-                temp_values["prefit_time"] = t_prefit
-                
-                
-                return temp_values, temp_errors
-                
             else:
-                
-                temp_values = deepcopy(self._fit_values)
-                temp_errors = deepcopy(self._fit_errors)
-                
+                   
                 temp_values["P_Ap"] = P_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"])
                 temp_errors["P_Ap"] = dP_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"], self._fit_errors["tau_Ap"], self._fit_errors["p_Ap"])
                 
-                t_fit = self.GetFitTime(prefit=False)
-                t_prefit = self.GetFitTime(prefit=True)
+            t_fit = self.GetFitTime(prefit=False)
+            t_prefit = self.GetFitTime(prefit=True)
                 
-                temp_values["fit_time"] = t_fit
-                temp_values["prefit_time"] = t_prefit
-                
-                return temp_values, temp_errors
+            temp_values["fit_time"] = t_fit
+            temp_values["prefit_time"] = t_prefit
+ 
+            return temp_values, temp_errors
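
A short retrieval sketch for the reworked GetFitResults(), assuming `f` is the PeakOTron instance from the earlier usage example and that Fit() completed without raising:

# Results in bin units (default) and rescaled to the input charge units.
vals_bin, errs_bin = f.GetFitResults(debug=False, bin_units=True)
vals, errs = f.GetFitResults(debug=False, bin_units=False)

print("Gain:", vals["G"], "+/-", errs["G"])
print("Afterpulse probability (converted):", vals["p_Ap"])
print("Fit / prefit wall time [s]:", vals["fit_time"], vals["prefit_time"])
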
         
         
     ########################################################################################################
@@ -336,8 +316,11 @@ class PeakOTron:
     ##
     ########################################################################################################
         
-    def GetPrefitResults(self):
-        return self._prefit_values, self._prefit_errors
+    def GetPrefitResults(self, bin_units=True):
+        if(bin_units):
+            return self._prefit_values_bin, self._prefit_errors_bin
+        else:
+            return self._prefit_values, self._prefit_errors
         
         
     ########################################################################################################
@@ -369,25 +352,108 @@ class PeakOTron:
     ########################################################################################################
 
     
-    def GetChi2(self, prefit=False):
+    
+#     def __CalcChi2(self, x, y, y_hat, y_err):
+#         chi2 = np.nansum(((y_hat - y)/y_err)**2)
+#         ndof = len(x) - 10
+#         return chi2, ndof
+    
+    
+    
+#     def GetChi2(self, prefit=False):
         
-        if(self._m_DRM is None):
-            raise Exception ("Fit has not yet been performed. Please first perform a fit.")
-        else:
+#         if(self._m_DRM is None):
+#             raise Exception ("Fit has not yet been performed. Please first perform a fit.")
+#         else:
 
-            x = self._hist_data["bin_centres"]
-            y = self._hist_data["counts"]
-            y_err = self._hist_data["counts_error"]
+#             x_all = self._hist_data["bin_numbers"]
+#             y_all = self._hist_data["counts"]
+#             y_err_all = self._hist_data["counts_error"]
             
-            bw = self._hist_data["bw"]
-            N = np.sum(y)
-            y_hat = N*bw*self.GetModel(x, prefit)
+#             if(self._fit_kwargs["truncate_nsigma0_do"] is not None):
+#                 lim_nsigma0 = self._prefit_values_bin["Q_0"] - self._fit_kwargs["truncate_nsigma0_do"]*self._prefit_values_bin["sigma_0"]
+#                 cond_sel_nsigma0 = (self._hist_data["bin_numbers"]>lim_nsigma0)
+
+#             else:
+#                 cond_sel_nsigma0 = np.full(len(self._hist_data["bin_numbers"]), True).astype(bool)
+
+#             x = x_all[cond_sel_nsigma0]
+#             y = y_all[cond_sel_nsigma0]
+#             y_err = y_err_all[cond_sel_nsigma0]
+
+#             y_hat = self.GetModel(x, prefit=prefit, bin_units=True)
+#             y_hat_err = np.sqrt(y_hat)
+            
+                 
+#             plt.figure()
+#             plt.plot(x, (y-y_hat)/y_err)
+#             plt.show()
+        
+#             chi2, ndof = self.__CalcChi2(x, y, y_hat, y_err)
+
+#             return chi2, ndof
+    
+    
+
+    
+    def GetChi2(self, pars=None, prefit=False, full=False, n_sigma_lims=[None, None]):
+
+     
+        x_all = self._hist_data["bin_numbers"]
+        y_all = self._hist_data["counts"]
+        y_err_all = self._hist_data["counts_error"]
+        
+        Q_0_prefit = self._prefit_values_bin["Q_0"]
+        sigma_0_prefit = self._prefit_values_bin["sigma_0"]
+
+        if(pars is None):
+            y_hat_all = self.GetModel(x_all, prefit=prefit, bin_units=True)
+        else:
+            y_hat_all = DRM(x_all, **pars)
             
+        y_hat_err_all = np.sqrt(y_hat_all)
+        
+        conds = []
+        
+        cond_count = (y_all>1)
+
+        conds.append(cond_count)
+#         print("Check nsigma = {:3.3f} for chi2".format(self._fit_kwargs["truncate_nsigma0_do"]))
+        
+        n_sigma_lims = list(n_sigma_lims)   # copy so the shared default list is never mutated
+        if(n_sigma_lims[0] is None and self._fit_kwargs["truncate_nsigma0_do"] is not None and not full):
+            n_sigma_lims[0] = self._fit_kwargs["truncate_nsigma0_do"]
+
+      
+        if(n_sigma_lims[0] is not None and not full):
+            lim_nsigma0_low = Q_0_prefit - n_sigma_lims[0]*sigma_0_prefit
+            cond_nsigma0_low = (x_all > lim_nsigma0_low)
+            conds.append(cond_nsigma0_low)
+
+        if(n_sigma_lims[1] is not None and not full):
+            lim_nsigma0_hi = Q_0_prefit + n_sigma_lims[1]*sigma_0_prefit
+            cond_nsigma0_hi = (x_all < lim_nsigma0_hi)
+            conds.append(cond_nsigma0_hi)
+              
+        conds = np.vstack(conds).all(axis=0)
+        
+        
+        x = x_all[conds]
+        y = y_all[conds]
+        y_err = y_err_all[conds]
+        y_hat = y_hat_all[conds]
+        y_hat_err =  y_hat_err_all[conds]
+        
+
+        
+                    
+        chi2 = np.nansum(((y_hat - y)/y_hat_err)**2)
+        ndof = len(x)-10
 
-            chi2 = np.nansum(((y_hat - y)/y_err)**2)
-            ndof = len(x) - 9
-            return chi2, ndof
+        
+        return chi2, ndof
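
The selection above keeps only bins with more than one count and, unless full=True, bins inside the n-sigma window around the prefit pedestal; the chi2 uses the Poisson error of the model prediction. A minimal numpy sketch with made-up numbers (only the lower cut is shown):

import numpy as np

x_all = np.arange(200, dtype=float)                      # bin numbers
y_all = np.random.poisson(30, size=200).astype(float)    # toy counts
y_hat_all = np.full(200, 30.0)                           # stand-in for the model prediction

Q_0, sigma_0, n_sigma_do = 40.0, 3.0, 2.0                # prefit pedestal, width, lower cut
keep = (y_all > 1) & (x_all > Q_0 - n_sigma_do*sigma_0)

chi2 = np.nansum((y_hat_all[keep] - y_all[keep])**2 / y_hat_all[keep])   # ((y_hat-y)/sqrt(y_hat))^2
ndof = keep.sum() - 10                                   # 10 floating model parameters
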
     
+
     
     
     ########################################################################################################
@@ -403,15 +469,21 @@ class PeakOTron:
     ########################################################################################################
     
 
-    def GetModel(self, x, prefit=False):
+    def GetModel(self, x, prefit=False, bin_units=False):
         
         if(self._m_DRM is None):
             raise Exception ("Fit has not yet been performed. Please first perform a fit.")
         else: 
-            if(prefit):            
-                return DRM(x, **self._prefit_values)
+            if(prefit): 
+                if(bin_units):
+                    return DRM(x, **self._prefit_values_bin)
+                else:
+                    return DRM(x, **self._prefit_values)
             else:
-                return DRM(x, **self._fit_values)
+                if(bin_units):
+                    return DRM(x, **self._fit_values_bin)
+                else:
+                    return DRM(x, **self._fit_values)
         
         
         
@@ -428,7 +500,7 @@ class PeakOTron:
     ## Note: because spectra from scopes have been observed with variable bin widths, the program also rebins 2D histograms using the same bin width as the original spectrum.
     ########################################################################################################
     
-    def GetBins(self, data, bw, bin_method):
+    def GetBins(self, data, bw, bin_method, bin_0):
         
         
         if(data.ndim == 1):
@@ -467,6 +539,11 @@ class PeakOTron:
         else:
             raise Exception("Data is neither 1D nor 2D. Please provide the correct dimensions of data to the fit program.")
 
+        if(bin_0 is not None):
+            if(self._verbose):
+                print("Setting first bin to be {:3.3f} input charge units".format(bin_0))
+            
+            x_min = bin_0
         
         nbins = np.ceil((x_max - x_min) / bw)
         nbins = max(1, nbins)
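
A worked example of the bin_0 override and the bin-count calculation above, with illustrative numbers:

import numpy as np

x_min, x_max, bw = -0.7, 103.2, 1.0   # range and bin width taken from the data
bin_0 = -5.0                          # user-requested lower edge of the first bin

if bin_0 is not None:
    x_min = bin_0

nbins = int(max(1, np.ceil((x_max - x_min)/bw)))   # ceil(108.2) -> 109 bins
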
@@ -485,15 +562,16 @@ class PeakOTron:
         bin_numbers = self._hist_data["bin_numbers"]
         bin_centres = self._hist_data["bin_centres"]
         
-        ax.plot(bin_numbers, 
+        ax.step(bin_numbers, 
                 counts, 
-                label="{:s} \n $N_{{events}}$ = {:s}".format(label, 
-                                                         LatexFormat(float(np.sum(counts)))), 
+                #label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))),
+                label = "{:s}".format(label),
                 color="C0", 
+                where="mid",
                 lw=5)
         ax.grid(which="both")
         ax.set_ylabel("Counts", fontsize=self._plot_fontsize)
-        ax.set_xlabel("Bin Index", fontsize=self._plot_fontsize)
+        ax.set_xlabel("Bin", fontsize=self._plot_fontsize)
         ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
         ax.tick_params(axis="y", labelsize=self._plot_fontsize)
         ax.set_ylim([1,None])
@@ -504,16 +582,16 @@ class PeakOTron:
         _f_inv = interp1d(bin_centres, bin_centres, bounds_error=False, fill_value="extrapolate")
         
         
-        secax = ax.secondary_xaxis('top', functions=(_f, _f_inv))
-        secax.set_xlabel('{:s}'.format(x_label), fontsize=self._plot_fontsize)
-        secax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
+#         secax = ax.secondary_xaxis('top', functions=(_f, _f_inv))
+#         secax.set_xlabel('{:s}'.format(x_label), fontsize=self._plot_fontsize)
+#         secax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
         
         
-        mf = ScalarFormatter(useMathText=True)
-        mf.set_powerlimits((-2,2))
-        secax.xaxis.set_major_formatter(mf)
-        offset = secax.xaxis.get_offset_text()
-        offset.set_size(int(0.8*self._plot_fontsize))
+#         mf = ScalarFormatter(useMathText=True)
+#         mf.set_powerlimits((-2,2))
+#         secax.xaxis.set_major_formatter(mf)
+#         offset = secax.xaxis.get_offset_text()
+#         offset.set_size(int(0.8*self._plot_fontsize))
         
         if(save_directory is not None):
             print("Saving figure to {0}...".format(save_directory))
@@ -540,19 +618,21 @@ class PeakOTron:
         G_fft = self._hist_data["G_fft"]
         inv_G_fft = 1/G_fft
     
-        ax.plot(fft_freq,
+        ax.step(fft_freq,
                  fft_amplitude,
                  color="C0",
-                 label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))),
+                 where="mid",
+                 #label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))),
+                 label = "{:s}".format(label),
                  lw=5)
         
         
-        ax.axvline(x=inv_G_fft, color="green", lw=2, label="$G^{{*}}_{{FFT}}$ = {:3.3f} bins".format(G_fft))
+        ax.axvline(x=inv_G_fft, color="purple", linestyle="--", lw=4, label="$G^{{*}}_{{FFT}}$ = {:3.3f} Bin".format(G_fft))
         
         ax.set_yscale("log")
         ax.set_xscale("log")
         ax.grid(which="both")
-        ax.set_xlabel("Frequency of Bin Index", fontsize=25)
+        ax.set_xlabel("Bin in Fourier Space", fontsize=25)
         ax.set_ylabel("Power Spectral Density", fontsize=25)
         ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
         ax.tick_params(axis="y", labelsize=self._plot_fontsize)
@@ -580,36 +660,44 @@ class PeakOTron:
         if(plot_bgsub):
             
             
-            ax.plot(bin_numbers,
+            ax.step(bin_numbers,
                      counts,
-                     label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))),
+                     #label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))),
+                     label = "{:s}".format(label),
                      color="C0",
+                     where="mid",
                      lw=5)
             
             
-            ax.plot(bin_numbers,
+            ax.step(bin_numbers,
                      counts_bgsub,
-                     label="{:s} - BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bgsub)))),
+                     #label="{:s} - BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bgsub)))),
+                     label = "{:s} - Background".format(label),
                      color="r",
+                     where="mid",
                      lw=5)
             
         else:
-            ax.plot(bin_numbers,
+            ax.step(bin_numbers,
                      counts,
-                     label="{:s} \n $N_{{events}}$ = {:s}".format(label,LatexFormat(float(np.sum(counts)))),
+                     #label="{:s} \n $N_{{events}}$ = {:s}".format(label,LatexFormat(float(np.sum(counts)))),
+                     label = "{:s}".format(label),
                      color="C0",
+                     where="mid",
                      lw=5)
             
             ax.fill_between(bin_numbers[counts_bg>0],
                      counts_bg[counts_bg>0],
-                     label="{:s}, BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bg)))),
+                     #label="{:s}, BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bg)))),
+                     label = "{:s}, Background".format(label),
                      color='none',
                      hatch="///",
                      edgecolor="r",
+                     step="mid",
                      lw=5)
             
         ax.grid(which="both")
-        ax.set_xlabel("Bin Index", fontsize=25)
+        ax.set_xlabel("Bin", fontsize=25)
         ax.set_ylabel("Counts", fontsize=self._plot_fontsize)
         ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
         ax.tick_params(axis="y", labelsize=self._plot_fontsize)
@@ -639,17 +727,21 @@ class PeakOTron:
         
         peak_position = self._hist_data["peaks"]["peak_position"]
 
-        ax.plot(bin_numbers, 
+        ax.step(bin_numbers, 
                 counts,
-                label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))), 
+                #label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))), 
+                label = "{:s}".format(label),
                 color="C0", 
                 lw=5, 
+                where="mid",
                 zorder=-5)
         
-        ax.plot(bin_numbers, counts_bgsub,
-                label="{:s} - BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bgsub)))),
+        ax.step(bin_numbers, counts_bgsub,
+                #label="{:s} - BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bgsub)))),
+                label="{:s} - Background".format(label),
                      color="r",
                      lw=5,
+                     where="mid",
                      zorder=-4)
                
         
@@ -670,7 +762,7 @@ class PeakOTron:
                      )
                 
             else:
-                annotation_text = "Peak {:d}".format(peak_i)
+                annotation_text = "Peak Position {:d}".format(peak_i)
                 color = color_vals[peak_i]
                 ls="-"
                
@@ -679,6 +771,7 @@ class PeakOTron:
                       linestyle=ls,
                       lw=2.5
                      )
+#                 plt.text(10.1,0, annotation_text, color=color, fontsize=self._plot_legend,rotation=90)
                 
            
 
@@ -688,9 +781,9 @@ class PeakOTron:
                    s=200,
                    color="purple",
                    zorder=5,
-                   label= "$Q_{{0}}$ Estimate")
+                   label= "$Q^{est}_{{0}}$")
         
-        ax.set_xlabel("Bin Index", fontsize=self._plot_fontsize)
+        ax.set_xlabel("Bin", fontsize=self._plot_fontsize)
         ax.set_ylabel("Counts", fontsize=self._plot_fontsize)
         ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
         ax.tick_params(axis="y", labelsize=self._plot_fontsize)
@@ -707,24 +800,20 @@ class PeakOTron:
             plt.close(fig)
 
 
-    def PlotVarianceFit(self, figsize=(10.0, 10.0), save_directory=None,  y_limits=[None, None]):
+    def PlotVarianceFit(self, figsize=(10.0, 10.0), save_directory=None,  y_limits=[None, None], x_limits=[None, None]):
         
         fig = plt.figure(figsize=figsize)
         ax = plt.subplot(111)
         
-        cond_sel_peaks = self._hist_data["peaks"]["cond_sel_peaks"]
-        peak_index = self._hist_data["peaks"]["peak_index"][cond_sel_peaks]
-        peak_variance = self._hist_data["peaks"]["peak_variance"][cond_sel_peaks]
-        peak_variance_error = self._hist_data["peaks"]["peak_variance_error"][cond_sel_peaks]
-        
+        peak_index = self._hist_data["peaks"]["peak_index"]
+        peak_variance = self._hist_data["peaks"]["peak_variance"]
+        peak_variance_error = self._hist_data["peaks"]["peak_variance_error"]
         
-        _offset = self._hist_data["bc_min"]
-        _scale =  self._hist_data["bw"]
         
-        sigma_0 = self._prefit_values["sigma_0"]/_scale
-        dsigma_0 = self._prefit_errors["sigma_0"]/_scale
-        sigma_1 = self._prefit_values["sigma_1"]/_scale
-        dsigma_1 = self._prefit_errors["sigma_1"]/_scale
+        sigma_0 = self._prefit_values_bin["sigma_0"]
+        dsigma_0 = self._prefit_errors_bin["sigma_0"]
+        sigma_1 = self._prefit_values_bin["sigma_1"]
+        dsigma_1 = self._prefit_errors_bin["sigma_1"]
         
         ax.errorbar( peak_index,
                      peak_variance,
@@ -744,7 +833,7 @@ class PeakOTron:
                  color="C0",
                  linestyle="--",
                  lw=3,
-                 label = "Linear Fit: \n $\sigma^{{2}}_{{0}}$ = {:s} $\pm$ {:s} bins \n $\sigma^{{2}}_{{1}}$ = {:s} $\pm$ {:s} bins".format(
+                 label = "Linear Fit: \n $\sigma^{{2}}_{{0}}$ = {:s} $\pm$ {:s} Bin$^{{2}}$ \n $\sigma^{{2}}_{{1}}$ = {:s} $\pm$ {:s} Bin$^{{2}}$".format(
                                                               LatexFormat(sigma_0**2),
                                                               LatexFormat(2*sigma_0*dsigma_0),
                                                               LatexFormat(sigma_1**1),
@@ -756,11 +845,12 @@ class PeakOTron:
         
         
         ax.set_xlabel("Peak Index", fontsize=self._plot_fontsize)
-        ax.set_ylabel("$\sigma^{2}_{peak}$", fontsize=self._plot_fontsize)
+        ax.set_ylabel("$\sigma^{2}_{peak}$ [Bin$^{2}$]", fontsize=self._plot_fontsize)
         ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
         ax.tick_params(axis="y", labelsize=self._plot_fontsize)
         ax.grid(which="both")
         ax.set_ylim(y_limits)
+        ax.set_xlim(x_limits)
         ax.legend(fontsize=self._plot_legendfontsize)
 
         if(save_directory is not None):
@@ -773,24 +863,19 @@ class PeakOTron:
             plt.close(fig)
         
         
-    def PlotMeanFit(self, figsize=(10.0, 10.0), save_directory=None):
+    def PlotMeanFit(self, figsize=(10.0, 10.0), y_limits=[None, None], x_limits=[None, None], save_directory=None):
         
         fig = plt.figure(figsize=figsize)
         ax = plt.subplot(111)
         
         bin_numbers = self._hist_data["bin_numbers"]
         
-        cond_sel_peaks = self._hist_data["peaks"]["cond_sel_peaks"]
-        peak_index = self._hist_data["peaks"]["peak_index"][cond_sel_peaks]
-        peak_mean = self._hist_data["peaks"]["peak_mean"][cond_sel_peaks]
-        peak_mean_error = self._hist_data["peaks"]["peak_mean_error"][cond_sel_peaks]
+        peak_index = self._hist_data["peaks"]["peak_index"]
+        peak_mean = self._hist_data["peaks"]["peak_mean"]
+        peak_mean_error = self._hist_data["peaks"]["peak_mean_error"]
 
-        
-        _offset = self._hist_data["bc_min"]
-        _scale =  self._hist_data["bw"]
-        
-        Q_0 = (self._prefit_values["Q_0"] - _offset)/_scale
-        dQ_0 = self._prefit_errors["Q_0"]/_scale
+        Q_0 = self._prefit_values_bin["Q_0"]
+        dQ_0 = self._prefit_errors_bin["Q_0"]
         G = self._hist_data["G_fft"]
         
         ax.errorbar( peak_index,
@@ -800,7 +885,7 @@ class PeakOTron:
                      markersize=10,
                      lw=3,
                      color="C0",
-                     label="Means of Peak"
+                     label="Means of Peaks"
                     )
         
         ax.plot( peak_index,
@@ -812,7 +897,7 @@ class PeakOTron:
                  linestyle="--",
                  lw=3,
                  markersize=10,
-                 label = "Linear Fit: \n $Q_{{0}}$ = {:s} $\pm$ {:s} bins \n $G^{{*}}_{{FFT}}$ = {:s} bins".format(
+                 label = "Linear Fit: \n $Q_{{0}}$ = {:s} $\pm$ {:s} Bin \n $G^{{*}}_{{FFT}}$ = {:s} Bin".format(
                                                               LatexFormat(Q_0),
                                                               LatexFormat(dQ_0),
                                                               LatexFormat(G),
@@ -821,12 +906,14 @@ class PeakOTron:
                 )
         
         
-        
         ax.set_xlabel("Peak Index", fontsize=self._plot_fontsize)
-        ax.set_ylabel("$\\langle Q_{{peak}} \\rangle$", fontsize=self._plot_fontsize)
+        ax.set_ylabel("Peak Position [Bin]", fontsize=self._plot_fontsize)
+#         ax.set_ylabel("$\\langle Q_{{peak}} \\rangle$", fontsize=self._plot_fontsize)
         ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
         ax.tick_params(axis="y", labelsize=self._plot_fontsize)
         ax.grid(which="both")
+        ax.set_ylim(y_limits)
+        ax.set_xlim(x_limits)
         ax.legend(fontsize=self._plot_legendfontsize)
         
         
@@ -843,7 +930,7 @@ class PeakOTron:
         
         
         
-    def PlotDCR(self, figsize=(10.0, 10.0), save_directory=None, label = "Histogram"):
+    def PlotDCR(self, figsize=(10.0, 10.0),  save_directory=None, label = "Histogram"):
         
                 
         fig = plt.figure(figsize=figsize)
@@ -853,15 +940,12 @@ class PeakOTron:
         bin_numbers = self._hist_data["bin_numbers"]
         bw = self._hist_data["bw"]
         
-        _offset = self._hist_data["bc_min"]
-        _scale =  self._hist_data["bw"]
         
-        
-        Q_0 = (self._prefit_values["Q_0"] - _offset)/_scale
-        G = self._prefit_values["G"]/_scale
+        Q_0 = self._prefit_values_bin["Q_0"]
+        G = self._prefit_values_bin["G"]
       
-        DCR = self._prefit_values["DCR"]
-        dDCR = self._prefit_errors["DCR"]
+        DCR = self._prefit_values_bin["DCR"]
+        dDCR = self._prefit_errors_bin["DCR"]
         
         
         K = (bin_numbers - Q_0)/G
@@ -869,7 +953,7 @@ class PeakOTron:
         
         cond_NN = (K>=0.45)&(K<=0.55)
         cond_Nc = (K<=0.5)
-        cond_DCR = (K<=1.8)
+        cond_DCR = (K<=2.5)
         
         NN_sum = max(np.sum(counts[cond_NN]),1)
         NN = np.mean(counts[cond_NN])
@@ -882,10 +966,11 @@ class PeakOTron:
 
         
 
-        ax.plot(K[cond_DCR], 
+        ax.step(K[cond_DCR], 
                 counts[cond_DCR], 
                 label="{:s}".format(label), 
                 color="C0", 
+                where="mid",
                 lw=5)
     
     
@@ -897,8 +982,9 @@ class PeakOTron:
                      hatch="///",
                      edgecolor="r",
                      lw=3,
-                     label = "$N_{{K \leq 0.5}}$={:d} events".format(int(Nc)) )
-        
+                     step="mid",
+                     #label = "$N_{{K \leq 0.5}}$={:d} events".format(int(Nc)) )
+                     label = "$N_{{K \leq 0.5}}$")
         
         ax.fill_between(
                      K[cond_NN],
@@ -906,7 +992,9 @@ class PeakOTron:
                      color='green',
                      edgecolor="g",
                      alpha=0.3,
-                     label = "$\\langle N \\rangle_{{0.45 < K \leq 0.55}}$={:3.1f} events".format(NN))
+                     step="mid",
+                     #label = "$\\langle N \\rangle_{{0.45 < K \leq 0.55}}$={:3.1f} events".format(NN))
+                     label = "$\\langle N \\rangle_{{0.45 < K \leq 0.55}}$")
     
         legend_title="$DCR$={:s} $\pm$ {:s} MHz".format(LatexFormat(DCR*1e3), LatexFormat(dDCR*1e3))
         
@@ -916,7 +1004,6 @@ class PeakOTron:
         ax.set_ylabel("Counts", fontsize=self._plot_fontsize)
         ax.tick_params(axis="x", which='both', labelsize=self._plot_fontsize, rotation=45)
         ax.tick_params(axis="y", which='both', labelsize=self._plot_fontsize)
-#         ax.set_xlim([0,1])
         ax.set_yscale("log")
         ax.legend(title=legend_title, title_fontsize=int(self._plot_legendfontsize), fontsize=int(self._plot_legendfontsize))
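
For reference, a sketch of the gain-normalised axis and the two counting regions used above (Q_0 and G are the prefit pedestal and gain in bin units; the values are illustrative):

import numpy as np

counts = np.random.poisson(50, size=300).astype(float)   # toy spectrum
bin_numbers = np.arange(counts.size, dtype=float)

Q_0, G = 40.0, 25.0
K = (bin_numbers - Q_0)/G                                # charge in units of the gain

NN = np.mean(counts[(K >= 0.45) & (K <= 0.55)])          # mean counts around K = 0.5
Nc = np.sum(counts[K <= 0.5])                            # integral below half a photoelectron
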
             
@@ -949,7 +1036,8 @@ class PeakOTron:
                 linewidth_main = 5,  
                 x_limits = [None, None],
                 display=True,
-                prefit=False
+                prefit=False,
+                plot_in_bins=False
                ):
     
     
@@ -968,28 +1056,59 @@ class PeakOTron:
         if(residualticksize is None):
             residualticksize =  int(0.8*self._plot_fontsize)
             
-        x = self._hist_data["bin_centres"]
-        y = self._hist_data["counts"]
-        y_err = self._hist_data["counts_error"]
-        bw = self._hist_data["bw"]
-        N = np.sum(y)
+
+            
+            
+            
+        if(plot_in_bins):
+            x_all = self._hist_data["bin_numbers"]
+            bw = 1
+        else:
+            x_all = self._hist_data["bin_centres"]
+            bw = self._hist_data["bw"]
+            
+            
+ 
+        y_all = self._hist_data["counts"]
+        y_err_all = self._hist_data["counts_error"]
+        
+        Q_0_prefit = self._prefit_values_bin["Q_0"]
+        sigma_0_prefit = self._prefit_values_bin["sigma_0"]
+        
+        
+        if(self._fit_kwargs["truncate_nsigma0_do"] is not None):
+            lim_nsigma0 =  Q_0_prefit - self._fit_kwargs["truncate_nsigma0_do"]*sigma_0_prefit
+            cond_sel_nsigma0 = (self._hist_data["bin_numbers"]>lim_nsigma0)
+          
+        else:
+            cond_sel_nsigma0 = np.full(len(x_all), True).astype(bool)
+            
+        x = x_all[cond_sel_nsigma0]
+        y = y_all[cond_sel_nsigma0]
+        y_err = y_err_all[cond_sel_nsigma0]
         
-        y_hat = N*bw*self.GetModel(x, prefit)
+        y_hat = self.GetModel(x, prefit=prefit, bin_units=plot_in_bins)*bw
+        y_hat_err = np.sqrt(y_hat)
         
-        chi2, ndf = self.GetChi2(prefit)
+        
+        chi2, ndf = self.GetChi2(prefit=prefit)
             
         if(xlabel is None):
-            xlabel = "Q"
+            if(plot_in_bins):
+                xlabel="Bin"
+            else:
+                xlabel = "Q"
             
         fig = plt.figure(figsize=figsize)
         gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1])
         ax0 = plt.subplot(gs[0])   
         if(label is None):
             label = "Histogram" 
-        label="{:s}, $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(y))))
-        ax0.plot(x, y, label=label, lw=5, color="C0")
-        ax0.plot(x, y_hat, linestyle="--", label="Model", lw=5, color="C1")
-        ax0.plot([], [], ' ', label="$\\frac{{\\chi^{{2}}}}{{NDF}}$ = {:s}".format(LatexFormat(chi2/ndf)))
+#         label="{:s},\n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(y))))
+        label = "{:s}".format(label)
+        ax0.step(x_all, y_all, label=label, lw=5, color="C0", where="mid")
+        ax0.plot(x, y_hat, linestyle="--", label="Fit", lw=5, color="C1")
+        ax0.plot([], [], ' ', label="$\\chi^{{2}}$/NDF = {:s}".format(LatexFormat(chi2/ndf)))
 
         
         ax0.set_ylabel("Counts".format(xlabel, xlabel), fontsize=labelsize)
@@ -1012,13 +1131,12 @@ class PeakOTron:
 
 
         ax1 = plt.subplot(gs[1], sharex = ax0)
-        cond_sel = (y_err>0) & (y>0)
         
-        ax1.scatter(x[cond_sel], (y[cond_sel] - y_hat[cond_sel])/(y_err[cond_sel]), color="C1")    
+        ax1.scatter(x, (y - y_hat)/(y_hat_err), color="C1") 
 
         ax1.tick_params(axis="x", labelsize=ticksize, rotation=45)
         ax1.tick_params(axis="y", labelsize=ticksize)
-        ax1.set_ylabel("Residuals [$\sigma$]", fontsize=labelsize)
+        ax1.set_ylabel("Pull", fontsize=labelsize)
         ax1.set_xlabel(xlabel, fontsize=labelsize)
         ax1.set_ylim(residual_limits)
         ax1.set_yticks(np.arange(int(np.floor(np.min(residual_limits))), 
@@ -1034,12 +1152,13 @@ class PeakOTron:
 
         fig.tight_layout()
         
-        mf = ScalarFormatter(useMathText=True)
-        mf.set_powerlimits((-2,2))
-        plt.gca().xaxis.set_major_formatter(mf)
-        offset = ax1.xaxis.get_offset_text()
-        offset.set_size(int(0.8*ticksize))
-        
+        if(not plot_in_bins):
+            mf = ScalarFormatter(useMathText=True)
+            mf.set_powerlimits((-2,2))
+            plt.gca().xaxis.set_major_formatter(mf)
+            offset = ax1.xaxis.get_offset_text()
+            offset.set_size(int(0.8*ticksize))
+
 
 
         fig.subplots_adjust(hspace=.0)
@@ -1079,191 +1198,48 @@ class PeakOTron:
     ##
     ########################################################################################################
         
-            
-    def Fit(self, data, **kwargs):
-        
-        ########################################################################################################
-        ## Preparation
-        ########################################################################################################
-        ##
-        ## 1. Data dictionaries are initialised if previously filled.
-        ## 2. tau, tauR, t_0 and t_gate are checked to ensure they are provided to the program. 
-        ## 3. Perform checks of input parameters to ensure no disallowed values 
-        ########################################################################################################
-        
-        self.InitKwargs()
-        
-        
-        if not set(kwargs.keys()).issubset(self._fit_kwargs.keys()):
-            wrong_keys = list(set(kwargs.keys()).difference(set(self._fit_kwargs.keys())))
-            
-            raise Exception ("Arguments {:s} not recognised.".format(",".join(wrong_keys)))
-            
-
-        if not "tau" in kwargs.keys():
-            raise Exception ("Please provide 'tau' (slow component of SiPM pulse in nanoseconds).")
-            
-        elif(not isinstance(kwargs["tau"], float) or kwargs["tau"]<0):
-            raise Exception ("Please set 'tau' (slow component of SiPM pulse in nanoseconds) to a positive float.")   
-            
-            
-        if not "tau_R" in kwargs.keys():
-            raise Exception ("Please provide 'tau_R' (recovery time of SiPM in nanoseconds).")
-            
-        elif(not isinstance(kwargs["tau_R"], float) or kwargs["tau_R"]<0):
-            raise Exception ("Please set 'tau' (slow component of SiPM pulse in nanoseconds) to a positive float.") 
-            
-            
-        if not "t_0" in kwargs.keys():
-            raise Exception ("Please provide 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds).")
-            
-        elif(kwargs["t_0"]<0):
-            raise Exception ("Please set 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds) to a positive float.")   
-            
-            
-            
-        if not "t_gate" in kwargs.keys():
-            raise Exception ("Please provide 't_gate' (length of SiPM pulse integration gate) in nanoseconds.")
-            
-        elif(kwargs["t_0"]<0):
-            raise Exception ("Please set 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds) to a positive float.")    
-            
-            
-       
-        self._fit_kwargs.update(kwargs)
-        
-
-
-        if self._fit_kwargs["truncate_nG"] is not None:
-            if(not isinstance(self._fit_kwargs["truncate_nG"], float)):
-                raise Exception ("Please set truncate_nG to a positive float which represents the number of estimated units of gain before the estimated pedestal which will be excluded from the minimisation process.")   
-            
-            if(self._fit_kwargs["truncate_nG"]<0):
-                raise Exception ("Please set truncate_nG to a positive float which represents the number of estimated units of gain before the estimated pedestal which will be excluded from the minimisation process.")   
-                             
-
-        if(not isinstance(self._fit_kwargs["peak_dist_nG"], float)):
-            raise Exception ("Please set peak_dist_nG to a positive float between 0 and 1 which represents the minimum distance as a fraction of one gain unit required between peaks")
-        elif(self._fit_kwargs["peak_dist_nG"]<=0 or self._fit_kwargs["peak_dist_nG"]>=1):
-               raise Exception ("Please set peak_dist_nG to a positive float between 0 and 1 which represents the minimum distance as a fraction of one gain unit required between peaks")
-                
-    
-        if self._fit_kwargs["bw"] is not None:
-            if(not isinstance(self._fit_kwargs["bw"], float)):
-                raise Exception ("Please set bw (bin width) to a positive float." )
-            if(self._fit_kwargs["bw"]<=0):
-                raise Exception ("Please set bw (bin width) to a positive float." )
-                
-
-        
-
-        if(self._verbose):
-            print("Prefitting...")
-            
-            
-        ########################################################################################################
-        ## Prefitting routine applied
-        ########################################################################################################
-    
-      
-        t_0 = time.time()
-        self.GetPreFit(data, **self._fit_kwargs)
-        t_1 = time.time()
         
-        self._time_prefit = t_1-t_0
-        
-        if(self._verbose):
-            print("Prefitting complete.")
-            
-        
-        _offset = self._hist_data["bc_min"]
-        _scale =  self._hist_data["bw"]
-            
-        if(self._fit_kwargs["prefit_only"]):
-            
-            _prefit_values_temp = self._prefit_values.copy()
-            _prefit_errors_temp = self._prefit_errors.copy()
-    
-            for key, value in _prefit_values_temp.items():
-                if(key=="Q_0"):
-                    _prefit_values_temp[key] = _offset + value*_scale
-                elif(key=="G"): 
-                    _prefit_values_temp[key] = value*_scale
-                elif(key=="sigma_0"):
-                    _prefit_values_temp[key] = value*_scale
-                elif(key=="sigma_1"):
-                    _prefit_values_temp[key] = value*_scale
-                else:
-                    _prefit_values_temp[key] = value
-
-            for key, value in _prefit_errors_temp.items():
-                if(key=="Q_0"):
-                    _prefit_errors_temp[key] = value*_scale
-                elif(key=="G"):
-                    _prefit_errors_temp[key] = value*_scale
-                elif(key=="sigma_0"):
-                    _prefit_errors_temp[key] = value*_scale
-                elif(key=="sigma_1"):
-                    _prefit_errors_temp[key] = value*_scale
-                else:
-                    _prefit_errors_temp[key] = value
-
-            self._prefit_values.update(_prefit_values_temp)
-            self._prefit_errors.update(_prefit_errors_temp)
-            
-
-
-            
-        else:
-            
-            
-            
-            ########################################################################################################
-            ## Truncate fit if supplied factor.
-            ########################################################################################################
+    def __MinuitFit(self, f_DRM, A):
         
-
-            f_DRM = BinnedLH(DRM, self._hist_data["bin_numbers"], self._hist_data["counts"], 1)
-
-
-
-
+            t_0 = time.time()
             ########################################################################################################
             ## Inititalise Minuit with hypothesis of no dark counts or afterpulsing. 
             ########################################################################################################
 
             m_DRM = Minuit(f_DRM,
-                             Q_0 = self._prefit_values["Q_0"],
-                             G = self._prefit_values["G"],
-                             mu = self._prefit_values["mu"],
-                             lbda = self._prefit_values["lbda"],
-                             sigma_0 = self._prefit_values["sigma_0"],
-                             sigma_1 = self._prefit_values["sigma_1"],
-                             tau_Ap = self._prefit_values["tau_Ap"],
-                             p_Ap = self._prefit_values["p_Ap"],
-                             DCR = 1e-9,
-                             tau = self._prefit_values["tau"],
-                             tau_R = self._prefit_values["tau_R"],
-                             t_gate = self._prefit_values["t_gate"],
-                             t_0  = self._prefit_values["t_0"],
-                             k_max = self._prefit_values["k_max"],
-                             k_max_dcr = self._prefit_values["k_max_dcr"],
-                             len_pad = self._prefit_values["len_pad"],
+                             Q_0 = self._prefit_values_bin["Q_0"],
+                             G = self._prefit_values_bin["G"],
+                             mu = self._prefit_values_bin["mu"],
+                             lbda = self._prefit_values_bin["lbda"],
+                             sigma_0 = self._prefit_values_bin["sigma_0"],
+                             sigma_1 = self._prefit_values_bin["sigma_1"],
+                             tau_Ap = self._prefit_values_bin["tau_Ap"],
+                             p_Ap = epsilon(),
+                             DCR = epsilon(),
+                             tau = self._prefit_values_bin["tau"],
+                             tau_R = self._prefit_values_bin["tau_R"],
+                             t_gate = self._prefit_values_bin["t_gate"],
+                             t_0  = self._prefit_values_bin["t_0"],
+                             k_max = self._prefit_values_bin["k_max"],
+                             k_max_dcr = self._prefit_values_bin["k_max_dcr"],
+                             len_pad = self._prefit_values_bin["len_pad"],
+                             A = A
                           )
 
 
 
 
 
-            m_DRM.errors["Q_0"]  =  self._prefit_errors["Q_0"]
-            m_DRM.errors["G"] =   self._prefit_errors["G"]
-            m_DRM.errors["mu"]  = self._prefit_errors["mu"]
-            m_DRM.errors["lbda"]  = self._prefit_errors["lbda"]
-            m_DRM.errors["sigma_0"]  = self._prefit_errors["sigma_0"]
-            m_DRM.errors["sigma_1"]  = self._prefit_errors["sigma_1"]
-            m_DRM.errors["tau_Ap"]  = self._prefit_errors["tau_Ap"]
-            m_DRM.errors["p_Ap"]  = self._prefit_errors["p_Ap"]
-            m_DRM.errors["DCR"]  = self._prefit_errors["DCR"]
+            m_DRM.errors["Q_0"]  =  self._prefit_errors_bin["Q_0"]
+            m_DRM.errors["G"] =   self._prefit_errors_bin["G"]
+            m_DRM.errors["mu"]  = self._prefit_errors_bin["mu"]
+            m_DRM.errors["lbda"]  = self._prefit_errors_bin["lbda"]
+            m_DRM.errors["sigma_0"]  = self._prefit_errors_bin["sigma_0"]
+            m_DRM.errors["sigma_1"]  = self._prefit_errors_bin["sigma_1"]
+            m_DRM.errors["tau_Ap"]  = self._prefit_errors_bin["tau_Ap"]
+            m_DRM.errors["p_Ap"]  = self._prefit_errors_bin["p_Ap"]
+            m_DRM.errors["DCR"]  = self._prefit_errors_bin["DCR"]
+            m_DRM.errors["A"]  = np.sqrt(A)
 
 
             ########################################################################################################
@@ -1274,22 +1250,26 @@ class PeakOTron:
 
 
         
-            m_DRM.limits["G"] =  (epsilon(), None)
+            m_DRM.limits["G"] =  (1, None)
 
-            m_DRM.limits["mu"]  = (epsilon(), None)
+            m_DRM.limits["mu"]  = (0.01, None)
 
-            m_DRM.limits["lbda"]  = (epsilon(), 1-epsilon())
+            m_DRM.limits["lbda"]  = (1e-6, 1-1e-4)
 
-            m_DRM.limits["sigma_0"]  = (epsilon(), None)
+            m_DRM.limits["sigma_0"]  = (0.1, None)
 
-            m_DRM.limits["sigma_1"]  = (epsilon(), None)
+            m_DRM.limits["sigma_1"]  = (0.1, None)
 
-            m_DRM.limits["tau_Ap"]  = (epsilon(), self._prefit_values["t_gate"]/2-epsilon())
+            m_DRM.limits["tau_Ap"]  = (3, self._prefit_values_bin["t_gate"]/2)
 
-            m_DRM.limits["p_Ap"]  = (epsilon(), 1-epsilon())
-
-            m_DRM.limits["DCR"]  = (epsilon(), None)
+            m_DRM.limits["p_Ap"]  = (1e-6, 1-1e-4)
 
+            m_DRM.limits["DCR"]  = (1e-6, None)
+            
+#             m_DRM.limits["A"]  = (self._prefit_values_bin["A"] - 3*np.sqrt(self._prefit_values_bin["A"]), 
+#                                   self._prefit_values_bin["A"] + 3*np.sqrt(self._prefit_values_bin["A"]))
+            
+            m_DRM.limits["A"]  = (1,None)
 
 
             ########################################################################################################
@@ -1301,9 +1281,9 @@ class PeakOTron:
                 m_DRM.fixed["tau"]=True
             else:
                 m_DRM.fixed["tau"]=False
-                m_DRM.errors["tau"] = self._prefit_errors["tau"]
-                m_DRM.limits["tau"] = (max(epsilon(), self._prefit_values["tau"]-self._prefit_errors["tau"]),
-                                       max(epsilon(), self._prefit_values["tau"]+self._prefit_errors["tau"])
+                m_DRM.errors["tau"] = self._prefit_errors_bin["tau"]
+                m_DRM.limits["tau"] = (max(epsilon(), self._prefit_values_bin["tau"]-self._prefit_errors_bin["tau"]),
+                                       max(epsilon(), self._prefit_values_bin["tau"]+self._prefit_errors_bin["tau"])
                                       )
 
             if(self._fit_kwargs["t_gate_err"] is None):
@@ -1311,9 +1291,9 @@ class PeakOTron:
             else:
 
                 m_DRM.fixed["t_gate"]=False
-                m_DRM.errors["t_gate"] = self._prefit_errors["t_gate"]
-                m_DRM.limits["t_gate"] = (max(epsilon(), self._prefit_values["t_gate"]-self._prefit_errors["t_gate"]),
-                                          max(epsilon(), self._prefit_values["t_gate"]+self._prefit_errors["t_gate"])
+                m_DRM.errors["t_gate"] = self._prefit_errors_bin["t_gate"]
+                m_DRM.limits["t_gate"] = (max(epsilon(), self._prefit_values_bin["t_gate"]-self._prefit_errors_bin["t_gate"]),
+                                          max(epsilon(), self._prefit_values_bin["t_gate"]+self._prefit_errors_bin["t_gate"])
                                          )
 
 
@@ -1321,9 +1301,9 @@ class PeakOTron:
                 m_DRM.fixed["t_0"]=True
             else:
                 m_DRM.fixed["t_0"]=False
-                m_DRM.errors["t_0"] = self._prefit_errors["t_0"]
-                m_DRM.limits["t_0"] = (max(epsilon(), self._prefit_values["t_0"]-self._prefit_errors["t_0"]),
-                                          max(epsilon(), self._prefit_values["t_0"]+self._prefit_errors["t_0"])
+                m_DRM.errors["t_0"] = self._prefit_errors_bin["t_0"]
+                m_DRM.limits["t_0"] = (max(epsilon(), self._prefit_values_bin["t_0"]-self._prefit_errors_bin["t_0"]),
+                                          max(epsilon(), self._prefit_values_bin["t_0"]+self._prefit_errors_bin["t_0"])
                                           )
 
 
@@ -1332,9 +1312,9 @@ class PeakOTron:
                 m_DRM.fixed["tau_R"]=True
             else:
                 m_DRM.fixed["tau_R"]=False
-                m_DRM.errors["tau_R"] =  self._prefit_errors["tau_R"]
-                m_DRM.limits["tau_R"] = (max(epsilon(), self._prefit_values["tau_R"]-self._prefit_errors["tau_R"]),
-                                          max(epsilon(), self._prefit_values["tau_R"]+self._prefit_errors["tau_R"])
+                m_DRM.errors["tau_R"] =  self._prefit_errors_bin["tau_R"]
+                m_DRM.limits["tau_R"] = (max(epsilon(), self._prefit_values_bin["tau_R"]-self._prefit_errors_bin["tau_R"]),
+                                          max(epsilon(), self._prefit_values_bin["tau_R"]+self._prefit_errors_bin["tau_R"])
                                           )
 
 
@@ -1358,7 +1338,9 @@ class PeakOTron:
             m_DRM.fixed["p_Ap"]=True
             m_DRM.fixed["tau_Ap"]=True
             m_DRM.fixed["DCR"]=True
-
+            m_DRM.fixed["A"]=False  
+            
+            
             if(self._verbose):
                 print("Fitting basic light...")
 
@@ -1385,8 +1367,9 @@ class PeakOTron:
             m_DRM.fixed["p_Ap"]=False
             m_DRM.fixed["tau_Ap"]=False
             m_DRM.fixed["DCR"]=False
-            m_DRM.values["DCR"] = self._prefit_values["DCR"]
-            
+            m_DRM.values["DCR"] = self._prefit_values_bin["DCR"]
+            m_DRM.values["p_Ap"] = self._prefit_values_bin["p_Ap"]
+            m_DRM.fixed["A"]=False  
           
             m_DRM.strategy=1
             m_DRM.errordef=m_DRM.LIKELIHOOD
@@ -1411,34 +1394,351 @@ class PeakOTron:
             m_DRM.fixed["p_Ap"]=False
             m_DRM.fixed["tau_Ap"]=False
             m_DRM.fixed["DCR"]=False
-
+            m_DRM.fixed["A"]=False  
                 
-            m_DRM.strategy=2
+            m_DRM.strategy=1
             m_DRM.migrad(ncall = self._n_call_minuit, 
                          iterate= self._n_iterations_minuit)
                  
             m_DRM.hesse()
             
   
-            t_2 = time.time()
-            self._time_fit = t_2 - t_1
+            t_1 = time.time()
+  
             
-
+            time_fit = t_1 - t_0
             if(self._verbose):
                 print("Prefit took {:3.3f} s, Fit took {:3.3f} s".format(self._time_prefit, 
-                                                                         self._time_fit))
+                                                                         time_fit))
                 print(m_DRM)
                 
                 
-            self._m_DRM = m_DRM
+                
+          
+            
+            return m_DRM, time_fit
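
The staged strategy above (fit with the afterpulse and dark-count parameters frozen at epsilon, then release them and refit) can be illustrated with a toy least-squares problem in iminuit; the cost function and parameter names are invented for the illustration:

import numpy as np
from iminuit import Minuit

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 100)
y = 2.0*x + 1.0 + rng.normal(0, 0.5, size=x.size)

def cost(a, b):
    return np.sum((y - (a*x + b))**2)

m = Minuit(cost, a=1.0, b=0.0)
m.errordef = Minuit.LEAST_SQUARES
m.fixed["b"] = True      # stage 1: fit the slope with the offset frozen
m.migrad()
m.fixed["b"] = False     # stage 2: release everything and refit
m.migrad()
m.hesse()
print(m.values["a"], m.errors["a"])
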
+    
+    
+        
+        
+            
+    def Fit(self, data, **kwargs):
+        
+        ########################################################################################################
+        ## Preparation
+        ########################################################################################################
+        ##
+        ## 1. Data dictionaries are initialised if previously filled.
+        ## 2. tau, tau_R, t_0 and t_gate are checked to ensure they are provided to the program. 
+        ## 3. Perform checks of input parameters to ensure no disallowed values 
+        ########################################################################################################
+        
+        self.InitKwargs()
+        
+        
+        if not set(kwargs.keys()).issubset(self._fit_kwargs.keys()):
+            wrong_keys = list(set(kwargs.keys()).difference(set(self._fit_kwargs.keys())))
+            
+            raise Exception ("Arguments {:s} not recognised.".format(",".join(wrong_keys)))
+            
 
+        if not "tau" in kwargs.keys():
+            raise Exception ("Please provide 'tau' (slow component of SiPM pulse in nanoseconds).")
+            
+        elif(not isinstance(kwargs["tau"], float) or kwargs["tau"]<=0):
+            raise Exception ("Please set 'tau' (slow component of SiPM pulse in nanoseconds) to a positive float.")   
+            
+            
+        if not "tau_R" in kwargs.keys():
+            raise Exception ("Please provide 'tau_R' (recovery time of SiPM in nanoseconds).")
+            
+        elif(not isinstance(kwargs["tau_R"], float) or kwargs["tau_R"]<=0):
+            raise Exception ("Please set 'tau' (slow component of SiPM pulse in nanoseconds) to a positive float.") 
+            
+            
+        if not "t_0" in kwargs.keys():
+            raise Exception ("Please provide 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds).")
+            
+        elif(kwargs["t_0"]<=0):
+            raise Exception ("Please set 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds) to a positive float.")   
+            
+            
+            
+        if not "t_gate" in kwargs.keys():
+            raise Exception ("Please provide 't_gate' (length of SiPM pulse integration gate) in nanoseconds.")
+            
+        elif(kwargs["t_0"]<=0):
+            raise Exception ("Please set 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds) to a positive float.")    
+            
+
+        self._fit_kwargs.update(kwargs)
+        
+                   
+        if self._fit_kwargs["min_n_events_peak"] is not None:
+            if(not isinstance(self._fit_kwargs["min_n_events_peak"], int)):
+                raise Exception ("Please set max_n_events peak (max events to fit Gaus to peak) as an integer" )
+        else:
+            self._fit_kwargs["min_n_events_peak"] = self._min_n_events_peak
+            
+            
+        if self._fit_kwargs["truncate_nsigma0_up"] is not None:
+            
+            if(not isinstance(self._fit_kwargs["truncate_nsigma0_up"], float)):
+                raise Exception ("Please set truncate_nsigma0_up (# of sigma0 before pedestal to for Chi2/NDF to be evaluated) as a positive float" )
+                
+            elif(self._fit_kwargs["truncate_nsigma0_up"]<=0):
+                raise Exception ("Please set truncate_nsigma0_up  (# of sigma0 before pedestal to for Chi2/NDF to be evaluated) as a positive float" )
+            
+            
+        if self._fit_kwargs["truncate_nsigma0_do"] is not None:
+            
+            if(not isinstance(self._fit_kwargs["truncate_nsigma0_do"], float)):
+                raise Exception ("Please set truncate_nsigma0_do (max # of sigma0 before pedestal to for to be scanned) as a positive float between 0.0 and 4.0" )
+                
+            elif(self._fit_kwargs["truncate_nsigma0_do"]<=0 or self._fit_kwargs["truncate_nsigma0_do"]>4):
+                raise Exception ("Please set truncate_nsigma0_do  (# of sigma0 before pedestal to for Chi2/NDF to be evaluated) as a positive float between 0.0 and 4.0" )
+              
+            
+
+        if self._fit_kwargs["truncate_nsigma0_chi2scanlim"] is not None:
+            
+            if(not isinstance(self._fit_kwargs["truncate_nsigma0_chi2scanlim"], float)):
+                raise Exception ("Please set truncate_nsigma0_chi2scanlim (minimum Chi2/NDF for a fit to be selected in a scan) as a positive float" )
+                
+            elif(self._fit_kwargs["truncate_nsigma0_chi2scanlim"]<0):
+                raise Exception ("Please set truncate_nsigma0_chi2scanlim (minimum Chi2/NDF for a fit to be selected in a scan) as a positive float" )
+
+                    
+        if self._fit_kwargs["bw"] is not None:
+            if(not isinstance(self._fit_kwargs["bw"], float)):
+                raise Exception ("Please set bw (bin width) to a positive float." )
+            if(self._fit_kwargs["bw"]<=0):
+                raise Exception ("Please set bw (bin width) to a positive float." )
+                
+        if self._fit_kwargs["bin_0"] is not None:
+            if(not isinstance(self._fit_kwargs["bin_0"], float)):
+                raise Exception ("Please set bin_0 (first bin position) to a float." )
+  
+
+        if(self._verbose):
+            print("Prefitting...")
+            
+            
+        ########################################################################################################
+        ## Prefitting routine applied
+        ########################################################################################################
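+        # GetPreFit bins the data and estimates Q_0, G, sigma_0, sigma_1, mu, lbda and DCR in units of
+        # bin number; the estimates are stored in self._prefit_values_bin and self._hist_data.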
+    
+      
+        t_0 = time.time()
+        self.GetPreFit(data, **self._fit_kwargs)
+        t_1 = time.time()
+        
+        self._time_prefit = t_1-t_0
+        
+        if(self._verbose):
+            print("Prefitting complete.")
+            
+        
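+        # Conversion from bin units back to input units: positions (Q_0) map as bc_min + value*bw,
+        # while the gain and the widths (G, sigma_0, sigma_1) scale by the bin width bw alone.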
+        _offset = self._hist_data["bc_min"]
+        _scale =  self._hist_data["bw"]
+            
+        if(self._fit_kwargs["prefit_only"]):
+            
+            _prefit_values_temp = self._prefit_values_bin.copy()
+            _prefit_errors_temp = self._prefit_errors_bin.copy()
+    
+            for key, value in _prefit_values_temp.items():
+                if(key=="Q_0"):
+                    _prefit_values_temp[key] = _offset + value*_scale
+                elif(key=="G"): 
+                    _prefit_values_temp[key] = value*_scale
+                elif(key=="sigma_0"):
+                    _prefit_values_temp[key] = value*_scale
+                elif(key=="sigma_1"):
+                    _prefit_values_temp[key] = value*_scale
+                else:
+                    _prefit_values_temp[key] = value
+
+            for key, value in _prefit_errors_temp.items():
+                if(key=="Q_0"):
+                    _prefit_errors_temp[key] = value*_scale
+                elif(key=="G"):
+                    _prefit_errors_temp[key] = value*_scale
+                elif(key=="sigma_0"):
+                    _prefit_errors_temp[key] = value*_scale
+                elif(key=="sigma_1"):
+                    _prefit_errors_temp[key] = value*_scale
+                else:
+                    _prefit_errors_temp[key] = value
+
+            self._prefit_values.update(_prefit_values_temp)
+            self._prefit_errors.update(_prefit_errors_temp)
+            
+
+
+            
+        else:
+            
+            
+            
             ########################################################################################################
-            ##Rescale fitted parameters back to their input units. 
+            ## Truncate the fit range if a truncation factor has been supplied.
             ########################################################################################################
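+            # The truncation scan refits the spectrum starting 4*sigma_0 below the estimated pedestal and
+            # steps down in ~0.5*sigma_0 increments until truncate_nsigma0_do is reached. Unless an early-stop
+            # Chi2/NDF limit is supplied, the fit with the best Chi2/NDF around the pedestal is kept.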
+
+            if(self._fit_kwargs["truncate_nsigma0_do"] is not None):
+                    
+                    nsigma_0_scan_range = np.linspace(4.0, 
+                                                      self._fit_kwargs["truncate_nsigma0_do"], 
+                                                      np.floor((4-self._fit_kwargs["truncate_nsigma0_do"])/0.5).astype(int)+1)
+                    
+                    
+           
+                    if(self._verbose):
+                        print("Performing full truncation scan. Evaluating nsigma0 = {0} before pedestal".format(nsigma_0_scan_range))
+                        
+                        
+                    if(self._fit_kwargs["truncate_nsigma0_chi2scanlim"] is not None):
+                        if(self._verbose):
+                            print("Maximum Chi2/NDF for convergence: {:3.3f}".format(self._fit_kwargs["truncate_nsigma0_chi2scanlim"]))
+                    else:
+                        if(self._verbose):
+                            print("No Chi2/NDF limit supplied; the best fit in the scan will be taken.")
+                            
+                            
+                    if(self._fit_kwargs["truncate_nsigma0_up"] is not None):
+                        _nsigma0hi = self._fit_kwargs["truncate_nsigma0_up"]
+                    else:
+                        _nsigma0hi = 2
+                        
+                    if(self._verbose):
+                        print("Evaluating up to nsigma0 = {:3.3f} after pedestal".format(_nsigma0hi))
+                        
+                        
+   
+                    m_DRMs = np.full(len(nsigma_0_scan_range), None)
+                    chi2ndfs_range =  np.full(len(nsigma_0_scan_range), None)
+                    chi2ndfs_full =  np.full(len(nsigma_0_scan_range), None)
+                    time_fits = np.full(len(nsigma_0_scan_range), 0.0)
+                
+                    Q_0_prefit =  self._prefit_values_bin["Q_0"]
+                    sigma_0_prefit = self._prefit_values_bin["sigma_0"]
+                    
+                    fit_iters = 0
+
+
+
+                    for fit_i, _nsigma0low in enumerate(nsigma_0_scan_range):
+
+                        fit_iters+=1 
+
+                        lim_nsigma0 = Q_0_prefit - _nsigma0low*sigma_0_prefit
+                        
+                        
+                        print("Truncating fit, starting from {:3.3f} estimated sigma_0 from estimated pedestal ({:3.3f} bins) ...".format(_nsigma0low, lim_nsigma0))
+
+
+                        cond_sel_nsigma0 = (self._hist_data["bin_numbers"]>lim_nsigma0)
+
+
+
+                        f_DRM =  BinnedLH(DRM, 
+                                          self._hist_data["bin_numbers"][cond_sel_nsigma0],  
+                                          self._hist_data["counts"][cond_sel_nsigma0], 1)
+
+
+                        m_DRM, time_fit = self.__MinuitFit(f_DRM, np.sum(self._hist_data["counts"][cond_sel_nsigma0]))
+
+
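+                        # Evaluate the reduced Chi2 both in the window [-nsigma0_low, +nsigma0_hi]*sigma_0
+                        # around the pedestal and over the full spectrum above the truncation point.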
+                        chi2_range, ndof_range = self.GetChi2(pars = m_DRM.values.to_dict(),
+                                                             n_sigma_lims = [_nsigma0low, _nsigma0hi]
+                                                             )
+                        
+                        red_chi2_range = chi2_range/ndof_range
+                        
+                        
+                        chi2_full, ndof_full = self.GetChi2(pars = m_DRM.values.to_dict(),
+                                                             n_sigma_lims = [_nsigma0low, None]
+                                                             )
+                        
+                        red_chi2_full = chi2_full/ndof_full
 
 
+                        m_DRMs[fit_i] = m_DRM
+                        chi2ndfs_range[fit_i] = red_chi2_range
+                        chi2ndfs_full[fit_i] = red_chi2_full
+                        time_fits[fit_i] = time_fit
 
-            for key, value in m_DRM.values.to_dict().items():
+
+                        if(self._fit_kwargs["truncate_nsigma0_chi2scanlim"] is not None):
+                            if(red_chi2_full<self._fit_kwargs["truncate_nsigma0_chi2scanlim"]):
+                                if(self._verbose):
+                                    print("Fit chi2/NDF less than limit (fit: {:3.3f} < limit: {:3.3f}). Scan complete.".format(red_chi2_full, 
+                                                                                                                   self._fit_kwargs["truncate_nsigma0_chi2scanlim"]))
+
+                                self._m_DRM = deepcopy(m_DRM)
+                                self._time_fit = np.sum(time_fits)
+                                self._fit_kwargs["truncate_nsigma0_do"] = _nsigma0low
+                                
+
+                                break
+
+                            else:
+                                if(self._verbose):
+                                    print("Fit chi2/NDF greater than or equal to limit (fit: {:3.3f} >= limit: {:3.3f}). Performing next scan step...".format(red_chi2_full, 
+                                                                                                                      self._fit_kwargs["truncate_nsigma0_chi2scanlim"])) 
+                            
+                    
+                    else:
+                        # for-else: reached only if the scan was not stopped early by the Chi2/NDF limit.
+                        fit_i_best = np.argmin(chi2ndfs_range)
+                        if(self._verbose):
+                            print("Scan Summary:")
+                            for fit_i, _nsigma0 in enumerate(nsigma_0_scan_range):
+                                if(fit_i==fit_i_best):
+                                    is_best = True
+                                else:
+                                    is_best = False
+
+                                print("n_sigma0 = {:3.3f}\tchi2/NDF in pedestal range = {:3.3f}\tBest = {:s}".format(_nsigma0, chi2ndfs_range[fit_i], str(is_best)))
+
+
+                            print("Scan complete. Using best fit at {:3.3f}".format(nsigma_0_scan_range[fit_i_best]))
+
+                        self._m_DRM = deepcopy(m_DRMs[fit_i_best])
+                        self._time_fit = np.sum(time_fits)
+                        self._fit_kwargs["truncate_nsigma0_do"] = nsigma_0_scan_range[fit_i_best]
+
+            else:
+                  
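+                # No truncation scan requested: perform a single binned likelihood fit to the full spectrum.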
+                f_DRM = BinnedLH(DRM, 
+                                 self._hist_data["bin_numbers"], 
+                                 self._hist_data["counts"], 1)
+                
+                
+                m_DRM, time_fit = self.__MinuitFit(f_DRM, np.sum(self._hist_data["counts"]))
+                
+
+                
+                
+                self._time_fit = time_fit 
+                self._m_DRM = deepcopy(m_DRM)
+                
+
+            ########################################################################################################
+            ##Rescale fitted parameters back to their input units. 
+            ########################################################################################################
+
+
+            for key, value in self._m_DRM.values.to_dict().items():
                 if(key=="Q_0"):
                     self._fit_values[key] = _offset + value*_scale
                 elif(key=="G"):
@@ -1449,10 +1749,12 @@ class PeakOTron:
                     self._fit_values[key] = value*_scale
                 else:
                     self._fit_values[key] = value
+                    
+                self._fit_values_bin[key] = value
 
 
 
-            for key, value in m_DRM.errors.to_dict().items():
+            for key, value in self._m_DRM.errors.to_dict().items():
                 if(key=="Q_0"):
                     self._fit_errors[key] = value*_scale
                 elif(key=="G"):
@@ -1463,16 +1765,17 @@ class PeakOTron:
                     self._fit_errors[key] = value*_scale
                 else:
                     self._fit_errors[key] = value
+                
+                self._fit_errors_bin[key] = value
 
 
-
-            _prefit_values_temp = self._prefit_values.copy()
-            _prefit_errors_temp = self._prefit_errors.copy()
-
+            _prefit_values_temp = self._prefit_values_bin.copy()
+            _prefit_errors_temp = self._prefit_errors_bin.copy()
+    
             for key, value in _prefit_values_temp.items():
                 if(key=="Q_0"):
                     _prefit_values_temp[key] = _offset + value*_scale
-                elif(key=="G"):
+                elif(key=="G"): 
                     _prefit_values_temp[key] = value*_scale
                 elif(key=="sigma_0"):
                     _prefit_values_temp[key] = value*_scale
@@ -1495,13 +1798,8 @@ class PeakOTron:
 
             self._prefit_values.update(_prefit_values_temp)
             self._prefit_errors.update(_prefit_errors_temp)
-        
-        
-
-        
-        
-        
-
+            
+  
         
     ########################################################################################################
     ## GetPreFit(prefit=False)
@@ -1536,7 +1834,7 @@ class PeakOTron:
         ########################################################################################################
        
         
-        data, bw, nbins, bins = self.GetBins(data, kwargs["bw"], kwargs["bin_method"]) 
+        data, bw, nbins, bins = self.GetBins(data, kwargs["bw"], kwargs["bin_method"], kwargs["bin_0"]) 
          
         counts, _ = np.histogram(data, bins=bins)
         counts_error = np.where(counts<1, 1, np.sqrt(counts))
@@ -1617,7 +1915,7 @@ class PeakOTron:
         f_pedestal = lambda Q_0: (GP_gain(mu_unsub-Q_0, sigma, gamma) - G_fft)**2
         m_pedestal = Minuit(f_pedestal, Q_0 = max_peak)
         m_pedestal.errordef=m_pedestal.LEAST_SQUARES
-        m_pedestal.limits["Q_0"] = (epsilon(), max_peak+epsilon())
+        m_pedestal.limits["Q_0"] = (0, max_peak+epsilon())
         m_pedestal.migrad()
         Q_0_est = m_pedestal.values["Q_0"]
         
@@ -1680,22 +1978,24 @@ class PeakOTron:
             counts_bg[i_peak] = count_min_interpeak
                   
                 
-                
         try:
-                
             arg_sort_bg = np.argsort(idx_bg)
             idx_bg = idx_bg[arg_sort_bg]
             counts_bg = counts_bg[arg_sort_bg]
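+            # Anchor the background estimate at the first non-zero bin and interpolate linearly between
+            # the inter-peak minima; outside the sampled range the background defaults to zero.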
 
+            first_nz_bin = bin_numbers[np.argmax(counts>0)]
+            counts_first_nz_bin = counts[np.argmax(counts>0)] 
+
+            idx_bg = np.pad(idx_bg, (1,0), "constant", constant_values=first_nz_bin)
+            counts_bg = np.pad(counts_bg, (1,0), "constant", constant_values=counts_first_nz_bin)
+
+            f_interp_bg = interp1d(idx_bg, counts_bg, bounds_error=False, fill_value=(0,0))
 
-            idx_bg = np.pad(idx_bg, (1,1), "constant", constant_values=(0, nbins))
-            counts_bg = np.pad(counts_bg, (1,1), "constant", constant_values=(0, 0))
-            f_interp_bg = interp1d(idx_bg, np.where(counts_bg>0, counts_bg, 0), bounds_error=False, fill_value=(0,0))
             counts_bg = f_interp_bg(bin_numbers).astype(float)
             counts_bgsub = counts - counts_bg
             counts_bgsub = np.where(counts_bgsub<0, 0, counts_bgsub)
             counts_bgsub_error = np.where(counts_bgsub>1, np.sqrt(counts_bgsub), 1)
-            counts_bgsub_error = np.sqrt(counts_bgsub_error**2 + counts_error)
+            counts_bgsub_error = np.sqrt(counts_bgsub_error**2 + counts_error**2)
 
         except:
             if(self._verbose):
@@ -1753,66 +2053,24 @@ class PeakOTron:
         # - Fit line to variances to Get sigma0/sigma1
         ########################################################################################################
 
-
-        ####################
-        #MEAN IN RANGE METHOD
-        #####################
-        
-#         idx_peak_mus = np.full(len(idx_peaks), None)
-#         idx_peak_dmus = np.full(len(idx_peaks), None)
-#         idx_peak_sigmas =  np.full(len(idx_peaks), None)
-#         idx_peak_dsigmas =  np.full(len(idx_peaks), None)
-#         idx_peak_vars =  np.full(len(idx_peaks), None)
-#         idx_peak_dvars =  np.full(len(idx_peaks), None)
-#         cond_sel_peaks = np.full(len(idx_peaks), False)
-#         idx_peak_numbers= np.arange(0, len(idx_peaks)).astype(int)
-
-#         for p_i, peak in zip(idx_peak_numbers, idx_peaks):
-
-#             cond_sel_peak = (bin_numbers>peak - G_fft/2) & (bin_numbers<peak + G_fft/2)
-
-#             counts_bgsub_peak = counts_bgsub[cond_sel_peak]
-#             bin_numbers_peak = bin_numbers[cond_sel_peak]
-
-#             N_peak = np.sum(counts_bgsub_peak)
-
-#             if(N_peak>100):
-#                 cond_sel_peaks[p_i]=True
-#             else:
-#                 cond_sel_peaks[p_i]=False
-#                 continue
-
-
-#             mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak)
-#             sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak)
-
-#             dmu_peak = sigma_peak/np.sqrt(N_peak)
-#             dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2)
-
-
-#             idx_peak_mus[p_i] = mu_peak
-#             idx_peak_dmus[p_i] = dmu_peak
-
-
-#             idx_peak_sigmas[p_i] = sigma_peak
-#             idx_peak_dsigmas[p_i] = dsigma_peak
         
         
         ####################
         #ITERATIVE GAUS FIT METHOD
         #####################
         
-        idx_peak_mus = np.full(len(idx_peaks), None)
-        idx_peak_dmus = np.full(len(idx_peaks), None)
-        idx_peak_sigmas =  np.full(len(idx_peaks), None)
-        idx_peak_dsigmas =  np.full(len(idx_peaks), None)
-        idx_peak_vars =  np.full(len(idx_peaks), None)
-        idx_peak_dvars =  np.full(len(idx_peaks), None)
-        cond_sel_peaks = np.full(len(idx_peaks), False)
+
+        
+        idx_peak_mus = np.full(len(idx_peaks), np.nan)
+        idx_peak_dmus = np.full(len(idx_peaks), np.nan)
+        idx_peak_sigmas =  np.full(len(idx_peaks), np.nan)
+        idx_peak_dsigmas =  np.full(len(idx_peaks), np.nan)
+        idx_peak_vars =  np.full(len(idx_peaks), np.nan)
+        idx_peak_dvars =  np.full(len(idx_peaks), np.nan)
         idx_peak_numbers= np.arange(0, len(idx_peaks)).astype(int)
         
 
-        f_gaus = lambda x, mu, sigma: norm.pdf(x, loc=mu, scale=sigma)
+        f_gaus = lambda x, mu, sigma, A: A*norm.pdf(x, loc=mu, scale=sigma)
         
         for p_i, peak in zip(idx_peak_numbers, idx_peaks):
             
@@ -1824,13 +2082,8 @@ class PeakOTron:
             counts_bgsub_peak = counts_bgsub[cond_sel_peak]
             bin_numbers_peak = bin_numbers[cond_sel_peak]
 
-            N_peak = np.sum(counts_bgsub_peak)
+            N_peak = np.around(np.sum(counts_bgsub_peak), 0).astype(int)
 
-            if(N_peak>100):
-                cond_sel_peaks[p_i]=True
-            else:
-                cond_sel_peaks[p_i]=False
-                continue
                 
 
             mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak)
@@ -1839,86 +2092,94 @@ class PeakOTron:
             dmu_peak = sigma_peak/np.sqrt(N_peak)
             dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2)
             
-            delta_mu = np.inf
-            delta_sigma = np.inf
-                
-            try:    
-                
-                iter_count = 0
-                iter_failed = False
+            
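+            # If the peak contains too few background-subtracted events, fall back to the histogram mean
+            # and standard deviation instead of fitting a Gaussian.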
+            if(N_peak < kwargs["min_n_events_peak"]):
+                if(self._verbose):
+                    print("Peak {:d} had {:d} events, less than {:d} event limit. Using mean and standard deviation...".format(p_i, N_peak, kwargs["min_n_events_peak"]))
 
-                while(iter_count<10 and (delta_mu>1 and delta_sigma>1)):
+            else:        
+            
+                delta_mu = np.inf
+                delta_sigma = np.inf
 
-                    mu_mns2sigma = max(mu_peak-2*sigma_peak, min_rng_peak)
-                    mu_pls2sigma = min(mu_peak+2*sigma_peak, max_rng_peak)
-                    
-                    cond_sel_2sigma = (bin_numbers_peak>mu_mns2sigma) & (bin_numbers_peak<mu_pls2sigma)
-                    
-                    if(sum(cond_sel_2sigma)<3):
-                        iter_failed=True
-                        break
+                try:    
 
+                    iter_count = 0
+                    iter_failed = False
 
-                    f_gaus_peak = BinnedLH(f_gaus,
-                                       bin_numbers_peak[cond_sel_2sigma], 
-                                       counts_bgsub_peak[cond_sel_2sigma ],
-                                       1
-                                      )
+                    while(iter_count<10 and (delta_mu>0.01*G_fft and delta_sigma>0.01*G_fft)):
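+                        # Repeat the +/- 2 sigma Gaussian fit (at most 10 times) until the change in mu or in
+                        # sigma drops below 1% of the FFT gain estimate.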
 
-                    m_gaus_peak = Minuit(f_gaus_peak, mu=mu_peak, sigma=sigma_peak)
-                    m_gaus_peak.limits['mu'] = (mu_mns2sigma, mu_pls2sigma)
-                    m_gaus_peak.limits['sigma'] = (epsilon(), 2.0*sigma_peak)
-                    m_gaus_peak.errordef=m_pedestal.LIKELIHOOD
-                    m_gaus_peak.migrad()
-                    m_gaus_peak.hesse()
+                        mu_mns2sigma = max(mu_peak-2*sigma_peak, min_rng_peak)
+                        mu_pls2sigma = min(mu_peak+2*sigma_peak, max_rng_peak)
 
+                        cond_sel_2sigma = (bin_numbers_peak>mu_mns2sigma) & (bin_numbers_peak<mu_pls2sigma)
 
-                    mu_peak_i = m_gaus_peak.values["mu"]            
-                    sigma_peak_i = m_gaus_peak.values["sigma"]
+                        if(sum(cond_sel_2sigma)<3):
+                            iter_failed=True
+                            break
 
-                    dmu_peak_i = m_gaus_peak.errors["mu"]            
-                    dsigma_peak_i = m_gaus_peak.errors["sigma"]
 
-                    if(iter_count==0):
-                        delta_mu = mu_peak_i
-                        delta_sigma = sigma_peak_i
+                        f_gaus_peak = BinnedLH(f_gaus,
+                                           bin_numbers_peak[cond_sel_2sigma], 
+                                           counts_bgsub_peak[cond_sel_2sigma ],
+                                           1
+                                          )
 
-                    else:
+                        m_gaus_peak = Minuit(f_gaus_peak, mu=mu_peak, sigma=sigma_peak, A = sum(cond_sel_2sigma))
+                        m_gaus_peak.limits['mu'] = (mu_mns2sigma, mu_pls2sigma)
+                        m_gaus_peak.limits['sigma'] = (epsilon(), 2.0*sigma_peak)
+                        m_gaus_peak.limits["A"] = (max(N_peak - 3*np.sqrt(N_peak), 1),  N_peak + 3*np.sqrt(N_peak))
+                        m_gaus_peak.errordef=m_gaus_peak.LIKELIHOOD
+                        m_gaus_peak.migrad()
+                        m_gaus_peak.hesse()
+                        
 
-                        delta_mu = abs(mu_peak - mu_peak_i)
-                        delta_sigma = abs(sigma_peak - sigma_peak_i)
+                        mu_peak_i = m_gaus_peak.values["mu"]            
+                        sigma_peak_i = m_gaus_peak.values["sigma"]
 
+                        dmu_peak_i = m_gaus_peak.errors["mu"]            
+                        dsigma_peak_i = m_gaus_peak.errors["sigma"]
 
-                    mu_peak = mu_peak_i
-                    sigma_peak = sigma_peak_i
+                        if(iter_count==0):
+                            delta_mu = mu_peak_i
+                            delta_sigma = sigma_peak_i
 
-                    dmu_peak = dmu_peak_i
-                    dsigma_peak = dsigma_peak_i
+                        else:
 
-                    iter_count+=1
-                    
-                if(iter_failed):
+                            delta_mu = abs(mu_peak - mu_peak_i)
+                            delta_sigma = abs(sigma_peak - sigma_peak_i)
+
+
+                        mu_peak = mu_peak_i
+                        sigma_peak = sigma_peak_i
+
+                        dmu_peak = dmu_peak_i
+                        dsigma_peak = dsigma_peak_i
+
+                        iter_count+=1
+
+                    if(iter_failed):
+                        if(self._verbose):
+                            print("Iterative 2-sigma Gaus fit for Peak {:d} failed because the fit range went below 3 bins in size. Using mean and standard deviation.".format(p_i))
+
+                        mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak)
+                        sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak)
+
+                        dmu_peak = sigma_peak/np.sqrt(N_peak)
+                        dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2)
+
+
+
+                except:
                     if(self._verbose):
-                        print("Iterative 2-sigma Gaus fit for Peak {:d} failed because the fit range went below 3 bins in size. Using mean and standard deviation.".format(p_i))
-                    
+                        print("Iterative 2-sigma Gaus fit for Peak {:d} failed. Using mean and standard deviation...".format(p_i))
+
+
                     mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak)
                     sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak)
 
                     dmu_peak = sigma_peak/np.sqrt(N_peak)
                     dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2)
-                    
- 
-            
-            except:
-                if(self._verbose):
-                    print("Iterative 2-sigma Gaus fit for Peak {:d} failed. Using mean and standard deviation.".format(p_i))
-
-
-                mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak)
-                sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak)
-
-                dmu_peak = sigma_peak/np.sqrt(N_peak)
-                dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2)
                 
 
             idx_peak_mus[p_i] = mu_peak
@@ -1928,65 +2189,65 @@ class PeakOTron:
             idx_peak_dsigmas[p_i] = dsigma_peak
 
 
-
+        
+        
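+        # Keep only peaks with finite mean and width estimates and a width of more than 0.1 bins.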
+        cond_sel_peaks = (~np.isnan(idx_peak_mus)) & (~np.isnan(idx_peak_sigmas)) & (~np.isnan(idx_peak_dmus)) & (~np.isnan(idx_peak_dsigmas)) & (idx_peak_sigmas>0.1)
+        
         if(sum(cond_sel_peaks)<3):
-            if(self._verbose):
-                print("Not enough peaks to estimate starting parameters. Using nominal values")
-            Q_0 = idx_peaks[0]
-            dQ_0 = 0.05*G_fft
-            G = G_fft
-            dG = 0.05*G_fft
-            sigma_0 = 0.05*G_fft
-            dsigma_0 = 0.005*G_fft
-            sigma_1 = 0.01*G_fft
-            dsigma_1 = 0.001*G_fft
-
-        else:
-            idx_peak_vars[cond_sel_peaks] = idx_peak_sigmas[cond_sel_peaks]**2
-            idx_peak_dvars[cond_sel_peaks] = 2*idx_peak_sigmas[cond_sel_peaks]*idx_peak_dsigmas[cond_sel_peaks]
-
+            raise Exception("Less than three peaks found. Cannot proceed. Try reducing the min_n_events_peak variable.")
+        
+        
+        idx_peak_numbers = idx_peak_numbers[cond_sel_peaks]
+        idx_peak_mus = idx_peak_mus[cond_sel_peaks]
+        idx_peak_sigmas = idx_peak_sigmas[cond_sel_peaks]
+        idx_peak_dmus = idx_peak_dmus[cond_sel_peaks]
+        idx_peak_dsigmas = idx_peak_dsigmas[cond_sel_peaks]
 
+            
+  
+        idx_peak_vars = idx_peak_sigmas**2
+        idx_peak_dvars = 2*idx_peak_sigmas*idx_peak_dsigmas
+        
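+        # Robust (Huber) linear regressions across the surviving peaks:
+        #  - peak mean vs. peak index     -> pedestal Q_0 (intercept), with the slope fixed to the FFT gain estimate;
+        #  - peak variance vs. peak index -> sigma_0^2 (intercept) and sigma_1^2 (slope), with dvar = 2*sigma*dsigma.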
 
+        f_mu_lin = HuberRegression(lambda x, G, Q_0: Linear(x, G, Q_0), 
+                                   idx_peak_numbers, 
+                                   idx_peak_mus, 
+                                   idx_peak_dmus)
 
-            f_mu_lin = HuberRegression(lambda x, G, Q_0: Linear(x, G, Q_0), 
-                                       idx_peak_numbers[cond_sel_peaks], 
-                                       idx_peak_mus[cond_sel_peaks], 
-                                       idx_peak_dmus[cond_sel_peaks])
+        f_var_lin = HuberRegression(lambda x, var_1, var_0: Linear(x, var_1, var_0), 
+                                    idx_peak_numbers,
+                                    idx_peak_vars, 
+                                    idx_peak_dvars,
+                                   )   
 
-            f_var_lin = HuberRegression(lambda x, var_1, var_0: Linear(x, var_1, var_0), 
-                                        idx_peak_numbers[cond_sel_peaks],
-                                        idx_peak_vars[cond_sel_peaks], 
-                                        idx_peak_dvars[cond_sel_peaks],
-                                       )   
 
+        m_mu_lin = Minuit(f_mu_lin, Q_0 = Q_0, G = G_fft)
+        m_mu_lin.limits["Q_0"] = (epsilon(), nbins)
+        m_mu_lin.fixed["G"] = True
+        m_mu_lin.errordef = m_mu_lin.LEAST_SQUARES
+        m_mu_lin.migrad()
+        m_mu_lin.hesse()
 
-            m_mu_lin = Minuit(f_mu_lin, Q_0 = Q_0, G = G_fft)
-            m_mu_lin.limits["Q_0"] = (epsilon(), nbins)
-            m_mu_lin.fixed["G"] = True
-            m_mu_lin.errordef = m_mu_lin.LEAST_SQUARES
-            m_mu_lin.migrad()
-            m_mu_lin.hesse()
 
 
+        m_var_lin = Minuit(f_var_lin, var_0 = 1, var_1 = 1)
+        m_var_lin.limits["var_0"] = (epsilon(), nbins)
+        m_var_lin.limits["var_1"] = (epsilon(), nbins)
+        m_var_lin.errordef = m_var_lin.LEAST_SQUARES
+        m_var_lin.migrad()
+        m_var_lin.hesse()
 
-            m_var_lin = Minuit(f_var_lin, var_0 = 1, var_1 = 1)
-            m_var_lin.limits["var_0"] = (epsilon(), nbins)
-            m_var_lin.limits["var_1"] = (epsilon(), nbins)
-            m_var_lin.errordef = m_var_lin.LEAST_SQUARES
-            m_var_lin.migrad()
-            m_var_lin.hesse()
 
 
+        Q_0 = m_mu_lin.values["Q_0"]
+        dQ_0 = m_mu_lin.errors["Q_0"]
 
-            Q_0 = m_mu_lin.values["Q_0"]
-            dQ_0 = m_mu_lin.errors["Q_0"]
-            
 
-            sigma_0 =  np.sqrt(m_var_lin.values["var_0"])
-            dsigma_0 = (0.5/sigma_0)*m_var_lin.errors["var_0"]
+        sigma_0 =  np.sqrt(m_var_lin.values["var_0"])
+        dsigma_0 = (0.5/sigma_0)*m_var_lin.errors["var_0"]
 
-            sigma_1 =  np.sqrt(m_var_lin.values["var_1"])
-            dsigma_1 = (0.5/sigma_0)*m_var_lin.errors["var_1"]
+        sigma_1 =  np.sqrt(m_var_lin.values["var_1"])
+        dsigma_1 = (0.5/sigma_1)*m_var_lin.errors["var_1"]
             
             
         ########################################################################################################
@@ -2061,10 +2322,10 @@ class PeakOTron:
         
         mu_DCR = DCR*(kwargs["t_gate"] + kwargs["t_0"])
         
-        if(DCR!=DCR or DCR<1e-9):
+        if(DCR!=DCR or DCR<epsilon()):
             if(self._verbose):
-                print("DCR returned invalid, or less than 1e-9 GHz. Choosing DCR to be 1e-9 +/- 1e-9 GHz.")
-                DCR = 1e-9
+                print("DCR returned invalid, or less than machine epsilon. Choosing DCR to be machine epsilon...")
+                DCR = epsilon()
                 dDCR = 1e-9
                 
         
@@ -2102,7 +2363,6 @@ class PeakOTron:
 
         self._hist_data["peaks"]["peak_position"] = idx_peaks
         self._hist_data["peaks"]["peak_index"] = idx_peak_numbers
-        self._hist_data["peaks"]["cond_sel_peaks"] = cond_sel_peaks
         self._hist_data["peaks"]["peak_mean"] = idx_peak_mus
         self._hist_data["peaks"]["peak_mean_error"] = idx_peak_dmus
         self._hist_data["peaks"]["peak_variance"] = idx_peak_vars
@@ -2111,39 +2371,54 @@ class PeakOTron:
         self._hist_data["peaks"]["peak_std_deviation_error"] = idx_peak_dsigmas
         
 
-        self._prefit_values["Q_0"] = Q_0
-        self._prefit_values["G"] = G_fft
-        self._prefit_values["mu"] = mu_GP
-        self._prefit_values["lbda"] = lbda_GP
-        self._prefit_values["sigma_0"] = sigma_0
-        self._prefit_values["sigma_1"] = sigma_1
-        self._prefit_values["DCR"] = DCR
-        self._prefit_values["p_Ap"] = 0.00
-        self._prefit_values["tau_Ap"] = 5.0
-        self._prefit_values["tau"] = kwargs["tau"]
-        self._prefit_values["t_0"] = kwargs["t_0"]
-        self._prefit_values["t_gate"] = kwargs["t_gate"]
-        self._prefit_values["tau_R"] = kwargs["tau_R"]
-        self._prefit_values["k_max"]=k_max
-        self._prefit_values["k_max_dcr"]=k_max_dcr
-        self._prefit_values["len_pad"]=self._len_pad
-        
-        
-        
-        self._prefit_errors["Q_0"] = dQ_0
-        self._prefit_errors["G"] = 1
-        self._prefit_errors["mu"] = 0.1
-        self._prefit_errors["lbda"] = 0.01
-        self._prefit_errors["sigma_0"] = dsigma_0
-        self._prefit_errors["sigma_1"] = dsigma_1
-        self._prefit_errors["p_Ap"] = 0.05
-        self._prefit_errors["tau_Ap"] = 0.05
-        self._prefit_errors["tau"] = kwargs["tau_err"]
-        self._prefit_errors["t_0"] = kwargs["t_0_err"]
-        self._prefit_errors["tau_R"] = kwargs["tau_R_err"]
-        self._prefit_errors["t_gate"] = kwargs["t_gate_err"]
-        self._prefit_errors["DCR"] = dDCR
+        self._prefit_values_bin["Q_0"] = Q_0
+        self._prefit_values_bin["G"] = G_fft
+        self._prefit_values_bin["mu"] = mu_GP
+        self._prefit_values_bin["lbda"] = lbda_GP
+        self._prefit_values_bin["sigma_0"] = sigma_0
+        self._prefit_values_bin["sigma_1"] = sigma_1
+        self._prefit_values_bin["DCR"] = DCR
+        self._prefit_values_bin["p_Ap"] = 0.1
+        self._prefit_values_bin["tau_Ap"] = 5.0
+        self._prefit_values_bin["tau"] = kwargs["tau"]
+        self._prefit_values_bin["t_0"] = kwargs["t_0"]
+        self._prefit_values_bin["t_gate"] = kwargs["t_gate"]
+        self._prefit_values_bin["tau_R"] = kwargs["tau_R"]
+        self._prefit_values_bin["k_max"]=k_max
+        self._prefit_values_bin["k_max_dcr"]=k_max_dcr
+        self._prefit_values_bin["len_pad"]=self._len_pad
+        self._prefit_values_bin["A"]=np.sum(counts)
+        
+        
         
+        
+        
+        self._prefit_errors_bin["Q_0"] = dQ_0
+        self._prefit_errors_bin["G"] = 1
+        self._prefit_errors_bin["mu"] = 0.1
+        self._prefit_errors_bin["lbda"] = 0.01
+        self._prefit_errors_bin["sigma_0"] = dsigma_0
+        self._prefit_errors_bin["sigma_1"] = dsigma_1
+        self._prefit_errors_bin["p_Ap"] = 0.05
+        self._prefit_errors_bin["tau_Ap"] = 0.05
+        self._prefit_errors_bin["tau"] = kwargs["tau_err"]
+        self._prefit_errors_bin["t_0"] = kwargs["t_0_err"]
+        self._prefit_errors_bin["tau_R"] = kwargs["tau_R_err"]
+        self._prefit_errors_bin["t_gate"] = kwargs["t_gate_err"]
+        self._prefit_errors_bin["DCR"] = dDCR
+        self._prefit_errors_bin["A"] = np.sqrt(np.sum(counts))
+
 
-    
-