From 22e97e90c2ffeec1709fc7ea42b37653f194b549 Mon Sep 17 00:00:00 2001 From: Tobias Quadfasel <tobias.loesche@studium.uni-hamburg.de> Date: Thu, 19 Jan 2023 19:13:06 +0100 Subject: [PATCH] Some minor bug fixes and re-formatting to PEP8 style. Double-checked that reformatting does not influence results by comparing output of --- AdditionalPDFs.py | 95 +- HelperFunctions.py | 79 +- LossFunctions.py | 80 +- Model.py | 1255 ++++++++++--------- PeakOTron.py | 2912 ++++++++++++++++++++++---------------------- example.py | 96 +- 6 files changed, 2259 insertions(+), 2258 deletions(-) diff --git a/AdditionalPDFs.py b/AdditionalPDFs.py index 2934d4c..3c9fba2 100644 --- a/AdditionalPDFs.py +++ b/AdditionalPDFs.py @@ -1,11 +1,9 @@ import numpy as np -from scipy.stats import rv_discrete, rv_continuous, uniform +from scipy.stats import uniform import scipy.special as sc -import matplotlib.pyplot as plt - - +import scipy from scipy.stats._distn_infrastructure import ( - rv_discrete, _ncx2_pdf, _ncx2_cdf, get_distribution_names) + rv_discrete, get_distribution_names) class gpd_gen(rv_discrete): @@ -34,7 +32,8 @@ class gpd_gen(rv_discrete): def _logpmf(self, k, mu, lbda): mu_pls_klmb = mu + lbda*k - return np.log(mu) + sc.xlogy(k-1, mu_pls_klmb) - mu_pls_klmb - sc.gammaln(k+1) + return (np.log(mu) + sc.xlogy(k-1, mu_pls_klmb) + - mu_pls_klmb - sc.gammaln(k+1)) def _munp(self, n, mu, lbda): if n == 1: @@ -45,10 +44,10 @@ class gpd_gen(rv_discrete): gpoisson = gpd_gen(name='gpoisson') - + class borel_gen(rv_discrete): def _argcheck(self, mu): - return ((mu > 0) & (mu<1)) + return ((mu > 0) & (mu < 1)) def _logpmf(self, k, mu): n = k+1 @@ -58,68 +57,68 @@ class borel_gen(rv_discrete): def _pmf(self, k, mu): return np.exp(self._logpmf(k, mu)) -# def _rvs(self, mu, size=None, random_state=None): -# u = uniform.rvs(loc=0, scale = 1, size=size) -# cum = np.cumsum([self._pmf(_k, mu) for _k in range(0, 100)]) -# print(cum) -# rnd = [ np.argmax( cum>=_u ) for _u in u ] -# return rnd - + # def _rvs(self, mu, size=None, random_state=None): + # u = uniform.rvs(loc=0, scale = 1, size=size) + # cum = np.cumsum([self._pmf(_k, mu) for _k in range(0, 100)]) + # print(cum) + # rnd = [ np.argmax( cum>=_u ) for _u in u ] + # return rnd + def _rvs(self, mu, size=None, random_state=None, epsilon=1e-4): - _u = uniform.rvs(loc=0, scale = 1-epsilon, size=size) + _u = uniform.rvs(loc=0, scale=1-epsilon, size=size) _sum = 0 - _k=0 + _k = 0 _elem = [] _max_u = np.max(_u) - - while(_sum<_max_u): + + while (_sum < _max_u): _pmf = self._pmf(_k, mu) _elem.append(_pmf) - _sum+=_pmf - _k+=1 - + _sum += _pmf + _k += 1 + _cum = np.cumsum(_elem) - _rnd = [ np.argmax( _cum>=__u ) for __u in _u ] - - return _rnd + _rnd = [np.argmax(_cum >= __u) for __u in _u] + return _rnd def _stats(self, mu): _mu = 1/(1-mu) _var = mu/(1-mu)**3 tmp = np.asarray(mu) - mu_nonzero = ((tmp > 0) & (tmp<1)) - #g1 and g2: Lagrangian Probability Distributions, 978-0-8176-4365-2, page 159 - g1 = scipy._lib._util._lazywhere(mu_nonzero, (tmp,), lambda x: (1+2*x)/scipy.sqrt(x*(1-x)), np.inf) - g2 = scipy._lib._util._lazywhere(mu_nonzero, (tmp,), lambda x: 3 + (1 + 8*x+6*x**2)/(x*(1-x)), np.inf) + mu_nonzero = ((tmp > 0) & (tmp < 1)) + + # g1 and g2: Lagrangian Probability Distributions, + # 978-0-8176-4365-2, page 159 + g1 = scipy._lib._util._lazywhere(mu_nonzero, (tmp,), + lambda x: (1+2*x)/scipy.sqrt(x*(1-x)), + np.inf) + + g2 = scipy._lib._util._lazywhere( + mu_nonzero, (tmp,), + lambda x: 3 + (1 + 8*x+6*x**2)/(x*(1-x)), np.inf) + return _mu, _var, g1, g2 -borel= 
borel_gen(name='borel') - - - +borel = borel_gen(name='borel') + + class erlang_gen(rv_discrete): - - - def _pdf(self, x, a): # gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a) return np.exp(self._logpdf(x, a)) def _logpdf(self, k, mu, nu): - return sc.xlogy(a-1.0, x) - x - sc.gammaln(a) - - - - - - -# def _rvs(self, mu, nu, size=None, random_state=None): -# u = scipy.stats.uniform.rvs(loc=0, scale = 1, size=size) -# cum = np.cumsum([self._pmf(_k, mu, nu) for _k in range(0, 100)]) -# rnd = [ np.argmax( cum>=_u ) for _u in u ] -# return rnd + # TODO: x and a not defined! + return sc.xlogy(a - 1.0, x) - x - sc.gammaln(a) + + # def _rvs(self, mu, nu, size=None, random_state=None): + # u = scipy.stats.uniform.rvs(loc=0, scale = 1, size=size) + # cum = np.cumsum([self._pmf(_k, mu, nu) for _k in range(0, 100)]) + # rnd = [ np.argmax( cum>=_u ) for _u in u ] + # return rnd + pairs = list(globals().items()) _distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete) diff --git a/HelperFunctions.py b/HelperFunctions.py index ca68181..0274022 100644 --- a/HelperFunctions.py +++ b/HelperFunctions.py @@ -1,26 +1,24 @@ -from iminuit import Minuit from iminuit.util import describe -from scipy.interpolate import interp1d from astropy.stats import bootstrap import numpy as np import matplotlib.pyplot as plt +from scipy import special as sc -#HELPER FUNCTIONS - class FakeFuncCode: def __init__(self, f, prmt=None, dock=0, append=None): - #f can either be tuple or function object + # f can either be tuple or function object self.co_varnames = describe(f) self.co_argcount = len(self.co_varnames) self.co_argcount -= dock self.co_varnames = self.co_varnames[dock:] - if prmt is not None: #rename parameters from the front + if prmt is not None: # rename parameters from the front for i, p in enumerate(prmt): self.co_varnames[i] = p - if isinstance(append, str): append = [append] + if isinstance(append, str): + append = [append] if append is not None: old_count = self.co_argcount @@ -31,10 +29,8 @@ class FakeFuncCode: list(self.co_varnames[old_count:])) - - -def LatexFormat(value, scirange=[0.01,1000]): - if(np.abs(value)>scirange[0] and np.abs(value)<scirange[1]): +def LatexFormat(value, scirange=[0.01, 1000]): + if(np.abs(value) > scirange[0] and np.abs(value) < scirange[1]): float_str = r"${:3.3f}$".format(value) else: try: @@ -42,7 +38,8 @@ def LatexFormat(value, scirange=[0.01,1000]): base, exponent = float_str.split("E") float_str = r"${0} \times 10^{{{1}}}$".format(base, int(exponent)) except: - float_str=str(value) + # TODO: Bare except! Is this a ValueError? + float_str = str(value) return float_str @@ -56,22 +53,22 @@ def FormatExponent(ax, axis='y'): ax_axis = ax.yaxis x_pos = 0.0 y_pos = 1.0 - horizontalalignment='left' - verticalalignment='bottom' + horizontalalignment = 'left' + verticalalignment = 'bottom' else: ax_axis = ax.xaxis x_pos = 1.0 y_pos = -0.05 - horizontalalignment='right' - verticalalignment='top' + horizontalalignment = 'right' + verticalalignment = 'top' # Run plt.tight_layout() because otherwise the offset text doesn't update plt.tight_layout() - ##### THIS IS A BUG - ##### Well, at least it's sub-optimal because you might not - ##### want to use tight_layout(). If anyone has a better way of - ##### ensuring the offset text is updated appropriately - ##### please comment! + # THIS IS A BUG + # Well, at least it's sub-optimal because you might not + # want to use tight_layout(). 
If anyone has a better way of + # ensuring the offset text is updated appropriately + # please comment! # Get the offset value offset = ax_axis.get_offset_text().get_text() @@ -80,15 +77,15 @@ def FormatExponent(ax, axis='y'): # Get that exponent value and change it into latex format minus_sign = u'\u2212' expo = np.float(offset.replace(minus_sign, '-').split('e')[-1]) - offset_text = r'x$\mathregular{10^{%d}}$' %expo + offset_text = r'x$\mathregular{10^{%d}}$' % expo # Turn off the offset text that's calculated automatically ax_axis.offsetText.set_visible(False) # Add in a text box at the top of the y axis ax.text(x_pos, y_pos, offset_text, transform=ax.transAxes, - horizontalalignment=horizontalalignment, - verticalalignment=verticalalignment) + horizontalalignment=horizontalalignment, + verticalalignment=verticalalignment) return ax @@ -96,11 +93,9 @@ def Bootstrap(data, statistic, n_bootstrap, alpha=0.95): if not (0 < alpha < 1): raise ValueError("confidence level must be in (0, 1)") - if len(data) < 1: raise ValueError("data must contain at least one measurement.") - boot_stat = bootstrap(data, n_bootstrap, bootfunc=statistic) stat_data = statistic(data) @@ -110,56 +105,58 @@ def Bootstrap(data, statistic, n_bootstrap, alpha=0.95): z_score = np.sqrt(2.0)*sc.erfinv(alpha) conf_interval = est_stat + z_score*np.array((-std_err, std_err)) - - return est_stat, std_err, conf_interval + return est_stat, std_err, conf_interval def Linear(x, m, c): return m*x + c + def HistMoment(counts, bin_centres, i, mu): return np.sum(counts*(bin_centres-mu)**i) + def HistMean(counts, bin_centres): N = np.sum(counts) mu = HistMoment(counts, bin_centres, 1, 0)/N return mu - + + def HistStd(counts, bin_centres): N = np.sum(counts) - mu = HistMean(counts, bin_centres) + mu = HistMean(counts, bin_centres) var = HistMoment(counts, bin_centres, 2, mu)/(N-1) sigma = np.sqrt(var) return sigma + def HistSkew(counts, bin_centres): N = np.sum(counts) mu = HistMean(counts, bin_centres) - gamma = (N*np.sqrt((N-1))/(N-2))*HistMoment(counts, bin_centres, 3, mu)/(HistMoment(counts, bin_centres, 2, mu)**(1.5)) + gamma = ((N*np.sqrt((N-1))/(N-2))*HistMoment(counts, bin_centres, 3, mu) + / (HistMoment(counts, bin_centres, 2, mu)**(1.5))) + return gamma +def f_tau(v, v_breakdown, v_0): + c_tau = (v - v_breakdown)/v_0 + result = -1/np.log((1-np.exp(c_tau*np.exp(-1)))/(1 - np.exp(c_tau))) + return result def GP_lbda(mu, sigma, gamma): - lbda = 0.5*(((mu*gamma)/(sigma))- 1) + lbda = 0.5*(((mu*gamma)/(sigma)) - 1) return lbda + def GP_gain(mu, sigma, gamma): lbda = GP_lbda(mu, sigma, gamma) gain = (sigma**2/mu)*((1 - lbda)**2) return gain + def GP_mu(mu, sigma, gamma): lbda = GP_lbda(mu, sigma, gamma) mu_gp = (1/(1-lbda))*(mu**2/sigma**2) return mu_gp - - - - - - - - - diff --git a/LossFunctions.py b/LossFunctions.py index 9f7fa45..b56c227 100644 --- a/LossFunctions.py +++ b/LossFunctions.py @@ -1,15 +1,9 @@ import numpy as np -from scipy.interpolate import interp1d -from iminuit.util import describe from Model import epsilon from HelperFunctions import FakeFuncCode -import matplotlib.pyplot as plt - - - - + + class BinnedLH: - def __init__(self, f, bin_centres, counts, bw): self.f = f @@ -18,44 +12,34 @@ class BinnedLH: self.dx = bw self.counts = counts self.N = np.sum(counts) - + self.last_arg = None self.func_code = FakeFuncCode(f, dock=True) - self.n_calls=0 + self.n_calls = 0 self.eps = epsilon() - self.mask = (self.counts>0) - - + self.mask = (self.counts > 0) def __call__(self, *arg): self.last_arg = arg - - - y_hat = 
self.f(self.x, *arg) - + y_hat = self.f(self.x, *arg) E = y_hat*self.dx h = self.counts mask = self.mask - - nlogL= np.zeros(self.len_x) - - nlogL[mask] = h[mask]*(np.log(E[mask]) - np.log(h[mask])) + (h[mask]-E[mask]) - nlogL[~mask] = -E[~mask] + nlogL = np.zeros(self.len_x) + + nlogL[mask] = (h[mask]*(np.log(E[mask]) - np.log(h[mask])) + + (h[mask]-E[mask])) - nlogL = -np.sum(nlogL) - - self.n_calls+=1 - + nlogL[~mask] = -E[~mask] + nlogL = -np.sum(nlogL) + self.n_calls += 1 return nlogL - - - - + + class Chi2Regression: - def __init__(self, f, x, y, y_err, epsilon=1.35): self.f = f @@ -63,27 +47,21 @@ class Chi2Regression: self.y = y self.y_err = y_err self.eps = np.finfo(np.float64).eps * 10 - self.y_err[self.y_err<self.eps] = self.eps + self.y_err[self.y_err < self.eps] = self.eps self.last_arg = None self.func_code = FakeFuncCode(f, dock=True) self.ndof = len(self.y) - (self.func_code.co_argcount - 1) - def __call__(self, *arg): - + self.last_arg = arg - - y_hat = self.f(self.x, *arg) - + y_hat = self.f(self.x, *arg) loss = ((y_hat - self.y)/(self.y_err))**2 - + return np.sum(loss) - - - - + + class HuberRegression: - def __init__(self, f, x, y, y_err, delta=1.345): self.f = f @@ -92,19 +70,19 @@ class HuberRegression: self.y_err = y_err self.delta = delta self.eps = np.finfo(np.float64).eps * 10 - self.y_err[self.y_err<self.eps] = self.eps + self.y_err[self.y_err < self.eps] = self.eps self.last_arg = None self.func_code = FakeFuncCode(f, dock=True) self.ndof = len(self.y) - (self.func_code.co_argcount - 1) - def __call__(self, *arg): - + self.last_arg = arg - + a = abs((self.y - self.f(self.x, *arg))/self.y_err) - cond_flag = (a>self.delta) - - loss = np.sum((~cond_flag) * (0.5 * a ** 2) - (cond_flag) * self.delta * (0.5 * self.delta - a), -1) - + cond_flag = (a > self.delta) + + loss = np.sum(((~cond_flag) * (0.5 * a ** 2) - (cond_flag) + * self.delta * (0.5 * self.delta - a)), -1) + return np.sum(loss) diff --git a/Model.py b/Model.py index c4b4a12..0532f35 100644 --- a/Model.py +++ b/Model.py @@ -1,820 +1,885 @@ from scipy.signal import fftconvolve as convolve -from itertools import product, combinations -from scipy.interpolate import interp1d from scipy.stats import poisson, norm, binom from AdditionalPDFs import gpoisson, borel -from scipy.integrate import simps -from numba import njit +from numba import njit import numpy as np from functools import lru_cache -import matplotlib.pyplot as plt -#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -######################################################################################################## +#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +############################################################################### ## DETECTOR RESPONSE MODEL -######################################################################################################## -#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +############################################################################### +#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - - -######################################################################################################## +############################################################################### ## VARIABLES: -######################################################################################################## - 
-########################################################################################################
-## Q: bin centres of studied charge distribution in arb. units;
-## Q_0: pedestal position in arb. units
-## G: gain in arb. units
-## mu: mean number of Geiger discharges in n.p.e
-## lbda: GP-branching parameter (unitless)
-## sigma_0: electonics noise/pedestal width in arb. units
-## sigma_1: gain spread width in arb. units;
-## tau_Ap: afterpulse time constant in nanoseconds;
-## p_Ap: afterpulse scaling parameter (unitless)
-## DCR: dark count rate in gigahertz;
-## tau: slow-component time constant of SiPM pulse in nanoseconds
-## tau_R: recovery time of SiPM in nanoseconds
-## t_gate: gate length in nanoseconds
-## t_0: pre-gate integration window in nanoseconds.
-## k_max: estimated maximum number of peaks to fit, based on percentile of Poisson distribution with estimated mean in PeakOTron.
-## len_pad: number of bins to pad for convolution. Default value: 100 bins.
-##
-########################################################################################################
-
-
-
-
-
-########################################################################################################
-## FUNCTION DEFINITIONS:
-########################################################################################################
-
-
-
-
-
-
-
-
-
+###############################################################################
+
+###############################################################################
+# Q: bin centres of studied charge distribution in arb. units
+# Q_0: pedestal position in arb. units
+# G: gain in arb. units
+# mu: mean number of Geiger discharges in n.p.e
+# lbda: GP-branching parameter (unitless)
+# sigma_0: electronics noise/pedestal width in arb. units
+# sigma_1: gain spread width in arb. units
+# tau_Ap: afterpulse time constant in nanoseconds
+# p_Ap: afterpulse scaling parameter (unitless)
+# DCR: dark count rate in gigahertz
+# tau: slow-component time constant of SiPM pulse in nanoseconds
+# tau_R: recovery time of SiPM in nanoseconds
+# t_gate: gate length in nanoseconds
+# t_0: pre-gate integration window in nanoseconds
+# k_max: estimated maximum number of peaks to fit, based on percentile of
+#        Poisson distribution with estimated mean in PeakOTron.
+# len_pad: number of bins to pad for convolution. Default value: 100 bins. 
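+#
+# Worked example (hypothetical numbers, for orientation only): with a pedestal
+# at Q_0 = 100.0 and a gain of G = 20.0 (same arb. charge units as Q), a bin
+# centre at Q = 160.0 corresponds to k = (Q - Q_0)/G = 3.0 n.p.e, i.e. the
+# centre of the third photoelectron peak.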
+#
+###############################################################################


+###############################################################################
+# FUNCTION DEFINITIONS:
+###############################################################################


-########################################################################################################
-## epsilon()
-########################################################################################################
-##
-## Returns machine epsilon for thresholding zeros
-##
-########################################################################################################
+###############################################################################
+# epsilon()
+###############################################################################
+#
+# Returns machine epsilon for thresholding zeros
+#
+###############################################################################

-@njit('float64()')
+@njit("float64()")
 def epsilon():
     return np.finfo(np.float64).eps


-########################################################################################################
-## sigmak(k, sigma_0, sigma_1)
-########################################################################################################
-##
-## Returns width of peak number k, sigma_k(k, sigma_0, sigma_1) = sqrt(sigma_0^2 + k*sigma__1^2)
-##
-########################################################################################################
+
+###############################################################################
+# sigmak(k, sigma_0, sigma_1)
+###############################################################################
+#
+# Returns width of peak number k,
+# sigma_k(k, sigma_0, sigma_1) = sqrt(sigma_0^2 + k*sigma_1^2)
+#
+###############################################################################
+
 @njit
 def sigmak(k, sigma_0, sigma_1):
-    return np.sqrt(sigma_0**2 + k*sigma_1**2)
+    return np.sqrt(sigma_0**2 + k * sigma_1**2)


+###############################################################################
+# Convolve(A, B, idx_0, length, dx, norm=True)
+###############################################################################
+#
+# Calculates convolution of A and B using scipy fftconvolve, selects the
+# convolved range relative to the pedestal index due to convolution shift,
+# and multiplies by dx to renormalise the distribution. Then, corrects for any
+# nans/infs and ensures normalisation with the trapezoidal rule (np.trapz). 
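+#
+# Usage sketch (illustrative only; values are hypothetical): convolving two
+# unit-normalised Gaussians centred at 1 and 2 p.e. should return a
+# unit-normalised Gaussian centred at 3 p.e.:
+#
+#     dx = 0.01
+#     x = np.arange(-10, 10, dx)
+#     idx_0 = np.argmin(abs(x))                 # pedestal (x = 0) bin
+#     A = norm.pdf(x, loc=1, scale=0.1)
+#     B = norm.pdf(x, loc=2, scale=0.1)
+#     A_o_B = Convolve(A, B, idx_0, len(x), dx)
+#     np.isclose(np.trapz(A_o_B, dx=dx), 1.0)   # -> True, peak at x = 3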
-## -######################################################################################################## def Convolve(A, B, idx_0, length, dx, norm=True): - A_o_B = convolve(A, B)[idx_0:length+idx_0]*dx + A_o_B = convolve(A, B)[idx_0: length + idx_0] * dx A_o_B = np.nan_to_num(A_o_B, nan=0.0, posinf=0.0, neginf=0.0) - if(norm): - A_o_B = A_o_B/max(epsilon(), np.trapz(A_o_B, dx=dx)) + if norm: + A_o_B = A_o_B / max(epsilon(), np.trapz(A_o_B, dx=dx)) return A_o_B - -####################################################################################################### -# ConvolveList(A, B, idx_0, length, dx, norm=True) +############################################################################### +# ConvolveList(A, B, idx_0, length, dx, norm=True) # Applies recursive 'Convolve' for lists of functions. -####################################################################################################### +############################################################################### def ConvolveList(lst, idx_0, length, dx, norm=True): res = lst[0] for val in lst[1:]: - res = Convolve(res, val, idx_0, length, dx, norm=True) + res = Convolve(res, val, idx_0, length, dx, norm=True) return res -####################################################################################################### + +############################################################################### # Multichoose(n, k) # Finds combinations that add up to k -####################################################################################################### - -@lru_cache(maxsize=None) -def Multichoose(n,k): - if k < 0 or n < 0: return "Error" - if not k: return [[0]*n] - if not n: return [] - if n == 1: return [[k]] - return [[0]+val for val in Multichoose(n-1,k)] + \ - [[val[0]+1]+val[1:] for val in Multichoose(n,k-1)] - - - - - - - - - - +############################################################################### +@lru_cache(maxsize=None) +def Multichoose(n, k): + if k < 0 or n < 0: + return "Error" + if not k: + return [[0] * n] + if not n: + return [] + if n == 1: + return [[k]] + return [[0] + val for val in Multichoose(n - 1, k)] + [ + [val[0] + 1] + val[1:] for val in Multichoose(n, k - 1) + ] -######################################################################################################## -## AFTERPULSE FUNCTIONS -######################################################################################################## +############################################################################### +# AFTERPULSE FUNCTIONS +############################################################################### -######################################################################################################## -## Beta(tau_R, tau_Ap, t_gate): -######################################################################################################## +############################################################################### +# Beta(tau_R, tau_Ap, t_gate): +############################################################################### # # Normalisation constant for the afterpulse p.d.f -# -# Beta(tau_R, tau_Ap, t_gate) = ∫(1 - exp(-t/tauR)) x exp(-t/tauAp)/tauAp from 0 to tgate # -######################################################################################################## +# Beta(tau_R, tau_Ap, t_gate) = ∫(1 - exp(-t/tauR)) x exp(-t/tauAp)/tauAp +# from 0 to tgate +# +############################################################################### + @njit def Beta(tau_R, tau_Ap, 
t_gate):
-    return (tau_Ap - np.exp(-t_gate/tau_Ap)*(tau_Ap + tau_R*(1 - np.exp(-t_gate/tau_R))))/(tau_Ap + tau_R)
+    return ((tau_Ap - np.exp(-t_gate / tau_Ap)
+             * (tau_Ap + tau_R * (1 - np.exp(-t_gate / tau_R))))
+            / (tau_Ap + tau_R))


-########################################################################################################
-## P_Ap(tau_R, tau_Ap, t_gate):
-########################################################################################################
+###############################################################################
+# P_Ap(tau_R, tau_Ap, t_gate):
+###############################################################################
 #
-# Calculates afterpulse discharge probability from scaling factor p_Ap and maximum discharge scaling factor Beta.
+# Calculates afterpulse discharge probability from scaling factor p_Ap and
+# maximum discharge scaling factor Beta.
 #
-# Afterpulses can only have a maximum
-########################################################################################################
+# Afterpulses can only have a maximum discharge probability of Beta (reached
+# in the p_Ap = 1 limit).
+###############################################################################
+
+
 @njit
 def P_Ap(tau_R, tau_Ap, t_gate, p_Ap):
     return p_Ap*Beta(tau_R, tau_Ap, t_gate)


-########################################################################################################
-## dPAp(tau_R, tau_Ap, t_gate, p_Ap)
-########################################################################################################
-##
-## Returns error on total afterpulse probability, dPAp, occurring between 0 and t_gate.
-##
-########################################################################################################
+###############################################################################
+# dPAp(tau_R, tau_Ap, t_gate, p_Ap)
+###############################################################################
+#
+# Returns error on total afterpulse probability, dPAp, occurring between 0 and
+# t_gate. 
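+#
+# The propagated uncertainty assumes uncorrelated errors on tau_Ap and p_Ap,
+# i.e. standard Gaussian error propagation:
+#
+#     dPAp = sqrt((dP_Ap/dtau_Ap)^2 * dtau_Ap^2 + (dP_Ap/dp_Ap)^2 * dp_Ap^2)
+#
+# with both partial derivatives written out explicitly in the function body.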
+#
+###############################################################################

 @njit
 def dP_Ap(tau_R, tau_Ap, t_gate, p_Ap, dtau_Ap, dp_Ap):

-    dPApdtauAp = p_Ap*((t_gate*(-tau_Ap - tau_R*(1 - np.exp(-t_gate/tau_R)))*np.exp(-t_gate/tau_Ap))/tau_Ap**2 + 1 - np.exp(-t_gate/tau_Ap))/(tau_Ap+tau_R) - p_Ap*(tau_Ap - (tau_Ap + tau_R*(1 - np.exp(-t_gate/tau_R)))*np.exp(-t_gate/tau_Ap))/(tau_Ap + tau_R)**2
-
-    dPApdpAp = (tau_Ap - (tau_Ap + tau_R*(1 - np.exp(-t_gate/tau_R)))*np.exp(-t_gate/tau_Ap))/(tau_Ap + tau_R)
-
-    dPAp = np.sqrt((dPApdtauAp**2)*(dtau_Ap**2) + (dPApdpAp**2)*(dp_Ap**2))
-
+    dPApdtauAp = (
+        p_Ap
+        * (
+            (
+                t_gate
+                * (-tau_Ap - tau_R * (1 - np.exp(-t_gate / tau_R)))
+                * np.exp(-t_gate / tau_Ap)
+            )
+            / tau_Ap**2
+            + 1
+            - np.exp(-t_gate / tau_Ap)
+        )
+        / (tau_Ap + tau_R)
+        - p_Ap
+        * (
+            tau_Ap
+            - (tau_Ap + tau_R * (1 - np.exp(-t_gate / tau_R)))
+            * np.exp(-t_gate / tau_Ap)
+        )
+        / (tau_Ap + tau_R) ** 2
+    )
+
+    dPApdpAp = (
+        tau_Ap
+        - ((tau_Ap + tau_R * (1 - np.exp(-t_gate / tau_R)))
+           * np.exp(-t_gate / tau_Ap))
+    ) / (tau_Ap + tau_R)
+
+    dPAp = np.sqrt((dPApdtauAp**2) * (dtau_Ap**2) + (dPApdpAp**2) * (dp_Ap**2))
+
     return dPAp


-########################################################################################################
-## P1(tau_R, tau_Ap, t_gate):
-########################################################################################################
+###############################################################################
+# P1(tau_R, tau_Ap, t_gate):
+###############################################################################
 #
 # P1(T<t)= ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, where t<t_gate
 #
-########################################################################################################
+###############################################################################

 @njit
 def P1(t, tau_R, tau_Ap, t_gate):
-    p = -np.exp(-t/tau_Ap)*(tau_Ap + tau_R*(1 - np.exp(-t/tau_R)))/(tau_R + tau_Ap)
+    p = (
+        -np.exp(-t / tau_Ap)
+        * (tau_Ap + tau_R * (1 - np.exp(-t / tau_R)))
+        / (tau_R + tau_Ap)
+    )
     return p


-########################################################################################################
-## EvalP1(t0, t1, tau_R, tau_Ap, t_gate):
-########################################################################################################
+###############################################################################
+# EvalP1(t0, t1, tau_R, tau_Ap, t_gate):
+###############################################################################
 #
-# Cumulative Density Function of dp1dt
-# P1(T<t)= Beta^-1(tau_R, tau_Ap, t_gate) x ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, where t<t_gate
+# Cumulative distribution function of dp1dt
+# P1(T<t)= Beta^-1(tau_R, tau_Ap, t_gate) x
+#          ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, where t<t_gate
 #
-########################################################################################################
+###############################################################################

 @njit
 def EvalP1(t0, t1, tau_R, tau_Ap, t_gate):
-    return (P1(t1, tau_R, tau_Ap, t_gate) - P1(t0, tau_R, tau_Ap, t_gate))
+    return P1(t1, tau_R, tau_Ap, t_gate) - P1(t0, tau_R, tau_Ap, t_gate)

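+
+# Reviewer sketch (illustrative addition, not part of the original patch):
+# integrated over the whole gate, the unnormalised CDF increment returned by
+# EvalP1 must reproduce the normalisation constant Beta, since
+# Beta = ∫(1 - exp(-t/tauR)) x exp(-t/tauAp)/tauAp from 0 to t_gate.
+# The helper name and the example default values (in ns) are hypothetical.
+def _check_P1_normalisation(tau_R=20.0, tau_Ap=5.0, t_gate=100.0):
+    total = EvalP1(0.0, t_gate, tau_R, tau_Ap, t_gate)
+    return np.isclose(total, Beta(tau_R, tau_Ap, t_gate))
+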
+############################################################################### +# tAp( Q, tau, t_gate): +############################################################################### # -# Evaluate tAp between -# P1(T<t)= Beta^-1(tau_R, tau_Ap, t_gate) x ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, where t<t_gate +# Evaluate tAp between +# P1(T<t) = Beta^-1(tau_R, tau_Ap, t_gate) x +# ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, where t<t_gate # -######################################################################################################## +############################################################################### @njit -def tAp( Q, tau, t_gate): - return t_gate/2 - tau*np.arccosh(((1-Q)*np.exp(t_gate/(2*tau)) + np.exp(-t_gate/(2*tau)))/2) - - -######################################################################################################## -## dp1dQ(Q, tau, tau_R, tau_Ap, t_gate) -######################################################################################################## -## -## Returns discretised afterpulse p.d.f as a function of n.p.e -## Note: as in Chmill et al. However, the two branches of solutions given by t and t_gate -t are summed together. This demonstrably yields a normalised probability distribution. -## -## To understand what is going on here, it takes a little explaining. -## We exploit the fact that the cumulative distribution distribution F_Q(Q<q) = F(T<tQ(Q)), from which the derivation of the paper can be derived. -## -## F_Q(Q<q) = F(T<tAp(Q)) -## f_Q(q) = F^'(tAp(q)) x |dtAp/dQ| -## |dtAp/dQ| = |dQ/dtAp|^-1 -## F^'(t) = fAp(t) -## f_Q(q) = fAp(tAp(q)) x |dQ/dtAp|^-1 -## -## We do not need to do all that complicated stuff. All we need to do is evaluate F(T<tAp(Q)) between the bins in the time domain, then divide through by dQ -## -## This is easy. We evaluate the integral of ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, (P1) between the bins given by Q +/- dQ/2, then divide through by beta and bin width. -## -## 1. Calculate Q_max for afterpulse spectrum. -## -## 2. Calculate beta for the afterpulse spectrum. -## -## 3. Select the indices that are within the range of 0 and Q_max -## -## 4. Create empty arrays for the pdfs for both branches (pdf_b0, pdf_b1) and the times we will be evaluating (t_m, t_p) -## -## 5. Find the bins in the t_Ap space by evaluating t_Ap_m = t_Ap(Q-dx/2), t_Ap_p = t_Ap(Q+dx/2) -## -## 6. Integrate over the bins forwards in time using EvalP1 for pdf_b0 -## -## 7. Now to evaluate the edges. The problem here is that t_Ap_m and t_Ap_p may have elements below 0/above Q_max, which we cannot account for. -## We replace the first bin of the distribution with the integral from 0 to the lower bin edge. -## -## Note that Q[idx_Q_min+1]-dx/2 = Q[idx_Q_min] + dx/2. So, t_m[idx_Q_0+1] is the lower edge of the last bin. -## The reason that we do it this way is because we do not have an 'upper' edge at t_p[idx_Q_0] -## So the integral of the first bin is evaluated between 0 and t_m[idx_Q_0+1] -## -## You can check the integral is correct by uncommenting the code. -## -## 8. We replace the last bin of the distribution with the integral from the upper edge to tgate/2. -## -## Note that Q[idx_Q_max-1]+dx/2 = Q[idx_Q_max] - dx/2. So, t_p[idx_Q_0+1] is the lower edge of the last bin. 
-## The reason that we do it this way is because we do not have an 'upper' edge at t_p[idx_Q_0] -## So the integral of the first bin is evaluated between 0 and t_p[idx_Q_0+1] -## -## You can check the integral is correct by uncommenting the code. -## -## 9. Repeat 6-8 for pdf_b1. Note that because the time is reversed in this part, all the elements of evaluating the edges of the bin have to be reversed too! -## -## 10. Normalise the integral to beta and dx -## -## IMPORTANT NOTE! -## As the granularity of the spectrum increases, so too do we encounter the problem where poles invariably occur. -## numpy cannot accurately assess the value of np.log(1-epsilon()). This happens where tau is very small relative to t0 or tgate. -## -## Unjit the function before printing! -######################################################################################################## +def tAp(Q, tau, t_gate): + return t_gate / 2 - tau*np.arccosh( + ((1 - Q)*np.exp(t_gate/(2*tau)) + np.exp(-t_gate/(2*tau)))/2 + ) + + +############################################################################### +# dp1dQ(Q, tau, tau_R, tau_Ap, t_gate) +############################################################################### +# +# Returns discretised afterpulse p.d.f as a function of n.p.e +# Note: as in Chmill et al. However, the two branches of solutions given by t +# and t_gate -t are summed together. This demonstrably yields a +# normalised probability distribution. +# +# To understand what is going on here, it takes a little explaining. +# We exploit the fact that the cumulative distribution distribution +# F_Q(Q<q) = F(T<tQ(Q)), from which the derivation of the paper can be derived. +# +# F_Q(Q<q) = F(T<tAp(Q)) +# f_Q(q) = F^'(tAp(q)) x |dtAp/dQ| +# |dtAp/dQ| = |dQ/dtAp|^-1 +# F^'(t) = fAp(t) +# f_Q(q) = fAp(tAp(q)) x |dQ/dtAp|^-1 +# +# We do not need to do all that complicated stuff. All we need to do is +# evaluate F(T<tAp(Q)) between the bins in the time domain, then divide through +# by dQ +# +# This is easy. We evaluate the integral of +# ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, (P1) between the bins +# given by Q +/- dQ/2, then divide through by beta and bin width. +# +# 1. Calculate Q_max for afterpulse spectrum. +# +# 2. Calculate beta for the afterpulse spectrum. +# +# 3. Select the indices that are within the range of 0 and Q_max +# +# 4. Create empty arrays for the pdfs for both branches (pdf_b0, pdf_b1) and +# the times we will be evaluating (t_m, t_p) +# +# 5. Find the bins in the t_Ap space by evaluating t_Ap_m = t_Ap(Q-dx/2), +# t_Ap_p = t_Ap(Q+dx/2) +# +# 6. Integrate over the bins forwards in time using EvalP1 for pdf_b0 +# +# 7. Now to evaluate the edges. The problem here is that t_Ap_m and t_Ap_p may +# have elements below 0/above Q_max, which we cannot account for. +# We replace the first bin of the distribution with the integral from 0 to +# the lower bin edge. +# +# Note that Q[idx_Q_min+1]-dx/2 = Q[idx_Q_min] + dx/2. So, t_m[idx_Q_0+1] +# is the lower edge of the last bin. The reason that we do it this way is +# because we do not have an 'upper' edge at t_p[idx_Q_0] +# So the integral of the first bin is evaluated between 0 and t_m[idx_Q_0+1] +# +# You can check the integral is correct by uncommenting the code. +# +# 8. We replace the last bin of the distribution with the integral from the +# upper edge to tgate/2. +# +# Note that Q[idx_Q_max-1]+dx/2 = Q[idx_Q_max] - dx/2. So, t_p[idx_Q_0+1] is +# the lower edge of the last bin. 
The reason that we do it this way is +# because we do not have an 'upper' edge at t_p[idx_Q_0] +# So the integral of the first bin is evaluated between 0 and t_p[idx_Q_0+1] +# +# You can check the integral is correct by uncommenting the code. +# +# 9. Repeat 6-8 for pdf_b1. Note that because the time is reversed in this +# part, all the elements of evaluating the edges of the bin have to be +# reversed too! +# +# 10. Normalise the integral to beta and dx +# +# IMPORTANT NOTE! +# As the granularity of the spectrum increases, so too do we encounter the +# problem where poles invariably occur. numpy cannot accurately assess the +# value of np.log(1-epsilon()). This happens where tau is very small +# relative to t0 or tgate. +# +# Unjit the function before printing! +############################################################################### @njit def dp1dQ(Q, tau, tau_R, tau_Ap, t_gate, dx): - - - #1. - Q_max = (1-np.exp(-t_gate/(2*tau)))**2 - - - if(Q_max>1-epsilon()): - raise Exception("The afterpulse spectrum of this distribution cannot be normalised, because of the pole at Q_max, resulting in infinite probability density. This is probably because tau is very small, or tgate is very large compared to the binning of the spectrum. Try increasing tau, or reducing tgate") - - #2 + + # 1. + Q_max = (1 - np.exp(-t_gate / (2 * tau))) ** 2 + + if Q_max > 1 - epsilon(): + raise Exception(("The afterpulse spectrum of this distribution cannot " + "be normalised, because of the pole at Q_max, " + "resulting in infinite probability density. This is " + "probably because tau is very small, or tgate is very" + " large compared to the binning of the spectrum. " + "Try increasing tau, or reducing tgate")) + + # 2 beta = Beta(tau_R, tau_Ap, t_gate) - #3 - idx_Q_max = np.argmax(Q>Q_max)-1 - idx_Q_min = np.argmax(Q>0) - - #4 + # 3 + idx_Q_max = np.argmax(Q > Q_max) - 1 + idx_Q_min = np.argmax(Q > 0) + + # 4 t_Ap_m = np.zeros_like(Q) t_Ap_p = np.zeros_like(Q) pdf_b0 = np.zeros_like(Q) pdf_b1 = np.zeros_like(Q) - #5 - t_Ap_m[idx_Q_min:idx_Q_max]= tAp(Q[idx_Q_min:idx_Q_max]-dx/2, tau, t_gate) - t_Ap_p[idx_Q_min:idx_Q_max]= tAp(Q[idx_Q_min:idx_Q_max]+dx/2, tau, t_gate) - - #6 - pdf_b0[idx_Q_min:idx_Q_max] = EvalP1(t_Ap_m[idx_Q_min:idx_Q_max], - t_Ap_p[idx_Q_min:idx_Q_max], - tau_R, - tau_Ap, t_gate) - - #7. - pdf_b0[idx_Q_min] = EvalP1(0, t_Ap_m[idx_Q_min+1], tau_R, tau_Ap, t_gate) - - #8. - pdf_b0[idx_Q_max] = EvalP1(t_Ap_p[idx_Q_max-1], t_gate/2, tau_R, tau_Ap, t_gate) - - ######################################### - ## Check integral of pdfb0: - ######################################### - -# print("CHECK INTEGRAL pdfb0 ", -# np.trapz(pdf_b0/beta/dx, dx=dx), -# EvalP1(0, t_gate/2, tau_R, tau_Ap, t_gate)/beta, -# np.allclose(np.trapz(pdf_b0/beta/dx, dx=dx), -# EvalP1(0, t_gate/2, tau_R, tau_Ap, t_gate)/beta)) - - - ######################################### - - - #9. - pdf_b1[idx_Q_min:idx_Q_max] = EvalP1(t_gate-t_Ap_p[idx_Q_min:idx_Q_max], - t_gate-t_Ap_m[idx_Q_min:idx_Q_max], - tau_R, - tau_Ap, t_gate) + # 5 + t_Ap_m[idx_Q_min:idx_Q_max] = tAp(Q[idx_Q_min:idx_Q_max] - dx / 2, tau, + t_gate) + + t_Ap_p[idx_Q_min:idx_Q_max] = tAp(Q[idx_Q_min:idx_Q_max] + dx / 2, tau, + t_gate) + # 6 + pdf_b0[idx_Q_min:idx_Q_max] = EvalP1( + t_Ap_m[idx_Q_min:idx_Q_max], t_Ap_p[idx_Q_min:idx_Q_max], tau_R, + tau_Ap, t_gate + ) + # 7. + pdf_b0[idx_Q_min] = EvalP1(0, t_Ap_m[idx_Q_min + 1], tau_R, tau_Ap, t_gate) + + # 8. 
+ pdf_b0[idx_Q_max] = EvalP1(t_Ap_p[idx_Q_max - 1], t_gate / 2, tau_R, + tau_Ap, t_gate) - pdf_b1[idx_Q_min] = EvalP1(t_gate-t_Ap_m[idx_Q_min+1], t_gate, tau_R, tau_Ap, t_gate) - pdf_b1[idx_Q_max] = EvalP1(t_gate/2, t_gate-t_Ap_p[idx_Q_max-1], tau_R, tau_Ap, t_gate) - ######################################### - ## Check integral of pdfb1: + # Check integral of pdfb0: ######################################### - -# print("CHECK INTEGRAL pdfb0 ", -# np.trapz(pdf_b1/beta/dx, dx=dx), -# EvalP1(t_gate/2, t_gate, tau_R, tau_Ap, t_gate)/beta, -# np.allclose(np.trapz(pdf_b1/beta/dx, dx=dx), -# EvalP1(t_gate/2, t_gate, tau_R, tau_Ap, t_gate)/beta)) - ######################################### - #10 - pdf = (pdf_b0 + pdf_b1)/beta/dx - - - ######################################## - # Check integral of pdf: - ######################################## - -# print("CHECK INTEGRAL pdf ", np.trapz(pdf, dx=dx), 1, np.allclose(np.trapz(pdf, dx=dx), 1)) - -# plt.figure() -# plt.plot(Q, pdf) -# plt.xlim(0-5*dx, Q_max+5*dx) -# plt.yscale("log") -# plt.show() - - ######################################## + # print("CHECK INTEGRAL pdfb0 ", + # np.trapz(pdf_b0/beta/dx, dx=dx), + # EvalP1(0, t_gate/2, tau_R, tau_Ap, t_gate)/beta, + # np.allclose(np.trapz(pdf_b0/beta/dx, dx=dx), + # EvalP1(0, t_gate/2, tau_R, tau_Ap, t_gate)/beta)) - return pdf + ######################################### + # 9. + pdf_b1[idx_Q_min:idx_Q_max] = EvalP1( + t_gate - t_Ap_p[idx_Q_min:idx_Q_max], + t_gate - t_Ap_m[idx_Q_min:idx_Q_max], + tau_R, + tau_Ap, + t_gate, + ) + + pdf_b1[idx_Q_min] = EvalP1( + t_gate - t_Ap_m[idx_Q_min + 1], t_gate, tau_R, tau_Ap, t_gate + ) + pdf_b1[idx_Q_max] = EvalP1( + t_gate / 2, t_gate - t_Ap_p[idx_Q_max - 1], tau_R, tau_Ap, t_gate + ) + ######################################### + # Check integral of pdfb1: + ######################################### + # print("CHECK INTEGRAL pdfb0 ", + # np.trapz(pdf_b1/beta/dx, dx=dx), + # EvalP1(t_gate/2, t_gate, tau_R, tau_Ap, t_gate)/beta, + # np.allclose(np.trapz(pdf_b1/beta/dx, dx=dx), + # EvalP1(t_gate/2, t_gate, tau_R, tau_Ap, t_gate)/beta)) + ######################################### + # 10 + pdf = (pdf_b0 + pdf_b1) / beta / dx + ######################################## + # Check integral of pdf: + ######################################## + # print("CHECK INTEGRAL pdf ", np.trapz(pdf, dx=dx), 1, + # np.allclose(np.trapz(pdf, dx=dx), 1)) + # + # plt.figure() + # plt.plot(Q, pdf) + # plt.xlim(0-5*dx, Q_max+5*dx) + # plt.yscale("log") + # plt.show() + ######################################## + return pdf +############################################################################### +# DARK COUNT FUNCTIONS +############################################################################### +############################################################################### +# EvalDCRBFG(Q_0, Q_1, h_order) +############################################################################### +# +# Evaluates integral ∫1/(1-(Q/h_order)) between Q0 and Q1. 
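+# TODO: this description and EvalDCRDG's appear to be swapped: the closed form
+#       below, h_order*(log(Q_1) - log(Q_0)), is the antiderivative of
+#       1/(Q/h_order), while EvalDCRDG below integrates 1/(1-(Q/h_order)).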
+# h_order means the 'stretching factor' of the distribution for cross-talk +############################################################################### -######################################################################################################## -## DARK COUNT FUNCTIONS -######################################################################################################## +@njit +def EvalDCRBFG(Q_0, Q_1, h_order): + return h_order * np.log(Q_1) - h_order * np.log(Q_0) +############################################################################### +# EvalDCRDG(Q_0, Q_1, h_order) +############################################################################### +# +# Evaluates integral ∫1/(Q/h_order) between Q0 and Q1. +# h_order means the 'stretching factor' of the distribution for cross-talk +############################################################################### -######################################################################################################## -## EvalDCRBFG(Q_0, Q_1, h_order) -######################################################################################################## -## -## Evaluates integral ∫1/(1-(Q/h_order)) between Q0 and Q1. -## h_order means the 'stretching factor' of the distribution for cross-talk -######################################################################################################## @njit -def EvalDCRBFG(Q_0, Q_1, h_order): - return h_order*np.log(Q_1) - h_order*np.log(Q_0) +def EvalDCRDG(Q_0, Q_1, h_order): + return -h_order * np.log(h_order - Q_1) + h_order * np.log(h_order - Q_0) -######################################################################################################## -## EvalDCRDG(Q_0, Q_1, h_order) -######################################################################################################## -## -## Evaluates integral ∫1/(Q/h_order) between Q0 and Q1. -## h_order means the 'stretching factor' of the distribution for cross-talk -######################################################################################################## -@njit -def EvalDCRDG(Q_0, Q_1, h_order): - return -h_order*np.log(h_order-Q_1) + h_order*np.log(h_order-Q_0) - - - -######################################################################################################## -## dpDCR1dQ(Q, tau, t_gate, t_0, dx, h_order=1) -######################################################################################################## -## -## Evaluates discretised dark distribution at cross talk order of h_order, which stretches the distribution. -## h_order=1 means the primary dark count distribution -## -## The distribution has to be artificially discretised and normalised. -## There are poles of this distribution, so we have to be extremely careful to normalise the edges of the distribution -## The method is explained here. -## -## dpDCR1dQ(Q, tau, tgate, t0) = (tau/(t_gate + t_0))(1/Q + 1/(1-Q)) -## -## 1. Evaluate the maxima and minima Q_min, and Q_max. We cannot take Q/h_order, so for the integral -## to be properly normalised, we scale the limits by the stretching factor, h_order. -## -## 2. Evaluate the indices of the last values of Q in the spectrum before Q_min, Q_max and 0. -## Q_min to Q_max are the limits before the gate, 0 to Q_max are the limits after the gate. -## -## 3. Create empty zero arrays for the pdfs. pdf_bg is the pdf before the gate, and pdf_dg is the pdf during the gate. -## -## 4. 
For all the available indices between the limits, evaluate the integrals of EvalDCRBFG, between each bin edge. -## This will become our probability after scaling. -## -## 5. Find the integral from Q_min to the lower bin edge of the range that we have evaluated, -## and set it to the first available bin index. -## -## Note that Q[idx_Q_0+1]-dx/2 = Q[idx_Q_0] + dx/2. The reason that we do it this way is -## for consistency with the dp1dQ method, which has special considerations (see dp1dQ function) -## -## -## 6. Find the integral from the upper bin edge that we have evaluated to Q_max, -## and set it to the last available bin index. -## -## Note that Q[idx_Q_0-1]+dx/2 = Q[idx_Q_0] - dx/2. The reason that we do it this way is -## for consistency with the dp1dQ method, which has special considerations (see dp1dQ function) -## -## You can check the function is working at this stage by uncommenting the integral check. The integral should be t_0/tau -## -## 7. Repeat steps 4-6 for EvalDCRDG, this time using 0 as the lower limit. Everything stated in 4-6 still applies. -## -## You can check the function is working at this stage by uncommenting the integral check. The integral should be t_gate/tau -## -## 8. We obtain the probability per bin by multiplying by the normalisation constant tau/(t_0 + t_gate). -## -## We also need to divide through by h_order, because the integral is h_order times as large if we scale -## -## Then we divide through by dx to get the discretised probability distribution. -## -## You can check the function is working at this stage by uncommenting the integral check. The integral should be 1.0 -## -## 9. We divide through by the integral to correct for numerical error. -## -## IMPORTANT NOTE! -## As the granularity of the spectrum increases, so too do we encounter the problem where poles invariably occur. -## numpy cannot accurately assess the value of np.log(1-epsilon()). This happens where tau is very small relative to t0 or tgate. -## -######################################################################################################## +############################################################################### +# dpDCR1dQ(Q, tau, t_gate, t_0, dx, h_order=1) +############################################################################### +# +# Evaluates discretised dark distribution at cross talk order of h_order, +# which stretches the distribution. +# h_order=1 means the primary dark count distribution +# +# The distribution has to be artificially discretised and normalised. +# There are poles of this distribution, so we have to be extremely careful to +# normalise the edges of the distribution +# The method is explained here. +# +# dpDCR1dQ(Q, tau, tgate, t0) = (tau/(t_gate + t_0))(1/Q + 1/(1-Q)) +# +# 1. Evaluate the maxima and minima Q_min, and Q_max. We cannot take Q/h_order, +# so for the integral to be properly normalised, we scale the limits by the +# stretching factor, h_order. +# +# 2. Evaluate the indices of the last values of Q in the spectrum before Q_min, +# Q_max and 0. +# Q_min to Q_max are the limits before the gate, 0 to Q_max are the limits +# after the gate. +# +# 3. Create empty zero arrays for the pdfs. pdf_bg is the pdf before the gate, +# and pdf_dg is the pdf during the gate. +# +# 4. For all the available indices between the limits, evaluate the integrals +# of EvalDCRBFG, between each bin edge. +# This will become our probability after scaling. +# +# 5. 
Find the integral from Q_min to the lower bin edge of the range that we +# have evaluated, and set it to the first available bin index. +# +# Note that Q[idx_Q_0+1]-dx/2 = Q[idx_Q_0] + dx/2. The reason that we do it +# this way is +# for consistency with the dp1dQ method, which has special considerations +# (see dp1dQ function) +# +# +# 6. Find the integral from the upper bin edge that we have evaluated to Q_max, +# and set it to the last available bin index. +# +# Note that Q[idx_Q_0-1]+dx/2 = Q[idx_Q_0] - dx/2. The reason that we do it +# this way is for consistency with the dp1dQ method, which has special +# considerations (see dp1dQ function) +# +# You can check the function is working at this stage by uncommenting the +# integral check. The integral should be t_0/tau +# +# 7. Repeat steps 4-6 for EvalDCRDG, this time using 0 as the lower limit. +# Everything stated in 4-6 still applies. +# +# You can check the function is working at this stage by uncommenting the +# integral check. The integral should be t_gate/tau +# +# 8. We obtain the probability per bin by multiplying by the normalisation +# constant tau/(t_0 + t_gate). +# +# We also need to divide through by h_order, because the integral is +# h_order times as large if we scale +# +# Then we divide through by dx to get the discretised probability +# distribution. +# +# You can check the function is working at this stage by uncommenting the +# integral check. The integral should be 1.0 +# +# 9. We divide through by the integral to correct for numerical error. +# +# IMPORTANT NOTE! +# As the granularity of the spectrum increases, so too do we encounter the +# problem where poles invariably occur. numpy cannot accurately assess the +# value of np.log(1-epsilon()). This happens where tau is very small +# relative to t0 or tgate. +# +############################################################################### + @njit def dpDCR1dQ(Q, tau, t_gate, t_0, dx, h_order=1): - - #1. - - - Q_min = h_order*np.exp(-t_0/tau)*(1 - np.exp(-t_gate/tau)) - Q_max = h_order* (1 - np.exp(-t_gate/tau)) - - - if(Q_min<h_order*epsilon()): - raise Exception("The dark count spectrum of this distribution cannot be normalised, because of the pole at Q_min, resulting in infinite probability density. This is probably because tau is very small, or tgate/t0 is very large compared to the binning of the spectrum. Try increasing tau, or reducing tgate/t0") - - - - if(Q_max>h_order-h_order*epsilon()): - raise Exception("The dark count spectrum of this distribution cannot be normalised, because of the pole at Q_max, resulting in infinite probability density. This is probably because tau is very small, or tgate/t0 is very large compared to the binning of the spectrum") - - - #2. - - idx_Q_min = np.argmax(Q>Q_min) - idx_Q_max = np.argmax(Q>Q_max)-1 - idx_Q_0 = np.argmax(Q>0) - - - #3. + + # 1. + Q_min = h_order * np.exp(-t_0 / tau) * (1 - np.exp(-t_gate / tau)) + Q_max = h_order * (1 - np.exp(-t_gate / tau)) + + if Q_min < h_order * epsilon(): + raise Exception(("The dark count spectrum of this distribution cannot " + "be normalised, because of the pole at Q_min, " + "resulting in infinite probability density. This is " + "probably because tau is very small, or tgate/t0 is " + "very large compared to the binning of the spectrum. 
" + "Try increasing tau, or reducing tgate/t0")) + + if Q_max > h_order - h_order * epsilon(): + raise Exception(("The dark count spectrum of this distribution cannot " + "be normalised, because of the pole at Q_max, " + "resulting in infinite probability density. This is " + "probably because tau is very small, or tgate/t0 is " + "very large compared to the binning of the spectrum") + ) + + # 2. + idx_Q_min = np.argmax(Q > Q_min) + idx_Q_max = np.argmax(Q > Q_max) - 1 + idx_Q_0 = np.argmax(Q > 0) + + # 3. pdf_bfg = np.zeros_like(Q) pdf_dg = np.zeros_like(Q) - #4. - pdf_bfg[idx_Q_min:idx_Q_max] = EvalDCRBFG(Q[idx_Q_min:idx_Q_max]-dx/2, - Q[idx_Q_min:idx_Q_max]+dx/2, h_order) - - #5. - pdf_bfg[idx_Q_min]= EvalDCRBFG(Q_min, Q[idx_Q_min+1]-dx/2, h_order) - - #6. - pdf_bfg[idx_Q_max]= EvalDCRBFG(Q[idx_Q_max-1]+dx/2, Q_max, h_order) - - ######################################### - ## Check integral of pdf_bfg: + # 4. + pdf_bfg[idx_Q_min:idx_Q_max] = EvalDCRBFG( + Q[idx_Q_min:idx_Q_max] - dx / 2, + Q[idx_Q_min:idx_Q_max] + dx / 2, h_order + ) + + # 5. + pdf_bfg[idx_Q_min] = EvalDCRBFG(Q_min, Q[idx_Q_min + 1] - dx / 2, h_order) + + # 6. + pdf_bfg[idx_Q_max] = EvalDCRBFG(Q[idx_Q_max - 1] + dx / 2, Q_max, h_order) + ######################################### - - #print("CHECK INTEGRAL pdf_bfg ", np.trapz(pdf_bfg/dx, dx=dx), t_0/tau, np.allclose(np.trapz(pdf_bfg/dx, dx=dx), t_0/tau)) - + # Check integral of pdf_bfg: ######################################### - #7. - pdf_dg[idx_Q_0:idx_Q_max] = EvalDCRDG(Q[idx_Q_0:idx_Q_max]-dx/2, - Q[idx_Q_0:idx_Q_max]+dx/2, h_order) - - pdf_dg[idx_Q_0]= EvalDCRDG(0, Q[idx_Q_0+1]-dx/2, h_order) - pdf_dg[idx_Q_max]= EvalDCRDG(Q[idx_Q_max-1]+dx/2, Q_max, h_order) - - + # print("CHECK INTEGRAL pdf_bfg ", + # np.trapz(pdf_bfg/dx, dx=dx), t_0/tau, + # np.allclose(np.trapz(pdf_bfg/dx, dx=dx), t_0/tau)) + ######################################### - ## Check integral of pdf_dg: + + # 7. + pdf_dg[idx_Q_0:idx_Q_max] = EvalDCRDG( + Q[idx_Q_0:idx_Q_max] - dx / 2, Q[idx_Q_0:idx_Q_max] + dx / 2, h_order + ) + + pdf_dg[idx_Q_0] = EvalDCRDG(0, Q[idx_Q_0 + 1] - dx / 2, h_order) + pdf_dg[idx_Q_max] = EvalDCRDG(Q[idx_Q_max - 1] + dx / 2, Q_max, h_order) + ######################################### - - #print("CHECK INTEGRAL pdf_dg ", np.trapz(pdf_dg/dx, dx=dx), t_gate/tau, np.allclose(np.trapz(pdf_dg/dx, dx=dx), t_gate/tau)) - + # Check integral of pdf_dg: ######################################### - - - pdf = (pdf_bfg + pdf_dg)*(tau/(t_gate + t_0)) - pdf = pdf/dx/h_order - - - #8. - + + # print("CHECK INTEGRAL pdf_dg ", np.trapz(pdf_dg/dx, dx=dx), t_gate/tau, + # np.allclose(np.trapz(pdf_dg/dx, dx=dx), t_gate/tau)) + ######################################### - ## Check integral of pdf: + + pdf = (pdf_bfg + pdf_dg) * (tau / (t_gate + t_0)) + pdf = pdf / dx / h_order + + # 8. 
######################################### - -# print("CHECK INTEGRAL pdf ", np.trapz(pdf, dx=dx), 1, np.allclose(np.trapz(pdf, dx=dx), 1)) - -# plt.figure() -# plt.plot(Q, pdf) -# plt.xlim(0-5*dx, Q_max+5*dx) -# plt.yscale("log") -# plt.show() - + # Check integral of pdf: ######################################### - - - pdf = pdf/max(epsilon(), np.trapz(pdf, dx=dx)) - - - return pdf + # print("CHECK INTEGRAL pdf ", np.trapz(pdf, dx=dx), 1, + # np.allclose(np.trapz(pdf, dx=dx), 1)) + + # plt.figure() + # plt.plot(Q, pdf) + # plt.xlim(0-5*dx, Q_max+5*dx) + # plt.yscale("log") + # plt.show() + ######################################### + pdf = pdf / max(epsilon(), np.trapz(pdf, dx=dx)) + return pdf -######################################################################################################## -## DRM(Q, Q_0, G, mu, lbda, sigma_0, sigma_1, tau_Ap, p_Ap, DCR, tau, tau_R, t_gate, t_0, k_max, len_pad) -######################################################################################################## -## -## Calculates full detector response model. -## Notes: -## - The model is fitted in n.p.e scale. k = (Q - Q_0)/G. -## - To rescale to the input parameter unit, the final pdf is scaled by a factor of 1/G - -## this is because dp/dQ = (dp/dk)*(dk/dQ), dk/dQ = d/dQ (Q - Q_0)/G = 1/G -## -######################################################################################################## -def DRM(Q, Q_0, G, mu, lbda, sigma_0, sigma_1, tau_Ap, p_Ap, DCR, tau, tau_R, t_gate, t_0, k_max, k_max_dcr, len_pad, A): - +############################################################################### +# DRM(Q, Q_0, G, mu, lbda, sigma_0, sigma_1, tau_Ap, p_Ap, DCR, tau, tau_R, +# t_gate, t_0, k_max, len_pad) +############################################################################### +# +# Calculates full detector response model. +# Notes: +# - The model is fitted in n.p.e scale. k = (Q - Q_0)/G. 
+# - To rescale to the input parameter unit, the final pdf is scaled by a +# factor of 1/G - this is because dp/dQ = (dp/dk)*(dk/dQ), +# dk/dQ = d/dQ (Q - Q_0)/G = 1/G +# +############################################################################### + + +def DRM( + Q, + Q_0, + G, + mu, + lbda, + sigma_0, + sigma_1, + tau_Ap, + p_Ap, + DCR, + tau, + tau_R, + t_gate, + t_0, + k_max, + k_max_dcr, + len_pad, + A, +): t_0 = abs(t_0) len_pad = int(len_pad) - - len_Q = len(Q) - len_Q_pad = len_Q + 2*len_pad + len_Q_pad = len_Q + 2 * len_pad Q_idx = np.arange(0, len_Q) - Q_idx_pad = np.arange(-len_pad, len_Q+len_pad) - dx = abs(Q[1]-Q[0]) - - Q_pad = (Q[0] - dx*len_pad) + dx*np.arange(len_Q_pad) - - k_pad = (Q_pad - Q_0)/G - - dx_k = dx/G - - idx_k_0_pad = np.argmin(abs(k_pad)) - + Q_idx_pad = np.arange(-len_pad, len_Q + len_pad) + dx = abs(Q[1] - Q[0]) + + Q_pad = (Q[0] - dx * len_pad) + dx * np.arange(len_Q_pad) + k_pad = (Q_pad - Q_0) / G - Q_idx_orig = np.where(np.in1d(Q_idx_pad, - np.intersect1d( - Q_idx_pad, - Q_idx - ) - ), True, False) + dx_k = dx / G + + idx_k_0_pad = np.argmin(abs(k_pad)) + + Q_idx_orig = np.where( + np.in1d(Q_idx_pad, np.intersect1d(Q_idx_pad, Q_idx)), True, False + ) - k_pg = np.arange(0, max(k_max, 3)).astype(int) - - - - - Ppgs = np.expand_dims(gpoisson.pmf(k_pg, mu, lbda), -1) - + + Ppgs = np.expand_dims(gpoisson.pmf(k_pg, mu, lbda), -1) + Pns = np.zeros((int(k_max), int(k_max))) - + alpha = P_Ap(tau_R, tau_Ap, t_gate, p_Ap) - - dp0dQs = np.vstack([norm.pdf(k_pad, loc=_k, scale=sigmak(_k, sigma_0/G, sigma_1/G)) for _k in k_pg]) - + + dp0dQs = np.vstack( + [ + norm.pdf(k_pad, loc=_k, scale=sigmak(_k, sigma_0 / G, sigma_1 / G)) + for _k in k_pg + ] + ) + dpndQs = [] - for _k in k_pg: - if(_k==0): + if _k == 0: _dpndQ = np.zeros(len(k_pad)) - Pns[0,0] = binom.pmf(0, 0, alpha) - elif(_k==1): + Pns[0, 0] = binom.pmf(0, 0, alpha) + elif _k == 1: _dpndQ = dp1dQ(k_pad, tau, tau_R, tau_Ap, t_gate, dx_k) - - - Pns[1,0:2] = np.array(binom.pmf(np.arange(0, 2).astype(int), _k, alpha)) + + Pns[1, 0:2] = np.array(binom.pmf(np.arange(0, 2).astype(int), _k, + alpha)) else: - _dpndQ = Convolve(dpndQs[1], dpndQs[-1], idx_k_0_pad, len_Q_pad, dx_k, norm=True) - Pns[_k,0:_k+1] = np.array(binom.pmf(np.arange(0, _k+1).astype(int), _k, alpha)) + _dpndQ = Convolve( + dpndQs[1], dpndQs[-1], idx_k_0_pad, len_Q_pad, dx_k, norm=True + ) + Pns[_k, 0: _k + 1] = np.array( + binom.pmf(np.arange(0, _k + 1).astype(int), _k, alpha) + ) dpndQs.append(_dpndQ) - + dpndQs = np.vstack(dpndQs) - - - - - mu_DCR = DCR*(t_gate + t_0) + + mu_DCR = DCR * (t_gate + t_0) k_dc = np.arange(0, max(k_max_dcr, 3)).astype(int) - fDCRs = [] + fDCRs = [] hDCRs = [] pdf_dark = np.zeros(len_Q_pad) for _k in k_dc: - if(_k==0): + if _k == 0: f = np.zeros(len_Q_pad) - f[idx_k_0_pad] = 1/dx_k - h=f - - elif(_k==1): - f = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=1) - h = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=2) - - + f[idx_k_0_pad] = 1 / dx_k + h = f + + elif _k == 1: + f = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=1) + h = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=2) + else: - f = Convolve(fDCRs[1], fDCRs[-1], idx_k_0_pad, len_Q_pad, dx_k, norm=True) - h = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=_k+1) + f = Convolve(fDCRs[1], fDCRs[-1], idx_k_0_pad, len_Q_pad, dx_k, + norm=True) + h = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=_k + 1) fDCRs.append(f) hDCRs.append(h) fDCRs = np.vstack(fDCRs) hDCRs = np.vstack(hDCRs) - - + pDCRs_0 = poisson.pmf(0, mu_DCR) pDCRs = [] dpDCRdQs = [] - + for 
k in k_dc: - if(k==0): + if k == 0: continue - + else: _pDCR_k = [] _dpDCRdQ_k = [] - - for j in range(1, k+1): + for j in range(1, k + 1): -# combs = np.array(list(product(range(k-j+1), repeat=j))) -# combs = combs[np.sum(combs, axis=1)==k-j] - combs = Multichoose(j, k-j) + # combs = np.array(list(product(range(k-j+1), repeat=j))) + # combs = combs[np.sum(combs, axis=1)==k-j] + combs = Multichoose(j, k - j) for comb in combs: - - _dpDCRdQ_jks = [] _lp_B_jks = [] - for _v, _f in zip(*np.unique(comb, return_counts=True)): - if(_v==0): + if _v == 0: _dpDCRdQ_jk = [fDCRs[_f]] - if(_f>1): - _lp_B_jk = [_f*borel.logpmf(0, lbda)] + if _f > 1: + _lp_B_jk = [_f * borel.logpmf(0, lbda)] else: _lp_B_jk = [borel.logpmf(0, lbda)] else: _dpDCRdQ_jk = [hDCRs[_v] for _ in range(_f)] - if(_f>1): - _lp_B_jk = [_f*borel.logpmf(_v, lbda)] + if _f > 1: + _lp_B_jk = [_f * borel.logpmf(_v, lbda)] else: _lp_B_jk = [borel.logpmf(_v, lbda)] _dpDCRdQ_jks.extend(_dpDCRdQ_jk) _lp_B_jks.extend(_lp_B_jk) - __pDCR_k = np.exp(np.sum(_lp_B_jks) + poisson.logpmf(j, mu_DCR)) - __dpDCRdQ_k = ConvolveList(_dpDCRdQ_jks, idx_k_0_pad, len_Q_pad, dx_k, norm=True) - + __pDCR_k = np.exp(np.sum(_lp_B_jks) + + poisson.logpmf(j, mu_DCR)) + + __dpDCRdQ_k = ConvolveList( + _dpDCRdQ_jks, idx_k_0_pad, len_Q_pad, dx_k, norm=True + ) + _pDCR_k.append(__pDCR_k) _dpDCRdQ_k.append(__dpDCRdQ_k) - + pDCRs.extend(_pDCR_k) dpDCRdQs.extend(_dpDCRdQ_k) - + pDCRs = np.vstack(pDCRs) - dpDCRdQs = np.vstack(dpDCRdQs) - - - pdf_dark = np.sum(pDCRs*dpDCRdQs, axis=0) - pdf_dark = pdf_dark/max(epsilon(), np.trapz(pdf_dark, dx=dx_k)) - ## normalised by factor 1/(1 - pDCRs_0), and then rescaled to original unit after convolution - - - + dpDCRdQs = np.vstack(dpDCRdQs) + + pdf_dark = np.sum(pDCRs * dpDCRdQs, axis=0) + pdf_dark = pdf_dark / max(epsilon(), np.trapz(pdf_dark, dx=dx_k)) + # normalised by factor 1/(1 - pDCRs_0), and then rescaled to + # original unit after convolution + pdf_drm = np.zeros(len_Q_pad) - + for _k in k_pg: - _dp0dQ = float(Pns[_k, 0])*dp0dQs[_k, :] - - if(_k>0): - - - _dpndQ_sum = np.sum(np.vstack([ - float(Pns[_k, _j])*Convolve(dpndQs[_j, :], - dp0dQs[_k, :], - idx_k_0_pad, - len_Q_pad, - dx_k, - norm=True - ) - for _j in range(1, _k+1) - ]), - axis=0) + _dp0dQ = float(Pns[_k, 0]) * dp0dQs[_k, :] + + if _k > 0: + + _dpndQ_sum = np.sum( + np.vstack( + [ + float(Pns[_k, _j]) + * Convolve( + dpndQs[_j, :], + dp0dQs[_k, :], + idx_k_0_pad, + len_Q_pad, + dx_k, + norm=True, + ) + for _j in range(1, _k + 1) + ] + ), + axis=0, + ) else: _dpndQ_sum = np.zeros(len_Q_pad) - + _dp0dQ = np.nan_to_num(_dp0dQ, nan=0.0, posinf=0.0, neginf=0.0) _dpndQ_sum = np.nan_to_num(_dpndQ_sum, nan=0.0, posinf=0.0, neginf=0.0) - - - _pdf_light_k = (_dp0dQ + _dpndQ_sum) - _pdf_drm_k = pDCRs_0*_pdf_light_k + (1-pDCRs_0)*Convolve(_pdf_light_k, pdf_dark, idx_k_0_pad, len_Q_pad, dx_k, norm=True) - _pdf_drm_k = _pdf_drm_k/max(epsilon(), np.trapz(_pdf_drm_k, dx=dx_k)) - - _pdf_drm_k = float(Ppgs[_k])*_pdf_drm_k - pdf_drm += _pdf_drm_k - - - - pdf_drm = pdf_drm[Q_idx_orig]/G - count_drm = A*pdf_drm - - - - return count_drm - - + _pdf_light_k = _dp0dQ + _dpndQ_sum + _pdf_drm_k = pDCRs_0 * _pdf_light_k + (1 - pDCRs_0) * Convolve( + _pdf_light_k, pdf_dark, idx_k_0_pad, len_Q_pad, dx_k, norm=True + ) + _pdf_drm_k = _pdf_drm_k / max(epsilon(), np.trapz(_pdf_drm_k, dx=dx_k)) + _pdf_drm_k = float(Ppgs[_k]) * _pdf_drm_k + pdf_drm += _pdf_drm_k + pdf_drm = pdf_drm[Q_idx_orig] / G + count_drm = A * pdf_drm - - - - + return count_drm diff --git a/PeakOTron.py b/PeakOTron.py index 
53a2f36..3aa2bf7 100644 --- a/PeakOTron.py +++ b/PeakOTron.py @@ -1,442 +1,421 @@ -from HelperFunctions import GP_gain, GP_lbda, GP_mu, HistMean, HistStd, HistSkew +from HelperFunctions import (GP_gain, GP_lbda, GP_mu, + HistMean, HistStd, HistSkew) + +from Model import epsilon from HelperFunctions import LatexFormat, Linear -from AdditionalPDFs import gpoisson -from LossFunctions import * +from LossFunctions import BinnedLH, HuberRegression from Model import DRM, P_Ap, dP_Ap, sigmak, Beta from iminuit import Minuit -from scipy.interpolate import interp1d, UnivariateSpline, InterpolatedUnivariateSpline -from scipy.stats import poisson, norm +from scipy.interpolate import (interp1d, UnivariateSpline, + InterpolatedUnivariateSpline) + +from scipy.stats import norm from astropy.stats import knuth_bin_width, freedman_bin_width, scott_bin_width from matplotlib import cm import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec -from matplotlib.ticker import MaxNLocator, ScalarFormatter +from matplotlib.ticker import ScalarFormatter from itertools import chain from copy import deepcopy import time +import numpy as np - -import matplotlib.pyplot as plt - - -#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -######################################################################################################## +#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +############################################################################### ## PEAKOTRON -######################################################################################################## -#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +############################################################################### +#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% class PeakOTron: - - - ######################################################################################################## - ## __init__ - ######################################################################################################## - ## - ## VARIABLES: - ## n_call_minuit: number of calls to Minuit for each trial - ## n_iterations_minuit: number of trial iterations if convergence is not achieved - ## n_bootstrap: number of bootstraps for each statistic - ## plot_figsize: figure size in inches for prefit figures - ## plot_fontsize: fontsize for output plots - ## plot_cmap: colour map for output plots - ## - ## This function defines the basic information stored by PeakOTron and the default fit value dictionary that is updated with the input parameters of ## the user. 
- ## - ######################################################################################################## - + ########################################################################### + # __init__ + ########################################################################### + # + # VARIABLES: + # n_call_minuit: number of calls to Minuit for each trial + # n_iterations_minuit: number of trial iterations if convergence is not + # achieved + # + # n_bootstrap: number of bootstraps for each statistic + # plot_figsize: figure size in inches for prefit figures + # plot_fontsize: fontsize for output plots + # plot_cmap: colour map for output plots + # + # This function defines the basic information stored by PeakOTron and the + # default fit value dictionary that is updated with the input parameters of + # the user. + # + ########################################################################### + def __init__(self, verbose=False, n_call_minuit=10000, - n_iterations_minuit = 10, - plot_figsize=(10,10), + n_iterations_minuit=10, + plot_figsize=(10, 10), plot_fontsize=25, plot_legendfontsize=18, - plot_cmap = 'turbo', - eps = 1e-6 - ): - - self._default_hist_data={ - "count":None, - "density":None, - "bin_numbers":None, - "bin_centres":None, - "counts_bg":None, - "counts_bgsub":None, - "truncate_nsigma0_do_min":None, - "bw":None, - "peaks":{ - "peak_position":None, - "peak_height":None, - "peak_index":None, - "peak_mean":None, - "peak_mean_error":None, - "peak_variance":None, - "peak_variance_error":None, - "peak_std_deviation":None, - "peak_std_deviation_error":None + plot_cmap='turbo', + eps=1e-6 + ): + + self._default_hist_data = { + "count": None, + "density": None, + "bin_numbers": None, + "bin_centres": None, + "counts_bg": None, + "counts_bgsub": None, + "truncate_nsigma0_do_min": None, + "bw": None, + "peaks": { + "peak_position": None, + "peak_height": None, + "peak_index": None, + "peak_mean": None, + "peak_mean_error": None, + "peak_variance": None, + "peak_variance_error": None, + "peak_std_deviation": None, + "peak_std_deviation_error": None } } - - self._default_fit_dict={ - "Q_0":None, - "G":None, - "lbda":None, - "mu":None, - "sigma_0":None, - "sigma_1":None, - "DCR":None, - "tau":None, - "t_gate":None, - "t_0":None, - "k_max":None, - "len_pad":None + + self._default_fit_dict = { + "Q_0": None, + "G": None, + "lbda": None, + "mu": None, + "sigma_0": None, + "sigma_1": None, + "DCR": None, + "tau": None, + "t_gate": None, + "t_0": None, + "k_max": None, + "len_pad": None } - + self._eps = eps - self._plot_figsize= plot_figsize - self._plot_fontsize= plot_fontsize - self._plot_legendfontsize= plot_legendfontsize + self._plot_figsize = plot_figsize + self._plot_fontsize = plot_fontsize + self._plot_legendfontsize = plot_legendfontsize self._cmap = cm.get_cmap(plot_cmap) self._len_pad = int(100) - self._verbose=verbose + self._verbose = verbose self._n_call_minuit = n_call_minuit self._n_iterations_minuit = n_iterations_minuit - - + self._max_n_peaks = None self._max_n_peaks_dcr = 6 - self._min_n_events_peak=100 - + self._min_n_events_peak = 100 + self._m_DRM = None self._time_prefit = None self._time_fit = None - - self._default_fit_kwargs={ - "tau":None, - "tau_err":None, - "t_0":None, - "t_0_err":None, - "tau_R":None, - "tau_R_err":None, - "t_gate":None, - "t_gate_err":None, - "bw":None, - "bin_0":None, - "truncate_nsigma0_up":None, - "truncate_nsigma0_do":None, - "truncate_nsigma0_chi2scanlim":None, - "min_n_events_peak":None, - "prefit_only":False, - "bin_method":"knuth" + 
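+        # Defaults for the keyword arguments accepted by Fit(); values are +        # filled in from the user's call, and entries such as bw or bin_0 +        # that stay None are presumably derived from the data (see GetBins() +        # and the Fit() variable notes below).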
+ self._default_fit_kwargs = { + "tau": None, + "tau_err": None, + "t_0": None, + "t_0_err": None, + "tau_R": None, + "tau_R_err": None, + "t_gate": None, + "t_gate_err": None, + "bw": None, + "bin_0": None, + "truncate_nsigma0_up": None, + "truncate_nsigma0_do": None, + "truncate_nsigma0_chi2scanlim": None, + "min_n_events_peak": None, + "prefit_only": False, + "bin_method": "knuth" } - + self.InitKwargs() - - - - - - ######################################################################################################## - ## InitKwargs() - ######################################################################################################## - ## - ## This function creates the fit dictionary and initialises the output dictionaries of the class. - ## - ######################################################################################################## - + + ########################################################################### + # InitKwargs() + ########################################################################### + # + # This function creates the fit dictionary and initialises the output + # dictionaries of the class. + # + ########################################################################### + def InitKwargs(self): self._fit_kwargs = self._default_fit_kwargs.copy() self._prefit_values = self._default_fit_dict.copy() self._prefit_errors = self._default_fit_dict.copy() - self._fit_values= self._default_fit_dict.copy() + self._fit_values = self._default_fit_dict.copy() self._fit_errors = self._default_fit_dict.copy() - self._prefit_values_bin = self._default_fit_dict.copy() self._prefit_errors_bin = self._default_fit_dict.copy() - self._fit_values_bin= self._default_fit_dict.copy() + self._fit_values_bin = self._default_fit_dict.copy() self._fit_errors_bin = self._default_fit_dict.copy() - - self._hist_data = self._default_hist_data.copy() - - - - ######################################################################################################## - ## SetMaxNPeaks(max_n_peaks) - ######################################################################################################## - ## - ## Typically, it is sufficient to determine the appropriate number of peaks to fit from mu. - ## However, to make sure we don't accidentally create a fit with a large number of peaks, - ## the default threshold of 20 peaks may be set with this function - ## - ######################################################################################################## - + + ########################################################################### + # SetMaxNPeaks(max_n_peaks) + ########################################################################### + # + # Typically, it is sufficient to determine the appropriate number of peaks + # to fit from mu. However, to make sure we don't accidentally create a fit + # with a large number of peaks, the default threshold of 20 peaks may be + # set with this function. 
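+    # +    # Hypothetical usage sketch (names as defined in this class): +    #     po = PeakOTron(verbose=True) +    #     po.SetMaxNPeaks(12)  # any integer greater than 3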
+    # +    ########################################################################### +     def SetMaxNPeaks(self, max_n_peaks): max_n_peaks = int(max_n_peaks) -        if(max_n_peaks<4): -            raise Exception("Maximum number of light peaks must be greater than 3.") - +        if(max_n_peaks < 4): +            raise Exception( +                "Maximum number of light peaks must be greater than 3.") -        self._max_n_peaks = max_n_peaks +        self._max_n_peaks = max_n_peaks          if(self._verbose): -            print("Set maximum number of light peaks to {:d}.".format(self._max_n_peaks)) - - -    ######################################################################################################## -    ## SetMaxNPeaksDCR(max_n_peaks) -    ######################################################################################################## -    ## -    ## Typically, it is sufficient to determine the appropriate number of peaks to fit from mu. -    ## However, to make sure we don't accidentally create a fit with a large number of peaks, -    ## the default threshold of 20 peaks may be set with this function -    ## -    ######################################################################################################## - +            print("Set maximum number of light peaks to {:d}.".format( +                self._max_n_peaks)) + +    ########################################################################### +    # SetMaxNPeaksDCR(max_n_peaks) +    ########################################################################### +    # +    # Typically, it is sufficient to determine the appropriate number of peaks +    # to fit from mu. However, to make sure we don't accidentally create a fit +    # with a large number of dark peaks, the default threshold of 6 dark peaks +    # may be overridden with this function. +    # +    ########################################################################### +     def SetMaxNPeaksDCR(self, max_n_peaks_dcr): max_n_peaks_dcr = int(max_n_peaks_dcr) -        if(max_n_peaks_dcr<4): -            raise Exception("Maximum number of dark peaks must be greater than 3.") - +        if(max_n_peaks_dcr < 4): +            raise Exception( +                "Maximum number of dark peaks must be greater than 3.") -        self._max_n_peaks_dcr = max_n_peaks_dcr +        self._max_n_peaks_dcr = max_n_peaks_dcr          if(self._verbose): -            print("Set maximum number of dark peaks to {:d}.".format(self._max_n_peaks_dcr)) - - - - -    ######################################################################################################## -    ## GetMinuit() -    ######################################################################################################## -    ## -    ## Returns Minuit object after fit. Note this object has fit results stored in bins, not in the unit parameter of the input. -    ## -    ######################################################################################################## - +            print("Set maximum number of dark peaks to {:d}.".format( +                self._max_n_peaks_dcr)) + +    ########################################################################### +    # GetMinuit() +    ########################################################################### +    # +    # Returns Minuit object after fit. Note this object has fit results stored +    # in bins, not in the unit parameter of the input. +    # +    ###########################################################################      def GetMinuit(self): if(self._m_DRM is None): -            raise Exception ("Fit has not yet been performed. Please first perform a fit.") - +            raise Exception( +                "Fit has not yet been performed. 
Please first perform a fit.")          else: return self._m_DRM - - -    ######################################################################################################## -    ## GetFitTime() -    ######################################################################################################## -    ## -    ## Returns fit time, for prefit and fit. -    ## -    ######################################################################################################## - - - + +    ########################################################################### +    # GetFitTime() +    ########################################################################### +    # +    # Returns the elapsed time of the prefit or of the main fit. +    # +    ########################################################################### +     def GetFitTime(self, prefit=False): -         if(prefit): if(self._time_prefit is None): -                raise Exception ("Fit has not yet been performed. Please first perform a fit.") +                raise Exception( +                    ("Fit has not yet been performed. " +                     "Please first perform a fit."))              else: return self._time_prefit -          else: if(self._time_fit is None): -                raise Exception ("Fit has not yet been performed. Please first perform a fit.") +                raise Exception( +                    ("Fit has not yet been performed. " +                     "Please first perform a fit."))              else: return self._time_fit - -    ######################################################################################################## -    ## IsValid() -    ######################################################################################################## -    ## -    ## Returns whether or not Minuit returned valid. -    ## -    ######################################################################################################## - + +    ########################################################################### +    # IsValid() +    ########################################################################### +    # +    # Returns whether or not Minuit reported a valid fit (Minuit.valid). +    # +    ########################################################################### +     def IsValid(self): if(self._m_DRM is None): -            raise Exception ("Fit has not yet been performed. Please first perform a fit.") - +            raise Exception( +                "Fit has not yet been performed. Please first perform a fit.")          else: return self._m_DRM.valid - - -    ######################################################################################################## -    ## GetFitResults() -    ######################################################################################################## -    ## -    ## Returns fit results and errors in the input charge unit. -    ## -    ######################################################################################################## - - - -    def GetFitResults(self, debug=True, bin_units=True): - - +    ########################################################################### +    # GetFitResults() +    ########################################################################### +    # +    # Returns fit results and errors, in bin units if bin_units=True, +    # otherwise converted to the input charge unit. +    # +    ########################################################################### + +    def GetFitResults(self, debug=True, bin_units=True):          if(self._m_DRM is None): -            raise Exception( +                "Fit has not yet been performed. 
Please first perform a fit.") else: - if(bin_units): temp_values = deepcopy(self._fit_values_bin) temp_errors = deepcopy(self._fit_errors_bin) - + else: temp_values = deepcopy(self._fit_values) temp_errors = deepcopy(self._fit_errors) - + if(not debug): + temp_values["p_Ap"] = P_Ap(self._fit_values["tau_R"], + self._fit_values["tau_Ap"], + self._fit_values["t_gate"], + self._fit_values["p_Ap"]) + + temp_errors["p_Ap"] = dP_Ap(self._fit_values["tau_R"], + self._fit_values["tau_Ap"], + self._fit_values["t_gate"], + self._fit_values["p_Ap"], + self._fit_errors["tau_Ap"], + self._fit_errors["p_Ap"]) - temp_values["p_Ap"] = P_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"]) - temp_errors["p_Ap"] = dP_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"], self._fit_errors["tau_Ap"], self._fit_errors["p_Ap"]) - else: - - temp_values["P_Ap"] = P_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"]) - temp_errors["P_Ap"] = dP_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"], self._fit_errors["tau_Ap"], self._fit_errors["p_Ap"]) - + temp_values["P_Ap"] = P_Ap(self._fit_values["tau_R"], + self._fit_values["tau_Ap"], + self._fit_values["t_gate"], + self._fit_values["p_Ap"]) + + temp_errors["P_Ap"] = dP_Ap(self._fit_values["tau_R"], + self._fit_values["tau_Ap"], + self._fit_values["t_gate"], + self._fit_values["p_Ap"], + self._fit_errors["tau_Ap"], + self._fit_errors["p_Ap"]) + t_fit = self.GetFitTime(prefit=False) t_prefit = self.GetFitTime(prefit=True) - temp_values["fit_time"] = t_fit temp_values["prefit_time"] = t_prefit - + return temp_values, temp_errors - - - ######################################################################################################## - ## GetPrefitResults() - ######################################################################################################## - ## - ## Returns pre-fit results and errors in the input charge unit. - ## - ######################################################################################################## - + + ########################################################################### + # GetPrefitResults() + ########################################################################### + # + # Returns pre-fit results and errors in the input charge unit. + # + ########################################################################### + def GetPrefitResults(self, bin_units=True): if(bin_units): return self._prefit_values_bin, self._prefit_errors_bin else: return self._prefit_values, self._prefit_errors - - - ######################################################################################################## - ## GetPeakInfo() - ######################################################################################################## - ## - ## Returns peak information obtained during the fitting procedure. - ## - ######################################################################################################## - + + ########################################################################### + # GetPeakInfo() + ########################################################################### + # + # Returns peak information obtained during the fitting procedure. 
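+    # The returned dictionary mirrors _default_hist_data: counts, density, +    # bin_numbers, bin_centres, background(-subtracted) counts, the bin width, +    # and a "peaks" sub-dictionary with peak_position, peak_height, peak_index +    # and the peak mean / variance / standard deviation plus their errors.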
+ # + ########################################################################### + def GetPeakInfo(self): - if(self._m_DRM is None): - raise Exception ("Fit has not yet been performed. Please first perform a fit.") + raise Exception( + "Fit has not yet been performed. Please first perform a fit.") else: return self._hist_data - - ######################################################################################################## - ## GetChi2(prefit=False) - ######################################################################################################## - ## - ## VARIABLES: - ## prefit(False): Evaluate the model using the prefit or fitted values - ## - ## - ## - ## Calculates the Chi2 of the resultant fit relative to the original density estimate. - ######################################################################################################## - - - - - def GetChi2(self, pars=None, prefit=False, n_sigma_lims=[None, None]): + ########################################################################### + # GetChi2(prefit=False) + ########################################################################### + # + # VARIABLES: + # prefit(False): Evaluate the model using the prefit or fitted values + # + # Calculates the Chi2 of the resultant fit relative to the original + # density estimate. + ########################################################################### - + def GetChi2(self, pars=None, prefit=False, n_sigma_lims=[None, None]): _x_all = self._hist_data["bin_numbers"] _y_all = self._hist_data["counts"] _y_err_all = self._hist_data["counts_error"] - + _Q_0_prefit = self._prefit_values_bin["Q_0"] _sigma_0_prefit = self._prefit_values_bin["sigma_0"] - - if(pars is None): _y_hat_all = self.GetModel(_x_all, prefit=prefit, bin_units=True) else: _y_hat_all = DRM(_x_all, **pars) - - - cond_count = (_y_all>1) - + cond_count = (_y_all > 1) + if(n_sigma_lims[0] is None): if(self._hist_data["truncate_nsigma0_do_min"] is not None): - _lim_nsigma0_low = _Q_0_prefit -self._hist_data["truncate_nsigma0_do_min"]*_sigma_0_prefit + _lim_nsigma0_low = ( + _Q_0_prefit - self._hist_data["truncate_nsigma0_do_min"] + * _sigma_0_prefit) else: _lim_nsigma0_low = -np.inf else: _lim_nsigma0_low = _Q_0_prefit - n_sigma_lims[0]*_sigma_0_prefit - + if(n_sigma_lims[1] is None): _lim_nsigma0_hi = np.inf else: _lim_nsigma0_hi = _Q_0_prefit + n_sigma_lims[1]*_sigma_0_prefit - - - + cond_nsigma0_low = (_x_all > _lim_nsigma0_low) cond_nsigma0_hi = (_x_all < _lim_nsigma0_hi) - conds = cond_count & cond_nsigma0_low & cond_nsigma0_hi - - _x = _x_all[conds] _y = _y_all[conds] + # TODO: _y_err not used. Remove this line? _y_err = _y_err_all[conds] _y_hat = _y_hat_all[conds] - _y_hat_err = np.sqrt(_y_hat_all[conds]) - - + _y_hat_err = np.sqrt(_y_hat_all[conds]) - - chi2 = np.sum(((_y_hat - _y)/_y_hat_err)**2) ndof = len(_x)-10 - - return chi2, ndof - - - - - ######################################################################################################## - ## GetModel(x, prefit=False) - ######################################################################################################## - ## - ## VARIABLES: - ## x: the x values over which to evaluate the model. - ## prefit (False): whether or not to use the prefit values in the model evaluation. 
- ## - ## - ## Evaluates model according to the fitted parameters and returns the Detector Response Model - ######################################################################################################## - + + ########################################################################### + # GetModel(x, prefit=False) + ########################################################################### + # + # VARIABLES: + # x: the x values over which to evaluate the model. + # prefit (False): whether or not to use the prefit values in the model + # evaluation. + # + # + # Evaluates model according to the fitted parameters and returns the + # Detector Response Model. + ########################################################################### def GetModel(self, x, prefit=False, bin_units=False): - if(self._m_DRM is None): - raise Exception ("Fit has not yet been performed. Please first perform a fit.") - else: - if(prefit): + raise Exception( + "Fit has not yet been performed. Please first perform a fit.") + else: + if(prefit): if(bin_units): return DRM(x, **self._prefit_values_bin) else: @@ -446,34 +425,39 @@ class PeakOTron: return DRM(x, **self._fit_values_bin) else: return DRM(x, **self._fit_values) - - - - ######################################################################################################## - ## GetBins(data, bw, bin_method) - ######################################################################################################## - ## - ## VARIABLES: - ## data: the input data, either a 1D array of individual discharges or a 2D array of [bin centres, counts] - ## bw: the bin width. If this is None, then the bin_method will be used to choose the binning if 1D - ## bin_method: the method for binning 1D data. This can be 'knuth', 'freedman', or 'scott' - ## - ## Automated calculation of bins and bin widths for 2D prebinned charge measurements or 1D charge measurements. - ## Note: because spectra from scopes have been observed with variable bin widths, the program also rebins 2D histograms using the same bin width as the original spectrum. - ######################################################################################################## - + + ########################################################################### + # GetBins(data, bw, bin_method) + ########################################################################### + # + # VARIABLES: + # data: the input data, either a 1D array of individual discharges or a 2D + # array of [bin centres, counts] + # + # bw: the bin width. If this is None, then the bin_method will be used to + # choose the binning if 1D + # + # bin_method: the method for binning 1D data. This can be 'knuth', + # 'freedman', or 'scott' + # + # + # Automated calculation of bins and bin widths for 2D prebinned charge + # measurements or 1D charge measurements. + # + # NOTE: because spectra from scopes have been observed with variable bin + # widths, the program also rebins 2D histograms using the same bin width + # as the original spectrum. + ########################################################################### + def GetBins(self, data, bw, bin_method, bin_0): - - if(data.ndim == 1): - if(self._verbose): print("Data is assumed 1D. Assuming list of charges.") if(bw is None): if(self._verbose): - print("Bin width not set. Binning with {:s}".format(bin_method)) - + print(f"Bin width not set. 
Binning with {bin_method}") + if(bin_method == "knuth"): bw = knuth_bin_width(data) elif(bin_method == "freedman"): @@ -481,54 +465,62 @@ class PeakOTron: elif(bin_method == "scott"): bw = scott_bin_width(data) else: - raise Exception("Binning method not recognised. Please select bin_method from 'knuth', 'freedman' or 'scott'") + raise Exception( + ("Binning method not recognised. Please select " + "bin_method from 'knuth', 'freedman' or 'scott'!")) x_min, x_max = data.min(), data.max() elif(data.ndim == 2): - if(self._verbose): - print("Data is assumed 2D. Assuming input is a histogram, with columns [bin_centre, counts].") - - _x, _y = data[:,0], data[:,1] - data = np.array(list(chain.from_iterable([[__x]*int(__y) for __x,__y in zip(_x,_y)]))) + print(("Data is assumed 2D. Assuming input is a histogram, " + "with columns [bin_centre, counts].")) + + _x, _y = data[:, 0], data[:, 1] + data = np.array(list( + chain.from_iterable([ + [__x]*int(__y) for __x, __y in zip(_x, _y)]))) + f_bin = interp1d(np.arange(0, len(_x)), _x) if(bw is None): bw = abs(f_bin(1) - f_bin(0)) - idx_nonzero = np.squeeze(np.argwhere((_y>0))) - x_min, x_max = _x[np.min(idx_nonzero)]-bw/2, _x[np.max(idx_nonzero)]+bw/2 - + + idx_nonzero = np.squeeze(np.argwhere((_y > 0))) + x_min, x_max = (_x[np.min(idx_nonzero)]-bw/2, + _x[np.max(idx_nonzero)]+bw/2) + else: - raise Exception("Data is neither 1D nor 2D. Please provide the correct dimensions of data to the fit program.") + raise Exception(("Data is neither 1D nor 2D. Please provide the " + "correct dimensions of data to the fit program.")) if(bin_0 is not None): if(self._verbose): - print("Setting first bin to be {:3.3f} input charge units".format(bin_0)) - + print( + f"Setting first bin to be {bin_0:3.3f} input charge units") + x_min = bin_0 - + nbins = np.ceil((x_max - x_min) / bw) nbins = max(1, nbins) bins = x_min + bw * np.arange(nbins + 1) return data, bw, nbins, bins - - - - def PlotHistogram(self, figsize=(10.0,10.0), save_directory=None, label="Histogram", x_label="ADC"): + def PlotHistogram(self, figsize=(10.0, 10.0), save_directory=None, + label="Histogram", x_label="ADC", display=True): fig = plt.figure(figsize=figsize) ax = plt.subplot(111) - + counts = self._hist_data["counts"] bin_numbers = self._hist_data["bin_numbers"] bin_centres = self._hist_data["bin_centres"] - - ax.step(bin_numbers, - counts, - #label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))), - label = "{:s}".format(label), - color="C0", + + ax.step(bin_numbers, + counts, + # label=(f"{label} \n $N_{{events}}$ " + # "= {LatexFormat(float(np.sum(counts)))}"), + label="{:s}".format(label), + color="C0", where="mid", lw=5) ax.grid(which="both") @@ -536,61 +528,66 @@ class PeakOTron: ax.set_xlabel("Bin", fontsize=self._plot_fontsize) ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45) ax.tick_params(axis="y", labelsize=self._plot_fontsize) - ax.set_ylim([1,None]) + ax.set_ylim([1, None]) ax.set_yscale("log") + ax.set_xlabel(x_label) ax.legend(fontsize=self._plot_legendfontsize) - - _f = interp1d(bin_numbers, bin_centres, bounds_error=False, fill_value="extrapolate") - _f_inv = interp1d(bin_centres, bin_centres, bounds_error=False, fill_value="extrapolate") - - -# secax = ax.secondary_xaxis('top', functions=(_f, _f_inv)) -# secax.set_xlabel('{:s}'.format(x_label), fontsize=self._plot_fontsize) -# secax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45) - - -# mf = ScalarFormatter(useMathText=True) -# mf.set_powerlimits((-2,2)) -# 
secax.xaxis.set_major_formatter(mf) -# offset = secax.xaxis.get_offset_text() -# offset.set_size(int(0.8*self._plot_fontsize)) - + + # TODO: _f and _f_inv not accessed! Remove? + _f = interp1d(bin_numbers, bin_centres, bounds_error=False, + fill_value="extrapolate") + + _f_inv = interp1d(bin_centres, bin_centres, bounds_error=False, + fill_value="extrapolate") + + # secax = ax.secondary_xaxis('top', functions=(_f, _f_inv)) + # secax.set_xlabel('{:s}'.format(x_label), + # fontsize=self._plot_fontsize) + # + # secax.tick_params(axis="x", labelsize=self._plot_fontsize, + # rotation=45) + + # mf = ScalarFormatter(useMathText=True) + # mf.set_powerlimits((-2,2)) + # secax.xaxis.set_major_formatter(mf) + # offset = secax.xaxis.get_offset_text() + # offset.set_size(int(0.8*self._plot_fontsize)) + if(save_directory is not None): print("Saving figure to {0}...".format(save_directory)) fig.savefig(save_directory) - if(display): + + if display: plt.pause(0.01) fig.show() else: plt.close(fig) - - - - - def PlotFFT(self, figsize=(10.0,10.0), save_directory=None, label="Histogram"): - + def PlotFFT(self, figsize=(10.0, 10.0), save_directory=None, + label="Histogram", display=True): + fig = plt.figure(figsize=figsize) ax = plt.subplot(111) - - + fft_freq = self._hist_data["fft_freq"] + # TODO: counts never used? counts = self._hist_data["counts"] fft_amplitude = self._hist_data["fft_amplitude"] G_fft = self._hist_data["G_fft"] inv_G_fft = 1/G_fft - + ax.step(fft_freq, - fft_amplitude, - color="C0", - where="mid", - #label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))), - label = "{:s}".format(label), - lw=5) - - - ax.axvline(x=inv_G_fft, color="purple", linestyle="--", lw=4, label="$G^{{*}}_{{FFT}}$ = {:3.3f} Bin".format(G_fft)) - + fft_amplitude, + color="C0", + where="mid", + # label=(f"{label} \n $N_{{events}}$ " + # f"= {LatexFormat(float(np.sum(counts)))}"), + label="{:s}".format(label), + lw=5) + + ax.axvline(x=inv_G_fft, color="purple", linestyle="--", lw=4, + label="$G^{{*}}_{{FFT}}$ = {:3.3f} Bin".format(G_fft)) + ax.set_yscale("log") ax.set_xscale("log") ax.grid(which="both") @@ -599,143 +596,148 @@ class PeakOTron: ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45) ax.tick_params(axis="y", labelsize=self._plot_fontsize) ax.legend(fontsize=self._plot_legendfontsize) - + if(save_directory is not None): print("Saving figure to {0}...".format(save_directory)) fig.savefig(save_directory) + if(display): plt.pause(0.01) fig.show() else: plt.close(fig) - - def PlotBGSub(self, figsize=(10,10), save_directory=None, plot_bgsub=False, label="Histogram"): - + + def PlotBGSub(self, figsize=(10, 10), save_directory=None, + plot_bgsub=False, label="Histogram", display=True): + fig = plt.figure(figsize=figsize) ax = plt.subplot(111) - + counts = self._hist_data["counts"] counts_bg = self._hist_data["counts_bg"] counts_bgsub = self._hist_data["counts_bgsub"] bin_numbers = self._hist_data["bin_numbers"] if(plot_bgsub): - - ax.step(bin_numbers, - counts, - #label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))), - label = "{:s}".format(label), - color="C0", - where="mid", - lw=5) - - + counts, + # label=(f"{label} \n $N_{{events}}$ " + # f"= {LatexFormat(float(np.sum(counts)))}"), + label="{:s}".format(label), + color="C0", + where="mid", + lw=5) + ax.step(bin_numbers, - counts_bgsub, - #label="{:s} - BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bgsub)))), - label = "{:s} - 
Background".format(label), - color="r", - where="mid", - lw=5) - + counts_bgsub, + # label=(f"{label} - BG \n $N_{{events}}$ " + # "= {LatexFormat(float(np.sum(counts_bgsub)))}"), + label="{:s} - Background".format(label), + color="r", + where="mid", + lw=5) + else: ax.step(bin_numbers, - counts, - #label="{:s} \n $N_{{events}}$ = {:s}".format(label,LatexFormat(float(np.sum(counts)))), - label = "{:s}".format(label), - color="C0", - where="mid", - lw=5) - - ax.fill_between(bin_numbers[counts_bg>0], - counts_bg[counts_bg>0], - #label="{:s}, BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bg)))), - label = "{:s}, Background".format(label), - color='none', - hatch="///", - edgecolor="r", - step="mid", - lw=5) - + counts, + # label=(f"{label} \n $N_{{events}}$ " + # f"= {LatexFormat(float(np.sum(counts)))}"), + label="{:s}".format(label), + color="C0", + where="mid", + lw=5) + + ax.fill_between( + bin_numbers[counts_bg > 0], + counts_bg[counts_bg > 0], + # label=(f"{label} \n $N_{{events}}$ " + # f"= {LatexFormat(float(np.sum(counts_bg)))}"), + label="{:s}, Background".format(label), + color='none', + hatch="///", + edgecolor="r", + step="mid", + lw=5) + ax.grid(which="both") ax.set_xlabel("Bin", fontsize=25) ax.set_ylabel("Counts", fontsize=self._plot_fontsize) ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45) ax.tick_params(axis="y", labelsize=self._plot_fontsize) - ax.set_ylim([1,None]) + ax.set_ylim([1, None]) ax.set_yscale("log") ax.legend(fontsize=self._plot_legendfontsize) - + if(save_directory is not None): print("Saving figure to {0}...".format(save_directory)) fig.savefig(save_directory) + if(display): plt.pause(0.01) fig.show() else: plt.close(fig) - - - def PlotPeaks(self, figsize=(10.0,10.0), save_directory=None, label="Histogram"): - + + def PlotPeaks(self, figsize=(10.0, 10.0), save_directory=None, + label="Histogram", display=True): + fig = plt.figure(figsize=figsize) ax = plt.subplot(111) - + counts = self._hist_data["counts"] counts_bgsub = self._hist_data["counts_bgsub"] bin_numbers = self._hist_data["bin_numbers"] Q_0_est = self._hist_data["Q_0_est"] - + peak_position = self._hist_data["peaks"]["peak_position"] - ax.step(bin_numbers, + ax.step(bin_numbers, counts, - #label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))), - label = "{:s}".format(label), - color="C0", - lw=5, + # label=(f"{label} \n $N_{{events}}$ " + # f"= {LatexFormat(float(np.sum(counts)))}"), + label="{:s}".format(label), + color="C0", + lw=5, where="mid", zorder=-5) - + ax.step(bin_numbers, counts_bgsub, - #label="{:s} - BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bgsub)))), + # label=(f"{label} - BG \n $N_{{events}}$ " + # "= {LatexFormat(float(np.sum(counts_bgsub)))}"), label="{:s} - Background".format(label), - color="r", - lw=5, - where="mid", - zorder=-4) - - - color_vals = [self._cmap(_v) for _v in np.linspace(0,0.75, len(peak_position))] - + color="r", + lw=5, + where="mid", + zorder=-4) + + color_vals = [ + self._cmap(_v) for _v in np.linspace(0, 0.75, len(peak_position))] + for peak_i, pp in enumerate(peak_position): - if(peak_i == 0): annotation_text = "$Q_{{0}}$" color = "purple" - ls="--" - - ax.axvline(x=pp, - color=color, - linestyle=ls, - lw=2.5, - label = '{:s}'.format(annotation_text) - ) - + ls = "--" + + ax.axvline(x=pp, + color=color, + linestyle=ls, + lw=2.5, + label='{:s}'.format(annotation_text) + ) else: annotation_text = "Peak Position {:d}".format(peak_i) color = 
color_vals[peak_i] -                ls="-" - -                ax.axvline(x=pp, -                       color=color, -                       linestyle=ls, -                       lw=2.5 -                      ) -#                 plt.text(10.1,0, annotation_text, color=color, fontsize=self._plot_legend,rotation=90) - - +                ls = "-" + +                ax.axvline(x=pp, +                           color=color, +                           linestyle=ls, +                           lw=2.5 +                           ) + +                # plt.text(10.1,0, annotation_text, color=color, +                #          fontsize=self._plot_legend,rotation=90)          ax.scatter([Q_0_est], [counts[np.argmin(abs(bin_numbers-Q_0_est))]], @@ -743,71 +745,72 @@ class PeakOTron: s=200, color="purple", zorder=5, -                   label= "$Q^{est}_{{0}}$") - +                   label="$Q^{est}_{{0}}$") +          ax.set_xlabel("Bin", fontsize=self._plot_fontsize) ax.set_ylabel("Counts", fontsize=self._plot_fontsize) ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45) ax.tick_params(axis="y", labelsize=self._plot_fontsize) ax.grid(which="both") ax.legend(fontsize=self._plot_legendfontsize) - +          if(save_directory is not None): print("Saving figure to {0}...".format(save_directory)) fig.savefig(save_directory) +          if(display): plt.pause(0.01) fig.show() else: plt.close(fig) +    def PlotVarianceFit(self, figsize=(10.0, 10.0), save_directory=None, +                        y_limits=[None, None], x_limits=[None, None], +                        display=True): -    def PlotVarianceFit(self, figsize=(10.0, 10.0), save_directory=None, y_limits=[None, None], x_limits=[None, None]): -          fig = plt.figure(figsize=figsize) ax = plt.subplot(111) - +          peak_index = self._hist_data["peaks"]["peak_index"] peak_variance = self._hist_data["peaks"]["peak_variance"] peak_variance_error = self._hist_data["peaks"]["peak_variance_error"] - - +          sigma_0 = self._prefit_values_bin["sigma_0"] dsigma_0 = self._prefit_errors_bin["sigma_0"] sigma_1 = self._prefit_values_bin["sigma_1"] dsigma_1 = self._prefit_errors_bin["sigma_1"] - -        ax.errorbar( peak_index, -                     peak_variance, -                     yerr=peak_variance_error, -                     fmt="o", -                     lw=3, -                     markersize=10, -                     color="C0", -                     label="Variances of Peaks" + +        ax.errorbar(peak_index, +                    peak_variance, +                    yerr=peak_variance_error, +                    fmt="o", +                    lw=3, +                    markersize=10, +                    color="C0", +                    label="Variances of Peaks"                     ) - + +        tmp_label = ( +            f"Linear Fit: \n $\sigma^{{2}}_{{0}}$ = {LatexFormat(sigma_0**2)} " +            f"$\pm$ {LatexFormat(2*sigma_0*dsigma_0)} Bin$^{{2}}$ \n " +            f"$\sigma^{{2}}_{{1}}$ = {LatexFormat(sigma_1**2)} " +            f"$\pm$ {LatexFormat(2*sigma_1*dsigma_1)} Bin$^{{2}}$" +        ) + +        ax.plot(peak_index, -                 Linear(peak_index, -                       sigma_1**2, -                       sigma_0**2 -                       ), -                 color="C0", -                 linestyle="--", -                 lw=3, -                 label = "Linear Fit: \n $\sigma^{{2}}_{{0}}$ = {:s} $\pm$ {:s} Bin$^{{2}}$ \n $\sigma^{{2}}_{{1}}$ = {:s} $\pm$ {:s} Bin$^{{2}}$".format( -                                                                                                     LatexFormat(sigma_0**2), -                                                                                                     LatexFormat(2*sigma_0*dsigma_0), -                                                                                                     LatexFormat(sigma_1**1), -                                                                                                     LatexFormat(2*sigma_0*dsigma_1), - -                ), +                Linear(peak_index, +                       sigma_1**2, +                       sigma_0**2 +                       ), +                color="C0", +                linestyle="--", +                lw=3, +                label=tmp_label,                 ) - - - +          ax.set_xlabel("Peak Index", fontsize=self._plot_fontsize) -        ax.set_ylabel("$\sigma^{2}_{peak}$ [Bin$^{2}$]", fontsize=self._plot_fontsize) +        ax.set_ylabel("$\sigma^{2}_{peak}$ [Bin$^{2}$]", +                      fontsize=self._plot_fontsize)          ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45) ax.tick_params(axis="y", labelsize=self._plot_fontsize) ax.grid(which="both") @@ -818,20 +821,22 @@ class PeakOTron: if(save_directory is not None): print("Saving figure to {0}...".format(save_directory)) fig.savefig(save_directory) +          if(display): plt.pause(0.01) fig.show() else: plt.close(fig) - - -    def PlotMeanFit(self, figsize=(10.0, 10.0), y_limits=[None, None], x_limits=[None, None], save_directory=None): - + +    def PlotMeanFit(self, figsize=(10.0, 
10.0), y_limits=[None, None], + x_limits=[None, None], save_directory=None, display=True): + fig = plt.figure(figsize=figsize) ax = plt.subplot(111) - + + # TODO: bin_numbers never used? bin_numbers = self._hist_data["bin_numbers"] - + peak_index = self._hist_data["peaks"]["peak_index"] peak_mean = self._hist_data["peaks"]["peak_mean"] peak_mean_error = self._hist_data["peaks"]["peak_mean_error"] @@ -839,291 +844,267 @@ class PeakOTron: Q_0 = self._prefit_values_bin["Q_0"] dQ_0 = self._prefit_errors_bin["Q_0"] G = self._hist_data["G_fft"] - - ax.errorbar( peak_index, - peak_mean, - yerr=peak_mean_error, - fmt="o", - markersize=10, - lw=3, - color="C0", - label="Means of Peaks" + + ax.errorbar(peak_index, + peak_mean, + yerr=peak_mean_error, + fmt="o", + markersize=10, + lw=3, + color="C0", + label="Means of Peaks" ) - - ax.plot( peak_index, - Linear(peak_index, - G, - Q_0, - ), - color="C0", - linestyle="--", - lw=3, - markersize=10, - label = "Linear Fit: \n $Q_{{0}}$ = {:s} $\pm$ {:s} Bin \n $G^{{*}}_{{FFT}}$ = {:s} Bin".format( - LatexFormat(Q_0), - LatexFormat(dQ_0), - LatexFormat(G), - - ), + + tmp_label = (f"Linear Fit: \n $Q_{{0}}$ = {LatexFormat(Q_0)} " + f"$\pm$ {LatexFormat(dQ_0)} Bin \n " + f"$G^{{*}}_{{FFT}}$ = {LatexFormat(G)} Bin") + + ax.plot(peak_index, + Linear(peak_index, + G, + Q_0, + ), + color="C0", + linestyle="--", + lw=3, + markersize=10, + label=tmp_label, ) - - + ax.set_xlabel("Peak Index", fontsize=self._plot_fontsize) ax.set_ylabel("Peak Position [Bin]", fontsize=self._plot_fontsize) -# ax.set_ylabel("$\\langle Q_{{peak}} \\rangle$", fontsize=self._plot_fontsize) + # ax.set_ylabel("$\\langle Q_{{peak}} \\rangle$", + # fontsize=self._plot_fontsize) ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45) ax.tick_params(axis="y", labelsize=self._plot_fontsize) ax.grid(which="both") ax.set_ylim(y_limits) ax.set_xlim(x_limits) ax.legend(fontsize=self._plot_legendfontsize) - - + if(save_directory is not None): print("Saving figure to {0}...".format(save_directory)) fig.savefig(save_directory) + if(display): plt.pause(0.01) fig.show() else: plt.close(fig) - - - - - - def PlotDCR(self, figsize=(10.0, 10.0), save_directory=None, label = "Histogram"): - - + + def PlotDCR(self, figsize=(10.0, 10.0), save_directory=None, + label="Histogram", display=True): + fig = plt.figure(figsize=figsize) ax = plt.subplot(111) - + counts = self._hist_data["counts"] bin_numbers = self._hist_data["bin_numbers"] bw = self._hist_data["bw"] - - Q_0 = self._prefit_values_bin["Q_0"] G = self._prefit_values_bin["G"] - DCR = self._prefit_values_bin["DCR"] dDCR = self._prefit_errors_bin["DCR"] - - + K = (bin_numbers - Q_0)/G - - - cond_NN = (K>=0.45)&(K<=0.55) - cond_Nc = (K<=0.5) - cond_DCR = (K<=2.5) - - NN_sum = max(np.sum(counts[cond_NN]),1) + cond_NN = (K >= 0.45) & (K <= 0.55) + cond_Nc = (K <= 0.5) + cond_DCR = (K <= 2.5) + + NN_sum = max(np.sum(counts[cond_NN]), 1) NN = np.mean(counts[cond_NN]) - - if(NN!=NN): + + if(NN != NN): NN = float(counts[np.argmin(abs(K-0.5))]) + # TODO: NN_sum never accessed. Remove? NN_sum = float(NN) - - Nc = max(float(np.sum(counts[cond_Nc])), 1) - + # TODO: Nc never accessed. Remove? 
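+        # (The NN != NN comparison above is the conventional NaN test: the +        # np.mean of an empty selection returns NaN, in which case the count +        # of the bin nearest K = 0.5 is used instead.)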
+ Nc = max(float(np.sum(counts[cond_Nc])), 1) - ax.step(K[cond_DCR], - counts[cond_DCR], - label="{:s}".format(label), - color="C0", + ax.step(K[cond_DCR], + counts[cond_DCR], + label="{:s}".format(label), + color="C0", where="mid", lw=5) - - ax.fill_between( - K[cond_Nc], - counts[cond_Nc], - color='none', - hatch="///", - edgecolor="r", - lw=3, - step="mid", - #label = "$N_{{K \leq 0.5}}$={:d} events".format(int(Nc)) ) - label = "$N_{{K \leq 0.5}}$") - + K[cond_Nc], + counts[cond_Nc], + color='none', + hatch="///", + edgecolor="r", + lw=3, + step="mid", + # label="$N_{{K \leq 0.5}}$={:d} events".format(int(Nc))) + label="$N_{{K \leq 0.5}}$") + ax.fill_between( - K[cond_NN], - counts[cond_NN], - color='green', - edgecolor="g", - alpha=0.3, - step="mid", - #label = "$\\langle N \\rangle_{{0.45 < K \leq 0.55}}$={:3.1f} events".format(NN)) - label = "$\\langle N \\rangle_{{0.45 < K \leq 0.55}}$") - - legend_title="$DCR$={:s} $\pm$ {:s} MHz".format(LatexFormat(DCR*1e3), LatexFormat(dDCR*1e3)) - + K[cond_NN], + counts[cond_NN], + color='green', + edgecolor="g", + alpha=0.3, + step="mid", + # label=("$\\langle N \\rangle_{{0.45 < " + # f"K \leq 0.55}}$={NN:3.1f} events"), + label="$\\langle N \\rangle_{{0.45 < K \leq 0.55}}$") + + legend_title = "$DCR$={:s} $\pm$ {:s} MHz".format(LatexFormat(DCR*1e3), + LatexFormat(dDCR*1e3) + ) + ax.set_xlabel("K [p.e.]", fontsize=self._plot_fontsize) ax.grid(which="both") - + ax.set_ylabel("Counts", fontsize=self._plot_fontsize) - ax.tick_params(axis="x", which='both', labelsize=self._plot_fontsize, rotation=45) + ax.tick_params(axis="x", which='both', labelsize=self._plot_fontsize, + rotation=45) + ax.tick_params(axis="y", which='both', labelsize=self._plot_fontsize) ax.set_yscale("log") - ax.legend(title=legend_title, title_fontsize=int(self._plot_legendfontsize), fontsize=int(self._plot_legendfontsize)) - + ax.legend(title=legend_title, + title_fontsize=int(self._plot_legendfontsize), + fontsize=int(self._plot_legendfontsize)) if(save_directory is not None): print("Saving figure to {0}...".format(save_directory)) fig.savefig(save_directory) - if(display): + + if display: plt.pause(0.01) fig.show() else: plt.close(fig) - - def PlotFit(self, - figsize=(10.0,10.0), + figsize=(10.0, 10.0), labelsize=None, ticksize=None, titlesize=None, legendsize=None, title=None, - xlabel = None, + xlabel=None, scaled=False, save_directory=None, label=None, - y_limits = [1, None], - residual_limits = [-5, 5], + y_limits=[1, None], + residual_limits=[-5, 5], residualticksize=None, - linewidth_main = 5, - x_limits = [None, None], + linewidth_main=5, + x_limits=[None, None], display=True, prefit=False, - plot_in_bins=False - ): - - + plot_in_bins=False): + if(labelsize is None): - labelsize=self._plot_fontsize - + labelsize = self._plot_fontsize + if(ticksize is None): - ticksize=self._plot_fontsize - + ticksize = self._plot_fontsize + if(titlesize is None): titlesize = self._plot_fontsize - + if(legendsize is None): legendsize = self._plot_legendfontsize - + if(residualticksize is None): - residualticksize = int(0.8*self._plot_fontsize) - + residualticksize = int(0.8*self._plot_fontsize) - - - if(plot_in_bins): x_all = self._hist_data["bin_numbers"] bw = 1 else: x_all = self._hist_data["bin_centres"] bw = self._hist_data["bw"] - - - + y_all = self._hist_data["counts"] y_err_all = self._hist_data["counts_error"] - + Q_0_prefit = self._prefit_values_bin["Q_0"] sigma_0_prefit = self._prefit_values_bin["sigma_0"] - - + if(self._hist_data["truncate_nsigma0_do_min"] is not None): - 
lim_nsigma0 = Q_0_prefit - self._hist_data["truncate_nsigma0_do_min"]*sigma_0_prefit - cond_sel_nsigma0 = (self._hist_data["bin_numbers"]>lim_nsigma0) - + lim_nsigma0 = (Q_0_prefit + - self._hist_data["truncate_nsigma0_do_min"] + * sigma_0_prefit) + + cond_sel_nsigma0 = (self._hist_data["bin_numbers"] > lim_nsigma0) + else: cond_sel_nsigma0 = np.full(len(x_all), True).astype(bool) - + x = x_all[cond_sel_nsigma0] y = y_all[cond_sel_nsigma0] + # TODO: y_err never accessed! Remove? y_err = y_err_all[cond_sel_nsigma0] - + y_hat = self.GetModel(x, prefit=prefit, bin_units=plot_in_bins)*bw y_hat_err = np.sqrt(y_hat) - chi2, ndf = self.GetChi2(prefit=prefit) - - if(xlabel is None): - if(plot_in_bins): - xlabel="Bin" + if plot_in_bins: + xlabel = "Bin" else: xlabel = "Q" - + fig = plt.figure(figsize=figsize) gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1]) - ax0 = plt.subplot(gs[0]) + ax0 = plt.subplot(gs[0]) if(label is None): - label = "Histogram" -# label="{:s},\n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(y)))) + label = "Histogram" + # label = (f"{label},\n $N_{{events}}$ " + # f"= {LatexFormat(float(np.sum(y)))}") + label = "{:s}".format(label) ax0.step(x_all, y_all, label=label, lw=5, color="C0", where="mid") ax0.plot(x, y_hat, linestyle="--", label="Fit", lw=5, color="C1") - ax0.plot([], [], ' ', label="$\\chi^{{2}}$/NDF = {:s}".format(LatexFormat(chi2/ndf))) + ax0.plot([], [], ' ', + label="$\\chi^{{2}}$/NDF = {:s}".format(LatexFormat(chi2/ndf)) + ) - - ax0.set_ylabel("Counts".format(xlabel, xlabel), fontsize=labelsize) + ax0.set_ylabel("Counts", fontsize=labelsize) ax0.tick_params(axis="y", labelsize=ticksize) - - if(y_limits[0] is None): - y_limits[0] = np.min(y[y>0]) + y_limits[0] = np.min(y[y > 0]) + ax0.set_ylim(y_limits) ax0.set_xlim(x_limits) if(title is not None): ax0.set_title(title, fontsize=titlesize) - + ax0.grid(which="both") - ax0.set_yscale("log") ax0.legend(fontsize=legendsize) - - - - ax1 = plt.subplot(gs[1], sharex = ax0) - - ax1.scatter(x, (y - y_hat)/(y_hat_err), color="C1") + ax1 = plt.subplot(gs[1], sharex=ax0) + ax1.scatter(x, (y - y_hat)/(y_hat_err), color="C1") ax1.tick_params(axis="x", labelsize=ticksize, rotation=45) ax1.tick_params(axis="y", labelsize=ticksize) ax1.set_ylabel("Pull", fontsize=labelsize) ax1.set_xlabel(xlabel, fontsize=labelsize) ax1.set_ylim(residual_limits) - ax1.set_yticks(np.arange(int(np.floor(np.min(residual_limits))), + ax1.set_yticks(np.arange(int(np.floor(np.min(residual_limits))), int(np.ceil(np.max(residual_limits))))) ax1.axhline(y=-1.0, color="purple", lw=2.5, linestyle="--") ax1.axhline(y=1.0, color="purple", lw=2.5, linestyle="--") ax1.tick_params(axis="x", labelsize=ticksize) ax1.tick_params(axis="y", labelsize=residualticksize) ax1.grid(which="both") - - - fig.tight_layout() - + if(not plot_in_bins): mf = ScalarFormatter(useMathText=True) - mf.set_powerlimits((-2,2)) + mf.set_powerlimits((-2, 2)) plt.gca().xaxis.set_major_formatter(mf) offset = ax1.xaxis.get_offset_text() offset.set_size(int(0.8*ticksize)) - - fig.subplots_adjust(hspace=.0) if(save_directory is not None): print("Saving figure to {0}...".format(save_directory)) @@ -1133,1019 +1114,1052 @@ class PeakOTron: fig.show() else: plt.close(fig) - - - - - ######################################################################################################## - ## Fit(prefit=False) - ######################################################################################################## - ## - ## VARIABLES: - ## tau: slow time-constant of SiPM pulse in 
nanoseconds - ## tau_err: optional error to allow tau to float - ## t_0: pre-gate integration window in nanoseconds. - ## t_0_err: optional error to allow t_0 to float - ## tau_R: recovery time of SiPM in nanoseconds - ## tau_R_err: optional error to allow tau_R to float - ## t_gate: gate length in nanoseconds - ## t_gate_err: optional error to allow t_gate to float - ## bw: optional bin width if data is 1D - ## truncate_nG: optional parameter to truncate distribution a certain fraction/number of estimated gain units before the estimated pedestal. - ## peak_dist_nG: required minimum distance between peaks in gain units to register a peaks. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html - ## peak_rel_height: Chooses the relative height at which the peak width is measured as a percentage of its prominence. 1.0 calculates the width of the peak at its lowest contour line while 0.5 evaluates at half the prominence height. Must be at least 0. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.peak_widths.html#scipy.signal.peak_widths - ## bin_method: binning method for unbinned data. "knuth", "freedman" or "scott" rules are available. - ## alpha_peaks: the maximum percentile of the distribution a found peak is allowed to have. This determines the threshold on the selected peak positions. - ## - ## Main fit function of PeakOTron, which performs the prefit and main fit using Minuit. - ## - ######################################################################################################## - - - def __MinuitFit(self, f_DRM, A): - - t_0 = time.time() - ######################################################################################################## - ## Inititalise Minuit with hypothesis of no dark counts or afterpulsing. - ######################################################################################################## - - m_DRM = Minuit(f_DRM, - Q_0 = self._prefit_values_bin["Q_0"], - G = self._prefit_values_bin["G"], - mu = self._prefit_values_bin["mu"], - lbda = self._prefit_values_bin["lbda"], - sigma_0 = self._prefit_values_bin["sigma_0"], - sigma_1 = self._prefit_values_bin["sigma_1"], - tau_Ap = self._prefit_values_bin["tau_Ap"], - p_Ap = epsilon(), - DCR = epsilon(), - tau = self._prefit_values_bin["tau"], - tau_R = self._prefit_values_bin["tau_R"], - t_gate = self._prefit_values_bin["t_gate"], - t_0 = self._prefit_values_bin["t_0"], - k_max = self._prefit_values_bin["k_max"], - k_max_dcr = self._prefit_values_bin["k_max_dcr"], - len_pad = self._prefit_values_bin["len_pad"], - A = A - ) - - - - - - m_DRM.errors["Q_0"] = self._prefit_errors_bin["Q_0"] - m_DRM.errors["G"] = self._prefit_errors_bin["G"] - m_DRM.errors["mu"] = self._prefit_errors_bin["mu"] - m_DRM.errors["lbda"] = self._prefit_errors_bin["lbda"] - m_DRM.errors["sigma_0"] = self._prefit_errors_bin["sigma_0"] - m_DRM.errors["sigma_1"] = self._prefit_errors_bin["sigma_1"] - m_DRM.errors["tau_Ap"] = self._prefit_errors_bin["tau_Ap"] - m_DRM.errors["p_Ap"] = self._prefit_errors_bin["p_Ap"] - m_DRM.errors["DCR"] = self._prefit_errors_bin["DCR"] - m_DRM.errors["A"] = np.sqrt(A) - - ######################################################################################################## - ## Set parameter limits - ## These limits have been chosen to best constrain the possible values for each of the parameters. 
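
As an aside for readers following __MinuitFit below: the fix/release/refit cycle it uses is a standard iminuit pattern. A minimal sketch with a toy least-squares cost (the names and numbers here are illustrative, not PeakOTron's actual model):

    import numpy as np
    from iminuit import Minuit

    rng = np.random.default_rng(1)
    x = np.linspace(0.0, 1.0, 50)
    y = 2.0*x + 0.5 + rng.normal(0.0, 0.1, 50)

    def cost(a, b):
        # plain least-squares cost, standing in for the DRM likelihood
        return np.sum((y - (a*x + b))**2)

    m = Minuit(cost, a=1.0, b=0.0)
    m.errordef = Minuit.LEAST_SQUARES
    m.limits["a"] = (0.0, None)   # physical lower bound, as for G, mu, DCR
    m.fixed["b"] = True           # pass 1: freeze the nuisance parameter
    m.migrad()
    m.fixed["b"] = False          # pass 2: release it and refit everything
    m.migrad()
    m.hesse()                     # parabolic errors after convergence
    print(m.values["a"], m.errors["a"])
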
- ## NOTE: we are working in bin widths, so the minimum allowed pedestal position is the first/second bin for Q_0, G, sigma_0, sigma_1 - ######################################################################################################## + ########################################################################### + # Fit(prefit=False) + ########################################################################### + # + # VARIABLES: + # tau: slow time-constant of SiPM pulse in nanoseconds + # tau_err: optional error to allow tau to float + # t_0: pre-gate integration window in nanoseconds. + # t_0_err: optional error to allow t_0 to float + # tau_R: recovery time of SiPM in nanoseconds + # tau_R_err: optional error to allow tau_R to float + # t_gate: gate length in nanoseconds + # t_gate_err: optional error to allow t_gate to float + # bw: optional bin width if data is 1D + # truncate_nG: optional parameter to truncate distribution a certain + # fraction/number of estimated gain units before the estimated + # pedestal. + # + # peak_dist_nG: required minimum distance between peaks in gain units to + # register a peaks. + # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html + # + # peak_rel_height: Chooses the relative height at which the peak width is + # measured as a percentage of its prominence. 1.0 + # calculates the width of the peak at its lowest contour + # line while 0.5 evaluates at half the prominence height. + # Must be at least 0. + # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.peak_widths.html#scipy.signal.peak_widths + # + # bin_method: binning method for unbinned data. "knuth", "freedman" or + # "scott" rules are available. + # + # alpha_peaks: the maximum percentile of the distribution a found peak is + # allowed to have. This determines the threshold on the + # selected peak positions. + # + # + # Main fit function of PeakOTron, which performs the prefit and main fit + # using Minuit. + # + ########################################################################### + def __MinuitFit(self, f_DRM, A): + t_0 = time.time() + ################################################################### + # Inititalise Minuit with hypothesis of no dark counts or + # afterpulsing. + ################################################################### + + m_DRM = Minuit(f_DRM, + Q_0=self._prefit_values_bin["Q_0"], + G=self._prefit_values_bin["G"], + mu=self._prefit_values_bin["mu"], + lbda=self._prefit_values_bin["lbda"], + sigma_0=self._prefit_values_bin["sigma_0"], + sigma_1=self._prefit_values_bin["sigma_1"], + tau_Ap=self._prefit_values_bin["tau_Ap"], + # TODO: Linter says epsilon() is not defined! 
+ p_Ap=epsilon(), + DCR=epsilon(), + tau=self._prefit_values_bin["tau"], + tau_R=self._prefit_values_bin["tau_R"], + t_gate=self._prefit_values_bin["t_gate"], + t_0=self._prefit_values_bin["t_0"], + k_max=self._prefit_values_bin["k_max"], + k_max_dcr=self._prefit_values_bin["k_max_dcr"], + len_pad=self._prefit_values_bin["len_pad"], + A=A + ) + + m_DRM.errors["Q_0"] = self._prefit_errors_bin["Q_0"] + m_DRM.errors["G"] = self._prefit_errors_bin["G"] + m_DRM.errors["mu"] = self._prefit_errors_bin["mu"] + m_DRM.errors["lbda"] = self._prefit_errors_bin["lbda"] + m_DRM.errors["sigma_0"] = self._prefit_errors_bin["sigma_0"] + m_DRM.errors["sigma_1"] = self._prefit_errors_bin["sigma_1"] + m_DRM.errors["tau_Ap"] = self._prefit_errors_bin["tau_Ap"] + m_DRM.errors["p_Ap"] = self._prefit_errors_bin["p_Ap"] + m_DRM.errors["DCR"] = self._prefit_errors_bin["DCR"] + m_DRM.errors["A"] = np.sqrt(A) + + ################################################################### + # Set parameter limits + # These limits have been chosen to best constrain the possible + # values for each of the parameters. + # NOTE: we are working in bin widths, so the minimum allowed + # pedestal position is the first/second bin for Q_0, G, + # sigma_0, sigma_1 + ################################################################### + + m_DRM.limits["G"] = (1, None) + m_DRM.limits["mu"] = (0.01, None) + m_DRM.limits["lbda"] = (1e-6, 1-1e-4) + m_DRM.limits["sigma_0"] = (0.1, None) + m_DRM.limits["sigma_1"] = (0.1, None) + m_DRM.limits["tau_Ap"] = (3, self._prefit_values_bin["t_gate"]/2) + m_DRM.limits["p_Ap"] = (1e-6, 1-1e-4) + m_DRM.limits["DCR"] = (1e-6, None) + + # m_DRM.limits["A"] = ( + # (self._prefit_values_bin["A"] + # - 3*np.sqrt(self._prefit_values_bin["A"])), + # (self._prefit_values_bin["A"] + # + 3*np.sqrt(self._prefit_values_bin["A"])) + # ) + + m_DRM.limits["A"] = (1, None) + + ################################################################### + # Allow input tau, t_gate, tau_R and t_0 to float if errors are + # known. 
+ ################################################################### + + if(self._fit_kwargs["tau_err"] is None): + m_DRM.fixed["tau"] = True + else: + m_DRM.fixed["tau"] = False + m_DRM.errors["tau"] = self._prefit_errors_bin["tau"] + m_DRM.limits["tau"] = (max(epsilon(), + (self._prefit_values_bin["tau"] + - self._prefit_errors_bin["tau"])), + max(epsilon(), + (self._prefit_values_bin["tau"] + + self._prefit_errors_bin["tau"])) + ) + + if(self._fit_kwargs["t_gate_err"] is None): + m_DRM.fixed["t_gate"] = True + else: - - m_DRM.limits["G"] = (1, None) - - m_DRM.limits["mu"] = (0.01, None) - - m_DRM.limits["lbda"] = (1e-6, 1-1e-4) - - m_DRM.limits["sigma_0"] = (0.1, None) - - m_DRM.limits["sigma_1"] = (0.1, None) - - m_DRM.limits["tau_Ap"] = (3, self._prefit_values_bin["t_gate"]/2) - - m_DRM.limits["p_Ap"] = (1e-6, 1-1e-4) - - m_DRM.limits["DCR"] = (1e-6, None) - -# m_DRM.limits["A"] = (self._prefit_values_bin["A"] - 3*np.sqrt(self._prefit_values_bin["A"]), -# self._prefit_values_bin["A"] + 3*np.sqrt(self._prefit_values_bin["A"])) - - m_DRM.limits["A"] = (1,None) + m_DRM.fixed["t_gate"] = False + m_DRM.errors["t_gate"] = self._prefit_errors_bin["t_gate"] + m_DRM.limits["t_gate"] = ( + max(epsilon(), (self._prefit_values_bin["t_gate"] + - self._prefit_errors_bin["t_gate"])), + max(epsilon(), (self._prefit_values_bin["t_gate"] + + self._prefit_errors_bin["t_gate"])) + ) + + if(self._fit_kwargs["t_0_err"] is None): + m_DRM.fixed["t_0"] = True + else: + m_DRM.fixed["t_0"] = False + m_DRM.errors["t_0"] = self._prefit_errors_bin["t_0"] + m_DRM.limits["t_0"] = ( + max(epsilon(), (self._prefit_values_bin["t_0"] + - self._prefit_errors_bin["t_0"])), + max(epsilon(), (self._prefit_values_bin["t_0"] + + self._prefit_errors_bin["t_0"]))) + + if(self._fit_kwargs["tau_R_err"] is None): + m_DRM.fixed["tau_R"] = True + else: + m_DRM.fixed["tau_R"] = False + m_DRM.errors["tau_R"] = self._prefit_errors_bin["tau_R"] + m_DRM.limits["tau_R"] = ( + max(epsilon(), (self._prefit_values_bin["tau_R"] + - self._prefit_errors_bin["tau_R"])), + max(epsilon(), (self._prefit_values_bin["tau_R"] + + self._prefit_errors_bin["tau_R"]))) + + m_DRM.fixed["k_max"] = True + m_DRM.fixed["k_max_dcr"] = True + m_DRM.fixed["len_pad"] = True + + ####################################################################### + # Perform fit under hypothesis there are no dark counts, afterpulses. + ####################################################################### + + m_DRM.fixed["Q_0"] = False + m_DRM.fixed["G"] = False + m_DRM.fixed["mu"] = False + m_DRM.fixed["lbda"] = False + m_DRM.fixed["sigma_0"] = False + m_DRM.fixed["sigma_1"] = False + m_DRM.fixed["p_Ap"] = True + m_DRM.fixed["tau_Ap"] = True + m_DRM.fixed["DCR"] = True + m_DRM.fixed["A"] = False + + if (self._verbose): + print("Fitting basic light...") + + m_DRM.strategy = 1 + m_DRM.errordef = m_DRM.LIKELIHOOD + m_DRM.migrad(ncall=self._n_call_minuit, + iterate=self._n_iterations_minuit) + if(self._verbose): + print("First pass complete...") + print(m_DRM) - ######################################################################################################## - ## Allow input tau, t_gate, tau_R and t_0 to float if errors are known. 
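
The parameter-floating blocks above and below all repeat one pattern: with no supplied error the parameter stays fixed at its input value; with an error it is released inside a one-sigma window whose edges are clamped to stay positive. A hypothetical helper (not part of the PeakOTron API) makes the pattern explicit:

    import sys

    def float_within_error(m, name, value, err):
        # release `name` and confine it to value +/- err, with both
        # bounds clamped above machine epsilon, as in the code above
        eps = sys.float_info.epsilon
        m.fixed[name] = False
        m.errors[name] = err
        m.limits[name] = (max(eps, value - err), max(eps, value + err))

    # e.g. float_within_error(m_DRM, "t_gate", 104.0, 2.0) when t_gate_err
    # is given; 104.0 and 2.0 are invented numbers
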
- ######################################################################################################## + if(self._verbose): + print("Fixing basic light and fitting noise...") + + m_DRM.fixed["Q_0"] = True + m_DRM.fixed["G"] = True + m_DRM.fixed["mu"] = True + m_DRM.fixed["lbda"] = True + m_DRM.fixed["sigma_0"] = True + m_DRM.fixed["sigma_1"] = True + m_DRM.fixed["p_Ap"] = False + m_DRM.fixed["tau_Ap"] = False + m_DRM.fixed["DCR"] = False + m_DRM.values["DCR"] = self._prefit_values_bin["DCR"] + m_DRM.values["p_Ap"] = self._prefit_values_bin["p_Ap"] + m_DRM.fixed["A"] = False + + m_DRM.strategy = 1 + m_DRM.errordef = m_DRM.LIKELIHOOD + m_DRM.migrad(ncall=self._n_call_minuit, + iterate=self._n_iterations_minuit) + if(self._verbose): + print("Second pass complete...") + print(m_DRM) - if(self._fit_kwargs["tau_err"] is None): - m_DRM.fixed["tau"]=True - else: - m_DRM.fixed["tau"]=False - m_DRM.errors["tau"] = self._prefit_errors_bin["tau"] - m_DRM.limits["tau"] = (max(epsilon(), self._prefit_values_bin["tau"]-self._prefit_errors_bin["tau"]), - max(epsilon(), self._prefit_values_bin["tau"]+self._prefit_errors_bin["tau"]) - ) - - if(self._fit_kwargs["t_gate_err"] is None): - m_DRM.fixed["t_gate"]=True - else: + if(self._verbose): + print("Fitting full model...") + + m_DRM.fixed["Q_0"] = False + m_DRM.fixed["G"] = False + m_DRM.fixed["mu"] = False + m_DRM.fixed["lbda"] = False + m_DRM.fixed["sigma_0"] = False + m_DRM.fixed["sigma_1"] = False + m_DRM.fixed["p_Ap"] = False + m_DRM.fixed["tau_Ap"] = False + m_DRM.fixed["DCR"] = False + m_DRM.fixed["A"] = False + + m_DRM.strategy = 1 + m_DRM.migrad(ncall=self._n_call_minuit, + iterate=self._n_iterations_minuit) + + m_DRM.hesse() + t_1 = time.time() - m_DRM.fixed["t_gate"]=False - m_DRM.errors["t_gate"] = self._prefit_errors_bin["t_gate"] - m_DRM.limits["t_gate"] = (max(epsilon(), self._prefit_values_bin["t_gate"]-self._prefit_errors_bin["t_gate"]), - max(epsilon(), self._prefit_values_bin["t_gate"]+self._prefit_errors_bin["t_gate"]) - ) + time_fit = t_1 - t_0 + if (self._verbose): + print("Prefit took {:3.3f} s, Fit took {:3.3f} s".format( + self._time_prefit, time_fit)) + print(m_DRM) - if(self._fit_kwargs["t_0_err"] is None): - m_DRM.fixed["t_0"]=True - else: - m_DRM.fixed["t_0"]=False - m_DRM.errors["t_0"] = self._prefit_errors_bin["t_0"] - m_DRM.limits["t_0"] = (max(epsilon(), self._prefit_values_bin["t_0"]-self._prefit_errors_bin["t_0"]), - max(epsilon(), self._prefit_values_bin["t_0"]+self._prefit_errors_bin["t_0"]) - ) + return m_DRM, time_fit + def Fit(self, data, **kwargs): + ####################################################################### + # Preparation + ####################################################################### + # + # 1. Data dictionaries are initialised if previously filled. + # 2. tau, tauR, t_0 and t_gate are checked to ensure they are provided + # to the program. + # 3. 
Perform checks of input parameters to ensure no disallowed values + ####################################################################### - if(self._fit_kwargs["tau_R_err"] is None): - m_DRM.fixed["tau_R"]=True - else: - m_DRM.fixed["tau_R"]=False - m_DRM.errors["tau_R"] = self._prefit_errors_bin["tau_R"] - m_DRM.limits["tau_R"] = (max(epsilon(), self._prefit_values_bin["tau_R"]-self._prefit_errors_bin["tau_R"]), - max(epsilon(), self._prefit_values_bin["tau_R"]+self._prefit_errors_bin["tau_R"]) - ) + self.InitKwargs() + if not set(kwargs.keys()).issubset(self._fit_kwargs.keys()): + wrong_keys = list( + set(kwargs.keys()).difference( + set(self._fit_kwargs.keys()))) + raise Exception("Arguments {:s} not recognised.".format( + ",".join(wrong_keys))) - m_DRM.fixed["k_max"]=True - m_DRM.fixed["k_max_dcr"]=True - m_DRM.fixed["len_pad"]=True + if "tau" not in kwargs.keys(): + raise Exception(("Please provide 'tau' (slow component " + "of SiPM pulse in nanoseconds).")) + elif(not isinstance(kwargs["tau"], float) or kwargs["tau"] <= 0): + raise Exception(("Please set 'tau' (slow component of SiPM pulse " + "in nanoseconds) to a positive float.")) + if "tau_R" not in kwargs.keys(): + raise Exception(("Please provide 'tau_R' (recovery time " + "of SiPM in nanoseconds).")) - ######################################################################################################## - ##Perform fit under hypothesis there are no dark counts, afterpulses. - ######################################################################################################## + elif(not isinstance(kwargs["tau_R"], float) or kwargs["tau_R"] <= 0): + raise Exception(("Please set 'tau' (slow component of SiPM pulse " + "in nanoseconds) to a positive float.")) - m_DRM.fixed["Q_0"] = False - m_DRM.fixed["G"] = False - m_DRM.fixed["mu"] = False - m_DRM.fixed["lbda"] = False - m_DRM.fixed["sigma_0"] = False - m_DRM.fixed["sigma_1"] = False - m_DRM.fixed["p_Ap"]=True - m_DRM.fixed["tau_Ap"]=True - m_DRM.fixed["DCR"]=True - m_DRM.fixed["A"]=False - - - if(self._verbose): - print("Fitting basic light...") + if "t_0" not in kwargs.keys(): + raise Exception(("Please provide 't_0' (integration region before " + "start of SiPM pulse integration gate in " + "nanoseconds).")) - m_DRM.strategy=1 - m_DRM.errordef=m_DRM.LIKELIHOOD - m_DRM.migrad(ncall = self._n_call_minuit, - iterate= self._n_iterations_minuit) + elif(kwargs["t_0"] <= 0): + raise Exception(("Please set 't_0' (integration region before " + "start of SiPM pulse integration gate in " + "nanoseconds) to a positive float.")) - - if(self._verbose): - print("First pass complete...") - print(m_DRM) - - if(self._verbose): - print("Fixing basic light and fitting noise...") - - - m_DRM.fixed["Q_0"] = True - m_DRM.fixed["G"] = True - m_DRM.fixed["mu"] = True - m_DRM.fixed["lbda"] = True - m_DRM.fixed["sigma_0"] = True - m_DRM.fixed["sigma_1"] = True - m_DRM.fixed["p_Ap"]=False - m_DRM.fixed["tau_Ap"]=False - m_DRM.fixed["DCR"]=False - m_DRM.values["DCR"] = self._prefit_values_bin["DCR"] - m_DRM.values["p_Ap"] = self._prefit_values_bin["p_Ap"] - m_DRM.fixed["A"]=False - - m_DRM.strategy=1 - m_DRM.errordef=m_DRM.LIKELIHOOD - m_DRM.migrad(ncall = self._n_call_minuit, - iterate= self._n_iterations_minuit) - - if(self._verbose): - print("Second pass complete...") - print(m_DRM) + if "t_gate" not in kwargs.keys(): + raise Exception(("Please provide 't_gate' (length of SiPM pulse " + "integration gate) in nanoseconds.")) - - if(self._verbose): - print("Fitting full model...") - - - 
m_DRM.fixed["Q_0"] = False
-        m_DRM.fixed["G"] = False
-        m_DRM.fixed["mu"] = False
-        m_DRM.fixed["lbda"] = False
-        m_DRM.fixed["sigma_0"] = False
-        m_DRM.fixed["sigma_1"] = False
-        m_DRM.fixed["p_Ap"]=False
-        m_DRM.fixed["tau_Ap"]=False
-        m_DRM.fixed["DCR"]=False
-        m_DRM.fixed["A"]=False
-        
-        m_DRM.strategy=1
-        m_DRM.migrad(ncall = self._n_call_minuit,
-                     iterate= self._n_iterations_minuit)
-        
-        m_DRM.hesse()
-        
-        
-        t_1 = time.time()
-        
-        
-        time_fit = t_1 - t_0
-        if(self._verbose):
-            print("Prefit took {:3.3f} s, Fit took {:3.3f} s".format(self._time_prefit,
-                                                                  time_fit))
-            print(m_DRM)
-        
-        
-        
-        
-        
-        return m_DRM, time_fit
-    
-    
-    
-    
-    
-    def Fit(self, data, **kwargs):
-        
-        ########################################################################################################
-        ## Preparation
-        ########################################################################################################
-        ##
-        ## 1. Data dictionaries are initialised if previously filled.
-        ## 2. tau, tauR, t_0 and t_gate are checked to ensure they are provided to the program.
-        ## 3. Perform checks of input parameters to ensure no disallowed values
-        ########################################################################################################
-        
-        self.InitKwargs()
-        
-        
-        if not set(kwargs.keys()).issubset(self._fit_kwargs.keys()):
-            wrong_keys = list(set(kwargs.keys()).difference(set(self._fit_kwargs.keys())))
-            
-            raise Exception ("Arguments {:s} not recognised.".format(",".join(wrong_keys)))
-        
-        
-        if not "tau" in kwargs.keys():
-            raise Exception ("Please provide 'tau' (slow component of SiPM pulse in nanoseconds).")
-            
-        elif(not isinstance(kwargs["tau"], float) or kwargs["tau"]<=0):
-            raise Exception ("Please set 'tau' (slow component of SiPM pulse in nanoseconds) to a positive float.")
-        
-        
-        if not "tau_R" in kwargs.keys():
-            raise Exception ("Please provide 'tau_R' (recovery time of SiPM in nanoseconds).")
-            
-        elif(not isinstance(kwargs["tau_R"], float) or kwargs["tau_R"]<=0):
-            raise Exception ("Please set 'tau' (slow component of SiPM pulse in nanoseconds) to a positive float.")
-        
-        
-        if not "t_0" in kwargs.keys():
-            raise Exception ("Please provide 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds).")
-            
-        elif(kwargs["t_0"]<=0):
-            raise Exception ("Please set 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds) to a positive float.")
-        
-        
-        
-        if not "t_gate" in kwargs.keys():
-            raise Exception ("Please provide 't_gate' (length of SiPM pulse integration gate) in nanoseconds.")
-            
-        elif(kwargs["t_0"]<=0):
-            raise Exception ("Please set 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds) to a positive float.")
-        

+        elif(kwargs["t_gate"] <= 0):
+            raise Exception(("Please set 't_gate' (length of SiPM pulse "
+                             "integration gate in nanoseconds) to a "
+                             "positive float."))

         self._fit_kwargs.update(kwargs)
-        
-        
+
         if self._fit_kwargs["min_n_events_peak"] is not None:
             if(not isinstance(self._fit_kwargs["min_n_events_peak"], int)):
-                raise Exception ("Please set max_n_events peak (max events to fit Gaus to peak) as an integer" )
+                raise Exception(("Please set min_n_events_peak (minimum "
+                                 "events needed to fit Gaus to peak) as "
+                                 "an integer"))
         else:
             self._fit_kwargs["min_n_events_peak"] = self._min_n_events_peak
-        
-        
+
         if self._fit_kwargs["truncate_nsigma0_up"] is not None:
-            if(not isinstance(self._fit_kwargs["truncate_nsigma0_up"], float)):
-                raise Exception ("Please set truncate_nsigma0_up (# of sigma0 before 
pedestal to for Chi2/NDF to be evaluated) as a positive float" ) - - elif(self._fit_kwargs["truncate_nsigma0_up"]<=0): - raise Exception ("Please set truncate_nsigma0_up (# of sigma0 before pedestal to for Chi2/NDF to be evaluated) as a positive float" ) - - + raise Exception(("Please set truncate_nsigma0_up (# of sigma0 " + "before pedestal to for Chi2/NDF to be " + "evaluated) as a positive float")) + + elif(self._fit_kwargs["truncate_nsigma0_up"] <= 0): + raise Exception(("Please set truncate_nsigma0_up (# of sigma0" + " before pedestal to for Chi2/NDF " + "to be evaluated) as a positive float")) + if self._fit_kwargs["truncate_nsigma0_do"] is not None: - if(not isinstance(self._fit_kwargs["truncate_nsigma0_do"], float)): - raise Exception ("Please set truncate_nsigma0_do (max # of sigma0 before pedestal to for to be scanned) as a positive float between 0.0 and 4.0" ) - - elif(self._fit_kwargs["truncate_nsigma0_do"]<=0 or self._fit_kwargs["truncate_nsigma0_do"]>4): - raise Exception ("Please set truncate_nsigma0_do (# of sigma0 before pedestal to for Chi2/NDF to be evaluated) as a positive float between 0.0 and 4.0" ) - - + raise Exception(("Please set truncate_nsigma0_do (max # of " + "sigma0 before pedestal to for to be " + "scanned) as a positive float between " + "0.0 and 4.0")) + + elif(self._fit_kwargs["truncate_nsigma0_do"] <= 0 + or self._fit_kwargs["truncate_nsigma0_do"] > 4): + + raise Exception(("Please set truncate_nsigma0_do (# of " + "sigma0 before pedestal to for Chi2/NDF to " + "be evaluated) as a positive float between " + "0.0 and 4.0")) if self._fit_kwargs["truncate_nsigma0_chi2scanlim"] is not None: - - if(not isinstance(self._fit_kwargs["truncate_nsigma0_chi2scanlim"], float)): - raise Exception ("Please set truncate_nsigma0_chi2scanlim (minimum Chi2/NDF for a fit to be selected in a scan) as a positive float" ) - - elif(self._fit_kwargs["truncate_nsigma0_chi2scanlim"]<0): - raise Exception ("Please set truncate_nsigma0_chi2scanlim (minimum Chi2/NDF for a fit to be selected in a scan) as a positive float" ) - - + if(not isinstance(self._fit_kwargs["truncate_nsigma0_chi2scanlim"], + float)): + + raise Exception(("Please set truncate_nsigma0_chi2scanlim " + "(minimum Chi2/NDF for a fit to be selected " + "in a scan) as a positive float")) + + elif(self._fit_kwargs["truncate_nsigma0_chi2scanlim"] < 0): + raise Exception(("Please set truncate_nsigma0_chi2scanlim " + "(minimum Chi2/NDF for a fit to be selected " + "in a scan) as a positive float")) + if self._fit_kwargs["bw"] is not None: if(not isinstance(self._fit_kwargs["bw"], float)): - raise Exception ("Please set bw (bin width) to a positive float." ) - if(self._fit_kwargs["bw"]<=0): - raise Exception ("Please set bw (bin width) to a positive float." ) - + raise Exception(("Please set bw (bin width) " + "to a positive float.")) + + if(self._fit_kwargs["bw"] <= 0): + raise Exception(("Please set bw (bin width) " + "to a positive float.")) + if self._fit_kwargs["bin_0"] is not None: if(not isinstance(self._fit_kwargs["bin_0"], float)): - raise Exception ("Please set bin_0 (first bin position) to a float." 
) - + raise Exception(("Please set bin_0 (first bin " + "position) to a float.")) - if(self._verbose): + if (self._verbose): print("Prefitting...") - - - ######################################################################################################## - ## Prefitting routine applied - ######################################################################################################## - - + + ####################################################################### + # Prefitting routine applied + ####################################################################### + t_0 = time.time() self.GetPreFit(data, **self._fit_kwargs) t_1 = time.time() - + self._time_prefit = t_1-t_0 - - if(self._verbose): + + if (self._verbose): print("Prefitting complete.") - - + _offset = self._hist_data["bc_min"] - _scale = self._hist_data["bw"] - + _scale = self._hist_data["bw"] + if(self._fit_kwargs["prefit_only"]): - _prefit_values_temp = self._prefit_values_bin.copy() _prefit_errors_temp = self._prefit_errors_bin.copy() - + for key, value in _prefit_values_temp.items(): - if(key=="Q_0"): + if(key == "Q_0"): _prefit_values_temp[key] = _offset + value*_scale - elif(key=="G"): + elif(key == "G"): _prefit_values_temp[key] = value*_scale - elif(key=="sigma_0"): + elif(key == "sigma_0"): _prefit_values_temp[key] = value*_scale - elif(key=="sigma_1"): + elif(key == "sigma_1"): _prefit_values_temp[key] = value*_scale else: _prefit_values_temp[key] = value for key, value in _prefit_errors_temp.items(): - if(key=="Q_0"): + if(key == "Q_0"): _prefit_errors_temp[key] = value*_scale - elif(key=="G"): + elif(key == "G"): _prefit_errors_temp[key] = value*_scale - elif(key=="sigma_0"): + elif(key == "sigma_0"): _prefit_errors_temp[key] = value*_scale - elif(key=="sigma_1"): + elif(key == "sigma_1"): _prefit_errors_temp[key] = value*_scale else: _prefit_errors_temp[key] = value self._prefit_values.update(_prefit_values_temp) self._prefit_errors.update(_prefit_errors_temp) - - - else: - - - - ######################################################################################################## - ## Truncate fit if supplied factor. - ######################################################################################################## - - -# lim_nsigma0 = self._prefit_values_bin["Q_0"] - self._fit_kwargs["truncate_nsigma0"]*self._prefit_values_bin["sigma_0"] -# if(self._verbose): -# print("Truncating fit, starting from {:3.3f} estimated sigma_0 from estimated pedestal ({:3.3f} bins) ...".format(self._fit_kwargs["truncate_nsigma0"], lim_nsigma0)) - - - - if(self._fit_kwargs["truncate_nsigma0_do"] is not None): - - nsigma_0_scan_range = np.linspace(4.0, - self._fit_kwargs["truncate_nsigma0_do"], - np.floor((4-self._fit_kwargs["truncate_nsigma0_do"])/0.5).astype(int)+1) - - - - if(self._verbose): - print("Performing full truncation scan. 
Evaluating nsigma0 = {0} before pedestal".format(nsigma_0_scan_range)) - - - if(self._fit_kwargs["truncate_nsigma0_chi2scanlim"] is not None): - - if(self._verbose): - print("Minimum Chi2/NDF for convergence: {:3.3f}".format(self._fit_kwargs["truncate_nsigma0_chi2scanlim"])) - - else: - print("Best loss taken.") - - - if(self._fit_kwargs["truncate_nsigma0_up"] is not None): - - - _nsigma0hi = self._fit_kwargs["truncate_nsigma0_up"] - else: - _nsigma0hi = 2 - - if(self._verbose): - print("Evaluating up to nsigma0 = {:3.3f} after pedestal".format(_nsigma0hi)) - - - - m_DRMs = np.full(len(nsigma_0_scan_range), None) - chi2ndfs_range = np.full(len(nsigma_0_scan_range), None) - chi2ndfs_full = np.full(len(nsigma_0_scan_range), None) - time_fits = np.full(len(nsigma_0_scan_range), 0.0) - - Q_0_prefit = self._prefit_values_bin["Q_0"] - sigma_0_prefit = self._prefit_values_bin["sigma_0"] - - fit_iters = 0 + ################################################################### + # Truncate fit if supplied factor. + ################################################################### + + # lim_nsigma0 = (self._prefit_values_bin["Q_0"] + # - self._fit_kwargs["truncate_nsigma0"] + # * self._prefit_values_bin["sigma_0"]) + + # if (self._verbose): + # tmp_val = self._fit_kwargs["truncate_nsigma0"] + # print(("Truncating fit, starting from " + # f"{tmp_val:3.3f} estimated sigma_0 from estimated " + # f"pedestal ({lim_nsigma0:3.3f} bins) ...")) + + if (self._fit_kwargs["truncate_nsigma0_do"] is not None): + nsigma_0_scan_range = np.linspace( + 4.0, self._fit_kwargs["truncate_nsigma0_do"], + np.floor( + (4-self._fit_kwargs["truncate_nsigma0_do"])/0.5 + ).astype(int)+1) + + if self._verbose: + print(("Performing full truncation scan. " + f"Evaluating nsigma0 = {nsigma_0_scan_range} " + "before pedestal")) + + if (self._fit_kwargs["truncate_nsigma0_chi2scanlim"] + is not None): + + if (self._verbose): + tmp_val = self._fit_kwargs[ + "truncate_nsigma0_chi2scanlim"] + + print(("Minimum Chi2/NDF for convergence: " + f"{tmp_val:3.3f}")) + else: + print("Best loss taken.") + if(self._fit_kwargs["truncate_nsigma0_up"] is not None): + _nsigma0hi = self._fit_kwargs["truncate_nsigma0_up"] + else: + _nsigma0hi = 2 - for fit_i, _nsigma0low in enumerate(nsigma_0_scan_range): + if (self._verbose): + print(f"Evaluating up to nsigma0 = {_nsigma0hi:3.3f} " + "after pedestal") - fit_iters+=1 + m_DRMs = np.full(len(nsigma_0_scan_range), None) + chi2ndfs_range = np.full(len(nsigma_0_scan_range), None) + chi2ndfs_full = np.full(len(nsigma_0_scan_range), None) + time_fits = np.full(len(nsigma_0_scan_range), 0.0) - lim_nsigma0 = Q_0_prefit - _nsigma0low*sigma_0_prefit - - - print("Truncating fit, starting from {:3.3f} estimated sigma_0 from estimated pedestal ({:3.3f} bins) ...".format(_nsigma0low, lim_nsigma0)) + Q_0_prefit = self._prefit_values_bin["Q_0"] + sigma_0_prefit = self._prefit_values_bin["sigma_0"] + fit_iters = 0 + for fit_i, _nsigma0low in enumerate(nsigma_0_scan_range): + fit_iters += 1 + lim_nsigma0 = Q_0_prefit - _nsigma0low*sigma_0_prefit - cond_sel_nsigma0 = (self._hist_data["bin_numbers"]>lim_nsigma0) + print( + f"Truncating fit, starting from {_nsigma0low:3.3f}" + " estimated sigma_0 from estimated pedestal " + f"({lim_nsigma0:3.3f} bins) ..." 
+                    )

+                    cond_sel_nsigma0 = (
+                        self._hist_data["bin_numbers"] > lim_nsigma0)

+                    f_DRM = BinnedLH(
+                        DRM,
+                        self._hist_data["bin_numbers"][cond_sel_nsigma0],
+                        self._hist_data["counts"][cond_sel_nsigma0], 1)

-                    f_DRM = BinnedLH(DRM,
-                                 self._hist_data["bin_numbers"][cond_sel_nsigma0],
-                                 self._hist_data["counts"][cond_sel_nsigma0], 1)

+                    m_DRM, time_fit = self.__MinuitFit(
+                        f_DRM,
+                        np.sum(self._hist_data["counts"][cond_sel_nsigma0])
+                    )

+                    chi2_range, ndof_range = self.GetChi2(
+                        pars=m_DRM.values.to_dict(),
+                        n_sigma_lims=[_nsigma0low, _nsigma0hi])

-                    m_DRM, time_fit = self.__MinuitFit(f_DRM, np.sum(self._hist_data["counts"][cond_sel_nsigma0]))

+                    red_chi2_range = chi2_range/ndof_range

+                    chi2_full, ndof_full = self.GetChi2(
+                        pars=m_DRM.values.to_dict(),
+                        n_sigma_lims=[_nsigma0low, None])

-                    chi2_range, ndof_range = self.GetChi2(pars = m_DRM.values.to_dict(),
-                                                  n_sigma_lims = [_nsigma0low, _nsigma0hi]
-                                                  )
-
-                    red_chi2_range = chi2_range/ndof_range
-
-
-                    chi2_full, ndof_full = self.GetChi2(pars = m_DRM.values.to_dict(),
-                                                  n_sigma_lims = [_nsigma0low, None]
-                                                  )
-
-                    red_chi2_full = chi2_full/ndof_full

+                    red_chi2_full = chi2_full/ndof_full

+                    m_DRMs[fit_i] = m_DRM
+                    chi2ndfs_range[fit_i] = red_chi2_range
+                    chi2ndfs_full[fit_i] = red_chi2_full
+                    time_fits[fit_i] = time_fit

-                    m_DRMs[fit_i] = m_DRM
-                    chi2ndfs_range[fit_i] = red_chi2_range
-                    chi2ndfs_full[fit_i] = red_chi2_full
-                    time_fits[fit_i] = time_fit

+                    if (self._fit_kwargs["truncate_nsigma0_chi2scanlim"]
+                            is not None):

+                        tmp_val = self._fit_kwargs[
+                            "truncate_nsigma0_chi2scanlim"]

-                    if(self._fit_kwargs["truncate_nsigma0_chi2scanlim"] is not None):
-                        if(red_chi2_range<self._fit_kwargs["truncate_nsigma0_chi2scanlim"]):
-                            if(self._verbose):
-                                print("Fit chi2/NDF less than limit. (Fit: {:3.3f} < Limit {:3.3f}. Scan complete".format(red_chi2,
-                                                                                             self._fit_kwargs["truncate_nsigma0_chi2scanlim"]))

+                        if (red_chi2_range < tmp_val):
+                            if (self._verbose):
+                                print(("Fit chi2/NDF less than limit. "
+                                       f"Fit: {red_chi2_range:3.3f} < Limit "
+                                       f"{tmp_val:3.3f}. Scan complete."))

-                            self._m_DRM = deepcopy(m_DRM)
-                            self._time_fit = np.sum(time_fits)
-                            self._hist_data["truncate_nsigma0_do_min"] = _nsigma0low
-

+                            self._m_DRM = deepcopy(m_DRM)
+                            self._time_fit = np.sum(time_fits)
+                            self._hist_data["truncate_nsigma0_do_min"] = (
+                                _nsigma0low)

-                            break

+                            break
+                        else:
+                            if (self._verbose):
+                                print(("Fit chi2/NDF greater than limit. "
+                                       f"(Fit: {red_chi2_range:3.3f} >= "
+                                       f"Limit {tmp_val:3.3f}). Performing "
+                                       "next scan step..."))
+
+                    if (fit_iters == len(nsigma_0_scan_range)):
+                        fit_i_best = np.argmin(chi2ndfs_range)
+                        if (self._verbose):
+                            print("Scan Summary:")
+                            for fit_i, _nsigma0 in enumerate(
+                                    nsigma_0_scan_range):
+
+                                if (fit_i == fit_i_best):
+                                    is_best = True
                         else:
-                                if(self._verbose):
-                                    print("Fit chi2/NDF greater than limit. (Fit: {:3.3f} < Limit {:3.3f}. Performing next scan step...".format(red_chi2,
-                                                                                                 self._fit_kwargs["truncate_nsigma0_chi2scanlim"]))
-
-
-                        if(fit_iters==len(nsigma_0_scan_range)):
-                            fit_i_best = np.argmin(chi2ndfs_range)
-                            if(self._verbose):
-                                print("Scan Summary:")
-                                for fit_i, _nsigma0 in enumerate(nsigma_0_scan_range):
-                                    if(fit_i==fit_i_best):
-                                        is_best = True
-                                    else:
-                                        is_best = False

+                                    is_best = False

-                                    print("n_sigma0 = {:3.3f}\tchi2/NDF in pedestal range = {:3.3f}\tBest = {:s}".format(_nsigma0, chi2ndfs_range[fit_i], str(is_best)))

+                                print((f"n_sigma0 = {_nsigma0:3.3f}\tchi2/NDF "
+                                       "in pedestal range = "
+                                       f"{chi2ndfs_range[fit_i]:3.3f}\t"
+                                       f"Best = {str(is_best)}"))

+                            print(("Scan complete. 
Using best fit at " + f"{nsigma_0_scan_range[fit_i_best]:3.3f}")) - print("Scan complete. Using best fit at {:3.3f}".format(nsigma_0_scan_range[fit_i_best])) + self._m_DRM = deepcopy(m_DRMs[fit_i_best]) + self._time_fit = np.sum(time_fits) + self._hist_data["truncate_nsigma0_do_min"] = ( + nsigma_0_scan_range[fit_i_best]) - self._m_DRM = deepcopy(m_DRMs[fit_i_best]) - self._time_fit = np.sum(time_fits) - - #print(self._hist_data["truncate_nsigma0_do_min"] ) - self._hist_data["truncate_nsigma0_do_min"] = nsigma_0_scan_range[fit_i_best] - #print(self._hist_data["truncate_nsigma0_do_min"] ) else: - - f_DRM = BinnedLH(DRM, - self._hist_data["bin_numbers"], + + f_DRM = BinnedLH(DRM, + self._hist_data["bin_numbers"], self._hist_data["counts"], 1) - - - m_DRM, time_fit = self.__MinuitFit(f_DRM, np.sum(self._hist_data["counts"])) - - - - - self._time_fit = time_fit - self._m_DRM = deepcopy(m_DRM) - - ######################################################################################################## - ##Rescale fitted parameters back to their input units. - ######################################################################################################## + m_DRM, time_fit = self.__MinuitFit( + f_DRM, + np.sum(self._hist_data["counts"])) + + self._time_fit = time_fit + self._m_DRM = deepcopy(m_DRM) + ################################################################### + # Rescale fitted parameters back to their input units. + ################################################################### for key, value in self._m_DRM.values.to_dict().items(): - if(key=="Q_0"): + if (key == "Q_0"): self._fit_values[key] = _offset + value*_scale - elif(key=="G"): + elif (key == "G"): self._fit_values[key] = value*_scale - elif(key=="sigma_0"): + elif (key == "sigma_0"): self._fit_values[key] = value*_scale - elif(key=="sigma_1"): + elif (key == "sigma_1"): self._fit_values[key] = value*_scale else: self._fit_values[key] = value - - self._fit_values_bin[key] = value - + self._fit_values_bin[key] = value for key, value in self._m_DRM.errors.to_dict().items(): - if(key=="Q_0"): + if (key == "Q_0"): self._fit_errors[key] = value*_scale - elif(key=="G"): + elif (key == "G"): self._fit_errors[key] = value*_scale - elif(key=="sigma_0"): + elif (key == "sigma_0"): self._fit_errors[key] = value*_scale - elif(key=="sigma_1"): + elif (key == "sigma_1"): self._fit_errors[key] = value*_scale else: self._fit_errors[key] = value - - self._fit_errors_bin[key] = value + self._fit_errors_bin[key] = value _prefit_values_temp = self._prefit_values_bin.copy() _prefit_errors_temp = self._prefit_errors_bin.copy() - + for key, value in _prefit_values_temp.items(): - if(key=="Q_0"): + if (key == "Q_0"): _prefit_values_temp[key] = _offset + value*_scale - elif(key=="G"): + elif (key == "G"): _prefit_values_temp[key] = value*_scale - elif(key=="sigma_0"): + elif (key == "sigma_0"): _prefit_values_temp[key] = value*_scale - elif(key=="sigma_1"): + elif (key == "sigma_1"): _prefit_values_temp[key] = value*_scale else: _prefit_values_temp[key] = value for key, value in _prefit_errors_temp.items(): - if(key=="Q_0"): + if (key == "Q_0"): _prefit_errors_temp[key] = value*_scale - elif(key=="G"): + elif (key == "G"): _prefit_errors_temp[key] = value*_scale - elif(key=="sigma_0"): + elif (key == "sigma_0"): _prefit_errors_temp[key] = value*_scale - elif(key=="sigma_1"): + elif (key == "sigma_1"): _prefit_errors_temp[key] = value*_scale else: _prefit_errors_temp[key] = value self._prefit_values.update(_prefit_values_temp) 
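
The rescaling loops above (and the matching prefit copies below) apply one affine map from bin units back to charge units: the pedestal position picks up the offset of the first bin centre, while gain and widths only scale with the bin width; errors use the scale factor alone because the offset cancels. A standalone equivalent, assuming the same key names:

    def rescale_to_charge(params, offset, scale):
        # positions: Q = offset + q*scale; scale-like terms: Q = q*scale
        out = {}
        for key, value in params.items():
            if key == "Q_0":
                out[key] = offset + value*scale
            elif key in ("G", "sigma_0", "sigma_1"):
                out[key] = value*scale
            else:
                out[key] = value
        return out

    # e.g. rescale_to_charge({"Q_0": 12.3, "G": 20.1, "mu": 1.7},
    #                        offset=bc_min, scale=bw)   # invented numbers
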
self._prefit_errors.update(_prefit_errors_temp) - - - - ######################################################################################################## - ## GetPreFit(prefit=False) - ######################################################################################################## - ## - ## VARIABLES: - ## tau: slow time-constant of SiPM pulse in nanoseconds - ## tau_err: optional error to allow tau to float - ## t_0: pre-gate integration window in nanoseconds. - ## t_0_err: optional error to allow t_0 to float - ## tau_R: recovery time of SiPM in nanoseconds - ## tau_R_err: optional error to allow tau_R to float - ## t_gate: gate length in nanoseconds - ## t_gate_err: optional error to allow t_gate to float - ## bw: optional bin width if data is 1D - ## truncate_nG: optional parameter to truncate distribution a certain fraction/number of estimated gain units before the estimated pedestal. - ## peak_dist_nG: required minimum distance between peaks in gain units to register a peaks. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html - ## peak_rel_height: Chooses the relative height at which the peak width is measured as a percentage of its prominence. 1.0 calculates the width of the peak at its lowest contour line while 0.5 evaluates at half the prominence height. Must be at least 0. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.peak_widths.html#scipy.signal.peak_widths - ## bin_method: binning method for unbinned data. "knuth", "freedman" or "scott" rules are available. - ## alpha_peaks: the maximum percentile of the distribution a found peak is allowed to have. This determines the threshold on the selected peak positions. - ## - ## Perform pre-fitting routine. - ######################################################################################################## - - - + + ########################################################################### + # GetPreFit(prefit=False) + ########################################################################### + # + # VARIABLES: + # tau: slow time-constant of SiPM pulse in nanoseconds + # tau_err: optional error to allow tau to float + # t_0: pre-gate integration window in nanoseconds. + # t_0_err: optional error to allow t_0 to float + # tau_R: recovery time of SiPM in nanoseconds + # tau_R_err: optional error to allow tau_R to float + # t_gate: gate length in nanoseconds + # t_gate_err: optional error to allow t_gate to float + # bw: optional bin width if data is 1D + # truncate_nG: optional parameter to truncate distribution a certain + # fraction/number of estimated gain units before the estimated + # pedestal. + # + # peak_dist_nG: required minimum distance between peaks in gain units to + # register a peaks. + # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html + # + # peak_rel_height: Chooses the relative height at which the peak width is + # measured as a percentage of its prominence. 1.0 + # calculates the width of the peak at its lowest contour + # line while 0.5 evaluates at half the prominence height. + # Must be at least 0. + # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.peak_widths.html#scipy.signal.peak_widths + # + # bin_method: binning method for unbinned data. "knuth", "freedman" or + # "scott" rules are available. + # + # alpha_peaks: the maximum percentile of the distribution a found peak is + # allowed to have. This determines the threshold on the + # selected peak positions. 
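
For unbinned input, the bin_method options listed above map onto astropy's bin-width estimators. A sketch of how a width could be derived under that assumption (astropy is already a dependency of this package):

    import numpy as np
    from astropy.stats import (knuth_bin_width, freedman_bin_width,
                               scott_bin_width)

    def suggest_bw(data, method="knuth"):
        # pick a bin width for a 1D sample using the named rule
        rules = {"knuth": knuth_bin_width,
                 "freedman": freedman_bin_width,
                 "scott": scott_bin_width}
        if method not in rules:
            raise ValueError("method must be 'knuth', 'freedman' or 'scott'")
        return rules[method](np.asarray(data, dtype=float))

    data = np.random.default_rng(0).normal(size=1000)
    bw = suggest_bw(data, "freedman")
    bins = np.arange(data.min(), data.max() + bw, bw)
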
+ # + # + # Perform pre-fitting routine. + ########################################################################### def GetPreFit(self, data, **kwargs): - - ######################################################################################################## - ##Get reconstructed data, bin width, number of bins and bin values - ######################################################################################################## - - - data, bw, nbins, bins = self.GetBins(data, kwargs["bw"], kwargs["bin_method"], kwargs["bin_0"]) - + + ####################################################################### + # Get reconstructed data, bin width, number of bins and bin values + ####################################################################### + + data, bw, nbins, bins = self.GetBins(data, kwargs["bw"], + kwargs["bin_method"], + kwargs["bin_0"]) + counts, _ = np.histogram(data, bins=bins) - counts_error = np.where(counts<1, 1, np.sqrt(counts)) - bin_centres = (bins[:-1] + bins[1:])/2. + counts_error = np.where(counts < 1, 1, np.sqrt(counts)) + bin_centres = (bins[:-1] + bins[1:])/2. nbins = len(bin_centres) bin_numbers = np.arange(0, nbins) - - self._len_pad = nbins - - - - ######################################################################################################## - ## Perform FFT and calculate Power Spectrum. The inverse of the frequency of the maximum amplitude yields the gain. - ######################################################################################################## + ####################################################################### + # Perform FFT and calculate Power Spectrum. The inverse of the + # frequency of the maximum amplitude yields the gain. + ####################################################################### - nbins_pad = nbins+2*nbins - counts_pad = np.pad(counts, (nbins, nbins), "constant", constant_values=0) + nbins_pad = nbins+2*nbins + counts_pad = np.pad(counts, (nbins, nbins), "constant", + constant_values=0) - - fhat = np.fft.fft(counts_pad) #computes the fft + fhat = np.fft.fft(counts_pad) # computes the fft psd = fhat * np.conj(fhat)/nbins_pad - fft_freq_orig = (1/nbins_pad) * np.arange(nbins_pad) #frequency array + fft_freq_orig = (1/nbins_pad) * np.arange(nbins_pad) # frequency array idxs_half = np.arange(1, np.floor(nbins_pad/2), dtype=np.int32) fft_amplitude = np.abs(psd[idxs_half]) fft_freq = fft_freq_orig[idxs_half] try: - f_interp_fft = InterpolatedUnivariateSpline(fft_freq, fft_amplitude, k=4) + f_interp_fft = InterpolatedUnivariateSpline(fft_freq, + fft_amplitude, k=4) + # TODO: Bare except! except: raise Exception("Interpolation of FFT failed. Fit cannot proceed.") try: fft_cr_pts = f_interp_fft.derivative().roots() + # TODO: Bare except! except: raise Exception("Root finding of FFT failed. Fit cannot proceed.") - if(len(fft_cr_pts)<1): + if (len(fft_cr_pts) < 1): raise Exception("No roots found. 
Fit cannot proceed.") - fft_cr_vals = f_interp_fft(fft_cr_pts) inv_G_fft = fft_cr_pts[np.argmax(fft_cr_vals)] G_fft = 1/inv_G_fft - - - self._len_pad = np.around(G_fft, 0).astype(int) + self._len_pad = np.around(G_fft, 0).astype(int) - ######################################################################################################## - #Estimate the position of the pedestal by minimising the GP gain w.r.t the FFT Gain - ######################################################################################################## + ####################################################################### + # Estimate the position of the pedestal by minimising the GP gain + # w.r.t the FFT Gain + ####################################################################### mu_unsub = HistMean(counts, bin_numbers) sigma = HistStd(counts, bin_numbers) gamma = HistSkew(counts, bin_numbers) - - - f_interp_counts = UnivariateSpline(bin_numbers, counts, w=1/counts_error, k=4) - + f_interp_counts = UnivariateSpline(bin_numbers, counts, + w=1/counts_error, k=4) try: counts_cr_pts = f_interp_counts.derivative().roots() + # TODO: Bare except! except: - raise Exception("Root finding of spectrum failed. Fit cannot proceed.") + raise Exception( + "Root finding of spectrum failed. Fit cannot proceed.") - if(len(counts_cr_pts)<1): + if (len(counts_cr_pts) < 1): raise Exception("No roots of spectrum found. Fit cannot proceed.") - counts_cr_vals = f_interp_counts(counts_cr_pts) max_peak = counts_cr_pts[np.argmax(counts_cr_vals)] - - - f_pedestal = lambda Q_0: (GP_gain(mu_unsub-Q_0, sigma, gamma) - G_fft)**2 - m_pedestal = Minuit(f_pedestal, Q_0 = max_peak) - m_pedestal.errordef=m_pedestal.LEAST_SQUARES - m_pedestal.limits["Q_0"] = (0, max_peak+epsilon()) + + f_pedestal = (lambda Q_0: + (GP_gain(mu_unsub-Q_0, sigma, gamma) - G_fft)**2) + + m_pedestal = Minuit(f_pedestal, Q_0=max_peak) + m_pedestal.errordef = m_pedestal.LEAST_SQUARES + m_pedestal.limits["Q_0"] = (0, max_peak + epsilon()) m_pedestal.migrad() Q_0_est = m_pedestal.values["Q_0"] - - - idx_peaks = np.sort(np.concatenate( [np.arange(max_peak-G_fft, -G_fft/2, -G_fft), - np.array([max_peak]), - np.arange(max_peak+G_fft, nbins+G_fft/2, G_fft), - - ] )) - + + idx_peaks = np.sort(np.concatenate( + [np.arange(max_peak-G_fft, -G_fft/2, -G_fft), + np.array([max_peak]), + np.arange(max_peak+G_fft, nbins+G_fft/2, G_fft), ])) + idx_Q_0 = np.argmin(abs(idx_peaks-Q_0_est)) idx_peaks = idx_peaks[idx_Q_0:-1] - - - - ######################################################################################################## - #Select maximum peak position, and step in units of gain up until either end of the spectrum - ######################################################################################################## + ####################################################################### + # Select maximum peak position, and step in units of gain up until + # either end of the spectrum + ####################################################################### - ######################################################################################################## + ####################################################################### # - Select pedestal as closest peak position to estimated pedestal. # - Select only peaks up to pedestal # - Get midpoints and end values. # - Linearly interpolate midpoints # - Subtract 'background', and threshold to be a minimum of zero. 
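
The FFT step above works because a photoelectron spectrum is roughly periodic in the gain: peaks spaced G bins apart put the strongest non-trivial local maximum of the power spectrum at frequency 1/G. A self-contained toy version of the idea (plain find_peaks instead of the spline interpolation used here; all numbers invented):

    import numpy as np
    from scipy.signal import find_peaks

    G_true, nbins = 25.0, 500
    bins = np.arange(nbins)
    counts = np.zeros(nbins)
    for k in range(8):   # Gaussian peaks every G_true bins, decaying envelope
        counts += 1e4*np.exp(-k)*np.exp(-0.5*((bins - 40 - k*G_true)/3.0)**2)

    padded = np.pad(counts, (nbins, nbins))   # zero-pad as in GetPreFit
    psd = np.abs(np.fft.rfft(padded))**2
    freqs = np.fft.rfftfreq(padded.size)      # cycles per bin

    pk, _ = find_peaks(psd)                   # local maxima only, skips DC
    G_est = 1.0/freqs[pk[np.argmax(psd[pk])]]
    print(G_est)                              # close to 25 bins
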
- ######################################################################################################## - + ####################################################################### counts_bg = np.full(len(idx_peaks)-1, None) idx_bg = np.full(len(idx_peaks)-1, None) - - + for i_peak in range(len(idx_peaks)-1): - last_peak= idx_peaks[i_peak] - next_peak= idx_peaks[i_peak+1] + last_peak = idx_peaks[i_peak] + next_peak = idx_peaks[i_peak+1] mid_peak = idx_peaks[i_peak] + G_fft/2 - cond_sel_interpeak = (counts_cr_pts>=last_peak) & (counts_cr_pts<=next_peak) - - if(sum(cond_sel_interpeak)>0): - + cond_sel_interpeak = ((counts_cr_pts >= last_peak) + & (counts_cr_pts <= next_peak)) + + if(sum(cond_sel_interpeak) > 0): counts_cr_pts_interpeak = counts_cr_pts[cond_sel_interpeak] counts_cr_values_interpeak = counts_cr_vals[cond_sel_interpeak] idx_min_interpeak = np.argmin(counts_cr_values_interpeak) - count_min_interpeak = counts_cr_values_interpeak[idx_min_interpeak] + count_min_interpeak = counts_cr_values_interpeak[ + idx_min_interpeak] + idx_min_interpeak = counts_cr_pts_interpeak[idx_min_interpeak] - - + else: - if(self._verbose): - print("No local minimum found in peak range. Using value at peak + gain/2") - + if (self._verbose): + print(("No local minimum found in peak range. " + "Using value at peak + gain/2")) + idx_min_interpeak = mid_peak - count_min_interpeak = counts[np.argmin(abs(bin_numbers-mid_peak))] - + count_min_interpeak = counts[ + np.argmin(abs(bin_numbers-mid_peak))] + idx_bg[i_peak] = idx_min_interpeak counts_bg[i_peak] = count_min_interpeak - - + try: arg_sort_bg = np.argsort(idx_bg) idx_bg = idx_bg[arg_sort_bg] counts_bg = counts_bg[arg_sort_bg] - first_nz_bin = bin_numbers[np.argmax(counts>0)] - counts_first_nz_bin = counts[np.argmax(counts>0)] + first_nz_bin = bin_numbers[np.argmax(counts > 0)] + counts_first_nz_bin = counts[np.argmax(counts > 0)] + + idx_bg = np.pad(idx_bg, (1, 0), "constant", + constant_values=first_nz_bin) - idx_bg = np.pad(idx_bg, (1,0), "constant", constant_values=first_nz_bin) - counts_bg = np.pad(counts_bg, (1,0), "constant", constant_values=counts_first_nz_bin) + counts_bg = np.pad(counts_bg, (1, 0), "constant", + constant_values=counts_first_nz_bin) - f_interp_bg = interp1d(idx_bg, counts_bg, bounds_error=False, fill_value=(0,0)) + f_interp_bg = interp1d(idx_bg, counts_bg, bounds_error=False, + fill_value=(0, 0)) counts_bg = f_interp_bg(bin_numbers).astype(float) counts_bgsub = counts - counts_bg - counts_bgsub = np.where(counts_bgsub<0, 0, counts_bgsub) - counts_bgsub_error = np.where(counts_bgsub>1, np.sqrt(counts_bgsub), 1) - counts_bgsub_error = np.sqrt(counts_bgsub_error**2 + counts_error**2) + counts_bgsub = np.where(counts_bgsub < 0, 0, counts_bgsub) + counts_bgsub_error = np.where(counts_bgsub > 1, + np.sqrt(counts_bgsub), 1) + counts_bgsub_error = np.sqrt(counts_bgsub_error**2 + + counts_error**2) + + # TODO: Bare except! except: if(self._verbose): - print("Background subtraction failed. Setting background subtracted spectrum to have no events.") + print(("Background subtraction failed. 
Setting background " + "subtracted spectrum to have no events.")) + counts_bgsub = counts counts_bg = np.zeros_like(counts) counts_bgsub_error = counts_error - - - - ######################################################################################################## - #Find maximum from spline fit of background subtracted - ######################################################################################################## - - + + ####################################################################### + # Find maximum from spline fit of background subtracted + ####################################################################### + try: - f_interp_counts_bgsub = UnivariateSpline(bin_numbers, + f_interp_counts_bgsub = UnivariateSpline(bin_numbers, counts_bgsub, - w = 1/counts_bgsub_error, + w=1/counts_bgsub_error, k=4) + # TODO: Bare except! except: - raise Exception("Interpolation of background-subtracted spectrum failed. Fit cannot proceed.") + raise Exception(("Interpolation of background-subtracted spectrum " + "failed. Fit cannot proceed.")) try: counts_cr_pts_bgsub = f_interp_counts_bgsub.derivative().roots() + # TODO: Bare except! except: - raise Exception("Root finding of background-subtracted spectrum failed. Fit cannot proceed.") + raise Exception(("Root finding of background-subtracted " + "spectrum failed. Fit cannot proceed.")) - if(len(counts_cr_pts_bgsub)<1): - raise Exception("No roots found in background-subtracted spectrum. Fit cannot proceed.") + if (len(counts_cr_pts_bgsub) < 1): + raise Exception("No roots found in background-subtracted spectrum." + " Fit cannot proceed.") - counts_cr_vals_bgsub = f_interp_counts_bgsub(counts_cr_pts_bgsub) max_peak = counts_cr_pts_bgsub[np.argmax(counts_cr_vals_bgsub)] - - idx_peaks = np.sort(np.concatenate( [np.arange(max_peak-G_fft, -G_fft/2, -G_fft), - np.array([max_peak]), - np.arange(max_peak+G_fft, nbins+G_fft/2, G_fft)] )) - - ######################################################################################################## + + idx_peaks = np.sort(np.concatenate( + [np.arange(max_peak-G_fft, -G_fft/2, -G_fft), + np.array([max_peak]), + np.arange(max_peak+G_fft, nbins+G_fft/2, G_fft)])) + + ####################################################################### # - Select pedestal as closest peak position to estimated pedestal. # - Select only peaks up to pedestal # - Update pedestal position. - ######################################################################################################## + ####################################################################### - idx_Q_0 = np.argmin(abs(idx_peaks-Q_0_est)) idx_peaks = idx_peaks[idx_Q_0:-1] Q_0 = idx_peaks[0] - - ######################################################################################################## - # - Find means and variances of all peaks in background-subtracted spectrum with greater than 100 events. + ####################################################################### + # - Find means and variances of all peaks in background-subtracted + # spectrum with greater than 100 events. 
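
The k=4 splines used throughout this routine are deliberate: the derivative of a quartic spline is a cubic spline, and scipy can return the roots of a cubic spline exactly, so the extrema of the smoothed spectrum fall out as critical points. In miniature (toy two-peak spectrum):

    import numpy as np
    from scipy.interpolate import InterpolatedUnivariateSpline

    x = np.arange(200, dtype=float)
    y = (np.exp(-0.5*((x - 50.0)/6.0)**2)
         + 0.6*np.exp(-0.5*((x - 120.0)/8.0)**2))

    spl = InterpolatedUnivariateSpline(x, y, k=4)
    crit = spl.derivative().roots()   # candidate extrema
    maxima = crit[(spl.derivative(2)(crit) < 0) & (spl(crit) > 0.05)]
    print(maxima)                     # close to 50 and 120
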
# - Fit line to means to get Gain/Pedestal - correction for binning # - Fit line to variances to Get sigma0/sigma1 - ######################################################################################################## + ####################################################################### - - #################### - #ITERATIVE GAUS FIT METHOD + # ITERATIVE GAUS FIT METHOD ##################### - - idx_peak_mus = np.full(len(idx_peaks), np.nan) idx_peak_dmus = np.full(len(idx_peaks), np.nan) - idx_peak_sigmas = np.full(len(idx_peaks), np.nan) - idx_peak_dsigmas = np.full(len(idx_peaks), np.nan) - idx_peak_vars = np.full(len(idx_peaks), np.nan) - idx_peak_dvars = np.full(len(idx_peaks), np.nan) - idx_peak_numbers= np.arange(0, len(idx_peaks)).astype(int) - + idx_peak_sigmas = np.full(len(idx_peaks), np.nan) + idx_peak_dsigmas = np.full(len(idx_peaks), np.nan) + idx_peak_vars = np.full(len(idx_peaks), np.nan) + idx_peak_dvars = np.full(len(idx_peaks), np.nan) + idx_peak_numbers = np.arange(0, len(idx_peaks)).astype(int) f_gaus = lambda x, mu, sigma, A: A*norm.pdf(x, loc=mu, scale=sigma) - + for p_i, peak in zip(idx_peak_numbers, idx_peaks): - + min_rng_peak = peak - G_fft/2 max_rng_peak = peak + G_fft/2 - cond_sel_peak = (bin_numbers>min_rng_peak) & (bin_numbers<max_rng_peak) + cond_sel_peak = ((bin_numbers > min_rng_peak) + & (bin_numbers < max_rng_peak)) counts_bgsub_peak = counts_bgsub[cond_sel_peak] bin_numbers_peak = bin_numbers[cond_sel_peak] N_peak = np.around(np.sum(counts_bgsub_peak), 0).astype(int) - - mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak) sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak) - + dmu_peak = sigma_peak/np.sqrt(N_peak) dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2) - - - if(N_peak < kwargs["min_n_events_peak"]): - if(self._verbose): - print("Peak {:d} had {:d} events, less than {:d} event limit. Using mean and standard deviation...".format(p_i, N_peak, kwargs["min_n_events_peak"])) - else: - + if (N_peak < kwargs["min_n_events_peak"]): + if(self._verbose): + print((f"Peak {p_i} had {N_peak} events, less than " + f"{kwargs['min_n_events_peak']} event limit. 
" + "Using mean and standard deviation...")) + else: delta_mu = np.inf delta_sigma = np.inf - try: - + try: iter_count = 0 iter_failed = False - while(iter_count<10 and (delta_mu>0.01*G_fft and delta_sigma>0.01*G_fft)): + while (iter_count < 10 + and (delta_mu > 0.01*G_fft + and delta_sigma > 0.01*G_fft)): mu_mns2sigma = max(mu_peak-2*sigma_peak, min_rng_peak) mu_pls2sigma = min(mu_peak+2*sigma_peak, max_rng_peak) - cond_sel_2sigma = (bin_numbers_peak>mu_mns2sigma) & (bin_numbers_peak<mu_pls2sigma) + cond_sel_2sigma = ((bin_numbers_peak > mu_mns2sigma) + & (bin_numbers_peak < mu_pls2sigma)) - if(sum(cond_sel_2sigma)<3): - iter_failed=True + if(sum(cond_sel_2sigma) < 3): + iter_failed = True break + f_gaus_peak = BinnedLH( + f_gaus, + bin_numbers_peak[cond_sel_2sigma], + counts_bgsub_peak[cond_sel_2sigma], + 1 + ) - f_gaus_peak = BinnedLH(f_gaus, - bin_numbers_peak[cond_sel_2sigma], - counts_bgsub_peak[cond_sel_2sigma ], - 1 - ) + m_gaus_peak = Minuit( + f_gaus_peak, + mu=mu_peak, + sigma=sigma_peak, + A=sum(cond_sel_2sigma) + ) - m_gaus_peak = Minuit(f_gaus_peak, mu=mu_peak, sigma=sigma_peak, A = sum(cond_sel_2sigma)) m_gaus_peak.limits['mu'] = (mu_mns2sigma, mu_pls2sigma) - m_gaus_peak.limits['sigma'] = (epsilon(), 2.0*sigma_peak) - m_gaus_peak.limits["A"] = (max(N_peak - 3*np.sqrt(N_peak), 1), N_peak + 3*np.sqrt(N_peak)) - m_gaus_peak.errordef=m_pedestal.LIKELIHOOD + m_gaus_peak.limits['sigma'] = (epsilon(), + 2.0*sigma_peak) + + m_gaus_peak.limits["A"] = ( + max(N_peak - 3*np.sqrt(N_peak), 1), + N_peak + 3*np.sqrt(N_peak)) + + m_gaus_peak.errordef = m_pedestal.LIKELIHOOD m_gaus_peak.migrad() m_gaus_peak.hesse() - - mu_peak_i = m_gaus_peak.values["mu"] + mu_peak_i = m_gaus_peak.values["mu"] sigma_peak_i = m_gaus_peak.values["sigma"] - dmu_peak_i = m_gaus_peak.errors["mu"] + dmu_peak_i = m_gaus_peak.errors["mu"] dsigma_peak_i = m_gaus_peak.errors["sigma"] - if(iter_count==0): + if (iter_count == 0): delta_mu = mu_peak_i delta_sigma = sigma_peak_i - else: - delta_mu = abs(mu_peak - mu_peak_i) delta_sigma = abs(sigma_peak - sigma_peak_i) - mu_peak = mu_peak_i sigma_peak = sigma_peak_i dmu_peak = dmu_peak_i dsigma_peak = dsigma_peak_i - iter_count+=1 + iter_count += 1 if(iter_failed): if(self._verbose): - print("Iterative 2-sigma Gaus fit for Peak {:d} failed because the fit range went below 3 bins in size. Using mean and standard deviation.".format(p_i)) + print(("Iterative 2-sigma Gaus fit for Peak " + f"{p_i} failed because the fit range went " + "below 3 bins in size. Using mean and " + "standard deviation.")) mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak) - sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak) + sigma_peak = HistStd(counts_bgsub_peak, + bin_numbers_peak) dmu_peak = sigma_peak/np.sqrt(N_peak) dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2) - - + # TODO: Bare except! except: if(self._verbose): - print("Iterative 2-sigma Gaus fit for Peak {:d} failed. Using mean and standard deviation...".format(p_i)) - + print(("Iterative 2-sigma Gaus fit for Peak " + f"{p_i} failed. 
@@ -2153,178 +2167,166 @@ class PeakOTron:
             idx_peak_sigmas[p_i] = sigma_peak
             idx_peak_dsigmas[p_i] = dsigma_peak
 
+        cond_sel_peaks = ((~np.isnan(idx_peak_mus))
+                          & (~np.isnan(idx_peak_sigmas))
+                          & (~np.isnan(idx_peak_dmus))
+                          & (~np.isnan(idx_peak_dsigmas))
+                          & (idx_peak_sigmas > 0.1))
+
+        if(sum(cond_sel_peaks) < 3):
+            raise Exception(("Less than three peaks found. Cannot proceed. "
+                             "Try reducing the min_n_events_peak variable."))
-
-
-
-        cond_sel_peaks = (~np.isnan(idx_peak_mus)) & (~np.isnan(idx_peak_sigmas)) & (~np.isnan(idx_peak_dmus)) & (~np.isnan(idx_peak_dsigmas)) & (idx_peak_sigmas>0.1)
-
-        if(sum(cond_sel_peaks)<3):
-            raise Exception("Less than three peaks found. Cannot proceed. Try reducing the min_n_events_peak variable.")
-
-
         idx_peak_numbers = idx_peak_numbers[cond_sel_peaks]
         idx_peak_mus = idx_peak_mus[cond_sel_peaks]
         idx_peak_sigmas = idx_peak_sigmas[cond_sel_peaks]
         idx_peak_dmus = idx_peak_dmus[cond_sel_peaks]
         idx_peak_dsigmas = idx_peak_dsigmas[cond_sel_peaks]
-
-
         idx_peak_vars = idx_peak_sigmas**2
         idx_peak_dvars = 2*idx_peak_sigmas*idx_peak_dsigmas
 
-        f_mu_lin = HuberRegression(lambda x, G, Q_0: Linear(x, G, Q_0),
-                                   idx_peak_numbers,
-                                   idx_peak_mus,
+        f_mu_lin = HuberRegression(lambda x, G, Q_0: Linear(x, G, Q_0),
+                                   idx_peak_numbers,
+                                   idx_peak_mus,
                                    idx_peak_dmus)
 
-        f_var_lin = HuberRegression(lambda x, var_1, var_0: Linear(x, var_1, var_0),
-                                    idx_peak_numbers,
-                                    idx_peak_vars,
-                                    idx_peak_dvars,
-                                    )
+        f_var_lin = HuberRegression(
+            lambda x, var_1, var_0: Linear(x, var_1, var_0),
+            idx_peak_numbers,
+            idx_peak_vars,
+            idx_peak_dvars,
+        )
 
-        m_mu_lin = Minuit(f_mu_lin, Q_0 = Q_0, G = G_fft)
+        m_mu_lin = Minuit(f_mu_lin, Q_0=Q_0, G=G_fft)
         m_mu_lin.limits["Q_0"] = (epsilon(), nbins)
         m_mu_lin.fixed["G"] = True
         m_mu_lin.errordef = m_mu_lin.LEAST_SQUARES
         m_mu_lin.migrad()
         m_mu_lin.hesse()
 
-
-        m_var_lin = Minuit(f_var_lin, var_0 = 1, var_1 = 1)
+        m_var_lin = Minuit(f_var_lin, var_0=1, var_1=1)
         m_var_lin.limits["var_0"] = (epsilon(), nbins)
         m_var_lin.limits["var_1"] = (epsilon(), nbins)
         m_var_lin.errordef = m_var_lin.LEAST_SQUARES
         m_var_lin.migrad()
         m_var_lin.hesse()
-
-
         Q_0 = m_mu_lin.values["Q_0"]
         dQ_0 = m_mu_lin.errors["Q_0"]
 
-        sigma_0 = np.sqrt(m_var_lin.values["var_0"])
+        sigma_0 = np.sqrt(m_var_lin.values["var_0"])
         dsigma_0 = (0.5/sigma_0)*m_var_lin.errors["var_0"]
-        sigma_1 = np.sqrt(m_var_lin.values["var_1"])
+        sigma_1 = np.sqrt(m_var_lin.values["var_1"])
+        # TODO: 0.5/sigma_0 below looks like a bug; error propagation
+        # for sigma_1 = sqrt(var_1) suggests 0.5/sigma_1.
         dsigma_1 = (0.5/sigma_0)*m_var_lin.errors["var_1"]
-
-
-        ########################################################################################################
+
+        #######################################################################
         # Use final estimate of pedestal to determine the mu, lambda
-        # Calculate k_max by summing up the GP distribution probabilities (cumulative distribution function) for all peaks in
+        # Calculate k_max by summing up the GP distribution probabilities
+        # (cumulative distribution function) for all peaks in
         # the spectrum, and then finding the peak closest to the C_%
-        ########################################################################################################
-
-
+        #######################################################################
 
         mu_GP = max(epsilon(), GP_mu(mu_unsub-Q_0, sigma, gamma))
-        lbda_GP = min(max(epsilon(), GP_lbda(mu_unsub-Q_0, sigma, gamma)), 1-epsilon())
+        lbda_GP = min(max(epsilon(), GP_lbda(mu_unsub-Q_0, sigma, gamma)),
+                      1-epsilon())
+
         k_max = len(idx_peaks)
 
         if(self._max_n_peaks is not None):
-            if(k_max>self._max_n_peaks):
+            if(k_max > self._max_n_peaks):
                 if(self._verbose):
-                    print("Max # light peaks ({:d}) exceeded set maximum number of light peaks. Setting # peaks to {:d}".format(k_max, self._max_n_peaks))
+                    print((f"Max # light peaks ({k_max}) exceeded set maximum "
+                           "number of light peaks. Setting # peaks "
+                           f"to {self._max_n_peaks}"))
+
                 k_max = self._max_n_peaks
-
-        elif(k_max<4):
+
+        elif(k_max < 4):
             if(self._verbose):
-                print("Max # light peaks ({:d}) less than 4, so cannot fit. Setting # peaks to 4".format(k_max))
-            k_max = 4
+                print((f"Max # light peaks ({k_max}) less than 4, so "
+                       "cannot fit. Setting # peaks to 4"))
+            k_max = 4
 
-        ########################################################################################################
+        #######################################################################
         # - Convert to charge units: (K= Q - Q_0)/G
-        # - Estimate Probability density in a range of 0.1 about the minimum, K=0.5. N_DCR_min = mean(counts[0.45<K<0.55])/0.1
-        # - Get N_DCR = sum(counts[0<K<0.5]) + 0.5*sum(counts[0.5<counts<1])
+        # - Estimate probability density in a range of 0.1 about the minimum,
+        #   K=0.5. N_DCR_min = mean(counts[0.45<K<0.55])/0.1
+        # - Get N_DCR = sum(counts[0<K<0.5]) + 0.5*sum(counts[0.5<K<1])
         # - calculate DCR
-        ########################################################################################################
-
+        #######################################################################
 
         K = (bin_numbers - Q_0)/G_fft
         dK = abs(K[1]-K[0])
 
-        ################################
-        # In order to do the interpolated integral, we need to change co-ordinate
-        #bw = dx = 1
-        #dK = dK/dx = 1/G dx
-        #dx = G*dK
-        #so ∫counts.dx = G*∫counts.dK
-        #This is necessary for correct calculation of the error.
+        # In order to do the interpolated integral, we need to
+        # change co-ordinate:
+        #
+        # bw = dx = 1
+        # dK/dx = 1/G  =>  dx = G*dK
+        # so ∫counts.dx = G*∫counts.dK
+        # This is necessary for correct calculation of the error.
         ################################
 
-        cond_NN = (K>=0.45)&(K<=0.55)
-        cond_Nc = (K<0.5)
-
-
-        NN_sum = max(np.sum(counts[cond_NN]),1)
+        cond_NN = (K >= 0.45) & (K <= 0.55)
+        cond_Nc = (K < 0.5)
+
+        NN_sum = max(np.sum(counts[cond_NN]), 1)
         NN = np.mean(counts[cond_NN])
 
-        if(NN!=NN):
+        if(NN != NN):  # NN != NN is True only if NN is NaN
             NN = float(counts[np.argmin(abs(K-0.5))])
             NN_sum = float(NN)
-
-
         Nc = max(float(np.sum(counts[cond_Nc])), 1)
-
         R0p5 = NN/Nc
-
-
         DCR = R0p5/(4*kwargs["tau"]*dK)
         DCR = DCR*np.exp(DCR*kwargs["tau"])
         dDCR = DCR*(1/np.sqrt(max(1, NN_sum)))
 
+        # TODO: mu_DCR not used?
         mu_DCR = DCR*(kwargs["t_gate"] + kwargs["t_0"])
 
-        if(DCR!=DCR or DCR<epsilon()):
+        if(DCR != DCR or DCR < epsilon()):
             if(self._verbose):
-                print("DCR returned invalid, or less than machine epsilon. Choosing DCR to be machine epsilon...")
+                print(("DCR returned invalid, or less than machine epsilon. "
+                       "Choosing DCR to be machine epsilon..."))
+
             DCR = epsilon()
             dDCR = 1e-9
 
         if(self._max_n_peaks_dcr is not None):
             if(self._verbose):
-                print("Setting # dark peaks to {:d}".format( self._max_n_peaks_dcr))
+                print(("Setting # dark peaks to "
+                       f"{self._max_n_peaks_dcr}"))
+
             k_max_dcr = self._max_n_peaks_dcr
-
-        elif(k_max_dcr<4):
+
+        elif(k_max_dcr < 4):
             if(self._verbose):
-                print("Max # dark peaks ({:d}) less than 4, so cannot fit. Setting # peaks to 4".format(k_max_dcr))
-            k_max_dcr = 4
+                print((f"Max # dark peaks ({k_max_dcr}) less than 4, "
+                       "so cannot fit. Setting # peaks to 4"))
+            k_max_dcr = 4
 
-
-        self._hist_data["bw"] = bw
+        self._hist_data["bw"] = bw
         self._hist_data["bc_min"] = bin_centres[0]
-        self._hist_data["bins"]=bins
-        self._hist_data["nbins"]=nbins
+        self._hist_data["bins"] = bins
+        self._hist_data["nbins"] = nbins
         self._hist_data["counts"] = counts
         self._hist_data["counts_error"] = counts_error
         self._hist_data["counts_bg"] = counts_bg
         self._hist_data["counts_bgsub"] = counts_bgsub
         self._hist_data["bin_centres"] = bin_centres
         self._hist_data["bin_numbers"] = bin_numbers
-
         self._hist_data["fft_amplitude"] = fft_amplitude
         self._hist_data["fft_freq"] = fft_freq
         self._hist_data["G_fft"] = G_fft
         self._hist_data["Q_0_est"] = Q_0_est
-
         self._hist_data["peaks"]["peak_position"] = idx_peaks
         self._hist_data["peaks"]["peak_index"] = idx_peak_numbers
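
To make the valley-based dark-count-rate estimate above concrete: the code divides the mean counts per bin in the 0.45 < K < 0.55 valley by the counts below the 0.5 p.e. crossing, converts that ratio to a rate, and applies an exponential pile-up correction. Here is the same arithmetic on invented numbers; every input below is hypothetical, only the formulas mirror the code.

    import numpy as np

    tau = 21.953      # slow-component time constant (ns), as in example.py
    dK = 0.05         # bin width in photoelectron units, dK = bw/G
    NN = 12.0         # mean counts per bin in the valley 0.45 < K < 0.55
    NN_sum = 120.0    # total counts in the valley window
    Nc = 40000.0      # counts below the K = 0.5 crossing

    R0p5 = NN / Nc                     # relative height of the 0.5 p.e. valley
    DCR = R0p5 / (4 * tau * dK)        # first-order rate estimate, in 1/ns
    DCR = DCR * np.exp(DCR * tau)      # correct for dark counts piling up in tau
    dDCR = DCR / np.sqrt(max(1.0, NN_sum))

    print(f"DCR = {DCR*1e3:.4f} MHz +/- {dDCR*1e3:.4f} MHz")

With these toy numbers the estimate comes out near 0.07 MHz, and the exponential correction is tiny because DCR*tau is much less than one; for noisier devices the correction grows quickly.
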
" + "Choosing DCR to be machine epsilon...")) DCR = epsilon() dDCR = 1e-9 - - - + if(self._max_n_peaks_dcr is not None): if(self._verbose): - print("Setting # dark peaks to {:d}".format( self._max_n_peaks_dcr)) + print(("Setting # dark peaks to " + f"{self._max_n_peaks_dcr}")) k_max_dcr = self._max_n_peaks_dcr - - elif(k_max_dcr<4): + + elif(k_max_dcr < 4): if(self._verbose): - print("Max # dark peaks ({:d}) less than 4, so cannot fit. Setting # peaks to 4".format(k_max_dcr)) - k_max_dcr = 4 - + print((f"Max # dark peaks ({k_max_dcr}) less than 4, " + "so cannot fit. Setting # peaks to 4")) + k_max_dcr = 4 - - self._hist_data["bw"] = bw + self._hist_data["bw"] = bw self._hist_data["bc_min"] = bin_centres[0] - self._hist_data["bins"]=bins - self._hist_data["nbins"]=nbins + self._hist_data["bins"] = bins + self._hist_data["nbins"] = nbins self._hist_data["counts"] = counts self._hist_data["counts_error"] = counts_error self._hist_data["counts_bg"] = counts_bg self._hist_data["counts_bgsub"] = counts_bgsub self._hist_data["bin_centres"] = bin_centres self._hist_data["bin_numbers"] = bin_numbers - self._hist_data["fft_amplitude"] = fft_amplitude self._hist_data["fft_freq"] = fft_freq self._hist_data["G_fft"] = G_fft self._hist_data["Q_0_est"] = Q_0_est - self._hist_data["peaks"]["peak_position"] = idx_peaks self._hist_data["peaks"]["peak_index"] = idx_peak_numbers @@ -2334,7 +2336,6 @@ class PeakOTron: self._hist_data["peaks"]["peak_variance_error"] = idx_peak_dvars self._hist_data["peaks"]["peak_std_deviation"] = idx_peak_sigmas self._hist_data["peaks"]["peak_std_deviation_error"] = idx_peak_dsigmas - self._prefit_values_bin["Q_0"] = Q_0 self._prefit_values_bin["G"] = G_fft @@ -2349,15 +2350,11 @@ class PeakOTron: self._prefit_values_bin["t_0"] = kwargs["t_0"] self._prefit_values_bin["t_gate"] = kwargs["t_gate"] self._prefit_values_bin["tau_R"] = kwargs["tau_R"] - self._prefit_values_bin["k_max"]=k_max - self._prefit_values_bin["k_max_dcr"]=k_max_dcr - self._prefit_values_bin["len_pad"]=self._len_pad - self._prefit_values_bin["A"]=np.sum(counts) - - - - - + self._prefit_values_bin["k_max"] = k_max + self._prefit_values_bin["k_max_dcr"] = k_max_dcr + self._prefit_values_bin["len_pad"] = self._len_pad + self._prefit_values_bin["A"] = np.sum(counts) + self._prefit_errors_bin["Q_0"] = dQ_0 self._prefit_errors_bin["G"] = 1 self._prefit_errors_bin["mu"] = 0.1 @@ -2372,16 +2369,3 @@ class PeakOTron: self._prefit_errors_bin["t_gate"] = kwargs["t_gate_err"] self._prefit_errors_bin["DCR"] = dDCR self._prefit_errors_bin["A"] = np.sqrt(np.sum(counts)) - - - - - - - - - - - - - diff --git a/example.py b/example.py index 47c1b70..f86c9b3 100644 --- a/example.py +++ b/example.py @@ -1,105 +1,86 @@ import os -import sys import numpy as np import pandas as pd from PeakOTron import PeakOTron from joblib import dump -import time +from HelperFunctions import f_tau - -C_tau = lambda V, V_bd, V_0: (V - V_bd)/V_0 -f_tau = lambda V, V_bd, V_0: -1/np.log((1-np.exp(C_tau(V, V_bd, V_0)*np.exp(-1)))/(1 - np.exp(C_tau(V, V_bd, V_0)))) - -V_bd_hmt = 51.570574 + 0.307 +V_bd_hmt = 51.877574 V_0_hmt = 2.906 -tau = 21.953 ##SLOW COMPONENT OF SIPM PULSE -t_0 = 100.0 ## PRE-INTEGRATION TIME -t_gate = 104.0 ## GATE LENGTH -bin_0=-100.0 ## SELECT FIRST BIN OF SPECTRUM (CAN BE AUTOMATIC) -truncate_nsigma0_up = 2.0 ## SCAN SPECTRUM FROM Q < Q_0 - 4 sigma_0 -truncate_nsigma0_do = 2.0 ## EVALUATE SPECTRUM CHI2 IN Q_0 - x*sigma_0 < Q < Q_0 + 2*sigma_0 -prefit_only=False ## FIT THE WHOLE SPECTRUM - - +tau = 21.953 # SLOW COMPONENT OF 
diff --git a/example.py b/example.py
index 47c1b70..f86c9b3 100644
--- a/example.py
+++ b/example.py
@@ -1,105 +1,86 @@
 import os
-import sys
 import numpy as np
 import pandas as pd
 from PeakOTron import PeakOTron
 from joblib import dump
-import time
+from HelperFunctions import f_tau
 
-
-C_tau = lambda V, V_bd, V_0: (V - V_bd)/V_0
-f_tau = lambda V, V_bd, V_0: -1/np.log((1-np.exp(C_tau(V, V_bd, V_0)*np.exp(-1)))/(1 - np.exp(C_tau(V, V_bd, V_0))))
-
-V_bd_hmt = 51.570574 + 0.307
+V_bd_hmt = 51.877574
 V_0_hmt = 2.906
 
-tau = 21.953 ##SLOW COMPONENT OF SIPM PULSE
-t_0 = 100.0 ## PRE-INTEGRATION TIME
-t_gate = 104.0 ## GATE LENGTH
-bin_0=-100.0 ## SELECT FIRST BIN OF SPECTRUM (CAN BE AUTOMATIC)
-truncate_nsigma0_up = 2.0 ## SCAN SPECTRUM FROM Q < Q_0 - 4 sigma_0
-truncate_nsigma0_do = 2.0 ## EVALUATE SPECTRUM CHI2 IN Q_0 - x*sigma_0 < Q < Q_0 + 2*sigma_0
-prefit_only=False ## FIT THE WHOLE SPECTRUM
-
-
+tau = 21.953  # SLOW COMPONENT OF SIPM PULSE
+t_0 = 100.0  # PRE-INTEGRATION TIME
+t_gate = 104.0  # GATE LENGTH
+bin_0 = -100.0  # SELECT FIRST BIN OF SPECTRUM (CAN BE AUTOMATIC)
+truncate_nsigma0_up = 2.0  # SCAN SPECTRUM FROM Q < Q_0 - 4 sigma_0
+# EVALUATE SPECTRUM CHI2 IN Q_0 - x*sigma_0 < Q < Q_0 + 2*sigma_0
+truncate_nsigma0_do = 2.0
+prefit_only = False  # FIT THE WHOLE SPECTRUM
 
 print("--------------------")
 print('EXAMPLE SIPM CALIBRATION RUN')
 print("--------------------")
-
 out_dict = {}
 files_to_fit = []
 
-## Find all histograms in directory
+# Find all histograms in directory
 for root, dirs, files in os.walk("./data/hamamatsu_pcb6/Light"):
-
-    for file in files:
-
-        if file.endswith(".txt"):
-            files_to_fit.append([file, os.path.join(root, file)])
-
-
-## Print files.
+    for file in files:
+        if file.endswith(".txt"):
+            files_to_fit.append([file, os.path.join(root, file)])
+
+# Print files.
 print("Files to fit:")
 for i, (file, _) in enumerate(files_to_fit):
     print('File {0}: {1}'.format(i, file))
 
-
-
-SiPM  = "PM1125NS_SBO"
+SiPM = "PM1125NS_SBO"
 
-## Loop thorough files
+# Loop through files
 for i, (file, path) in enumerate(files_to_fit):
     items = file.split('_')
     V = float(items[2].replace('V', '').replace('p', '.'))
-    f_tau_hmt = f_tau(V, V_bd_hmt, V_0_hmt)
-
+    f_tau_hmt = f_tau(V, V_bd_hmt, V_0_hmt)
 
     print("\n\n")
     print("===============================================================")
     print("FIT {:d} - {:s}".format(i, file))
     print("===============================================================")
     print("\n\n")
-
-    ## Load files.
+    # Load files.
     data = np.loadtxt(path, skiprows=8)
 
-    ## Create a PeakOTron Fit Object.
+    # Create a PeakOTron Fit Object.
     f_data = PeakOTron(verbose=False)
 
-    ## Perform fit.
-    f_data.Fit(data,
-               tau=tau, #SLOW PULSE COMPONENT TIME CONSTANT (ns)
-               t_gate=t_gate, #GATE LENGTH (ns)
-               t_0 = t_0, #INTEGRATION TIME BEFORE GATE (ns)
-               tau_R=f_tau_hmt*tau,
-               bin_0 = bin_0,
-               truncate_nsigma0_up = truncate_nsigma0_up,
-               truncate_nsigma0_do = truncate_nsigma0_do
-
-
-               ) #BINNING RULE "knuth", "freedman", "scott" - use bw= #### to override. it is still required for DCR calculation.
+    # Perform fit.
+    # NOTE: the binning rule is one of "knuth", "freedman", "scott";
+    # pass bw=#### to override it. A bin width is still required for
+    # the DCR calculation.
+    f_data.Fit(data,
+               tau=tau,  # SLOW PULSE COMPONENT TIME CONSTANT (ns)
+               t_gate=t_gate,  # GATE LENGTH (ns)
+               t_0=t_0,  # INTEGRATION TIME BEFORE GATE (ns)
+               tau_R=f_tau_hmt*tau,
+               bin_0=bin_0,
+               truncate_nsigma0_up=truncate_nsigma0_up,
+               truncate_nsigma0_do=truncate_nsigma0_do)
 
-    f_data.PlotFit(plot_in_bins=True, display=False, save_directory="./Results/{0}_fit.png".format(os.path.splitext(file)[0]))
-
+    f_data.PlotFit(
+        plot_in_bins=True, display=False,
+        save_directory=f"./Results/{os.path.splitext(file)[0]}_fit.png")
+
+    dump(f_data, f"./Results/{os.path.splitext(file)[0]}.joblib")
-
-    dump(f_data, "./Results/{0}.joblib".format(os.path.splitext(file)[0]))
-
-
-
     fit_out = {}
     fit_val, fit_err = f_data.GetFitResults()
     for key, val in fit_val.items():
         print("{:s} : {:3.3E}".format(key, val))
-
+
     fit_out["SiPM"] = SiPM
     fit_out["V"] = V
@@ -117,9 +98,6 @@ for i, (file, path) in enumerate(files_to_fit):
     print("===============================================================")
     print("\n\n")
 
-
-
-
 df = pd.DataFrame.from_dict(out_dict)
 df.to_csv("./fit_results_{:s}.csv".format(SiPM))
--
GitLab
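
A closing note on example.py's new import: f_tau now comes from HelperFunctions.py, which is not shown in full here. For readers without that file at hand, this is a sketch reconstructed from the local definition the patch deletes, on the assumption that the moved copy is unchanged.

    import numpy as np

    def C_tau(V, V_bd, V_0):
        # overvoltage in units of V_0
        return (V - V_bd) / V_0

    def f_tau(V, V_bd, V_0):
        c = C_tau(V, V_bd, V_0)
        return -1.0 / np.log((1 - np.exp(c * np.exp(-1))) / (1 - np.exp(c)))

    # usage as in the fit loop above, with the Hamamatsu constants:
    print(f_tau(54.0, 51.877574, 2.906))
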