From 602e550e91c8847d4d27e7a60a9d3591124c29a8 Mon Sep 17 00:00:00 2001
From: Jack Christopher Hutchinson Rolph <jack.rolph@desy.de>
Date: Fri, 30 Sep 2022 11:18:50 +0200
Subject: [PATCH] Update example.py, requirements.txt, Model.py, LossFunctions.py, HelperFunctions.py, AdditionalPDFs.py, PeakOTron.py

---
 AdditionalPDFs.py  |  127 +++
 HelperFunctions.py |  165 ++++
 LossFunctions.py   |  105 +++
 Model.py           |  824 +++++++++++++++++
 PeakOTron.py       | 2149 ++++++++++++++++++++++++++++++++++++++++++++
 example.py         |  100 +++
 requirements.txt   |    8 +
 7 files changed, 3478 insertions(+)
 create mode 100644 AdditionalPDFs.py
 create mode 100644 HelperFunctions.py
 create mode 100644 LossFunctions.py
 create mode 100644 Model.py
 create mode 100644 PeakOTron.py
 create mode 100644 example.py
 create mode 100644 requirements.txt

diff --git a/AdditionalPDFs.py b/AdditionalPDFs.py
new file mode 100644
index 0000000..2934d4c
--- /dev/null
+++ b/AdditionalPDFs.py
@@ -0,0 +1,127 @@
+import numpy as np
+import scipy.special as sc
+from scipy.stats import rv_discrete, rv_continuous, uniform
+from scipy.stats._distn_infrastructure import get_distribution_names
+from scipy._lib._util import _lazywhere
+
+
+class gpd_gen(rv_discrete):
+    def _argcheck(self, mu, lbda):
+        return (mu >= 0.0) & (lbda >= 0.0) & (lbda <= 1.0)
+
+    def _rvs(self, mu, lbda, size=None, random_state=None):
+        # Sample via the branching (Galton-Watson) representation of the
+        # generalised Poisson distribution: a Poisson(mu) first generation,
+        # each member spawning a Poisson(lbda) number of offspring.
+        population = np.asarray(random_state.poisson(mu, size))
+        if population.shape == ():
+            population = population.reshape(-1)
+        offspring = population.copy()
+        while np.any(offspring > 0):
+            # probability dists are NOT ufuncs, so draw element-wise
+            offspring[:] = [
+                random_state.poisson(m)
+                for m in lbda*offspring
+            ]
+            population += offspring
+        return population
+
+    def _pmf(self, k, mu, lbda):
+        return np.exp(self._logpmf(k, mu, lbda))
+
+    def _logpmf(self, k, mu, lbda):
+        mu_pls_klmb = mu + lbda*k
+        return np.log(mu) + sc.xlogy(k-1, mu_pls_klmb) - mu_pls_klmb - sc.gammaln(k+1)
+
+    def _munp(self, n, mu, lbda):
+        if n == 1:
+            return mu/(1-lbda)
+        elif n == 2:
+            return (mu/(1-lbda))**2 + mu/(1-lbda)**3
+
+
+gpoisson = gpd_gen(name='gpoisson')
+
+
+class borel_gen(rv_discrete):
+    def _argcheck(self, mu):
+        return ((mu > 0) & (mu < 1))
+
+    def _logpmf(self, k, mu):
+        # shifted Borel distribution: k = n - 1, with support k >= 0
+        n = k + 1
+        Pk = sc.xlogy(n-1, mu*n) - sc.gammaln(n + 1) - mu*n
+        return Pk
+
+    def _pmf(self, k, mu):
+        return np.exp(self._logpmf(k, mu))
+
+#    def _rvs(self, mu, size=None, random_state=None):
+#        u = uniform.rvs(loc=0, scale=1, size=size)
+#        cum = np.cumsum([self._pmf(_k, mu) for _k in range(0, 100)])
+#        rnd = [np.argmax(cum >= _u) for _u in u]
+#        return rnd
+
+    def _rvs(self, mu, size=None, random_state=None, epsilon=1e-4):
+        # Inverse-transform sampling: accumulate the PMF until it covers the
+        # largest uniform draw, then invert the empirical CDF.
+        _u = uniform.rvs(loc=0, scale=1-epsilon, size=size)
+        _sum = 0
+        _k = 0
+        _elem = []
+        _max_u = np.max(_u)
+
+        while(_sum < _max_u):
+            _pmf = self._pmf(_k, mu)
+            _elem.append(_pmf)
+            _sum += _pmf
+            _k += 1
+
+        _cum = np.cumsum(_elem)
+        _rnd = [np.argmax(_cum >= __u) for __u in _u]
+
+        return _rnd
+
+    def _stats(self, mu):
+        # mean of the shifted (k = n - 1) Borel distribution; variance,
+        # skewness and kurtosis are shift-invariant
+        _mu = mu/(1-mu)
+        _var = mu/(1-mu)**3
+        tmp = np.asarray(mu)
+        mu_nonzero = ((tmp > 0) & (tmp < 1))
+        # g1 and g2: Lagrangian Probability Distributions, 978-0-8176-4365-2, page 159
+        g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: (1+2*x)/np.sqrt(x*(1-x)), np.inf)
+        g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 3 + (1 + 8*x+6*x**2)/(x*(1-x)), np.inf)
+        return _mu, _var, g1, g2
+
+
+borel = borel_gen(name='borel')
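+
+
+########################################################################################################
+## Example (sketch): basic use of the distributions above through the standard rv_discrete
+## interface; the parameter values (mu=2.0, lbda=0.2) are arbitrary illustrations.
+## Uncomment to run standalone.
+########################################################################################################
+
+# if __name__ == "__main__":
+#     ks = np.arange(0, 100)
+#     print("GP pmf sum:   ", np.sum(gpoisson.pmf(ks, 2.0, 0.2)))   # should be ~1
+#     print("Borel pmf sum:", np.sum(borel.pmf(ks, 0.2)))           # should be ~1
+#     print("GP samples:   ", gpoisson.rvs(2.0, 0.2, size=5))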
+
+
+class erlang_gen(rv_continuous):
+
+    def _pdf(self, x, a):
+        # gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
+        return np.exp(self._logpdf(x, a))
+
+    def _logpdf(self, x, a):
+        return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
+
+#    def _rvs(self, mu, nu, size=None, random_state=None):
+#        u = scipy.stats.uniform.rvs(loc=0, scale=1, size=size)
+#        cum = np.cumsum([self._pmf(_k, mu, nu) for _k in range(0, 100)])
+#        rnd = [np.argmax(cum >= _u) for _u in u]
+#        return rnd
+
+
+pairs = list(globals().items())
+_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
+
+__all__ = _distn_names + _distn_gen_names
diff --git a/HelperFunctions.py b/HelperFunctions.py
new file mode 100644
index 0000000..ca68181
--- /dev/null
+++ b/HelperFunctions.py
@@ -0,0 +1,165 @@
+from iminuit import Minuit
+from iminuit.util import describe
+from scipy.interpolate import interp1d
+from astropy.stats import bootstrap
+import scipy.special as sc
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+# HELPER FUNCTIONS
+
+class FakeFuncCode:
+    def __init__(self, f, prmt=None, dock=0, append=None):
+        # f can either be a tuple or a function object
+        self.co_varnames = list(describe(f))
+        self.co_argcount = len(self.co_varnames)
+        self.co_argcount -= dock
+        self.co_varnames = self.co_varnames[dock:]
+
+        if prmt is not None:  # rename parameters from the front
+            for i, p in enumerate(prmt):
+                self.co_varnames[i] = p
+
+        if isinstance(append, str):
+            append = [append]
+
+        if append is not None:
+            old_count = self.co_argcount
+            self.co_argcount += len(append)
+            self.co_varnames = tuple(
+                list(self.co_varnames[:old_count]) +
+                append +
+                list(self.co_varnames[old_count:]))
+
+
+def LatexFormat(value, scirange=[0.01, 1000]):
+    if(np.abs(value) > scirange[0] and np.abs(value) < scirange[1]):
+        float_str = r"${:3.3f}$".format(value)
+    else:
+        try:
+            float_str = "{:3.3E}".format(value)
+            base, exponent = float_str.split("E")
+            float_str = r"${0} \times 10^{{{1}}}$".format(base, int(exponent))
+        except Exception:
+            float_str = str(value)
+    return float_str
+
+
+def FormatExponent(ax, axis='y'):
+
+    # Change the ticklabel format to scientific format
+    ax.ticklabel_format(axis=axis, style='sci', scilimits=(-2, 2))
+
+    # Get the appropriate axis
+    if axis == 'y':
+        ax_axis = ax.yaxis
+        x_pos = 0.0
+        y_pos = 1.0
+        horizontalalignment = 'left'
+        verticalalignment = 'bottom'
+    else:
+        ax_axis = ax.xaxis
+        x_pos = 1.0
+        y_pos = -0.05
+        horizontalalignment = 'right'
+        verticalalignment = 'top'
+
+    # Run plt.tight_layout() because otherwise the offset text doesn't update
+    plt.tight_layout()
+    ##### THIS IS A BUG
+    ##### Well, at least it's sub-optimal because you might not
+    ##### want to use tight_layout(). If anyone has a better way of
+    ##### ensuring the offset text is updated appropriately
+    ##### please comment!
+
+    # Get the offset value
+    offset = ax_axis.get_offset_text().get_text()
+
+    if len(offset) > 0:
+        # Get that exponent value and change it into latex format
+        minus_sign = u'\u2212'
+        expo = float(offset.replace(minus_sign, '-').split('e')[-1])
+        offset_text = r'x$\mathregular{10^{%d}}$' % expo
+
+        # Turn off the offset text that's calculated automatically
+        ax_axis.offsetText.set_visible(False)
+
+        # Add in a text box at the top of the y axis
+        ax.text(x_pos, y_pos, offset_text, transform=ax.transAxes,
+                horizontalalignment=horizontalalignment,
+                verticalalignment=verticalalignment)
+    return ax
+
+
+def Bootstrap(data, statistic, n_bootstrap, alpha=0.95):
+    if not (0 < alpha < 1):
+        raise ValueError("confidence level must be in (0, 1)")
+
+    if len(data) < 1:
+        raise ValueError("data must contain at least one measurement.")
+
+    boot_stat = bootstrap(data, n_bootstrap, bootfunc=statistic)
+
+    stat_data = statistic(data)
+    mean_stat = np.mean(boot_stat)
+    est_stat = 2*stat_data - mean_stat
+    std_err = np.std(boot_stat)
+    z_score = np.sqrt(2.0)*sc.erfinv(alpha)
+    conf_interval = est_stat + z_score*np.array((-std_err, std_err))
+
+    return est_stat, std_err, conf_interval
+
+
+def Linear(x, m, c):
+    return m*x + c
+
+
+def HistMoment(counts, bin_centres, i, mu):
+    return np.sum(counts*(bin_centres-mu)**i)
+
+
+def HistMean(counts, bin_centres):
+    N = np.sum(counts)
+    mu = HistMoment(counts, bin_centres, 1, 0)/N
+    return mu
+
+
+def HistStd(counts, bin_centres):
+    N = np.sum(counts)
+    mu = HistMean(counts, bin_centres)
+    var = HistMoment(counts, bin_centres, 2, mu)/(N-1)
+    sigma = np.sqrt(var)
+    return sigma
+
+
+def HistSkew(counts, bin_centres):
+    N = np.sum(counts)
+    mu = HistMean(counts, bin_centres)
+    gamma = (N*np.sqrt(N-1)/(N-2))*HistMoment(counts, bin_centres, 3, mu)/(HistMoment(counts, bin_centres, 2, mu)**1.5)
+    return gamma
+
+
+def GP_lbda(mu, sigma, gamma):
+    lbda = 0.5*(((mu*gamma)/sigma) - 1)
+    return lbda
+
+
+def GP_gain(mu, sigma, gamma):
+    lbda = GP_lbda(mu, sigma, gamma)
+    gain = (sigma**2/mu)*((1 - lbda)**2)
+    return gain
+
+
+def GP_mu(mu, sigma, gamma):
+    lbda = GP_lbda(mu, sigma, gamma)
+    mu_gp = (1/(1-lbda))*(mu**2/sigma**2)
+    return mu_gp
diff --git a/LossFunctions.py b/LossFunctions.py
new file mode 100644
index 0000000..87eadd3
--- /dev/null
+++ b/LossFunctions.py
@@ -0,0 +1,105 @@
+import numpy as np
+from Model import epsilon
+from HelperFunctions import FakeFuncCode
+
+
+class BinnedLH:
+
+    def __init__(self, f, bin_centres, counts, bw):
+        self.f = f
+        self.x = bin_centres
+        self.dx = bw
+        self.counts = counts
+        self.N = np.sum(counts)
+
+        self.last_arg = None
+        self.func_code = FakeFuncCode(f, dock=True)
+        self.n_calls = 0
+        self.eps = epsilon()
+
+    def __call__(self, *arg):
+        self.last_arg = arg
+
+        y_hat = self.f(self.x, *arg)
+
+        y_hat = np.nan_to_num(y_hat, nan=self.eps, posinf=self.eps, neginf=self.eps)
+        y_hat = np.where(y_hat < self.eps, self.eps, y_hat)
+
+        # expected counts per bin from the normalised model density
+        E = y_hat*self.N*self.dx
+        h = self.counts
+        mask = (h > 0)
+        E = E[mask]
+        h = h[mask]
+
+        # negative log-likelihood ratio of the binned Poisson likelihood
+        nlogL = -np.sum(h*(np.log(E) - np.log(h)) + (h - E))
+
+        self.n_calls += 1
+
+        return nlogL
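+
+
+########################################################################################################
+## Example (sketch): minimising BinnedLH with iminuit for a toy Gaussian model; the model,
+## data and starting values are illustrative only. Uncomment to run standalone.
+########################################################################################################
+
+# if __name__ == "__main__":
+#     from iminuit import Minuit
+#     from scipy.stats import norm
+#     rng = np.random.default_rng(42)
+#     counts, edges = np.histogram(rng.normal(0.0, 1.0, 10000), bins=100)
+#     centres = 0.5*(edges[:-1] + edges[1:])
+#
+#     def model(x, mu, sigma):   # must be a normalised density in x
+#         return norm.pdf(x, mu, sigma)
+#
+#     nll = BinnedLH(model, centres, counts, edges[1] - edges[0])
+#     m = Minuit(nll, mu=0.1, sigma=0.9)
+#     m.errordef = Minuit.LIKELIHOOD
+#     m.migrad()
+#     print(m.values)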
+
+
+class Chi2Regression:
+
+    def __init__(self, f, x, y, y_err):
+        self.f = f
+        self.x = x
+        self.y = y
+        self.y_err = y_err
+        self.eps = np.finfo(np.float64).eps * 10
+        self.y_err[self.y_err < self.eps] = self.eps
+        self.last_arg = None
+        self.func_code = FakeFuncCode(f, dock=True)
+        self.ndof = len(self.y) - (self.func_code.co_argcount - 1)
+
+    def __call__(self, *arg):
+        self.last_arg = arg
+
+        y_hat = self.f(self.x, *arg)
+
+        loss = ((y_hat - self.y)/self.y_err)**2
+
+        return np.sum(loss)
+
+
+class HuberRegression:
+
+    def __init__(self, f, x, y, y_err, delta=1.345):
+        self.f = f
+        self.x = x
+        self.y = y
+        self.y_err = y_err
+        self.delta = delta
+        self.eps = np.finfo(np.float64).eps * 10
+        self.y_err[self.y_err < self.eps] = self.eps
+        self.last_arg = None
+        self.func_code = FakeFuncCode(f, dock=True)
+        self.ndof = len(self.y) - (self.func_code.co_argcount - 1)
+
+    def __call__(self, *arg):
+        self.last_arg = arg
+
+        # standardised absolute residuals
+        a = abs((self.y - self.f(self.x, *arg))/self.y_err)
+
+        # Huber loss: quadratic core, linear tails beyond delta
+        loss = np.where(a > self.delta,
+                        self.delta*(a - 0.5*self.delta),
+                        0.5*a**2)
+
+        return np.sum(loss)
\ No newline at end of file
diff --git a/Model.py b/Model.py
new file mode 100644
index 0000000..6fdc8b2
--- /dev/null
+++ b/Model.py
@@ -0,0 +1,824 @@
+from scipy.signal import fftconvolve as convolve
+from itertools import product
+from scipy.stats import poisson, norm, binom
+from AdditionalPDFs import gpoisson, borel
+from numba import njit
+import numpy as np
+from functools import lru_cache
+import matplotlib.pyplot as plt
+
+#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+########################################################################################################
+## DETECTOR RESPONSE MODEL
+########################################################################################################
+#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+########################################################################################################
+## VARIABLES:
+########################################################################################################
+
+########################################################################################################
+## Q: bin centres of studied charge distribution in arb. units;
+## Q_0: pedestal position in arb. units
+## G: gain in arb. units
+## mu: mean number of Geiger discharges in n.p.e
+## lbda: GP-branching parameter (unitless)
+## sigma_0: electronics noise/pedestal width in arb. units
+## sigma_1: gain spread width in arb. units;
+## tau_Ap: afterpulse time constant in nanoseconds;
+## p_Ap: afterpulse scaling parameter (unitless)
+## DCR: dark count rate in gigahertz;
+## tau: slow-component time constant of SiPM pulse in nanoseconds
+## tau_R: recovery time of SiPM in nanoseconds
+## t_gate: gate length in nanoseconds
+## t_0: pre-gate integration window in nanoseconds.
+## k_max: estimated maximum number of light peaks to fit, based on a percentile of a Poisson distribution with the estimated mean, as determined in PeakOTron.
+## k_max_dcr: estimated maximum number of dark-count peaks to evaluate, determined analogously in PeakOTron.
+## len_pad: number of bins to pad for convolution. Default value: 100 bins.
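+##
+## Example (sketch; all parameter values below are illustrative only):
+##
+##   Q = np.arange(0.0, 400.0, 1.0)   # bin centres in ADC
+##   pdf = DRM(Q, Q_0=50.0, G=20.0, mu=2.0, lbda=0.2, sigma_0=2.0, sigma_1=1.0,
+##             tau_Ap=15.0, p_Ap=0.1, DCR=1e-4, tau=20.0, tau_R=20.0,
+##             t_gate=100.0, t_0=50.0, k_max=8, k_max_dcr=4, len_pad=100)
+##   # the result is a density in Q, so np.trapz(pdf, dx=1.0) should be ~1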
+## +######################################################################################################## + + + + + +######################################################################################################## +## FUNCTION DEFINITIONS: +######################################################################################################## + + + + + + + + + + + + +######################################################################################################## +## epsilon() +######################################################################################################## +## +## Returns machine epsilon for thresholding zeros +## +######################################################################################################## + + +@njit('float64()') +def epsilon(): + return np.finfo(np.float64).eps + +######################################################################################################## +## sigmak(k, sigma_0, sigma_1) +######################################################################################################## +## +## Returns width of peak number k, sigma_k(k, sigma_0, sigma_1) = sqrt(sigma_0^2 + k*sigma__1^2) +## +######################################################################################################## + +@njit +def sigmak(k, sigma_0, sigma_1): + return np.sqrt(sigma_0**2 + k*sigma_1**2) + + + +######################################################################################################## +## Convolve(A, B, idx_0, length, dx, norm=True) +######################################################################################################## +## +## Calculates convolution of A and B using scipy fftconvolve, selects the convolved range relative to the pedestal index due to convolution shift, +## and multiplies by dx to renormalise the distribution. Then, corrects for any nans/infs and ensures normalisation by applying Simpson's rule. +## +######################################################################################################## + +def Convolve(A, B, idx_0, length, dx, norm=True): + A_o_B = convolve(A, B)[idx_0:length+idx_0]*dx + A_o_B = np.nan_to_num(A_o_B, nan=0.0, posinf=0.0, neginf=0.0) + if(norm): + A_o_B = A_o_B/max(epsilon(), np.trapz(A_o_B, dx=dx)) + return A_o_B + + + +####################################################################################################### +# ConvolveList(A, B, idx_0, length, dx, norm=True) +# Applies recursive 'Convolve' for lists of functions. 
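+#
+# Example (sketch): self-convolution of a normalised Gaussian on a uniform grid, where idx_0
+# is the index of the zero/pedestal bin; the grid and width values are illustrative.
+#
+#   dx = 0.01
+#   x = np.arange(-10.0, 10.0, dx)
+#   g = norm.pdf(x, 0.0, 1.0)
+#   idx_0 = np.argmin(abs(x))
+#   g2 = Convolve(g, g, idx_0, len(x), dx)           # ~ N(0, sqrt(2)) on the same grid
+#   g3 = ConvolveList([g, g, g], idx_0, len(x), dx)  # ~ N(0, sqrt(3))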
+####################################################################################################### + + +def ConvolveList(lst, idx_0, length, dx, norm=True): + res = lst[0] + for val in lst[1:]: + res = Convolve(res, val, idx_0, length, dx, norm=True) + return res + +####################################################################################################### +# Multichoose(n, k) +# Finds combinations that add up to k +####################################################################################################### + +@lru_cache(maxsize=None) +def Multichoose(n,k): + if k < 0 or n < 0: return "Error" + if not k: return [[0]*n] + if not n: return [] + if n == 1: return [[k]] + return [[0]+val for val in Multichoose(n-1,k)] + \ + [[val[0]+1]+val[1:] for val in Multichoose(n,k-1)] + + + + + + + + + + + + + + + + + +######################################################################################################## +## AFTERPULSE FUNCTIONS +######################################################################################################## + + +######################################################################################################## +## Beta(tau_R, tau_Ap, t_gate): +######################################################################################################## +# +# Normalisation constant for the afterpulse p.d.f +# +# Beta(tau_R, tau_Ap, t_gate) = ∫(1 - exp(-t/tauR)) x exp(-t/tauAp)/tauAp from 0 to tgate +# +######################################################################################################## +@njit +def Beta(tau_R, tau_Ap, t_gate): + return (tau_Ap - np.exp(-t_gate/tau_Ap)*(tau_Ap + tau_R*(1 - np.exp(-t_gate/tau_R))))/(tau_Ap + tau_R) + +######################################################################################################## +## P_Ap(tau_R, tau_Ap, t_gate): +######################################################################################################## +# +# Calculates afterpulse discharge probability from scaling factor p_Ap and maximum discharge scaling factor Beta. +# +# Afterpulses can only have a maximum +######################################################################################################## +@njit +def P_Ap(tau_R, tau_Ap, t_gate, p_Ap): + return p_Ap*Beta(tau_R, tau_Ap, t_gate) + + +######################################################################################################## +## dPAp(tau_R, tau_Ap, t_gate, p_Ap) +######################################################################################################## +## +## Returns error on total afterpulse probability, dPAp, occurring between 0 and t_gate. 
+## +######################################################################################################## + +@njit +def dP_Ap(tau_R, tau_Ap, t_gate, p_Ap, dtau_Ap, dp_Ap): + dPApdtauAp = p_Ap*((t_gate*(-tau_Ap - tau_R*(1 - np.exp(-t_gate/tau_R)))*np.exp(-t_gate/tau_Ap))/tau_Ap**2 + 1 - np.exp(-t_gate/tau_Ap))/(tau_Ap+tau_R) - p_Ap*(tau_Ap - (tau_Ap + tau_R*(1 - np.exp(-t_gate/tau_R)))*np.exp(-t_gate/tau_Ap))/(tau_Ap + tau_R)**2 + + dPApdpAp = (tau_Ap - (tau_Ap + tau_R*(1 - np.exp(-t_gate/tau_R)))*np.exp(-t_gate/tau_Ap))/(tau_Ap + tau_R) + + dPAp = np.sqrt((dPApdtauAp**2)*(dtau_Ap**2) + (dPApdpAp**2)*(dp_Ap**2)) + + return dPAp + + +######################################################################################################## +## P1(tau_R, tau_Ap, t_gate): +######################################################################################################## +# +# P1(T<t)= ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, where t<t_gate +# +######################################################################################################## +@njit +def P1(t, tau_R, tau_Ap, t_gate): + p = -np.exp(-t/tau_Ap)*(tau_Ap + tau_R*(1 - np.exp(-t/tau_R)))/(tau_R + tau_Ap) + return p + +######################################################################################################## +## EvalP1(t0, t1, tau_R, tau_Ap, t_gate): +######################################################################################################## +# +# Cumulative Density Function of dp1dt +# P1(T<t)= Beta^-1(tau_R, tau_Ap, t_gate) x ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, where t<t_gate +# +######################################################################################################## +@njit +def EvalP1(t0, t1, tau_R, tau_Ap, t_gate): + return (P1(t1, tau_R, tau_Ap, t_gate) - P1(t0, tau_R, tau_Ap, t_gate)) + +######################################################################################################## +## tAp( Q, tau, t_gate): +######################################################################################################## +# +# Evaluate tAp between +# P1(T<t)= Beta^-1(tau_R, tau_Ap, t_gate) x ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, where t<t_gate +# +######################################################################################################## +@njit +def tAp( Q, tau, t_gate): + return t_gate/2 - tau*np.arccosh(((1-Q)*np.exp(t_gate/(2*tau)) + np.exp(-t_gate/(2*tau)))/2) + + +######################################################################################################## +## dp1dQ(Q, tau, tau_R, tau_Ap, t_gate) +######################################################################################################## +## +## Returns discretised afterpulse p.d.f as a function of n.p.e +## Note: as in Chmill et al. However, the two branches of solutions given by t and t_gate -t are summed together. This demonstrably yields a normalised probability distribution. +## +## To understand what is going on here, it takes a little explaining. +## We exploit the fact that the cumulative distribution distribution F_Q(Q<q) = F(T<tQ(Q)), from which the derivation of the paper can be derived. +## +## F_Q(Q<q) = F(T<tAp(Q)) +## f_Q(q) = F^'(tAp(q)) x |dtAp/dQ| +## |dtAp/dQ| = |dQ/dtAp|^-1 +## F^'(t) = fAp(t) +## f_Q(q) = fAp(tAp(q)) x |dQ/dtAp|^-1 +## +## We do not need to do all that complicated stuff. All we need to do is evaluate F(T<tAp(Q)) between the bins in the time domain, then divide through by dQ +## +## This is easy. 
We evaluate the integral of ∫(1 - exp(-t/tauR))*exp(-t/tauAp)/tauAp from 0 to t, (P1) between the bins given by Q +/- dQ/2, then divide through by beta and bin width. +## +## 1. Calculate Q_max for afterpulse spectrum. +## +## 2. Calculate beta for the afterpulse spectrum. +## +## 3. Select the indices that are within the range of 0 and Q_max +## +## 4. Create empty arrays for the pdfs for both branches (pdf_b0, pdf_b1) and the times we will be evaluating (t_m, t_p) +## +## 5. Find the bins in the t_Ap space by evaluating t_Ap_m = t_Ap(Q-dx/2), t_Ap_p = t_Ap(Q+dx/2) +## +## 6. Integrate over the bins forwards in time using EvalP1 for pdf_b0 +## +## 7. Now to evaluate the edges. The problem here is that t_Ap_m and t_Ap_p may have elements below 0/above Q_max, which we cannot account for. +## We replace the first bin of the distribution with the integral from 0 to the lower bin edge. +## +## Note that Q[idx_Q_min+1]-dx/2 = Q[idx_Q_min] + dx/2. So, t_m[idx_Q_0+1] is the lower edge of the last bin. +## The reason that we do it this way is because we do not have an 'upper' edge at t_p[idx_Q_0] +## So the integral of the first bin is evaluated between 0 and t_m[idx_Q_0+1] +## +## You can check the integral is correct by uncommenting the code. +## +## 8. We replace the last bin of the distribution with the integral from the upper edge to tgate/2. +## +## Note that Q[idx_Q_max-1]+dx/2 = Q[idx_Q_max] - dx/2. So, t_p[idx_Q_0+1] is the lower edge of the last bin. +## The reason that we do it this way is because we do not have an 'upper' edge at t_p[idx_Q_0] +## So the integral of the first bin is evaluated between 0 and t_p[idx_Q_0+1] +## +## You can check the integral is correct by uncommenting the code. +## +## 9. Repeat 6-8 for pdf_b1. Note that because the time is reversed in this part, all the elements of evaluating the edges of the bin have to be reversed too! +## +## 10. Normalise the integral to beta and dx +## +## IMPORTANT NOTE! +## As the granularity of the spectrum increases, so too do we encounter the problem where poles invariably occur. +## numpy cannot accurately assess the value of np.log(1-epsilon()). This happens where tau is very small relative to t0 or tgate. +## +## Unjit the function before printing! +######################################################################################################## +@njit +def dp1dQ(Q, tau, tau_R, tau_Ap, t_gate, dx): + + + #1. + Q_max = (1-np.exp(-t_gate/(2*tau)))**2 + + + if(Q_max>1-epsilon()): + raise Exception("The afterpulse spectrum of this distribution cannot be normalised, because of the pole at Q_max, resulting in infinite probability density. This is probably because tau is very small, or tgate is very large compared to the binning of the spectrum. Try increasing tau, or reducing tgate") + + #2 + beta = Beta(tau_R, tau_Ap, t_gate) + + #3 + idx_Q_max = np.argmax(Q>Q_max)-1 + idx_Q_min = np.argmax(Q>0) + + #4 + t_Ap_m = np.zeros_like(Q) + t_Ap_p = np.zeros_like(Q) + pdf_b0 = np.zeros_like(Q) + pdf_b1 = np.zeros_like(Q) + + #5 + t_Ap_m[idx_Q_min:idx_Q_max]= tAp(Q[idx_Q_min:idx_Q_max]-dx/2, tau, t_gate) + t_Ap_p[idx_Q_min:idx_Q_max]= tAp(Q[idx_Q_min:idx_Q_max]+dx/2, tau, t_gate) + + #6 + pdf_b0[idx_Q_min:idx_Q_max] = EvalP1(t_Ap_m[idx_Q_min:idx_Q_max], + t_Ap_p[idx_Q_min:idx_Q_max], + tau_R, + tau_Ap, t_gate) + + #7. + pdf_b0[idx_Q_min] = EvalP1(0, t_Ap_m[idx_Q_min+1], tau_R, tau_Ap, t_gate) + + #8. 
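+    # Replace the last bin with the integral from its lower edge up to t_gate/2, where
+    # tAp(Q_max) = t_gate/2; t_Ap_p is undefined beyond Q_max, so this recovers the tail
+    # probability at the upper edge (cf. step 7 at the lower edge).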
+ pdf_b0[idx_Q_max] = EvalP1(t_Ap_p[idx_Q_max-1], t_gate/2, tau_R, tau_Ap, t_gate) + + ######################################### + ## Check integral of pdfb0: + ######################################### + +# print("CHECK INTEGRAL pdfb0 ", +# np.trapz(pdf_b0/beta/dx, dx=dx), +# EvalP1(0, t_gate/2, tau_R, tau_Ap, t_gate)/beta, +# np.allclose(np.trapz(pdf_b0/beta/dx, dx=dx), +# EvalP1(0, t_gate/2, tau_R, tau_Ap, t_gate)/beta)) + + + ######################################### + + + #9. + pdf_b1[idx_Q_min:idx_Q_max] = EvalP1(t_gate-t_Ap_p[idx_Q_min:idx_Q_max], + t_gate-t_Ap_m[idx_Q_min:idx_Q_max], + tau_R, + tau_Ap, t_gate) + + + + pdf_b1[idx_Q_min] = EvalP1(t_gate-t_Ap_m[idx_Q_min+1], t_gate, tau_R, tau_Ap, t_gate) + pdf_b1[idx_Q_max] = EvalP1(t_gate/2, t_gate-t_Ap_p[idx_Q_max-1], tau_R, tau_Ap, t_gate) + + ######################################### + ## Check integral of pdfb1: + ######################################### + +# print("CHECK INTEGRAL pdfb0 ", +# np.trapz(pdf_b1/beta/dx, dx=dx), +# EvalP1(t_gate/2, t_gate, tau_R, tau_Ap, t_gate)/beta, +# np.allclose(np.trapz(pdf_b1/beta/dx, dx=dx), +# EvalP1(t_gate/2, t_gate, tau_R, tau_Ap, t_gate)/beta)) + + ######################################### + #10 + pdf = (pdf_b0 + pdf_b1)/beta/dx + + + ######################################## + # Check integral of pdf: + ######################################## + +# print("CHECK INTEGRAL pdf ", np.trapz(pdf, dx=dx), 1, np.allclose(np.trapz(pdf, dx=dx), 1)) + +# plt.figure() +# plt.plot(Q, pdf) +# plt.xlim(0-5*dx, Q_max+5*dx) +# plt.yscale("log") +# plt.show() + + ######################################## + + return pdf + + + + + + + + + + + + + +######################################################################################################## +## DARK COUNT FUNCTIONS +######################################################################################################## + + + + +######################################################################################################## +## EvalDCRBFG(Q_0, Q_1, h_order) +######################################################################################################## +## +## Evaluates integral ∫1/(1-(Q/h_order)) between Q0 and Q1. +## h_order means the 'stretching factor' of the distribution for cross-talk +######################################################################################################## + +@njit +def EvalDCRBFG(Q_0, Q_1, h_order): + return h_order*np.log(Q_1) - h_order*np.log(Q_0) + +######################################################################################################## +## EvalDCRDG(Q_0, Q_1, h_order) +######################################################################################################## +## +## Evaluates integral ∫1/(Q/h_order) between Q0 and Q1. +## h_order means the 'stretching factor' of the distribution for cross-talk +######################################################################################################## + +@njit +def EvalDCRDG(Q_0, Q_1, h_order): + return -h_order*np.log(h_order-Q_1) + h_order*np.log(h_order-Q_0) + + + +######################################################################################################## +## dpDCR1dQ(Q, tau, t_gate, t_0, dx, h_order=1) +######################################################################################################## +## +## Evaluates discretised dark distribution at cross talk order of h_order, which stretches the distribution. 
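+## (Example, sketch with arbitrary values: for tau = 20 ns, t_gate = 100 ns, t_0 = 50 ns and
+## h_order = 1, the support runs from Q_min = exp(-t_0/tau)*(1 - exp(-t_gate/tau)) ≈ 0.082
+## to Q_max = 1 - exp(-t_gate/tau) ≈ 0.993.)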
+## h_order=1 means the primary dark count distribution +## +## The distribution has to be artificially discretised and normalised. +## There are poles of this distribution, so we have to be extremely careful to normalise the edges of the distribution +## The method is explained here. +## +## dpDCR1dQ(Q, tau, tgate, t0) = (tau/(t_gate + t_0))(1/Q + 1/(1-Q)) +## +## 1. Evaluate the maxima and minima Q_min, and Q_max. We cannot take Q/h_order, so for the integral +## to be properly normalised, we scale the limits by the stretching factor, h_order. +## +## 2. Evaluate the indices of the last values of Q in the spectrum before Q_min, Q_max and 0. +## Q_min to Q_max are the limits before the gate, 0 to Q_max are the limits after the gate. +## +## 3. Create empty zero arrays for the pdfs. pdf_bg is the pdf before the gate, and pdf_dg is the pdf during the gate. +## +## 4. For all the available indices between the limits, evaluate the integrals of EvalDCRBFG, between each bin edge. +## This will become our probability after scaling. +## +## 5. Find the integral from Q_min to the lower bin edge of the range that we have evaluated, +## and set it to the first available bin index. +## +## Note that Q[idx_Q_0+1]-dx/2 = Q[idx_Q_0] + dx/2. The reason that we do it this way is +## for consistency with the dp1dQ method, which has special considerations (see dp1dQ function) +## +## +## 6. Find the integral from the upper bin edge that we have evaluated to Q_max, +## and set it to the last available bin index. +## +## Note that Q[idx_Q_0-1]+dx/2 = Q[idx_Q_0] - dx/2. The reason that we do it this way is +## for consistency with the dp1dQ method, which has special considerations (see dp1dQ function) +## +## You can check the function is working at this stage by uncommenting the integral check. The integral should be t_0/tau +## +## 7. Repeat steps 4-6 for EvalDCRDG, this time using 0 as the lower limit. Everything stated in 4-6 still applies. +## +## You can check the function is working at this stage by uncommenting the integral check. The integral should be t_gate/tau +## +## 8. We obtain the probability per bin by multiplying by the normalisation constant tau/(t_0 + t_gate). +## +## We also need to divide through by h_order, because the integral is h_order times as large if we scale +## +## Then we divide through by dx to get the discretised probability distribution. +## +## You can check the function is working at this stage by uncommenting the integral check. The integral should be 1.0 +## +## 9. We divide through by the integral to correct for numerical error. +## +## IMPORTANT NOTE! +## As the granularity of the spectrum increases, so too do we encounter the problem where poles invariably occur. +## numpy cannot accurately assess the value of np.log(1-epsilon()). This happens where tau is very small relative to t0 or tgate. +## +######################################################################################################## + +@njit +def dpDCR1dQ(Q, tau, t_gate, t_0, dx, h_order=1): + + #1. + + + Q_min = h_order*np.exp(-t_0/tau)*(1 - np.exp(-t_gate/tau)) + Q_max = h_order* (1 - np.exp(-t_gate/tau)) + + + if(Q_min<h_order*epsilon()): + raise Exception("The dark count spectrum of this distribution cannot be normalised, because of the pole at Q_min, resulting in infinite probability density. This is probably because tau is very small, or tgate/t0 is very large compared to the binning of the spectrum. 
Try increasing tau, or reducing tgate/t0") + + + + if(Q_max>h_order-h_order*epsilon()): + raise Exception("The dark count spectrum of this distribution cannot be normalised, because of the pole at Q_max, resulting in infinite probability density. This is probably because tau is very small, or tgate/t0 is very large compared to the binning of the spectrum") + + + #2. + + idx_Q_min = np.argmax(Q>Q_min) + idx_Q_max = np.argmax(Q>Q_max)-1 + idx_Q_0 = np.argmax(Q>0) + + + #3. + pdf_bfg = np.zeros_like(Q) + pdf_dg = np.zeros_like(Q) + + #4. + pdf_bfg[idx_Q_min:idx_Q_max] = EvalDCRBFG(Q[idx_Q_min:idx_Q_max]-dx/2, + Q[idx_Q_min:idx_Q_max]+dx/2, h_order) + + #5. + pdf_bfg[idx_Q_min]= EvalDCRBFG(Q_min, Q[idx_Q_min+1]-dx/2, h_order) + + #6. + pdf_bfg[idx_Q_max]= EvalDCRBFG(Q[idx_Q_max-1]+dx/2, Q_max, h_order) + + ######################################### + ## Check integral of pdf_bfg: + ######################################### + + #print("CHECK INTEGRAL pdf_bfg ", np.trapz(pdf_bfg/dx, dx=dx), t_0/tau, np.allclose(np.trapz(pdf_bfg/dx, dx=dx), t_0/tau)) + + ######################################### + + #7. + pdf_dg[idx_Q_0:idx_Q_max] = EvalDCRDG(Q[idx_Q_0:idx_Q_max]-dx/2, + Q[idx_Q_0:idx_Q_max]+dx/2, h_order) + + pdf_dg[idx_Q_0]= EvalDCRDG(0, Q[idx_Q_0+1]-dx/2, h_order) + pdf_dg[idx_Q_max]= EvalDCRDG(Q[idx_Q_max-1]+dx/2, Q_max, h_order) + + + ######################################### + ## Check integral of pdf_dg: + ######################################### + + #print("CHECK INTEGRAL pdf_dg ", np.trapz(pdf_dg/dx, dx=dx), t_gate/tau, np.allclose(np.trapz(pdf_dg/dx, dx=dx), t_gate/tau)) + + ######################################### + + + pdf = (pdf_bfg + pdf_dg)*(tau/(t_gate + t_0)) + pdf = pdf/dx/h_order + + + #8. + + ######################################### + ## Check integral of pdf: + ######################################### + +# print("CHECK INTEGRAL pdf ", np.trapz(pdf, dx=dx), 1, np.allclose(np.trapz(pdf, dx=dx), 1)) + +# plt.figure() +# plt.plot(Q, pdf) +# plt.xlim(0-5*dx, Q_max+5*dx) +# plt.yscale("log") +# plt.show() + + ######################################### + + + pdf = pdf/max(epsilon(), np.trapz(pdf, dx=dx)) + + + return pdf + + + + + + + + + + +######################################################################################################## +## DRM(Q, Q_0, G, mu, lbda, sigma_0, sigma_1, tau_Ap, p_Ap, DCR, tau, tau_R, t_gate, t_0, k_max, len_pad) +######################################################################################################## +## +## Calculates full detector response model. +## Notes: +## - The model is fitted in n.p.e scale. k = (Q - Q_0)/G. 
+## - To rescale to the input parameter unit, the final pdf is scaled by a factor of 1/G - +## this is because dp/dQ = (dp/dk)*(dk/dQ), dk/dQ = d/dQ (Q - Q_0)/G = 1/G +## +######################################################################################################## + +def DRM(Q, Q_0, G, mu, lbda, sigma_0, sigma_1, tau_Ap, p_Ap, DCR, tau, tau_R, t_gate, t_0, k_max, k_max_dcr, len_pad): + + + t_0 = abs(t_0) + len_pad = int(len_pad) + + + + len_Q = len(Q) + len_Q_pad = len_Q + 2*len_pad + Q_idx = np.arange(0, len_Q) + Q_idx_pad = np.arange(-len_pad, len_Q+len_pad) + dx = abs(Q[1]-Q[0]) + + Q_pad = (Q[0] - dx*len_pad) + dx*np.arange(len_Q_pad) + + k_pad = (Q_pad - Q_0)/G + + dx_k = dx/G + + idx_k_0_pad = np.argmin(abs(k_pad)) + + + + Q_idx_orig = np.where(np.in1d(Q_idx_pad, + np.intersect1d( + Q_idx_pad, + Q_idx + ) + ), True, False) + + + k_pg = np.arange(0, max(k_max, 3)).astype(int) + + + + + Ppgs = np.expand_dims(gpoisson.pmf(k_pg, mu, lbda), -1) + + Pns = np.zeros((int(k_max), int(k_max))) + + alpha = P_Ap(tau_R, tau_Ap, t_gate, p_Ap) + + dp0dQs = np.vstack([norm.pdf(k_pad, loc=_k, scale=sigmak(_k, sigma_0/G, sigma_1/G)) for _k in k_pg]) + + dpndQs = [] + + + for _k in k_pg: + if(_k==0): + _dpndQ = np.zeros(len(k_pad)) + Pns[0,0] = binom.pmf(0, 0, alpha) + elif(_k==1): + _dpndQ = dp1dQ(k_pad, tau, tau_R, tau_Ap, t_gate, dx_k) + + + Pns[1,0:2] = np.array(binom.pmf(np.arange(0, 2).astype(int), _k, alpha)) + else: + _dpndQ = Convolve(dpndQs[1], dpndQs[-1], idx_k_0_pad, len_Q_pad, dx_k, norm=True) + Pns[_k,0:_k+1] = np.array(binom.pmf(np.arange(0, _k+1).astype(int), _k, alpha)) + + dpndQs.append(_dpndQ) + + dpndQs = np.vstack(dpndQs) + + + + + mu_DCR = DCR*(t_gate + t_0) + k_dc = np.arange(0, max(k_max_dcr, 3)).astype(int) + + fDCRs = [] + hDCRs = [] + + pdf_dark = np.zeros(len_Q_pad) + + for _k in k_dc: + if(_k==0): + f = np.zeros(len_Q_pad) + f[idx_k_0_pad] = 1/dx_k + h=f + + elif(_k==1): + f = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=1) + h = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=2) + + + else: + f = Convolve(fDCRs[1], fDCRs[-1], idx_k_0_pad, len_Q_pad, dx_k, norm=True) + h = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=_k+1) + + + fDCRs.append(f) + hDCRs.append(h) + + fDCRs = np.vstack(fDCRs) + hDCRs = np.vstack(hDCRs) + + + pDCRs_0 = poisson.pmf(0, mu_DCR) + pDCRs = [] + dpDCRdQs = [] + + for k in k_dc: + + if(k==0): + continue + + else: + + _pDCR_k = [] + _dpDCRdQ_k = [] + + for j in range(1, k+1): + + +# combs = np.array(list(product(range(k-j+1), repeat=j))) +# combs = combs[np.sum(combs, axis=1)==k-j] + combs = Multichoose(j, k-j) + + for comb in combs: + + + + _dpDCRdQ_jks = [] + _lp_B_jks = [] + + + for _v, _f in zip(*np.unique(comb, return_counts=True)): + if(_v==0): + _dpDCRdQ_jk = [fDCRs[_f]] + if(_f>1): + _lp_B_jk = [_f*borel.logpmf(0, lbda)] + else: + _lp_B_jk = [borel.logpmf(0, lbda)] + + else: + _dpDCRdQ_jk = [hDCRs[_v] for _ in range(_f)] + if(_f>1): + _lp_B_jk = [_f*borel.logpmf(_v, lbda)] + else: + _lp_B_jk = [borel.logpmf(_v, lbda)] + + _dpDCRdQ_jks.extend(_dpDCRdQ_jk) + _lp_B_jks.extend(_lp_B_jk) + + __pDCR_k = np.exp(np.sum(_lp_B_jks) + poisson.logpmf(j, mu_DCR)) + __dpDCRdQ_k = ConvolveList(_dpDCRdQ_jks, idx_k_0_pad, len_Q_pad, dx_k, norm=True) + + _pDCR_k.append(__pDCR_k) + _dpDCRdQ_k.append(__dpDCRdQ_k) + + pDCRs.extend(_pDCR_k) + dpDCRdQs.extend(_dpDCRdQ_k) + + pDCRs = np.vstack(pDCRs) + dpDCRdQs = np.vstack(dpDCRdQs) + + + pdf_dark = np.sum(pDCRs*dpDCRdQs, axis=0) + pdf_dark = pdf_dark/max(epsilon(), np.trapz(pdf_dark, dx=dx_k)) + ## 
normalised by factor 1/(1 - pDCRs_0), and then rescaled to original unit after convolution + + + + pdf_drm = np.zeros(len_Q_pad) + + for _k in k_pg: + + _dp0dQ = float(Pns[_k, 0])*dp0dQs[_k, :] + + if(_k>0): + + + _dpndQ_sum = np.sum(np.vstack([ + float(Pns[_k, _j])*Convolve(dpndQs[_j, :], + dp0dQs[_k, :], + idx_k_0_pad, + len_Q_pad, + dx_k, + norm=True + ) + for _j in range(1, _k+1) + ]), + axis=0) + + else: + _dpndQ_sum = np.zeros(len_Q_pad) + + _dp0dQ = np.nan_to_num(_dp0dQ, nan=0.0, posinf=0.0, neginf=0.0) + _dpndQ_sum = np.nan_to_num(_dpndQ_sum, nan=0.0, posinf=0.0, neginf=0.0) + + + _pdf_light_k = (_dp0dQ + _dpndQ_sum) + _pdf_drm_k = pDCRs_0*_pdf_light_k + (1-pDCRs_0)*Convolve(_pdf_light_k, pdf_dark, idx_k_0_pad, len_Q_pad, dx_k, norm=True) + _pdf_drm_k = _pdf_drm_k/max(epsilon(), np.trapz(_pdf_drm_k, dx=dx_k)) + + _pdf_drm_k = float(Ppgs[_k])*_pdf_drm_k + pdf_drm += _pdf_drm_k + + + + pdf_drm = pdf_drm[Q_idx_orig]/G + + + + return pdf_drm + + + + + + + + + + diff --git a/PeakOTron.py b/PeakOTron.py new file mode 100644 index 0000000..31c9e97 --- /dev/null +++ b/PeakOTron.py @@ -0,0 +1,2149 @@ +from HelperFunctions import GP_gain, GP_lbda, GP_mu, HistMean, HistStd, HistSkew +from HelperFunctions import LatexFormat, Linear +from AdditionalPDFs import gpoisson +from LossFunctions import * +from Model import DRM, P_Ap, dP_Ap, sigmak, Beta +from iminuit import Minuit +from scipy.interpolate import interp1d, UnivariateSpline, InterpolatedUnivariateSpline +from scipy.stats import poisson, norm +from astropy.stats import knuth_bin_width, freedman_bin_width, scott_bin_width +from matplotlib import cm +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec +from matplotlib.ticker import MaxNLocator, ScalarFormatter +from itertools import chain +from copy import deepcopy +import time + + + +import matplotlib.pyplot as plt + + +#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +######################################################################################################## +## PEAKOTRON +######################################################################################################## +#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + + +class PeakOTron: + + + ######################################################################################################## + ## __init__ + ######################################################################################################## + ## + ## VARIABLES: + ## n_call_minuit: number of calls to Minuit for each trial + ## n_iterations_minuit: number of trial iterations if convergence is not achieved + ## n_bootstrap: number of bootstraps for each statistic + ## plot_figsize: figure size in inches for prefit figures + ## plot_fontsize: fontsize for output plots + ## plot_cmap: colour map for output plots + ## + ## This function defines the basic information stored by PeakOTron and the default fit value dictionary that is updated with the input parameters of ## the user. 
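+    ##
+    ## Example (sketch; the Fit call assumes the entry point exercised in example.py of this
+    ## patch, and all parameter values are illustrative):
+    ##
+    ##   pto = PeakOTron(verbose=True)
+    ##   pto.Fit(data, tau=20.0, t_gate=100.0, t_0=50.0, tau_R=20.0)   # times in ns
+    ##   fit_values, fit_errors = pto.GetFitResults()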
+ ## + ######################################################################################################## + + def __init__(self, + verbose=False, + n_call_minuit=30000, + n_iterations_minuit = 50, + n_bootstrap=100, + plot_figsize=(10,10), + plot_fontsize=25, + plot_legendfontsize=18, + plot_cmap = 'turbo' + ): + + self._default_hist_data={ + "count":None, + "density":None, + "bin_numbers":None, + "bin_centres":None, + "counts_bg":None, + "counts_bgsub":None, + "f_dcr_interpeak":None, + "bw":None, + "peaks":{ + "peak_position":None, + "peak_height":None, + "peak_index":None, + "peak_mean":None, + "peak_mean_error":None, + "peak_variance":None, + "peak_variance_error":None, + "peak_std_deviation":None, + "peak_std_deviation_error":None + } + } + + self._default_fit_dict={ + "Q_0":None, + "G":None, + "lbda":None, + "mu":None, + "sigma_0":None, + "sigma_1":None, + "DCR":None, + "tau":None, + "t_gate":None, + "t_0":None, + "k_max":None, + "len_pad":None + } + + + self._plot_figsize= plot_figsize + self._plot_fontsize= plot_fontsize + self._plot_legendfontsize= plot_legendfontsize + self._cmap = cm.get_cmap(plot_cmap) + self._n_bootstrap=n_bootstrap + self._len_pad = int(100) + self._verbose=verbose + self._n_call_minuit = n_call_minuit + self._n_iterations_minuit = n_iterations_minuit + + + self._max_n_peaks = None + self._max_n_peaks_dcr = 6 + self._max_tau_Ap = 30.0 + + + self._m_DRM = None + self._time_prefit = None + self._time_fit = None + + self._default_fit_kwargs={ + "tau":None, + "tau_err":None, + "t_0":None, + "t_0_err":None, + "tau_R":None, + "tau_R_err":None, + "t_gate":None, + "t_gate_err":None, + "bw":None, + "truncate_nG":None, + "prefit_only":False, + "peak_dist_nG":0.8, + "bin_method":"knuth" + } + + self.InitKwargs() + + + + + + ######################################################################################################## + ## InitKwargs() + ######################################################################################################## + ## + ## This function creates the fit dictionary and initialises the output dictionaries of the class. + ## + ######################################################################################################## + + def InitKwargs(self): + self._fit_kwargs = self._default_fit_kwargs.copy() + self._prefit_values = self._default_fit_dict.copy() + self._prefit_errors = self._default_fit_dict.copy() + self._fit_values= self._default_fit_dict.copy() + self._fit_errors = self._default_fit_dict.copy() + self._hist_data = self._default_hist_data.copy() + + + + ######################################################################################################## + ## SetMaxNPeaks(max_n_peaks) + ######################################################################################################## + ## + ## Typically, it is sufficient to determine the appropriate number of peaks to fit from mu. 
+    ## However, to make sure we don't accidentally create a fit with an excessive number of peaks,
+    ## an upper limit on the number of light peaks may be imposed with this function.
+    ##
+    ########################################################################################################
+
+    def SetMaxNPeaks(self, max_n_peaks):
+        max_n_peaks = int(max_n_peaks)
+        if(max_n_peaks < 4):
+            raise Exception("Maximum number of light peaks must be greater than 3.")
+
+        self._max_n_peaks = max_n_peaks
+        if(self._verbose):
+            print("Set maximum number of light peaks to {:d}.".format(self._max_n_peaks))
+
+    ########################################################################################################
+    ## SetMaxNPeaksDCR(max_n_peaks_dcr)
+    ########################################################################################################
+    ##
+    ## Typically, it is sufficient to determine the appropriate number of peaks to fit from mu.
+    ## However, to make sure we don't accidentally create a fit with an excessive number of peaks,
+    ## an upper limit on the number of dark peaks (default: 6) may be imposed with this function.
+    ##
+    ########################################################################################################
+
+    def SetMaxNPeaksDCR(self, max_n_peaks_dcr):
+        max_n_peaks_dcr = int(max_n_peaks_dcr)
+        if(max_n_peaks_dcr < 4):
+            raise Exception("Maximum number of dark peaks must be greater than 3.")
+
+        self._max_n_peaks_dcr = max_n_peaks_dcr
+        if(self._verbose):
+            print("Set maximum number of dark peaks to {:d}.".format(self._max_n_peaks_dcr))
+
+    ########################################################################################################
+    ## SetMaxTauAp(max_tau_Ap)
+    ########################################################################################################
+    ##
+    ## Sets the maximum allowed afterpulse time constant, tau_Ap, in nanoseconds (default: 30 ns).
+    ##
+    ########################################################################################################
+
+    def SetMaxTauAp(self, max_tau_Ap, override=False):
+        max_tau_Ap = float(max_tau_Ap)
+        if(max_tau_Ap < 1.0):
+            raise Exception("Maximum tau_Ap must exceed 1 ns.")
+
+        self._max_tau_Ap = max_tau_Ap
+        if(self._verbose):
+            print("Set maximum tau_Ap to {:3.3f} ns".format(self._max_tau_Ap))
+
+    ########################################################################################################
+    ## GetMinuit()
+    ########################################################################################################
+    ##
+    ## Returns the Minuit object after the fit. Note that this object stores fit results in bin units,
+    ## not in the charge unit of the input.
+    ##
+    ########################################################################################################
+
+    def GetMinuit(self):
+        if(self._m_DRM is None):
+            raise Exception("Fit has not yet been performed. Please first perform a fit.")
+        else:
+            return self._m_DRM
+
+    ########################################################################################################
+    ## GetFitTime()
+    ########################################################################################################
+    ##
+    ## Returns the fit time, for either the prefit or the full fit.
+ ## + ######################################################################################################## + + + + def GetFitTime(self, prefit=False): + + if(prefit): + if(self._time_prefit is None): + raise Exception ("Fit has not yet been performed. Please first perform a fit.") + else: + return self._time_prefit + + else: + if(self._time_fit is None): + raise Exception ("Fit has not yet been performed. Please first perform a fit.") + else: + return self._time_fit + + ######################################################################################################## + ## IsValid() + ######################################################################################################## + ## + ## Returns whether or not Minuit returned valid. + ## + ######################################################################################################## + + def IsValid(self): + if(self._m_DRM is None): + raise Exception ("Fit has not yet been performed. Please first perform a fit.") + + else: + return self._m_DRM.valid + + + ######################################################################################################## + ## GetFitResults() + ######################################################################################################## + ## + ## Returns fit results and errors in the input charge unit. + ## + ######################################################################################################## + + + + def GetFitResults(self, debug=True): + + + + if(self._m_DRM is None): + raise Exception ("Fit has not yet been performed. Please first perform a fit.") + else: + if(not debug): + temp_values = deepcopy(self._fit_values) + temp_errors = deepcopy(self._fit_errors) + + temp_values["p_Ap"] = P_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"]) + temp_errors["p_Ap"] = dP_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"], self._fit_errors["tau_Ap"], self._fit_errors["p_Ap"]) + + t_fit = self.GetFitTime(prefit=False) + t_prefit = self.GetFitTime(prefit=True) + + temp_values["fit_time"] = t_fit + temp_values["prefit_time"] = t_prefit + + + return temp_values, temp_errors + + else: + + temp_values = deepcopy(self._fit_values) + temp_errors = deepcopy(self._fit_errors) + + temp_values["P_Ap"] = P_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"]) + temp_errors["P_Ap"] = dP_Ap(self._fit_values["tau_R"], self._fit_values["tau_Ap"], self._fit_values["t_gate"], self._fit_values["p_Ap"], self._fit_errors["tau_Ap"], self._fit_errors["p_Ap"]) + + t_fit = self.GetFitTime(prefit=False) + t_prefit = self.GetFitTime(prefit=True) + + temp_values["fit_time"] = t_fit + temp_values["prefit_time"] = t_prefit + + return temp_values, temp_errors + + + ######################################################################################################## + ## GetPrefitResults() + ######################################################################################################## + ## + ## Returns pre-fit results and errors in the input charge unit. 
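+    ##
+    ## Example (sketch, with pto a fitted PeakOTron instance as above):
+    ##
+    ##   prefit_values, prefit_errors = pto.GetPrefitResults()
+    ##   chi2, ndof = pto.GetChi2()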
+ ## + ######################################################################################################## + + def GetPrefitResults(self): + return self._prefit_values, self._prefit_errors + + + ######################################################################################################## + ## GetPeakInfo() + ######################################################################################################## + ## + ## Returns peak information obtained during the fitting procedure. + ## + ######################################################################################################## + + def GetPeakInfo(self): + + if(self._m_DRM is None): + raise Exception ("Fit has not yet been performed. Please first perform a fit.") + else: + return self._hist_data + + + ######################################################################################################## + ## GetChi2(prefit=False) + ######################################################################################################## + ## + ## VARIABLES: + ## prefit(False): Evaluate the model using the prefit or fitted values + ## + ## + ## + ## Calculates the Chi2 of the resultant fit relative to the original density estimate. + ######################################################################################################## + + + def GetChi2(self, prefit=False): + + if(self._m_DRM is None): + raise Exception ("Fit has not yet been performed. Please first perform a fit.") + else: + + x = self._hist_data["bin_centres"] + y = self._hist_data["counts"] + y_err = self._hist_data["counts_error"] + + bw = self._hist_data["bw"] + N = np.sum(y) + y_hat = N*bw*self.GetModel(x, prefit) + + + chi2 = np.nansum(((y_hat - y)/y_err)**2) + ndof = len(x) - 9 + return chi2, ndof + + + + ######################################################################################################## + ## GetModel(x, prefit=False) + ######################################################################################################## + ## + ## VARIABLES: + ## x: the x values over which to evaluate the model. + ## prefit (False): whether or not to use the prefit values in the model evaluation. + ## + ## + ## Evaluates model according to the fitted parameters and returns the Detector Response Model + ######################################################################################################## + + + def GetModel(self, x, prefit=False): + + if(self._m_DRM is None): + raise Exception ("Fit has not yet been performed. Please first perform a fit.") + else: + if(prefit): + return DRM(x, **self._prefit_values) + else: + return DRM(x, **self._fit_values) + + + + ######################################################################################################## + ## GetBins(data, bw, bin_method) + ######################################################################################################## + ## + ## VARIABLES: + ## data: the input data, either a 1D array of individual discharges or a 2D array of [bin centres, counts] + ## bw: the bin width. If this is None, then the bin_method will be used to choose the binning if 1D + ## bin_method: the method for binning 1D data. This can be 'knuth', 'freedman', or 'scott' + ## + ## Automated calculation of bins and bin widths for 2D prebinned charge measurements or 1D charge measurements. + ## Note: because spectra from scopes have been observed with variable bin widths, the program also rebins 2D histograms using the same bin width as the original spectrum. 
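+    ##
+    ## Example (sketch): for a 1D array of charges with automatic bin-width selection,
+    ##
+    ##   data, bw, nbins, bins = self.GetBins(charges, bw=None, bin_method="knuth")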
+    ########################################################################################################
+
+    def GetBins(self, data, bw, bin_method):
+
+        if(data.ndim == 1):
+
+            if(self._verbose):
+                print("Data is assumed 1D. Assuming list of charges.")
+
+            if(bw is None):
+                if(self._verbose):
+                    print("Bin width not set. Binning with {:s}".format(bin_method))
+
+                if(bin_method == "knuth"):
+                    bw = knuth_bin_width(data)
+                elif(bin_method == "freedman"):
+                    bw = freedman_bin_width(data)
+                elif(bin_method == "scott"):
+                    bw = scott_bin_width(data)
+                else:
+                    raise Exception("Binning method not recognised. Please select bin_method from 'knuth', 'freedman' or 'scott'.")
+
+            x_min, x_max = data.min(), data.max()
+
+        elif(data.ndim == 2):
+
+            if(self._verbose):
+                print("Data is assumed 2D. Assuming input is a histogram, with columns [bin_centre, counts].")
+
+            _x, _y = data[:,0], data[:,1]
+            data = np.array(list(chain.from_iterable([[__x]*int(__y) for __x, __y in zip(_x, _y)])))
+            f_bin = interp1d(np.arange(0, len(_x)), _x)
+            if(bw is None):
+                bw = abs(f_bin(1) - f_bin(0))
+            idx_nonzero = np.squeeze(np.argwhere((_y>0)))
+            x_min, x_max = _x[np.min(idx_nonzero)]-bw/2, _x[np.max(idx_nonzero)]+bw/2
+
+        else:
+            raise Exception("Data is neither 1D nor 2D. Please provide data with the correct dimensions to the fit program.")
+
+        nbins = int(np.ceil((x_max - x_min) / bw))
+        nbins = max(1, nbins)
+        bins = x_min + bw * np.arange(nbins + 1)
+        return data, bw, nbins, bins
+
+
+    def PlotHistogram(self, figsize=(10.0,10.0), save_directory=None, label="Histogram", x_label="ADC", display=True):
+
+        fig = plt.figure(figsize=figsize)
+        ax = plt.subplot(111)
+
+        counts = self._hist_data["counts"]
+        bin_numbers = self._hist_data["bin_numbers"]
+        bin_centres = self._hist_data["bin_centres"]
+
+        ax.plot(bin_numbers,
+                counts,
+                label="{:s} \n $N_{{events}}$ = {:s}".format(label,
+                                                             LatexFormat(float(np.sum(counts)))),
+                color="C0",
+                lw=5)
+        ax.grid(which="both")
+        ax.set_ylabel("Counts", fontsize=self._plot_fontsize)
+        ax.set_xlabel("Bin Index", fontsize=self._plot_fontsize)
+        ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
+        ax.tick_params(axis="y", labelsize=self._plot_fontsize)
+        ax.set_ylim([1,None])
+        ax.set_yscale("log")
+        ax.legend(fontsize=self._plot_legendfontsize)
+
+        # Secondary axis: map bin indices to charge values (and back for the inverse).
+        _f = interp1d(bin_numbers, bin_centres, bounds_error=False, fill_value="extrapolate")
+        _f_inv = interp1d(bin_centres, bin_numbers, bounds_error=False, fill_value="extrapolate")
+
+        secax = ax.secondary_xaxis('top', functions=(_f, _f_inv))
+        secax.set_xlabel('{:s}'.format(x_label), fontsize=self._plot_fontsize)
+        secax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
+
+        mf = ScalarFormatter(useMathText=True)
+        mf.set_powerlimits((-2,2))
+        secax.xaxis.set_major_formatter(mf)
+        offset = secax.xaxis.get_offset_text()
+        offset.set_size(int(0.8*self._plot_fontsize))
+
+        if(save_directory is not None):
+            print("Saving figure to {0}...".format(save_directory))
+            fig.savefig(save_directory)
+        if(display):
+            plt.pause(0.01)
+            fig.show()
+        else:
+            plt.close(fig)
+
+
+    def PlotFFT(self, figsize=(10.0,10.0), save_directory=None, label="Histogram", display=True):
+
+        fig = plt.figure(figsize=figsize)
+        ax = plt.subplot(111)
+
+        fft_freq = self._hist_data["fft_freq"]
+        counts = self._hist_data["counts"]
+        fft_amplitude = self._hist_data["fft_amplitude"]
+        G_fft = self._hist_data["G_fft"]
+        inv_G_fft = 1/G_fft
+
+        ax.plot(fft_freq,
+                fft_amplitude,
+                color="C0",
+                label="{:s} \n $N_{{events}}$ = {:s}".format(label,
+                                                             LatexFormat(float(np.sum(counts)))),
+                lw=5)
+
+        ax.axvline(x=inv_G_fft, color="green", lw=2, label="$G^{{*}}_{{FFT}}$ = {:3.3f} bins".format(G_fft))
+
+        ax.set_yscale("log")
+        ax.set_xscale("log")
+        ax.grid(which="both")
+        ax.set_xlabel("Frequency of Bin Index", fontsize=self._plot_fontsize)
+        ax.set_ylabel("Power Spectral Density", fontsize=self._plot_fontsize)
+        ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
+        ax.tick_params(axis="y", labelsize=self._plot_fontsize)
+        ax.legend(fontsize=self._plot_legendfontsize)
+
+        if(save_directory is not None):
+            print("Saving figure to {0}...".format(save_directory))
+            fig.savefig(save_directory)
+        if(display):
+            plt.pause(0.01)
+            fig.show()
+        else:
+            plt.close(fig)
+
+
+    def PlotBGSub(self, figsize=(10,10), save_directory=None, plot_bgsub=False, label="Histogram", display=True):
+
+        fig = plt.figure(figsize=figsize)
+        ax = plt.subplot(111)
+
+        counts = self._hist_data["counts"]
+        counts_bg = self._hist_data["counts_bg"]
+        counts_bgsub = self._hist_data["counts_bgsub"]
+        bin_numbers = self._hist_data["bin_numbers"]
+
+        if(plot_bgsub):
+
+            ax.plot(bin_numbers,
+                    counts,
+                    label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))),
+                    color="C0",
+                    lw=5)
+
+            ax.plot(bin_numbers,
+                    counts_bgsub,
+                    label="{:s} - BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bgsub)))),
+                    color="r",
+                    lw=5)
+
+        else:
+            ax.plot(bin_numbers,
+                    counts,
+                    label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))),
+                    color="C0",
+                    lw=5)
+
+            ax.fill_between(bin_numbers[counts_bg>0],
+                            counts_bg[counts_bg>0],
+                            label="{:s}, BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bg)))),
+                            color='none',
+                            hatch="///",
+                            edgecolor="r",
+                            lw=5)
+
+        ax.grid(which="both")
+        ax.set_xlabel("Bin Index", fontsize=self._plot_fontsize)
+        ax.set_ylabel("Counts", fontsize=self._plot_fontsize)
+        ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
+        ax.tick_params(axis="y", labelsize=self._plot_fontsize)
+        ax.set_ylim([1,None])
+        ax.set_yscale("log")
+        ax.legend(fontsize=self._plot_legendfontsize)
+
+        if(save_directory is not None):
+            print("Saving figure to {0}...".format(save_directory))
+            fig.savefig(save_directory)
+        if(display):
+            plt.pause(0.01)
+            fig.show()
+        else:
+            plt.close(fig)
+
+
+    def PlotPeaks(self, figsize=(10.0,10.0), save_directory=None, label="Histogram", display=True):
+
+        fig = plt.figure(figsize=figsize)
+        ax = plt.subplot(111)
+
+        counts = self._hist_data["counts"]
+        counts_bgsub = self._hist_data["counts_bgsub"]
+        bin_numbers = self._hist_data["bin_numbers"]
+        Q_0_est = self._hist_data["Q_0_est"]
+
+        peak_position = self._hist_data["peaks"]["peak_position"]
+
+        ax.plot(bin_numbers,
+                counts,
+                label="{:s} \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts)))),
+                color="C0",
+                lw=5,
+                zorder=-5)
+
+        ax.plot(bin_numbers,
+                counts_bgsub,
+                label="{:s} - BG \n $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(counts_bgsub)))),
+                color="r",
+                lw=5,
+                zorder=-4)
+
+        color_vals = [self._cmap(_v) for _v in np.linspace(0, 0.75, len(peak_position))]
+
+        for peak_i, pp in enumerate(peak_position):
+
+            if(peak_i == 0):
+                annotation_text = "$Q_{{0}}$"
+                color = "purple"
+                ls = "--"
+
+                ax.axvline(x=pp,
+                           color=color,
+                           linestyle=ls,
+                           lw=2.5,
+                           label='{:s}'.format(annotation_text)
+                           )
+
+            else:
+                annotation_text = "Peak {:d}".format(peak_i)
+                color = color_vals[peak_i]
+                ls = "-"
+
+                ax.axvline(x=pp,
+                           color=color,
+                           linestyle=ls,
+                           lw=2.5
+                           )
+
+        ax.scatter([Q_0_est],
+                   [counts[np.argmin(abs(bin_numbers-Q_0_est))]],
+                   marker="v",
+                   s=200,
+                   color="purple",
+                   zorder=5,
+                   label="$Q_{{0}}$ Estimate")
+
+        ax.set_xlabel("Bin Index", fontsize=self._plot_fontsize)
+        ax.set_ylabel("Counts", fontsize=self._plot_fontsize)
+        ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
+        ax.tick_params(axis="y", labelsize=self._plot_fontsize)
+        ax.grid(which="both")
+        ax.legend(fontsize=self._plot_legendfontsize)
+
+        if(save_directory is not None):
+            print("Saving figure to {0}...".format(save_directory))
+            fig.savefig(save_directory)
+        if(display):
+            plt.pause(0.01)
+            fig.show()
+        else:
+            plt.close(fig)
+
+
+    def PlotVarianceFit(self, figsize=(10.0, 10.0), save_directory=None, y_limits=[None, None], display=True):
+
+        fig = plt.figure(figsize=figsize)
+        ax = plt.subplot(111)
+
+        cond_sel_peaks = self._hist_data["peaks"]["cond_sel_peaks"]
+        peak_index = self._hist_data["peaks"]["peak_index"][cond_sel_peaks]
+        peak_variance = self._hist_data["peaks"]["peak_variance"][cond_sel_peaks]
+        peak_variance_error = self._hist_data["peaks"]["peak_variance_error"][cond_sel_peaks]
+
+        _offset = self._hist_data["bc_min"]
+        _scale = self._hist_data["bw"]
+
+        sigma_0 = self._prefit_values["sigma_0"]/_scale
+        dsigma_0 = self._prefit_errors["sigma_0"]/_scale
+        sigma_1 = self._prefit_values["sigma_1"]/_scale
+        dsigma_1 = self._prefit_errors["sigma_1"]/_scale
+
+        ax.errorbar(peak_index,
+                    peak_variance,
+                    yerr=peak_variance_error,
+                    fmt="o",
+                    lw=3,
+                    markersize=10,
+                    color="C0",
+                    label="Variances of Peaks"
+                    )
+
+        ax.plot(peak_index,
+                Linear(peak_index,
+                       sigma_1**2,
+                       sigma_0**2
+                       ),
+                color="C0",
+                linestyle="--",
+                lw=3,
+                label="Linear Fit: \n $\sigma^{{2}}_{{0}}$ = {:s} $\pm$ {:s} bins$^{{2}}$ \n $\sigma^{{2}}_{{1}}$ = {:s} $\pm$ {:s} bins$^{{2}}$".format(
+                      LatexFormat(sigma_0**2),
+                      LatexFormat(2*sigma_0*dsigma_0),
+                      LatexFormat(sigma_1**2),
+                      LatexFormat(2*sigma_1*dsigma_1),
+                      ),
+                )
+
+        ax.set_xlabel("Peak Index", fontsize=self._plot_fontsize)
+        ax.set_ylabel("$\sigma^{2}_{peak}$", fontsize=self._plot_fontsize)
+        ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
+        ax.tick_params(axis="y", labelsize=self._plot_fontsize)
+        ax.grid(which="both")
+        ax.set_ylim(y_limits)
+        ax.legend(fontsize=self._plot_legendfontsize)
+
+        if(save_directory is not None):
+            print("Saving figure to {0}...".format(save_directory))
+            fig.savefig(save_directory)
+        if(display):
+            plt.pause(0.01)
+            fig.show()
+        else:
+            plt.close(fig)
+
+
+    def PlotMeanFit(self, figsize=(10.0, 10.0), save_directory=None, display=True):
+
+        fig = plt.figure(figsize=figsize)
+        ax = plt.subplot(111)
+
+        bin_numbers = self._hist_data["bin_numbers"]
+
+        cond_sel_peaks = self._hist_data["peaks"]["cond_sel_peaks"]
+        peak_index = self._hist_data["peaks"]["peak_index"][cond_sel_peaks]
+        peak_mean = self._hist_data["peaks"]["peak_mean"][cond_sel_peaks]
+        peak_mean_error = self._hist_data["peaks"]["peak_mean_error"][cond_sel_peaks]
+
+        _offset = self._hist_data["bc_min"]
+        _scale = self._hist_data["bw"]
+
+        Q_0 = (self._prefit_values["Q_0"] - _offset)/_scale
+        dQ_0 = self._prefit_errors["Q_0"]/_scale
+        G = self._hist_data["G_fft"]
+
+        ax.errorbar(peak_index,
+                    peak_mean,
+                    yerr=peak_mean_error,
+                    fmt="o",
+                    markersize=10,
+                    lw=3,
+                    color="C0",
+                    label="Means of Peaks"
+                    )
+
+        ax.plot(peak_index,
+                Linear(peak_index,
+                       G,
+                       Q_0,
+                       ),
+                color="C0",
+                linestyle="--",
+                lw=3,
+                label="Linear Fit: \n $Q_{{0}}$ = {:s} $\pm$ {:s} bins \n $G^{{*}}_{{FFT}}$ = {:s} bins".format(
+                      LatexFormat(Q_0),
+                      LatexFormat(dQ_0),
+                      LatexFormat(G),
+                      ),
+                )
+
+        ax.set_xlabel("Peak Index", fontsize=self._plot_fontsize)
+        ax.set_ylabel("$\\langle Q_{{peak}} \\rangle$", fontsize=self._plot_fontsize)
+        ax.tick_params(axis="x", labelsize=self._plot_fontsize, rotation=45)
+        ax.tick_params(axis="y", labelsize=self._plot_fontsize)
+        ax.grid(which="both")
+        ax.legend(fontsize=self._plot_legendfontsize)
+
+        if(save_directory is not None):
+            print("Saving figure to {0}...".format(save_directory))
+            fig.savefig(save_directory)
+        if(display):
+            plt.pause(0.01)
+            fig.show()
+        else:
+            plt.close(fig)
+
+
+    def PlotDCR(self, figsize=(10.0, 10.0), save_directory=None, label="Histogram", display=True):
+
+        fig = plt.figure(figsize=figsize)
+        ax = plt.subplot(111)
+
+        counts = self._hist_data["counts"]
+        bin_numbers = self._hist_data["bin_numbers"]
+        bw = self._hist_data["bw"]
+
+        _offset = self._hist_data["bc_min"]
+        _scale = self._hist_data["bw"]
+
+        Q_0 = (self._prefit_values["Q_0"] - _offset)/_scale
+        G = self._prefit_values["G"]/_scale
+
+        DCR = self._prefit_values["DCR"]
+        dDCR = self._prefit_errors["DCR"]
+
+        K = (bin_numbers - Q_0)/G
+
+        cond_NN = (K>=0.45)&(K<=0.55)
+        cond_Nc = (K<=0.5)
+        cond_DCR = (K<=1.8)
+
+        NN_sum = max(np.sum(counts[cond_NN]), 1)
+        NN = np.mean(counts[cond_NN])
+
+        if(NN!=NN):
+            NN = float(counts[np.argmin(abs(K-0.5))])
+            NN_sum = float(NN)
+
+        Nc = max(float(np.sum(counts[cond_Nc])), 1)
+
+        ax.plot(K[cond_DCR],
+                counts[cond_DCR],
+                label="{:s}".format(label),
+                color="C0",
+                lw=5)
+
+        ax.fill_between(K[cond_Nc],
+                        counts[cond_Nc],
+                        color='none',
+                        hatch="///",
+                        edgecolor="r",
+                        lw=3,
+                        label="$N_{{K \leq 0.5}}$={:d} events".format(int(Nc)))
+
+        ax.fill_between(K[cond_NN],
+                        counts[cond_NN],
+                        color='green',
+                        edgecolor="g",
+                        alpha=0.3,
+                        label="$\\langle N \\rangle_{{0.45 \\leq K \\leq 0.55}}$={:3.1f} events".format(NN))
+
+        legend_title = "$DCR$={:s} $\pm$ {:s} MHz".format(LatexFormat(DCR*1e3), LatexFormat(dDCR*1e3))
+
+        ax.set_xlabel("K [p.e.]", fontsize=self._plot_fontsize)
+        ax.grid(which="both")
+        ax.set_ylabel("Counts", fontsize=self._plot_fontsize)
+        ax.tick_params(axis="x", which='both', labelsize=self._plot_fontsize, rotation=45)
+        ax.tick_params(axis="y", which='both', labelsize=self._plot_fontsize)
+        ax.set_yscale("log")
+        ax.legend(title=legend_title, title_fontsize=int(self._plot_legendfontsize), fontsize=int(self._plot_legendfontsize))
+
+        if(save_directory is not None):
+            print("Saving figure to {0}...".format(save_directory))
+            fig.savefig(save_directory)
+        if(display):
+            plt.pause(0.01)
+            fig.show()
+        else:
+            plt.close(fig)
+
+
+    def PlotFit(self,
+                figsize=(10.0,10.0),
+                labelsize=None,
+                ticksize=None,
+                titlesize=None,
+                legendsize=None,
+                title=None,
+                xlabel=None,
+                scaled=False,
+                save_directory=None,
+                label=None,
+                y_limits=[1, None],
+                residual_limits=[-5, 5],
+                residualticksize=None,
+                linewidth_main=5,
+                x_limits=[None, None],
+                display=True,
+                prefit=False
+                ):
+
+        if(labelsize is None):
+            labelsize = self._plot_fontsize
+
+        if(ticksize is None):
+            ticksize = self._plot_fontsize
+
+        if(titlesize is None):
+            titlesize = self._plot_fontsize
+
+        if(legendsize is None):
+            legendsize = self._plot_legendfontsize
+
+        if(residualticksize is None):
+            residualticksize = int(0.8*self._plot_fontsize)
+
+        # Copy to avoid mutating the default argument between calls.
+        y_limits = list(y_limits)
+
+        x = self._hist_data["bin_centres"]
+        y = self._hist_data["counts"]
+        y_err = self._hist_data["counts_error"]
+        bw = self._hist_data["bw"]
+        N = np.sum(y)
+
+        y_hat = N*bw*self.GetModel(x, prefit)
+
+        chi2, ndf = self.GetChi2(prefit)
+
+        if(xlabel is None):
+            xlabel = "Q"
+
+        fig = plt.figure(figsize=figsize)
+        gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1])
+        ax0 = plt.subplot(gs[0])
+        if(label is None):
+            label = "Histogram"
+        label = "{:s}, $N_{{events}}$ = {:s}".format(label, LatexFormat(float(np.sum(y))))
+        ax0.plot(x, y, label=label, lw=linewidth_main, color="C0")
+        ax0.plot(x, y_hat, linestyle="--", label="Model", lw=linewidth_main, color="C1")
+        ax0.plot([], [], ' ', label="$\\frac{{\\chi^{{2}}}}{{NDF}}$ = {:s}".format(LatexFormat(chi2/ndf)))
+
+        ax0.set_ylabel("Counts", fontsize=labelsize)
+        ax0.tick_params(axis="y", labelsize=ticksize)
+
+        if(y_limits[0] is None):
+            y_limits[0] = np.min(y[y>0])
+        ax0.set_ylim(y_limits)
+        ax0.set_xlim(x_limits)
+        if(title is not None):
+            ax0.set_title(title, fontsize=titlesize)
+
+        ax0.grid(which="both")
+        ax0.set_yscale("log")
+        ax0.legend(fontsize=legendsize)
+
+        ax1 = plt.subplot(gs[1], sharex=ax0)
+        cond_sel = (y_err>0) & (y>0)
+
+        ax1.scatter(x[cond_sel], (y[cond_sel] - y_hat[cond_sel])/(y_err[cond_sel]), color="C1")
+
+        ax1.tick_params(axis="x", labelsize=ticksize, rotation=45)
+        ax1.tick_params(axis="y", labelsize=residualticksize)
+        ax1.set_ylabel("Residuals [$\sigma$]", fontsize=labelsize)
+        ax1.set_xlabel(xlabel, fontsize=labelsize)
+        ax1.set_ylim(residual_limits)
+        ax1.set_yticks(np.arange(int(np.floor(np.min(residual_limits))),
+                                 int(np.ceil(np.max(residual_limits)))))
+        ax1.axhline(y=-1.0, color="purple", lw=2.5, linestyle="--")
+        ax1.axhline(y=1.0, color="purple", lw=2.5, linestyle="--")
+        ax1.grid(which="both")
+
+        fig.tight_layout()
+
+        mf = ScalarFormatter(useMathText=True)
+        mf.set_powerlimits((-2,2))
+        ax1.xaxis.set_major_formatter(mf)
+        offset = ax1.xaxis.get_offset_text()
+        offset.set_size(int(0.8*ticksize))
+
+        fig.subplots_adjust(hspace=.0)
+        if(save_directory is not None):
+            print("Saving figure to {0}...".format(save_directory))
+            fig.savefig(save_directory)
+        if(display):
+            plt.pause(0.01)
+            fig.show()
+        else:
+            plt.close(fig)
+
+
+    ########################################################################################################
+    ## Fit(data, **kwargs)
+    ########################################################################################################
+    ##
+    ## VARIABLES:
+    ## tau: slow time-constant of SiPM pulse in nanoseconds
+    ## tau_err: optional error to allow tau to float
+    ## t_0: pre-gate integration window in nanoseconds.
+    ## t_0_err: optional error to allow t_0 to float
+    ## tau_R: recovery time of SiPM in nanoseconds
+    ## tau_R_err: optional error to allow tau_R to float
+    ## t_gate: gate length in nanoseconds
+    ## t_gate_err: optional error to allow t_gate to float
+    ## bw: optional bin width if data is 1D
+    ## truncate_nG: optional parameter to truncate the distribution a given number of estimated gain units below the estimated pedestal.
+    ## peak_dist_nG: required minimum distance between peaks, in gain units, to register a peak. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html
+    ## peak_rel_height: chooses the relative height at which the peak width is measured, as a percentage of its prominence. 1.0 calculates the width of the peak at its lowest contour line, while 0.5 evaluates at half the prominence height. Must be at least 0. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.peak_widths.html#scipy.signal.peak_widths
+    ## bin_method: binning method for unbinned data. "knuth", "freedman" or "scott" rules are available.
+    ## alpha_peaks: the maximum percentile of the distribution a found peak is allowed to have. This determines the threshold on the selected peak positions.
+    ##
+    ## Main fit function of PeakOTron, which performs the prefit and the main fit using Minuit.
+    ##
+    ########################################################################################################
+
+    def Fit(self, data, **kwargs):
+
+        ########################################################################################################
+        ## Preparation
+        ########################################################################################################
+        ##
+        ## 1. Data dictionaries are initialised if previously filled.
+        ## 2. tau, tau_R, t_0 and t_gate are checked to ensure they are provided to the program.
+        ## 3. Checks of the input parameters are performed to ensure no disallowed values.
+        ########################################################################################################
+
+        self.InitKwargs()
+
+        if not set(kwargs.keys()).issubset(self._fit_kwargs.keys()):
+            wrong_keys = list(set(kwargs.keys()).difference(set(self._fit_kwargs.keys())))
+            raise Exception("Arguments {:s} not recognised.".format(",".join(wrong_keys)))
+
+        if not "tau" in kwargs.keys():
+            raise Exception("Please provide 'tau' (slow component of SiPM pulse in nanoseconds).")
+        elif(not isinstance(kwargs["tau"], float) or kwargs["tau"]<0):
+            raise Exception("Please set 'tau' (slow component of SiPM pulse in nanoseconds) to a positive float.")
+
+        if not "tau_R" in kwargs.keys():
+            raise Exception("Please provide 'tau_R' (recovery time of SiPM in nanoseconds).")
+        elif(not isinstance(kwargs["tau_R"], float) or kwargs["tau_R"]<0):
+            raise Exception("Please set 'tau_R' (recovery time of SiPM in nanoseconds) to a positive float.")
+
+        if not "t_0" in kwargs.keys():
+            raise Exception("Please provide 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds).")
+        elif(kwargs["t_0"]<0):
+            raise Exception("Please set 't_0' (integration region before start of SiPM pulse integration gate in nanoseconds) to a positive float.")
+
+        if not "t_gate" in kwargs.keys():
+            raise Exception("Please provide 't_gate' (length of SiPM pulse integration gate in nanoseconds).")
+        elif(kwargs["t_gate"]<0):
+            raise Exception("Please set 't_gate' (length of SiPM pulse integration gate in nanoseconds) to a positive float.")
+
+        self._fit_kwargs.update(kwargs)
+
+        if self._fit_kwargs["truncate_nG"] is not None:
+            if(not isinstance(self._fit_kwargs["truncate_nG"], float) or self._fit_kwargs["truncate_nG"]<0):
+                raise Exception("Please set truncate_nG to a positive float, which represents the number of estimated units of gain below the estimated pedestal that will be excluded from the minimisation process.")
+
+        if(not isinstance(self._fit_kwargs["peak_dist_nG"], float)):
+            raise Exception("Please set peak_dist_nG to a positive float between 0 and 1, which represents the minimum distance required between peaks as a fraction of one gain unit.")
+        elif(self._fit_kwargs["peak_dist_nG"]<=0 or self._fit_kwargs["peak_dist_nG"]>=1):
+            raise Exception("Please set peak_dist_nG to a positive float between 0 and 1, which represents the minimum distance required between peaks as a fraction of one gain unit.")
+
+        if self._fit_kwargs["bw"] is not None:
+            if(not isinstance(self._fit_kwargs["bw"], float) or self._fit_kwargs["bw"]<=0):
+                raise Exception("Please set bw (bin width) to a positive float.")
+
+        if(self._verbose):
+            print("Prefitting...")
+
+        ########################################################################################################
+        ## Prefitting routine applied
+        ########################################################################################################
+
+        t_0 = time.time()
+        self.GetPreFit(data, **self._fit_kwargs)
+        t_1 = time.time()
+
+        self._time_prefit = t_1-t_0
+
+        if(self._verbose):
+            print("Prefitting complete.")
+
+        _offset = self._hist_data["bc_min"]
+        _scale = self._hist_data["bw"]
+
+        if(self._fit_kwargs["prefit_only"]):
+
+            _prefit_values_temp = self._prefit_values.copy()
+            _prefit_errors_temp = self._prefit_errors.copy()
+
+            ## Rescale the prefit parameters from bin units back to the input charge units.
+            for key, value in _prefit_values_temp.items():
+                if(key=="Q_0"):
+                    _prefit_values_temp[key] = _offset + value*_scale
+                elif(key=="G"):
+                    _prefit_values_temp[key] = value*_scale
+                elif(key=="sigma_0"):
+                    _prefit_values_temp[key] = value*_scale
+                elif(key=="sigma_1"):
+                    _prefit_values_temp[key] = value*_scale
+                else:
+                    _prefit_values_temp[key] = value
+
+            for key, value in _prefit_errors_temp.items():
+                if(key=="Q_0"):
+                    _prefit_errors_temp[key] = value*_scale
+                elif(key=="G"):
+                    _prefit_errors_temp[key] = value*_scale
+                elif(key=="sigma_0"):
+                    _prefit_errors_temp[key] = value*_scale
+                elif(key=="sigma_1"):
+                    _prefit_errors_temp[key] = value*_scale
+                else:
+                    _prefit_errors_temp[key] = value
+
+            self._prefit_values.update(_prefit_values_temp)
+            self._prefit_errors.update(_prefit_errors_temp)
+
+        else:
+
+            ########################################################################################################
+            ## Construct the binned likelihood for the main fit.
+            ########################################################################################################
+
+            f_DRM = BinnedLH(DRM, self._hist_data["bin_numbers"], self._hist_data["counts"], 1)
+
+            ########################################################################################################
+            ## Initialise Minuit with the hypothesis of no dark counts or afterpulsing.
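+            ## The minimisation is staged: first the "light" parameters (Q_0, G, mu, lbda,
+            ## sigma_0, sigma_1) are fitted with the noise parameters frozen and DCR pinned
+            ## at a negligible 1e-9 GHz; then the light parameters are frozen while the noise
+            ## parameters (DCR, p_Ap, tau_Ap) are fitted; finally all nine are released together.
+            ## A minimal standalone sketch of the same staging (start values are hypothetical):
+            ##   m = Minuit(BinnedLH(DRM, bin_numbers, counts, 1), **start_values)
+            ##   m.errordef = Minuit.LIKELIHOOD
+            ##   m.fixed["DCR"] = m.fixed["p_Ap"] = m.fixed["tau_Ap"] = True
+            ##   m.migrad()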
+ ######################################################################################################## + + m_DRM = Minuit(f_DRM, + Q_0 = self._prefit_values["Q_0"], + G = self._prefit_values["G"], + mu = self._prefit_values["mu"], + lbda = self._prefit_values["lbda"], + sigma_0 = self._prefit_values["sigma_0"], + sigma_1 = self._prefit_values["sigma_1"], + tau_Ap = self._prefit_values["tau_Ap"], + p_Ap = self._prefit_values["p_Ap"], + DCR = 1e-9, + tau = self._prefit_values["tau"], + tau_R = self._prefit_values["tau_R"], + t_gate = self._prefit_values["t_gate"], + t_0 = self._prefit_values["t_0"], + k_max = self._prefit_values["k_max"], + k_max_dcr = self._prefit_values["k_max_dcr"], + len_pad = self._prefit_values["len_pad"], + ) + + + + + + m_DRM.errors["Q_0"] = self._prefit_errors["Q_0"] + m_DRM.errors["G"] = self._prefit_errors["G"] + m_DRM.errors["mu"] = self._prefit_errors["mu"] + m_DRM.errors["lbda"] = self._prefit_errors["lbda"] + m_DRM.errors["sigma_0"] = self._prefit_errors["sigma_0"] + m_DRM.errors["sigma_1"] = self._prefit_errors["sigma_1"] + m_DRM.errors["tau_Ap"] = self._prefit_errors["tau_Ap"] + m_DRM.errors["p_Ap"] = self._prefit_errors["p_Ap"] + m_DRM.errors["DCR"] = self._prefit_errors["DCR"] + + + ######################################################################################################## + ## Set parameter limits + ## These limits have been chosen to best constrain the possible values for each of the parameters. + ## NOTE: we are working in bin widths, so the minimum allowed pedestal position is the first/second bin for Q_0, G, sigma_0, sigma_1 + ######################################################################################################## + + + + m_DRM.limits["G"] = (epsilon(), None) + + m_DRM.limits["mu"] = (epsilon(), None) + + m_DRM.limits["lbda"] = (epsilon(), 1-epsilon()) + + m_DRM.limits["sigma_0"] = (epsilon(), None) + + m_DRM.limits["sigma_1"] = (epsilon(), None) + + m_DRM.limits["tau_Ap"] = (epsilon(), self._prefit_values["t_gate"]/2-epsilon()) + + m_DRM.limits["p_Ap"] = (epsilon(), 1-epsilon()) + + m_DRM.limits["DCR"] = (epsilon(), None) + + + + ######################################################################################################## + ## Allow input tau, t_gate, tau_R and t_0 to float if errors are known. 
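+            ## Each of these is released only when its error is supplied, and is then
+            ## constrained to within one error of its input value, clipped at epsilon()
+            ## so that it stays positive. Sketch for tau (the same pattern is applied to
+            ## t_gate, t_0 and tau_R below):
+            ##   m_DRM.fixed["tau"] = False
+            ##   m_DRM.limits["tau"] = (max(epsilon(), tau - tau_err),
+            ##                          max(epsilon(), tau + tau_err))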
+ ######################################################################################################## + + + if(self._fit_kwargs["tau_err"] is None): + m_DRM.fixed["tau"]=True + else: + m_DRM.fixed["tau"]=False + m_DRM.errors["tau"] = self._prefit_errors["tau"] + m_DRM.limits["tau"] = (max(epsilon(), self._prefit_values["tau"]-self._prefit_errors["tau"]), + max(epsilon(), self._prefit_values["tau"]+self._prefit_errors["tau"]) + ) + + if(self._fit_kwargs["t_gate_err"] is None): + m_DRM.fixed["t_gate"]=True + else: + + m_DRM.fixed["t_gate"]=False + m_DRM.errors["t_gate"] = self._prefit_errors["t_gate"] + m_DRM.limits["t_gate"] = (max(epsilon(), self._prefit_values["t_gate"]-self._prefit_errors["t_gate"]), + max(epsilon(), self._prefit_values["t_gate"]+self._prefit_errors["t_gate"]) + ) + + + if(self._fit_kwargs["t_0_err"] is None): + m_DRM.fixed["t_0"]=True + else: + m_DRM.fixed["t_0"]=False + m_DRM.errors["t_0"] = self._prefit_errors["t_0"] + m_DRM.limits["t_0"] = (max(epsilon(), self._prefit_values["t_0"]-self._prefit_errors["t_0"]), + max(epsilon(), self._prefit_values["t_0"]+self._prefit_errors["t_0"]) + ) + + + + if(self._fit_kwargs["tau_R_err"] is None): + m_DRM.fixed["tau_R"]=True + else: + m_DRM.fixed["tau_R"]=False + m_DRM.errors["tau_R"] = self._prefit_errors["tau_R"] + m_DRM.limits["tau_R"] = (max(epsilon(), self._prefit_values["tau_R"]-self._prefit_errors["tau_R"]), + max(epsilon(), self._prefit_values["tau_R"]+self._prefit_errors["tau_R"]) + ) + + + + m_DRM.fixed["k_max"]=True + m_DRM.fixed["k_max_dcr"]=True + m_DRM.fixed["len_pad"]=True + + + + ######################################################################################################## + ##Perform fit under hypothesis there are no dark counts, afterpulses. + ######################################################################################################## + + m_DRM.fixed["Q_0"] = False + m_DRM.fixed["G"] = False + m_DRM.fixed["mu"] = False + m_DRM.fixed["lbda"] = False + m_DRM.fixed["sigma_0"] = False + m_DRM.fixed["sigma_1"] = False + m_DRM.fixed["p_Ap"]=True + m_DRM.fixed["tau_Ap"]=True + m_DRM.fixed["DCR"]=True + + if(self._verbose): + print("Fitting basic light...") + + m_DRM.strategy=1 + m_DRM.errordef=m_DRM.LIKELIHOOD + m_DRM.migrad(ncall = self._n_call_minuit, + iterate= self._n_iterations_minuit) + + + if(self._verbose): + print("First pass complete...") + print(m_DRM) + + if(self._verbose): + print("Fixing basic light and fitting noise...") + + + m_DRM.fixed["Q_0"] = True + m_DRM.fixed["G"] = True + m_DRM.fixed["mu"] = True + m_DRM.fixed["lbda"] = True + m_DRM.fixed["sigma_0"] = True + m_DRM.fixed["sigma_1"] = True + m_DRM.fixed["p_Ap"]=False + m_DRM.fixed["tau_Ap"]=False + m_DRM.fixed["DCR"]=False + m_DRM.values["DCR"] = self._prefit_values["DCR"] + + + m_DRM.strategy=1 + m_DRM.errordef=m_DRM.LIKELIHOOD + m_DRM.migrad(ncall = self._n_call_minuit, + iterate= self._n_iterations_minuit) + + if(self._verbose): + print("Second pass complete...") + print(m_DRM) + + + if(self._verbose): + print("Fitting full model...") + + + m_DRM.fixed["Q_0"] = False + m_DRM.fixed["G"] = False + m_DRM.fixed["mu"] = False + m_DRM.fixed["lbda"] = False + m_DRM.fixed["sigma_0"] = False + m_DRM.fixed["sigma_1"] = False + m_DRM.fixed["p_Ap"]=False + m_DRM.fixed["tau_Ap"]=False + m_DRM.fixed["DCR"]=False + + + m_DRM.strategy=2 + m_DRM.migrad(ncall = self._n_call_minuit, + iterate= self._n_iterations_minuit) + + m_DRM.hesse() + + + t_2 = time.time() + self._time_fit = t_2 - t_1 + + + if(self._verbose): + print("Prefit took 
{:3.3f} s, Fit took {:3.3f} s".format(self._time_prefit,
+                                                                 self._time_fit))
+                print(m_DRM)
+
+            self._m_DRM = m_DRM
+
+            ########################################################################################################
+            ## Rescale the fitted parameters back to their input units.
+            ########################################################################################################
+
+            for key, value in m_DRM.values.to_dict().items():
+                if(key=="Q_0"):
+                    self._fit_values[key] = _offset + value*_scale
+                elif(key=="G"):
+                    self._fit_values[key] = value*_scale
+                elif(key=="sigma_0"):
+                    self._fit_values[key] = value*_scale
+                elif(key=="sigma_1"):
+                    self._fit_values[key] = value*_scale
+                else:
+                    self._fit_values[key] = value
+
+            for key, value in m_DRM.errors.to_dict().items():
+                if(key=="Q_0"):
+                    self._fit_errors[key] = value*_scale
+                elif(key=="G"):
+                    self._fit_errors[key] = value*_scale
+                elif(key=="sigma_0"):
+                    self._fit_errors[key] = value*_scale
+                elif(key=="sigma_1"):
+                    self._fit_errors[key] = value*_scale
+                else:
+                    self._fit_errors[key] = value
+
+            _prefit_values_temp = self._prefit_values.copy()
+            _prefit_errors_temp = self._prefit_errors.copy()
+
+            for key, value in _prefit_values_temp.items():
+                if(key=="Q_0"):
+                    _prefit_values_temp[key] = _offset + value*_scale
+                elif(key=="G"):
+                    _prefit_values_temp[key] = value*_scale
+                elif(key=="sigma_0"):
+                    _prefit_values_temp[key] = value*_scale
+                elif(key=="sigma_1"):
+                    _prefit_values_temp[key] = value*_scale
+                else:
+                    _prefit_values_temp[key] = value
+
+            for key, value in _prefit_errors_temp.items():
+                if(key=="Q_0"):
+                    _prefit_errors_temp[key] = value*_scale
+                elif(key=="G"):
+                    _prefit_errors_temp[key] = value*_scale
+                elif(key=="sigma_0"):
+                    _prefit_errors_temp[key] = value*_scale
+                elif(key=="sigma_1"):
+                    _prefit_errors_temp[key] = value*_scale
+                else:
+                    _prefit_errors_temp[key] = value
+
+            self._prefit_values.update(_prefit_values_temp)
+            self._prefit_errors.update(_prefit_errors_temp)
+
+
+    ########################################################################################################
+    ## GetPreFit(data, **kwargs)
+    ########################################################################################################
+    ##
+    ## VARIABLES:
+    ## tau: slow time-constant of SiPM pulse in nanoseconds
+    ## tau_err: optional error to allow tau to float
+    ## t_0: pre-gate integration window in nanoseconds.
+    ## t_0_err: optional error to allow t_0 to float
+    ## tau_R: recovery time of SiPM in nanoseconds
+    ## tau_R_err: optional error to allow tau_R to float
+    ## t_gate: gate length in nanoseconds
+    ## t_gate_err: optional error to allow t_gate to float
+    ## bw: optional bin width if data is 1D
+    ## truncate_nG: optional parameter to truncate the distribution a given number of estimated gain units below the estimated pedestal.
+    ## peak_dist_nG: required minimum distance between peaks, in gain units, to register a peak. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html
+    ## peak_rel_height: chooses the relative height at which the peak width is measured, as a percentage of its prominence. 1.0 calculates the width of the peak at its lowest contour line, while 0.5 evaluates at half the prominence height. Must be at least 0. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.peak_widths.html#scipy.signal.peak_widths
+    ## bin_method: binning method for unbinned data. "knuth", "freedman" or "scott" rules are available.
+ ## alpha_peaks: the maximum percentile of the distribution a found peak is allowed to have. This determines the threshold on the selected peak positions. + ## + ## Perform pre-fitting routine. + ######################################################################################################## + + + + + def GetPreFit(self, data, **kwargs): + + ######################################################################################################## + ##Get reconstructed data, bin width, number of bins and bin values + ######################################################################################################## + + + data, bw, nbins, bins = self.GetBins(data, kwargs["bw"], kwargs["bin_method"]) + + counts, _ = np.histogram(data, bins=bins) + counts_error = np.where(counts<1, 1, np.sqrt(counts)) + bin_centres = (bins[:-1] + bins[1:])/2. + nbins = len(bin_centres) + bin_numbers = np.arange(0, nbins) + + + self._len_pad = nbins + + + + ######################################################################################################## + ## Perform FFT and calculate Power Spectrum. The inverse of the frequency of the maximum amplitude yields the gain. + ######################################################################################################## + + + nbins_pad = nbins+2*nbins + counts_pad = np.pad(counts, (nbins, nbins), "constant", constant_values=0) + + + fhat = np.fft.fft(counts_pad) #computes the fft + psd = fhat * np.conj(fhat)/nbins_pad + fft_freq_orig = (1/nbins_pad) * np.arange(nbins_pad) #frequency array + idxs_half = np.arange(1, np.floor(nbins_pad/2), dtype=np.int32) + fft_amplitude = np.abs(psd[idxs_half]) + fft_freq = fft_freq_orig[idxs_half] + + try: + f_interp_fft = InterpolatedUnivariateSpline(fft_freq, fft_amplitude, k=4) + except: + raise Exception("Interpolation of FFT failed. Fit cannot proceed.") + + try: + fft_cr_pts = f_interp_fft.derivative().roots() + except: + raise Exception("Root finding of FFT failed. Fit cannot proceed.") + + if(len(fft_cr_pts)<1): + raise Exception("No roots found. Fit cannot proceed.") + + + fft_cr_vals = f_interp_fft(fft_cr_pts) + + inv_G_fft = fft_cr_pts[np.argmax(fft_cr_vals)] + G_fft = 1/inv_G_fft + + + self._len_pad = np.around(G_fft, 0).astype(int) + + + ######################################################################################################## + #Estimate the position of the pedestal by minimising the GP gain w.r.t the FFT Gain + ######################################################################################################## + + mu_unsub = HistMean(counts, bin_numbers) + sigma = HistStd(counts, bin_numbers) + gamma = HistSkew(counts, bin_numbers) + + + + f_interp_counts = UnivariateSpline(bin_numbers, counts, w=1/counts_error, k=4) + + + try: + counts_cr_pts = f_interp_counts.derivative().roots() + except: + raise Exception("Root finding of spectrum failed. Fit cannot proceed.") + + if(len(counts_cr_pts)<1): + raise Exception("No roots of spectrum found. 
Fit cannot proceed.")
+
+        counts_cr_vals = f_interp_counts(counts_cr_pts)
+        max_peak = counts_cr_pts[np.argmax(counts_cr_vals)]
+
+        f_pedestal = lambda Q_0: (GP_gain(mu_unsub-Q_0, sigma, gamma) - G_fft)**2
+        m_pedestal = Minuit(f_pedestal, Q_0 = max_peak)
+        m_pedestal.errordef = m_pedestal.LEAST_SQUARES
+        m_pedestal.limits["Q_0"] = (epsilon(), max_peak+epsilon())
+        m_pedestal.migrad()
+        Q_0_est = m_pedestal.values["Q_0"]
+
+        idx_peaks = np.sort(np.concatenate([np.arange(max_peak-G_fft, -G_fft/2, -G_fft),
+                                            np.array([max_peak]),
+                                            np.arange(max_peak+G_fft, nbins+G_fft/2, G_fft),
+                                            ]))
+
+        idx_Q_0 = np.argmin(abs(idx_peaks-Q_0_est))
+        idx_peaks = idx_peaks[idx_Q_0:-1]
+
+        ########################################################################################################
+        # Select the maximum peak position, and step in units of gain up to either end of the spectrum.
+        ########################################################################################################
+
+        ########################################################################################################
+        # - Select the pedestal as the closest peak position to the estimated pedestal.
+        # - Select only peaks up to the pedestal.
+        # - Get midpoints and end values.
+        # - Linearly interpolate the midpoints.
+        # - Subtract the 'background', and threshold to a minimum of zero.
+        ########################################################################################################
+
+        counts_bg = np.full(len(idx_peaks)-1, None)
+        idx_bg = np.full(len(idx_peaks)-1, None)
+
+        for i_peak in range(len(idx_peaks)-1):
+            last_peak = idx_peaks[i_peak]
+            next_peak = idx_peaks[i_peak+1]
+            mid_peak = idx_peaks[i_peak] + G_fft/2
+
+            cond_sel_interpeak = (counts_cr_pts>=last_peak) & (counts_cr_pts<=next_peak)
+
+            if(sum(cond_sel_interpeak)>0):
+
+                counts_cr_pts_interpeak = counts_cr_pts[cond_sel_interpeak]
+                counts_cr_values_interpeak = counts_cr_vals[cond_sel_interpeak]
+
+                idx_min_interpeak = np.argmin(counts_cr_values_interpeak)
+
+                count_min_interpeak = counts_cr_values_interpeak[idx_min_interpeak]
+                idx_min_interpeak = counts_cr_pts_interpeak[idx_min_interpeak]
+
+            else:
+                if(self._verbose):
+                    print("No local minimum found in peak range. Using the value at peak + gain/2.")
+
+                idx_min_interpeak = mid_peak
+                count_min_interpeak = counts[np.argmin(abs(bin_numbers-mid_peak))]
+
+            idx_bg[i_peak] = idx_min_interpeak
+            counts_bg[i_peak] = count_min_interpeak
+
+        try:
+
+            arg_sort_bg = np.argsort(idx_bg)
+            idx_bg = idx_bg[arg_sort_bg]
+            counts_bg = counts_bg[arg_sort_bg]
+
+            idx_bg = np.pad(idx_bg, (1,1), "constant", constant_values=(0, nbins))
+            counts_bg = np.pad(counts_bg, (1,1), "constant", constant_values=(0, 0))
+            f_interp_bg = interp1d(idx_bg, np.where(counts_bg>0, counts_bg, 0), bounds_error=False, fill_value=(0,0))
+            counts_bg = f_interp_bg(bin_numbers).astype(float)
+            counts_bgsub = counts - counts_bg
+            counts_bgsub = np.where(counts_bgsub<0, 0, counts_bgsub)
+            counts_bgsub_error = np.where(counts_bgsub>1, np.sqrt(counts_bgsub), 1)
+            # Propagate the raw-count and subtraction errors in quadrature.
+            counts_bgsub_error = np.sqrt(counts_bgsub_error**2 + counts_error**2)
+
+        except:
+            if(self._verbose):
+                print("Background subtraction failed. 
Setting background subtracted spectrum to have no events.") + counts_bgsub = counts + counts_bg = np.zeros_like(counts) + counts_bgsub_error = counts_error + + + + ######################################################################################################## + #Find maximum from spline fit of background subtracted + ######################################################################################################## + + + try: + f_interp_counts_bgsub = UnivariateSpline(bin_numbers, + counts_bgsub, + w = 1/counts_bgsub_error, + k=4) + except: + raise Exception("Interpolation of background-subtracted spectrum failed. Fit cannot proceed.") + + try: + counts_cr_pts_bgsub = f_interp_counts_bgsub.derivative().roots() + except: + raise Exception("Root finding of background-subtracted spectrum failed. Fit cannot proceed.") + + if(len(counts_cr_pts_bgsub)<1): + raise Exception("No roots found in background-subtracted spectrum. Fit cannot proceed.") + + + counts_cr_vals_bgsub = f_interp_counts_bgsub(counts_cr_pts_bgsub) + max_peak = counts_cr_pts_bgsub[np.argmax(counts_cr_vals_bgsub)] + + idx_peaks = np.sort(np.concatenate( [np.arange(max_peak-G_fft, -G_fft/2, -G_fft), + np.array([max_peak]), + np.arange(max_peak+G_fft, nbins+G_fft/2, G_fft)] )) + + ######################################################################################################## + # - Select pedestal as closest peak position to estimated pedestal. + # - Select only peaks up to pedestal + # - Update pedestal position. + ######################################################################################################## + + + idx_Q_0 = np.argmin(abs(idx_peaks-Q_0_est)) + idx_peaks = idx_peaks[idx_Q_0:-1] + Q_0 = idx_peaks[0] + + + ######################################################################################################## + # - Find means and variances of all peaks in background-subtracted spectrum with greater than 100 events. 
+ # - Fit line to means to get Gain/Pedestal - correction for binning + # - Fit line to variances to Get sigma0/sigma1 + ######################################################################################################## + + + #################### + #MEAN IN RANGE METHOD + ##################### + +# idx_peak_mus = np.full(len(idx_peaks), None) +# idx_peak_dmus = np.full(len(idx_peaks), None) +# idx_peak_sigmas = np.full(len(idx_peaks), None) +# idx_peak_dsigmas = np.full(len(idx_peaks), None) +# idx_peak_vars = np.full(len(idx_peaks), None) +# idx_peak_dvars = np.full(len(idx_peaks), None) +# cond_sel_peaks = np.full(len(idx_peaks), False) +# idx_peak_numbers= np.arange(0, len(idx_peaks)).astype(int) + +# for p_i, peak in zip(idx_peak_numbers, idx_peaks): + +# cond_sel_peak = (bin_numbers>peak - G_fft/2) & (bin_numbers<peak + G_fft/2) + +# counts_bgsub_peak = counts_bgsub[cond_sel_peak] +# bin_numbers_peak = bin_numbers[cond_sel_peak] + +# N_peak = np.sum(counts_bgsub_peak) + +# if(N_peak>100): +# cond_sel_peaks[p_i]=True +# else: +# cond_sel_peaks[p_i]=False +# continue + + +# mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak) +# sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak) + +# dmu_peak = sigma_peak/np.sqrt(N_peak) +# dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2) + + +# idx_peak_mus[p_i] = mu_peak +# idx_peak_dmus[p_i] = dmu_peak + + +# idx_peak_sigmas[p_i] = sigma_peak +# idx_peak_dsigmas[p_i] = dsigma_peak + + + #################### + #ITERATIVE GAUS FIT METHOD + ##################### + + idx_peak_mus = np.full(len(idx_peaks), None) + idx_peak_dmus = np.full(len(idx_peaks), None) + idx_peak_sigmas = np.full(len(idx_peaks), None) + idx_peak_dsigmas = np.full(len(idx_peaks), None) + idx_peak_vars = np.full(len(idx_peaks), None) + idx_peak_dvars = np.full(len(idx_peaks), None) + cond_sel_peaks = np.full(len(idx_peaks), False) + idx_peak_numbers= np.arange(0, len(idx_peaks)).astype(int) + + + f_gaus = lambda x, mu, sigma: norm.pdf(x, loc=mu, scale=sigma) + + for p_i, peak in zip(idx_peak_numbers, idx_peaks): + + min_rng_peak = peak - G_fft/2 + max_rng_peak = peak + G_fft/2 + + cond_sel_peak = (bin_numbers>min_rng_peak) & (bin_numbers<max_rng_peak) + + counts_bgsub_peak = counts_bgsub[cond_sel_peak] + bin_numbers_peak = bin_numbers[cond_sel_peak] + + N_peak = np.sum(counts_bgsub_peak) + + if(N_peak>100): + cond_sel_peaks[p_i]=True + else: + cond_sel_peaks[p_i]=False + continue + + + mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak) + sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak) + + dmu_peak = sigma_peak/np.sqrt(N_peak) + dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2) + + delta_mu = np.inf + delta_sigma = np.inf + + try: + + iter_count = 0 + iter_failed = False + + while(iter_count<10 and (delta_mu>1 and delta_sigma>1)): + + mu_mns2sigma = max(mu_peak-2*sigma_peak, min_rng_peak) + mu_pls2sigma = min(mu_peak+2*sigma_peak, max_rng_peak) + + cond_sel_2sigma = (bin_numbers_peak>mu_mns2sigma) & (bin_numbers_peak<mu_pls2sigma) + + if(sum(cond_sel_2sigma)<3): + iter_failed=True + break + + + f_gaus_peak = BinnedLH(f_gaus, + bin_numbers_peak[cond_sel_2sigma], + counts_bgsub_peak[cond_sel_2sigma ], + 1 + ) + + m_gaus_peak = Minuit(f_gaus_peak, mu=mu_peak, sigma=sigma_peak) + m_gaus_peak.limits['mu'] = (mu_mns2sigma, mu_pls2sigma) + m_gaus_peak.limits['sigma'] = (epsilon(), 2.0*sigma_peak) + m_gaus_peak.errordef=m_pedestal.LIKELIHOOD + m_gaus_peak.migrad() + m_gaus_peak.hesse() + + + mu_peak_i = m_gaus_peak.values["mu"] + sigma_peak_i = 
m_gaus_peak.values["sigma"]
+
+                    dmu_peak_i = m_gaus_peak.errors["mu"]
+                    dsigma_peak_i = m_gaus_peak.errors["sigma"]
+
+                    if(iter_count==0):
+                        delta_mu = mu_peak_i
+                        delta_sigma = sigma_peak_i
+                    else:
+                        delta_mu = abs(mu_peak - mu_peak_i)
+                        delta_sigma = abs(sigma_peak - sigma_peak_i)
+
+                    mu_peak = mu_peak_i
+                    sigma_peak = sigma_peak_i
+
+                    dmu_peak = dmu_peak_i
+                    dsigma_peak = dsigma_peak_i
+
+                    iter_count += 1
+
+                if(iter_failed):
+                    if(self._verbose):
+                        print("Iterative 2-sigma Gaus fit for Peak {:d} failed because the fit range went below 3 bins in size. Using mean and standard deviation.".format(p_i))
+
+                    mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak)
+                    sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak)
+
+                    dmu_peak = sigma_peak/np.sqrt(N_peak)
+                    dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2)
+
+            except:
+                if(self._verbose):
+                    print("Iterative 2-sigma Gaus fit for Peak {:d} failed. Using mean and standard deviation.".format(p_i))
+
+                mu_peak = HistMean(counts_bgsub_peak, bin_numbers_peak)
+                sigma_peak = HistStd(counts_bgsub_peak, bin_numbers_peak)
+
+                dmu_peak = sigma_peak/np.sqrt(N_peak)
+                dsigma_peak = sigma_peak/np.sqrt(2*N_peak-2)
+
+            idx_peak_mus[p_i] = mu_peak
+            idx_peak_dmus[p_i] = dmu_peak
+
+            idx_peak_sigmas[p_i] = sigma_peak
+            idx_peak_dsigmas[p_i] = dsigma_peak
+
+        if(sum(cond_sel_peaks)<3):
+            if(self._verbose):
+                print("Not enough peaks to estimate starting parameters. Using nominal values.")
+            Q_0 = idx_peaks[0]
+            dQ_0 = 0.05*G_fft
+            G = G_fft
+            dG = 0.05*G_fft
+            sigma_0 = 0.05*G_fft
+            dsigma_0 = 0.005*G_fft
+            sigma_1 = 0.01*G_fft
+            dsigma_1 = 0.001*G_fft
+
+        else:
+            idx_peak_vars[cond_sel_peaks] = idx_peak_sigmas[cond_sel_peaks]**2
+            idx_peak_dvars[cond_sel_peaks] = 2*idx_peak_sigmas[cond_sel_peaks]*idx_peak_dsigmas[cond_sel_peaks]
+
+            f_mu_lin = HuberRegression(lambda x, G, Q_0: Linear(x, G, Q_0),
+                                       idx_peak_numbers[cond_sel_peaks],
+                                       idx_peak_mus[cond_sel_peaks],
+                                       idx_peak_dmus[cond_sel_peaks])
+
+            f_var_lin = HuberRegression(lambda x, var_1, var_0: Linear(x, var_1, var_0),
+                                        idx_peak_numbers[cond_sel_peaks],
+                                        idx_peak_vars[cond_sel_peaks],
+                                        idx_peak_dvars[cond_sel_peaks],
+                                        )
+
+            m_mu_lin = Minuit(f_mu_lin, Q_0 = Q_0, G = G_fft)
+            m_mu_lin.limits["Q_0"] = (epsilon(), nbins)
+            m_mu_lin.fixed["G"] = True
+            m_mu_lin.errordef = m_mu_lin.LEAST_SQUARES
+            m_mu_lin.migrad()
+            m_mu_lin.hesse()
+
+            m_var_lin = Minuit(f_var_lin, var_0 = 1, var_1 = 1)
+            m_var_lin.limits["var_0"] = (epsilon(), nbins)
+            m_var_lin.limits["var_1"] = (epsilon(), nbins)
+            m_var_lin.errordef = m_var_lin.LEAST_SQUARES
+            m_var_lin.migrad()
+            m_var_lin.hesse()
+
+            Q_0 = m_mu_lin.values["Q_0"]
+            dQ_0 = m_mu_lin.errors["Q_0"]
+
+            sigma_0 = np.sqrt(m_var_lin.values["var_0"])
+            dsigma_0 = (0.5/sigma_0)*m_var_lin.errors["var_0"]
+
+            sigma_1 = np.sqrt(m_var_lin.values["var_1"])
+            dsigma_1 = (0.5/sigma_1)*m_var_lin.errors["var_1"]
+
+        ########################################################################################################
+        # Use the final estimate of the pedestal to determine mu and lambda of the generalised Poisson
+        # distribution. k_max is taken as the number of peak positions found in the spectrum.
+        ########################################################################################################
+
+        mu_GP = max(epsilon(), GP_mu(mu_unsub-Q_0, sigma, gamma))
+        lbda_GP = min(max(epsilon(), GP_lbda(mu_unsub-Q_0, sigma, gamma)), 1-epsilon())
+        k_max = len(idx_peaks)
+
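+        ########################################################################################################
+        # Cap the number of light peaks used by the model. A worked example (hypothetical numbers):
+        # with 12 found peaks and _max_n_peaks = 8, k_max is reduced from 12 to 8; with only
+        # 3 found peaks, k_max is raised to the minimum of 4 required by the fit.
+        ########################################################################################################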
+        if(self._max_n_peaks is not None and k_max>self._max_n_peaks):
+            if(self._verbose):
+                print("Max # light peaks ({:d}) exceeded the set maximum number of light peaks. Setting # peaks to {:d}".format(k_max, self._max_n_peaks))
+            k_max = self._max_n_peaks
+
+        if(k_max<4):
+            if(self._verbose):
+                print("Max # light peaks ({:d}) less than 4, so cannot fit. Setting # peaks to 4".format(k_max))
+            k_max = 4
+
+        ########################################################################################################
+        # - Convert to charge units: K = (Q - Q_0)/G
+        # - Estimate the probability density in a range of 0.1 about the minimum, K=0.5: N_DCR_min = mean(counts[0.45<K<0.55])/0.1
+        # - Get N_DCR = sum(counts[0<K<0.5]) + 0.5*sum(counts[0.5<K<1])
+        # - Calculate the DCR.
+        ########################################################################################################
+
+        K = (bin_numbers - Q_0)/G_fft
+        dK = abs(K[1]-K[0])
+
+        ################################
+        # To integrate over K rather than over the bin index x (bw = dx = 1):
+        #   dK/dx = 1/G, so dx = G*dK and ∫counts dx = G*∫counts dK.
+        # This is necessary for the correct calculation of the error.
+        ################################
+
+        cond_NN = (K>=0.45)&(K<=0.55)
+        cond_Nc = (K<0.5)
+
+        NN_sum = max(np.sum(counts[cond_NN]), 1)
+        NN = np.mean(counts[cond_NN])
+
+        if(NN!=NN):
+            NN = float(counts[np.argmin(abs(K-0.5))])
+            NN_sum = float(NN)
+
+        Nc = max(float(np.sum(counts[cond_Nc])), 1)
+
+        R0p5 = NN/Nc
+
+        DCR = R0p5/(4*kwargs["tau"]*dK)
+        DCR = DCR*np.exp(DCR*kwargs["tau"])
+        dDCR = DCR*(1/np.sqrt(max(1, NN_sum)))
+
+        mu_DCR = DCR*(kwargs["t_gate"] + kwargs["t_0"])
+
+        if(DCR!=DCR or DCR<1e-9):
+            if(self._verbose):
+                print("DCR returned invalid, or less than 1e-9 GHz. Choosing DCR to be 1e-9 +/- 1e-9 GHz.")
+            DCR = 1e-9
+            dDCR = 1e-9
+
+        # Default the number of dark peaks to the number of light peaks (assumed default),
+        # then apply the configured cap and the minimum of 4 required by the fit.
+        k_max_dcr = k_max
+
+        if(self._max_n_peaks_dcr is not None):
+            if(self._verbose):
+                print("Setting # dark peaks to {:d}".format(self._max_n_peaks_dcr))
+            k_max_dcr = self._max_n_peaks_dcr
+
+        if(k_max_dcr<4):
+            if(self._verbose):
+                print("Max # dark peaks ({:d}) less than 4, so cannot fit. 
Setting # peaks to 4".format(k_max_dcr)) + k_max_dcr = 4 + + + + + self._hist_data["bw"] = bw + self._hist_data["bc_min"] = bin_centres[0] + self._hist_data["bins"]=bins + self._hist_data["nbins"]=nbins + self._hist_data["counts"] = counts + self._hist_data["counts_error"] = counts_error + self._hist_data["counts_bg"] = counts_bg + self._hist_data["counts_bgsub"] = counts_bgsub + self._hist_data["bin_centres"] = bin_centres + self._hist_data["bin_numbers"] = bin_numbers + + + self._hist_data["fft_amplitude"] = fft_amplitude + self._hist_data["fft_freq"] = fft_freq + self._hist_data["G_fft"] = G_fft + self._hist_data["Q_0_est"] = Q_0_est + + + self._hist_data["peaks"]["peak_position"] = idx_peaks + self._hist_data["peaks"]["peak_index"] = idx_peak_numbers + self._hist_data["peaks"]["cond_sel_peaks"] = cond_sel_peaks + self._hist_data["peaks"]["peak_mean"] = idx_peak_mus + self._hist_data["peaks"]["peak_mean_error"] = idx_peak_dmus + self._hist_data["peaks"]["peak_variance"] = idx_peak_vars + self._hist_data["peaks"]["peak_variance_error"] = idx_peak_dvars + self._hist_data["peaks"]["peak_std_deviation"] = idx_peak_sigmas + self._hist_data["peaks"]["peak_std_deviation_error"] = idx_peak_dsigmas + + + self._prefit_values["Q_0"] = Q_0 + self._prefit_values["G"] = G_fft + self._prefit_values["mu"] = mu_GP + self._prefit_values["lbda"] = lbda_GP + self._prefit_values["sigma_0"] = sigma_0 + self._prefit_values["sigma_1"] = sigma_1 + self._prefit_values["DCR"] = DCR + self._prefit_values["p_Ap"] = 0.00 + self._prefit_values["tau_Ap"] = 5.0 + self._prefit_values["tau"] = kwargs["tau"] + self._prefit_values["t_0"] = kwargs["t_0"] + self._prefit_values["t_gate"] = kwargs["t_gate"] + self._prefit_values["tau_R"] = kwargs["tau_R"] + self._prefit_values["k_max"]=k_max + self._prefit_values["k_max_dcr"]=k_max_dcr + self._prefit_values["len_pad"]=self._len_pad + + + + self._prefit_errors["Q_0"] = dQ_0 + self._prefit_errors["G"] = 1 + self._prefit_errors["mu"] = 0.1 + self._prefit_errors["lbda"] = 0.01 + self._prefit_errors["sigma_0"] = dsigma_0 + self._prefit_errors["sigma_1"] = dsigma_1 + self._prefit_errors["p_Ap"] = 0.05 + self._prefit_errors["tau_Ap"] = 0.05 + self._prefit_errors["tau"] = kwargs["tau_err"] + self._prefit_errors["t_0"] = kwargs["t_0_err"] + self._prefit_errors["tau_R"] = kwargs["tau_R_err"] + self._prefit_errors["t_gate"] = kwargs["t_gate_err"] + self._prefit_errors["DCR"] = dDCR + + + + diff --git a/example.py b/example.py new file mode 100644 index 0000000..f8a493d --- /dev/null +++ b/example.py @@ -0,0 +1,100 @@ + +import os +import sys +import numpy as np +import pandas as pd +from PeakOTron import PeakOTron +from joblib import dump +import time + +print("--------------------") +print('EXAMPLE SIPM CALIBRATION RUN') +print("--------------------") + + +out_dict = {} +files_to_fit = [] + +## Find all histograms in directory +for root, dirs, files in os.walk("./data/hamamatsu_pcb6/Light"): + + for file in files: + + if file.endswith(".txt"): + files_to_fit.append([file, os.path.join(root, file)]) + + +## Print files. 
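+## The file names are assumed to encode the bias voltage in their third
+## underscore-separated field, e.g. (hypothetical) "A_B_58p5V.txt" gives V = 58.5;
+## this field is parsed in the loop below.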
+
+print("Files to fit:")
+for i, (file, _) in enumerate(files_to_fit):
+    print('File {0}: {1}'.format(i, file))
+
+
+SiPM = "PM1125NS_SBO"
+
+
+## Loop through files
+for i, (file, path) in enumerate(files_to_fit):
+    items = file.split('_')
+
+    V = float(items[2].replace('V', '').replace('p', '.'))
+
+    print("\n\n")
+    print("===============================================================")
+    print("FIT {:d} - {:s}".format(i, file))
+    print("===============================================================")
+    print("\n\n")
+
+
+    ## Load files.
+    data = np.loadtxt(path, skiprows=8)
+
+    ## Create a PeakOTron Fit Object.
+    f_data = PeakOTron(verbose=True)
+
+    ## Perform fit. The default binning rule is "knuth"; "freedman" and "scott"
+    ## are also available, and bw=... overrides the rule. A bin width is still
+    ## required for the DCR calculation.
+    f_data.Fit(data,
+               tau=21.953,   # SLOW PULSE COMPONENT TIME CONSTANT (ns)
+               t_gate=104,   # GATE LENGTH (ns)
+               t_0=64,       # INTEGRATION TIME BEFORE GATE (ns)
+               tau_R=21.953  # RECOVERY TIME OF SIPM (ns)
+               )
+
+    f_data.PlotFit(xlabel="ADC", display=False, save_directory="./Results/{0}_fit.png".format(os.path.splitext(file)[0]))
+
+    dump(f_data, "./Results/{0}.joblib".format(os.path.splitext(file)[0]))
+
+    fit_out = {}
+    fit_val, fit_err = f_data.GetFitResults()
+    for key, val in fit_val.items():
+        print("{:s} : {:3.3E}".format(key, val))
+
+    fit_out["SiPM"] = SiPM
+    fit_out["V"] = V
+
+    for key, value in fit_err.items():
+        fit_out["d_{:s}".format(key)] = value
+
+    fit_out.update(fit_val)
+
+    if out_dict == {}:
+        for key in fit_out.keys():
+            out_dict[key] = []
+
+    for key in fit_out.keys():
+        out_dict[key].append(fit_out[key])
+
+    print("===============================================================")
+    print("\n\n")
+
+
+df = pd.DataFrame.from_dict(out_dict)
+df.to_csv("./fit_results_{:s}.csv".format(SiPM))
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..0b369a4
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,8 @@
+astropy==3.0.2
+iminuit==2.17.0
+joblib==0.13.2
+matplotlib==3.1.3
+numba==0.45.1
+numpy==1.17.2
+pandas==1.1.3
+scipy==1.3.1
-- 
GitLab