from scipy.signal import fftconvolve as convolve
from itertools import product, combinations
from scipy.interpolate import interp1d
from scipy.stats import poisson, norm, binom
from AdditionalPDFs import gpoisson, borel
from scipy.integrate import simps
from numba import njit 
import numpy as np
from functools import lru_cache
import matplotlib.pyplot as plt

#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
########################################################################################################
## DETECTOR RESPONSE MODEL
########################################################################################################
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%



########################################################################################################
## VARIABLES:
########################################################################################################

########################################################################################################
## Q: bin centres of studied charge distribution in arb. units
## Q_0: pedestal position in arb. units
## G: gain in arb. units
## mu: mean number of Geiger discharges in n.p.e.
## lbda: GP-branching parameter (unitless)
## sigma_0: electronics noise/pedestal width in arb. units
## sigma_1: gain spread width in arb. units
## tau_Ap: afterpulse time constant in nanoseconds
## p_Ap: afterpulse scaling parameter (unitless)
## DCR: dark count rate in gigahertz
## tau: slow-component time constant of SiPM pulse in nanoseconds
## tau_R: recovery time of SiPM in nanoseconds
## t_gate: gate length in nanoseconds
## t_0: pre-gate integration window in nanoseconds
## k_max: estimated maximum number of photoelectron peaks to fit, based on a percentile of the Poisson distribution with the mean estimated by PeakOTron
## k_max_dcr: estimated maximum number of dark counts to evaluate
## len_pad: number of bins to pad for convolution. Default value: 100 bins
## A: overall normalisation; scales the output p.d.f. to counts
##
########################################################################################################





########################################################################################################
## FUNCTION DEFINITIONS:
########################################################################################################












########################################################################################################
## epsilon() 
########################################################################################################
##
## Returns machine epsilon for thresholding zeros
##
########################################################################################################


@njit('float64()')
def epsilon():
    return np.finfo(np.float64).eps

########################################################################################################
## sigmak(k, sigma_0, sigma_1) 
########################################################################################################
##
## Returns width of peak number k: sigma_k(k, sigma_0, sigma_1) = sqrt(sigma_0^2 + k*sigma_1^2)
##
########################################################################################################

@njit
def sigmak(k, sigma_0, sigma_1):
    return np.sqrt(sigma_0**2 + k*sigma_1**2)
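########################################################################################################
## Illustrative check (uncomment to run; sigma values are arbitrary examples in arb. units):
## peak widths grow as sqrt(sigma_0^2 + k*sigma_1^2), here sqrt(0.01 + 0.0025*k).
##
# for _k in range(4):
#     print(_k, sigmak(_k, 0.1, 0.05))   # ~0.100, 0.112, 0.122, 0.132
########################################################################################################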



########################################################################################################
## Convolve(A, B, idx_0, length, dx, norm=True) 
########################################################################################################
##
## Calculates the convolution of A and B using scipy fftconvolve, selects the output range aligned with
## the pedestal index (undoing the shift introduced by the convolution), and multiplies by dx to renormalise
## the distribution. Then corrects for any nans/infs and, if norm=True, normalises using the trapezoidal rule.
##
########################################################################################################

def Convolve(A, B, idx_0, length, dx, norm=True):
    A_o_B = convolve(A, B)[idx_0:length+idx_0]*dx
    A_o_B = np.nan_to_num(A_o_B, nan=0.0, posinf=0.0, neginf=0.0)
    if(norm):
        A_o_B = A_o_B/max(epsilon(), np.trapz(A_o_B, dx=dx))
    return A_o_B
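########################################################################################################
## Illustrative check (uncomment to run; grid and Gaussian parameters are arbitrary examples):
## convolving two Gaussians should give a Gaussian whose mean and variance are the sums of the inputs'.
##
# dx = 0.01
# x = np.arange(-5, 15, dx)
# idx_0 = np.argmin(np.abs(x))                      # pedestal (zero) index of the grid
# A = norm.pdf(x, loc=1.0, scale=0.1)
# B = norm.pdf(x, loc=2.0, scale=0.2)
# C = Convolve(A, B, idx_0, len(x), dx)             # ~ norm.pdf(x, loc=3.0, scale=np.sqrt(0.05))
# print(np.trapz(C, dx=dx))                         # ~1.0
########################################################################################################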



#######################################################################################################
# ConvolveList(A, B, idx_0, length, dx, norm=True) 
# Applies 'Convolve' iteratively (left fold) across a list of distributions.
#######################################################################################################


def ConvolveList(lst, idx_0, length, dx, norm=True):
    res = lst[0]
    for val in lst[1:]:
        res = Convolve(res, val, idx_0, length, dx, norm=norm)
    return res
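########################################################################################################
## Illustrative check (uncomment to run; reuses the grid x, idx_0, dx of the Convolve example above):
## means add and variances add under repeated convolution.
##
# Ds = [norm.pdf(x, loc=m, scale=0.1) for m in (1.0, 1.5, 2.0)]
# D = ConvolveList(Ds, idx_0, len(x), dx)           # ~ norm.pdf(x, loc=4.5, scale=0.1*np.sqrt(3))
########################################################################################################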

#######################################################################################################
# Multichoose(n, k)
# Returns all length-n lists of non-negative integers that sum to k (multichoose/stars-and-bars).
#######################################################################################################
    
@lru_cache(maxsize=None)
def Multichoose(n,k):
    if k < 0 or n < 0: raise ValueError("Multichoose requires n >= 0 and k >= 0")
    if not k: return [[0]*n]
    if not n: return []
    if n == 1: return [[k]]
    return [[0]+val for val in Multichoose(n-1,k)] + \
        [[val[0]+1]+val[1:] for val in Multichoose(n,k-1)]
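########################################################################################################
## Illustrative check (uncomment to run):
##
# print(Multichoose(2, 2))   # [[0, 2], [1, 1], [2, 0]]
# print(Multichoose(3, 1))   # [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
########################################################################################################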

















########################################################################################################
## AFTERPULSE FUNCTIONS
########################################################################################################


########################################################################################################
## Beta(tau_R, tau_Ap, t_gate):
########################################################################################################
#
# Normalisation constant for the afterpulse p.d.f.:
#
# Beta(tau_R, tau_Ap, t_gate) = ∫ (1 - exp(-t/tau_R)) * exp(-t/tau_Ap)/tau_Ap dt, from 0 to t_gate
#
########################################################################################################
@njit
def Beta(tau_R, tau_Ap, t_gate):
    return (tau_Ap - np.exp(-t_gate/tau_Ap)*(tau_Ap + tau_R*(1 - np.exp(-t_gate/tau_R))))/(tau_Ap + tau_R)
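########################################################################################################
## Illustrative check (uncomment to run; time constants are arbitrary example values in ns): the
## closed form should agree with direct numerical integration of the defining integral.
##
# from scipy.integrate import quad
# tau_R, tau_Ap, t_gate = 20.0, 10.0, 100.0
# num, _ = quad(lambda t: (1 - np.exp(-t/tau_R))*np.exp(-t/tau_Ap)/tau_Ap, 0, t_gate)
# print(num, Beta(tau_R, tau_Ap, t_gate))           # should agree to numerical precision
########################################################################################################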

########################################################################################################
## P_Ap(tau_R, tau_Ap, t_gate):
########################################################################################################
#
# Calculates the total afterpulse probability from the scaling factor p_Ap and the maximum discharge scaling factor Beta.
#
# Since 0 < Beta < 1, the afterpulse probability within the gate is at most p_Ap.
########################################################################################################
@njit
def P_Ap(tau_R, tau_Ap, t_gate, p_Ap):
    return p_Ap*Beta(tau_R, tau_Ap, t_gate)
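########################################################################################################
## Illustrative check (uncomment to run; values are arbitrary examples): since 0 < Beta < 1,
## P_Ap is always strictly below p_Ap.
##
# print(P_Ap(20.0, 10.0, 100.0, 0.05))              # < 0.05
########################################################################################################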


########################################################################################################
## dPAp(tau_R, tau_Ap, t_gate, p_Ap)
########################################################################################################
##
## Returns the error on the total afterpulse probability P_Ap occurring between 0 and t_gate,
## propagated from the uncertainties dtau_Ap and dp_Ap via the partial derivatives of P_Ap.
##
########################################################################################################

@njit
def dP_Ap(tau_R, tau_Ap, t_gate, p_Ap, dtau_Ap, dp_Ap):
    dPApdtauAp = p_Ap*((t_gate*(-tau_Ap - tau_R*(1 - np.exp(-t_gate/tau_R)))*np.exp(-t_gate/tau_Ap))/tau_Ap**2 + 1 - np.exp(-t_gate/tau_Ap))/(tau_Ap+tau_R) - p_Ap*(tau_Ap - (tau_Ap + tau_R*(1 - np.exp(-t_gate/tau_R)))*np.exp(-t_gate/tau_Ap))/(tau_Ap + tau_R)**2
    
    dPApdpAp = (tau_Ap - (tau_Ap + tau_R*(1 - np.exp(-t_gate/tau_R)))*np.exp(-t_gate/tau_Ap))/(tau_Ap + tau_R)
    
    dPAp =  np.sqrt((dPApdtauAp**2)*(dtau_Ap**2) + (dPApdpAp**2)*(dp_Ap**2))
    
    return dPAp


########################################################################################################
## P1(tau_R, tau_Ap, t_gate):
########################################################################################################
#
# Antiderivative of the unnormalised afterpulse time p.d.f., so that
# P1(t1) - P1(t0) = ∫ (1 - exp(-t/tau_R)) * exp(-t/tau_Ap)/tau_Ap dt from t0 to t1, where t <= t_gate
#
########################################################################################################
@njit
def P1(t, tau_R, tau_Ap, t_gate):
    p = -np.exp(-t/tau_Ap)*(tau_Ap + tau_R*(1 - np.exp(-t/tau_R)))/(tau_R + tau_Ap)
    return p

########################################################################################################
## EvalP1(t0, t1, tau_R, tau_Ap, t_gate):
########################################################################################################
#
# Unnormalised c.d.f. difference of the afterpulse time p.d.f.:
# EvalP1(t0, t1) = ∫ (1 - exp(-t/tau_R)) * exp(-t/tau_Ap)/tau_Ap dt from t0 to t1, where t0 <= t1 <= t_gate.
# Dividing by Beta(tau_R, tau_Ap, t_gate) yields the probability P(t0 < T < t1).
#
########################################################################################################
@njit
def EvalP1(t0, t1, tau_R, tau_Ap, t_gate):
    return (P1(t1, tau_R, tau_Ap, t_gate) - P1(t0, tau_R, tau_Ap, t_gate))
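########################################################################################################
## Illustrative check (uncomment to run; values are arbitrary examples): integrating over the whole
## gate recovers Beta, so EvalP1(t0, t1, ...)/Beta(...) is a proper c.d.f. difference.
##
# tau_R, tau_Ap, t_gate = 20.0, 10.0, 100.0
# print(EvalP1(0.0, t_gate, tau_R, tau_Ap, t_gate), Beta(tau_R, tau_Ap, t_gate))   # equal
########################################################################################################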

########################################################################################################
## tAp( Q, tau, t_gate):
########################################################################################################
#
# Inverts the charge-time relation of an afterpulse: returns the time t <= t_gate/2 at which a
# discharge deposits the charge fraction Q. The second branch of solutions is t_gate - tAp(Q).
#
########################################################################################################
@njit
def tAp( Q, tau, t_gate):
    return t_gate/2 - tau*np.arccosh(((1-Q)*np.exp(t_gate/(2*tau)) + np.exp(-t_gate/(2*tau)))/2)
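########################################################################################################
## Illustrative round-trip check (uncomment to run; values are arbitrary examples): tAp inverts the
## charge-time relation Q(t) = (1 - exp(-t/tau))*(1 - exp(-(t_gate-t)/tau)) (obtained by inverting the
## arccosh expression above), returning the early branch t < t_gate/2.
##
# tau, t_gate = 25.0, 100.0
# t = 30.0
# Q_t = (1 - np.exp(-t/tau))*(1 - np.exp(-(t_gate - t)/tau))
# print(tAp(Q_t, tau, t_gate))                      # ~30.0
########################################################################################################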


########################################################################################################
## dp1dQ(Q, tau, tau_R, tau_Ap, t_gate) 
########################################################################################################
##
## Returns the discretised afterpulse p.d.f. as a function of n.p.e.
## Note: as in Chmill et al., except that the two branches of solutions, t and t_gate - t, are summed together; this demonstrably yields a normalised probability distribution.
##
## To understand what is going on here, it takes a little explaining.
## We exploit the fact that the cumulative distribution functions satisfy F_Q(Q<q) = F_T(T<tAp(q)), from which the result of the paper can be derived:
##
## F_Q(Q<q) = F_T(T<tAp(q))
## f_Q(q) = F_T'(tAp(q)) x |dtAp/dQ|
## |dtAp/dQ| = |dQ/dtAp|^-1
## F_T'(t) = fAp(t)
## f_Q(q) = fAp(tAp(q)) x |dQ/dtAp|^-1
##
## We do not need to evaluate any of these derivatives explicitly. All we need to do is evaluate F_T(T<tAp(Q)) between the bin edges in the time domain, then divide through by dQ.
##
## This is easy. We evaluate the integral ∫ (1 - exp(-t/tau_R)) * exp(-t/tau_Ap)/tau_Ap dt (P1) between the bin edges given by Q -/+ dx/2, then divide through by beta and the bin width.
## 
## 1. Calculate Q_max for afterpulse spectrum.
## 
## 2. Calculate beta for the afterpulse spectrum. 
## 
## 3. Select the indices that are within the range of 0 and Q_max
##
## 4. Create empty arrays for the p.d.f.s of the two branches (pdf_b0, pdf_b1) and for the bin-edge times (t_Ap_m, t_Ap_p)
##
## 5. Find the bin edges in t_Ap space by evaluating t_Ap_m = tAp(Q-dx/2), t_Ap_p = tAp(Q+dx/2)
## 
## 6. Integrate over the bins forwards in time using EvalP1 for pdf_b0
## 
## 7. Now to evaluate the edges. The problem here is that t_Ap_m and t_Ap_p may have elements outside [0, t_gate/2], which we cannot account for.
##    We replace the first bin of the distribution with the integral from 0 to the upper edge of the first bin.
##
##    Note that Q[idx_Q_min+1]-dx/2 = Q[idx_Q_min]+dx/2, so t_Ap_m[idx_Q_min+1] is the upper edge of the first bin.
##    The reason that we do it this way is that t_Ap_m[idx_Q_min] is not a valid lower edge (it may lie below Q = 0, where tAp is undefined),
##    so the integral of the first bin is evaluated between 0 and t_Ap_m[idx_Q_min+1].
##
##    You can check the integral is correct by uncommenting the code.
##
## 8. We replace the last bin of the distribution with the integral from its lower edge to t_gate/2.
##
##    Note that Q[idx_Q_max-1]+dx/2 = Q[idx_Q_max]-dx/2, so t_Ap_p[idx_Q_max-1] is the lower edge of the last bin.
##    The reason that we do it this way is that t_Ap_p[idx_Q_max] is never evaluated (it would lie above Q_max),
##    so the integral of the last bin is evaluated between t_Ap_p[idx_Q_max-1] and t_gate/2.
##
##    You can check the integral is correct by uncommenting the code.
## 
## 9. Repeat 6-8 for pdf_b1. Note that because time runs in reverse for this branch, the edge evaluations have to be reversed too!
##
## 10. Normalise the integral by beta and dx.
##
##    IMPORTANT NOTE!
##    As the granularity of the spectrum increases, poles invariably appear:
##    numpy cannot accurately evaluate np.log(1 - epsilon()). This happens when tau is very small relative to t_0 or t_gate.
##
##    Remove the @njit decorator before uncommenting the print/plot checks!
########################################################################################################
@njit
def dp1dQ(Q, tau, tau_R, tau_Ap, t_gate, dx):
    

    #1. 
    Q_max = (1-np.exp(-t_gate/(2*tau)))**2
    
      
    if(Q_max>1-epsilon()):
        raise Exception("The afterpulse spectrum of this distribution cannot be normalised, because of the pole at Q_max, resulting in infinite probability density. This is probably because tau is very small, or tgate is very large compared to the binning of the spectrum. Try increasing tau, or reducing tgate")
  
    #2
    beta = Beta(tau_R, tau_Ap, t_gate)

    #3 
    idx_Q_max = np.argmax(Q>Q_max)-1
    idx_Q_min = np.argmax(Q>0)
    
    #4
    t_Ap_m = np.zeros_like(Q)
    t_Ap_p = np.zeros_like(Q)
    pdf_b0 = np.zeros_like(Q)
    pdf_b1 = np.zeros_like(Q)

    #5
    t_Ap_m[idx_Q_min:idx_Q_max]= tAp(Q[idx_Q_min:idx_Q_max]-dx/2, tau, t_gate)
    t_Ap_p[idx_Q_min:idx_Q_max]= tAp(Q[idx_Q_min:idx_Q_max]+dx/2, tau, t_gate)

    #6
    pdf_b0[idx_Q_min:idx_Q_max] = EvalP1(t_Ap_m[idx_Q_min:idx_Q_max], 
                                   t_Ap_p[idx_Q_min:idx_Q_max], 
                                   tau_R, 
                                   tau_Ap, t_gate)

    #7.
    pdf_b0[idx_Q_min] = EvalP1(0, t_Ap_m[idx_Q_min+1], tau_R, tau_Ap, t_gate)
    
    #8.
    pdf_b0[idx_Q_max] = EvalP1(t_Ap_p[idx_Q_max-1], t_gate/2, tau_R, tau_Ap, t_gate)
    
    #########################################
    ## Check integral of pdfb0:
    #########################################
    
#     print("CHECK INTEGRAL pdfb0 ", 
#           np.trapz(pdf_b0/beta/dx, dx=dx),
#           EvalP1(0, t_gate/2, tau_R, tau_Ap, t_gate)/beta,
#           np.allclose(np.trapz(pdf_b0/beta/dx, dx=dx), 
#                       EvalP1(0, t_gate/2, tau_R, tau_Ap, t_gate)/beta))
    
    
    #########################################
    
    
    #9.
    pdf_b1[idx_Q_min:idx_Q_max] = EvalP1(t_gate-t_Ap_p[idx_Q_min:idx_Q_max], 
                                   t_gate-t_Ap_m[idx_Q_min:idx_Q_max], 
                                   tau_R, 
                                   tau_Ap, t_gate)



    pdf_b1[idx_Q_min] = EvalP1(t_gate-t_Ap_m[idx_Q_min+1], t_gate, tau_R, tau_Ap, t_gate)
    pdf_b1[idx_Q_max] = EvalP1(t_gate/2, t_gate-t_Ap_p[idx_Q_max-1], tau_R, tau_Ap, t_gate)
    
    #########################################
    ## Check integral of pdfb1:
    #########################################
        
#     print("CHECK INTEGRAL pdfb0 ", 
#           np.trapz(pdf_b1/beta/dx, dx=dx),
#           EvalP1(t_gate/2, t_gate, tau_R, tau_Ap, t_gate)/beta,
#           np.allclose(np.trapz(pdf_b1/beta/dx, dx=dx), 
#                       EvalP1(t_gate/2, t_gate, tau_R, tau_Ap, t_gate)/beta))

    #########################################
    #10
    pdf = (pdf_b0 + pdf_b1)/beta/dx
    
    
    ########################################
    # Check integral of pdf:
    ########################################
    
#     print("CHECK INTEGRAL pdf ", np.trapz(pdf, dx=dx), 1, np.allclose(np.trapz(pdf, dx=dx), 1))

#     plt.figure()
#     plt.plot(Q, pdf)
#     plt.xlim(0-5*dx, Q_max+5*dx)
#     plt.yscale("log")
#     plt.show()
    
    ########################################

    return pdf
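########################################################################################################
## Illustrative check (uncomment to run; parameter values are arbitrary examples): the discretised
## afterpulse p.d.f. should integrate to ~1 over a grid that covers [0, Q_max].
##
# dx = 1e-3
# Q = np.arange(-0.1, 1.1, dx)
# pdf = dp1dQ(Q, 25.0, 20.0, 10.0, 100.0, dx)       # tau, tau_R, tau_Ap, t_gate, dx
# print(np.trapz(pdf, dx=dx))                       # ~1.0
########################################################################################################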













########################################################################################################
## DARK COUNT FUNCTIONS
########################################################################################################




########################################################################################################
## EvalDCRBFG(Q_0, Q_1, h_order)
########################################################################################################
##
## Evaluates the integral ∫ 1/(Q/h_order) dQ between Q_0 and Q_1 (the before-gate contribution).
## h_order is the 'stretching factor' of the distribution for cross-talk.
########################################################################################################

@njit
def EvalDCRBFG(Q_0, Q_1, h_order):
    return h_order*np.log(Q_1) - h_order*np.log(Q_0)

########################################################################################################
## EvalDCRDG(Q_0, Q_1, h_order)
########################################################################################################
##
## Evaluates the integral ∫ 1/(1 - (Q/h_order)) dQ between Q_0 and Q_1 (the during-gate contribution).
## h_order is the 'stretching factor' of the distribution for cross-talk.
########################################################################################################

@njit
def EvalDCRDG(Q_0, Q_1, h_order):
    return -h_order*np.log(h_order-Q_1) + h_order*np.log(h_order-Q_0)
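########################################################################################################
## Illustrative check (uncomment to run; values are arbitrary examples): over their full ranges, the
## two pieces integrate to the before-gate and during-gate weights used in dpDCR1dQ below.
##
# tau, t_gate, t_0, h = 25.0, 100.0, 50.0, 1.0
# Q_min = h*np.exp(-t_0/tau)*(1 - np.exp(-t_gate/tau))
# Q_max = h*(1 - np.exp(-t_gate/tau))
# print(EvalDCRBFG(Q_min, Q_max, h), h*t_0/tau)     # equal
# print(EvalDCRDG(0.0, Q_max, h), h*t_gate/tau)     # equal
########################################################################################################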



########################################################################################################
## dpDCR1dQ(Q, tau, t_gate, t_0, dx, h_order=1)
########################################################################################################
##
## Evaluates the discretised dark count distribution at cross-talk order h_order, which stretches the distribution.
## h_order=1 means the primary dark count distribution.
##
## The distribution has to be artificially discretised and normalised.
## There are poles in this distribution, so we have to be extremely careful to normalise its edges.
## The method is explained here.
##
## dpDCR1dQ(Q, tau, t_gate, t_0) = (tau/(t_gate + t_0)) * (1/Q + 1/(1-Q)), for h_order = 1
## 
## 1. Evaluate the maxima and minima Q_min, and Q_max. We cannot take Q/h_order, so for the integral 
##    to be properly normalised, we scale the limits by the stretching factor, h_order.
## 
## 2. Evaluate the first bin indices of Q above Q_min and above 0, and the last bin index below Q_max.
##    Q_min to Q_max are the limits for discharges before the gate; 0 to Q_max are the limits for discharges during the gate.
## 
## 3. Create empty zero arrays for the p.d.f.s. pdf_bfg is the p.d.f. before the gate, and pdf_dg is the p.d.f. during the gate.
## 
## 4. For all the available indices between the limits, evaluate the integrals of EvalDCRBFG, between each bin edge.
##    This will become our probability after scaling. 
## 
## 5. Find the integral from Q_min to the lower bin edge of the range that we have evaluated, 
##    and set it to the first available bin index.
##
##    Note that Q[idx_Q_0+1]-dx/2 = Q[idx_Q_0] + dx/2. The reason that we do it this way is 
##    for consistency with the dp1dQ method, which has special considerations (see dp1dQ function)
##    
## 
## 6. Find the integral from the upper bin edge that we have evaluated to Q_max,
##    and set it to the last available bin index.
##   
##    Note that Q[idx_Q_0-1]+dx/2 = Q[idx_Q_0] - dx/2. The reason that we do it this way is 
##    for consistency with the dp1dQ method, which has special considerations (see dp1dQ function)
##    
##    You can check the function is working at this stage by uncommenting the integral check. The integral should be t_0/tau
## 
## 7. Repeat steps 4-6 for EvalDCRDG, this time using 0 as the lower limit. Everything stated in 4-6 still applies.
##
##    You can check the function is working at this stage by uncommenting the integral check. The integral should be t_gate/tau
## 
## 8. We obtain the probability per bin by multiplying by the normalisation constant tau/(t_0 + t_gate).
##    
##    We also need to divide through by h_order, because the integral is h_order times as large if we scale 
## 
##    Then we divide through by dx to get the discretised probability distribution.
## 
##    You can check the function is working at this stage by uncommenting the integral check. The integral should be 1.0
##   
## 9. We divide through by the integral to correct for numerical error.
##    
##    IMPORTANT NOTE!
##    As the granularity of the spectrum increases, poles invariably appear:
##    numpy cannot accurately evaluate np.log(1 - epsilon()). This happens when tau is very small relative to t_0 or t_gate.
##    
########################################################################################################

@njit
def dpDCR1dQ(Q, tau, t_gate, t_0, dx, h_order=1):
    
    #1. 
    
    
    Q_min = h_order*np.exp(-t_0/tau)*(1 - np.exp(-t_gate/tau))
    Q_max = h_order* (1 - np.exp(-t_gate/tau))
    

    if(Q_min<h_order*epsilon()):
        raise Exception("The dark count spectrum of this distribution cannot be normalised, because of the pole at Q_min, resulting in infinite probability density. This is probably because tau is very small, or tgate/t0 is very large compared to the binning of the spectrum. Try increasing tau, or reducing tgate/t0")
    

    
    if(Q_max>h_order-h_order*epsilon()):
        raise Exception("The dark count spectrum of this distribution cannot be normalised, because of the pole at Q_max, resulting in infinite probability density. This is probably because tau is very small, or tgate/t0 is very large compared to the binning of the spectrum")
    

    #2.
    
    idx_Q_min = np.argmax(Q>Q_min)
    idx_Q_max = np.argmax(Q>Q_max)-1
    idx_Q_0 = np.argmax(Q>0)
    
    
    #3.
    pdf_bfg = np.zeros_like(Q)
    pdf_dg = np.zeros_like(Q)

    #4.
    pdf_bfg[idx_Q_min:idx_Q_max] = EvalDCRBFG(Q[idx_Q_min:idx_Q_max]-dx/2, 
                                              Q[idx_Q_min:idx_Q_max]+dx/2, h_order)
    
    #5.
    pdf_bfg[idx_Q_min]= EvalDCRBFG(Q_min, Q[idx_Q_min+1]-dx/2, h_order)
    
    #6.
    pdf_bfg[idx_Q_max]= EvalDCRBFG(Q[idx_Q_max-1]+dx/2, Q_max, h_order)
    
    #########################################
    ## Check integral of pdf_bfg:
    #########################################
    
    #print("CHECK INTEGRAL pdf_bfg ", np.trapz(pdf_bfg/dx, dx=dx), t_0/tau, np.allclose(np.trapz(pdf_bfg/dx, dx=dx), t_0/tau))
    
    #########################################

    #7.
    pdf_dg[idx_Q_0:idx_Q_max] = EvalDCRDG(Q[idx_Q_0:idx_Q_max]-dx/2, 
                                            Q[idx_Q_0:idx_Q_max]+dx/2, h_order)
    
    pdf_dg[idx_Q_0]= EvalDCRDG(0, Q[idx_Q_0+1]-dx/2, h_order)
    pdf_dg[idx_Q_max]= EvalDCRDG(Q[idx_Q_max-1]+dx/2, Q_max, h_order)
    
    
    #########################################
    ## Check integral of pdf_dg:
    #########################################
    
    #print("CHECK INTEGRAL pdf_dg ", np.trapz(pdf_dg/dx, dx=dx), t_gate/tau, np.allclose(np.trapz(pdf_dg/dx, dx=dx), t_gate/tau))
    
    #########################################
    
    
    #8.
    pdf = (pdf_bfg + pdf_dg)*(tau/(t_gate + t_0))
    pdf = pdf/dx/h_order

    
    #########################################
    ## Check integral of pdf:
    #########################################
    
#     print("CHECK INTEGRAL pdf ", np.trapz(pdf, dx=dx), 1, np.allclose(np.trapz(pdf, dx=dx), 1))

#     plt.figure()
#     plt.plot(Q, pdf)
#     plt.xlim(0-5*dx, Q_max+5*dx)
#     plt.yscale("log")
#     plt.show()
    
    #########################################
    
    
    #9.
    pdf = pdf/max(epsilon(), np.trapz(pdf, dx=dx))
    
    
    return pdf
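########################################################################################################
## Illustrative check (uncomment to run; parameter values are arbitrary examples): the discretised
## dark count p.d.f. should integrate to ~1 for any cross-talk order h_order, provided the grid covers
## [0, Q_max] = [0, h_order*(1 - exp(-t_gate/tau))].
##
# dx = 1e-3
# Q = np.arange(-0.1, 2.1, dx)
# print(np.trapz(dpDCR1dQ(Q, 25.0, 100.0, 50.0, dx, 1), dx=dx))   # ~1.0
# print(np.trapz(dpDCR1dQ(Q, 25.0, 100.0, 50.0, dx, 2), dx=dx))   # ~1.0
########################################################################################################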





########################################################################################################
## DRM(Q, Q_0, G, mu, lbda, sigma_0, sigma_1, tau_Ap, p_Ap, DCR, tau, tau_R, t_gate, t_0, k_max, k_max_dcr, len_pad, A) 
########################################################################################################
##
## Calculates the full detector response model.
## Notes:
## - The model is fitted in n.p.e. scale: k = (Q - Q_0)/G.
## - To rescale to the input parameter unit, the final p.d.f. is scaled by a factor of 1/G -
##   this is because dp/dQ = (dp/dk)*(dk/dQ), and dk/dQ = d/dQ (Q - Q_0)/G = 1/G.
## - The output is multiplied by the amplitude A, returning expected counts rather than a normalised p.d.f.
##
########################################################################################################

def DRM(Q, Q_0, G, mu, lbda, sigma_0, sigma_1, tau_Ap, p_Ap, DCR, tau, tau_R, t_gate, t_0, k_max, k_max_dcr, len_pad, A):
    

    t_0 = abs(t_0)
    len_pad = int(len_pad)
    

    
    len_Q = len(Q)
    len_Q_pad = len_Q + 2*len_pad
    Q_idx = np.arange(0, len_Q)
    Q_idx_pad =  np.arange(-len_pad, len_Q+len_pad)
    dx = abs(Q[1]-Q[0])
    
    Q_pad = (Q[0] - dx*len_pad) + dx*np.arange(len_Q_pad)
    
    k_pad = (Q_pad - Q_0)/G
    
    dx_k = dx/G
    
    idx_k_0_pad = np.argmin(abs(k_pad))
    


    # Boolean mask selecting the original (unpadded) bins from the padded index range.
    Q_idx_orig = np.isin(Q_idx_pad, Q_idx)

        
    k_pg = np.arange(0, max(k_max, 3)).astype(int)
    
     
            
    
    Ppgs = np.expand_dims(gpoisson.pmf(k_pg, mu, lbda), -1)   
    
    Pns = np.zeros((len(k_pg), len(k_pg)))  # sized to len(k_pg) = max(k_max, 3) so the indexing below stays in range
    
    alpha = P_Ap(tau_R, tau_Ap, t_gate, p_Ap)
    
    dp0dQs = np.vstack([norm.pdf(k_pad, loc=_k, scale=sigmak(_k, sigma_0/G, sigma_1/G)) for _k in k_pg])
    
    dpndQs = []
        

    for _k in k_pg:
        if(_k==0):
            _dpndQ = np.zeros(len(k_pad))
            Pns[0,0] = binom.pmf(0, 0, alpha)
        elif(_k==1):
            _dpndQ = dp1dQ(k_pad, tau, tau_R, tau_Ap, t_gate, dx_k)
            
            
            Pns[1,0:2] = np.array(binom.pmf(np.arange(0, 2).astype(int), _k, alpha))
        else:
            _dpndQ = Convolve(dpndQs[1], dpndQs[-1], idx_k_0_pad, len_Q_pad, dx_k, norm=True)
            Pns[_k,0:_k+1] = np.array(binom.pmf(np.arange(0, _k+1).astype(int), _k, alpha))

        dpndQs.append(_dpndQ)
            
    dpndQs = np.vstack(dpndQs)
    
    
    
    
    mu_DCR = DCR*(t_gate + t_0)    
    k_dc = np.arange(0, max(k_max_dcr, 3)).astype(int)

    fDCRs =  []
    hDCRs = []

    pdf_dark = np.zeros(len_Q_pad)

    for _k in k_dc:
        if(_k==0):
            f = np.zeros(len_Q_pad)
            f[idx_k_0_pad] = 1/dx_k
            h=f

        elif(_k==1):
            f = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k,  h_order=1)
            h = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k,  h_order=2)
            
            
        else:
            f = Convolve(fDCRs[1], fDCRs[-1], idx_k_0_pad, len_Q_pad, dx_k, norm=True)
            h = dpDCR1dQ(k_pad, tau, t_gate, t_0, dx_k, h_order=_k+1)


        fDCRs.append(f)
        hDCRs.append(h)

    fDCRs = np.vstack(fDCRs)
    hDCRs = np.vstack(hDCRs)
    
    
    pDCRs_0 = poisson.pmf(0, mu_DCR)
    pDCRs = []
    dpDCRdQs = []
    
    for k in k_dc:

        if(k==0):
            continue
            
        else:

            _pDCR_k = []
            _dpDCRdQ_k = []
            
            for j in range(1, k+1):


#                 combs = np.array(list(product(range(k-j+1), repeat=j)))
#                 combs = combs[np.sum(combs, axis=1)==k-j]
                combs = Multichoose(j, k-j)

                for comb in combs:



                    _dpDCRdQ_jks = []
                    _lp_B_jks = []


                    for _v, _f in zip(*np.unique(comb, return_counts=True)):
                        if(_v==0):
                            _dpDCRdQ_jk = [fDCRs[_f]]
                            if(_f>1):
                                _lp_B_jk = [_f*borel.logpmf(0, lbda)]
                            else:
                                _lp_B_jk = [borel.logpmf(0, lbda)]

                        else:
                            _dpDCRdQ_jk = [hDCRs[_v] for _ in range(_f)]
                            if(_f>1):
                                _lp_B_jk = [_f*borel.logpmf(_v, lbda)]
                            else:
                                _lp_B_jk = [borel.logpmf(_v, lbda)]

                        _dpDCRdQ_jks.extend(_dpDCRdQ_jk)
                        _lp_B_jks.extend(_lp_B_jk)

                    __pDCR_k = np.exp(np.sum(_lp_B_jks) + poisson.logpmf(j, mu_DCR))
                    __dpDCRdQ_k = ConvolveList(_dpDCRdQ_jks, idx_k_0_pad, len_Q_pad, dx_k, norm=True)
                    
                    _pDCR_k.append(__pDCR_k)
                    _dpDCRdQ_k.append(__dpDCRdQ_k)
            
            pDCRs.extend(_pDCR_k)
            dpDCRdQs.extend(_dpDCRdQ_k)
            
    pDCRs = np.vstack(pDCRs)
    dpDCRdQs =  np.vstack(dpDCRdQs)
    
  
    pdf_dark = np.sum(pDCRs*dpDCRdQs, axis=0)
    ## pdf_dark is conditioned on at least one dark count; normalising here is equivalent to dividing
    ## by (1 - pDCRs_0). It is recombined with weight (1 - pDCRs_0) below, then rescaled to the
    ## original unit after convolution.
    pdf_dark = pdf_dark/max(epsilon(), np.trapz(pdf_dark, dx=dx_k)) 
    

    
    pdf_drm = np.zeros(len_Q_pad)
  
    for _k in k_pg:

        _dp0dQ = float(Pns[_k, 0])*dp0dQs[_k, :]
        
        if(_k>0):
            
            
            _dpndQ_sum = np.sum(np.vstack([
                                    float(Pns[_k, _j])*Convolve(dpndQs[_j, :],
                                                             dp0dQs[_k, :],
                                                             idx_k_0_pad,
                                                             len_Q_pad,
                                                             dx_k,
                                                             norm=True   
                                                            )
                                    for _j in range(1, _k+1)
                                ]),
                               axis=0)

        else:
            _dpndQ_sum = np.zeros(len_Q_pad)
            
        _dp0dQ = np.nan_to_num(_dp0dQ, nan=0.0, posinf=0.0, neginf=0.0)
        _dpndQ_sum = np.nan_to_num(_dpndQ_sum, nan=0.0, posinf=0.0, neginf=0.0)
            
  
        _pdf_light_k = (_dp0dQ + _dpndQ_sum)
        _pdf_drm_k = pDCRs_0*_pdf_light_k + (1-pDCRs_0)*Convolve(_pdf_light_k, pdf_dark, idx_k_0_pad, len_Q_pad, dx_k, norm=True)
        _pdf_drm_k = _pdf_drm_k/max(epsilon(), np.trapz(_pdf_drm_k, dx=dx_k))
        
        _pdf_drm_k = float(Ppgs[_k])*_pdf_drm_k
        pdf_drm += _pdf_drm_k
        
        
    
    pdf_drm = pdf_drm[Q_idx_orig]/G
    count_drm = A*pdf_drm
    

        
    return count_drm
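########################################################################################################
## Example usage (a minimal sketch of the calling convention; every parameter value below is
## hypothetical and chosen only for illustration, not taken from a fit):
##
# Q = np.arange(0.0, 500.0, 0.5)                    # charge bins, arb. units
# counts = DRM(Q, Q_0=50.0, G=20.0, mu=2.0, lbda=0.2,
#              sigma_0=2.0, sigma_1=1.0, tau_Ap=10.0, p_Ap=0.05,
#              DCR=1e-4, tau=25.0, tau_R=20.0, t_gate=100.0, t_0=50.0,
#              k_max=10, k_max_dcr=4, len_pad=100, A=1.0)
# plt.plot(Q, counts)
# plt.yscale("log")
# plt.show()
########################################################################################################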