From efddabc8d4c317e1752804886fa0564d9dae904f Mon Sep 17 00:00:00 2001
From: albs10101
Date: Wed, 9 Aug 2023 00:36:41 +0800
Subject: [PATCH] July 12th GN CA files

- Added July 12th GN CA code
- Replaced AnalyzePDE, ProcessWaveforms, RunInfo for my analysis code
---
 .DS_Store                         | Bin 0 -> 6148 bytes
 AnalyzePDE.py                     |  873 +++++----------
 Analyze_July_11_dark_405nm_CA.py  |  241 +++++
 ProcessWaveforms_MultiGaussian.py | 1649 ++++++++++++-----------------
 RunInfo.py                        |  715 ++++++-------
 5 files changed, 1475 insertions(+), 2003 deletions(-)
 create mode 100644 .DS_Store
 create mode 100644 Analyze_July_11_dark_405nm_CA.py

diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..87c253ffafba243b16f1eb65fd8fe04f15de1afb
GIT binary patch
(unreadable binary payload omitted)

diff --git a/AnalyzePDE.py b/AnalyzePDE.py
index 6e5b342..2a436e5 100644
--- a/AnalyzePDE.py
+++ b/AnalyzePDE.py
@@ -8,56 +8,21 @@
 import numpy as np
 import lmfit as lm
 import matplotlib.pyplot as plt
-
 # import GainCalibration_2022 as GainCalibration
 import pandas as pd
-from ProcessWaveforms_MultiGaussian import WaveformProcessor
-
 
 def CA_func(x, A, B):
-    return (A * np.exp(B * x) + 1.0) / (1.0 + A) - 1.0
-
+    return (A * np.exp(B * x) + 1.0) / (1.0 + A) - 1.0
 
 class SPE_data:
-    def __init__(
-        self,
-        campaign: list[WaveformProcessor],
-        invC: float,
-        invC_err: float,
-        filtered: bool,
-    ) -> None:
-        """
-        Initialize the SPE_data class. This class is used for collecting all the WaveformProcessor results for
-        an entire campaign into one object. Can plot absolute gain and Correlated Avalance (CA) as a
-        function of bias voltage.
-
-        Parameters:
-        campaign (List[WaveformProcessor]): The campaign data.
-        invC (float): The inverse of the capacitance.
-        invC_err (float): The error in the inverse of the capacitance.
-        filtered (bool): A flag indicating whether the data is filtered.
-
-        Returns:
-        None
-        """
+    def __init__(self, campaign, invC, invC_err, filtered):
         self.campaign = campaign
         self.invC = invC
        self.invC_err = invC_err
         self.filtered = filtered
         self.analyze_spe()
-
-    def analyze_spe(self) -> None:
-        """
-        Analyze the single photoelectron (SPE) data.
-
-        This method unpacks the SPE amplitudes, bias voltages, and CA calculation from the
-        WaveformProcessor objects. It then performs a linear fit on the SPE values as a
-        function of bias voltage, and determines the breakdown voltage. It also calculates
-        and populates the absolute gain and overvoltage attributes.
- - Returns: - None - """ + + def analyze_spe(self): self.bias_vals = [] self.bias_err = [] self.spe_vals = [] @@ -68,414 +33,273 @@ def analyze_spe(self) -> None: self.CA_err = [] self.CA_rms_vals = [] self.CA_rms_err = [] + self.avg = [] + for x in self.campaign: + self.avg.append(x.A_avg) + print(self.avg) for wp in self.campaign: self.bias_vals.append(wp.info.bias) - self.bias_err.append(0.0025 * wp.info.bias + 0.015) # error from keysight + self.bias_err.append(0.0025*wp.info.bias + 0.015) # error from keysight curr_spe = wp.get_spe() self.spe_vals.append(curr_spe[0]) self.spe_err.append(curr_spe[1]) - - self.bias_vals = np.array(self.bias_vals) + + self.bias_vals = np.array(self.bias_vals) self.bias_err = np.array(self.bias_err) - self.spe_vals = np.array(self.spe_vals) + self.spe_vals = np.array(self.spe_vals) self.spe_err = np.array(self.spe_err) - self.absolute_spe_vals = self.spe_vals / (self.invC * 1.60217662e-7) + self.absolute_spe_vals = self.spe_vals / (self.invC * 1.60217662E-7) self.absolute_spe_err = self.absolute_spe_vals * np.sqrt( - (self.spe_err * self.spe_err) / (self.spe_vals * self.spe_vals) - + (self.invC_err * self.invC_err) / (self.invC * self.invC) - ) + (self.spe_err * self.spe_err) / (self.spe_vals * self.spe_vals) + + (self.invC_err * self.invC_err) / (self.invC * self.invC)) spe_wgts = [1.0 / curr_std for curr_std in self.spe_err] absolute_spe_wgts = [1.0 / curr_std for curr_std in self.absolute_spe_err] + model = lm.models.LinearModel() params = model.make_params() - self.spe_res = model.fit( - self.spe_vals, params=params, x=self.bias_vals, weights=spe_wgts - ) - self.absolute_spe_res = model.fit( - self.absolute_spe_vals, - params=params, - x=self.bias_vals, - weights=absolute_spe_wgts, - ) # linear fit - b_spe = self.spe_res.params["intercept"].value - m_spe = self.spe_res.params["slope"].value + self.spe_res = model.fit(self.spe_vals, params=params, x=self.bias_vals, weights=spe_wgts) + self.absolute_spe_res = model.fit(self.absolute_spe_vals, params=params, x=self.bias_vals, weights=absolute_spe_wgts) # linear fit + b_spe = self.spe_res.params['intercept'].value + m_spe = self.spe_res.params['slope'].value self.v_bd = -b_spe / m_spe - vec_spe = np.array([b_spe / (m_spe * m_spe), -1.0 / m_spe]) - # print('check ' + str(self.bias_vals)) - self.v_bd_err = np.sqrt( - np.matmul( - np.reshape(vec_spe, (1, 2)), - np.matmul(self.spe_res.covar, np.reshape(vec_spe, (2, 1))), - )[0, 0] - ) # breakdown error calculation using covariance matrix - + vec_spe = np.array([b_spe / (m_spe * m_spe), -1.0/m_spe]) + print('check ' + str(self.bias_vals)) + self.v_bd_err = np.sqrt(np.matmul(np.reshape(vec_spe, (1, 2)), np.matmul(self.spe_res.covar, np.reshape(vec_spe, (2, 1))))[0, 0]) # breakdown error calculation using covariance matrix + self.ov = [] self.ov_err = [] - + for b, db in zip(self.bias_vals, self.bias_err): curr_ov = b - self.v_bd curr_ov_err = np.sqrt(db * db + self.v_bd_err * self.v_bd_err) self.ov.append(curr_ov) self.ov_err.append(curr_ov_err) - + self.ov = np.array(self.ov) print(self.ov) self.ov_err = np.array(self.ov_err) - for wp, curr_bias, curr_bias_err in zip( - self.campaign, self.bias_vals, self.bias_err - ): + for wp, curr_bias, curr_bias_err in zip(self.campaign, self.bias_vals, self.bias_err): curr_spe = self.spe_res.eval(params=self.spe_res.params, x=curr_bias) - curr_spe_err = self.spe_res.eval_uncertainty( - x=curr_bias, params=self.spe_res.params, sigma=1 - )[0] + curr_spe_err = self.spe_res.eval_uncertainty(x = curr_bias, params = 
self.spe_res.params, sigma = 1)[0] curr_CA_val, curr_CA_err = wp.get_CA_spe(curr_spe, curr_spe_err) curr_CA_rms_val, curr_CA_rms_err = wp.get_CA_rms(curr_spe, curr_spe_err) self.CA_vals.append(curr_CA_val) self.CA_err.append(curr_CA_err) self.CA_rms_vals.append(curr_CA_rms_val) self.CA_rms_err.append(curr_CA_rms_err) - + self.CA_vals = np.array(self.CA_vals) self.CA_err = np.array(self.CA_err) - + + # self.CA_vals = (self.CA_vals + 1)/2 - 1 + + self.CA_rms_vals = np.array(self.CA_rms_vals) self.CA_rms_err = np.array(self.CA_rms_err) - # i am stinky + #i am stinky CA_model = lm.Model(CA_func) - CA_params = CA_model.make_params(A=1, B=0.1) + CA_params = CA_model.make_params(A = 1, B = .1) CA_wgts = [1.0 / curr_std for curr_std in self.CA_err] - self.CA_res = CA_model.fit( - self.CA_vals, params=CA_params, x=self.ov, weights=CA_wgts - ) - - def get_CA_ov(self, input_ov_vals: np.ndarray) -> tuple[np.ndarray, np.ndarray]: - """ - Evaluate the CA values and their uncertainties at the given overvoltage values. - - Parameters: - input_ov_vals (np.ndarray): The input overvoltage values. + self.CA_res = CA_model.fit(self.CA_vals, params=CA_params, x=self.ov, weights=CA_wgts) - Returns: - Tuple[np.ndarray, np.ndarray]: A tuple containing the evaluated CA values and their uncertainties. - """ - out_vals = self.CA_res.eval(params=self.CA_res.params, x=input_ov_vals) - out_err = self.CA_res.eval_uncertainty(x=input_ov_vals, sigma=1) + def get_CA_ov(self, input_ov_vals): + out_vals = self.CA_res.eval(params = self.CA_res.params, x = input_ov_vals) + out_err = self.CA_res.eval_uncertainty(x = input_ov_vals, sigma = 1) return out_vals, out_err - - def get_spe_ov(self, input_ov_vals: np.ndarray) -> tuple[np.ndarray, np.ndarray]: - """ - Evaluate the SPE values and their uncertainties at the given overvoltage values. - - This method first calculates the bias values by adding the breakdown voltage to the input - overvoltage values. It then evaluates the SPE values and their uncertainties at these bias values. - - Parameters: - input_ov_vals (np.ndarray): The input overvoltage values. - - Returns: - Tuple[np.ndarray, np.ndarray]: A tuple containing the evaluated SPE values and their uncertainties. - """ + + def get_spe_ov(self, input_ov_vals): input_bias_vals = input_ov_vals + self.v_bd - out_vals = self.spe_res.eval(params=self.spe_res.params, x=input_bias_vals) - out_err = self.spe_res.eval_uncertainty(x=input_bias_vals, sigma=1) + out_vals = self.spe_res.eval(params = self.spe_res.params, x = input_bias_vals) + out_err = self.spe_res.eval_uncertainty(x = input_bias_vals, sigma = 1) return out_vals, out_err - - def plot_spe( - self, - in_ov: bool = False, - absolute: bool = False, - color: str = "blue", - out_file: str = None, - ) -> None: - """ - Plot the single photoelectron (SPE) data. - - This method creates a plot of the SPE data. It can plot either the absolute gain or the SPE amplitude, - and it can plot the data as a function of either the bias voltage or the overvoltage. The plot includes - the best fit line and its uncertainty, the data points with their errors, and a text box with additional - information about the data and the fit. The plot can be saved to a file. - - Parameters: - in_ov (bool, optional): If True, plot the data as a function of the overvoltage. If False, plot the data as a function of the bias voltage. Default is False. - absolute (bool, optional): If True, plot the absolute gain. If False, plot the SPE amplitude. Default is False. 
- color (str, optional): The color to use for the text box. Default is 'blue'. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - - color = "tab:" + color + + def plot_spe(self, in_ov = False, absolute = False, color = 'blue', out_file = None): + color = 'tab:' + color fig = plt.figure() - + start_bias = self.v_bd - end_bias = np.amax(self.bias_vals) + 1.0 + end_bias = np.amax(self.bias_vals) + 1.0 fit_bias = np.linspace(start_bias, end_bias, 20) - + if absolute: - fit_y = self.absolute_spe_res.eval( - params=self.absolute_spe_res.params, x=fit_bias - ) - fit_y_err = self.absolute_spe_res.eval_uncertainty( - x=fit_bias, params=self.absolute_spe_res.params, sigma=1 - ) - fit_label = "Absolute Gain Best Fit" - data_label = "Absolute Gain values" - y_label = "Absolute Gain" + fit_y = self.absolute_spe_res.eval(params=self.absolute_spe_res.params, x = fit_bias) + fit_y_err = self.absolute_spe_res.eval_uncertainty(x = fit_bias, params = self.absolute_spe_res.params, sigma = 1) + fit_label = 'Absolute Gain Best Fit' + data_label = 'Absolute Gain values' + y_label = 'Absolute Gain' data_y = self.absolute_spe_vals data_y_err = self.absolute_spe_err chi_sqr = self.absolute_spe_res.redchi - slope_text = rf"""Slope: {self.absolute_spe_res.params['slope'].value:0.4} $\pm$ {self.absolute_spe_res.params['slope'].stderr:0.2} [1/V]""" - intercept_text = rf"""Intercept: {self.absolute_spe_res.params['intercept'].value:0.4} $\pm$ {self.absolute_spe_res.params['intercept'].stderr:0.2} [V]""" - plt.ticklabel_format(style="sci", axis="y", scilimits=(0, 0)) + slope_text = rf'''Slope: {self.absolute_spe_res.params['slope'].value:0.4} $\pm$ {self.absolute_spe_res.params['slope'].stderr:0.2} [1/V]''' + intercept_text = rf'''Intercept: {self.absolute_spe_res.params['intercept'].value:0.4} $\pm$ {self.absolute_spe_res.params['intercept'].stderr:0.2} [V]''' + plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) else: - fit_y = self.spe_res.eval(params=self.spe_res.params, x=fit_bias) - fit_y_err = self.spe_res.eval_uncertainty( - x=fit_bias, params=self.spe_res.params, sigma=1 - ) - fit_label = "SPE Amplitude Best Fit" - data_label = "SPE Amplitude values" - y_label = "SPE Amplitude [V]" + fit_y = self.spe_res.eval(params=self.spe_res.params, x = fit_bias) + fit_y_err = self.spe_res.eval_uncertainty(x = fit_bias, params = self.spe_res.params, sigma = 1) + fit_label = 'SPE Amplitude Best Fit' + data_label = 'SPE Amplitude values' + y_label = 'SPE Amplitude [V]' data_y = self.spe_vals data_y_err = self.spe_err chi_sqr = self.spe_res.redchi - slope_text = rf"""Slope: {self.spe_res.params['slope'].value:0.4} $\pm$ {self.spe_res.params['slope'].stderr:0.2} [V/V]""" - intercept_text = rf"""Intercept: {self.spe_res.params['intercept'].value:0.4} $\pm$ {self.spe_res.params['intercept'].stderr:0.2} [V]""" - + slope_text = rf'''Slope: {self.spe_res.params['slope'].value:0.4} $\pm$ {self.spe_res.params['slope'].stderr:0.2} [V/V]''' + intercept_text = rf'''Intercept: {self.spe_res.params['intercept'].value:0.4} $\pm$ {self.spe_res.params['intercept'].stderr:0.2} [V]''' + parameter_text = slope_text if in_ov: fit_x = np.linspace(start_bias - self.v_bd, end_bias - self.v_bd, 20) data_x = self.ov data_x_err = self.ov_err - x_label = "Overvoltage [V]" + x_label = 'Overvoltage [V]' else: fit_x = fit_bias data_x = self.bias_vals data_x_err = self.bias_err - x_label = "Bias Voltage [V]" - parameter_text += f"""\n""" + 
x_label = 'Bias Voltage [V]' + parameter_text += f'''\n''' parameter_text += intercept_text - - parameter_text += f"""\n""" - parameter_text += rf"""Reduced $\chi^2$: {chi_sqr:0.4}""" - parameter_text += f"""\n""" - plt.fill_between( - fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color="red", alpha=0.5 - ) - plt.plot(fit_x, fit_y, color="red", label=fit_label) - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - label=data_label, - ) - + + parameter_text += f'''\n''' + parameter_text += rf'''Reduced $\chi^2$: {chi_sqr:0.4}''' + parameter_text += f'''\n''' + if self.campaign[0].status == 0: + plt.fill_between(fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color = 'red', alpha = .2) + plt.plot(fit_x, fit_y, color = 'red', label = 'All ' + fit_label) + plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.', label = data_label) #label = data_label + elif self.campaign[1].status == 1: + plt.fill_between(fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color = 'orange', alpha = .2) + plt.plot(fit_x, fit_y, color = 'orange', label = 'LED-on ' + fit_label) + plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.') #label = data_label + else: + plt.fill_between(fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color = 'green', alpha = .2) + plt.plot(fit_x, fit_y, color = 'green', label = 'LED-off ' + fit_label) + plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.') #label = data_label + plt.xlabel(x_label) plt.ylabel(y_label) plt.legend() - - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" + + textstr = f'Date: {self.campaign[0].info.date}\n' + textstr += f'Condition: {self.campaign[0].info.condition}\n' + textstr += f'RTD4: {self.campaign[0].info.temperature} [K]\n' if self.filtered: - textstr += f"Filtering: Lowpass, 400kHz\n" + textstr += f'Filtering: Lowpass, 400kHz\n' else: - textstr += f"Filtering: None\n" - textstr += f"--\n" + textstr += f'Filtering: None\n' + textstr += f'--\n' textstr += parameter_text - textstr += f"--\n" - textstr += rf"Breakdown Voltage: {self.v_bd:0.4} $\pm$ {self.v_bd_err:0.3} [V]" + textstr += f'--\n' + textstr += rf'Breakdown Voltage: {self.v_bd:0.4} $\pm$ {self.v_bd_err:0.3} [V]' - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.6, 0.45, textstr, fontsize=8, verticalalignment="top", bbox=props) + props = dict(boxstyle='round', facecolor=color, alpha=0.4) + fig.text(0.6, 0.45, textstr, fontsize=8, + verticalalignment='top', bbox=props) plt.tight_layout() - + if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } + data = {x_label: data_x, x_label + ' error': data_x_err, y_label: data_y, y_label + ' error': data_y_err} df = pd.DataFrame(data) df.to_csv(out_file) - def plot_CA(self, color: str = "blue", out_file: str = None) -> None: - """ - Plot the correlated avalanche (CA) as a function of overvoltage. - - This method creates a plot of the CA as a function of overvoltage. The plot includes - the best fit line and its uncertainty, the data points with their errors, and a text box - with additional information about the data and the fit. The plot can be saved to a file. - - Parameters: - color (str, optional): The color to use for the text box. Default is 'blue'. 
- out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - color = "tab:" + color + def plot_CA(self, color = 'blue', out_file = None): + color = 'tab:' + color fig = plt.figure() - + data_x = self.ov data_x_err = self.ov_err data_y = self.CA_vals data_y_err = self.CA_err - - fit_x = np.linspace(0.0, np.amax(self.ov) + 1.0, num=100) - fit_y = self.CA_res.eval(params=self.CA_res.params, x=fit_x) - fit_y_err = self.CA_res.eval_uncertainty( - x=fit_x, params=self.CA_res.params, sigma=1 - ) - - plt.fill_between( - fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color="deeppink", alpha=0.5 - ) - plt.plot( - fit_x, - fit_y, - color="deeppink", - label=r"$\frac{Ae^{B*V_{OV}}+1}{A + 1} - 1$ fit", - ) - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - label=r"$\frac{1}{N}\sum_{i=1}^{N}{\frac{A_i}{\bar{A}_{1 PE}}-1}$", - ) - - x_label = "Overvoltage [V]" - y_label = "Number of CA [PE]" + + fit_x = np.linspace(0.0, np.amax(self.ov) + 1.0, num = 100) + fit_y = self.CA_res.eval(params=self.CA_res.params, x = fit_x) + fit_y_err = self.CA_res.eval_uncertainty(x = fit_x, params = self.CA_res.params, sigma = 1) + + # if self.campaign[0].status == 0: + plt.fill_between(fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color = 'deeppink', alpha = .5) + plt.plot(fit_x, fit_y, color = 'deeppink', label = 'All') + plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.') #label = r'$\frac{1}{N}\sum_{i=1}^{N}{\frac{A_i}{\bar{A}_{1 PE}}-1}$' + # elif self.campaign[0].status == 1: + # plt.fill_between(fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color = 'orange', alpha = .5) + # plt.plot(fit_x, fit_y, color = 'orange', label = 'LED off') + # plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.') #label = r'$\frac{1}{N}\sum_{i=1}^{N}{\frac{A_i}{\bar{A}_{1 PE}}-1}$' + # else: + # plt.fill_between(fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color = 'green', alpha = .5) + # plt.plot(fit_x, fit_y, color = 'green', label = 'LED on') + # plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.') #label = r'$\frac{1}{N}\sum_{i=1}^{N}{\frac{A_i}{\bar{A}_{1 PE}}-1}$' + + x_label = 'Overvoltage [V]' + y_label = 'Number of CA [PE]' plt.xlabel(x_label) plt.ylabel(y_label) - plt.legend(loc="upper left") - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" + plt.legend(loc = 'upper left') + textstr = f'Date: {self.campaign[0].info.date}\n' + textstr += f'Condition: {self.campaign[0].info.condition}\n' + textstr += f'RTD4: {self.campaign[0].info.temperature} [K]\n' if self.filtered: - textstr += f"Filtering: Lowpass, 400kHz\n" + textstr += f'Filtering: Lowpass, 400kHz\n' else: - textstr += f"Filtering: None\n" - textstr += f"--\n" - textstr += f"""A: {self.CA_res.params['A'].value:0.3f} $\pm$ {self.CA_res.params['A'].stderr:0.3f}\n""" - textstr += f"""B: {self.CA_res.params['B'].value:0.2} $\pm$ {self.CA_res.params['B'].stderr:0.2}\n""" - textstr += rf"""Reduced $\chi^2$: {self.CA_res.redchi:0.4}""" - - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.15, 0.65, textstr, fontsize=8, verticalalignment="top", bbox=props) + textstr += f'Filtering: None\n' + textstr += f'--\n' + textstr += f'''A: {self.CA_res.params['A'].value:0.3f} 
$\pm$ {self.CA_res.params['A'].stderr:0.3f}\n''' + textstr += f'''B: {self.CA_res.params['B'].value:0.2} $\pm$ {self.CA_res.params['B'].stderr:0.2}\n''' + textstr += rf'''Reduced $\chi^2$: {self.CA_res.redchi:0.4}''' + + props = dict(boxstyle='round', facecolor=color, alpha=0.4) + fig.text(0.15, 0.65, textstr, fontsize=8, + verticalalignment='top', bbox=props) + plt.tight_layout() if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } + data = {x_label: data_x, x_label + ' error': data_x_err, y_label: data_y, y_label + ' error': data_y_err} df = pd.DataFrame(data) df.to_csv(out_file) + + # plt.figure() #plots average amplitude against overvoltage + # plt.plot(data_x,self.avg, '.') - def plot_CA_rms(self, color: str = "blue", out_file: str = None) -> None: - """ - Plot the root mean square (RMS) of the charge amplification (CA) as a function of overvoltage. - - This method creates a plot of the RMS of the CA as a function of overvoltage. The plot includes - the data points with their errors and a text box with additional information about the data. - The plot can be saved to a file. - - Parameters: - color (str, optional): The color to use for the text box. Default is 'blue'. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - color = "tab:" + color + + def plot_CA_rms(self, color = 'blue', out_file = None): + color = 'tab:' + color fig = plt.figure() - + data_x = self.ov data_x_err = self.ov_err data_y = self.CA_rms_vals data_y_err = self.CA_rms_err - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - label=r"$\sqrt{\frac{\sum_{i=1}^{N}\left(\frac{A_i}{\bar{A}_{1 PE}}-\left(\langle\Lambda\rangle+1\right)\right)^2}{N}}$", - ) - - x_label = "Overvoltage [V]" - y_label = "RMS CAs [PE]" + plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.', label = r'$\sqrt{\frac{\sum_{i=1}^{N}\left(\frac{A_i}{\bar{A}_{1 PE}}-\left(\langle\Lambda\rangle+1\right)\right)^2}{N}}$') + + x_label = 'Overvoltage [V]' + y_label = 'RMS CAs [PE]' plt.xlabel(x_label) plt.ylabel(y_label) - plt.legend(loc="upper left") - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" + plt.legend(loc = 'upper left') + textstr = f'Date: {self.campaign[0].info.date}\n' + textstr += f'Condition: {self.campaign[0].info.condition}\n' + textstr += f'RTD4: {self.campaign[0].info.temperature} [K]\n' if self.filtered: # textstr += f'Filtering: Lowpass, 400kHz\n' - textstr += f"Filtering: Bandpass [1E4, 1E6]\n" + textstr += f'Filtering: Bandpass [1E4, 1E6]\n' else: - textstr += f"Filtering: None\n" + textstr += f'Filtering: None\n' - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.15, 0.65, textstr, fontsize=8, verticalalignment="top", bbox=props) + props = dict(boxstyle='round', facecolor=color, alpha=0.4) + fig.text(0.15, 0.65, textstr, fontsize=8, + verticalalignment='top', bbox=props) plt.tight_layout() if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } + data = {x_label: data_x, x_label + ' error': data_x_err, y_label: data_y, y_label + ' error': data_y_err} df = pd.DataFrame(data) df.to_csv(out_file) - # I HAVE A SECRET TO TELL YOU! 
(it was reed who wrote that message and thwy are pinning it on me) - - # it worked. - class Alpha_data: - def __init__( - self, - campaign: list[WaveformProcessor], - invC: float, - invC_err: float, - spe_data: SPE_data, - v_bd: float, - v_bd_err: float, - ) -> None: - """ - Initialize the Alpha_data class. This class is used for collecting all the WaveformProcessor results for - an entire campaign into one object. Can then be used to determine PDE and the equivalent number of detected photons. - - Parameters: - campaign (List): The campaign data. - invC (float): The inverse of the capacitance. - invC_err (float): The error in the inverse of the capacitance. - spe_data (SPE_data): The single photoelectron data. - v_bd (float): The breakdown voltage. - v_bd_err (float): The error in the breakdown voltage. - - Returns: - None - """ + def __init__(self, campaign, invC, invC_err, spe_data, v_bd, v_bd_err): self.campaign = campaign self.invC = invC self.invC_err = invC_err @@ -483,350 +307,185 @@ def __init__( self.v_bd = v_bd self.v_bd_err = v_bd_err self.analyze_alpha() - - def analyze_alpha(self) -> None: - """ - Analyze the alpha data. - - This method calculates various parameters related to the alpha data such as bias values, - alpha values, overvoltage values, and the number of detected photons. - - Returns: - None - """ + + def analyze_alpha(self): self.bias_vals = [] self.bias_err = [] self.alpha_vals = [] self.alpha_err = [] - for wp in self.campaign: + for wp in self.campaign: self.bias_vals.append(wp.info.bias) self.bias_err.append(0.005) curr_alpha = wp.get_alpha() self.alpha_vals.append(curr_alpha[0]) self.alpha_err.append(curr_alpha[1]) - + self.bias_vals = np.array(self.bias_vals) self.bias_err = np.array(self.bias_err) self.alpha_vals = np.array(self.alpha_vals) self.alpha_err = np.array(self.alpha_err) - + self.ov = [] self.ov_err = [] - + for b, db in zip(self.bias_vals, self.bias_err): curr_ov = b - self.v_bd curr_ov_err = np.sqrt(db * db + self.v_bd_err * self.v_bd_err) self.ov.append(curr_ov) self.ov_err.append(curr_ov_err) - + self.ov = np.array(self.ov) self.ov_err = np.array(self.ov_err) - + self.CA_vals, self.CA_err = self.spe_data.get_CA_ov(self.ov) self.spe_vals, self.spe_err = self.spe_data.get_spe_ov(self.ov) - print("CA Vals: " + str(self.CA_vals)) - print("SPE Vals: " + str(self.spe_vals)) - - self.num_det_photons = ( - self.alpha_vals - * self.spe_data.invC - / (self.spe_vals * self.invC * (1.0 + self.CA_vals)) - ) - self.num_det_photons_err = self.num_det_photons * np.sqrt( - (self.alpha_err * self.alpha_err) / (self.alpha_vals * self.alpha_vals) - + (self.spe_data.invC_err * self.spe_data.invC_err) - / (self.spe_data.invC * self.spe_data.invC) - + (self.spe_err * self.spe_err) / (self.spe_vals * self.spe_vals) - + (self.invC_err * self.invC_err) / (self.invC * self.invC) - + (self.CA_err * self.CA_err) / (self.CA_vals * self.CA_vals) - ) - - def plot_alpha(self, color: str = "purple", out_file: str = None) -> None: - """ - Plot the alpha data as a function of overvoltage. - - This method creates a plot of the alpha data as a function of overvoltage. The plot includes - the data points with their errors and a text box with additional information about the data. - The plot can be saved to a file. - - Parameters: - color (str, optional): The color to use for the data points. Default is 'purple'. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. 
- - Returns: - None - """ - color = "tab:" + color + + # self.CA_vals = (self.CA_vals + 1) / 2.0 - 1.0 + # self.alpha_vals /= 2.0 + # self.spe_vals *= 2.0 + print('here') + print('CA Vals: ' + str(self.CA_vals)) + + self.num_det_photons = self.alpha_vals * self.spe_data.invC / (self.spe_vals * self.invC * (1.0 + self.CA_vals)) + self.num_det_photons_err = self.num_det_photons * np.sqrt((self.alpha_err * self.alpha_err) / (self.alpha_vals * self.alpha_vals) + + (self.spe_data.invC_err * self.spe_data.invC_err) / (self.spe_data.invC * self.spe_data.invC) + + (self.spe_err * self.spe_err) / (self.spe_vals * self.spe_vals) + + (self.invC_err * self.invC_err) / (self.invC * self.invC) + + (self.CA_err * self.CA_err) / (self.CA_vals * self.CA_vals)) + + def plot_alpha(self, color = 'purple', out_file = None): + color = 'tab:' + color fig = plt.figure() - fig.tight_layout() - plt.rc("font", size=22) - + data_x = self.ov data_x_err = self.ov_err data_y = self.alpha_vals data_y_err = self.alpha_err - - # fit_x = np.linspace(0.0, np.amax(self.ov) + 1.0, num = 100) - # fit_y = self.CA_res.eval(params=self.CA_res.params, x = fit_x) - # fit_y_err = self.CA_res.eval_uncertainty(x = fit_x, params = self.CA_res.params, sigma = 1) - - # plt.fill_between(fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color = 'red', alpha = .5) - # plt.plot(fit_x, fit_y, color = 'red', label = r'$\frac{Ae^{B*V_{OV}}+1}{A + 1} - 1$ fit') - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - color=color, - ) - - plt.xlabel("Overvoltage [V]") - plt.ylabel("Alpha Pulse Amplitude [V]") - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]" - # if self.filtered: - # textstr += f'Filtering: Lowpass, 400kHz\n' - # else: - # textstr += f'Filtering: None\n' - # textstr += f'--\n' - # textstr += f'''A: {self.CA_res.params['A'].value:0.3f} $\pm$ {self.CA_res.params['A'].stderr:0.3f}\n''' - # textstr += f'''B: {self.CA_res.params['B'].value:0.2} $\pm$ {self.CA_res.params['B'].stderr:0.2}\n''' - # textstr += rf'''Reduced $\chi^2$: {self.CA_res.redchi:0.4}''' - - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.75, 0.25, textstr, fontsize=8, verticalalignment="top", bbox=props) + +# fit_x = np.linspace(0.0, np.amax(self.ov) + 1.0, num = 100) +# fit_y = self.CA_res.eval(params=self.CA_res.params, x = fit_x) +# fit_y_err = self.CA_res.eval_uncertainty(x = fit_x, params = self.CA_res.params, sigma = 1) + +# plt.fill_between(fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color = 'red', alpha = .5) +# plt.plot(fit_x, fit_y, color = 'red', label = r'$\frac{Ae^{B*V_{OV}}+1}{A + 1} - 1$ fit') + plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.', color = color) + + plt.xlabel('Overvoltage [V]') + plt.ylabel('Alpha Pulse Amplitude [V]') + textstr = f'Date: {self.campaign[0].info.date}\n' + textstr += f'Condition: {self.campaign[0].info.condition}\n' + textstr += f'RTD4: {self.campaign[0].info.temperature} [K]' +# if self.filtered: +# textstr += f'Filtering: Lowpass, 400kHz\n' +# else: +# textstr += f'Filtering: None\n' +# textstr += f'--\n' +# textstr += f'''A: {self.CA_res.params['A'].value:0.3f} $\pm$ {self.CA_res.params['A'].stderr:0.3f}\n''' +# textstr += f'''B: {self.CA_res.params['B'].value:0.2} $\pm$ {self.CA_res.params['B'].stderr:0.2}\n''' +# textstr += rf'''Reduced $\chi^2$: {self.CA_res.redchi:0.4}''' + + 
props = dict(boxstyle='round', facecolor=color, alpha=0.4) + fig.text(0.70, 0.25, textstr, fontsize=8, + verticalalignment='top', bbox=props) + plt.tight_layout() if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } + data = {x_label: data_x, x_label + ' error': data_x_err, y_label: data_y, y_label + ' error': data_y_err} df = pd.DataFrame(data) df.to_csv(out_file) - plt.show() - - def plot_num_det_photons(self, color: str = "purple", out_file: str = None) -> None: - """ - Plot the number of detected photons as a function of overvoltage. - - This method creates a plot of the number of detected photons as a function of overvoltage. - The plot includes the data points with their errors and a text box with additional information - about the data. The plot can be saved to a file. - - Parameters: - color (str, optional): The color to use for the data points. Default is 'purple'. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - color = "tab:" + color + + def plot_num_det_photons(self, color = 'purple', out_file = None): + color = 'tab:' + color fig = plt.figure() - + data_x = self.ov data_x_err = self.ov_err data_y = self.num_det_photons data_y_err = self.num_det_photons_err - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - color=color, - ) - - plt.xlabel("Overvoltage [V]") - plt.ylabel("Number of Detected Photons") - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]" - # if self.filtered: - # textstr += f'Filtering: Lowpass, 400kHz\n' - # else: - # textstr += f'Filtering: None\n' - - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.75, 0.25, textstr, fontsize=8, verticalalignment="top", bbox=props) + plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.', color = color) + + plt.xlabel('Overvoltage [V]') + plt.ylabel('Number of Detected Photons') + textstr = f'Date: {self.campaign[0].info.date}\n' + textstr += f'Condition: {self.campaign[0].info.condition}\n' + textstr += f'RTD4: {self.campaign[0].info.temperature} [K]' +# if self.filtered: +# textstr += f'Filtering: Lowpass, 400kHz\n' +# else: +# textstr += f'Filtering: None\n' + + + props = dict(boxstyle='round', facecolor=color, alpha=0.4) + fig.text(0.75, 0.25, textstr, fontsize=8, + verticalalignment='top', bbox=props) plt.xlim(0, np.amax(self.ov) + 1.0) ylow, yhigh = plt.ylim() plt.ylim(-1, yhigh * 1.1) plt.tight_layout() - - def plot_PDE( - self, - num_incident: int, - color: str = "purple", - other_data: list = None, - out_file: str = None, - ) -> None: - """ - Plot the Photon Detection Efficiency (PDE) as a function of overvoltage. - - This method creates a plot of the PDE as a function of overvoltage. The plot includes - the data points with their errors and a text box with additional information about the data. - The plot can also include data from other sources. The plot can be saved to a file. - - Parameters: - num_incident (int): The number of incident photons. - color (str, optional): The color to use for the data points. Default is 'purple'. - other_data (List, optional): A list of other data to include in the plot. 
Each item in the list should be a dictionary with keys 'ov', 'pde', 'ov_err', 'pde_err', and 'label'. Default is None. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - color = "tab:" + color + + def plot_PDE(self, num_incident, color = 'purple', other_data = None, out_file = None): + color = 'tab:' + color fig = plt.figure() - + data_x = self.ov data_x_err = self.ov_err data_y = self.num_det_photons / num_incident data_y_err = self.num_det_photons_err / num_incident - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - color=color, - label="UMass, 175nm, 190K", - ) - + plt.errorbar(data_x, data_y, xerr = data_x_err, yerr = data_y_err, markersize = 10, fmt = '.', color = color, label = 'UMass, 175nm, 190K') + if other_data: for od in other_data: - plt.errorbar( - od.ov, - od.pde, - xerr=od.ov_err, - yerr=od.pde_err, - markersize=10, - fmt=".", - label=od.label, - ) - - plt.xlabel("Overvoltage [V]") - plt.ylabel("Photon Detection Efficiency") - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]" - - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.75, 0.25, textstr, fontsize=8, verticalalignment="top", bbox=props) + plt.errorbar(od.ov, od.pde, xerr = od.ov_err, yerr = od.pde_err, markersize = 10, fmt = '.', label = od.label) + + plt.xlabel('Overvoltage [V]') + plt.ylabel('Photon Detection Efficiency') + textstr = f'Date: {self.campaign[0].info.date}\n' + textstr += f'Condition: {self.campaign[0].info.condition}\n' + textstr += f'RTD4: {self.campaign[0].info.temperature} [K]' + + props = dict(boxstyle='round', facecolor=color, alpha=0.4) + fig.text(0.75, 0.25, textstr, fontsize=8, + verticalalignment='top', bbox=props) plt.xlim(0, np.amax(self.ov) + 1.0) ylow, yhigh = plt.ylim() plt.ylim(-0.01, yhigh * 1.1) if other_data: - plt.legend(loc="lower left") + plt.legend(loc = 'lower left') plt.tight_layout() if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } + data = {x_label: data_x, x_label + ' error': data_x_err, y_label: data_y, y_label + ' error': data_y_err} df = pd.DataFrame(data) df.to_csv(out_file) - - + class Collab_PDE: - """ - A class used to represent the Photon Detection Efficiency (PDE) data from a collaboration. - - Attributes: - filename (str): The name of the file containing the PDE data. - groupname (str): The name of the group that collected the data. - wavelength (float): The wavelength of the photons used in the experiment. - temp (float): The temperature at which the data was collected. - df (DataFrame): A pandas DataFrame containing the data from the file. - ov (np.array): An array of overvoltage values. - ov_err (np.array): An array of errors in the overvoltage values. - pde (np.array): An array of PDE values. - pde_err (np.array): An array of errors in the PDE values. - """ - - def __init__( - self, filename: str, groupname: str, wavelength: float, temp: float - ) -> None: - """ - Initialize a Collab_PDE object. - - Parameters: - filename (str): The name of the file containing the PDE data. - groupname (str): The name of the group that collected the data. - wavelength (float): The wavelength of the photons used in the experiment. 
- temp (float): The temperature at which the data was collected. - - Returns: - None - """ + def __init__(self, filename, groupname, wavelength, temp): self.filename = filename self.groupname = groupname self.wavelength = wavelength self.temp = temp self.df = pd.read_csv(self.filename) - self.ov = np.array(self.df["OV"]) - self.ov_err = np.array(self.df["OV error"]) - self.pde = np.array(self.df["PDE"]) / 100.0 - self.pde_err = np.array(self.df["PDE error"]) / 100.0 - - -# %% -class multi_campaign: # class to compile multiple campaigns - def __init__( - self, campaigns: list, invC: float, invC_err: float, filtered: bool - ) -> None: - """ - Initialize a multi_campaign object. - - Parameters: - campaigns (List): A list of campaigns to compile. - invC (float): The inverse of the capacitance of the device. - invC_err (float): The error in the inverse of the capacitance. - filtered (bool): Whether the data has been filtered. - - Returns: - None - """ + self.ov = np.array(self.df['OV']) + self.ov_err = np.array(self.df['OV error']) + self.pde = np.array(self.df['PDE']) / 100. + self.pde_err = np.array(self.df['PDE error']) / 100. + +#%% +class multi_campaign: #class to compile multiple campaigns + def __init__(self, campaigns, invC, invC_err, filtered): self.campaigns = campaigns self.invC = invC self.invC_err = invC_err self.filtered = filtered self.create_SPEs() - - def create_SPEs( - self, - ) -> None: # does SPE_data on all the campaigns and returns a list of objects - """ - Create SPE_data objects for all the campaigns. - - This method creates an SPE_data object for each campaign in the list of campaigns and stores them in the data attribute. - - Parameters: - None - - Returns: - None - """ + + def create_SPEs(self): #does SPE_data on all the campaigns and returns a list of objects self.data = [] for curr_campaign in self.campaigns: - self.data.append( - SPE_data( - curr_campaign, invC_spe_filter, invC_spe_err_filter, filtered=True - ) - ) + self.data.append(SPE_data(curr_campaign, invC_spe_filter, invC_spe_err_filter, filtered = True)) -# %% +#%% # ihep_pde = Collab_PDE('C:/Users/lab-341/Desktop/Analysis/fresh_start/PDE_175nm_HD3_iHEP_233K.csv', 'IHEP', 175, 233) # triumf_pde = Collab_PDE('C:/Users/lab-341/Desktop/Analysis/fresh_start/PDE_176nm_HD3_Triumf_163K.csv', 'TRIUMF', 176, 163) diff --git a/Analyze_July_11_dark_405nm_CA.py b/Analyze_July_11_dark_405nm_CA.py new file mode 100644 index 0000000..898e6b5 --- /dev/null +++ b/Analyze_July_11_dark_405nm_CA.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Jul 13 12:37:16 2023 + +@author: albertwang +""" +import sys +import numpy as np +# append necessary file paths, and change E -> D or vice versa +sys.path.append('/Users/albertwang/Desktop/nEXO') +from MeasurementInfo import MeasurementInfo +from RunInfo import RunInfo +import heapq +from scipy import signal +from scipy.optimize import curve_fit +import AnalyzePDE +from AnalyzePDE import SPE_data +from AnalyzePDE import Alpha_data +import matplotlib.pyplot as plt +import matplotlib as mpl +import ProcessWaveforms_MultiGaussian +from ProcessWaveforms_MultiGaussian import WaveformProcessor as WaveformProcessor + +#%% +# def get_subtract_hist_mean(data1, data2, bias = '', numbins = 500, plot = False): +# if plot: +# plt.figure() +# (n, b, p) = plt.hist(data1, bins = numbins, density = False, label = 'data 1', histtype='step') +# plt.axvline(x = np.mean(data1), color = 'blue') +# print('LED on hist: ' + str(np.mean(data1))) +# print('LED off hist: ' 
+ str(np.mean(data2))) +# plt.axvline(x = np.mean(data2), color = 'orange') +# plt.hist(data2, bins = b, density = False, label = 'data 2', histtype='step') +# counts1, bins1 = np.histogram(data1, bins = numbins, density = False) +# counts2, bins2 = np.histogram(data2, bins = bins1, density = False) +# centers = (bins1[1:] + bins1[:-1])/2 +# subtracted_counts = counts1 - counts2 +# # subtracted_counts[subtracted_counts < 0] = 0 +# if plot: +# plt.step(centers, subtracted_counts, label = 'subtracted hist') +# plt.legend() + +# norm_subtract_hist = subtracted_counts / np.sum(subtracted_counts) +# # weights = 1.0 / subtracted_counts / +# mean_value = np.sum(centers * norm_subtract_hist) +# if plot: +# plt.axvline(x = mean_value, color = 'green') +# plt.title('The bias: ' + str(bias) + ', The mean value: ' + str(mean_value)) +# return mean_value + +#%% 405nm CA analysis + +# test_high_bias = RunInfo(['/Users/albertwang/Desktop/nEXO/HDF Data/july_11th_dark_data/CA data/405nm/Run_1689177434.hdf5'], do_filter = True, upper_limit = 1, baseline_correct = True, prominence = 0.007, plot_waveforms= False, is_led = True) +# bias_test = test_high_bias.run_meta_data['/Users/albertwang/Desktop/nEXO/HDF Data/july_11th_dark_data/CA data/405nm/Run_1689177434.hdf5']['RunNotes'] +# get_subtract_hist_mean(test_high_bias.all_led_peak_data, test_high_bias.all_dark_peak_data, bias = bias_test ,plot = True) +# # test_high_bias.plot_hists('169', '.') #regular histogram +# # test_high_bias.plot_peak_waveform_hist() #2D plot +# # test_high_bias.plot_led_dark_hists('171', '.') #LED comparison plot + +#%% + +files2 = ['Run_1689176196.hdf5', 'Run_1689173976.hdf5','Run_1689175049.hdf5','Run_1689175759.hdf5','Run_1689178348.hdf5','Run_1689177955.hdf5','Run_1689176952.hdf5','Run_1689177434.hdf5'] #31.5V, 32V, 32.5, 33.0V, 33.5V, 34.0V, 34.5V, 35.0V +proms2 = [0.0065, 0.0063,0.0065,0.0065,0.0065,0.0065,0.0065,0.007] +upperlim2 = [1 for file in files2] +runs2 = [] +for file in range(len(files2)): + run_spe2 = RunInfo(['/Users/albertwang/Desktop/nEXO/HDF Data/july_11th_dark_data/CA data/405nm/' + files2[file]], specifyAcquisition = False, do_filter = False, upper_limit = upperlim2[file], baseline_correct = True, prominence = proms2[file], is_led = True) + runs2.append(run_spe2) +biases2 = [run.bias for run in runs2] + + + + +invC_spe_filter2 = 0.0132 +invC_spe_err_filter2 = 0.000089 + +#%% take a look at waveforms (solicited) +run_spe_solicited2 = RunInfo(['/Users/albertwang/Desktop/nEXO/HDF Data/july_11th_dark_data/CA data/405nm/Run_1689175515.hdf5'], do_filter = False, is_solicit = True, upper_limit = .02, baseline_correct = True, plot_waveforms = False, fourier = False) +#%% take a look at finger plots + +# runs2[1].plot_led_dark_hists('169', '.') +for i in range(len(files2)): + plt.figure() + runs2[i].plot_led_dark_hists('171', '.') + +#%% set the initial guesses and fitting range + +range_lows2 = [12,0.00465,0.00495, 0.004984, 0.00540,0.00523, 0.00583, 0.00592] +centers2 = [12,0.006174, 0.006249, 0.006636, 0.00704, 0.00723,0.00786, 0.00834] # +range_highs2 = [12,0.022,0.0246, 0.02671, 0.02947, 0.03110,0.0332, 0.03658] #only use from 33V and onwards + +#%% plot with peak fit - test + +# n= 6#test the different biases +# T = 171 +# con = 'GN' +# info_spe = MeasurementInfo() +# info_spe.condition = con +# info_spe.date = runs2[n].date +# info_spe.temperature = T +# info_spe.bias = runs2[n].bias +# info_spe.baseline_numbins = 50 +# info_spe.peaks_numbins = 200 #unused +# info_spe.data_type = 'h5' +# wp = 
WaveformProcessor(info_spe, run_info_self = runs2[n], run_info_solicit = run_spe_solicited2, baseline_correct = True, range_low = range_lows2[n], range_high = range_highs2[n], center = centers2[n], peak_range = (1,4)) +# wp.process(do_spe = True, do_alpha = False) +# wp.plot_both_histograms() +# wp.plot_peak_histograms() +# wp.plot_spe() + +#%% make a list of ProcessWaveforms objects +campaign_spe2 = [] + +for i in range(3, len(runs2)): + peak_num = () + if i == 0: #these are Gauss fitted with 3 peaks (32V and 32.5V) + continue + else: + peak_num = (1,4) + T = 171 + con = 'GN' + info_spe = MeasurementInfo() + info_spe.condition = con + info_spe.date = runs2[i].date + info_spe.temperature = T + info_spe.bias = runs2[i].bias + info_spe.baseline_numbins = 50 + info_spe.peaks_numbins = 200 + info_spe.data_type = 'h5' + wp_spe = WaveformProcessor(info_spe, run_info_self = runs2[i], run_info_solicit = run_spe_solicited2, baseline_correct = True, range_low = range_lows2[i], range_high = range_highs2[i], center = centers2[i], peak_range = peak_num) + wp_spe.process(do_spe = True, do_alpha = False) + campaign_spe2.append(wp_spe) + +# campaign_spe2 = [] + +# for i in range(1, len(runs2)): +# peak_num = () +# if i == 2: #these are Gauss fitted with 3 peaks (32V and 32.5V) +# continue +# else: +# peak_num = (1,4) +# T = 171 +# con = 'GN' +# info_spe = MeasurementInfo() +# info_spe.condition = con +# info_spe.date = runs2[i].date +# info_spe.temperature = T +# info_spe.bias = runs2[i].bias +# info_spe.baseline_numbins = 50 +# info_spe.peaks_numbins = 200 +# info_spe.data_type = 'h5' +# wp_spe = WaveformProcessor(info_spe, run_info_self = runs2[i], run_info_solicit = run_spe_solicited, baseline_correct = True, range_low = range_lows[i], range_high = range_highs[i], center = centers[i], peak_range = peak_num, status = 1) +# wp_spe.process(do_spe = True, do_alpha = False) +# campaign_spe2.append(wp_spe) + +# campaign_spe3 = [] + +# for i in range(1,len(runs2)): +# peak_num = () +# if i == 2: #these are Gauss fitted with 3 peaks (32V and 32.5V) +# continue +# else: +# peak_num = (1,4) +# T = 171 +# con = 'GN' +# info_spe = MeasurementInfo() +# info_spe.condition = con +# info_spe.date = runs2[i].date +# info_spe.temperature = T +# info_spe.bias = runs2[i].bias +# info_spe.baseline_numbins = 50 +# info_spe.peaks_numbins = 200 +# info_spe.data_type = 'h5' +# wp_spe = WaveformProcessor(info_spe, run_info_self = runs2[i], run_info_solicit = run_spe_solicited, baseline_correct = True, range_low = range_lows[i], range_high = range_highs[i], center = centers[i], peak_range = peak_num, status = 2) +# wp_spe.process(do_spe = True, do_alpha = False) +# campaign_spe3.append(wp_spe) + + +#%% plot linear fit to the breakdown voltage + +# curr_campaign = campaign_spe + +# filtered_spe = SPE_data(curr_campaign, invC_spe_filter, invC_spe_err_filter, filtered = False) #not filtered for this liquefaction +# filtered_spe.plot_spe(in_ov = False, absolute = False, out_file = None) + +# print(filtered_spe.v_bd) +# print(filtered_spe.v_bd_err) + +#%% + +for i in range(len(campaign_spe2)): + campaign_spe2[i].plot_peak_histograms() + campaign_spe2[i].plot_spe() + + +#%% plot linear fit to the breakdown voltage + +curr_campaign2 = campaign_spe2 + +filtered_spe2 = SPE_data(curr_campaign2, invC_spe_filter2, invC_spe_err_filter2, filtered = False) + +filtered_spe2.plot_CA() +# filtered_spe2.plot_spe(in_ov = False, absolute = False, out_file = None) +#%% plot linear fit to the breakdown voltage + +# curr_campaign3 = campaign_spe3 + +# 
filtered_spe3 = SPE_data(curr_campaign3, invC_spe_filter, invC_spe_err_filter, filtered = False) +# filtered_spe3.plot_spe(in_ov = False, absolute = False, out_file = None) +#%% plot linear fit to the breakdown voltage + +# breakdown_vals = [] +# breakdown_err_vals= [] +# conditions = ['GN', 'LXe', 'LED-on'] +# fig = plt.figure() + +# # filtered_spe.plot_spe(in_ov = False, absolute = False, out_file = None) +# # breakdown_vals.append(filtered_spe.v_bd) +# # breakdown_err_vals.append(filtered_spe.v_bd_err) +# filtered_spe2.plot_spe(in_ov = False, absolute = False, out_file = None) +# breakdown_vals.append(filtered_spe2.v_bd) +# breakdown_err_vals.append(filtered_spe2.v_bd_err) +# # filtered_spe3.plot_spe(in_ov = False, absolute = False, out_file = None) +# breakdown_vals.append(filtered_spe3.v_bd) +# breakdown_err_vals.append(filtered_spe3.v_bd_err) + +# textstr = f'Date: {filtered_spe.campaign[0].info.date}\n' +# textstr += f'Condition: {filtered_spe.campaign[0].info.condition}\n' +# textstr += f'RTD4: {filtered_spe.campaign[0].info.temperature} [K] (nominal) \n' +# textstr += f'LED: 405 [nm] \n' +# textstr += f'V$_L$$_E$$_D$: 2.5500 $\pm$ 0.0005 [V] \n' + +# for i in range(3): +# textstr += f'\n--\n' +# textstr += rf'V$_b$$_d$ ({conditions[i]}): {breakdown_vals[i]:0.3} $\pm$ {breakdown_err_vals[i]:0.1} [V]' + + +# props = dict(boxstyle='round', facecolor= 'blue', alpha=0.2) +# fig.text(0.72, 0.48, textstr, fontsize=8, +# verticalalignment='top', bbox=props) diff --git a/ProcessWaveforms_MultiGaussian.py b/ProcessWaveforms_MultiGaussian.py index 473514b..a405797 100644 --- a/ProcessWaveforms_MultiGaussian.py +++ b/ProcessWaveforms_MultiGaussian.py @@ -18,76 +18,37 @@ import glob from scipy.fft import fft, fftfreq from lmfit.models import LinearModel, GaussianModel, ExponentialModel -from typing import Any, Dict, List, Tuple, Optional -from RunInfo import RunInfo - - -def get_waveform(w: str) -> tuple[list[float], list[float]]: - # TODO: getting the axe - """ - Processes a waveform file, extracting metadata and data points. Metadata is skipped until - the line 'Time (s)' is found. Subsequent lines are treated as data with the first column - as time and the second as amplitude. - - Args: - w (str): Path to the waveform file. The file should be tab-delimited with the first - column being time in seconds, and the second column being the amplitude. - The first non-metadata line may start with 'Time (s)'. - - Returns: - tuple: Two lists, first one containing the time data in microseconds, and the second - one containing the amplitude data, both extracted from the waveform file. - """ +def get_waveform(w): time = [] amp = [] - f = open(w, "r") - + f = open(w, 'r') + metadata = {} data = {} - + header = True for x in f: - line = x.split("\t") + line = x.split('\t') if header: - if line[0] == "Time (s)": + if line[0] == 'Time (s)': header = False elif len(line) < 10: continue else: metadata[line[0]] = line[1] else: - t = float(line[0]) * 1e6 + t = float(line[0]) * 1E6 a = float(line[1]) time.append(t) amp.append(a) f.close() return (time, amp) - # REED DID THIS <3 -def get_peaks( - waveform_dir: str, peak_search_params: dict[str, type[int | float]] -) -> list[float]: - """ - Searches for peaks in a set of waveforms specified in a directory using parameters for peak - searching. This function applies signal.find_peaks to each waveform and stores the amplitude - of each peak. - - Args: - waveform_dir (str): The directory containing waveform files. 
Each file should be named
-                            with the prefix 'w' and have the extension '.txt'.
-        peak_search_params (Dict[str, Union[int, float]]): Parameters to be passed to
-                            scipy.signal.find_peaks. These parameters may include
-                            properties like 'height', 'threshold', 'distance', etc.,
-                            depending on the requirements for a valid peak.
-
-    Returns:
-        List[float]: A list containing the amplitudes of all detected peaks across all
-                     waveforms in the specified directory.
-    """
-    waveform_filenames = glob.glob(waveform_dir + "w*.txt")
+def get_peaks(waveform_dir, peak_search_params):
+    waveform_filenames = glob.glob(waveform_dir + 'w*.txt')
     all_peaks = []
     for idx, w in enumerate(waveform_filenames):
         if idx % 100 == 0:
@@ -99,35 +60,14 @@
             all_peaks.append(amp[peak])
     return all_peaks
 
-
-def get_peak_waveforms(
-    waveform_dir: str, num: int = -1
-) -> tuple[list[float], list[float], int]:
-    """
-    Reads a specified number of waveform files from a directory, and combines their time and
-    amplitude values into two lists. Also returns the total number of waveforms processed.
-
-    Args:
-        waveform_dir (str): The directory containing waveform files. Each file should be named
-                            with the prefix 'w' and have the extension '.txt'.
-        num (int, optional): The maximum number of waveform files to process. If this is set to
-                            a positive number, only the first 'num' waveforms will be processed.
-                            If it is set to -1 (default), all waveforms in the directory will
-                            be processed.
-
-    Returns:
-        Tuple[List[float], List[float], int]: A tuple containing three elements:
-            1. A list of amplitude values combined from all processed waveforms.
-            2. A list of corresponding time values combined from all processed waveforms.
-            3. The total number of waveforms processed.
-    """
-    # wfs = fnmatch.filter(os.listdir(filepath), 'w*')
-    # read in solicited trigger waveforms
-    waveform_filenames = glob.glob(waveform_dir + "w*.txt")
+def get_peak_waveforms(waveform_dir, num = -1):
+    #wfs = fnmatch.filter(os.listdir(filepath), 'w*')
+# read in solicited trigger waveforms
+    waveform_filenames = glob.glob(waveform_dir + 'w*.txt')
     values = []
     times = []
     num_w = 0
-    # search each waveform for pulses, reject those with any
+# search each waveform for pulses, reject those with any
     if num > 0:
         waveform_filenames = waveform_filenames[:num]
     for idx, w in enumerate(waveform_filenames):
@@ -139,1099 +79,848 @@
             times += time
     return values, times, num_w
 
-
 def get_baseline(waveform_dir, peak_search_params):
-    """_summary_
-
-    Args:
-        waveform_dir (_type_): _description_
-        peak_search_params (_type_): _description_
-
-    Returns:
-        _type_: _description_
-    """
-    # wfs = fnmatch.filter(os.listdir(filepath), 'w*')
-    # read in solicited trigger waveforms
-    waveform_filenames = glob.glob(waveform_dir + "w*.txt")
+    #wfs = fnmatch.filter(os.listdir(filepath), 'w*')
+# read in solicited trigger waveforms
+    waveform_filenames = glob.glob(waveform_dir + 'w*.txt')
     values = []
     times = []
     num_w = 0
-    # search each waveform for pulses, reject those with any
+# search each waveform for pulses, reject those with any
     for idx, w in enumerate(waveform_filenames):
         if idx % 100 == 0:
             print(idx)
         time, amp = get_waveform(w)
         peaks, props = signal.find_peaks(amp, **peak_search_params)
-        # aggregate all pulseless data
+# aggregate all pulseless data
         if len(amp) < 1:
             continue
-        if len(peaks) == 0 and np.amin(amp) > -0.25:
+        if len(peaks) == 0 and np.amin(amp) > -.25:
             num_w += 1
             values += amp[300:-300]
             times += time[300:-300]
     return values, times, num_w
-
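# Editor's note (illustrative sketch, not part of this patch): typical use of the
# baseline helpers above. The directory path and the find_peaks thresholds below
# are hypothetical placeholders, not values taken from this analysis.
#
#     example_params = {'height': 0.005, 'distance': 100}  # assumed find_peaks kwargs
#     amps, times, n_quiet = get_baseline('./waveforms/', example_params)
#     print('kept ' + str(n_quiet) + ' pulse-free waveforms, ' + str(len(amps)) + ' samples')
#     save_baseline_csv('./waveforms/', 'baseline.csv', example_params)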
 def save_baseline_csv(waveform_dir, savedir, peak_search_params):
     waveform_data, waveform_times, _ = get_baseline(waveform_dir, peak_search_params)
-    data = {"waveform data": waveform_data}
+    data = {'waveform data': waveform_data}
     df = pd.DataFrame(data)
     df.to_csv(savedir)
 
-
 def save_peaks_csv(waveform_dir, savedir, peak_search_params):
     peaks = get_peaks(waveform_dir, peak_search_params)
-    data = {"peaks": peaks}
+    data = {'peaks': peaks}
     df = pd.DataFrame(data)
     df.to_csv(savedir)
 
-
 def read_data_csv(filename):
     df = pd.read_csv(filename)
     return df
 
-
-def Gauss(x: np.ndarray, A: float, B: float, C: float) -> np.ndarray:
-    """
-    Calculates the value of a Gaussian function at a given point or array of points.
-
-    Args:
-        x (Union[float, np.ndarray]): The point(s) at which to evaluate the Gaussian function.
-                                      This can be a single float or a numpy array of floats.
-        A (float): The height of the Gaussian's peak.
-        B (float): The position of the center of the peak.
-        C (float): The standard deviation, which controls the width of the peak.
-
-    Returns:
-        Union[float, np.ndarray]: The value(s) of the Gaussian function at the given point(s).
-    """
-    y = A * np.exp(-((x - B) ** 2) / (2 * C * C))
+def Gauss(x, A, B, C):
+    y = A * np.exp(-(x - B) ** 2 / (2 * C * C))
     return y
 
-
-def fit_gauss(
-    values: list[float], range_low: float, range_high: float
-) -> lm.model.ModelResult:
-    """
-    Fits a Gaussian model to the provided data within the specified range using the
-    lmfit.models.GaussianModel method.
-
-    Args:
-        values (List[float]): A list of values to which a Gaussian function will be fitted.
-        range_low (float): The lower bound of the range to consider for fitting.
-        range_high (float): The upper bound of the range to consider for fitting.
-
-    Returns:
-        lm.model.ModelResult: An instance of the ModelResult class in the lmfit package, containing the results
-                              of the model fitting process. This includes parameters of the fitted model and
-                              other statistical information.
-    """
-    histogram = np.histogram(values, bins=40)
+def fit_gauss(values, range_low, range_high):
+    histogram = np.histogram(values, bins = 40)
     counts = histogram[0]
     bins = histogram[1]
-    centers = (bins[1:] + bins[:-1]) / 2
+    centers = (bins[1:] + bins[:-1])/2
     model = lm.models.GaussianModel()
-    params = model.make_params(
-        amplitude=max(counts), center=np.mean(values), sigma=np.std(values)
-    )
-    res = model.fit(counts, params=params, x=centers, weights=np.sqrt(1 / counts))
+    params = model.make_params(amplitude=max(counts), center=np.mean(values), sigma=np.std(values))
+    res = model.fit(counts, params=params, x=centers, weights=np.sqrt(1/counts))
     return res
 
-
-def fit_baseline_gauss(
-    values: list[float], binnum: int = 50, alpha: bool = False
-) -> dict[str, type[float | Any]]:
-    """
-    Fits a Gaussian model to the provided data using the lmfit.models.GaussianModel method.
-    This function also allows to handle alpha-type peak fitting based on the 'alpha' parameter.
-
-    Args:
-        values (List[float]): A list of values to which a Gaussian function will be fitted.
-        binnum (int, optional): The number of bins to use for histogramming the data. Defaults to 50.
-        alpha (bool, optional): If True, the function assumes the data represents an alpha
-                                peak, and sets fitting range accordingly. If False, the function
-                                estimates the fitting range based on the data's standard deviation.
-                                Defaults to False.
- - Returns: - Dict[str, Union[float, Any]]: A dictionary containing the center, low, and high bounds of the - fitting range, as well as the fitted model result. - """ +def fit_baseline_gauss(values, binnum = 50, alpha = False): f_range = {} - if alpha: # TODO no hardcoded parameters !! - f_range["low"] = -0.0005 - # f_range['low'] = 0.0 - f_range["high"] = 0.0045 - # f_range['high'] = 0.003 - f_range["center"] = (f_range["high"] + f_range["low"]) / 2.0 + if alpha: + f_range['low'] = -0.0005 + # f_range['low'] = 0.0 + f_range['high'] = 0.0045 + # f_range['high'] = 0.003 + f_range['center'] = (f_range['high'] + f_range['low']) / 2.0 else: - f_range["center"] = np.mean(values) + f_range['center'] = np.mean(values) std_guess = np.std(values) - f_range["low"] = f_range["center"] - 2.0 * std_guess - f_range["high"] = f_range["center"] + 2.0 * std_guess + f_range['low'] = f_range['center'] - 2.0 * std_guess + f_range['high'] = f_range['center'] + 2.0 * std_guess bin_density = float(binnum) / (np.amax(values) - np.amin(values)) - new_binnum = int(bin_density * (f_range["high"] - f_range["low"])) - limit_values = values[(values >= f_range["low"]) & (values <= f_range["high"])] - curr_hist = np.histogram(limit_values, bins=new_binnum) + new_binnum = int(bin_density * (f_range['high'] - f_range['low'])) + limit_values = values[(values >= f_range['low']) & (values <= f_range['high'])] + curr_hist = np.histogram(limit_values, bins = new_binnum) # plt.hist(values, bins= binnum) counts = curr_hist[0] bins = curr_hist[1] - centers = (bins[1:] + bins[:-1]) / 2 - + centers = (bins[1:] + bins[:-1])/2 + model = lm.models.GaussianModel() - params = model.make_params( - amplitude=np.amax(counts), center=np.mean(limit_values), sigma=np.std(values) - ) - res = model.fit(counts, params=params, x=centers, weights=np.sqrt(1 / counts)) + params = model.make_params(amplitude=np.amax(counts), center=np.mean(limit_values), sigma=np.std(values)) + res = model.fit(counts, params=params, x=centers, weights=np.sqrt(1/counts)) # plt.step(centers, counts, where = 'mid') # plt.plot(centers, res.eval(params = res.params, x = centers), '--') - f_range["fit"] = res - # return {'center': np.mean(values), 'low': np.amin(values), 'high': np.amax(values), 'fit': res} + f_range['fit'] = res +# return {'center': np.mean(values), 'low': np.amin(values), 'high': np.amax(values), 'fit': res} return f_range -def fit_peaks_multigauss( - values: np.ndarray, - baseline_width: float, - centers: list[float], - numpeaks: int = 4, - cutoff: tuple[float, float] = (0, np.infty), -) -> lm.model.ModelResult: - """ - Fits multiple Gaussian functions to a 'finger plot' made from given values using the - lmfit.models.GaussianModel method. - - Args: - values (List[float]): Heights of peaks extracted from waveforms. - baseline_width (float): An estimate of the width in Volts of the noise. - centers (List[float]): Initial guesses for the centroid of each Gaussian. - numpeaks (int, optional): The number of peaks you want to fit. Defaults to 4. - cutoff (Tuple[float, float], optional): Low and high cutoff values. Defaults to (0, np.infty). - - Returns: - ModelResult: An lmfit ModelResult object containing all fit information. 
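A detail worth keeping in mind from fit_baseline_gauss above: when the fit is restricted to mean +/- 2 sigma, the bin count is rescaled so that the bin density of the original full-range histogram is preserved inside the narrower window. The arithmetic in isolation, with made-up numbers:

    import numpy as np

    rng = np.random.default_rng(2)
    values = rng.normal(0.0, 0.002, 5_000)

    binnum = 50
    center = values.mean()
    low, high = center - 2.0 * values.std(), center + 2.0 * values.std()

    # keep the full-range bin *density* when narrowing to [low, high]
    bin_density = binnum / (values.max() - values.min())
    new_binnum = int(bin_density * (high - low))

    limited = values[(values >= low) & (values <= high)]
    counts, bins = np.histogram(limited, bins = new_binnum)
    print(binnum, '->', new_binnum)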
- """ - curr_peak_data = values[(values >= cutoff[0]) & (values <= cutoff[1])] - binnum = round(np.sqrt(len(curr_peak_data))) - counts, bins = np.histogram(curr_peak_data, bins=binnum) - bin_centers = (bins[1:] + bins[:-1]) / 2 - - model = LinearModel(prefix="l_") - - for i in range(1, numpeaks + 1): - model += GaussianModel(prefix=f"g{i}_") - - params = model.make_params( - l_slope=0, - l_intercept=counts[0], - ) - - peak_scale = max(counts) * np.sqrt(2 * np.pi) * baseline_width - for i in range(1, numpeaks + 1): - params[f"g{i}_amplitude"].value = peak_scale / (2**i) - params[f"g{i}_center"].value = centers[i - 1] - params[f"g{i}_center"].max = params[f"g{i}_center"].value * 1.3 - params[f"g{i}_center"].min = params[f"g{i}_center"].value * 0.8 - - params[f"g{i}_sigma"].value = 0.5 * baseline_width - params[f"g{i}_sigma"].min = 0.3 * 0.5 * baseline_width - params[f"g{i}_sigma"].max = 2 * 0.5 * baseline_width - - params[f"g{i}_amplitude"].min = 0 - - res = model.fit(counts, params=params, x=bin_centers, weights=1 / np.sqrt(counts)) +def fit_peaks_multigauss(values, baseline_loc, baseline_width, binnum=400, range_low = 0, range_high = 2, center = 0.1, offset_num = 0, peak_range = (0,4)): + + low_peak = peak_range[0] + high_peak = peak_range[1] + + fit_range = [] #defines estimated center and the locations of the left and right "edges" of the "finger" + fit_range.append({'low': range_low, 'high': range_high}) + curr_peak_data = values[(values >= range_low) & (values <= range_high)] + # binnum = int(np.sqrt(len(curr_peak_data))) # bin number = square root of number of data points + curr_hist = np.histogram(curr_peak_data, bins = binnum) + # plt.hist(curr_peak_data, bins = binnum) + counts = curr_hist[0] + + print('bins: ' + str(binnum)) - print(res.fit_report()) + bins = curr_hist[1] + centers = (bins[1:] + bins[:-1])/2 + + + + for peak in range(low_peak, high_peak + 1): + if peak == low_peak: + model = GaussianModel(prefix='g' + str(low_peak) + '_') + else: + model = model + GaussianModel(prefix='g' + str(peak) + '_') + + model = model + LinearModel(prefix= 'l_') + + + g_center = [center * (idx + offset_num) for idx in range(low_peak, high_peak + 1)] + + #constraints for center + + g_center_index = 0 + for peak in range(low_peak, high_peak + 1): + if peak == low_peak: + model.set_param_hint('g' + str(peak) + '_center', value = g_center[g_center_index], min = range_low, max = baseline_width + g_center[g_center_index]) + g_center_index += 1 + elif peak == high_peak: + g_center_last = len(g_center) - 1 #last index of g_center + model.set_param_hint('g' + str(peak) + '_center', value = g_center[g_center_last], min = g_center[g_center_last] - baseline_width, max = range_high) + else: + model.set_param_hint('g' + str(peak) + '_center', value = g_center[ g_center_index], min = g_center[g_center_index] - baseline_width, max = baseline_width + g_center[g_center_index]) + g_center_index += 1 + + + # model.set_param_hint('g1_center', value = g_center[0], min = range_low, max = baseline_width + g_center[0]) + # model.set_param_hint('g2_center', value = g_center[1], min = g_center[1] - baseline_width, max = baseline_width + g_center[1]) + # model.set_param_hint('g3_center', value = g_center[2], min = g_center[2] - baseline_width, max = baseline_width + g_center[2]) + # model.set_param_hint('g4_center', value = g_center[3], min = g_center[3] - baseline_width, max = range_high) + + #constraints for sigma + + for peak in range(low_peak, high_peak + 1): + model.set_param_hint('g' + str(peak) + '_sigma', 
value = 0.5 * baseline_width, min = 0, max = baseline_width) + + # model.set_param_hint('g1_sigma', value = 0.5 * baseline_width, min = 0, max = baseline_width) + # model.set_param_hint('g2_sigma', value = 0.5 * baseline_width, min = 0, max = baseline_width) + # model.set_param_hint('g3_sigma', value = 0.5 * baseline_width, min = 0, max = baseline_width) + # model.set_param_hint('g4_sigma', value = 0.5 * baseline_width, min = 0, max = baseline_width) + + #constraints for amplitude + g_amplitude = [np.amax(counts)*np.sqrt(2*np.pi)*baseline_width/(2**num) for num in range(low_peak, high_peak + 1)] + + g_amplitude_index = 0 + for peak in range(low_peak, high_peak + 1): + model.set_param_hint('g' + str(peak) + '_amplitude', value = g_amplitude[g_amplitude_index], min = 0) + g_amplitude_index += 1 + + # model.set_param_hint('g1_amplitude', value = g_amplitude[0], min = 0) + # model.set_param_hint('g2_amplitude', value = g_amplitude[1], min = 0) + # model.set_param_hint('g3_amplitude', value = g_amplitude[2], min = 0) + # model.set_param_hint('g4_amplitude', value = g_amplitude[3], min = 0) + + model.set_param_hint('l_slope', value = 0, max = 0) #constraint the slope fit to be less or equal to 0 + model.set_param_hint('l_intercept', value = counts[0]) + + params = model.make_params() + + # params = model.make_params( + # g1_amplitude=max(counts)*np.sqrt(2*np.pi)*baseline_width/2, + # g2_amplitude=max(counts)*np.sqrt(2*np.pi)*center/4, + # g3_amplitude=max(counts)*np.sqrt(2*np.pi)*baseline_width/8, + # g4_amplitude=max(counts)*np.sqrt(2*np.pi)*baseline_width/16, + # g1_center=center*(1+offset_num), + # g2_center=center*(2+offset_num), + # g3_center=center*(3+offset_num), + # g4_center=center*(4+offset_num), + # g1_sigma= 0.5*baseline_width, + # g2_sigma= 0.5*baseline_width, + # g3_sigma= 0.5*baseline_width, + # g4_sigma= 0.5*baseline_width, + # l_slope = 0, + # l_intercept = counts[0], + # ) + + + # params['g1_sigma'].max = baseline_width + # params['g2_sigma'].max = baseline_width + # params['g3_sigma'].max = baseline_width + # params['g4_sigma'].max = baseline_width + # params['g5_sigma'].max = baseline_width + # params['g6_sigma'].max = baseline_width + # params['g1_center'].min = range_low + # params['g1_center'].max = baseline_width + g1_c + # params['g2_center'].min = range_low + # params['g2_center'].max = baseline_width + g2_c + # params['g3_center'].min = g3_c - baseline_width + # params['g3_center'].max = g3_c + baseline_width + # params['g4_center'].min = g4_c - baseline_width + # params['g4_center'].max = range_high + # params['g5_center'].min = range_low + # params['g5_center'].max = center*5 + 0.5*baseline_width + # params['g6_center'].min = range_low + # params['g6_center'].max = center*5 + 0.5*baseline_width + # params['g1_amplitude'].min = 0 + # params['g2_amplitude'].min = 0 + # params['g3_amplitude'].min = 0 + # params['g4_amplitude'].min = 0 + # params['g5_amplitude'].min = 0 + # params['g6_amplitude'].min = 0 + + # print(center*(1+offset_num)) + res = model.fit(counts, params=params, x=centers, weights = 1/np.sqrt(counts)) + + # y_fit = res.eval(x=centers) + # plt.plot(centers, y_fit) + #**************** + # ci = res.conf_interval() + # lm.printfuncs.report_ci(ci) + #**************** + # print(res.fit_report()) return res - -def fit_alpha_gauss( - values: List[float], binnum: int = 20 -) -> Dict[str, lm.model.ModelResult]: - """ - Performs a Gaussian fit to a given data set. 
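The model construction above, one GaussianModel per photoelectron peak summed with a LinearModel pedestal and constrained through set_param_hint before make_params, can be exercised on its own. A minimal two-peak sketch with synthetic data; every number here is illustrative rather than taken from the analysis:

    import numpy as np
    from lmfit.models import GaussianModel, LinearModel

    rng = np.random.default_rng(3)
    data = np.concatenate([rng.normal(0.1, 0.01, 6_000),    # 1 p.e. finger
                           rng.normal(0.2, 0.01, 3_000),    # 2 p.e. finger
                           rng.uniform(0.05, 0.30, 500)])   # flat pedestal

    counts, bins = np.histogram(data, bins = 200)
    centers = (bins[1:] + bins[:-1]) / 2

    model = GaussianModel(prefix = 'g1_') + GaussianModel(prefix = 'g2_') + LinearModel(prefix = 'l_')
    baseline_width = 0.02
    for i, guess in enumerate([0.1, 0.2], start = 1):
        model.set_param_hint(f'g{i}_center', value = guess, min = guess - baseline_width, max = guess + baseline_width)
        model.set_param_hint(f'g{i}_sigma', value = 0.5 * baseline_width, min = 0, max = baseline_width)
        model.set_param_hint(f'g{i}_amplitude', value = counts.max() * np.sqrt(2 * np.pi) * baseline_width / 2**i, min = 0)
    model.set_param_hint('l_slope', value = 0, max = 0)     # pedestal must not rise
    model.set_param_hint('l_intercept', value = float(counts[0]))

    mask = counts > 0                                        # keep Poisson weights finite
    res = model.fit(counts[mask], params = model.make_params(), x = centers[mask], weights = 1 / np.sqrt(counts[mask]))
    print(res.params['g1_center'].value, res.params['g2_center'].value)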
The fitting process is repeated twice to - refine the center and standard deviation estimates, and provide a narrower fit range. - - Args: - values (List[float]): List of values to perform the Gaussian fit on. - binnum (int, optional): The number of bins to use when creating the histogram. Defaults to 20. - - Returns: - Dict[str, ModelResult]: A dictionary containing the range ('low', 'high', 'center') and the final fit result ('fit') from the Gaussian model. - """ - # TODO fit_info should be a class +def fit_alpha_gauss(values, binnum=20): f_range = {} - curr_hist = np.histogram(values, bins=binnum) + curr_hist = np.histogram(values, bins = binnum) counts = curr_hist[0] bins = curr_hist[1] - centers = (bins[1:] + bins[:-1]) / 2 - f_range["center"] = centers[np.argmax(counts)] + centers = (bins[1:] + bins[:-1])/2 + f_range['center'] = centers[np.argmax(counts)] std_guess = np.std(values) mean_guess = centers[np.argmax(counts)] - f_range["low"] = mean_guess - 0.25 * std_guess - f_range["high"] = mean_guess + 0.5 * std_guess - # print(f_range['center'], f_range['low'], f_range['high']) - curr_peak_data = values[(values >= f_range["low"]) & (values <= f_range["high"])] - - # high_val = 3.5 - # low_val = 2.4 - # center_val = (high_val - low_val) / 2.0 - # curr_peak_data = values[(values > low_val) & (values < high_val)] - curr_hist = np.histogram(curr_peak_data, bins=binnum) - # plt.hist(curr_peak_data, bins = binnum) + f_range['low'] = mean_guess - 0.25 * std_guess + f_range['high'] = mean_guess + 0.5 * std_guess +# print(f_range['center'], f_range['low'], f_range['high']) + curr_peak_data = values[(values >= f_range['low']) & (values <= f_range['high'])] + +# high_val = 3.5 +# low_val = 2.4 +# center_val = (high_val - low_val) / 2.0 +# curr_peak_data = values[(values > low_val) & (values < high_val)] + curr_hist = np.histogram(curr_peak_data, bins = binnum) +# plt.hist(curr_peak_data, bins = binnum) counts = curr_hist[0] bins = curr_hist[1] - centers = (bins[1:] + bins[:-1]) / 2.0 + centers = (bins[1:] + bins[:-1])/2.0 model = lm.models.GaussianModel() - params = model.make_params( - amplitude=max(counts), center=mean_guess, sigma=std_guess - ) - res = model.fit( - counts, params=params, x=centers, weights=np.sqrt(1 / counts), nan_policy="omit" - ) - - mean_guess = res.params["center"].value - std_guess = res.params["sigma"].value - f_range["low"] = mean_guess - 2.0 * std_guess - f_range["high"] = mean_guess + 3.0 * std_guess - curr_peak_data = values[(values >= f_range["low"]) & (values <= f_range["high"])] - curr_hist = np.histogram(curr_peak_data, bins=binnum) + params = model.make_params(amplitude=max(counts), center=mean_guess, sigma=std_guess) + res = model.fit(counts, params=params, x=centers, weights=np.sqrt(1/counts)) + + mean_guess = res.params['center'].value + std_guess = res.params['sigma'].value + f_range['low'] = mean_guess - 2.0 * std_guess + f_range['high'] = mean_guess + 3.0 * std_guess + curr_peak_data = values[(values >= f_range['low']) & (values <= f_range['high'])] + curr_hist = np.histogram(curr_peak_data, bins = binnum) counts = curr_hist[0] bins = curr_hist[1] - centers = (bins[1:] + bins[:-1]) / 2.0 + centers = (bins[1:] + bins[:-1])/2.0 model = lm.models.GaussianModel() - params = model.make_params( - amplitude=max(counts), center=mean_guess, sigma=std_guess - ) - res = model.fit( - counts, params=params, x=centers, weights=np.sqrt(1 / counts), nan_policy="omit" - ) - - f_range["fit"] = res + params = model.make_params(amplitude=max(counts), center=mean_guess, 
sigma=std_guess) + res = model.fit(counts, params=params, x=centers, weights=np.sqrt(1/counts)) + + f_range['fit'] = res return f_range - - -def plot_fit( - fit_info: Dict[str, lm.model.ModelResult], - values: np.ndarray, - binnum: int = 20, - plot_hists: bool = True, - label: str | None = None, -) -> None: - """ - Plots the histogram of the data and the Gaussian fit. - - Args: - fit_info (Dict[str, ModelResult]): A dictionary containing the range ('low', 'high', 'center') and - the final fit result ('fit') from the Gaussian model. - values (List[float]): List of values to plot in the histogram. - binnum (int, optional): The number of bins to use when creating the histogram. Defaults to 20. - plot_hists (bool, optional): If True, the histogram is plotted. Defaults to True. - label (str, optional): Label for the Gaussian fit line. Defaults to None. - """ - fit_data = values[(values >= fit_info["low"]) & (values <= fit_info["high"])] + +def plot_fit(fit_info, values, binnum = 20, plot_hists = True, label = None): + fit_data = values[(values >= fit_info['low']) & (values <= fit_info['high'])] numvalues = len(fit_data) - h = 3.49 * (numvalues) ** (-1 / 3) * np.std(fit_data) - binnum = int(np.ceil((max(fit_data) - min(fit_data)) / h)) + h = 3.49*(numvalues)**(-1/3) * np.std(fit_data) + binnum = int(np.ceil((max(fit_data) - min(fit_data))/h)) if plot_hists: - curr_hist = plt.hist(fit_data, bins=binnum) - x = np.linspace(fit_info["low"], fit_info["high"], num=200) - plt.plot( - x, - fit_info["fit"].eval(params=fit_info["fit"].params, x=x), - color="red", - label=label, - ) - - -def get_mode(hist_data: Tuple[np.ndarray, np.ndarray]) -> Tuple[float, int]: - """ - Determines the mode (the value that appears most often) from histogram data. - - Args: - hist_data (Tuple[np.ndarray, np.ndarray]): A tuple containing the counts per bin and the bin edges, typically the output of a numpy histogram function. - - Returns: - Tuple[float, int]: The center of the bin with the highest count (mode) and the maximum count. - """ + curr_hist = plt.hist(fit_data, bins = binnum) + x = np.linspace(fit_info['low'],fit_info['high'],num=200) + plt.plot(x, fit_info['fit'].eval(params=fit_info['fit'].params, x=x), color='red', label = label) + +def get_mode(hist_data): counts = hist_data[0] bins = hist_data[1] - centers = (bins[1:] + bins[:-1]) / 2.0 + centers = (bins[1:] + bins[:-1])/2.0 max_index = np.argmax(counts) return centers[max_index], np.amax(counts) -# takes in measurement info, and processes it at waveform level -# constructs different histograms, and does gaussian fits +# takes in measurement info, and processes it at waveform level, constructs different histograms, and does gaussian fits class WaveformProcessor: - def __init__( - self, - info: MeasurementInfo, - centers: List[float] | np.ndarray, - run_info_self: Optional[RunInfo] = None, - run_info_solicit: Optional[RunInfo] = None, - baseline_correct: bool = False, - cutoff: Tuple[float, float] = (0, np.infty), - numpeaks: int = 4, - no_solicit: bool = False, - offset_num: int = 0, - ): - """ - Initializes the WaveformProcessor class with the provided parameters. - - Args: - info (MeasurementInfo): Class containing info regarding measurement. - centers (List[float]): Initial guesses for centroid of each gaussian. - run_info_self (Optional[RunInfo]): RunInfo class containing SPE or Alpha data. - run_info_solicit (Optional[RunInfo]): RunInfo class containing baseline data. 
- baseline_correct (bool): Boolean value indicating if baseline correction needs to be applied. Defaults to False. - cutoff (Tuple[float, float]): Low and high cutoff values. Defaults to (0,np.infty). - numpeaks (int): The number of peaks you want to fit. Defaults to 4. - no_solicit (bool): A flag indicating if there is no solicited waveform available. Defaults to False. - offset_num (int): The number of peaks to skip in the histogram. Defaults to 0. - """ - + def __init__(self, info, run_info_self = None, run_info_solicit = None, baseline_correct = False, range_low = 0, range_high = 2, center = 0.1, offset_num = 0, peak_range = (1,4), no_solicit = False, status = 0): self.baseline_correct = baseline_correct self.info = info self.run_info_self = run_info_self - self.cutoff = cutoff - self.centers = centers - self.numpeaks = numpeaks - self.no_solicit = no_solicit + self.range_low = range_low + self.range_high = range_high + self.center = center + self.numpeaks = peak_range[1] - (peak_range[0] - 1) self.offset_num = offset_num - # options for if you forgot to take pre-breakdown data....... - if no_solicit: + self.peak_range = peak_range #a tuple that contains the lowest peak and highest peak to Gauss fit + self.low_peak = peak_range[0] + self.high_peak = peak_range[1] + self.no_solicit = no_solicit + self.status = status + if no_solicit == False: + self.run_info_solicit = run_info_solicit + self.baseline_mode = run_info_solicit.baseline_mode + else: self.baseline_mode = run_info_self.baseline_mode_mean # self.baseline_mode = 1 #PLACEHOLDER self.baseline_rms = run_info_self.baseline_mode_rms - self.baseline_std = 0.25 * run_info_self.baseline_mode_std + self.baseline_std = 0.25*run_info_self.baseline_mode_std self.baseline_err = run_info_self.baseline_mode_err self.baseline_rms = run_info_self.baseline_mode_rms # self.baseline_std = 1 # self.baseline_err = 1 - else: - self.run_info_solicit = run_info_solicit - self.baseline_mode = run_info_solicit.baseline_mode - - def process_h5(self) -> None: - """Processes the .h5 files associated with the WaveformProcessor instance. - - The method extracts peak data and, if available, baseline data from .h5 files. - It filters the peak data based on a predefined cutoff range and also handles solicit data if it's not disabled. 
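Note the inclusive counting in __init__ above: peak_range = (low, high) spans high - (low - 1) fingers, so (1, 4) fits four Gaussians while (2, 5) also fits four but skips the 1 p.e. peak. A quick check:

    for peak_range in [(1, 4), (2, 5), (3, 4)]:
        low_peak, high_peak = peak_range
        numpeaks = high_peak - (low_peak - 1)
        print(peak_range, '->', numpeaks, 'peaks:', list(range(low_peak, high_peak + 1)))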
- """ + + def process_h5(self): for curr_file in self.run_info_self.hd5_files: - for curr_acquisition_name in self.run_info_self.acquisition_names[ - curr_file - ]: - # self.peak_values = np.array(self.run_info_self.peak_data[curr_file][curr_acquisition_name]) - self.peak_values = np.array(self.run_info_self.all_peak_data) - self.peak_values = self.peak_values[ - (self.peak_values >= self.cutoff[0]) - & (self.peak_values <= self.cutoff[1]) - ] # peaks in a range - + for curr_acquisition_name in self.run_info_self.acquisition_names[curr_file]: +# self.peak_values = np.array(self.run_info_self.peak_data[curr_file][curr_acquisition_name]) + # if self.no_solicit == True: + if self.status == 0: + self.all = np.array(self.run_info_self.all_peak_data) + elif self.status == 1: + self.all = np.array(self.run_info_self.all_dark_peak_data) + else: + self.all = np.array(self.run_info_self.all_led_peak_data) + + self.peak_values = self.all[(self.all >= self.range_low) & (self.all <= self.range_high)] #peaks in a range - if not self.no_solicit: + + if self.no_solicit == False: for curr_file in self.run_info_solicit.hd5_files: - for curr_acquisition_name in self.run_info_solicit.acquisition_names[ - curr_file - ]: + for curr_acquisition_name in self.run_info_solicit.acquisition_names[curr_file]: # try: if self.run_info_solicit.specifyAcquisition: - curr_acquisition_name = self.run_info_solicit.acquisition - # except: + curr_acquisition_name = self.run_info_solicit.acquisition + #except: else: - self.baseline_values = np.array( - self.run_info_solicit.peak_data[curr_file][ - curr_acquisition_name - ] - ) - self.baseline_values = np.array( - self.run_info_solicit.peak_data[curr_file][ - curr_acquisition_name - ] - ) - - # reads in the waveform data either from the raw data or from a pre-saved .csv file - def process(self, overwrite=False, do_spe=True, do_alpha=False): - """Processes the waveform data, extracting various statistical information from it. - - Args: - overwrite (bool, optional): If True, any previous processing results are overwritten. Defaults to False. - do_spe (bool, optional): If True, Single Photoelectron (SPE) data is processed, including fitting multiple peaks and calculating signal-to-noise ratio (SNR). Defaults to True. - do_alpha (bool, optional): If True, alpha particle data is processed. Defaults to False. 
- """ - self.process_h5() + self.baseline_values = np.array(self.run_info_solicit.peak_data[curr_file]) + + self.baseline_values = np.array(self.run_info_solicit.peak_data[curr_file][curr_acquisition_name]) + + def process_text(self, overwrite): +# check if already saved as csv + if not self.info.saved_to_csv or overwrite: +# if not save, read in waveform data and save to .csv + print('reading self-trig waveforms') + save_peaks_csv(self.info.selftrig_path, self.info.selftrig_savedir, self.info.peak_search_params) + print('reading solicit waveforms') + save_baseline_csv(self.info.solicit_path, self.info.solicit_savedir, self.info.peak_search_params) + self.baseline_values = read_data_csv(self.info.solicit_savedir)['waveform data'] + self.peak_values = read_data_csv(self.info.selftrig_savedir)['peaks'] + +# reads in the waveform data either from the raw data or from a pre-saved .csv file + def process(self, overwrite = False, do_spe = True, do_alpha = False, range_low = 0, range_high = 2, center = 0.1): + if self.info.data_type == 'text': + self.process_text(overwrite) + elif self.info.data_type == 'h5': + self.process_h5() + else: + return if do_alpha: - self.peak_values = self.peak_values[ - self.peak_values > self.info.min_alpha_value - ] - - self.numbins = int( - round(np.sqrt(len(self.peak_values))) - ) #!!! attr defined outside init - print(f"len: {len(self.peak_values)}") - print(f"{self.numbins}") - if self.no_solicit: - self.baseline_mean = self.baseline_mode - self.baseline_std = 0.002 # arbitrary - print("baseline mode: " + str(self.baseline_mode)) - print("baseline std: " + str(self.baseline_std)) + self.peak_values = self.peak_values[self.peak_values > self.info.min_alpha_value] + + # self.numbins = int(round(np.sqrt(len(self.peak_values)))) + # self.numbins = self.info.peaks_numbins + if self.peak_range != (1,4): #if doing 4 peaks, the bin number are calculated using proper stats + self.numbins = self.info.peaks_numbins else: + self.numbins = int(np.sqrt(len(self.peak_values))) + + if self.no_solicit == False: #added code for alpha analysis self.baseline_fit = fit_baseline_gauss( - self.baseline_values, binnum=self.info.baseline_numbins, alpha=do_alpha - ) - self.baseline_std = self.baseline_fit["fit"].values["sigma"] - self.baseline_mean = self.baseline_fit["fit"].values["center"] - self.baseline_err = self.baseline_fit["fit"].params["center"].stderr + self.baseline_values, + binnum = self.info.baseline_numbins, + alpha = do_alpha + ) + self.baseline_std = self.baseline_fit['fit'].values['sigma'] + self.baseline_mean = self.baseline_fit['fit'].values['center'] + self.baseline_err = self.baseline_fit['fit'].params['center'].stderr self.baseline_rms = np.sqrt(np.mean(self.baseline_values**2)) - print("baseline mean: " + str(self.baseline_mean)) - print("baseline std: " + str(self.baseline_std)) - + print('baseline mean: ' + str(self.baseline_mean)) + print('baseline std: ' + str(self.baseline_std)) + else: + self.baseline_mean = self.baseline_mode + self.baseline_std = 0.002 #arbitrary + print('baseline mode: ' + str(self.baseline_mode)) + print('baseline std: ' + str(self.baseline_std)) if do_spe: self.peak_fit = fit_peaks_multigauss( - values = self.peak_values, - baseline_width = 2.0 * self.baseline_std, - centers = self.centers, - numpeaks = self.numpeaks, - cutoff = self.cutoff + self.peak_values, + self.baseline_mean, + 2.0 * self.baseline_std, + binnum = self.numbins, + range_low = self.range_low, + range_high = self.range_high, + center = self.center, + offset_num = 
self.offset_num, + peak_range = self.peak_range ) - - self.peak_locs = [self.peak_fit.params[f'g{i}_center'].value for i in range(1, numpeaks + 1)] - self.peak_sigmas = [self.peak_fit.params[f'g{i}_sigma'].value for i in range(1, numpeaks + 1)] - self.peak_stds = [self.peak_fit.params[f'g{i}_center'].stderr for i in range(1, numpeaks + 1)] + self.peak_locs = [self.peak_fit.params['g' + str(idx + 1) + '_center'].value for idx in range(self.low_peak-1, self.high_peak)] + # self.peak_locs = [self.peak_fit.params['g1_center'].value, self.peak_fit.params['g2_center'].value, self.peak_fit.params['g3_center'].value, self.peak_fit.params['g4_center'].value,] + print(self.peak_locs) + + self.peak_sigmas = [self.peak_fit.params['g' + str(idx + 1) + '_sigma'].value for idx in range(self.low_peak-1, self.high_peak)] + # self.peak_sigmas = [self.peak_fit.params['g1_sigma'].value, self.peak_fit.params['g2_sigma'].value, self.peak_fit.params['g3_sigma'].value, self.peak_fit.params['g4_sigma'].value] + print(self.peak_sigmas) + + self.peak_stds = [self.peak_fit.params['g' + str(idx + 1) + '_center'].stderr for idx in range(self.low_peak-1, self.high_peak)] + # self.peak_stds = [self.peak_fit.params['g1_center'].stderr, self.peak_fit.params['g2_center'].stderr, self.peak_fit.params['g3_center'].stderr, self.peak_fit.params['g4_center'].stderr] + print(self.peak_stds) + + # self.peak_err = [np.sqrt(sigma**2 - self.baseline_std**2) for sigma in self.peak_sigmas] #error on peak location as rms difference between peak and baseline width + # self.peak_stds = self.peak_err - self.peak_wgts = [1.0 / curr_std for curr_std in self.peak_stds] self.spe_num = [] - - self.resolution = [ - (self.peak_locs[i + 1] - self.peak_locs[i]) - / np.sqrt(self.peak_sigmas[i] ** 2 + self.peak_sigmas[i + 1] ** 2) - for i in range(len(self.peak_locs) - 1) - ] - print("sigma SNR: " + str(self.resolution)) - - for idx in range(self.numpeaks): + + + self.resolution = [(self.peak_locs[i+1]-self.peak_locs[i])/np.sqrt(self.peak_sigmas[i]**2 + self.peak_sigmas[i+1]**2) for i in range(len(self.peak_locs)-1)] + print('sigma SNR: ' + str(self.resolution)) + + for idx in range(self.low_peak-1, self.high_peak): self.spe_num.append(float(idx + 1 + self.offset_num)) # self.peak_locs = sorted(self.peak_locs) - + # linear fit to the peak locations model = lm.models.LinearModel() params = model.make_params() + + self.spe_res = model.fit(self.peak_locs[:self.numpeaks], params=params, x=self.spe_num, weights=self.peak_wgts[:self.numpeaks]) # creates linear fit model +# + + print('SNR: ' + str(self.spe_res.params['slope'].value/self.baseline_mode)) + print('SNR 2-3: ' + str((self.peak_locs[2]-self.peak_locs[1])/self.baseline_mode)) + print('SNR 1-2: ' + str((self.peak_locs[1]-self.peak_locs[0])/self.baseline_mode)) + + if self.baseline_correct: #maybe changing this baseline correction might help clipping the baseline data to the right more + # self.A_avg = np.mean(self.all) - self.spe_res.params['intercept'].value # spectrum specific baseline correction + self.A_avg = np.mean(self.all) - self.spe_res.params['intercept'].value - self.spe_res = model.fit( - self.peak_locs[: self.numpeaks], - params=params, - x=self.spe_num, - weights=self.peak_wgts[: self.numpeaks], - ) # creates linear fit model - # - - print( - "SNR: " + str(self.spe_res.params["slope"].value / self.baseline_mode) - ) - print( - "SNR 2-3: " - + str((self.peak_locs[2] - self.peak_locs[1]) / self.baseline_mode) - ) - print( - "SNR 1-2: " - + str((self.peak_locs[1] - self.peak_locs[0]) / 
self.baseline_mode) - ) - - if self.baseline_correct: - self.A_avg = ( - np.mean(self.peak_values) - self.spe_res.params["intercept"].value - ) # spectrum specific baseline correction - # self.A_avg_err = self.A_avg * np.sqrt((sem(self.all) / np.mean(self.all))** 2 + (self.spe_res.params['intercept'].stderr / self.spe_res.params['intercept'].value)** 2) - self.A_avg_err = np.sqrt( - (sem(self.peak_values)) ** 2 - + (self.spe_res.params["intercept"].stderr) ** 2 - ) + # #self.A_avg_err = self.A_avg * np.sqrt((sem(self.all) / np.mean(self.all))** 2 + (self.spe_res.params['intercept'].stderr / self.spe_res.params['intercept'].value)** 2) + # self.A_avg_err = np.sqrt((sem(self.all))** 2 + (self.spe_res.params['intercept'].stderr)** 2) + self.A_avg_err = np.sqrt((sem(self.all))** 2 + (self.spe_res.params['intercept'].stderr)** 2) else: - self.A_avg = np.mean(self.peak_values) - self.A_avg_err = self.A_avg * np.sqrt( - (sem(self.peak_values) / np.mean(self.peak_values)) ** 2 - ) - - self.CA = self.A_avg / self.spe_res.params["slope"].value - 1 + # self.A_avg = np.mean(self.all) + # self.A_avg_err = self.A_avg * np.sqrt((sem(self.all) / np.mean(self.all)) ** 2) + self.A_avg = np.mean(self.all) + self.A_avg_err = self.A_avg * np.sqrt((sem(self.all) / np.mean(self.all)) ** 2) + + if self.run_info_self.led: + self.A_subtract_avg = self.get_subtract_hist_mean(self.run_info_self.all_led_peak_data, self.run_info_self.all_dark_peak_data, plot = False) + + # wesley_vals = (self.all - self.spe_res.params['intercept'].value) + # plt.figure() + # plt.hist(wesley_vals, bins = 1000, label = self.info.bias) + # plt.title(f'Average amplitude: {np.mean(wesley_vals):0.03}') + # plt.legend() + # plt.axvline(x = np.mean(wesley_vals), color = 'red') + + # print('here') #checks histograms and avg peak location + # plt.figure() + # plt.hist(self.led_off, bins = 150) + # plt.title(str(self.A_avg) + ' ' + str(self.spe_res.params['slope'].value)) + + # rest of this function is for CA + # self.A_avg = self.A_subtract_avg #changes the average value using the subtraction hist method + self.CA = self.A_avg / self.spe_res.params['slope'].value - 1 self.CA_err = self.CA * np.sqrt( - (self.A_avg_err / self.A_avg) ** 2 - + ( - self.spe_res.params["slope"].stderr - / self.spe_res.params["slope"].value - ) - ** 2 - ) - + (self.A_avg_err / self.A_avg) ** 2 + + (self.spe_res.params['slope'].stderr / self.spe_res.params['slope'].value) ** 2) + + if do_alpha: - self.alpha_fit = fit_alpha_gauss( - self.peak_values, binnum=self.info.peaks_numbins - ) - self.alpha_res = self.alpha_fit["fit"] + self.alpha_fit = fit_alpha_gauss(self.peak_values, binnum = self.info.peaks_numbins) + self.alpha_res = self.alpha_fit['fit'] def get_alpha_data(self): - """Retrieves the alpha pulse peak heights. - - Returns: - numpy.ndarray: An array of processed alpha particle data. - """ - return self.peak_values + return self.peak_values def get_baseline_data(self): - """Retrieves the raw aggregated baseline data. - - Returns: - numpy.ndarray: An array of processed baseline data values. - """ return self.baseline_values - - def get_alpha_fit(self) -> lm.model.ModelResult: - """Retrieves the fit results for the alpha particle data. - - Returns: - object: An object that represents the fitting results of the alpha particle data. - """ + + def get_alpha_fit(self): return self.alpha_res def get_baseline_fit(self): - """Retrieves the fit results for the baseline data. - - Returns: - object: An object that represents the fitting results of the baseline data. 
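The correlated-avalanche number computed above is CA = <A> / G_spe - 1: the mean pulse amplitude expressed in units of the single-p.e. gain, minus the primary avalanche, with the two relative errors added in quadrature. Numerically, with made-up inputs:

    import numpy as np

    A_avg, A_avg_err = 0.125, 0.002     # mean amplitude [V] (made up)
    slope, slope_err = 0.100, 0.001     # SPE gain from the linear fit [V/p.e.]

    CA = A_avg / slope - 1
    CA_err = CA * np.sqrt((A_avg_err / A_avg) ** 2 + (slope_err / slope) ** 2)
    print(f'CA = {CA:.3f} +- {CA_err:.3f}')     # -> CA = 0.250 +- 0.005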
- """ - return self.baseline_fit["fit"] - - def get_spe(self) -> Tuple[float, float]: - """Retrieves the slope value and its error from the spe fit results. - - Returns: - Tuple[float, float]: A tuple containing the slope value and its error. - """ - return (self.spe_res.params["slope"].value, self.spe_res.params["slope"].stderr) + return self.baseline_fit['fit'] - def get_CA(self) -> Tuple[float, float]: - """Retrieves the Correlated Avalanche (CA) correction factor and its error. - - Returns: - Tuple[float, float]: A tuple containing the CA factor and its error. - """ + def get_spe(self): + return (self.spe_res.params['slope'].value, self.spe_res.params['slope'].stderr) + + def get_CA(self): return (self.CA, self.CA_err) - def get_CA_spe(self, spe: float, spe_err: float) -> Tuple[float, float] - """Computes the Correlated Avalanche (CA) factor and its error based on given spe and its error. - - Args: - spe (float): The spe value. - spe_err (float): The error in the spe value. - - Returns: - Tuple[float, float]: A tuple containing the computed CA factor and its error. - """ + def get_CA_spe(self, spe, spe_err): + # print('average A error', self.A_avg_err) currCA = self.A_avg / spe - 1 currCA_err = currCA * np.sqrt( - (self.A_avg_err / self.A_avg) ** 2 + (spe_err / spe) ** 2 - ) + (self.A_avg_err / self.A_avg) ** 2 + + (spe_err / spe) ** 2) return (currCA, currCA_err) - - def get_CA_rms(self, spe: float, spe_err: float) -> Tuple[float, float]: - """Computes the root mean square (rms) of the Correlated Avalanche (CA) factor and its error based on given spe and its error. - - Args: - spe (float): The spe value. - spe_err (float): The error in the spe value. - - Returns: - Tuple[float, float]: A tuple containing the rms value of the computed CA factor and its error. - """ + + def get_CA_rms(self, spe, spe_err): currCA = self.A_avg / spe - 1 - Q_twi = self.peak_values - self.spe_res.params["intercept"].value + Q_twi = self.peak_values - self.spe_res.params['intercept'].value Q_1pe = spe sqrtval = Q_twi / Q_1pe - (currCA + 1) val = sqrtval * sqrtval rms = np.sqrt(np.mean(val)) rms_err = rms * np.sqrt( - (self.A_avg_err / self.A_avg) ** 2 + (spe_err / spe) ** 2 - ) + (self.A_avg_err / self.A_avg) ** 2 + + (spe_err / spe) ** 2) return (rms, rms_err) - - - def get_alpha(self, sub_baseline: bool = True) -> Tuple[float, float]: - """Retrieves the center value and its error from the alpha fit results. It subtracts the baseline mean if sub_baseline is set to True. - - Args: - sub_baseline (bool, optional): If True, subtracts the baseline mean from the alpha center value. Defaults to True. - - Returns: - Tuple[float, float]: A tuple containing the center value of alpha and its error. - """ - alpha_value = self.alpha_res.params["center"].value - alpha_error = self.alpha_res.params["center"].stderr + + def get_alpha(self, sub_baseline = True): + alpha_value = self.alpha_res.params['center'].value + alpha_error = self.alpha_res.params['center'].stderr if sub_baseline: baseline_value = self.baseline_mean baseline_error = self.baseline_err alpha_value -= baseline_value - alpha_error = np.sqrt( - alpha_error * alpha_error + baseline_error * baseline_error - ) + alpha_error = np.sqrt(alpha_error * alpha_error + baseline_error * baseline_error) return alpha_value, alpha_error - - def get_alpha_std(self) -> Tuple[float, float]: - """Retrieves the standard deviation and its error from the alpha fit results. - - Returns: - Tuple[float, float]: A tuple containing the standard deviation of alpha and its error. 
- """ - alpha_value = self.alpha_res.params["sigma"].value - alpha_error = self.alpha_res.params["sigma"].stderr + + def get_alpha_std(self): + alpha_value = self.alpha_res.params['sigma'].value + alpha_error = self.alpha_res.params['sigma'].stderr return alpha_value, alpha_error - - - def plot_spe( - self, - with_baseline: bool = True, - baselinecolor: str = "orange", - peakcolor: str = "blue", - savefig: bool = False, - path: Optional[str] = None, - ) -> None: - """Plots average pulse amplitudes as a function of # of Photoelectrons (PE). - - Args: - with_baseline (bool, optional): If True, plots the baseline data. Defaults to True. - baselinecolor (str, optional): Color used for the baseline data. Defaults to "orange". - peakcolor (str, optional): Color used for the SPE peak data. Defaults to "blue". - savefig (bool, optional): If True, saves the figure to the provided path. Defaults to False. - path (str, optional): Path where the figure should be saved. Used only if savefig is set to True. Defaults to None. - """ + + def plot_spe(self, with_baseline = True, baselinecolor = 'orange', peakcolor = 'blue', savefig = False, path = None): fig = plt.figure() - fig.tight_layout() - plt.rc("font", size=22) - plt.errorbar( - self.spe_num, - self.peak_locs[: self.numpeaks], - yerr=self.peak_stds[: self.numpeaks], - fmt=".", - label="Self-Triggered Peaks", - color="tab:" + peakcolor, - markersize=10, - ) + plt.errorbar(self.spe_num, self.peak_locs[:self.numpeaks], yerr = self.peak_stds[:self.numpeaks], fmt = '.', label = 'Solicited-LED-Triggered Peaks', color = 'tab:' + peakcolor, markersize = 10) if with_baseline: if self.no_solicit == False: - plt.errorbar( - 0, - self.baseline_mean, - yerr=self.baseline_err, - fmt=".", - label="Solicited Baseline Peak", - color="tab:" + baselinecolor, - markersize=10, - ) - # else: - # plt.errorbar(0, self.baseline_mode, yerr = self.baseline_err, fmt='.', label = 'Solicited Baseline Peak', color = 'tab:' + baselinecolor, markersize = 10) - - b = self.spe_res.params["intercept"].value - m = self.spe_res.params["slope"].value +# plt.errorbar(0, self.baseline_mean, yerr = self.baseline_std, fmt='.', label = 'Solicited Baseline Peak') + plt.errorbar(0, self.baseline_mean, yerr = self.baseline_err, fmt='.', label = 'Solicited Baseline Peak', color = 'tab:' + baselinecolor, markersize = 10) + + b = self.spe_res.params['intercept'].value + m = self.spe_res.params['slope'].value x_values = np.linspace(0, len(self.spe_num) + 1, 20) y_values = m * x_values + b - plt.plot( - x_values, - y_values, - "--", - color="tab:" + peakcolor, - label="Self-Triggered Fit", - ) - # dely = self.spe_res.eval_uncertainty(x=x_values, sigma=1) - # plt.fill_between(x_values, y_values+dely, y_values-dely) - # plt.plot(self.spe_num, self.spe_res.best_fit, 'r', label='Self-Triggered Fit') - - plt.xlabel("Photoelectron Peak Number") - plt.ylabel("Peak Location [V]") - + plt.plot(x_values, y_values, '--', color = 'tab:' + peakcolor, label = 'Solicited-LED-Triggered Fit') +# dely = self.spe_res.eval_uncertainty(x=x_values, sigma=1) +# plt.fill_between(x_values, y_values+dely, y_values-dely) +# plt.plot(self.spe_num, self.spe_res.best_fit, 'r', label='Self-Triggered Fit') + + plt.xlabel('Photoelectron Peak Number') + plt.ylabel('Peak Location [V]') + plt.legend() - - textstr = f"Date: {self.info.date}\n" - textstr += f"Condition: {self.info.condition}\n" - textstr += f"Bias: {self.info.bias:0.4} [V]\n" - textstr += f"RTD4: {self.info.temperature} [K]\n" - textstr += f"--\n" - textstr += f"""Slope: 
{self.spe_res.params['slope'].value:0.4} +- {self.spe_res.params['slope'].stderr:0.2} [V/p.e.]\n""" - textstr += f"""Intercept: {self.spe_res.params['intercept'].value:0.4} +- {self.spe_res.params['intercept'].stderr:0.2} [V]\n""" - textstr += rf"""Reduced $\chi^2$: {self.spe_res.redchi:0.4}""" - textstr += f"""\n""" - textstr += f"--\n" + + textstr = f'Date: {self.info.date}\n' + textstr += f'Condition: {self.info.condition}\n' + textstr += f'Bias: {self.info.bias:0.4} [V]\n' + textstr += f'RTD4: {self.info.temperature} [K]\n' + textstr += f'--\n' + textstr += f'''Slope: {self.spe_res.params['slope'].value:0.4} +- {self.spe_res.params['slope'].stderr:0.2} [V/p.e.]\n''' + textstr += f'''Intercept: {self.spe_res.params['intercept'].value:0.4} +- {self.spe_res.params['intercept'].stderr:0.2} [V]\n''' + textstr += rf'''Reduced $\chi^2$: {self.spe_res.redchi:0.4}''' + textstr += f'''\n''' + textstr += f'--\n' if not self.no_solicit: - textstr += ( - f"Baseline: {self.baseline_mean:0.4} +- {self.baseline_err:0.2} [V]" - ) - - props = dict(boxstyle="round", facecolor="tab:" + peakcolor, alpha=0.4) - fig.text(0.6, 0.4, textstr, fontsize=20, verticalalignment="top", bbox=props) + textstr += f'Baseline: {self.baseline_mean:0.4} +- {self.baseline_err:0.2} [V]' + props = dict(boxstyle='round', facecolor='tab:' + peakcolor, alpha=0.4) + fig.text(0.6, 0.45, textstr, fontsize=8, + verticalalignment='top', bbox=props) + plt.tight_layout() + if savefig: - plt.savefig(path) + plt.savefig(path) plt.close(fig) - def plot_baseline_histogram( - self, - with_fit: bool = True, - log_scale: bool = False, - color: str = "orange", - savefig: bool = False, - path: Optional[str] = None - ) -> None: - """Plots a histogram of the baseline data. - - Args: - with_fit (bool, optional): If True, overlays the fit of the data on the plot. Defaults to True. - log_scale (bool, optional): If True, sets the y-axis to a logarithmic scale. Defaults to False. - color (str, optional): The color of the histogram bars. Defaults to "orange". - savefig (bool, optional): If True, saves the figure to the provided path. Defaults to False. - path (str, optional): Path where the figure should be saved. Used only if savefig is set to True. Defaults to None. 
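The dashed line that plot_spe draws comes from the weighted linear fit performed in process: fitted peak location versus photoelectron number, each point weighted by 1/stderr of its center, so the slope is the SPE gain in V/p.e. and the intercept the residual offset. Standalone, with toy numbers:

    import numpy as np
    import lmfit as lm

    spe_num   = np.array([1, 2, 3, 4])                  # photoelectron peak number
    peak_locs = np.array([0.101, 0.199, 0.302, 0.401])  # fitted centers [V] (toy)
    peak_stds = np.array([0.001, 0.001, 0.002, 0.003])  # stderr of each center

    model = lm.models.LinearModel()
    res = model.fit(peak_locs, params = model.make_params(), x = spe_num, weights = 1.0 / peak_stds)
    print(res.params['slope'].value, res.params['intercept'].value)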
- """ + def plot_baseline_histogram(self, with_fit = True, log_scale = False, color = 'orange', savefig = False, path = None): fig = plt.figure() - plt.hist( - self.baseline_values, - bins=self.info.baseline_numbins, - label="Solicited Baseline Data", - color="tab:" + color, - ) + plt.hist(self.baseline_values, bins = self.info.baseline_numbins, label = 'Solicited Baseline Data', color = 'tab:' + color) if with_fit: - plot_fit( - self.baseline_fit, - self.baseline_values, - binnum=self.info.baseline_numbins, - plot_hists=False, - label="Solicited Baseline Fit", - ) - # plt.legend(loc = 'center left') - plt.xlabel("Waveform Amplitude [V]") - plt.ylabel("Counts") + plot_fit(self.baseline_fit, self.baseline_values, binnum = self.info.baseline_numbins, plot_hists = False, label = 'Solicited Baseline Fit') +# plt.legend(loc = 'center left') + plt.xlabel('Waveform Amplitude [V]') + plt.ylabel('Counts') if log_scale: - plt.yscale("log") - textstr = f"Date: {self.info.date}\n" - textstr += f"Condition: {self.info.condition}\n" - textstr += f"Bias: {self.info.bias:0.4} [V]\n" - textstr += f"RTD4: {self.info.temperature} [K]\n" - textstr += f"--\n" - textstr += f"""Baseline Mean: {self.baseline_fit['fit'].params['center'].value:0.4} +- {self.baseline_fit['fit'].params['center'].stderr:0.1} [V]\n""" - textstr += f"""Baseline Sigma: {self.baseline_fit['fit'].params['sigma'].value:0.4} +- {self.baseline_fit['fit'].params['sigma'].stderr:0.1} [V]\n""" - textstr += f"""Reduced $\chi^2$: {self.baseline_fit['fit'].redchi:0.4}""" - - plt.ticklabel_format(style="sci", axis="y", scilimits=(0, 0)) - props = dict(boxstyle="round", facecolor="tab:" + color, alpha=0.5) - fig.text(0.15, 0.9, textstr, fontsize=8, verticalalignment="top", bbox=props) + plt.yscale('log') + textstr = f'Date: {self.info.date}\n' + textstr += f'Condition: {self.info.condition}\n' + textstr += f'Bias: {self.info.bias:0.4} [V]\n' + textstr += f'RTD4: {self.info.temperature} [K]\n' + textstr += f'--\n' + textstr += f'''Baseline Mean: {self.baseline_fit['fit'].params['center'].value:0.4} +- {self.baseline_fit['fit'].params['center'].stderr:0.1} [V]\n''' + textstr += f'''Baseline Sigma: {self.baseline_fit['fit'].params['sigma'].value:0.4} +- {self.baseline_fit['fit'].params['sigma'].stderr:0.1} [V]\n''' + textstr += f'''Reduced $\chi^2$: {self.baseline_fit['fit'].redchi:0.4}''' + + plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) + props = dict(boxstyle='round', facecolor='tab:' + color, alpha=0.5) + fig.text(0.15, 0.9, textstr, fontsize=8, + verticalalignment='top', bbox=props) plt.tight_layout() - + if savefig: - plt.savefig(path) + plt.savefig(path) plt.close(fig) - # zoomed in version - # def plot_peak_histograms(self, with_fit = True, log_scale = True, peakcolor = 'blue', savefig = False, path = None): - # fig = plt.figure() - - # total_num_bins = self.numbins - - # textstr = f'Date: {self.info.date}\n' - # textstr += f'Condition: {self.info.condition}\n' - # textstr += f'Bias: {self.info.bias} [V]\n' - # textstr += f'RTD4: {self.info.temperature} [K]\n' - # textstr += f'--\n' - # textstr += f'''Peak 1: {self.peak_fit.params['g1_center'].value:0.2} +- {self.peak_fit.params['g1_center'].stderr:0.2}\n''' - # textstr += f'''Peak 2: {self.peak_fit.params['g2_center'].value:0.2} +- {self.peak_fit.params['g2_center'].stderr:0.2}\n''' - # textstr += f'''Peak 3: {self.peak_fit.params['g3_center'].value:0.2} +- {self.peak_fit.params['g3_center'].stderr:0.2}\n''' - # textstr += f'''Peak 4: {self.peak_fit.params['g4_center'].value:0.2} 
+- {self.peak_fit.params['g4_center'].stderr:0.2}\n''' - # textstr += f'''Reduced $\chi^2$: {self.peak_fit.redchi:0.2}\n''' - # textstr += f'''SNR (quadrature): {self.resolution[0]:0.2}\n''' - # textstr += f'''SNR 1-2 (mode): {(self.peak_locs[1]-self.peak_locs[0])/self.baseline_mode:0.2}\n''' - # textstr += f'''SNR 2-3 (mode): {(self.peak_locs[2]-self.peak_locs[1])/self.baseline_mode:0.2}\n''' - - # curr_hist = np.histogram(self.peak_values, bins = self.numbins) - # counts = curr_hist[0] - # bins = curr_hist[1] - # centers = (bins[1:] + bins[:-1])/2 - # plt.plot(np.linspace(self.range_low,self.range_high,len(self.peak_fit.best_fit)), self.peak_fit.best_fit, '-', label='best fit', color = 'red') - - # x = np.linspace(self.range_low,self.range_high,200) - # plt.plot(x, self.peak_fit.best_values['l_intercept'] + self.peak_fit.best_values['l_slope']*x, '-', label='best fit - line', color = 'blue') - - # props = dict(boxstyle='round', facecolor='tab:' + peakcolor, alpha=0.4) - # # plt.scatter(centers, counts, s = 7, color = 'black') - # plt.hist(self.peak_values, bins = int(total_num_bins), color = 'tab:' + peakcolor) - - # fig.text(0.55, 0.925, textstr, fontsize=8, - # verticalalignment='top', bbox=props) - # plt.ylabel('Counts') - # plt.xlabel('Pulse Amplitude [V]') - - # if log_scale: - # plt.ylim(1E-1) - # plt.yscale('log') - # plt.tight_layout() - - # if savefig: - # plt.savefig(path) - # plt.close(fig) - - # see entire hist - def plot_peak_histograms( - self, - with_fit: bool = True, - log_scale: bool = True, - peakcolor: str = "blue", - savefig: bool = False, - path: Optional[str] = None - ) -> None: - """Plots a histogram of peak data with possible fitted models. - - Args: - with_fit (bool, optional): If True, overlays the fitted model of the data on the plot. Defaults to True. - log_scale (bool, optional): If True, sets the y-axis to a logarithmic scale. Defaults to True. - peakcolor (str, optional): The color of the histogram bars. Defaults to "blue". - savefig (bool, optional): If True, saves the figure to the provided path. Defaults to False. - path (str, optional): Path where the figure should be saved. Used only if savefig is set to True. Defaults to None. 
- """ + def plot_peak_histograms(self, with_fit = True, log_scale = True, peakcolor = 'blue', savefig = False, path = None): fig = plt.figure() - bin_width = (max(self.peak_values) - min(self.peak_values)) / self.numbins - # print(bin_width) - - total_num_bins = (max(self.peak_values) - min(self.peak_values)) / bin_width - textstr = f"Date: {self.info.date}\n" - textstr += f"Condition: {self.info.condition}\n" - textstr += f"Bias: {self.info.bias} [V]\n" - textstr += f"RTD4: {self.info.temperature} [K]\n" - textstr += f"--\n" - textstr += f"""Peak 1: {self.peak_fit.params['g1_center'].value:0.2} +- {self.peak_fit.params['g1_center'].stderr:0.2}\n""" - textstr += f"""Peak 2: {self.peak_fit.params['g2_center'].value:0.2} +- {self.peak_fit.params['g2_center'].stderr:0.2}\n""" - textstr += f"""Peak 3: {self.peak_fit.params['g3_center'].value:0.2} +- {self.peak_fit.params['g3_center'].stderr:0.2}\n""" - textstr += f"""Peak 4: {self.peak_fit.params['g4_center'].value:0.2} +- {self.peak_fit.params['g4_center'].stderr:0.2}\n""" - textstr += f"""Reduced $\chi^2$: {self.peak_fit.redchi:0.2}\n""" - textstr += f"""SNR (quadrature): {self.resolution[0]:0.2}\n""" - textstr += f"""SNR 1-2 (mode): {(self.peak_locs[1]-self.peak_locs[0])/self.baseline_mode:0.2}\n""" - textstr += f"""SNR 2-3 (mode): {(self.peak_locs[2]-self.peak_locs[1])/self.baseline_mode:0.2}\n""" - - curr_hist = np.histogram(self.peak_values, bins=self.numbins) + # total_num_bins = self.numbins + # bin_density = int(np.sqrt(len(self.peak_values))) / (self.range_high - self.range_low) + if self.peak_range != (1,4): #if doing 4 peaks, the bin number are calculated using proper stats + bin_density = self.info.peaks_numbins / (self.range_high - self.range_low) + else: + bin_density = int(np.sqrt(len(self.peak_values))) / (self.range_high - self.range_low) + + + + total_num_bins = bin_density * (np.amax(self.all) - np.amin(self.all)) + + textstr = f'Date: {self.info.date}\n' + textstr += f'Condition: {self.info.condition}\n' + textstr += f'Bias: {self.info.bias} [V]\n' + textstr += f'RTD4: {self.info.temperature} [K]\n' + textstr += f'--\n' + textstr += f'Peak Locations ($\mu$) [V]\n' + for peak in range(len(self.peak_sigmas)): + actual_peak = peak + 1 + textstr += f'''Peak {actual_peak}: {self.peak_fit.params['g' + str(actual_peak) + '_center'].value:0.2} $\pm$ {self.peak_fit.params['g' + str(actual_peak) + '_center'].stderr:0.2}\n''' + # textstr += f'''SNR (quadrature): {self.resolution[0]:0.2}\n''' + # textstr += f'''SNR 1-2 (mode): {(self.peak_locs[1]-self.peak_locs[0])/self.baseline_mode:0.2}\n''' + # textstr += f'''SNR 2-3 (mode): {(self.peak_locs[2]-self.peak_locs[1])/self.baseline_mode:0.2}\n''' + + textstr += f'--\n' + textstr += 'Peak Width (\u03C3) [V]\n' + for peak in range(len(self.peak_sigmas)): + curr_sigma_err = self.peak_fit.params['g' + str(peak + 1) + '_sigma'].stderr + textstr += f'''{peak + 1}: {round(self.peak_sigmas[peak],5)} $\pm$ {curr_sigma_err:0.2}\n''' + + textstr += f'--\n' + textstr += f'''Reduced $\chi^2$: {self.peak_fit.redchi:0.2}\n''' + + + curr_hist = np.histogram(self.peak_values, bins = self.numbins) counts = curr_hist[0] bins = curr_hist[1] - centers = (bins[1:] + bins[:-1]) / 2 - plt.plot( - np.linspace(self.cutoff[0], self.cutoff[1], len(self.peak_fit.best_fit)), - self.peak_fit.best_fit, - "-", - label="best fit", - color="red", - ) # plot the best fit model func - - x = np.linspace(self.cutoff[0], self.cutoff[1], 200) - plt.plot( - x, - self.peak_fit.best_values["l_intercept"] - + 
self.peak_fit.best_values["l_slope"] * x, - "-", - label="best fit - line", - color="blue", - ) # plot linear component of best fit model func - - props = dict(boxstyle="round", facecolor="tab:" + peakcolor, alpha=0.4) + centers = (bins[1:] + bins[:-1])/2 + y_line_fit = self.peak_fit.eval(x=centers) + + plt.plot(centers, y_line_fit,'r-', label='best fit') + + # plt.plot(np.linspace(self.range_low,self.range_high,len(self.peak_fit.best_fit)), self.peak_fit.best_fit, 'r-', label='best fit', color = 'red') + + # x = np.linspace(self.range_low,self.range_high,200) + plt.plot(centers, self.peak_fit.best_values['l_intercept'] + self.peak_fit.best_values['l_slope']*centers, 'b-', label='best fit - line') + + # print('here') + # print(self.peak_fit.best_values) + + props = dict(boxstyle='round', facecolor='tab:' + peakcolor, alpha=0.4) # plt.scatter(centers, counts, s = 7, color = 'black') - plt.hist(self.peak_values, bins=int(total_num_bins), color="tab:" + peakcolor) - - fig.text(0.55, 0.925, textstr, fontsize=8, verticalalignment="top", bbox=props) - plt.ylabel("Counts") - plt.xlabel("Pulse Amplitude [V]") - + # plt.hist(self.peak_values, bins = int(total_num_bins), color = 'tab:' + peakcolor) + plt.hist(self.all, bins = int(total_num_bins), color = 'tab:' + peakcolor) + + fig.text(0.70, 0.925, textstr, fontsize=8, + verticalalignment='top', bbox=props) + plt.ylabel('Counts') + plt.xlabel('Pulse Amplitude [V]') + if log_scale: - plt.ylim(1e-1) - plt.yscale("log") + plt.ylim(1E-1) + plt.yscale('log') plt.tight_layout() - + if savefig: plt.savefig(path) plt.close(fig) - def plot_alpha_histogram(self, with_fit: bool = True, log_scale: bool = False, peakcolor: str = "purple")-> None: - """Plots a histogram of alpha values with or without fitting. - - This method creates a histogram of alpha values (self.peak_values), calculated as the number of peaks (self.info.peaks_numbins) over the range of alpha fit (self.alpha_fit["high"] - self.alpha_fit["low"]). - - It supports options to show/hide fitted model on the plot (with_fit), use a logarithmic scale (log_scale), and change the color of the histogram bars (peakcolor). - - Additional information about the measurement, such as date, condition, bias, temperature, and peak statistics are displayed on the plot. - - Args: - with_fit (bool, optional): If True, overlays the fitted model of the alpha values on the histogram. Defaults to True. - log_scale (bool, optional): If True, sets the y-axis to a logarithmic scale. Defaults to False. - peakcolor (str, optional): The color of the histogram bars. Defaults to "purple". 
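When plot_peak_histograms re-histograms the full amplitude range, it keeps the bin density implied by the fit window so bar widths stay comparable between the two views. The arithmetic on its own, with illustrative numbers:

    import numpy as np

    range_low, range_high = 0.0, 0.35   # fit window [V] (illustrative)
    n_in_window = 9_000                 # peak amplitudes inside the window
    bin_density = int(np.sqrt(n_in_window)) / (range_high - range_low)

    amp_min, amp_max = -0.01, 1.20      # full span of the data
    total_num_bins = int(bin_density * (amp_max - amp_min))
    print(bin_density, total_num_bins)  # ~269 bins per volt -> 324 bins overall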
- """ + def plot_alpha_histogram(self, with_fit = True, log_scale = False, peakcolor = 'purple'): fig = plt.figure() - fig.tight_layout() - plt.rc("font", size=22) - bin_density = self.info.peaks_numbins / ( - self.alpha_fit["high"] - self.alpha_fit["low"] - ) - total_num_bins = bin_density * ( - np.amax(self.peak_values) - np.amin(self.peak_values) - ) - plt.hist(self.peak_values, bins=int(total_num_bins), color="tab:" + peakcolor) + bin_density = self.info.peaks_numbins / (self.alpha_fit['high'] - self.alpha_fit['low']) + total_num_bins = bin_density * (np.amax(self.peak_values) - np.amin(self.peak_values)) + plt.hist(self.peak_values, bins = int(total_num_bins), color = 'tab:' + peakcolor) if with_fit: - plot_fit( - self.alpha_fit, - self.peak_values, - binnum=self.info.peaks_numbins, - plot_hists=False, - ) - # plt.legend(loc = 'center left') - plt.xlabel("Waveform Amplitude [V]") - plt.ylabel("Counts") + plot_fit(self.alpha_fit, self.peak_values, binnum = self.info.peaks_numbins, plot_hists = False) +# plt.legend(loc = 'center left') + plt.xlabel('Waveform Amplitude [V]') + plt.ylabel('Counts') plt.xlim(0.0) if log_scale: - plt.yscale("log") - plt.ylim(0.1) - textstr = f"Date: {self.info.date}\n" - textstr += f"Condition: {self.info.condition}\n" - textstr += f"Bias: {self.info.bias:0.4} [V]\n" - textstr += f"RTD4: {self.info.temperature} [K]\n" - textstr += f"--\n" - textstr += f"""Alpha Peak Mean: {self.alpha_fit['fit'].params['center'].value:0.4} +- {self.alpha_fit['fit'].params['center'].stderr:0.1} [V]\n""" - textstr += f"""Alpha Peak Sigma: {self.alpha_fit['fit'].params['sigma'].value:0.4} +- {self.alpha_fit['fit'].params['sigma'].stderr:0.1} [V]\n""" - textstr += f"""Reduced $\chi^2$: {self.alpha_res.redchi:0.4}""" - - props = dict(boxstyle="round", facecolor="tab:" + peakcolor, alpha=0.4) - fig.text( - 0.175, 0.925, textstr, fontsize=20, verticalalignment="top", bbox=props - ) - plt.show() - - def plot_both_histograms( - self, - log_scale: bool = True, - density: bool = True, - alphas: bool = False, - baselinecolor: str = "orange", - peakcolor: str = "blue", - savefig: bool = False, - path: Optional[str] = None, - ): - """Plots histograms for both baseline and peak values. - - This method creates histograms for baseline and peak values with control over the - logarithmic scale, normalization (density), inclusion of alpha peaks, and color schemes - for baseline and peak histograms. - - It can also save the plot to a specified file. - - Args: - log_scale (bool, optional): If True, sets the y-axis to a logarithmic scale. Defaults to True. - density (bool, optional): If True, normalizes the histogram to form a probability density. Defaults to True. - alphas (bool, optional): If True, includes alpha peaks in the histogram. Defaults to False. - baselinecolor (str, optional): The color of the baseline histogram bars. Defaults to "orange". - peakcolor (str, optional): The color of the peak histogram bars. Defaults to "blue". - savefig (bool, optional): If True, saves the plot to the file specified in 'path'. Defaults to False. - path (str, optional): The file path to save the plot. Used only if 'savefig' is True. Defaults to None. 
- """ - if self.no_solicit: - print("NO PRE BREAKDOWN DATA TO PLOT") + plt.yscale('log') + plt.ylim(.1) + textstr = f'Date: {self.info.date}\n' + textstr += f'Condition: {self.info.condition}\n' + textstr += f'Bias: {self.info.bias:0.4} [V]\n' + textstr += f'RTD4: {self.info.temperature} [K]\n' + textstr += f'--\n' + textstr += f'''Alpha Peak Mean: {self.alpha_fit['fit'].params['center'].value:0.4} +- {self.alpha_fit['fit'].params['center'].stderr:0.1} [V]\n''' + textstr += f'''Alpha Peak Sigma: {self.alpha_fit['fit'].params['sigma'].value:0.4} +- {self.alpha_fit['fit'].params['sigma'].stderr:0.1} [V]\n''' + textstr += f'''Reduced $\chi^2$: {self.alpha_res.redchi:0.4}''' + + + props = dict(boxstyle='round', facecolor='tab:' + peakcolor, alpha=0.4) + fig.text(0.175, 0.925, textstr, fontsize=8, + verticalalignment='top', bbox=props) + plt.tight_layout() + + def plot_both_histograms(self, log_scale = True, density = True, alphas = False, baselinecolor = 'orange', peakcolor = 'blue', savefig = False, path = None): + if self.no_solicit == True: + print('NO PRE BREAKDOWN DATA TO PLOT') fig = plt.figure() - plt.hist( - self.baseline_values, - bins=self.info.baseline_numbins, - label="Solicited Baseline Data", - density=density, - color="tab:" + baselinecolor, - ) + plt.hist(self.baseline_values, bins = self.info.baseline_numbins, label = 'Solicited Baseline Data', density = density, color = 'tab:' + baselinecolor) if alphas: - bin_density = self.info.peaks_numbins / ( - self.alpha_fit["high"] - self.alpha_fit["low"] - ) + bin_density = self.info.peaks_numbins / (self.alpha_fit['high'] - self.alpha_fit['low']) else: - bin_density = self.info.peaks_numbins / (4.0 * self.baseline_std) - # total_num_bins = bin_density * (np.amax(self.peak_values) - np.amin(self.peak_values)) - total_num_bins = self.info.peaks_numbins - plt.hist( - self.peak_values, - bins=int(total_num_bins), - density=density, - label="Self-Triggered Pulse Height Data", - color="tab:" + peakcolor, - ) + # bin_density = self.info.peaks_numbins / (4.0 * self.baseline_std) + bin_density = int(np.sqrt(len(self.peak_values))) / (self.range_high - self.range_low) + total_num_bins = bin_density * (np.amax(self.all) - np.amin(self.all)) + # total_num_bins = self.info.peaks_numbins + plt.hist(self.all, bins = int(total_num_bins), density = density, label = 'Solicited-LED-Triggered Pulse Height Data', color = 'tab:' + peakcolor) if log_scale: - plt.ylim(1e-1) - plt.yscale("log") - plt.ylabel("Frequency" if density else "Counts") - plt.xlabel("Amplitude [V]") - + plt.ylim(1E-1) + plt.yscale('log') + if density: + plt.ylabel('Frequency') + else: + plt.ylabel('Counts') + plt.xlabel('Amplitude [V]') + plt.legend() - textstr = f"Date: {self.info.date}\n" - textstr += f"Condition: {self.info.condition}\n" - textstr += f"Bias: {self.info.bias:0.4} [V]\n" - textstr += f"RTD4: {self.info.temperature} [K]" - props = dict(boxstyle="round", facecolor="tab:" + peakcolor, alpha=0.4) - fig.text(0.75, 0.75, textstr, fontsize=8, verticalalignment="top", bbox=props) + textstr = f'Date: {self.info.date}\n' + textstr += f'Condition: {self.info.condition}\n' + textstr += f'Bias: {self.info.bias:0.4} [V]\n' + textstr += f'RTD4: {self.info.temperature} [K]' + props = dict(boxstyle='round', facecolor='tab:' + peakcolor, alpha=0.4) + fig.text(0.70, 0.75, textstr, fontsize=8, + verticalalignment='top', bbox=props) plt.tight_layout() - + if savefig: plt.savefig(path) plt.close(fig) - - # currently broken: - def plot_baseline_waveform_hist(self, num: int = -1, color: 
str = "orange"): - """Plots a 2D histogram of baseline waveform data over time. - - The method generates a 2D histogram of baseline waveform data over time. - The number of waveforms and their color in the plot can be controlled. - - Args: - num (int, optional): Number of waveforms to include in the superposition. Defaults to -1, indicating all. - color (str, optional): Color of the histogram. Defaults to "orange". - """ + +#currently broken: + def plot_baseline_waveform_hist(self, num = -1, color = 'orange'): fig = plt.figure() - waveform_data, waveform_times, num_w = get_baseline( - self.info.solicit_path, self.info.peak_search_params - ) - plt.hist2d(waveform_times, waveform_data, bins=100, norm=mpl.colors.LogNorm()) - plt.xlabel(r"Time [$\mu$s]") - plt.ylabel("Waveform Amplitude [V]") - textstr = f"Date: {self.info.date}\n" - textstr += f"Condition: {self.info.condition}\n" - textstr += f"Bias: {self.info.bias:0.4} [V]\n" - textstr += f"RTD4: {self.info.temperature} [K]\n" - textstr += f"Superposition of {num_w} waveforms" - props = dict(boxstyle="round", facecolor="tab:" + color, alpha=0.5) - fig.text(0.6, 0.3, textstr, fontsize=8, verticalalignment="top", bbox=props) + waveform_data, waveform_times, num_w = get_baseline(self.info.solicit_path, self.info.peak_search_params) + plt.hist2d(waveform_times, waveform_data, bins = 100, norm=mpl.colors.LogNorm()) + plt.xlabel(r'Time [$\mu$s]') + plt.ylabel('Waveform Amplitude [V]') + textstr = f'Date: {self.info.date}\n' + textstr += f'Condition: {self.info.condition}\n' + textstr += f'Bias: {self.info.bias:0.4} [V]\n' + textstr += f'RTD4: {self.info.temperature} [K]\n' + textstr += f'Superposition of {num_w} waveforms' + props = dict(boxstyle='round', facecolor='tab:' + color, alpha=0.5) + fig.text(0.6, 0.3, textstr, fontsize=8, + verticalalignment='top', bbox=props) plt.tight_layout() - - def plot_peak_waveform_hist(self, num: int = -1, color: str = "blue"): - """Plots a 2D histogram of peak waveform data over time. - - The method generates a 2D histogram of peak waveform data over time. - The number of waveforms and their color in the plot can be controlled. - - Args: - num (int, optional): Number of waveforms to include in the superposition. Defaults to -1, indicating all. - color (str, optional): Color of the histogram. Defaults to "blue". 
- """ + + def plot_peak_waveform_hist(self, num = -1, color = 'blue'): fig = plt.figure() - waveform_data, waveform_times, num_w = get_peak_waveforms( - self.info.selftrig_path, num - ) - plt.hist2d(waveform_times, waveform_data, bins=1000, norm=mpl.colors.LogNorm()) - plt.xlabel(r"Time [$\mu$s]") - plt.ylabel("Waveform Amplitude [V]") - textstr = f"Date: {self.info.date}\n" - textstr += f"Condition: {self.info.condition}\n" - textstr += f"Bias: {self.info.bias:0.4} [V]\n" - textstr += f"RTD4: {self.info.temperature} [K]\n" - textstr += f"Superposition of {num_w} waveforms" - props = dict(boxstyle="round", facecolor="tab:" + color, alpha=0.4) + waveform_data, waveform_times, num_w = get_peak_waveforms(self.info.selftrig_path, num) + plt.hist2d(waveform_times, waveform_data, bins = 1000, norm=mpl.colors.LogNorm()) + plt.xlabel(r'Time [$\mu$s]') + plt.ylabel('Waveform Amplitude [V]') + textstr = f'Date: {self.info.date}\n' + textstr += f'Condition: {self.info.condition}\n' + textstr += f'Bias: {self.info.bias:0.4} [V]\n' + textstr += f'RTD4: {self.info.temperature} [K]\n' + textstr += f'Superposition of {num_w} waveforms' + props = dict(boxstyle='round', facecolor='tab:' + color, alpha=0.4) low, high = plt.ylim() plt.ylim(low, 4.5) - fig.text(0.6, 0.9, textstr, fontsize=8, verticalalignment="top", bbox=props) + fig.text(0.6, 0.9, textstr, fontsize=8, + verticalalignment='top', bbox=props) plt.tight_layout() + + def plot_waveform(self, i, wf_type = 'selftrig'): + if wf_type == 'solicit': + x, y = get_waveform(self.info.solicit_path + f'w{i}.txt') + elif wf_type == 'selftrig': + x, y = get_waveform(self.info.selftrig_path + f'w{i}.txt') + else: + print('Fuck You <3') + return + + fig = plt.figure() + ax = fig.gca() + ax.plot(1,1) + + peaks, props = signal.find_peaks(y, **self.info.peak_search_params) + for peak in peaks: + plt.scatter(x[peak], y[peak]) + ax.plot(x, y, 'b-') + ax.set_xlabel('Time (us)') + ax.set_ylabel('Amplitude (V)') + ax.set_title('Waveform ' + str(i) + ', ' + str(len(peaks)) + ' peaks') + plt.draw_all() + plt.show() + return x, y + + def plot_fft(self, i, wf_type = 'solicit'): + if wf_type == 'solicit': + x, y = get_waveform(self.info.solicit_path + f'w{i}.txt') + elif wf_type == 'selftrig': + x, y = get_waveform(self.info.selftrig_path + f'w{i}.txt') + else: + print('Fuck You <3') + return + + fig = plt.figure() + ax = fig.gca() + ax.plot(1,1) + + N = len(x) + T = (x[-1]-x[0])/len(x) + yf = fft(y) + xf = fftfreq(N, T)[:N//2] + ax.plot(xf, 2.0/N * np.abs(yf[0:N//2])) + ax.set_xlabel('Frequency (MHz)') + ax.set_ylabel('Amplitude') + ax.set_title(f'Fourier Transform of Waveform {i}') + + def get_subtract_hist_mean(self, data1, data2, numbins = 2000, plot = False): + if plot: + plt.figure() + (n, b, p) = plt.hist(data1, bins = numbins, density = False, label = 'LED-On', histtype='step') + plt.axvline(x = np.mean(data1), color = 'red') + print('LED on hist: ' + str(np.mean(data1))) + print('LED off hist: ' + str(np.mean(data2))) + plt.axvline(x = np.mean(data2), color = 'green') + plt.hist(data2, bins = b, density = False, label = 'LED-Off', histtype='step') + counts1, bins1 = np.histogram(data1, bins = numbins, density = False) + counts2, bins2 = np.histogram(data2, bins = bins1, density = False) + centers = (bins1[1:] + bins1[:-1])/2 + subtracted_counts = counts1 - counts2 + # subtracted_counts[subtracted_counts < 0] = 0 + if plot: + plt.step(centers, subtracted_counts, label = 'subtracted hist') + plt.legend() + + norm_subtract_hist = subtracted_counts / 
np.sum(subtracted_counts) + # weights = 1.0 / subtracted_counts / + mean_value = np.sum(centers * norm_subtract_hist) + ca_value = mean_value / self.spe_res.params['slope'].value - 1 + if plot: + plt.title(f'OV: {round(self.run_info_self.bias - 27.1,3)}, CA value: {round(ca_value,3)}') + plt.axvline(x = mean_value, color = 'orange') + return mean_value diff --git a/RunInfo.py b/RunInfo.py index a4f0560..e5bdd80 100644 --- a/RunInfo.py +++ b/RunInfo.py @@ -12,335 +12,220 @@ from scipy import stats from scipy import optimize from scipy import interpolate -import matplotlib as mpl import peakutils +import matplotlib as mpl +import sys from scipy.stats import sem -import pprint - -# %% - - -def get_data(h5path: str, groupname: str) -> np.ndarray: - """Extrats data from an h5 file provided by h5 path - - Args: - h5path (str): Path to h5 file. - groupname (str): Group name of desired group in h5 file +#%% - Returns: - np.ndarray : waveform data - """ - with h5py.File(h5path, "r") as hdf: - # TODO: check data format can be cast to ndarray - data: np.ndarray = hdf["RunData"][groupname][:] +def get_data(h5path, groupname): + with h5py.File(h5path, 'r') as hdf: + data = hdf['RunData'].get(groupname)[:] return data - - -def get_grp_meta(h5path: str, groupname: str) -> dict: - """Get metadata of specified group - - Args: - h5path (str): Path to h5 file - groupname (str): Name of group to accquire metadata from - - Returns: - dict: hdf5 group metadata - """ - with h5py.File(h5path, "r") as hdf: - meta_group = dict(hdf["RunData"][groupname].attrs) + +def get_grp_meta(h5path, groupname): + with h5py.File(h5path, 'r') as hdf: + meta_group = dict(hdf['RunData'][groupname].attrs) return meta_group - -def get_run_meta(h5path: str) -> dict: - """Get metadata for specific run - - Args: - h5path (str): path to h5 file - - Returns: - dict : hdf5 run metadata - """ - with h5py.File(h5path, "r") as hdf: - run_meta = dict(hdf["RunData"].attrs) +def get_run_meta(h5path): + with h5py.File(h5path, 'r') as hdf: + run_meta = dict(hdf['RunData'].attrs) return run_meta - -def get_grp_names(h5path: str) -> list: - """Get names of groups in specified h5 file - - Args: - h5path (str): Path to h5 file - - Returns: - list : acquisition names - """ - with h5py.File(h5path, "r") as hdf: - group_names = list(hdf["RunData"].keys()) +def get_grp_names(h5path): + with h5py.File(h5path, 'r') as hdf: + group_names = list(hdf['RunData'].keys()) return group_names - -def get_mode(hist_data: list or np.array) -> tuple[float, float]: - """Get the mode of histogram data - - Args: - hist_data (list or np.array): Histogram data - - Returns: - tuple[float, float]: value (in volts), number of counts - """ +def get_mode(hist_data): counts = hist_data[0] bins = hist_data[1] - centers = (bins[1:] + bins[:-1]) / 2.0 + centers = (bins[1:] + bins[:-1])/2.0 max_index = np.argmax(counts) return centers[max_index], np.amax(counts) - + class RunInfo: - def __init__( - self, - f: list, - acquisition: str = "placeholder", - is_solicit: bool = False, - do_filter: bool = False, - plot_waveforms: bool = False, - upper_limit: float = 4.4, - baseline_correct: bool = False, - prominence: float = 0.005, - specifyAcquisition: bool = False, - fourier: bool = False, - ): - # TODO: - # combine acquisition and specify_acquisition inputs - # baseline_correct should allow for choice of peakutils vs mode or mean subtraction - # - - """Extract raw waveform data from either txt or h5 files. Apply filtering and baseline correciton - to waveforms. 
Can process alpha pulse waveforms, SPE waveforms (in LXe or Vacuum), or baseline data. - Records alpha pulse heights, SPE pulse heights, or aggregate y-axis data respectively. - Can also collect baseline info from SPE data in the absence of dedicated baseline data (why?). - Optionally plots waveforms to inspect data for tuning peak finding algo. - Args: - f list: list of h5 file names - acquisition (str, optional): specified file name. Defaults to 'placeholder'. - is_solicit (bool, optional): specified whether data is solicited, AKA 'empty' baseline data. Defaults to False. - do_filter (bool, optional): activates butterworth lowpass filter if True. Defaults to False. - plot_waveforms (bool, optional): plots waveforms if True. Defaults to False. - upper_limit (float, optional): amplitude threshold for discarding waveforms. Defaults to 4.4. - baseline_correct (bool, optional): baseline corrects waveforms if True. Defaults to False. - prominence (float, optional): parameter used for peak finding algo. Defaults to 0.005. - specifyAcquisition (bool, optional): if True, uses acquisition to extract just one acquisition dataset. Defaults to False. - fourier (bool, optional): if True performs fourier frequency subtraction. Defaults to False. - - Raises: - TypeError: _description_ - """ + def __init__(self, f, acquisition = 'placeholder', is_solicit = False, do_filter = False, plot_waveforms = False, upper_limit = 4.4, baseline_correct = False, prominence = 0.005, specifyAcquisition = False, fourier = False, is_led = False): if not isinstance(f, list): - raise TypeError( - "Files must be a in a list" - ) # TODO replace with list conversion - # f = [f] + raise TypeError('Files must be a in a list') self.do_filter = do_filter self.plot_waveforms = plot_waveforms self.hd5_files = f self.upper_limit = upper_limit self.baseline_correct = baseline_correct - # holds all acquisition data index by first file name then by acquisition name +# holds all acquisition data index by first file name then by acquisition name self.acquisitions_data = {} - # holds all acquisition names indexed by file name indexed by file name then by acquisition name +# holds all acquisition names indexed by file name indexed by file name then by acquisition name self.acquisition_names = {} - # holds acquisition time axis for all acquisitions in a single acquisition data array +# holds acquisition time axis for all acquisitions in a single acquisition data array self.acquisitions_time = {} - # holds all acquisition meta data, indexed first by file name then by acquisition name +# holds all acquisition meta data, indexed first by file name then by acquisition name self.acquisition_meta_data = {} self.all_peak_data = [] + self.led = is_led + if self.led: + self.all_dark_peak_data = [] + self.all_led_peak_data = [] + + # self.a_led = np.mean(self.all_led_peak_data) + # self.a_dark = np.mean(self.all_dark_peak_data) self.acquisition = acquisition self.specifyAcquisition = specifyAcquisition - self.fourier = fourier + self.fourier=fourier self.prominence = prominence - self.baseline_levels = [] # list of mode of waveforms - + self.baseline_levels = [] #list of mode of waveforms + for curr_file in self.hd5_files: self.acquisition_names[curr_file] = get_grp_names(curr_file) - # for curr_acquisition_name in self.acquisition_names[curr_file]: - - # holds all run data indexed by file name + print(self.acquisition_names[curr_file]) + # for curr_acquisition_name in self.acquisition_names[curr_file]: + +# holds all run data indexed by file name 
self.run_meta_data = {} + # self.LED_operating_voltage = [] for curr_file in self.hd5_files: + self.acquisition_names[curr_file] = get_grp_names(curr_file) self.acquisitions_time[curr_file] = {} self.acquisitions_data[curr_file] = {} self.acquisition_meta_data[curr_file] = {} - + self.run_meta_data[curr_file] = get_run_meta(curr_file) - print(f"Run Meta: {self.run_meta_data[curr_file]}") - - # TODO simplify reducency, if single acquisition given put it in a list + led_operating_v = self.run_meta_data[curr_file].get("RunNotes") + # self.LED_operating_voltage.append(led_operating_v) + if specifyAcquisition: - curr_data = get_data(curr_file, acquisition) - self.acquisitions_data[curr_file][acquisition] = curr_data[:, 1:] - self.acquisitions_time[curr_file][acquisition] = curr_data[:, 0] - self.acquisition_meta_data[curr_file][acquisition] = get_grp_meta( - curr_file, acquisition - ) - # print(f"Group Meta: {self.acquisition_meta_data[curr_file][acquisition]}") - pprint.pprint(self.acquisition_meta_data[curr_file][acquisition]) - self.bias = self.acquisition_meta_data[curr_file][acquisition][ - "Bias(V)" - ] - self.condition = "LXe" - self.date = self.acquisition_meta_data[curr_file][acquisition][ - "AcquisitionStart" - ] - self.trig = self.acquisition_meta_data[curr_file][acquisition][ - "LowerLevel" - ] - self.yrange = self.acquisition_meta_data[curr_file][acquisition][ - "Range" - ] - self.offset = self.acquisition_meta_data[curr_file][acquisition][ - "Offset" - ] - + curr_data = get_data(curr_file, acquisition) + self.acquisitions_data[curr_file][acquisition] = curr_data[:, 1:] + self.acquisitions_time[curr_file][acquisition] = curr_data[:, 0] + self.acquisition_meta_data[curr_file][acquisition] = get_grp_meta(curr_file, acquisition) + self.bias = self.acquisition_meta_data[curr_file][acquisition]['Bias(V)'] + self.condition = 'Liquid Xenon' + self.date = self.acquisition_meta_data[curr_file][acquisition]['AcquisitionStart'] + self.trig = self.acquisition_meta_data[curr_file][acquisition]['LowerLevel'] + self.yrange = self.acquisition_meta_data[curr_file][acquisition]['Range'] + self.offset = self.acquisition_meta_data[curr_file][acquisition]['Offset'] + else: for curr_acquisition_name in self.acquisition_names[curr_file]: curr_data = get_data(curr_file, curr_acquisition_name) - self.acquisitions_data[curr_file][ - curr_acquisition_name - ] = curr_data[:, 1:] - self.acquisitions_time[curr_file][ - curr_acquisition_name - ] = curr_data[:, 0] - self.acquisition_meta_data[curr_file][ - curr_acquisition_name - ] = get_grp_meta(curr_file, curr_acquisition_name) - # pprint.pprint(self.acquisition_meta_data[curr_file][curr_acquisition_name]) - self.bias = self.acquisition_meta_data[curr_file][ - curr_acquisition_name - ]["Bias(V)"] - self.condition = "LXe" - self.date = self.acquisition_meta_data[curr_file][ - curr_acquisition_name - ]["AcquisitionStart"] - self.trig = self.acquisition_meta_data[curr_file][ - curr_acquisition_name - ]["LowerLevel"] - self.yrange = self.acquisition_meta_data[curr_file][ - curr_acquisition_name - ]["Range"] - self.offset = self.acquisition_meta_data[curr_file][ - curr_acquisition_name - ]["Offset"] - + self.acquisitions_data[curr_file][curr_acquisition_name] = curr_data[:, 1:] + self.acquisitions_time[curr_file][curr_acquisition_name] = curr_data[:, 0] + self.acquisition_meta_data[curr_file][curr_acquisition_name] = get_grp_meta(curr_file, curr_acquisition_name) + self.bias = self.acquisition_meta_data[curr_file][curr_acquisition_name]['Bias(V)'] + self.condition = 
'1426.93g LXe'
+                    self.date = self.acquisition_meta_data[curr_file][curr_acquisition_name]['AcquisitionStart']
+                    self.trig = self.acquisition_meta_data[curr_file][curr_acquisition_name]['LowerLevel']
+                    self.yrange = self.acquisition_meta_data[curr_file][curr_acquisition_name]['Range']
+                    self.offset = self.acquisition_meta_data[curr_file][curr_acquisition_name]['Offset']
+
         if not is_solicit:
             self.peak_search_params = {
-                "height": 0.0,  # SPE
-                "threshold": None,  # SPE
-                "distance": None,  # SPE
-                "prominence": prominence,
-                "width": None,  # SPE
-                "wlen": 100,  # SPE
-                "rel_height": None,  # SPE
-                "plateau_size": None,  # SPE
+                'height': 0.0,  # SPE
+                'threshold': None,  # SPE
+                'distance': None,  # SPE
+                'prominence': prominence,
+                'width': None,  # SPE
+                'wlen': 100,  # SPE
+                'rel_height': None,  # SPE
+                'plateau_size': None,  # SPE
                 # 'distance':10 #ADDED 2/25/2023
-            }
-            self.get_peak_data()
+            }
+            self.get_peak_data()
         else:
             self.get_peak_data_solicit()
-
+
         self.baseline_mode_err = sem(self.baseline_levels)
         self.baseline_mode_std = np.std(self.baseline_levels)
-        self.baseline_mode_mean = np.mean(self.baseline_levels)
+        self.baseline_mode_mean = np.mean(self.baseline_levels)
         rms = [i**2 for i in self.baseline_levels]
-        self.baseline_mode_rms = np.sqrt(np.mean(np.sum(rms)))
-
+        # RMS of the baseline modes: average the squared values before taking the
+        # square root (np.mean(np.sum(rms)) collapsed to a bare sum of squares)
+        self.baseline_mode_rms = np.sqrt(np.mean(rms))
+
         if not is_solicit:
-            print(
-                "mean mode of amplitudes, standard deviation, SEM: "
-                + str(self.baseline_mode_mean)
-                + ", "
-                + str(self.baseline_mode_std)
-                + ","
-                + str(self.baseline_mode_err)
-            )
-
-    def plot_hists(self, temp_mean: str, temp_std: str, new: bool = False) -> None:
-        """Plot histograms of file. Plots will display when called. No return value
-
-        Args:
-            temp_mean (str): Used in label of title. Mean of data (?)
-            temp_std (str): Used in label of title. Error of mean
-            new (bool, optional): Whether or not to create new figure. Defaults to False.
- """ + print('mean mode of amplitudes, standard deviation, SEM: ' + str(self.baseline_mode_mean) + ', ' + str(self.baseline_mode_std) + ',' + str(self.baseline_mode_err)) + + def plot_hists(self, temp_mean, temp_std, new = False): if new: - plt.figure() # makes new - # TODO bin arg - (n, b, p) = plt.hist( - self.all_peak_data, bins=1000, histtype="step", density=False - ) + plt.figure() #makes new + (n, b, p) = plt.hist(self.all_peak_data, bins = 200, histtype = 'step', density = False) for curr_file in self.hd5_files: print(curr_file) for curr_acquisition_name in self.acquisition_names[curr_file]: print(curr_acquisition_name) - + if not self.plot_waveforms: - font = { - "family": "serif", - "color": "black", - "weight": "normal", - "size": 14, - } - plt.xlabel("Amplitude (V)", fontdict=font) - plt.ylabel("Frequency", fontdict=font) + font = {'family': 'serif', 'color': 'black', 'weight': 'normal', 'size': 14,} + plt.xlabel('Amplitude (V)', fontdict = font) + plt.ylabel('Frequency', fontdict = font) bias = self.bias condition = self.condition date = self.date trig = self.trig yrange = self.yrange offset = self.offset - plt.annotate( - " Trig: " - + str(trig) - + "\n Range: " - + str(yrange) - + "\n Offset: " - + str(offset), - xy=(0.65, 0.75), - xycoords="axes fraction", - size=15, - ) - plt.title( - str(date) - + ", " - + str(condition) - + ", " - + temp_mean - + " $\pm$ " - + temp_std - + " K, " - + str(bias) - + " V", - fontdict=font, - ) - # plt.title(date + ', ' + str(condition) + ', ' + temp_mean + ' $\pm$ ' + temp_std + ' K, ' + str(bias) + ' V', fontdict=font) #old way + plt.annotate(' Trig: ' + str(trig) + '\n Range: ' + str(yrange) + '\n Offset: ' + str(offset), xy=(0.65, 0.75), xycoords = 'axes fraction', size=15) + #plt.title(str(date.decode('utf-8')) + ', ' + str(condition) + ', ' + temp_mean + ' $\pm$ ' + temp_std + ' K, ' + str(bias) + ' V', fontdict=font) + plt.title(date + ', ' + str(condition) + ', ' + temp_mean + ' $\pm$ ' + temp_std + ' K, ' + str(bias) + ' V', fontdict=font) #old way plt.subplots_adjust(top=0.9) - plt.yscale("log") + plt.yscale('log') plt.tight_layout() plt.show() - - # - def get_peaks(self, filename: str, acquisition_name: str) -> list[float]: - """Uses scipy.signal.find_peaks to find the peaks of the data. - - Args: - filename (str): Name of file to analyze - acquisition_name (str): Name of particular acquisition - - Returns: - list: List of peak heights. 
- """ + + def plot_led_dark_hists(self, temp_mean, temp_std, new = False): + if not self.led: + return + if new: + plt.figure() #makes new + (n, b, p) = plt.hist(self.all_peak_data, bins = 1500, histtype = 'step', density = False, label = 'All ') + (n1, b1, p1) = plt.hist(self.all_led_peak_data, bins = b, histtype = 'step', density = False, label = 'LED on') + (n2, b2, p2) = plt.hist(self.all_dark_peak_data, bins = b, histtype = 'step', density = False, label = 'LED off') + plt.legend() + for curr_file in self.hd5_files: + print(curr_file) + for curr_acquisition_name in self.acquisition_names[curr_file]: + print(curr_acquisition_name) + + if not self.plot_waveforms: + font = {'family': 'serif', 'color': 'black', 'weight': 'normal', 'size': 14,} + plt.xlabel('Amplitude (V)', fontdict = font) + plt.ylabel('Frequency', fontdict = font) + bias = self.bias + condition = self.condition + date = self.date + trig = self.trig + yrange = self.yrange + offset = self.offset + + dark_count = float(len(self.all_dark_peak_data)) + led_count = float(len(self.all_led_peak_data)) - dark_count + + ratio1 = led_count / dark_count + + self.led_ratio = ratio1 + + ratio2 = float(len(self.all_led_peak_data)) / dark_count + #print(ratio1) + #print(ratio2) + + plt.annotate(' Trig: ' + str(trig) + '\n Range: ' + str(yrange) + '\n Offset: ' + str(offset) + '\n Ratio: ' + str(round(ratio1,2)), xy=(0.80, 0.60), xycoords = 'axes fraction', size=12) + #plt.title(str(date.decode('utf-8')) + ', ' + str(condition) + ', ' + temp_mean + ' $\pm$ ' + temp_std + ' K, ' + str(bias) + ' V', fontdict=font) + plt.title(date + ', ' + str(condition) + ', ' + temp_mean + ' $\pm$ ' + temp_std + ' K, ' + str(bias) + ' V', fontdict=font) #old way + print(condition) + plt.subplots_adjust(top=0.9) + plt.yscale('log') + plt.tight_layout() + plt.show() + +# + def get_peaks(self, filename, acquisition_name): all_peaks = [] + if self.led: + dark_peaks = [] + led_peaks = [] print(filename, acquisition_name) curr_data = self.acquisitions_data[filename][acquisition_name] time = self.acquisitions_time[filename][acquisition_name] @@ -349,178 +234,173 @@ def get_peaks(self, filename: str, acquisition_name: str) -> list[float]: fs = num_points / window_length num_wavefroms = np.shape(curr_data)[1] if self.plot_waveforms: - num_wavefroms = 30 - # if self.plot_waveforms: - # fig = plt.figure() - # fig, axs = plt.subplots(1, 2) + num_wavefroms = 20 + # fig = plt.figure() +# fig, axs = plt.subplots(1, 2) for idx in range(num_wavefroms): - # for idx in range(num_wavefroms if not self.plot_waveforms else 300): - # idx = idx + 8000 #uncomment if plotting waveforms and want to see waveforms at different indices + +# idx = idx + 8000 #uncomment if plotting waveforms and want to see waveforms at different indices + time = self.acquisitions_time[filename][acquisition_name] + if idx % 1000 == 0: print(idx) - + amp = curr_data[:, idx] + if np.amax(amp) > self.upper_limit: continue - + # peaks, props = signal.find_peaks(amp, height = 0.0, prominence = 0.3) #*** - - use_bins = np.linspace(-self.upper_limit, self.upper_limit, 1000) - curr_hist = np.histogram(amp, bins=use_bins) + + use_bins = np.linspace(-self.upper_limit, self.upper_limit, 1000) #added code for alpha + curr_hist = np.histogram(amp, bins = use_bins) baseline_mode_raw, max_counts = get_mode(curr_hist) self.baseline_levels.append(baseline_mode_raw) - # rms = [i**2 for i in amp[(amp <= self.prominence)]] - # self.baseline_rms.append(np.sqrt(np.mean(np.sum(rms)))) - + + if self.baseline_correct: - # 
baseline_level = peakutils.baseline(amp, deg=2) - # amp = amp - baseline_level + + +# # print('baseline:', baseline_level) - use_bins = np.linspace(-self.upper_limit, self.upper_limit, 1000) - curr_hist = np.histogram(amp, bins=use_bins) + baseline_level = peakutils.baseline(amp, deg=2) + self.baseline_mode = baseline_level + amp = amp - baseline_level + + #second baseline correction + + use_bins = np.linspace(-self.upper_limit, self.upper_limit, 2500) + + curr_hist = np.histogram(amp, bins = use_bins) + baseline_level, max_counts = get_mode(curr_hist) - # self.baseline_mode = baseline_level - # print('baseline:', baseline_level) + + self.baseline_mode = baseline_level amp = amp - baseline_level - + if self.do_filter and np.shape(amp) != (0,): - # sos = signal.butter(3, 1E6, btype = 'lowpass', fs = fs, output = 'sos') - sos = signal.butter( - 3, 4e5, btype="lowpass", fs=fs, output="sos" - ) # SPE dark/10g + # sos = signal.butter(3, 1E6, btype = 'lowpass', fs = fs, output = 'sos') + sos = signal.butter(3, 4E5, btype = 'lowpass', fs = fs, output = 'sos') # SPE dark/10g filtered = signal.sosfilt(sos, amp) - amp = filtered - - peaks, props = signal.find_peaks( - amp, **self.peak_search_params - ) # peak search algorithm - + amp = filtered + + peaks, props = signal.find_peaks(amp, **self.peak_search_params) + if self.plot_waveforms: plt.title(acquisition_name) plt.tight_layout() - if len(peaks) > 0: # only plot peaks - plt.plot(time, amp) + if len(peaks) > 0: #only plot peaks + plt.plot(time,amp) for peak in peaks: - plt.plot(time[peaks], amp[peaks], ".") + plt.plot(time[peaks], amp[peaks], '.') + + if self.led: + led_time_thresh = (time[-1] + time[1]) / 2.0 + for peak in peaks: all_peaks.append(amp[peak]) - - plt.show() - return all_peaks - - def get_peak_data(self) -> None: - """ - Collects peak data and stores as a dict in self.peak_data - """ + if self.led: + curr_time = time[peak] + if curr_time < led_time_thresh: + dark_peaks.append(amp[peak]) + else: + led_peaks.append(amp[peak]) + + + if self.led: + # print(f'LED off: {np.mean(dark_peaks)} $+-$ {np.std(dark_peaks,ddof=1)/np.sqrt(len(dark_peaks))}') + # print(f'LED on: {np.mean(led_peaks)} $+-$ {np.std(led_peaks,ddof=1)/np.sqrt(len(led_peaks))}') + return all_peaks, dark_peaks, led_peaks + else: + return all_peaks + + + def get_peak_data(self): self.peak_data = {} for curr_file in self.hd5_files: self.peak_data[curr_file] = {} for curr_acquisition_name in self.acquisition_names[curr_file]: - if ( - self.specifyAcquisition - ): # TODO fix, rm specify, put acquision into names list in init + if self.specifyAcquisition: curr_acquisition_name = self.acquisition - curr_peaks = self.get_peaks(curr_file, curr_acquisition_name) + if self.led: + curr_peaks, curr_dark_peaks, curr_led_peaks = self.get_peaks(curr_file, curr_acquisition_name) + else: + curr_peaks = self.get_peaks(curr_file, curr_acquisition_name) self.peak_data[curr_file][curr_acquisition_name] = curr_peaks self.all_peak_data = self.all_peak_data + curr_peaks - if self.plot_waveforms or self.specifyAcquisition: + if self.led: + self.all_dark_peak_data = self.all_dark_peak_data + curr_dark_peaks + self.all_led_peak_data = self.all_led_peak_data + curr_led_peaks + if self.plot_waveforms == True or self.specifyAcquisition: break - def get_peaks_solicit(self, filename: str, acquisition_name: str) -> list: - """Aggregates y-axis data of all solicited waveforms after optionally - filtering (why?), baseline subtracting (why?), and removing first - 100 data points (why?). 
Also optionally plots waveforms. - - Args: - filename (str): path on disk of hdf5 data to be processed - acquisition_name (str): name of acquisition to be processed - - Returns: - list: concatened list of all amplitude values for all waveforms - """ + def get_peaks_solicit(self, filename, acquisition_name): all_peaks = [] curr_data = self.acquisitions_data[filename][acquisition_name] time = self.acquisitions_time[filename][acquisition_name] window_length = time[-1] - time[0] num_points = float(len(time)) fs = num_points / window_length - # print(fs) +# print(fs) num_wavefroms = np.shape(curr_data)[1] - print(f"num_wavefroms: {num_wavefroms}") - if ( - self.plot_waveforms - ): # TODO replace w/ num_wavefroms if not self.plot_waveforms else 20 +# print(num_wavefroms) + if self.plot_waveforms: num_wavefroms = 20 for idx in range(num_wavefroms): if idx % 100 == 0: print(idx) - + amp = curr_data[:, idx] - if ( - np.amax(amp) > self.upper_limit - ): # skips waveform if amplitude exceeds upper_limit + if np.amax(amp) > self.upper_limit: continue if self.baseline_correct: - use_bins = np.linspace(-self.upper_limit, self.upper_limit, 1000) - curr_hist = np.histogram(amp, bins=use_bins) - baseline_level, _ = get_mode(curr_hist) + # use_bins = np.linspace(-self.upper_limit, self.upper_limit, 1000) + # curr_hist = np.histogram(amp, bins = use_bins) + # baseline_level, _ = get_mode(curr_hist) + + baseline_level = peakutils.baseline(amp, deg=2) amp = amp - baseline_level self.baseline_mode = baseline_level + + #second baseline correction + + use_bins = np.linspace(-self.upper_limit, self.upper_limit, 1000) + curr_hist = np.histogram(amp, bins = use_bins) + baseline_level, max_counts = get_mode(curr_hist) + self.baseline_mode = baseline_level + amp = amp - baseline_level if self.do_filter: - sos = signal.butter(3, 4e5, btype="lowpass", fs=fs, output="sos") + sos = signal.butter(3, 4E5, btype = 'lowpass', fs = fs, output = 'sos') filtered = signal.sosfilt(sos, amp) amp = filtered - # peaks, props = signal.find_peaks(amp, **self.peak_search_params) +# peaks, props = signal.find_peaks(amp, **self.peak_search_params) if self.plot_waveforms: if self.fourier: fourier = np.fft.fft(amp) n = amp.size - duration = 1e-4 - freq = np.fft.fftfreq(n, d=duration / n) - colors = [ - "b", - "g", - "r", - "m", - "c", - "y", - "k", - "aquamarine", - "pink", - "gray", - ] - marker, stemlines, baseline = plt.stem( - freq, - np.abs(fourier), - linefmt=colors[0], - use_line_collection=True, - markerfmt=" ", - ) - plt.setp( - stemlines, - linestyle="-", - linewidth=1, - color=colors[0], - alpha=5 / num_wavefroms, - ) # num_wavefroms always 20? - plt.yscale("log") + duration = 1E-4 + freq = np.fft.fftfreq(n, d= duration/n) + colors = ['b', 'g', 'r', 'm', 'c', 'y', 'k', 'aquamarine', 'pink', 'gray'] + marker, stemlines, baseline = plt.stem(freq, np.abs(fourier), linefmt=colors[0], use_line_collection = True, markerfmt = " ") + plt.setp(stemlines, linestyle = "-", linewidth = 1, color = colors[0], alpha = 5/num_wavefroms) + plt.yscale('log') plt.show() + else: plt.title(acquisition_name) plt.tight_layout() - plt.plot(time, amp) - plt.show() + plt.plot(time,amp) amp = list(amp[100:]) - all_peaks += amp + all_peaks+=amp return all_peaks - - def get_peak_data_solicit(self) -> None: - """Get peak data and add as a dict to self.peak_data. 
No return value.""" + + + def get_peak_data_solicit(self): self.peak_data = {} for curr_file in self.hd5_files: self.peak_data[curr_file] = {} @@ -528,20 +408,11 @@ def get_peak_data_solicit(self) -> None: if self.specifyAcquisition: curr_acquisition_name = self.acquisition curr_peaks = self.get_peaks_solicit(curr_file, curr_acquisition_name) - self.peak_data[curr_file][curr_acquisition_name] = curr_peaks - if self.plot_waveforms or self.specifyAcquisition: + self.peak_data[curr_file][curr_acquisition_name] = curr_peaks + if self.plot_waveforms == True or self.specifyAcquisition: break - - def plot_peak_waveform_hist(self, num=-1, color="blue") -> None: - """Plot peak waveforms as a 2D histogram. The function does not return any value. - - Args: - num (int, optional): The number of waveforms to process for each acquisition. - If set to -1 (default), all waveforms are processed. If set to a positive integer, - only that many waveforms are processed for each acquisition. - - color (str, optional): The color to be used for the information box in the plot. Defaults to 'blue'. - """ + + def plot_peak_waveform_hist(self, num = -1, color = 'blue'): fig = plt.figure() waveform_data = [] @@ -556,56 +427,68 @@ def plot_peak_waveform_hist(self, num=-1, color="blue") -> None: window_length = time[-1] - time[0] num_points = float(len(time)) fs = num_points / window_length - # print(fs) + # print(fs) if num < 1: num_w = np.shape(curr_data)[1] else: num_w = num - # print(num_wavefroms) + # print(num_wavefroms) for idx in range(num_w): - # for idx in range(200): - if idx % 1000 == 0: + # for idx in range(200): + if idx % 100 == 0: print(idx) - + amp = curr_data[:, idx] - - if np.amax(amp) > self.upper_limit: # SPE + + if np.amax(amp) > self.upper_limit: #SPE continue - sos = signal.butter(3, 4e5, btype="lowpass", fs=fs, output="sos") + + #first baseline correction + + baseline_level = peakutils.baseline(amp, deg=2) + amp = amp - baseline_level + self.baseline_mode = baseline_level + + #second baseline correction + + use_bins = np.linspace(-self.upper_limit, self.upper_limit, 1000) + curr_hist = np.histogram(amp, bins = use_bins) + baseline_level, max_counts = get_mode(curr_hist) + self.baseline_mode = baseline_level + amp = amp - baseline_level + + sos = signal.butter(3, 4E5, btype = 'lowpass', fs = fs, output = 'sos') filtered = signal.sosfilt(sos, amp) amp = filtered + good_idx = (amp > 0.05) & (time > 1E-5) + amp = amp[good_idx] + time_curr = time[good_idx] waveform_data += list(amp) - waveform_times += list(time) - if self.plot_waveforms or self.specifyAcquisition: + waveform_times += list(time_curr) + if self.plot_waveforms == True or self.specifyAcquisition: break - - plt.hist2d(waveform_times, waveform_data, bins=30, norm=mpl.colors.LogNorm()) - plt.xlabel(r"Time [$\mu$s]") - plt.ylabel("Waveform Amplitude [V]") - textstr = f"Date: {self.date}\n" - textstr += f"Condition: {self.condition}\n" - textstr += f"Bias: {self.bias:0.4} [V]\n" - # textstr += f'RTD4: {self.temperature} [K]\n' - textstr += f"Superposition of {num_w} waveforms" - props = dict(boxstyle="round", facecolor="tab:" + color, alpha=0.4) + + plt.hist2d(waveform_times, waveform_data, bins = 50, norm=mpl.colors.LogNorm()) + plt.xlabel(r'Time [$\mu$s]') + plt.ylabel('Waveform Amplitude [V]') + + led_time_thresh = (time[-1] + time[1]) / 2.0 + + plt.axvline(x = led_time_thresh, color = 'r', label = 'LED on/off time') + # textstr = f'Date: {self.info.date}\n' + # textstr += f'Condition: {self.info.condition}\n' + # textstr += f'Bias: 
{self.info.bias:0.4} [V]\n' + # textstr += f'RTD4: {self.info.temperature} [K]\n' + # textstr += f'Superposition of {num_w} waveforms' + # props = dict(boxstyle='round', facecolor='tab:' + color, alpha=0.4) low, high = plt.ylim() - plt.ylim(low, 4.5) - fig.text(0.6, 0.9, textstr, fontsize=8, verticalalignment="top", bbox=props) + # plt.ylim(low, 4.5) + # fig.text(0.6, 0.9, textstr, fontsize=8, + # verticalalignment='top', bbox=props) plt.tight_layout() - - def average_all_waveforms(self) -> tuple: - """Calculates and returns the average of waveform data from multiple acquisitions across several files. - - The function iterates over all files and their respective acquisitions, collects the waveform data, - calculates the average across all collected waveforms, and returns this average alongside the time data - of the last processed acquisition. - - Returns: - tuple: A tuple consisting of two NumPy arrays: - - The first element is the averaged waveform data across all acquisitions and files. - - The second element is the time data of the last processed acquisition. - """ + + def average_all_waveforms(self): waveform_data = [] for curr_file in self.hd5_files: for curr_acquisition_name in self.acquisition_names[curr_file]: @@ -613,5 +496,5 @@ def average_all_waveforms(self) -> tuple: waveform_data.append(curr_data) time_data = self.acquisitions_time[curr_file][curr_acquisition_name] all_data = np.array(waveform_data) - averaged_data = np.average(all_data, axis=(0, 2)) + averaged_data = np.average(all_data, axis = (0, 2)) return averaged_data, time_data
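
Reviewer note (not part of the patch): the new get_subtract_hist_mean added to ProcessWaveforms_MultiGaussian.py measures correlated avalanches (CA) by subtracting the LED-off pulse-height histogram from the LED-on one and converting the mean of the residual spectrum into a CA estimate via the SPE amplitude. A minimal sketch of that logic on toy Gaussian data; subtract_hist_mean and spe_amplitude are illustrative names, with spe_amplitude standing in for the self.spe_res.params['slope'].value used in the patch:

    import numpy as np

    def subtract_hist_mean(led_on, led_off, spe_amplitude, numbins=2000):
        # Histogram both datasets on a shared binning so the bins line up
        counts_on, bin_edges = np.histogram(led_on, bins=numbins)
        counts_off, _ = np.histogram(led_off, bins=bin_edges)
        centers = (bin_edges[1:] + bin_edges[:-1]) / 2
        # LED-on minus LED-off isolates the LED-induced pulse population
        subtracted = counts_on - counts_off
        norm = subtracted / np.sum(subtracted)
        mean_value = np.sum(centers * norm)        # mean LED pulse amplitude
        ca_value = mean_value / spe_amplitude - 1  # fractional charge above one SPE
        return mean_value, ca_value

    # Toy check: LED pulses sit ~10% above a 0.010 V SPE amplitude
    rng = np.random.default_rng(0)
    led_off = rng.normal(0.0, 0.002, 50000)
    led_on = np.concatenate([led_off, rng.normal(0.011, 0.002, 50000)])
    mean_value, ca_value = subtract_hist_mean(led_on, led_off, spe_amplitude=0.010)
    print(mean_value, ca_value)  # roughly 0.011 V and 0.1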
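
Likewise, the LED on/off split added to RunInfo.get_peaks classifies each detected peak by whether it lands before or after the midpoint of the acquisition window, on the assumption that the LED fires only in the second half of each waveform. A sketch of just that classification step; split_peaks_by_led and its arguments are illustrative:

    import numpy as np
    from scipy import signal

    def split_peaks_by_led(time, amp, peak_search_params):
        # Same midpoint rule the patch uses: (time[-1] + time[1]) / 2.0
        led_time_thresh = (time[-1] + time[1]) / 2.0
        peaks, _ = signal.find_peaks(amp, **peak_search_params)
        dark_peaks = [amp[p] for p in peaks if time[p] < led_time_thresh]  # LED off
        led_peaks = [amp[p] for p in peaks if time[p] >= led_time_thresh]  # LED on
        return dark_peaks, led_peaks

    # Toy usage: one narrow pulse in each half of a 10 us window
    t = np.linspace(0, 1e-5, 1000)
    wf = (np.exp(-0.5 * ((t - 2e-6) / 5e-8) ** 2)
          + np.exp(-0.5 * ((t - 8e-6) / 5e-8) ** 2))
    dark, led = split_peaks_by_led(t, wf, {'height': 0.5, 'prominence': 0.3})
    print(len(dark), len(led))  # 1 1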