Skip to content
Snippets Groups Projects
Commit dcbae6c7 authored by Christian Darsow-Fromm's avatar Christian Darsow-Fromm
Browse files

Merge branch 'develop'

parents 5385d74a 9c05d335
Branches
Tags
No related merge requests found
Showing
with 492 additions and 58 deletions
......@@ -23,3 +23,4 @@ src/tests/test_io/gzip_data_files/
*.gz
\.dmypy\.json
TestCSV*
......@@ -53,9 +53,23 @@ test:
- src/tests/htmlcov
expire_in: 30 days
.codequality:
image: lasnq/openqlab
stage: test
allow_failure: true
before_script:
- pip install pycodestyle
script:
- pycodestyle src/openqlab
cache:
key: openqlab:pep8
paths:
- .pip/
pylint:
image: lasnq/openqlab
stage: test
allow_failure: true
before_script:
script:
- make pylint
......@@ -123,3 +137,37 @@ update rrz tag:
- git push $PUBLIC_REPO $CI_COMMIT_TAG
only:
- tag
update nqlab version:
image: alpine
stage: deploy
before_script:
- apk add git gawk
- sh bin/enable_ssh
- git config --global user.email "lasnq-ci@physnet"
- git config --global user.name "LasNQ CI"
script:
- export VERSION=`git tag | tail -1 | sed 's/v//g'`
# clone git repo
- git clone git@git.physnet.uni-hamburg.de:las-nq/nqlab.git
- cd nqlab
- git checkout master
# change openqlab version in requirements
- sed -i '/openqlab.*/d' requirements.txt
- echo "openqlab>=$VERSION" >> requirements.txt
# push changes
- git add requirements.txt
- git commit -m "openqlab version $VERSION"
- git push
# make nqlab version
- export old_version=`git tag | tail -1`
- export new_version=`gawk -f ../bin/increment_version.sh $old_version`
- echo $new_version
- git tag $new_version -m 'Autoupdated version from openqlab'
- git push --tags
only:
- tag
......@@ -234,7 +234,7 @@ const-naming-style=UPPER_CASE
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
docstring-min-length=10
# Naming style matching correct function names.
function-naming-style=snake_case
......
......@@ -8,5 +8,5 @@ WORKDIR /code
RUN apt-get update -y && apt-get install -y $BUILD_PACKAGES
RUN pip install --upgrade pip
ADD requirements.txt /code/
RUN pip install --upgrade -r requirements.txt
ADD requirements.txt requirements-devel.txt /code/
RUN pip install --upgrade -r requirements.txt -r requirements-devel.txt
......@@ -6,6 +6,12 @@ requirements:
requirements-user:
@pip3 install --user -r requirements.txt
requirements-devel:
@pip3 install -r requirements-devel.txt
requirements-devel-user:
@pip3 install --user -r requirements-devel.txt
doc:
@export PYTHONPATH=`pwd`/src:$(PYTHONPATH); cd doc; make html
......@@ -22,7 +28,7 @@ mypy:
@export PYTHONPATH=`pwd`/src:$(PYTHONPATH); mypy src
pylint:
@export PYTHONPATH=`pwd`/src:$(PYTHONPATH); pylint --exit-zero --disable=C src/openqlab
@export PYTHONPATH=`pwd`/src:$(PYTHONPATH); pylint --disable=C src/openqlab
all-tests: mypy test pylint
......
#!/usr/bin/env python3
import json
import os.path
from pycodestyle import BaseReport, StyleGuide
import sys
class CodeClimateReport(BaseReport):
    """Print results of the checks in Code Climate format."""

    def error(self, line_number, offset, text, check):
        """Print an error in Code Climate format.

        Emits one JSON issue document to stdout, NUL-terminated as the
        Code Climate engine specification requires, and returns the
        pycodestyle error code (or a falsy value if the error was
        suppressed/expected by the base report).
        """
        # Py3: zero-argument super() instead of super(CodeClimateReport, self).
        code = super().error(line_number, offset, text, check)
        if code:
            issue = {
                'type': 'issue',
                'check_name': code,
                # text begins with the error code ("E501 ..."); skip it
                # for the human-readable description.
                'categories': ['Style'],
                'description': text[5:].capitalize(),
                'remediation_points': 50000,
                'location': {
                    'path': os.path.normpath(self.filename),
                    'positions': {
                        'begin': {
                            'line': line_number,
                            'column': offset + 1
                        },
                        'end': {
                            'line': line_number,
                            'column': offset + 1
                        }
                    }
                },
                'content': {
                    # The check's docstring explains the rule being violated.
                    'body': check.__doc__.strip()
                }
            }
            print(json.dumps(issue) + '\0')
        return code
def run(paths):
    """Parse options and run checks on Python source."""
    import signal

    # Exit quietly when stdout is closed early ("Broken pipe").
    try:
        signal.signal(signal.SIGPIPE, lambda _sig, _frame: sys.exit(1))
    except AttributeError:
        # signal.SIGPIPE does not exist on Windows.
        pass

    style_guide = StyleGuide(paths=paths, reporter=CodeClimateReport)
    options = style_guide.options
    report = style_guide.check_files()
    if report.total_errors:
        if options.count:
            sys.stderr.write('%d\n' % report.total_errors)
        sys.exit(1)
# --- Code Climate entry point ---------------------------------------------
# Default: check the whole tree. If a Code Climate /config.json exists,
# restrict the run to its "include_paths", keeping only directories and
# Python files.
include_paths = ["."]
if os.path.exists("/config.json"):
    # Use a context manager so the file handle is not leaked.
    with open("/config.json") as config_file:
        config = json.load(config_file)
    i_paths = config.get("include_paths")
    # isinstance instead of type() == list: also accepts list subclasses.
    if isinstance(i_paths, list):
        python_paths = []
        for i in i_paths:
            ext = os.path.splitext(i)[1]
            if os.path.isdir(i) or ext == ".py":
                python_paths.append(i)
        include_paths = python_paths
if include_paths:
    args = " ".join(include_paths)
    sys.stderr.write(args)
    run(include_paths)
......@@ -6,4 +6,6 @@ mkdir -p ~/.ssh
chmod 700 ~/.ssh
echo "$SSH_PRIVATE_KEY" | tr -d '\r' > ~/.ssh/id_rsa
echo "gitlab.rrz.uni-hamburg.de,134.100.32.38 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKSK3dLzO0LTDNqjV7xH9713WXTCyhDUoxqNLrD/gUdK3m+dDfIXzZ46FeQB/xjmzz6kRk/n7VrujZM5NkISk8Y=" > ~/.ssh/known_hosts
echo "git.physnet.uni-hamburg.de,134.100.109.28 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBB1c6yDYUhd5zYspAYSVYmdVah0HhxoeviJvNxpe98Z9ywFYMLDoSoRAYR7tqVQV23Unmf7lxiLOMOl5DEZ2Xgs=" >> ~/.ssh/known_hosts
chmod 600 ~/.ssh/id_rsa
#!/usr/bin/gawk -f
# Usage: gawk -f increment_version.sh <version>
# Prints <version> with its last dotted component incremented,
# e.g. "1.2.3" -> "1.2.4", "1.9" -> "2.0".
BEGIN{
    # The version string is passed as the first command-line argument.
    var = ARGV[1]
    printf("%s\n", inc(var))
}
# inc(s) -> s with its last dotted component incremented.
# The extra parameters (a, len1, ...) are the awk idiom for local variables.
# When a component overflows its digit width ("1.9" -> "2.0") the carry
# propagates into the leading components via recursion.
function inc(s, a, len1, len2, len3, head, tail)
{
    # NOTE: a stray debug statement "echo s" was removed here; "echo" is
    # not an awk command and the line was a no-op concatenation.
    split(s, a, ".")
    len1 = length(a)
    if(len1==0)
        return -1
    else if(len1==1)
        return s+1
    len2 = length(a[len1])      # digit width of the last component
    len3 = length(a[len1]+1)    # digit width after incrementing
    head = join(a, 1, len1-1)
    # Increment modulo the digit width, preserving zero padding.
    tail = sprintf("%0*d", len2, (a[len1]+1)%(10^len2))
    if(len2==len3)
        return head "." tail
    else
        # Overflow: carry into the leading components.
        return inc(head) "." tail
}
# join(a, x, y) -> elements a[x]..a[y] concatenated with "." separators.
# s and i are local variables (extra-parameter idiom); previously i was
# missing from the parameter list and leaked into the global scope.
function join(a, x, y, s, i)
{
    for(i=x; i<y; i++)
        s = s a[i] "."
    return s a[y]
}
......@@ -16,6 +16,14 @@ Fit beam data obtained from a beam analyzer to the gaussian beam model using non
.. automodule:: openqlab.analysis.gaussian_beam
:members:
Cavity
------
Cavity calculations
.. automodule:: openqlab.analysis.cavity
:members:
Phase
-----
......
......@@ -46,3 +46,14 @@ If you want to install it for all users on the PC, run:
```bash
sudo python3 setup.py install
```
## Development
If you want to take part in the development, install the additional requirements from `requirements-devel.txt`:
```bash
make requirements-devel
# If you want to install at user level:
make requirements-devel-user
```
Sphinx
recommonmark
sphinx-markdown-tables
wheel
pytest
pytest-cov>=2.5.1
sphinx_rtd_theme
setuptools
twine
mypy
pylint
......@@ -87,3 +87,8 @@ exclude =
# This will be used when updating. Do not change!
version = 3.0.3
package = openqlab
[pycodestyle]
max-line-length = 100
statistics = True
exclude = OldImporters
"""Automatically calculate cavity parameters with data taken from an oscilloscope."""
from typing import Optional, Tuple, List
import logging as log
from pandas import Series
from scipy.signal import find_peaks, peak_widths
import numpy as np
import matplotlib.pyplot as plt
def modematching(data: Series, # pylint: disable=invalid-name, too-many-arguments
                 plot: bool = False,
                 U_max: Optional[float] = None,
                 offset: Optional[float] = None,
                 rel_prominence: float = .02,
                 without_main_peaks: bool = False,
                 ) -> float:
    """Calculate the mode matching.

    It assumes a cavity scan bounded by two peaks of the main mode.
    The method looks for the smaller peaks where the detection threshold
    can be adjusted with :obj:`rel_prominence`.

    Offset
        The default method to find out the offset is by calculating the median value.
        If you have measured it more precisely, use the parameter :obj:`offset`.

    Improve precision
        To get a better resolution for small peaks there is an option to take data
        with a clipped main mode. Use the parameter :obj:`U_max` to manually set
        the measured maximum value.

    Parameters
    ----------
    data : Series
        Measured data (just one column).
    plot : bool
        Make a plot to see if the correct peaks were detected.
    U_max : Optional[float]
        U_max is the parameter to set the peak voltage of the clipped main peak.
    offset : Optional[float]
        Manually measured offset voltage; defaults to the median of the data.
    rel_prominence : float
        rel_prominence is the parameter to adjust the threshold for the detection
        of small peaks.
    without_main_peaks : bool
        Takes all peaks as minor peaks, if the main peaks are clipped.
        This requires the parameter :obj:`U_max` to be set.

    Returns
    -------
    float
        Calculated mode matching value.

    Raises
    ------
    ValueError
        If `data` has more than one column, if `without_main_peaks` is used
        without `U_max`, or if the main mode is not found exactly twice.
    """
    if len(data.shape) != 1:
        raise ValueError('The DataFrame should only contain one single column.')
    if without_main_peaks and not U_max:
        raise ValueError('without_main_peaks can not be used without U_max.')
    data = data.dropna()
    # Adjust offset (median is robust against the peaks themselves)
    if offset is None:
        offset = np.median(data)
    data -= offset
    # Make peaks positive if necessary
    _adjust_peak_sign(data)
    # Find highest value
    if U_max is None:
        U_max = np.max(data)
    else:
        # U_max was measured before the offset correction above,
        # so it must be corrected as well.
        U_max = abs(U_max - offset)
    peaks, main_mode = _find_peaks(data, rel_prominence, U_max)
    if not without_main_peaks:
        if len(main_mode) != 2:
            raise ValueError('The main mode must occur exactly two times for the algorithm to work,'
                             f' but it found {len(main_mode)} main modes.')
    # Main peak voltage
    log.info(f'U_max: {U_max}')
    # Sum of all different modes (excluding 2nd main mode);
    # U_max is used as the start value of sum(), so the main peak is
    # counted exactly once even when it was clipped.
    if without_main_peaks:
        U_sum = sum(data.iloc[peaks], U_max) # pylint: disable=invalid-name
    else:
        # Only the minor peaks strictly between the two main peaks count.
        U_sum = sum(data.iloc[peaks[main_mode[0]+1:main_mode[1]]], U_max) # pylint: disable=invalid-name
        # This version with U_max makes it possible to manually
        # include a clipped value for the main peak
    log.info(f'U_sum: {U_sum}')
    # Mode matching
    mode_matching = U_max / U_sum
    # Plotting
    if plot:
        _main_plot(data, peaks=peaks, main_peaks=peaks[main_mode])
        if not without_main_peaks:
            # Grey out the regions outside the free spectral range.
            index_first, index_last = peaks[main_mode]
            plt.axvline(x=data.index[index_first], color='gray')
            plt.axvline(x=data.index[index_last], color='gray')
            plt.axvspan(data.index[0], data.index[index_first], color='gray', alpha=0.5)
            plt.axvspan(data.index[index_last], data.index[-1], color='gray', alpha=0.5)
    print(f'Mode matching: {round(mode_matching*100, 2)}%')
    return mode_matching
def _main_plot(data: Series,
               peaks: Optional[np.ndarray] = None,
               main_peaks: Optional[np.ndarray] = None):
    """Plot the scan data and mark the detected peaks.

    Parameters
    ----------
    data : Series
        The (offset-corrected) scan data to plot.
    peaks : Optional[np.ndarray]
        Positional indices of all detected mode peaks; drawn as dots.
    main_peaks : Optional[np.ndarray]
        Positional indices of the main-mode peaks; drawn as circles.
    """
    axes = data.plot()
    axes.set_xlim(data.index[0], data.index[-1])
    if peaks is not None:
        # Minor mode peaks as small dots on the same axes.
        data.iloc[peaks].plot(style='.')
    if main_peaks is not None:
        # Main mode peaks as larger circle markers.
        data.iloc[main_peaks].plot(style='o')
def _find_peaks(data: Series,
rel_prominence: float,
max_value: Optional[float] = None
) -> Tuple[np.ndarray, np.ndarray]:
if max_value is None:
max_value = np.max(data)
# Find peaks
peaks, peak_dict = find_peaks(data, prominence=max_value*rel_prominence)
# Find occurences of the main mode.
main_mode = np.where(peak_dict['prominences'] >= np.max(data)*.9)[0]
return peaks, main_mode
def finesse(data: Series, plot: bool = False) -> List[float]:
    """Finesse calculation using a cavity scan.

    Parameters
    ----------
    data : Series
        data is the amplitude column with two main modes.
    plot : bool
        plot is giving out a plot to make sure the algorithm has found the correct points.

    Returns
    -------
    list(float)
        Calculated finesse for both peaks.

    Raises
    ------
    ValueError
        If `data` has more than one column or if the main mode is not
        found exactly twice.
    """
    if len(data.shape) != 1:
        raise ValueError('The DataFrame should only contain one single column.')
    data = data.dropna()
    # Make peaks positive if necessary (modifies data in place).
    _adjust_peak_sign(data)
    peaks, main_mode = _find_peaks(data, .9)
    # Guard: _calculate_finesse indexes main_mode[0] and main_mode[1] and
    # would otherwise fail with an obscure IndexError.
    if len(main_mode) != 2:
        raise ValueError('The main mode must occur exactly two times for the algorithm to work,'
                         f' but it found {len(main_mode)} main modes.')
    result = _calculate_finesse(data, peaks, main_mode, plot)
    print(f'Finesse first peak: {round(result[0], 2)}, second peak: {round(result[1], 2)}')
    return result
def _calculate_finesse(data: Series,
peaks: np.ndarray,
main_mode: np.ndarray,
plot: bool = False
) -> List[float]:
peak_data = peak_widths(data, peaks)
peak_fwhm = peak_data[0]
peaks_left = peak_data[2]
peaks_right = peak_data[3]
main_width = peak_fwhm[main_mode]
fsr = peaks[main_mode[1]] - peaks[main_mode[0]]
if plot:
_main_plot(data, main_peaks=peaks[main_mode])
for x in np.concatenate([peaks_left[main_mode], peaks_right[main_mode]]):
plt.axvline(x=data.index[int(x)], ls=':', color='green')
return [fsr / main_width[0], fsr / main_width[1]]
def _adjust_peak_sign(data: Series):
minimum = np.min(data)
maximum = np.max(data)
if abs(minimum) > abs(maximum):
data *= -1
......@@ -26,3 +26,10 @@ def accumulated_phase(data: Series, limit: float = 340) -> None:
for i in range(1, len(data)):
if abs(data.iloc[i-1] - data.iloc[i]) > limit:
data.iloc[i:] += 360 * sign(data.iloc[i-1] - data.iloc[i])
def clamp_phase(phase):
    """
    Return the phase wrapped into the range [-180, 180) degrees.

    Works element-wise for numpy arrays and pandas objects as well as
    for plain scalars.
    """
    shifted = (phase + 180.0) % 360.0
    return shifted - 180.0
......@@ -4,13 +4,12 @@ import numpy as np
import pandas as pd
from scipy import signal
from tabulate import tabulate
from warnings import warn
from ..conversion import db
from .. import plots
from ..io import DataContainer
from ..conversion.utils import human_readable
# TODO update documentation in servodesign.rst`
def _handle_keysight_files(df):
value = df.copy()
......@@ -203,14 +202,14 @@ class Differentiator(Filter):
cF: :obj:`float`
The corner frequency.
sF: :obj:`float`, optional
Frequency were the ~f slope stops, defaults to 1000 * `cF`.
Frequency where the ~f slope stops, defaults to 10 * `cF`.
"""
def calculate(self):
z = -self.cF
if self.sF is None:
self._second_parameter = self.cF * 1000
self._second_parameter = self.cF * 10
p = -self.sF
k = self.sF / self.cF
return z, p, k
......@@ -310,9 +309,15 @@ class ServoDesign:
Current purpose is mainly filter handling and should be used as follows:
The object itself holds a set of (currently maximum) 5 filters. Filter types can be defined as new subclasses to the Filter class.
The object itself holds a set of (currently maximum) 5 filters.
Filter types can be defined as new subclasses to the Filter class.
The FILTER UTILITY section contains all methods handling filter operations, including clear and read. Filters should be added using either 'add' or 'addIndexed'. Normal 'add' will simply append the filter to the list and fail when the list is fullself. 'addIndexed' however will overwrite the the filter at the current position and fail in case an index out of range was specified.
The FILTER UTILITY section contains all methods handling filter operations,
including clear and read.
Filters should be added using either 'add' or 'addIndexed'.
Normal 'add' will simply append the filter to the list and fail when the list is full.
'addIndexed' however will overwrite the filter at the current position
and fail in case an index out of range was specified.
"""
MAX_FILTERS = 5
......@@ -569,7 +574,7 @@ class ServoDesign:
df['Servo+TF P'] = phase + p
return df
def plot(self, freq=None, plot=True, correct_latency=False):
def plot(self, freq=None, plot=True, correct_latency=False, **kwargs):
"""
Plot the servo response over the frequencies given in `freq`.
......@@ -588,6 +593,8 @@ class ServoDesign:
correct_latency: :obj:`bool` or :obj:`float`
If the data has been taken piping through ADwin an extra phase has been added.
This can be corrected by giving ADwins sample rate (Default 200 kHz).
**kwargs
Parameters are passed to the :obj:`pandas.DataFrame.plot` method
Returns
-------
......@@ -599,8 +606,8 @@ class ServoDesign:
if self.plant is None:
df = self._apply(freq)
dfA = df['Servo A']
dfP = df['Servo P']
df_amplitude = df['Servo A']
df_phase = df['Servo P']
else:
df = self._apply(self.plant.index,
self.plant.iloc[:, 0], self.plant.iloc[:, 1])
......@@ -610,15 +617,15 @@ class ServoDesign:
correct_latency = self.SAMPLING_RATE
print(type(correct_latency))
df['Servo+TF P'] = df['Servo+TF P'] + 360 * df.index / correct_latency
dfA = df[['Servo A', 'Servo+TF A']]
dfP = df[['Servo P', 'Servo+TF P']]
df_amplitude = df[['Servo A', 'Servo+TF A']]
df_phase = df[['Servo P', 'Servo+TF P']]
if not plot:
return df
# Plotting
plt = plots.amplitude_phase(dfA, dfP)
plt = plots.amplitude_phase(df_amplitude, df_phase, **kwargs)
# add 0dB and -135deg markers
plt.axes[0].hlines(0, *plt.axes[0].get_xlim(),
colors=(0.6, 0.6, 0.6), linestyles='dashed')
......@@ -626,33 +633,38 @@ class ServoDesign:
colors=(0.6, 0.6, 0.6), linestyles='dashed')
return plt
def discrete_form(self, fs=SAMPLING_RATE, filename=None):
def discrete_form(self, sampling_frequency=SAMPLING_RATE, # pylint: disable=invalid-name
filename=None,
fs=None):
"""
Convert the servo and its filters to a digital, discrete-time representation in terms of second-order sections at a sampling frequency of `fs`.
Convert the servo and its filters to a digital,
discrete-time representation in terms of second-order sections
at a sampling frequency of `sampling_frequency`.
If `filename` is given, write the filter coefficients in
machine-readable format into this file.
.. todo:: this should probably return a usable form of this filter for
plotting the discrete-time response (using signal.dbode)
Returns
-------
:obj:`dict`
a dictionary containing sample rate, gain and SOS coefficients for
each filter
"""
if fs is not None:
warn('fs is deprecated. use sampling_frequency.', DeprecationWarning)
sampling_frequency = fs
coeffs = []
for f in self._filters:
if f is not None:
coeffs.append(f.discrete_SOS(fs).flatten())
coeffs.append(f.discrete_SOS(sampling_frequency).flatten())
filters = {}
for f, d in zip(self._filters, coeffs):
if f is not None:
filters[f.description] = d
data = {
'fs': fs,
'fs': sampling_frequency,
'gain': self.gain,
'filters': filters
}
......
......@@ -156,6 +156,9 @@ class DataContainer(pd.DataFrame, metaclass=MetaDataContainer):
return string
def __repr__(self):
return super().__repr__()
def __getitem__(self, key):
header = self.header
output = super().__getitem__(key)
......@@ -214,9 +217,13 @@ class DataContainer(pd.DataFrame, metaclass=MetaDataContainer):
@staticmethod
def from_csv(file, *args, index_col=0, **kwargs) -> 'DataContainer':
try:
with open(file, 'r') as f:
header_dict = DataContainer._json_to_header(f)
output = DataContainer(pd.read_csv(f, *args, index_col=index_col, **kwargs), header=header_dict)
except TypeError:
header_dict = DataContainer._json_to_header(file)
output = DataContainer(pd.read_csv(file, *args, index_col=index_col, **kwargs), header=header_dict)
return output
def to_json(self, path_or_buf=None, *args, orient='split', **kwargs):
......
......@@ -6,10 +6,10 @@ from openqlab.io.importers import utils
class DataContainerCSV(StreamImporter):
NAME = 'DataContainerCSV'
AUTOIMPORTER = True
STARTING_LINES = ['^' + DataContainer.json_prefix]
STARTING_LINES = [DataContainer.json_prefix]
def read(self):
self._check_header()
self._stream.seek(0)
output = DataContainer.from_csv(self._stream, parse_dates=True)
if output.empty:
raise utils.ImportFailed(
......
"""Plotting scripts in frequency domain."""
from typing import Optional
from warnings import warn
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from openqlab import analysis
from ..conversion import db
from ..io import DataContainer
......@@ -9,11 +12,18 @@ def _clamp_phase(phase):
"""
Returns phase with all values mapped to between +/- 180 degrees.
"""
return (phase + 180.0) % 360.0 - 180.0
def amplitude_phase(amplitude, phase, logf=True, bodeplot=True, clamp_phase=True,
dbunits=True, title='Transfer Function'):
warn('DEPRECATED: _clamp_phase is deprecated, \
use openqlab.analysis.phase.clamp_phase.', DeprecationWarning)
return analysis.phase.clamp_phase(phase)
def amplitude_phase(amplitude, phase, # pylint: disable=too-many-arguments
logf=True,
bodeplot=True,
clamp_phase=True,
dbunits=True,
title='Transfer Function',
**kwargs):
"""
Create an amplitude-phase plot to display transfer function measurements.
......@@ -35,6 +45,8 @@ def amplitude_phase(amplitude, phase, logf=True, bodeplot=True, clamp_phase=True
Use dB units for display. Otherwise convert to linear units.
title: :obj:`str`, optional
The figure title.
**kwargs:
Parameters are passed to the :obj:`pandas.DataFrame.plot` method
Returns
-------
......@@ -51,17 +63,17 @@ def amplitude_phase(amplitude, phase, logf=True, bodeplot=True, clamp_phase=True
if dbunits:
ampl = amplitude
else:
ampl = db.to_lin(amplitude / 2.0) # the /2.0 accounts for power/amplitude
ampl: DataContainer = db.to_lin(amplitude / 2.0) # the /2.0 accounts for power/amplitude
ampl.plot(ax=ax, title=title, legend=False, logx=logf, logy=(not dbunits))
ampl.plot(ax=ax, title=title, legend=False, logx=logf, logy=(not dbunits), **kwargs)
if not bodeplot:
for ii in range(len(ax.lines) + 1):
for _ in range(len(ax.lines) + 1):
# HACKHACK: advance color cycler so we get continuous colours across
# the two plots. May break in future versions of matplotlib.
next(ax2._get_lines.prop_cycler)
if clamp_phase:
phase = _clamp_phase(phase)
phase = analysis.phase.clamp_phase(phase)
phase.plot(ax=ax2, legend=False, logx=logf)
ax.set_ylabel('Amplitude (dB)')
ax2.set_ylabel(u'Phase (º)')
......@@ -96,7 +108,6 @@ def power_spectrum(data, normalize_to=None, logf=True, title='Power Spectrum'):
A handle to the created matplotlib figure.
"""
# TODO: implement linear/power spectral density (/Hz) plots
fig, ax = plt.subplots()
if normalize_to is not None:
......@@ -113,12 +124,12 @@ def power_spectrum(data, normalize_to=None, logf=True, title='Power Spectrum'):
return fig
def relative_input_noise(data: DataContainer,
def relative_input_noise(data: DataContainer, # pylint: disable=too-many-arguments
volt_dc: float,
logf: bool = True,
logy: bool = True,
title: Optional[str] = None,
ylabel: str = 'RIN ($1/\sqrt{\mathrm{Hz}}$)',
ylabel: str = r'RIN ($1/\sqrt{\mathrm{Hz}}$)',
**kwargs) -> Axes:
"""Create a plot for relative input noise.
......
......@@ -93,7 +93,7 @@ def scope(traces, title='Oscilloscope View'):
line, = host.plot(traces.index, traces.iloc[:, 0], label=traces.columns[0])
host.set_ylabel(traces.columns[0])
host.set_xlabel(traces.index.NAME)
host.set_xlabel(traces.index.name)
host.axis['left'].label.set_color(line.get_color())
for ii in range(1, Ntraces):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please to comment