diff --git a/.gitignore b/.gitignore
index c39454a3b8793a9094ba292ec1eed9a92cbd555d..19fe45cfe7e135d339bf358eeed400ea49878482 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,6 +31,8 @@ wheels/
 *.ipynb
 .ipynb_checkpoints
 .mypy_cache
+.apollon-devel/
+notebooks/
 
 # PyInstaller
 #  Usually these files are written by a python script from a template
@@ -69,7 +71,8 @@ instance/
 .scrapy
 
 # Sphinx documentation
 docs/_build/
+docs/source/_build/
 
 # PyBuilder
 target/
@@ -90,7 +93,7 @@ celerybeat-schedule
 .env
 
 # virtualenv
-.venv
+.venv*
 venv/
 ENV/
 
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2fe6b94a5f1647858be90fb0838d57825000a789
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,8 @@
+build:
+  image: python:latest
+  before_script:
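+    # numpy appears to be a build-time dependency of apollon, so install it first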
+    - pip install numpy
+    - which python
+  script:
+    - pip install .
diff --git a/.pylintrc b/.pylintrc
index 8258ce3017043bf1ac43a1701663b27136f59a70..b64bb376cbc80f2722f0e650fadb46cb3a31e8a7 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -414,16 +414,7 @@ function-naming-style=snake_case
 #function-rgx=
 
 # Good variable names which should always be accepted, separated by a comma.
-good-names=i,
-           j,
-           k,
-           ex,
-           Run,
-           _,
-	   m,
-           X,
-           N,
-	   ax    # matplotlib axis
+good-names=_, ax, i, j, k, m, n, t, x, y, z
 
 # Include a hint for the correct naming format with invalid-name.
 include-naming-hint=no
@@ -528,10 +519,10 @@ valid-metaclass-classmethod-first-arg=cls
 [DESIGN]
 
 # Maximum number of arguments for function / method.
-max-args=5
+max-args=8    # original was 5
 
 # Maximum number of attributes for a class (see R0902).
-max-attributes=7
+max-attributes=10    # original was 7
 
 # Maximum number of boolean expressions in an if statement.
 max-bool-expr=5
diff --git a/.readthedocs.yml b/.readthedocs.yaml
similarity index 79%
rename from .readthedocs.yml
rename to .readthedocs.yaml
index 50d940da68b590a9ed12d3d7f28884278a11b3ae..14edd72ddabf18d561b629d21e20f110458b105a 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yaml
@@ -2,10 +2,8 @@
 # Read the Docs configuration file
 # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
 
-# Required
 version: 2
 
-# Build documentation in the docs/ directory with Sphinx
 sphinx:
   builder: html
   configuration: docs/source/conf.py
@@ -16,9 +14,8 @@ formats: all
 
 # Optionally set the version of Python and requirements required to build your docs
 python:
-  version: 3.7
+  version: 3.8
   install:
     - requirements: docs/requirements.txt
-    - method: pip
+    - method: setuptools
       path: .
-  system_packages: true
\ No newline at end of file
diff --git a/LICENSE b/LICENSE.txt
similarity index 100%
rename from LICENSE
rename to LICENSE.txt
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..b6cd7d63783f67c6957d6b5a85e9b034feacd40c
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,4 @@
+graft json
+include *.md
+include LICENSE.txt
+recursive-include include *.h
diff --git a/README.md b/README.md
index 03aaf0817aca4d284801ec5f60bc8cfddea720a5..0dbaa329cdb1d3b31f8aaee6bea2d60e624d125b 100644
--- a/README.md
+++ b/README.md
@@ -1,15 +1,25 @@
 # Apollon
+Apollon is a Python framework for audio feature extraction and music similarity
+estimation. It includes subpackages for
 
-Apollon is a tool for music modelling. It comprises
-* low-level audio feature extraction
-* Hidden-Markov Models
+* Audio feature extraction
+* Hidden Markov Models
 * Self-Organizing Map
 
 ## 1. Installation
-This repository. Navigate the packages root directory
-and install apollon using pip.
+### 1.1 Install from PyPI
+The latest version of apollon is available on PyPI. Open a terminal and run
+the following command to download and install apollon:
+
 ```
-cd path/to/apollon
-pip install .
+pip install apollon
 ```
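+
+To check that the installation succeeded, try importing the package:
+
+```
+python -c "import apollon"
+```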
-Note that the period on the end of the last line is necessary.
+
+## 2. Documentation
+Full [documentation](https://apollon.readthedocs.io) is available on Read the Docs.
diff --git a/apollon/analyses.py b/apollon/analyses.py
deleted file mode 100644
index c551aab4bed1ea15ae5d379eb2642c801cce98f8..0000000000000000000000000000000000000000
--- a/apollon/analyses.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-
-from . audio import load_audio
-from . io import dump_json, decode_array
-from . signal.spectral import stft, Spectrum
-from . signal.features import FeatureSpace
-from . tools import time_stamp
-from . types import PathType
-from . types import Array as _Array
-from . onsets import FluxOnsetDetector
-from . import segment
-
-def rhythm_track(file_path: PathType) -> dict:
-    """Perform rhythm track analysis of given audio file.
-
-    Args:
-        file_path:  Path to audio file.
-
-    Returns:
-        Rhythm track parameters and data.
-    """
-    snd = load_audio(file_path)
-    onsets = FluxOnsetDetector(snd.data, snd.fps)
-    segs = segment.by_onsets(snd.data, 2**11, onsets.index())
-    spctr = Spectrum(segs, snd.fps, window='hamming')
-
-    onsets_features = {
-        'peaks': onsets.peaks,
-        'index': onsets.index(),
-        'times': onsets.times(snd.fps)
-    }
-
-    track_data = {
-        'meta': {'source': file_path, 'time_stamp': time_stamp()},
-        'params': {'onsets': onsets.params(), 'spectrum': spctr.params()},
-        'features': {'onsets': onsets_features,
-                     'spectrum': spctr.extract().as_dict()}
-    }
-
-
-    return track_data
-
-
-def timbre_track(file_path: PathType) -> dict:
-    """Perform timbre track analysis of given audio file.
-
-    Args:
-        file_path:  Path to input file.
-
-    Returns:
-        Timbre track parameters and data.
-    """
-    snd = load_audio(file_path)
-    spctrgr = stft(snd.data, snd.fps, n_perseg=1024, hop_size=512)
-
-    track_data = {
-        'meta': {'source': file_path, 'time_stamp': time_stamp()},
-        'params': {'spectrogram': spctrgr.params()},
-        'features': spctrgr.extract().as_dict()
-    }
-    return track_data
diff --git a/apollon/audio.py b/apollon/audio.py
deleted file mode 100644
index 646d44eb1a231243010f48a5a1762becefec4f9a..0000000000000000000000000000000000000000
--- a/apollon/audio.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""
-apollon/audio.py -- Wrapper classes for audio data.
-
-Classes:
-    AudioFile   Representation of an audio file.
-
-Functions:
-    load_audio   Load .wav file.
-"""
-import pathlib as _pathlib
-
-import matplotlib.pyplot as _plt
-import soundfile as _sf
-
-from . signal import tools as _ast
-from . types import PathType
-
-
-class AudioFile:
-    """Representation of an audio file.
-
-        Args:
-            path:   Path to file.
-            norm:   If True, signal will be normalized ]-1, 1[.
-            mono:   If True, mixdown all channels.
-    """
-    def __init__(self, path: PathType, norm: bool = False, mono: bool = True) -> None:
-        """Load an audio file."""
-
-        self.file = _pathlib.Path(path)
-        self.data, self.fps = _sf.read(self.file, dtype='float')
-        self.size = self.data.shape[0]
-
-        if mono and self.data.ndim > 1:
-            self.data = self.data.sum(axis=1) / self.data.shape[1]
-
-        if norm:
-            self.data = _ast.normalize(self.data)
-
-    def plot(self) -> None:
-        """Plot audio as wave form."""
-        fig = _plt.figure(figsize=(14, 7))
-        ax1 = fig.add_subplot(1, 1, 1)
-        ax1.plot(self.data)
-
-    def __str__(self):
-        return "<{}, {} kHz, {:.3} s>" \
-        .format(self.file.name, self.fps/1000, self.size/self.fps)
-
-    def __repr__(self):
-        return self.__str__()
-
-    def __len__(self):
-        return self.size
-
-    def __getitem__(self, item):
-        return self.data[item]
-
-
-def load_audio(path: PathType, norm: bool = False, mono: bool = True) -> AudioFile:
-    """Load an audio file.
-
-    Args:
-        path:   Path to audio file.
-        norm:   True if data should be normalized.
-        mono:   If True, mixdown channels.
-
-    Return:
-        Audio file representation.
-    """
-    return AudioFile(path, norm, mono)
diff --git a/apollon/commands/__init__.py b/apollon/commands/__init__.py
deleted file mode 100644
index 18039fc2401993bf277409eac92ac7dc4a848a49..0000000000000000000000000000000000000000
--- a/apollon/commands/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-from . import apollon_export
-from . import apollon_features
-from . import apollon_onsets
-from . import apollon_hmm
diff --git a/apollon/commands/apollon_export.py b/apollon/commands/apollon_export.py
deleted file mode 100644
index 222665c983ddccc044a370d934253b6fddc327d1..0000000000000000000000000000000000000000
--- a/apollon/commands/apollon_export.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-import argparse
-import json
-import sys
-import typing
-
-from .. io import dump_json, decode_array
-from .. signal.spectral import stft
-from .. signal.features import FeatureSpace
-from .. tools import time_stamp
-from .. types import PathType
-
-
-def _parse_cml(argv):
-    parser = argparse.ArgumentParser(description='Apollon feature extraction engine')
-
-    parser.add_argument('--csv', action='store_true',
-                        help='Export csv')
-
-    parser.add_argument('-o', '--outpath', action='store',
-                        help='Output file path')
-
-    parser.add_argument('csv_data', type=str, nargs=1)
-
-    return parser.parse_args(argv)
-
-
-def _export_csv(data: typing.Dict[str, typing.Any], path: PathType = None) -> None:
-
-    fspace = json.loads(data, object_hook=decode_array)
-    fspace = FeatureSpace(**fspace)
-    fspace.to_csv(path)
-
-
-def main(argv=None):
-    if argv is None:
-        argv = sys.argv[1:]
-
-    args = _parse_cml(argv)
-
-    if args.csv:
-        _export_csv(args.csv_data[0], args.outpath)
-        return 0
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/apollon/commands/apollon_features.py b/apollon/commands/apollon_features.py
deleted file mode 100644
index 0258a9b20290fe479c62d09fbc8b45fffc5331a9..0000000000000000000000000000000000000000
--- a/apollon/commands/apollon_features.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-
-import argparse
-import json
-import sys
-import typing
-
-from .. import analyses
-from .. types import PathType
-from .. import io
-from .. signal.features import FeatureSpace
-
-def _export_csv(
-        data: typing.Dict[str, typing.Any],
-        path: PathType = None) -> None:
-    """Write a feature space to a csv file."""
-    fspace = json.loads(data, object_hook=io.decode_array)
-    fspace = FeatureSpace(**fspace)
-    fspace.to_csv(path)
-
-
-def main(args: argparse.Namespace) -> int:
-    if args.export:
-        if args.export == 'csv':
-            _export_csv(args.file[0], args.outpath)
-            return 0
-
-    track_data = {}
-    if args.rhythm:
-        track_data['rhythm'] = analyses.rhythm_track(args.file[0])
-
-    if args.timbre:
-        track_data['timbre'] = analyses.timbre_track(args.file[0])
-
-    if args.pitch:
-        track_data['pitch'] = analyses.pitch_track(args.file[0])
-
-    io.dump_json(track_data, args.outpath)
-
-    return 0
-
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/apollon/commands/apollon_hmm.py b/apollon/commands/apollon_hmm.py
deleted file mode 100644
index a5fa167681e92e9feddf2e7e5daec16258744e5d..0000000000000000000000000000000000000000
--- a/apollon/commands/apollon_hmm.py
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env python3
-
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-
-import argparse
-import json
-import pathlib
-import sys
-import typing
-
-from .. import io
-from .. hmm import PoissonHmm
-from .. types import Array as _Array
-
-
-def _load_track_file(track_file: str) -> dict:
-    track_file = pathlib.Path(track_file)
-    with track_file.open('r') as fobj:
-        track_data = json.load(fobj, object_hook=io.decode_array)
-    return track_data
-
-
-def _parse_feature(track_data: dict, feature_path: str) -> _Array:
-    feature = track_data
-    for key in feature_path.split('.'):
-        try:
-            feature = feature[key]
-        except KeyError:
-            print('Error. Invalid node "{}" in feature path.'.format(key))
-            exit(10)
-    return feature
-
-
-def _generate_outpath(in_path, out_path: str, feature_path: str) -> pathlib.Path:
-    in_path = pathlib.Path(in_path)
-    default_fname = '{}.hmm'.format(in_path.stem)
-    if out_path is None:
-        out_path = pathlib.Path(default_fname)
-    else:
-        out_path = pathlib.Path(out_path)
-        if not out_path.suffix:
-            out_path = out_path.joinpath(default_fname)
-        if not out_path.parent.is_dir():
-            print('Error. Path "{!s}" does not exist.'.format(out_path.parent))
-            exit(10)
-    return out_path
-
-
-def _train_n_hmm(data: _Array, m_states: int, n_trails: int):
-    """Train ``n_trails`` HMMs, each initialized with a random tpm.
-
-    Args:
-        data:      Possibly unprocessed input data set.
-        m_states:  Number of states.
-        n_trails:  Number of training trials.
-
-    Returns:
-        Best model with respect to log-likelihood.
-    """
-    feat = data.round().astype(int)
-    trails = []
-    for i in range(n_trails):
-        hmm = PoissonHmm(feat, m_states, init_gamma='softmax')
-        hmm.fit(feat)
-        if hmm.success:
-            trails.append(hmm)
-
-    if len(trails) == 0:
-        return None
-    return min(trails, key=lambda hmm: abs(hmm.quality.nll))
-
-
-def main(argv=None) -> int:
-    if argv is None:
-        argv = sys.argv
-
-    for trf in argv.track_files:
-        track_data = _load_track_file(trf)
-        feature = _parse_feature(track_data, argv.feature_path)
-        hmm = _train_n_hmm(feature, argv.mstates, 5)
-        if hmm is None:
-            print('Error. Could not train HMM on {}'.format(trf))
-            continue
-        out_path = _generate_outpath(trf, argv.outpath, argv.feature_path)
-        io.dump_json(hmm.to_dict(), out_path)
-    return 0
-
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/apollon/commands/apollon_onsets.py b/apollon/commands/apollon_onsets.py
deleted file mode 100644
index a01f509eab0ddbc5ad472f296038fc0c2b67155e..0000000000000000000000000000000000000000
--- a/apollon/commands/apollon_onsets.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-import argparse
-import multiprocessing as mp
-import sys
-
-from .. import onsets
-
-
-def _parse_cml(argv):
-    parser = argparse.ArgumentParser(description='Apollon onset detection engine')
-
-    parser.add_argument('--amplitude', action='store_true',
-                        help='Detect onsets based on local extrema in the time domain signal.')
-
-    parser.add_argument('--entropy', action='store_true',
-                        help='Detect onsets based on time domain entropy maxima.')
-
-    parser.add_argument('--flux', action='store_true',
-                        help='Detect onsets based on spectral flux.')
-
-    parser.add_argument('-o', '--outpath', action='store',
-                        help='Output file path.')
-
-    parser.add_argument('filepath', type=str, nargs=1)
-    return parser.parse_args(argv)
-
-
-def _amp(a):
-    print('Amplitude')
-    return a
-
-def _entropy(a):
-    print('Entropy')
-    return a
-
-def _flux(a):
-    print('Flux')
-    return a
-
-
-def main(argv=None):
-    if argv is None:
-        argv = sys.argv[1:]
-
-    args = _parse_cml(argv)
-
-    detectors = {'amplitude': _amp,
-                 'entropy': _entropy,
-                 'flux': _flux}
-
-    methods = [func for name, func in detectors.items() if getattr(args, name)]
-    if len(methods) == 0:
-        print('At least one detection method required. Aborting.')
-        return 1
-
-    with mp.Pool(processes=3) as pool:
-        results = [pool.apply_async(meth, (i,)) for i, meth in enumerate(methods)]
-        out = [res.get() for res in results]
-    return out
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/apollon/fractal.py b/apollon/fractal.py
deleted file mode 100644
index 2e5ac354cce3dbe0e8f71d2f51d59ab2aa4a535d..0000000000000000000000000000000000000000
--- a/apollon/fractal.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""apollon/fractal.py
-
-Tools for estimating fractal dimensions.
-
-Functions:
-    correlation_dimension   Estimate the correlation dimension.
-    embedding               Pseudo-phase space embedding.
-    embedding_entropy       Entropy of a pps embedding.
-    lorenz_attractor        Simulate the Lorenz system.
-"""
-
-import numpy as _np
-from scipy import stats as _stats
-from scipy.spatial import distance as _distance
-
-
-def correlation_dimension(data, tau, m, r, mode='cut', fit_n_points=10):
-    """Compute an estimate of the correlation dimension D_2.
-
-    TODO:
-        - Implement algo for linear region detection
-        - Implement orbital delay parameter \gamma
-        - Implement multiprocessing
-        - Find a way to use L_\inf norm with distance.pdist
-
-    Args:
-        data    (1d array)  Input time series.
-        tau     (int)       Reconstruction delay.
-        m       (iterable)  of embedding dimensions
-        r       (iterable)  of radii
-        mode    (str)       See doc of `embedding`.
-
-    Returns:
-        lCrm    (array) Logarithm of correlation sums given r_i.
-        lr      (array) Logarithm of radii.
-        d2      (float) Estimate of correlation dimension.
-    """
-    N = data.size
-    sd = data.std()
-
-    M = len(m)
-
-    lr = _np.log(r)
-    Nr = len(r)
-
-    # output arrays
-    lCrm = _np.zeros((M, Nr))    # Log correlation sum given `r` at dimension `m`
-    D2m = _np.zeros(M)           # Corr-dim estimate at embdedding `m`
-
-    # iterate over each embedding dimension
-    for i, mi in enumerate(m):
-
-        # compute embedding
-        emb = embedding(data, tau, mi, mode)
-
-        # compute distance matrix
-        # we should use L_\inf norm here
-        pairwise_distances = _distance.squareform(
-            _distance.pdist(emb.T, metric='euclidean'))
-
-        # compute correlation sums
-        Cr = _np.array([_np.sum(pairwise_distances < ri) for ri in r],
-                       dtype=float)
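-        # normalize by the number of ordered point pairs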
-        Cr *= 1 / (N * (N-1))
-
-        # transform sums to log domain
-        lCrm[i] = _np.log(Cr)
-
-        # fit a first-order polynomial to the log-log correlation sums
-        cde, inter = _np.polyfit(lr, lCrm[i], 1)
-        D2m[i] = cde
-
-    return lCrm, lr, D2m
-
-
-def embedding(inp_sig, tau, m=2, mode='zero'):
-    """Generate n-dimensional pseudo-phase space embedding.
-
-    Params:
-        inp_sig    (iterable) Input signal.
-        tau        (int) Time shift.
-        m          (int) Embedding dimensions.
-        mode       (str) Either `zero` for zero padding,
-                                `wrap` for wrapping the signal around, or
-                                `cut`, which cuts the signal at the edges.
-                         Note: In cut-mode, each dimension is only
-                               len(sig) - tau * (m - 1) samples long.
-    Return:
-        (np.ndarray) of shape
-                        (m, len(inp_sig)) in modes 'wrap' or 'zeros', or
-                        (m, len(sig) - tau * (m - 1)) in cut-mode.
-    """
-    inp_sig = _np.atleast_1d(inp_sig)
-    N = len(inp_sig)
-
-    if mode == 'zero':
-        # perform zero padding
-        out = _np.zeros((m, N))
-        out[0] = inp_sig
-        for i in range(1, m):
-            out[i, tau*i:] = inp_sig[:-tau*i]
-
-    elif mode == 'wrap':
-        # wraps the signal around at the bounds
-        out = _np.empty((m, N))
-        for i in range(m):
-            out[i] = _np.roll(inp_sig, i*tau)
-
-    elif mode == 'cut':
-        # cut every index beyond the bounds
-        Nm = N - tau * (m-1)    # number of vectors
-        if Nm < 1:
-            raise ValueError('Embedding params too large for input.')
-        out = _np.empty((m, Nm))
-        for i in range(m):
-            off = N - i * tau
-            out[i] = inp_sig[off-Nm:off]
-
-    else:
-        raise ValueError('Unknown mode `{}`.'.format(mode))
-
-    return out
-
-
-def embedding_entropy(emb, bins, extent=(-1, 1)):
-    """Calculate entropy of given embedding using log_e.
-
-    Args:
-        emb    (ndarray)     Embedding.
-        bins   (int)         Number of histogram bins per axis.
-        extent (tuple)       Extent per dimension
-
-    Return:
-        (float) Entropy of pps.
-    """
-    pps, _ = _np.histogramdd(emb.T, bins, range=[extent]*emb.shape[0])
-    entropy = _stats.entropy(pps.flat) / _np.log(pps.size)
-    return entropy
-
-
-def __lorenz_system(x, y, z, s, r, b):
-    """Compute the derivatives of the Lorenz system of coupled
-       differential equations.
-
-    Params:
-        x, y, z    (float) Current system state.
-        s, r, b    (float) System parameters.
-
-    Return:
-        xyz_dot    (array) Derivatives of current system state.
-    """
-    xyz_dot = _np.array([s * (y - x),
-                         x * (r - z) - y,
-                         x * y - b * z])
-    return xyz_dot
-
-
-def lorenz_attractor(n, sigma=10, rho=28, beta=8/3,
-                     init_xyz=(0., 1., 1.05), dt=0.01):
-    """Simulate a Lorenz system with given parameters.
-
-    Params:
-        n        (int)   Number of data points to generate.
-        sigma    (float) System parameter.
-        rho      (float) System parameter.
-        beta     (float) System parameter.
-        init_xyz (tuple) Initial system state.
-        dt       (float) Step size.
-
-    Return:
-        xyz    (array) System states.
-    """
-    xyz = _np.empty((n, 3))
-    xyz[0] = init_xyz
-
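-    # integrate using the forward Euler method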
-    for i in range(n-1):
-        xyz_prime = __lorenz_system(*xyz[i], sigma, rho, beta)
-        xyz[i+1] = xyz[i] + xyz_prime * dt
-
-    return xyz
diff --git a/apollon/hmm/__init__.py b/apollon/hmm/__init__.py
deleted file mode 100644
index 64e6446d8b9daaabafa742db276ae6e6e9d5b69e..0000000000000000000000000000000000000000
--- a/apollon/hmm/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-from . poisson.poisson_hmm import PoissonHmm
diff --git a/apollon/hmm/graph/__init__.py b/apollon/hmm/graph/__init__.py
deleted file mode 100644
index 781cff0538b337a72f6d16b6d3e0bcc7b0d43a10..0000000000000000000000000000000000000000
--- a/apollon/hmm/graph/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-from . grapher import draw_matrix, draw_network, save_hmmfig
diff --git a/apollon/hmm/graph/grapher.py b/apollon/hmm/graph/grapher.py
deleted file mode 100644
index 377dd8d3286e95b5ce31d264166632054b21b4f0..0000000000000000000000000000000000000000
--- a/apollon/hmm/graph/grapher.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""
-grapher.py -- Plot graphs from HMMs.
-"""
-
-from matplotlib import cm
-from matplotlib.patches import ArrowStyle
-from matplotlib.patches import Circle
-from matplotlib.patches import ConnectionStyle
-from matplotlib.patches import FancyArrowPatch
-import matplotlib.pyplot as plt
-
-import numpy as np
-import networkx as nx
-from scipy.spatial import distance
-
-from apollon import tools
-
-
-def _prepare_fig(pos):
-    """Prepare a figure with the correct size.
-
-    Params:
-        pos    (dict) with structure {node_name_i: np.array([pos_x, pos_y])}
-                      as returned by nx.layout methods.
-    Return:
-        (Figure, AxesSubplot)
-    """
-    pos_data = np.array(list(pos.values()))
-    diameter = distance.pdist(pos_data).max()
-    dd = diameter / 2 + 1
-
-    fig = plt.figure(figsize=(7, 7), frameon=False)
-    ax = fig.add_subplot(111)
-    r = 1.5
-    ax.axis([-(dd+r), (dd+r), -(dd+r), (dd+r)])
-    ax.set_axis_off()
-
-    return fig, ax
-
-
-def _draw_nodes(G, pos, ax):
-    """Draw the nodes of a (small) networkx graph.
-
-    Params:
-        G    (nx.classes.*) a networkx graph.
-        pos  (dict)         returned by nx.layout methods.
-        ax   (AxesSubplot)  mpl axes.
-
-    Return:
-        (dict) of Circle patches.
-    """
-    #degree = np.array([deg for node, deg in G.degree], dtype=float)
-    #degree /= degree.sum()
-
-    flare_kwargs = {'alpha'    : 0.2,
-                    'edgecolor': (0, 0, 0, 1),
-                    'facecolor': None}
-
-    node_kwargs = {'alpha'    : 0.8,
-                   'edgecolor': (0, 0, 0, 1),
-                   'facecolor': None}
-
-    nodes = {}
-    node_params = zip(pos.items())
-
-    for i, (label, xy) in enumerate(pos.items()):
-        size = G.nodes[label]['size']
-        fsize = G.nodes[label]['fsize']
-        flare_kwargs['facecolor'] = 'C{}'.format(i)
-        flare = Circle(xy, size+fsize, **flare_kwargs)
-
-        node_kwargs['facecolor'] = 'C{}'.format(i)
-        node = Circle(xy, size, **node_kwargs)
-
-        ax.add_patch(flare)
-        ax.add_patch(node)
-
-        font_style = {'size':15, 'weight':'bold'}
-        text_kwargs = {'color': (0, 0, 0, .8),
-                       'verticalalignment': 'center',
-                       'horizontalalignment': 'center',
-                       'fontdict': font_style}
-        ax.text(*xy, i+1, **text_kwargs)
-
-        nodes[label] = node
-
-    return nodes
-
-
-def _draw_edges(G, pos, nodes, ax):
-    """Draw the edges of a (small) networkx graph.
-
-    Params:
-        G       (nx.classes.*)  a networkx graph.
-        pos     (dict)          returned by nx.layout methods.
-        nodes   (dict)          of Circle patches.
-        ax      (AxesSubplot)   mpl axes.
-
-    Return:
-        (dict) of Circle patches.
-    """
-    pointer = ArrowStyle.Fancy(head_width=10, head_length=15)
-    curved_edge = ConnectionStyle('arc3', rad=.2)
-
-    arrow_kwargs = {'arrowstyle': pointer,
-                    'antialiased': True,
-                    'connectionstyle': curved_edge,
-                    'edgecolor': None,
-                    'facecolor': None,
-                    'linewidth': None}
-
-    edges = {}
-    for i, (a, b, attr) in enumerate(G.edges.data()):
-        arrow_kwargs['edgecolor'] = attr['color']
-        arrow_kwargs['facecolor'] = attr['color']
-        arrow_kwargs['linewidth'] = 1.0
-
-        edge = FancyArrowPatch(pos[a], pos[b],
-                               patchA=nodes[a], patchB=nodes[b],
-                               shrinkA=5, shrinkB=5,
-                               **arrow_kwargs)
-        ax.add_patch(edge)
-        edges[(a, b)] = edge
-
-    return edges
-
-
-def _legend(G, nodes, ax):
-    """Draw the legend for a (small) nx graph.
-
-    Params:
-        G       (nx.classes.*) a networkx graph.
-        nodes   (list)         of Circle patches.
-        ax      (AxesSubplot)  mpl axes.
-
-    Return:
-        (AxesSubplot)
-    """
-    legend_kwargs = {'fancybox': True,
-                     'fontsize': 14,
-                     'bbox_to_anchor': (1.02, 1.0)}
-
-    labels = [r'$f_c = {:>9.3f}$ Hz'.format(k) for k in G.nodes.keys()]
-    legend = ax.legend(nodes.values(), labels, **legend_kwargs, borderaxespad=0)
-
-    return legend
-
-
-def draw_network(labels, tpm, delta):
-    """Draw the graph of a HMM's transition probability matrix.
-
-    Params:
-        labels  (iterable)      Labels for each state.
-        tpm     (np.ndarray)    A two-dimensional (row) stochastic matrix.
-        delta   (iterable)
-
-    Return:
-        (Figure, AxesSubplot)
-    """
-    G = nx.MultiDiGraph()
-    #scaled_tpm = np.exp(tools.scale(tpm, 0, 1.5))
-
-    for i, from_state in enumerate(labels):
-        G.add_node(from_state, fsize=np.exp(delta[i]))
-
-        for j, to_state in enumerate(labels):
-            if not np.isclose(tpm[i, j], 0.0):
-                G.add_edge(from_state, to_state,
-                           weight=tpm[i, j],
-                           color='k')
-
-    sd = np.sum([np.exp(degree) for node, degree in G.degree()])
-
-    for node, degree in G.degree():
-        G.nodes[node]['size'] = .5 + np.exp(degree) / sd
-
-    #pos = nx.layout.circular_layout(G, center=(0., 0.), scale=4)
-    pos = nx.layout.spring_layout(G, center=(0.0, 0.0), scale=4)
-
-    fig, ax = _prepare_fig(pos)
-    nodes = _draw_nodes(G, pos, ax)
-    edges = _draw_edges(G, pos, nodes, ax)
-    legend = _legend(G, nodes, ax)
-
-    return fig, ax, G
-
-
-def draw_matrix(tpm):
-    """Draw a heatmap from a transition probability matrix.
-
-    Args:
-        tpm (np.ndarray)    Two-dimensional, row-stochastic square matrix.
-
-    Returns:
-        (fig, ax, img)
-    """
-    img_kwargs = {'origin': 'upper',
-                 'interpolation': 'nearest',
-                 'aspect': 'equal',
-                 'cmap': 'viridis',
-                 'vmin': 0.0, 'vmax': 1.0}
-
-    nx, ny = tpm.shape
-
-    fig = plt.figure(figsize=(10, 10))
-    ax = fig.add_subplot(1, 1, 1)
-    img = ax.imshow(tpm, **img_kwargs)
-
-    # colorbar
-    cbar = ax.figure.colorbar(img, ax=ax)
-    cbar.ax.set_ylabel('Probability', rotation=-90, va="bottom")
-
-    # major ticks
-    ax.set_xticks(np.arange(nx))
-    ax.set_yticks(np.arange(ny))
-    ax.tick_params(which='major', top=True, bottom=False,
-                   labeltop=True, labelbottom=False)
-
-    # minor ticks (for grid)
-    ax.set_xticks(np.arange(nx)-.5, minor=True)
-    ax.set_yticks(np.arange(ny)-.5, minor=True)
-    ax.tick_params(which="minor", bottom=False, left=False)
-    ax.grid(which="minor", color="w", linestyle='-', linewidth=2)
-
-    # spines
-    ax.spines['top'].set_position(('outward', 10))
-    ax.spines['left'].set_position(('outward', 10))
-    ax.spines['bottom'].set_visible(False)
-    ax.spines['right'].set_visible(False)
-
-    # cell labels
-    font_kw = {'fontsize': 14}
-    text_kw = {'ha': 'center', 'va': 'center', 'fontdict': font_kw}
-    for i in range(nx):
-        for j in range(ny):
-            val = tpm[i, j].astype(float).round(2)
-            bc = cm.viridis(val)
-            tc = cm.viridis(1-val)
-            if np.array_equal(tc, bc):
-                tc = 'k'
-            ax.text(j, i, '{:.3f}'.format(val), **text_kw, color=tc)
-
-    return fig, ax, img
-
-
-def save_hmmfig(fig, path, **kwargs):
-    """Save the figure to file.
-
-    This saves the figure and ensures that the out-of-axes legend
-    is completely visible in the saved version.
-
-    All kwargs are passed on to plt.savefig.
-
-    Params:
-        fig     (Figure)    Figure of HMM tpm.
-        path    (str)       Path to save file.
-    """
-    fig.savefig(fname=path,
-                bbox_extra_artists=(fig.axes[0].legend_,))
diff --git a/apollon/hmm/graph/img/hubert.png b/apollon/hmm/graph/img/hubert.png
deleted file mode 100644
index 9aaa129c24420d331736dc4f89cbd65483b8977e..0000000000000000000000000000000000000000
Binary files a/apollon/hmm/graph/img/hubert.png and /dev/null differ
diff --git a/apollon/hmm/poisson/poisson_core.py b/apollon/hmm/poisson/poisson_core.py
deleted file mode 100644
index d8c2406cb5591ebd57b6cadfc94e4be3cddc6806..0000000000000000000000000000000000000000
--- a/apollon/hmm/poisson/poisson_core.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-
-"""poisson_core.py
-Core functionality for Poisson HMM.
-"""
-
-import numpy as _np
-from scipy import stats as _stats
-from scipy.special import logsumexp as _logsumexp
-import warnings
-warnings.filterwarnings("ignore")
-
-def log_poisson_fwbw(x, m, _lambda, _gamma, _delta):
-    """Compute forward and backward probabilities for Poisson HMM.
-
-    Note: this algorithm fails if `_delta` has zeros.
-
-    Params:
-        x           (np.ndarray)    One-dimensional array of integer values.
-        m           (int)           Number of states.
-        _lambda     (np.ndarray)    State-dependent means.
-        _gamma, _delta  (np.ndarray)  Transition matrix and initial distribution.
-    """
-
-    n = len(x)
-    lalpha, lbeta = _np.zeros((2, n, m))
-
-    # init forward
-    pprob = _stats.poisson.pmf(x[:, None], _lambda)
-    a_0 = _delta * pprob[0]
-
-    # normalize
-    sum_a = a_0.sum()
-    a_t = a_0 / sum_a
-
-    # scale factor in log domain
-    lscale = _np.log(sum_a)
-
-    # set first forward prob
-    lalpha[0] = _np.log(a_t) + lscale
-
-    # start recursion
-    for i in range(1, n):
-        a_t = a_t @ _gamma * pprob[i]
-        sum_a = a_t.sum()
-        a_t /= sum_a
-        lscale += _np.log(sum_a)
-        lalpha[i] = _np.log(a_t) + lscale
-
-    # init backward
-    lbeta[-1] = 0
-    b_t = _np.repeat(1/m, m)
-    lscale = _np.log(m)
-
-    # start backward recursion
-    for i in range(n-1, 0, -1):    # ugly reverse iteration in python
-        b_t = _gamma @ (pprob[i] * b_t)
-        lbeta[i-1] = _np.log(b_t) + lscale
-        sum_b = b_t.sum()
-        b_t /= sum_b
-        lscale += _np.log(sum_b)
-
-    return lalpha, lbeta, _np.log(pprob)
-
-
-def poisson_EM(x, m, theta, maxiter=1000, tol=1e-6):
-    """Estimate the parameters of an m-state PoissonHMM.
-
-    Params:
-        x           (np.ndarray)    One-dimensional array of integer values.
-        theta       (tuple)         Initial guesses (lambda, gamma, delta).
-        maxiter     (int)           Maximum number of EM iterations.
-        tol         (float)         Convergence criterion.
-    """
-    n = len(x)
-
-    this_lambda = theta[0].copy()
-    this_gamma = theta[1].copy()
-    this_delta = theta[2].copy()
-
-    next_lambda = theta[0].copy()
-    next_gamma = theta[1].copy()
-    next_delta = theta[2].copy()
-
-    for i in range(maxiter):
-
-        lalpha, lbeta, lpprob = log_poisson_fwbw(x, m, this_lambda, this_gamma, this_delta)
-
-        c = max(lalpha[-1])
-        log_likelihood = c + _logsumexp(lalpha[-1] - c)
-
-        for j in range(m):
-            for k in range(m):
-                next_gamma[j, k] *= _np.sum(_np.exp(lalpha[:n-1, j] +
-                                            lbeta[1:n, k] +
-                                            lpprob[1:n, k] -
-                                            log_likelihood))
-        next_gamma /= _np.sum(next_gamma, axis=1, keepdims=True)
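-        # rab: posterior probability of each state at each time step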
-        rab = _np.exp(lalpha + lbeta - log_likelihood)
-        next_lambda = (rab * x[:, None]).sum(axis=0) / rab.sum(axis=0)
-
-        next_delta = rab[0] / rab[0].sum()
-
-        crit = (_np.abs(this_lambda - next_lambda).sum() +
-                _np.abs(this_gamma - next_gamma).sum()  +
-                _np.abs(this_delta - next_delta).sum())
-
-        if crit < tol:
-            theta_ = (next_lambda, next_gamma, next_delta)
-            return theta_, log_likelihood, True
-        else:
-            this_lambda = next_lambda.copy()
-            this_gamma = next_gamma.copy()
-            this_delta = next_delta.copy()
-
-    theta_ = (next_lambda, next_gamma, next_delta)
-    return theta_, log_likelihood, False
-
-
-def poisson_viterbi(mod, x):
-    """Calculate the Viterbi path (global decoding) of a PoissonHMM
-       given some data x.
-
-       Params:
-            x       (array-like) observations
-            mod     (HMM-Object)
-
-        Return:
-            (np.ndarray) Most probable sequence of hidden states given x.
-    """
-    n = len(x)
-
-    # Make sure that x is an array
-    x = _np.atleast_1d(x)
-
-    # calculate the probability mass for each x_i and for each mean
-    pmf_x = _stats.poisson.pmf(x[:, None], mod.lambda_)
-
-    # allocate forward pass array
-    xi = _np.zeros((n, mod.m))
-
-    # Probabilities of observing x_0 given each state
-    probs = mod.delta_ * pmf_x[0]
-    xi[0] = probs / probs.sum()
-
-    # Iterate over the remaining observations
-    for i in range(1, n):
-        foo = _np.max(xi[i-1] * mod.gamma_, axis=1) * pmf_x[i]
-        xi[i] = foo / foo.sum()
-
-    # allocate backward pass array
-    phi = _np.zeros(n, dtype=int)
-
-    # calculate most probable state on last time step
-    phi[-1] = _np.argmax(xi[-1])
-
-    # backtrack to first time step
-    for i in range(n-2, -1, -1):
-        phi[i] = _np.argmax(mod.gamma_[phi[i+1]] * xi[i])
-
-    return phi
diff --git a/apollon/io.py b/apollon/io.py
deleted file mode 100644
index 50c35512a7e771c592f2f59783b3d7b03116ff00..0000000000000000000000000000000000000000
--- a/apollon/io.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""apollon/io.py -- General I/O functionallity.
-
-Classes:
-    ArrayEncoder            Serialize numpy array to JSON.
-    FileAccessControl       Descriptor for file name attributes.
-
-Functions:
-    array_print_opt         Set format for printing numpy arrays.
-    decode_array            Decode numpy array from JSON.
-    files_in_folder         Iterate over all files in given folder.
-    load                    Load pickled data.
-    repath                  Change path but keep file name.
-    save                    Pickle some data.
-"""
-from contextlib import contextmanager as _contextmanager
-import json as _json
-import pathlib as _pathlib
-import pickle
-import typing
-
-import numpy as _np
-
-from . import types as _types
-
-
-class ArrayEncoder(_json.JSONEncoder):
-    # pylint: disable=E0202
-    # Issue: False positive for E0202 (method-hidden) #414
-    # https://github.com/PyCQA/pylint/issues/414
-    """Encode np.ndarrays to JSON.
-
-    Simply set the `cls` parameter of the dump method to this class.
-    """
-    def default(self, o):
-        """Custom default JSON encoder. Properly handles numpy arrays and falls back to
-        JSONEncoder.default for all other types.
-
-        Params:
-            o (any)  Object to encode.
-
-        Returns:
-            (dict)
-        """
-        if isinstance(o, _np.ndarray):
-            out = {'__ndarray__': True,
-                   '__dtype__': o.dtype.str,
-                   'data': o.astype('float64').tolist()}
-            return out
-        return _json.JSONEncoder.default(self, o)
-
-
-def decode_array(json_data: dict) -> typing.Any:
-    """Properly decodes numpy arrays from a JSON data stream.
-
-    Pass this function as the ``object_hook`` argument of ``json.load`` or ``json.loads``.
-
-    Args:
-        json_data (dict)    JSON formatted dict to decode.
-
-    Returns:
-        (any)
-    """
-    if '__ndarray__' in json_data and '__dtype__' in json_data:
-        return _np.array(json_data['data'], dtype=json_data['__dtype__'])
-    return json_data
-
-
-class PoissonHmmEncoder(ArrayEncoder):
-    """JSON encoder for PoissonHmm.
-    """
-    def default(self, o):
-        """Custom default JSON encoder. Properly handles <class 'PoissonHMM'>.
-
-        Note: Falls back to ``ArrayEncoder`` for all types that do not implement
-        a ``to_dict()`` method.
-
-        Params:
-            o (any)  Object to encode.
-
-        Returns:
-            (dict)
-        """
-        if isinstance(o, HMM):
-            items = {}
-            for attr in o.__slots__:
-                try:
-                    items[attr] = getattr(o, attr).to_dict()
-                except AttributeError:
-                    items[attr] = getattr(o, attr)
-            return items
-        return ArrayEncoder.default(self, o)
-
-
-def dump_json(obj, path: _types.PathType = None) -> None:
-    """Write ``obj`` to JSON.
-
-    This function can handle numpy arrays.
-
-    If ``path`` is None, this function writes to stdout.  Otherwise, the encoded
-    object is written to ``path``.
-
-    Args:
-        obj  (any)         Object to be encoded.
-        path (PathType)    Output file path.
-    """
-    if path is None:
-        print(_json.dumps(obj, cls=ArrayEncoder))
-    else:
-        path = _pathlib.Path(path)
-        with path.open('w') as json_file:
-            _json.dump(obj, json_file, cls=ArrayEncoder)
-
-
-class WavFileAccessControl:
-    """Control initialization and access to the ``file`` attribute of class:``AudioData``.
-
-    This assures that the path indeed points to a file, which has to be a .wav file. Otherwise
-    an error is raised. The path to the file is saved as absolute path and the attribute is
-    read-only.
-    """
-
-    def __init__(self):
-        """Initialize the attribute registry."""
-        self.__attribute = {}
-
-    def __get__(self, obj, objtype):
-        return self.__attribute[obj]
-
-    def __set__(self, obj, file_name):
-        if obj not in self.__attribute.keys():
-            _path = _pathlib.Path(file_name).resolve()
-            if _path.exists():
-                if _path.is_file():
-                    if _path.suffix == '.wav':
-                        self.__attribute[obj] = _path
-                    else:
-                        raise IOError('`{}` is not a .wav file.'
-                                      .format(file_name))
-                else:
-                    raise IOError('`{}` is not a file.'.format(file_name))
-            else:
-                raise FileNotFoundError('`{}` does not exist.'
-                                        .format(file_name))
-        else:
-            raise AttributeError('File name cannot be changed.')
-
-    def __delete__(self, obj):
-        del self.__attribute[obj]
-
-
-@_contextmanager
-def array_print_opt(*args, **kwargs):
-    """Set print format for numpy arrays.
-
-    Thanks to unutbu:
-    https://stackoverflow.com/questions/2891790/how-to-pretty-print-a-numpy-array-without-
-    scientific-notation-and-with-given-pre
-    """
-    std_options = _np.get_printoptions()
-    _np.set_printoptions(*args, **kwargs)
-
-    try:
-        yield
-    finally:
-        _np.set_printoptions(**std_options)
-
-
-def load(path: _types.PathType) -> typing.Any:
-    """Load a pickled file.
-
-    Args:
-        path    (str) Path to file.
-
-    Returns:
-        (object) unpickled object
-    """
-    path = _pathlib.Path(path)
-    with path.open('rb') as file:
-        data = pickle.load(file)
-    return data
-
-def repath(current_path: _types.PathType, new_path: _types.PathType,
-           ext: str = None) -> _types.PathType:
-    """Change the path and keep the file name. Optionally change the extension, too.
-
-    Args:
-        current_path (str or Path)  The path to change.
-        new_path     (str or Path)  The new path.
-        ext          (str or None)  Change file extension if ``ext`` is not None.
-
-    Returns:
-        (pathlib.Path)
-    """
-    current_path = _pathlib.Path(current_path)
-    new_path = _pathlib.Path(new_path)
-    file_path = current_path.name
-
-    if ext is not None:
-        if not ext.startswith('.'):
-            ext = '.' + ext
-        file_path = current_path.stem + ext
-
-    return new_path.joinpath(file_path)
-
-
-def save(data: typing.Any, path: _types.PathType):
-    """Pickles data to path.
-
-    Args:
-        data    (Any)         Pickleable object.
-        path    (str or Path) Path to safe the file.
-    """
-    path = _pathlib.Path(path)
-    with path.open('wb') as file:
-        pickle.dump(data, file)
diff --git a/apollon/onsets.py b/apollon/onsets.py
deleted file mode 100644
index 4bb9b195e70f7a64589c58f194e066915c850cb5..0000000000000000000000000000000000000000
--- a/apollon/onsets.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""
-apollon/onsets.py -- Onset detection routines.
-
-Classes:
-    OnsetDetector           Base class for onset detection.
-    EntropyOnsetDetector    Onset detection based on phase space entropy estimation.
-    FluxOnsetDetector       Onset detection based on spectral flux.
-
-Functions:
-    peak_picking            Identify local peaks in time series.
-    evaluate_onsets         Evaluation of onset detection results given ground truth.
-"""
-from typing import Dict, Tuple
-
-import mir_eval as _me
-import numpy as _np
-import scipy.signal as _sps
-
-from . import fractal as _fractal
-from . import segment as _segment
-from . signal.spectral import stft as _stft
-from . signal.tools import trim_spectrogram as _trim_spectrogram
-from . types import Array as _Array
-
-
-class OnsetDetector:
-    """Onset detection base class.
-
-    Subclasses must implement an __init__ method that takes custom arguments and calls the
-    base class's __init__ method. Additionally, subclasses must implement a custom onset
-    detection function named _odf, which should return a one-dimensional ndarray.
-    """
-    def __init__(self):
-        self.pp_params = {'pre_window': 10, 'post_window': 10, 'alpha': .1, 'delta': .1}
-        self.align = 'center'
-
-    def _odf(self, inp: _Array) -> _Array:
-        pass
-
-    def _detect(self):
-        """Detect local maxima of the onset detection function.
-
-        Returns:
-            Position of onset as index of the odf.
-        """
-        return peak_picking(self.odf, **self.pp_params)
-
-    def index(self) -> _Array:
-        """Compute onset index.
-
-        Onset values are centered within the detection window.
-
-        Returns:
-            Onset position in samples
-        """
-        left = self.peaks * self.hop_size
-
-        if self.align == 'left':
-            return left
-
-        if self.align == 'center':
-            return left + self.n_perseg // 2
-
-        if self.align == 'right':
-            return left + self.n_perseg
-
-        raise ValueError('Unknown alignment method `{}`.'.format(self.align))
-
-    def times(self, fps: int) -> _Array:
-        """Compute the time stamp in seconds of each onset, given the sample rate.
-
-        Args:
-            fps: Sample rate.
-
-        Returns:
-            Time code of onsets.
-        """
-        return self.index() / fps
-
-
-class EntropyOnsetDetector(OnsetDetector):
-    """Detect onsets based on entropy maxima.
-
-    Args:
-        inp:        Audio signal.
-        delay:      Embedding delay.
-        m_dim:      Embedding dimension.
-        bins:       Boxes per axis.
-        n_perseg:   Length of segments in samples.
-        hop_size:   Displacement in samples.
-        smooth:     Smoothing filter length.
-    """
-    def __init__(self, inp: _Array, delay: int = 10, m_dims: int = 3, bins: int = 10,
-                 n_perseg: int = 1024, hop_size: int = 512, pp_params = None) -> None:
-        super().__init__()
-
-        self.delay = delay
-        self.m_dims = m_dims
-        self.bins = bins
-        self.n_perseg = n_perseg
-        self.hop_size = hop_size
-
-        if pp_params is not None:
-            self.pp_params = pp_params
-
-        self.odf = self._odf(inp)
-        self.peaks = self._detect()
-
-
-    def _odf(self, inp: _Array) -> _Array:
-        """Compute onset detection function as the information entropy of an ``m_dims``-dimensional
-        delay embedding per segment.
-
-        Args:
-            inp:    Audio data.
-
-        Returns:
-            Onset detection function.
-        """
-        segments = _segment.by_samples(inp, self.n_perseg, self.hop_size)
-        odf = _np.empty(segments.shape[0])
-        for i, seg in enumerate(segments):
-            emb = _fractal.embedding(seg, self.delay, self.m_dims, mode='wrap')
-            odf[i] = _fractal.embedding_entropy(emb, self.bins)
-        return _np.maximum(odf, odf.mean())
-
-
-class FluxOnsetDetector(OnsetDetector):
-    """Onset detection based on spectral flux.
-
-    Args:
-        inp:            Audio signal.
-        stft_params:    Parameters for the STFT.
-        pp_params:      Peak picking parameters.
-    """
-
-    def __init__(self, inp: _Array, fps: int, window: str = 'hamming', n_perseg: int = 2048,
-            hop_size: int = 441, cutoff=(80, 10000), n_fft: int = None, pp_params = None):
-
-        super().__init__()
-
-        self.fps = fps
-        self.window = window
-        self.n_perseg = n_perseg
-        self.hop_size = hop_size
-
-        if n_fft is None:
-            self.n_fft = n_perseg
-        else:
-            self.n_fft = n_fft
-
-        self.cutoff = cutoff
-
-        if pp_params is not None:
-            self.pp_params = pp_params
-
-        self.odf = self._odf(inp)
-        self.peaks = self._detect()
-
-
-    def _odf(self, inp: _Array) -> _Array:
-        """Onset detection function based on spectral flux.
-
-        Args:
-            inp:    Audio data.
-
-        Returns:
-            Onset detection function.
-        """
-        spctrgrm = _stft(inp, self.fps, self.window, self.n_perseg, self.hop_size)
-        sb_flux, _ = _trim_spectrogram(spctrgrm.flux(subband=True), spctrgrm.frqs, *self.cutoff)
-        odf = sb_flux.sum(axis=0)
-        return _np.maximum(odf, odf.mean())
-
-    def params(self) -> dict:
-        _params = ('window', 'n_perseg', 'hop_size', 'n_fft', 'pp_params', 'align')
-        out = {param: getattr(self, param) for param in _params}
-        out['cutoff'] = {'lower': self.cutoff[0], 'upper': self.cutoff[1]}
-        return out
-
-
-def peak_picking(odf: _Array, post_window: int = 10, pre_window: int = 10, alpha: float = .1,
-                 delta: float=.1) -> _Array:
-    """Pick local maxima from a numerical time series.
-
-    Pick local maxima from the onset detection function `odf`, which is assumed
-    to be an one-dimensional array. Typically, `odf` is the Spectral Flux per
-    time step.
-
-    Params:
-        odf:         Onset detection function, e.g., Spectral Flux.
-        post_window: Window length to consider after now.
-        pre_window:  Window length to consider before now.
-        alpha:       Smoothing factor. Must be in ]0, 1[.
-        delta:       Difference to the mean.
-
-    Return:
-        Peak indices.
-    """
-    g = [0]
-    out = []
-
-    for n, val in enumerate(odf):
-
-        # set local window
-        idx = _np.arange(n-pre_window, n+post_window+1, 1)
-        window = _np.take(odf, idx, mode='clip')
-
-        cond1 = _np.all(val >= window)
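-        # cond2: val exceeds the local mean by at least delta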
-        cond2 = val >= (_np.mean(window) + delta)
-
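-        # cond3: val is not below the adaptive threshold g, an exponentially smoothed running maximum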
-        foo = max(val, alpha*g[n] + (1-alpha)*val)
-        g.append(foo)
-        cond3 = val >= foo
-
-        if cond1 and cond2 and cond3:
-            out.append(n)
-
-    return _np.array(out)
-
-
-def evaluate_onsets(targets: Dict[str, _np.ndarray], estimates: Dict[str, _np.ndarray]) -> Tuple[float, float, float]:
-    """Evaluate onset detection performance.
-
-    This function uses the mir_eval package for evaluation.
-
-    Args:
-        targets:    Ground truth onset times, with dict keys being file names,
-                    and values being target onset time codes in seconds.
-
-        estimates:  Estimated onset times, with dict keys being file names,
-                    and values being the estimated onset time codes in seconds.
-
-    Return:
-        Precision, recall, f-measure.
-    """
-    out = []
-    for name, tvals in targets.items():
-        od_eval = _me.onset.evaluate(tvals, estimates[name])
-        out.append([i for i in od_eval.values()])
-
-    return _np.array(out)
diff --git a/apollon/segment.py b/apollon/segment.py
deleted file mode 100644
index b154fd3624437b3e45df001ca76181e2a22a7ced..0000000000000000000000000000000000000000
--- a/apollon/segment.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""apollon/segment.py -- Audio signal segmentation routines.
-"""
-
-import numpy as _np
-
-from . signal.tools import zero_padding as _zero_padding
-from . types import Array as _Array
-
-
-def _by_samples(x: _Array, n_perseg: int) -> _Array:
-    """Split `x` into segments of length `n_perseg` samples.
-
-    This function automatically applies zero padding for inputs that cannot be
-    split evenly.
-
-    Args:
-        x        (np.ndarray)    One-dimensional input array.
-        n_perseg (int)           Length of segments in samples.
-
-    Returns:
-        (np.ndarray)    Two-dimensional array of segments.
-    """
-    if not isinstance(n_perseg, int):
-        raise TypeError('Param `n_perseg` must be of type int.')
-
-    if n_perseg < 1:
-        raise ValueError('`n_perseg` out of range. Expected 1 <= n_perseg.')
-
-    fit_size = int(_np.ceil(x.size / n_perseg) * n_perseg)
-    n_ext = fit_size - x.size
-    x = _zero_padding(x, n_ext)
-
-    return x.reshape(-1, n_perseg)
-
-
-def _by_samples_with_hop(x: _Array, n_perseg: int, hop_size: int) -> _Array:
-    """Split `x` into segments of length `n_perseg` samples. Move the extraction
-    window `hop_size` samples.
-
-    This function automatically applies zero padding for inputs that cannot be
-    split evenly.
-
-    Args:
-        x        (np.ndarray)    One-dimensional input array.
-        n_perseg (int)           Length of segments in samples.
-        hop_size (int)           Hop size in samples
-
-    Returns:
-        (np.ndarray)    Two-dimensional array of segments.
-    """
-    if not (isinstance(n_perseg, int) and isinstance(hop_size, int)):
-        raise TypeError('Params must be of type int.')
-
-    if not 1 < n_perseg <= x.size:
-        raise ValueError('n_perseg out of range. Expected 1 < n_perseg <= len(x).')
-
-    if hop_size < 1:
-        raise ValueError('hop_size out of range. Expected 1 < hop_size.')
-
-    n_hops = (x.size - n_perseg) // hop_size + 1
-    n_segs = n_hops
-
-    if (x.size - n_perseg) % hop_size != 0:
-        n_segs += 1
-
-    fit_size = hop_size * n_hops + n_perseg
-    n_ext = fit_size - x.size
-    x = _zero_padding(x, n_ext)
-
-    out = _np.empty((n_segs, n_perseg), dtype=x.dtype)
-    for i in range(n_segs):
-        off = i * hop_size
-        out[i] = x[off:off+n_perseg]
-
-    return out
-
-
-def by_samples(x: _Array, n_perseg: int, hop_size: int = 0) -> _Array:
-    """Segment the input into n segments of length n_perseg and move the
-    window `hop_size` samples.
-
-    This function automatically applies zero padding for inputs that cannot be
-    split evenly.
-
-    If `hop_size` is less than one, it is reset to `n_perseg`.
-
-    Overlap in percent is calculated as ov = (n_perseg - hop_size) / n_perseg * 100.
-
-    Args:
-        x           One-dimensional input array.
-        n_perseg    Length of segments in samples.
-        hop_size    Hop size in samples. If < 1, hop_size = n_perseg.
-
-    Returns:
-        (np.ndarray)    Two-dimensional array of segments.
-    """
-    if hop_size < 1:
-        return _by_samples(x, n_perseg)
-    else:
-        return _by_samples_with_hop(x, n_perseg, hop_size)
-
-
-def by_ms(x: _Array, fs: int, ms_perseg: int, hop_size: int = 0) -> _Array:
-    """Segment the input into n segments of length ms_perseg and move the
-    window `hop_size` milliseconds.
-
-    This function automatically applies zero padding for inputs that cannot be
-    split evenly.
-
-    If `hop_size` is less than one, it is reset to `n_perseg`.
-
-    Overlap in percent is calculated as ov = (n_perseg - hop_size) / n_perseg * 100.
-
-    Args:
-        x           One-dimensional input array.
-        fs          Sampling frequency.
-        ms_perseg   Length of segments in milliseconds.
-        hop_size    Hop size in milliseconds. If < 1, hop_size = n_perseg.
-
-    Returns:
-        (np.ndarray)    Two-dimensional array of segments.
-        """
-    n_perseg = fs * ms_perseg // 1000
-    hop_size = fs * hop_size // 1000
-
-    return by_samples(x, n_perseg, hop_size)
-
-
-def by_onsets(x: _Array, n_perseg: int, ons_idx: _Array, off: int = 0) -> _Array:
-    """Split input `x` into len(ons_idx) segments of length `n_perseg`.
-
-    Extraction windows start at `ons_idx[i]` + `off`.
-
-    Args:
-        x        (np.ndarray)    One-dimensional input array.
-        n_perseg (int)           Length of segments in samples.
-        ons_idx  (np.ndarray)    One-dimensional array of onset positions.
-        off      (int)           Length of offset.
-
-    Returns:
-        (np.ndarray)    Two-dimensional array of shape (len(ons_idx), n_perseg).
-    """
-    n_ons = ons_idx.size
-    out = _np.empty((n_ons, n_perseg))
-
-    for i, idx in enumerate(ons_idx):
-        pos = idx + off
-        if pos < 0:
-            pos = 0
-        elif pos >= x.size:
-            pos = x.size - 1
-
-        if pos + n_perseg >= x.size:
-            buff = x[pos:]
-            out[i] = _zero_padding(buff, n_perseg-buff.size)
-        else:
-            out[i] = x[pos:pos+n_perseg]
-
-    return out
diff --git a/apollon/signal/features.py b/apollon/signal/features.py
deleted file mode 100644
index 26ff5b0dd9a06f892c396b04ba11811834e9bcb0..0000000000000000000000000000000000000000
--- a/apollon/signal/features.py
+++ /dev/null
@@ -1,310 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""
-apollon/signal/features.py -- Feature extraction routines
-"""
-
-import json as _json
-import csv as _csv
-import sys as _sys
-from typing import Any, Dict, List, Optional, Tuple
-
-import numpy as _np
-from scipy.signal import hilbert as _hilbert
-
-from .. import segment as _segment
-from .. tools import array2d_fsum
-from .. types import Array as _Array
-from .. io import ArrayEncoder
-from .  import critical_bands as _cb
-from .  roughness import roughness
-from .  tools import trim_spectrogram
-
-
-class FeatureSpace:
-    """Container class for feature vectors."""
-    def __init__(self, **kwargs):
-        for key, val in kwargs.items():
-            if isinstance(val, dict):
-                val = FeatureSpace(**val)
-            self.update(key, val)
-
-    def update(self, key: str, val: Any) -> None:
-        """Update the FeatureSpace.
-
-        Args:
-            key:  Field name.
-            val:  Field value.
-        """
-        self.__dict__[key] = val
-
-    def items(self) -> List[Tuple[str, Any]]:
-        """Provides the the FeatureSpace's items.
-
-        Returns:
-            List of (key, value) pairs.
-        """
-        return list(self.__dict__.items())
-
-    def keys(self) -> List[str]:
-        """Provides the FeatureSpaces's keys.
-
-        Returns:
-            List of keys.
-        """
-        return list(self.__dict__.keys())
-
-    def values(self) -> List[Any]:
-        """Provides the FeatureSpace's values.
-
-        Returns:
-            List of values.
-        """
-        return list(self.__dict__.values())
-
-    def as_dict(self) -> Dict[str, Any]:
-        """Returns the FeatureSpace converted to a dict."""
-        flat_dict = {}
-        for key, val in self.__dict__.items():
-            try:
-                flat_dict[key] = val.as_dict()
-            except AttributeError:
-                flat_dict[key] = val
-        return flat_dict
-
-    def to_csv(self, path: str = None) -> None:
-        """Write FeatureSpace to csv file.
-
-        If ``path`` is ``None``, comma-separated values are written to stdout.
-
-        Args:
-            path:  Output file path.
-
-        Returns:
-            ``None``. The data is written either to ``path`` or to stdout.
-        """
-        features = {}
-        for name, space in self.items():
-            try:
-                features.update({feat: val for feat, val in space.items()})
-            except AttributeError:
-                features.update({name: space})
-
-        field_names = ['']
-        field_names.extend(features.keys())
-
-        if path is None:
-            csv_writer = _csv.DictWriter(_sys.stdout, delimiter=',', fieldnames=field_names)
-            self._write(csv_writer, features)
-        else:
-            with open(path, 'w', newline='') as csv_file:
-                csv_writer = _csv.DictWriter(csv_file, delimiter=',', fieldnames=field_names)
-                self._write(csv_writer, features)
-
-    @staticmethod
-    def _write(csv_writer, features):
-        csv_writer.writeheader()
-
-        i = 0
-        while True:
-            try:
-                row = {key: val[i] for key, val in features.items()}
-                row[''] = i
-                csv_writer.writerow(row)
-                i += 1
-            except IndexError:
-                break
-
-    def to_json(self, path: str = None) -> Optional[str]:
-        """Convert FeaturesSpace to JSON.
-
-        If ``path`` is ``None``, this method returns of the data of the
-        ``FeatureSpace`` in JSON format. Otherwise, data is written to
-        ``path``.
-
-        Args:
-            path:  Output file path.
-
-        Returns:
-            FeatureSpace as JSON-formatted string if path is not ``None``,
-            else ``None``.
-        """
-        if path is None:
-            return _json.dumps(self.as_dict(), cls=ArrayEncoder)
-
-        with open(path, 'w') as json_file:
-            _json.dump(self.as_dict(), json_file, cls=ArrayEncoder)
-
-        return None
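-
-
-# Editor's sketch (illustrative): nested dicts become nested FeatureSpaces,
-# and to_json serializes them back to plain JSON.
-# >>> fspace = FeatureSpace(spectral={'centroid': 120.5}, flux=0.7)
-# >>> fspace.spectral.centroid
-# 120.5
-# >>> fspace.to_json()
-# '{"spectral": {"centroid": 120.5}, "flux": 0.7}'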
-
-
-def spectral_centroid(inp: _Array, frqs: _Array) -> _Array:
-    """Estimate the spectral centroid frequency.
-
-    Calculation is applied to the second axis. One-dimensional
-    arrays will be promoted.
-
-    Args:
-        inp:   Input data. Each row is assumed to hold FFT bins scaled by `frqs`.
-        frqs:  One-dimensional array of FFT frequencies.
-
-    Returns:
-        Array of Spectral centroid frequencies.
-    """
-    inp = _np.atleast_2d(inp).astype('float64')
-
-    weighted_nrgy = _np.multiply(inp, frqs).sum(axis=1).squeeze()
-    total_nrgy = inp.sum(axis=1).squeeze()
-    total_nrgy[total_nrgy == 0.0] = 1.0
-
-    return _np.divide(weighted_nrgy, total_nrgy)
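-
-
-# Worked example (editor's illustration): equal energy at 100 and 300 Hz puts
-# the centroid exactly in between.
-# >>> bins = _np.array([[1.0, 0.0, 1.0]])
-# >>> float(spectral_centroid(bins, _np.array([100.0, 200.0, 300.0])))
-# 200.0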
-
-
-def spectral_flux(inp: _Array, delta: float = 1.0) -> _Array:
-    """Estimate the spectral flux
-
-    Args:
-        inp:    Input data. Each row is assumed to hold FFT bins.
-        delta:  Sample spacing.
-
-    Returns:
-        Array of Spectral flux.
-    """
-    inp = _np.atleast_2d(inp).astype('float64')
-    return _np.maximum(_np.gradient(inp, delta, axis=-1), 0).squeeze()
-
-
-def spectral_shape(inp: _Array, frqs: _Array, cf_low: float = 50,
-                   cf_high: float = 16000) -> FeatureSpace:
-    """Compute low-level spectral shape descriptors.
-
-    This function computes the first four statistical moments of
-    the input spectrum. If the input is two-dimensional, the first
-    axis is assumed to represent frequency.
-
-    The computed descriptors are:
-        - Spectral Centroid (SC)
-        - Spectral Spread (SSP)
-        - Spectral Skewness (SSK)
-        - Spectral Kurtosis (SKU)
-
-    Spectral Centroid represents the center of gravity of the spectrum.
-    It correlates well with the perception of auditory brightness.
-
-    Spectral Spread is a measure for the frequency deviation around the
-    centroid.
-
-    Spectral Skewness is a measure of spectral symmetry. For values of
-    SSK = 0 the spectral distribution is exactly symmetric. SSK > 0 indicates
-    more power in the frequency domain below the centroid and vice versa.
-
-    Spectral Kurtosis is a measure of flatness. The lower the value, the
-    flatter the distribution.
-
-    Args:
-        inp:      Input spectrum or spectrogram.
-        frqs:     Frequency axis.
-        cf_low:   Lower cutoff frequency.
-        cf_high:  Upper cutoff frequency.
-
-    Returns:
-        Spectral centroid, spread, skewness, and kurtosis.
-    """
-    if inp.ndim < 2:
-        inp = inp[:, None]
-
-    vals, frqs = trim_spectrogram(inp, frqs, cf_low, cf_high)
-
-    total_nrgy = array2d_fsum(vals)
-    total_nrgy[total_nrgy == 0.0] = 1.0    # Total energy is zero iff input signal is all zero.
-                                           # Replace these bin values with 1, so division by
-                                           # total energy will not lead to nans.
-
-    centroid = frqs @ vals / total_nrgy
-    deviation = frqs[:, None] - centroid
-
-    spread = array2d_fsum(_np.power(deviation, 2) * vals)
-    skew = array2d_fsum(_np.power(deviation, 3) * vals)
-    kurt = array2d_fsum(_np.power(deviation, 4) * vals)
-
-    spread = _np.sqrt(spread / total_nrgy)
-    skew = skew / total_nrgy / _np.power(spread, 3)
-    kurt = kurt / total_nrgy / _np.power(spread, 4)
-
-    return FeatureSpace(centroid=centroid, spread=spread, skewness=skew, kurtosis=kurt)
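-
-
-# Editor's note on the moments computed above, with X_k the spectral energy in
-# bin k, f_k its frequency, and E = sum_k X_k the total energy:
-#     SC  = sum_k f_k X_k / E
-#     SSP = sqrt(sum_k (f_k - SC)**2 X_k / E)
-#     SSK = sum_k (f_k - SC)**3 X_k / (E * SSP**3)
-#     SKU = sum_k (f_k - SC)**4 X_k / (E * SSP**4)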
-
-
-def log_attack_time(inp: _Array, fps: int, ons_idx: _Array,
-                    wlen: float = 0.05) -> _Array:
-    """Estimate the attack time of each onset and return its logarithm.
-
-    This function estimates the attack time as the duration between the
-    onset and the local maximum of the magnitude of the Hilbert transform
-    of the local window.
-
-    Args:
-        inp:      Input signal.
-        fps:      Sampling frequency.
-        ons_idx:  Sample indices of onsets.
-        wlen:     Local window length in seconds.
-
-    Returns:
-        Logarithm of the attack time.
-    """
-    wlen = int(fps * wlen)
-    segs = _segment.by_onsets(inp, wlen, ons_idx)
-    attack_time = _np.absolute(_hilbert(segs)).argmax(axis=1) / fps
-    attack_time[attack_time == 0.0] = 1.0
-    return _np.log(attack_time)
-
-
-def perceptual_shape(inp: _Array, frqs: _Array) -> FeatureSpace:
-    """Extracts psychoacoustical features from the spectrum.
-
-    Returns:
-        Loudness, roughness, and sharpness.
-    """
-    if inp.ndim < 2:
-        inp = inp[:, None]
-
-    cbrs = _cb.filter_bank(frqs) @ inp
-    loud_specific = _np.maximum(_cb.specific_loudness(cbrs),
-                                _np.finfo('float64').eps)
-    loud_total = array2d_fsum(loud_specific, axis=0)
-
-    zfn = _np.arange(1, 25)
-    sharp = ((zfn * _cb.weight_factor(zfn)) @ cbrs) / loud_total
-    rough = roughness(inp, frqs)
-
-    return FeatureSpace(loudness=loud_total, sharpness=sharp, roughness=rough)
-
-
-def loudness(inp: _Array, frqs: _Array) -> _Array:
-    """Calculate a measure for the perceived loudness from a spectrogram.
-
-    Args:
-        inp:   Magnitude spectrogram.
-        frqs:  Frequency axis of the spectrogram.
-
-    Returns:
-        Total loudness for each time instant.
-    """
-    cbrs = _cb.filter_bank(frqs) @ inp
-    return _cb.total_loudness(cbrs)
-
-
-def sharpness(inp: _Array, frqs: _Array) -> _Array:
-    """Calculate a measure for the perception of auditory sharpness from a spectrogram.
-
-    Args:
-        inp:   Two-dimensional input array. Assumed to be a magnitude spectrogram.
-        frqs:  Frequency axis of the spectrogram.
-
-    Returns:
-        Sharpness for each time instant of the spectrogram.
-    """
-    cbrs = _cb.filter_bank(frqs) @ inp
-    return _cb.sharpness(cbrs)
diff --git a/apollon/signal/makefile b/apollon/signal/makefile
deleted file mode 100644
index e12f08a39525b5f4c7ebf56b33fe2040ee6554a6..0000000000000000000000000000000000000000
--- a/apollon/signal/makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-FLAGS=-fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -arch x86_64
-WARNINGS=-Wno-unused-result -Wsign-compare -Wunreachable-code -Wall
-INCLUDE_PYTHON=-I/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m/
-INCLUDE_NUMPY=-I/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/numpy/core/include/
-
-pf: psycho_features_module.c
-	gcc -c $(FLAGS) $(WARNINGS) $(INCLUDE_PYTHON) $(INCLUDE_NUMPY) psycho_features_module.c -o psycho_features.so
diff --git a/apollon/signal/roughness.c b/apollon/signal/roughness.c
deleted file mode 100644
index e828ae88189dfbd4d8d97e5de7c5cd4005bc0d22..0000000000000000000000000000000000000000
--- a/apollon/signal/roughness.c
+++ /dev/null
@@ -1,113 +0,0 @@
-#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION
-
-#include <Python.h>
-#include <numpy/arrayobject.h>
-#include <math.h>
-
-
-double _roughness (double d_frq, double amp_1, double amp_2);
-
-
-static PyObject *
-ext_roughness (PyObject *self, PyObject *args)
-{
-    PyObject *py_spectrogram = NULL;
-    PyObject *py_frqs        = NULL;
-
-    PyArrayObject *spctrgrm = NULL;
-    PyArrayObject *frqs     = NULL;
-    PyArrayObject *rghnss   = NULL;
-    npy_intp      *shape    = NULL;
-    size_t         n_times  = 0;
-    size_t         n_frqs   = 0;
-
-    if (!PyArg_ParseTuple (args, "OO", &py_spectrogram, &py_frqs))
-    {
-        return NULL;
-    }
-
-    spctrgrm = (PyArrayObject *) PyArray_FROM_OTF (py_spectrogram, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
-    frqs     = (PyArrayObject *) PyArray_FROM_OTF (py_frqs,        NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
-
-    if (spctrgrm == NULL)
-    {
-        PyErr_SetString (PyExc_MemoryError, "Could not convert spectrogram buffer.\n");
-        Py_RETURN_NONE;
-    }
-
-    if (frqs == NULL)
-    {
-        PyErr_SetString (PyExc_MemoryError, "Could not convert frquency buffer.\n");
-        Py_RETURN_NONE;
-    }
-
-    shape   = PyArray_SHAPE (spctrgrm);
-    n_frqs  = shape[0];
-    n_times = shape[1];
-    rghnss  = (PyArrayObject *) PyArray_ZEROS (1, (npy_intp *) &n_times, NPY_DOUBLE, 0);
-
-    if (rghnss == NULL)
-    {
-        PyErr_SetString (PyExc_MemoryError, "Could not allocate spectrogram buffer.\n");
-        Py_RETURN_NONE;
-    }
-
-    double *amp_data = PyArray_DATA (spctrgrm);
-    double *frq_data = PyArray_DATA (frqs);
-    double *r_data   = PyArray_DATA (rghnss);
-
-    for (size_t t = 0; t < n_times; t++)
-    {
-        r_data[t] = 0.0f;
-
-        for (size_t i = 0; i < n_frqs - 1; i++)
-        {
-            for (size_t j = i+1; j < n_frqs; j++)
-            {
-                double d_frq = fabs (frq_data[i] - frq_data[j]);
-                if (d_frq >= 300.0f)
-                {
-                    break;
-                }
-                // printf ("t: %zu/%zu\t i: %zu/%zu\t j: %zu/%zut a1: %zu/%zu\t a2: %zu/%zu\t\n", t, n_times, i, n_frqs-1, j, n_frqs, i*n_times+t, n_times*n_frqs, j*n_times+t, n_times*n_frqs);
-                // printf ("f1: %f, f2: %f, a1: %f, a2: %f\n", frq_data[i], frq_data[j], amp_data[i*n_times+t], amp_data[j*n_times+t]);
-                r_data[t] += _roughness (d_frq, amp_data[i*n_times+t], amp_data[j*n_times+t]);
-            }
-        }
-    }
-
-    Py_INCREF (rghnss);
-    return (PyObject *) rghnss;
-}
-
-
-double _roughness (double d_frq, double amp_1, double amp_2)
-{
-    double frq_rmax = 33.0f;
-    return amp_1 * amp_2 * (d_frq / (frq_rmax * exp (-1.0))) * exp (-d_frq/frq_rmax);
-}
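-
-
-/* Editor's note (illustrative): the kernel above is maximal for partials
- * spaced frq_rmax apart. With unit amplitudes and d_frq = 33.0 it yields
- * (33 / (33 * exp(-1))) * exp(-1) = 1.0, and it decays towards zero for
- * wider spacings until the 300 Hz cutoff in ext_roughness skips the pair. */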
-
-
-static PyMethodDef
-PF_Methods[] = {
-    {"roughness", ext_roughness, METH_VARARGS,
-     "roughness(spctrgrm, frqs)"},
-
-    {NULL, NULL, 0, NULL}
-};
-
-static struct PyModuleDef
-roughness_module = {
-    PyModuleDef_HEAD_INIT,
-    "roughness",
-    NULL,
-    -1,
-    PF_Methods
-};
-
-PyMODINIT_FUNC
-PyInit_roughness (void)
-{
-    import_array ();
-    return PyModule_Create (&roughness_module);
-}
diff --git a/apollon/signal/spectral.py b/apollon/signal/spectral.py
deleted file mode 100644
index a1d01a4189a09ea3726ed3921204e2a4cbd60d2a..0000000000000000000000000000000000000000
--- a/apollon/signal/spectral.py
+++ /dev/null
@@ -1,360 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""spectral.py    (c) Michael Blaß 2016
-
-Provide easy access to frequency spectra obtained by the DFT.
-
-Classes:
-    _Spectrum_Base      Utility class
-    _Spectrum           Representation of a frequency spectrum
-
-Functions:
-    fft                 Easy to use discrete fourier transform
-"""
-import json as _json
-import matplotlib.pyplot as _plt
-import numpy as _np
-from scipy.signal import get_window as _get_window
-
-from . import features as _features
-from . import tools as _tools
-from .. types import Array as _Array
-
-
-class _Spectrum_Base:
-    def __abs__(self):
-        return _np.absolute(self.bins)
-
-    def __add__(self, other):
-        if isinstance(other, _Spectrum):
-            if self.sr == other.sr and self.n == other.n:
-                return _Spectrum(self.bins + other.bins, sr=self.sr,
-                                 n=self.n, window=self.window)
-            else:
-                raise ValueError('Spectra not compatible.')
-        else:
-            return _Spectrum(self.bins + other, sr=self.sr,
-                             n=self.n, window=self.window)
-
-    def __radd__(self, other):
-        if isinstance(other, _Spectrum):
-            if self.sr == other.sr and self.n == other.n:
-                return _Spectrum(self.bins + other.bins, sr=self.sr,
-                                 n=self.n, window=self.window)
-            else:
-                raise ValueError('Spectra not compatible.')
-        else:
-            return _Spectrum(self.bins + other, sr=self.sr,
-                             n=self.n, window=self.window)
-
-    def __sub__(self, other):
-        if isinstance(other, _Spectrum):
-            if self.sr == other.sr and self.n == other.n:
-                return _Spectrum(self.bins - other.bins, sr=self.sr,
-                                 n=self.n, window=self.window)
-            else:
-                raise ValueError('Spectra not compatible.')
-        else:
-            return _Spectrum(self.bins - other, sr=self.sr,
-                             n=self.n, window=self.window)
-
-    def __rsub__(self, other):
-        if isinstance(other, _Spectrum):
-            if self.sr == other.sr and self.n == other.n:
-                return _Spectrum(self.bins - other.bins, sr=self.sr,
-                                 n=self.n, window=self.window)
-            else:
-                raise ValueError('Spectra not compatible.')
-        else:
-            return _Spectrum(self.bins - other, sr=self.sr,
-                             n=self.n, window=self.window)
-
-    def __mul__(self, other):
-        if isinstance(other, _Spectrum):
-            if self.sr == other.sr and self.n == other.n:
-                return _Spectrum(self.bins * other.bins, sr=self.sr,
-                                 n=self.n, window=self.window)
-            else:
-                raise ValueError('Spectra not compatible.')
-        else:
-            return _Spectrum(self.bins * other, sr=self.sr,
-                             n=self.n, window=self.window)
-
-    def __rmul__(self, other):
-        if isinstance(other, _Spectrum):
-            if self.sr == other.sr and self.n == other.n:
-                return _Spectrum(self.bins * other.bins, sr=self.sr,
-                                 n=self.n, window=self.window)
-            else:
-                raise ValueError('Spectra not compatible.')
-        else:
-            return _Spectrum(self.bins * other, sr=self.sr,
-                             n=self.n, window=self.window)
-
-
-def fft(sig, window=None, n_fft=None):
-    """Return the Discrete Fourier Transform for real input.
-
-    Params:
-        sig    (array-like)    Input time domain signal
-        n_fft  (int)           FFT length
-        window (str)           Name of window function
-
-    Returns:
-        (ndarray) FFT bins.
-
-    Raises:
-        AttributeError
-    """
-    sig = _np.atleast_2d(sig).astype('float64')
-    n_sig = sig.shape[-1]
-
-    if n_fft is None:
-        n_fft = n_sig
-
-    if window is not None:
-        try:
-            win_func = getattr(_np, window)
-        except AttributeError:
-            raise AttributeError('Unknown window function `{}`.'.format(window))
-        sig = _np.multiply(sig, win_func(n_sig))
-
-    bins = _np.fft.rfft(sig, n_fft)
-    bins = _np.divide(bins, float(n_fft))
-
-    if n_fft % 2 != 0:
-        bins = _np.multiply(bins[:, :-1], 2.0)
-    else:
-        bins = _np.multiply(bins, 2.0)
-
-    return bins.squeeze()
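-
-
-# Editor's sketch (illustrative): a 1 kHz tone sampled at 8 kHz peaks in bin
-# 1000, since the bin spacing here is fps / n_fft = 1 Hz.
-# >>> t = _np.arange(8000) / 8000
-# >>> bins = fft(_np.sin(2 * _np.pi * 1000 * t))
-# >>> int(_np.abs(bins).argmax())
-# 1000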
-
-
-class Spectrum(_Spectrum_Base):
-
-    def __init__(self, inp: _Array, fps: int, n_fft: int = None,
-                 window: str = None):
-
-        self.fps = fps
-        self.n_fft = inp.shape[-1] if n_fft is None else n_fft
-        self.window = window
-        self.bins = fft(inp, self.window, self.n_fft)
-        self.frqs = _np.fft.rfftfreq(self.n_fft, 1.0/self.fps)
-
-    def __getitem__(self, key):
-        return self.bins[key]
-
-    def __len__(self):
-        return self.bins.size
-
-    def __repr__(self):
-        return ('Spectrum(fps={}, n_fft={}, window={})'
-                .format(self.fps, self.n_fft, self.window))
-
-    def params(self) -> dict:
-        return {'fps': self.fps, 'n_fft': self.n_fft, 'window': self.window}
-
-    def centroid(self, power=True):
-        if power is True:
-            inp = self.power()
-        else:
-            inp = self.abs()
-        return _features.spectral_centroid(inp, self.frqs)
-
-    def extract(self):
-        spctr = _features.spectral_shape(self.power().T, self.frqs)
-        prcpt = _features.perceptual_shape(self.abs().T, self.frqs)
-        return _features.FeatureSpace(spectral=spctr, perceptual=prcpt)
-
-    def __abs__(self):
-        return _np.absolute(self.bins)
-
-    def abs(self):
-        """Return magnitude spectrum."""
-        return self.__abs__()
-
-    def power(self):
-        """Retrun power spectrum."""
-        return _np.square(self.__abs__())
-
-    def phase(self):
-        """Return phase spectrum."""
-        return _np.angle(self.bins)
-
-
-class Spectrogram:
-    """Compute a spectrogram from an one-dimensional input signal."""
-
-    # pylint: disable=too-many-instance-attributes, too-many-arguments
-
-    __slots__ = ('inp_size', 'fps', 'window', 'n_perseg', 'hop_size', 'n_overlap', 'n_fft',
-                 'd_frq', 'd_time', 'times', 'frqs', 'bins', 'shape')
-
-    def __init__(self, inp: _Array, fps: int, window: str, n_perseg: int, hop_size: int,
-                 n_fft: int = None) -> None:
-        """Compute a spectrogram of the input data.
-
-        The input signal is segmented according to `n_perseg` and `hop_size`.
-        The FFT for real input is then applied to each segment.
-
-        If the segmentation parameters do not match the shape of the input array, the
-        array is cropped.
-
-        Args:
-            inp      (ndarray)    Input signal.
-            fps      (int)        Sampling frequency of input signal.
-            window   (str)        Name of window function.
-            n_perseg (int)        Number of samples per DFT.
-            hop_size (int)        Number of samples to shift the window.
-            n_fft    (int)        Number of FFT bins.
-        """
-        self.inp_size = inp.size
-        self.fps = fps
-        self.window = window
-        self.n_perseg = n_perseg
-        self.hop_size = hop_size
-        self.n_overlap = self.n_perseg - self.hop_size
-
-        if n_fft is None:
-            self.n_fft = self.n_perseg
-        else:
-            self.n_fft = n_fft
-
-        self.d_frq = self.fps / self.n_fft
-        self.d_time = self.hop_size / self.fps
-
-        self.times = self._compute_time_axis(inp)
-        self.frqs = _np.fft.rfftfreq(self.n_fft, 1.0/self.fps)
-        self.bins = self._compute_spectrogram(inp)
-
-
-    def _compute_time_axis(self, inp: _Array) -> _Array:
-        """Compute the time axis of the spectrogram"""
-        t_start = self.n_perseg / 2
-        t_stop = inp.size - self.n_perseg / 2 + 1
-        return _np.arange(t_start, t_stop, self.hop_size) / float(self.fps)
-
-    def _compute_spectrogram(self, inp: _Array) -> _Array:
-        """Core spectrogram computation.
-
-        Args:
-            inp (ndarray)    Input signal.
-        """
-        shp_x = (self.inp_size - self.n_overlap) // self.hop_size
-        shp_y = self.n_perseg
-
-        strd_x = self.hop_size * inp.strides[0]
-        strd_y = inp.strides[0]
-
-        inp_strided = _np.lib.stride_tricks.as_strided(inp, (shp_x, shp_y), (strd_x, strd_y))
-
-        return _np.transpose(fft(inp_strided, self.window, self.n_fft))
-
-    def abs(self):
-        """Return the magnitude spectrogram."""
-        return self.__abs__()
-
-    def power(self):
-        """Return the power spectrogram."""
-        return _np.square(self.__abs__())
-
-    def centroid(self, power=True):
-        if power is True:
-            inp = self.power()
-        else:
-            inp = self.abs()
-
-        return _features.spectral_centroid(inp.T, self.frqs)
-
-    def flux(self, subband=False):
-        flux = _features.spectral_flux(self.abs(), self.times)
-        if subband is True:
-            return flux
-        return flux.sum(axis=0)
-
-    def extract(self):
-        spctr = _features.spectral_shape(self.power(), self.frqs)
-        prcpt = _features.perceptual_shape(self.abs(), self.frqs)
-        tmpr = _features.FeatureSpace(flux=self.flux())
-        return _features.FeatureSpace(spectral=spctr, perceptual=prcpt, temporal=tmpr)
-
-    def params(self):
-        return {'window': self.window, 'n_perseg': self.n_perseg,
-                'hop_size': self.hop_size, 'n_fft': self.n_fft}
-
-    def plot(self, cmap: str = 'nipy_spectral', log_frq: float = None,
-             low: float = None, high: float = None, figsize: tuple = (14, 6),
-             cbar: bool = True ) -> tuple:
-        """Plot the spectrogram in dB scaling. The 0-frequency component
-        is omitted in plots.
-
-        Args:
-            cmap    (str)      Colormap name.
-            log_frq (float)    If None, plot the frequency axis linearly, else
-                               plot it in log domain, centered on `log_frq` Hz.
-            low     (float)    Lower frequency bound of the plot. Defaults to 50 Hz.
-            high    (float)    Upper frequency bound of the plot. Defaults to 16 kHz.
-            cbar    (bool)     Display a color scale if True.
-            figsize (tuple)    Width and height of figure.
-
-        Returns:
-            Tuple    (fig, ax)
-        """
-        fig, ax = _plt.subplots(1, figsize=figsize)
-        ax.set_xlabel('Time [s]')
-        ax.set_ylabel('Frequency [Hz]')
-
-        if low is None:
-            low = 50
-
-        if high is None:
-            high = 16000
-
-        low_idx = int(_np.floor(low/self.d_frq)) + 1
-        high_idx = int(_np.floor(high/self.d_frq))
-
-        vals = _tools.amp2db(self.abs()[low_idx:high_idx, :])
-        frq_range = self.frqs[low_idx:high_idx]
-        cmesh_frqs = _np.append(frq_range, frq_range[-1]+self.d_frq)
-        if log_frq is not None:
-            cmesh_frqs = _np.log2(cmesh_frqs/log_frq)
-
-        cmesh_times = _np.append(self.times, self.times[-1]+self.d_time)
-        cmesh = ax.pcolormesh(cmesh_times, cmesh_frqs, vals, cmap=cmap)
-
-        if cbar:
-            clr_bar = fig.colorbar(cmesh, ax=ax)
-            clr_bar.set_label('dB SPL')
-
-        return fig, ax
-
-    def __abs__(self):
-        return _np.absolute(self.bins)
-
-
-def stft(inp: _Array, fps: int, window: str = 'hanning', n_perseg: int = 512, hop_size: int = None,
-         n_fft: int = None) -> Spectrogram:
-    """Perform Short Time Fourier Transformation of `inp`
-
-    `inp` is assumed to be an one-dimensional array of real values.
-
-    Args:
-        inp      (ndarray)    Input signal.
-        fps      (int)        Sampling frequency of input signal.
-        window   (str)        Name of window function.
-        n_perseg (int)        Number of samples per DFT.
-        hop_size (int)        Number of samples to shift the window.
-        n_fft    (int)        Number of FFT bins.
-
-    Returns:
-        (Spectrogram)
-    """
-
-    # pylint: disable=too-many-arguments
-
-    if hop_size is None:
-        hop_size = n_perseg // 2
-
-    return Spectrogram(inp, fps, window, n_perseg, hop_size, n_fft)
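-
-
-# Editor's sketch (illustrative): one second of noise at 44.1 kHz with the
-# defaults (n_perseg=512, hop_size=256, n_fft=512) yields 257 frequency bins:
-# >>> sxx = stft(_np.random.randn(44100), fps=44100)
-# >>> sxx.bins.shape
-# (257, 171)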
-
diff --git a/apollon/signal/tools.py b/apollon/signal/tools.py
deleted file mode 100644
index d12725a1875a806edaefb0922d3922883b607008..0000000000000000000000000000000000000000
--- a/apollon/signal/tools.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""apollon/signal/tools.py    (c) Michael Blaß 2016
-
-Signal processing tools
-
-Functions:
-    acf                 Normalized autocorrelation.
-    acf_pearson         Normalized Pearson acf.
-    amp2db              Transform amplitude to dB.
-    corr_coef_pearson   Correlation coefficient after Pearson.
-    freq2mel            Transform frequency to mel.
-    mel2freq            Transform mel to frequency.
-    frq2bark            Transform frequency to Bark scale.
-    maxamp              Maximal amplitude of signal.
-    minamp              Minimal amplitude of signal.
-    normalize           Scale data between -1.0 and 1.0.
-    noise               Generate white noise.
-    sinusoid            Generate sinusoidal signal.
-    zero_padding        Append array with zeros.
-    trim_spectrogram    Trim spectrogram to a frequency range.
-"""
-
-
-import numpy as _np
-from scipy import stats as _stats
-
-from .. types import Array as _Array
-
-def acf(inp_sig):
-    """Normalized estimate of the autocorrelation function of `inp_sig`
-       by means of cross correlation."""
-
-    N = len(inp_sig)
-    norm = inp_sig @ inp_sig
-
-    out = _np.empty(N)
-    out[0] = 1
-    for m in range(1, N):
-        a = inp_sig[:-m]
-        b = inp_sig[m:]
-        s = a @ b
-        if s == 0:
-            out[m] = 0
-        else:
-            out[m] = s / norm
-
-    return out
-
-
-def acf_pearson(inp_sig):
-    """Normalized estimate of the autocorrelation function of `inp_sig`
-       by means of pearson correlation coefficient."""
-
-    N = len(inp_sig)
-    out = _np.empty(N-1)
-
-    out[0] = 1
-    for m in range(1, N-1):
-
-        a = inp_sig[:-m]
-        b = inp_sig[m:]
-
-        s = corr_coef_pearson(a, b)
-
-        if s == 0:
-            out[m] = 0
-        else:
-            out[m] = s
-
-    return out
-
-
-def amp2db(amp, ref: float = 20e-6) -> _Array:
-    """Transform amplitude to dB.
-
-    Return a copy of `amp` in dB scaling relative to a reference pressure
-    `ref`. The reference pressure is commonly the human hearing threshold at
-    20 micro Pa.
-
-    `amp` is supposed to be a non-negative scalar or numpy array taken from a
-    magnitude spectrum.
-
-    This function sets all values of `amp` smaller than `ref` to `ref`, hence
-    eliminating inaudible signal energy in the log domain.
-
-    Params:
-        amp    (array-like or number) Given amplitude values.
-
-    Return:
-        (ndarray)    values in dB.
-    """
-    return 20 * _np.log10(_np.maximum(amp, ref) / ref)
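-
-
-# Editor's sketch (illustrative): amplitudes at the reference pressure map to
-# 0 dB; each factor of ten above it adds 20 dB.
-# >>> float(amp2db(20e-6))
-# 0.0
-# >>> round(float(amp2db(20e-5)), 6)
-# 20.0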
-
-
-def corr_coef_pearson(x, y):
-    """Fast perason correlation coefficient."""
-    detr_x = x - _np.mean(x)
-    detr_y = y - _np.mean(y)
-
-    r_xy = _np.convolve(detr_x, detr_y[::-1], mode='valid')
-    r_xx_yy = (detr_x @ detr_x) * (detr_y @ detr_y)
-
-    return r_xy / _np.sqrt(r_xx_yy)
-
-
-def freq2mel(f):
-    """Transforms Hz to Mel-Frequencies.
-
-    Params:
-        f:    (real number) Frequency in Hz
-
-    Return:
-        (real number) Mel-Frequency
-    """
-    f = _np.atleast_1d(f)
-    return 1125 * _np.log(1 + f / 700)
-
-
-def mel2freq(z):
-    """Transforms Mel-Frequencies to Hz.
-
-    Params:
-        z:     (real number) Mel-Frequency.
-
-    Return:
-        (real number) Frequency in Hz.
-    """
-    z = _np.atleast_1d(z)
-    return 700 * (_np.exp(z / 1125) - 1)
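-
-
-# Editor's sketch (illustrative): the two transforms are mutual inverses.
-# >>> round(mel2freq(freq2mel(440.0)).item(), 6)
-# 440.0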
-
-
-def maxamp(sig):
-    """Maximal absolute elongation within the signal.
-
-    Params:
-        sig    (array-like) Input signal.
-
-    Return:
-        (scalar) Maximal amplitude.
-    """
-    return _np.max(_np.absolute(sig))
-
-
-def minamp(sig):
-    """Minimal absolute elongation within the signal.
-
-    Params:
-        sig    (array-like) Input signal.
-
-    Return:
-        (scalar) Minimal amplitude.
-    """
-    return _np.min(_np.absolute(sig))
-
-
-def noise(level, n=9000):
-    """Generate withe noise.
-
-    Params:
-        level       (float) Noise level as standard deviation of a gaussian.
-        n           (int) Length of noise signal in samples.
-
-    Return:
-        (ndarray)   White noise signal.
-    """
-    return _stats.norm.rvs(0, level, size=n)
-
-
-def normalize(sig):
-    """Normlize a signal to [-1.0, 1.0].
-
-    Params:
-        sig (np.nadarray)    Signal to normalize.
-
-    Return:
-        (np.ndarray) Normalized signal.
-    """
-    return sig / _np.max(_np.absolute(sig), axis=0)
-
-
-def sinusoid(f, amps=1, fs=9000, length=1, retcomps=False):
-    """Generate sinusoidal signal.
-
-    Params:
-        f       (iterable) Component frequencies.
-        amps    (int or iterable) Amplitude of each component in f.
-                    If `amps` is an integer, each component of f will be
-                    scaled according to `amps`. If `amps` is an iterable,
-                    each frequency will be scaled with the respective amplitude.
-        fs      (int) Sample rate.
-        length  (number) Length of signal in seconds.
-        retcomps (bool) If True, return the components of the signal,
-                    otherwise return the sum.
-
-    Return:
-        (ndarray)   Sinusoidal signal.
-    """
-    f = _np.atleast_1d(f)
-    amps = _np.atleast_1d(amps)
-
-    if f.shape == amps.shape or amps.size == 1:
-        t = _np.arange(fs*length)[:, None] / fs
-        sig = _np.sin(2*_np.pi*f*t) * amps
-    else:
-        raise ValueError('Shapes of f and amps must be equal.')
-
-    if retcomps:
-        return sig
-    else:
-        return sig.sum(axis=1)
-
-
-def zero_padding(sig, n):
-    """Append n zeros to signal. `sig` must be 1D array.
-
-    Params:
-        sig    (np.ndarray) a list of data points.
-        n      (int) number of zeros to be appended.
-
-    Return:
-        (array) zero-padded input signal.
-    """
-    container = _np.zeros(sig.size+n)
-    container[:sig.size] = sig
-    return container
-
-
-def trim_spectrogram(inp: _Array, frqs: _Array, low: float, high: float) -> _Array:
-    """Trim spectrogram and frequency array to the frequency range [low, high].
-
-    Args:
-        inp  (ndarray)    Input spectrogram.
-        frqs (ndarray)    Spectrogram frequency axis.
-        low  (float)      Lower trim boundary.
-        high (float)      Upper trim boundary.
-
-    Returns:
-        (tuple)    (trimmed_spectrogram, trimmed_frqs)
-    """
-    lower_bound = _np.maximum(low, frqs[0])
-    upper_bound = _np.minimum(high, frqs[-1])
-
-    clip_range = _np.logical_and(lower_bound <= frqs, frqs <= upper_bound)
-
-    return inp[clip_range], frqs[clip_range]
diff --git a/apollon/som/som.py b/apollon/som/som.py
deleted file mode 100644
index 4e1dbaa83d90400a2b5b53b7f7c73484e498e107..0000000000000000000000000000000000000000
--- a/apollon/som/som.py
+++ /dev/null
@@ -1,554 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-# apollon/som/som.py
-# SelfOrganizingMap module
-#
-
-import numpy as _np
-import matplotlib.pyplot as _plt
-from scipy import stats as _stats
-from scipy.spatial import distance as _distance
-
-from apollon.io import save as _save
-from apollon.som import utilities as _utilities
-from apollon.som import defaults as _defaults
-from apollon.aplot import _new_axis, _new_axis_3d
-from apollon import aplot as aplot
-
-class _som_base:
-    def __init__(self, dims, eta, nhr, n_iter, metric, mode, init_distr, seed=None):
-
-        # check dimensions
-        for d in dims:
-            if not isinstance(d, int) or not d >= 1:
-                raise ValueError('Dimensions must be integer > 0.')
-
-
-        # shape parameters
-        self.dims = dims
-        self.dx = self.dims[0]
-        self.dy = self.dims[1]
-        self.dw = self.dims[2]
-        self.shape = (self.dx, self.dy)
-
-        # total number of neurons on the map
-        self.n_N = self.dx * self.dy
-
-        # center index of the map
-        self.center = self.dx // 2, self.dy // 2
-
-        # number of iterations to perform
-        self.n_iter = n_iter
-
-        # training mode
-        self.mode = mode
-
-        # metric for similarity ratings
-        self.metric = metric
-
-        # check training parameters
-        if eta is None:
-            self.init_eta = None
-        else:
-            if (0 <= eta <= 1.):
-                self.init_eta = eta
-            else:
-                raise ValueError('eta not in [0, 1]')
-
-        if isinstance(nhr, int) and nhr > 1:
-            self.init_nhr = nhr
-            #self.final_nhr = max(self.dx, self.dy) / _defaults.nhr_scale_factor
-        else:
-            raise ValueError('Neighbourhood radius must be an int > 1.')
-
-        # Initialize the weights
-        if seed is not None:
-            _np.random.seed(seed)
-
-        # TODO: init range should be in terms of data
-        if init_distr == 'uniform':
-            self.weights = _np.random.uniform(0, 1, size=(self.n_N, self.dw))
-        elif init_distr == 'simplex':
-            self.weights = self._init_st_mat()
-
-        # Allocate array for winner histogram
-        # TODO: add array to collect for every winner the corresponding inp vector.
-        self.whist = _np.zeros(self.n_N)
-
-        # grid data for neighbourhood calculation
-        self._grid = _np.mgrid[:self.dx, :self.dy]
-        self._grid = _np.dstack(self._grid).reshape(self.n_N, 2)
-
-        # calibration
-        self.isCalibrated = False
-        self.calibration = None
-
-        # measures
-        self.quantization_error = []
-
-        # winner trajectories on Map
-        self.trajectories = []
-
-    def get_winners(self, data, argax=1):
-        """Get the best matching neurons for every vector in data.
-
-            Args:
-                data:  Input data set
-                argax: Axis used for minimization 1=x, 0=y.
-
-            Returns:
-                Indices of bmus and min dists.
-        """
-        # TODO: if the distance between an input vector and more than one
-        #       lattice neuron is the same, choose the winner randomly.
-
-        if data.ndim == 1:
-            d = _distance.cdist(data[None, :], self.weights, metric=self.metric)
-            return _np.argmin(d), _np.min(d**2, axis=1)
-        elif data.ndim == 2:
-            ds = _distance.cdist(data, self.weights, metric=self.metric)
-            return _np.argmin(ds, axis=argax), _np.sum(_np.min(ds, axis=argax)**2)
-        else:
-            raise ValueError('Wrong dimension of input data: {}'.format(data.ndim))
-
-
-    def nh_gaussian_L2(self, center, r):
-        """Compute 2D Gaussian neighbourhood around `center`. Distance between
-           center and m_i is calculate by Euclidean distance.
-        """
-        d = _distance.cdist(_np.array(center)[None, :], self._grid,
-                           metric='sqeuclidean')
-        ssq = 2 * r**2
-        return _np.exp(-d/ssq).reshape(-1, 1)
-
-
-    def _init_st_mat(self):
-        """Initialize the weights with stochastic matrices.
-
-        The rows of each n by n stochastic matrix are samples drawn from the
-        Dirichlet distribution, where n is the number of rows and cols of the
-        matrix. The diagonal elements of the matrices are set to twice the
-        probability of the remaining elements.
-        The square root of the weight vector's length must be a natural
-        number, so that the weight vector can be reshaped to a square matrix.
-        """
-        # check for square matrix
-        d = _np.sqrt(self.dw)
-        is_not_qm = bool(d - int(d))
-        if is_not_qm:
-            raise ValueError('Weight vector (len={}) must be reshapeable to square matrix.'.format(self.dw))
-        else:
-            d = int(d)
-
-        # set alpha
-        alpha = _np.full((d, d), 500)
-        _np.fill_diagonal(alpha, 1000)
-
-        # sample from dirichlet distributions
-        st_matrix = _np.hstack([_stats.dirichlet.rvs(alpha=a, size=self.n_N)
-                               for a in alpha])
-        return st_matrix
-
-
-    def calibrate(self, data, targets):
-        """Retriev for every map unit the best matching vector of the input
-        data set. Save its target value at the map units position on a
-        new array called `calibration`.
-
-        Args:
-            data:     Input data set.
-            targets:  Target labels.
-        """
-        bmiv, err = self.get_winners(data, argax=0)
-        self._cmap = targets[bmiv]
-        self.isCalibrated = True
-
-
-    def plot_calibration(self, labels=None, ax=None, cmap='plasma', **kwargs):
-        """Plot calibrated map.
-
-        Args:
-            labels:  Optional labels for the map units.
-            ax:      Axis to plot on. A new axis is created if ``None``.
-            cmap:    Matplotlib colormap name.
-        """
-        if not self.isCalibrated:
-            raise ValueError('Map not calibrated.')
-        else:
-            if ax is None:
-                fig, ax = _new_axis()
-            ax.set_title('Calibration')
-            ax.set_xlabel('# units')
-            ax.set_ylabel('# units')
-            ax.imshow(self._cmap.reshape(self.dx, self.dy), origin='lower',
-                      cmap=cmap)
-            #return ax
-
-
-    def plot_datamap(self, data, targets, interp='None', marker=False,
-                     cmap='viridis', **kwargs):
-        """Represent the input data on the map by retrieving the best
-        matching unit for every element in `data`. Mark each map unit
-        with the corresponding target value.
-
-        Args:
-            data:    Input data set.
-            targets: Class labels or values.
-            interp:  matplotlib interpolation method name.
-            marker:  Plot markers in bmu position if True.
-
-        Returns:
-           axis, umatrix, bmu_xy
-        """
-        ax, udm = self.plot_umatrix(interp=interp, cmap=cmap, **kwargs)
-
-        #
-        # TODO: Use .transform() instead
-        #
-        bmu, err = self.get_winners(data)
-
-        x, y = _np.unravel_index(bmu, (self.dx, self.dy))
-        fd = {'color':'#cccccc'}
-        if marker:
-            ax.scatter(y, x, s=40, marker='x', color='r')
-
-        for i, j, t in zip(x, y, targets):
-            ax.text(j, i, t, fontdict=fd,
-                    horizontalalignment='center',
-                    verticalalignment='center')
-        return (ax, udm, (x, y))
-
-
-    def plot_qerror(self, ax=None, **kwargs):
-        """Plot quantization error."""
-        if ax is None:
-            fig, ax = _new_axis(**kwargs)
-
-        ax.set_title('Quantization Errors per iteration')
-        ax.set_xlabel('# iteration')
-        ax.set_ylabel('Error')
-
-        ax.plot(self.quantization_error, lw=3, alpha=.8,
-                label='Quantization error')
-
-
-    def plot_umatrix(self, interp='None', cmap='viridis', ax=None, **kwargs):
-        """Plot unified distance matrix.
-
-        The unified distance matrix (udm) makes it possible to visualize
-        weight matrices of high-dimensional weight vectors. The entry (x, y)
-        of the udm corresponds to the arithmetic mean of the distances
-        between weight vector (x, y) and its 4-neighbourhood.
-
-        Args:
-            interp:   matplotlib interpolation method name.
-            cmap:     Matplotlib colormap name.
-            ax:       Provide custom axis object.
-
-        Returns:
-            axis, umatrix
-        """
-        if ax is None:
-            fig, ax = aplot._new_axis()
-        udm = _utilities.umatrix(self.weights, self.shape, metric=self.metric)
-
-        ax.set_title('Unified distance matrix')
-        ax.set_xlabel('# units')
-        ax.set_ylabel('# units')
-        ax.imshow(udm, interpolation=interp, cmap=cmap, origin='lower')
-        return ax, udm
-
-
-    def plot_umatrix3d(self, w=1, cmap='viridis', **kwargs):
-        """Plot the umatrix in 3d. The color on each unit (x, y) represents its
-           mean distance to all direct neighbours.
-
-        Args:
-            w: Neighbourhood width.
-
-        Returns:
-            axis, umatrix
-        """
-        fig, ax = _new_axis_3d(**kwargs)
-        udm = _utilities.umatrix(self.weights, self.shape, metric=self.metric)
-        X, Y = _np.mgrid[:self.dx, :self.dy]
-        ax.plot_surface(X, Y, udm, cmap=cmap)
-        return ax, udm
-
-
-    def plot_features(self, figsize=(8, 8)):
-        """Values of each feature of the weight matrix per map unit.
-
-        This currently works only for weight vectors whose length is a
-        square number.
-
-        Args:
-            figsize:  Size of the figure.
-        """
-        d = _np.sqrt(self.dw).astype(int)
-        rweights = self.weights.reshape(self.dims)
-
-        fig, _ = _plt.subplots(d, d, figsize=figsize, sharex=True, sharey=True)
-        for i, ax in enumerate(fig.axes):
-            ax.axison = False
-            ax.imshow(rweights[..., i], origin='lower')
-
-
-    def plot_whist(self, interp='None', ax=None, **kwargs):
-        """Plot the winner histogram.
-
-        The darker the color on position (x, y) the more often neuron (x, y)
-        was chosen as winner. The number of winners at edge neurons is
-        orders of magnitude higher than on the rest of the map. Thus, the
-        histogram is shown in log-mode.
-
-        Args:
-            interp: matplotlib interpolation method name.
-            ax:     Provide custom axis object.
-
-        Returns:
-            The axis.
-        """
-        if ax is None:
-            fig, ax = _new_axis(**kwargs)
-        ax.imshow(_np.log1p(self.whist.reshape(self.dx, self.dy)),
-                  vmin=0, cmap='Greys', interpolation=interp, origin='lower')
-        return ax
-
-
-    def save(self, path):
-        """Save som object to file using pickle.
-
-        Args:
-            path: Save SOM to this path.
-        """
-        _save(self, path)
-
-
-    def transform(self, data, flat=True):
-        """Transform input data to feature space.
-
-        Args:
-            data:  2d array of shape (N_vect, N_features).
-            flat:  Return the flat index if True, else the 2d multi index.
-
-        Returns:
-            Position of each data item in the feature space.
-        """
-        bmu, err = self.get_winners(data)
-
-        if flat:
-            return bmu
-
-        else:
-            midx = _np.unravel_index(bmu, (self.dx, self.dy))
-            return _np.array(midx)
-
-
-    def inspect(self):
-        fig = _plt.figure(figsize=(12, 5))
-        ax1 = _new_axis(sp_pos=(1, 3, 1), fig=fig)
-        ax2 = _new_axis(sp_pos=(1, 3, 2), fig=fig)
-        ax3 = _new_axis(sp_pos=(1, 3, 3), fig=fig)
-
-        _, _ = self.plot_umatrix(ax=ax1)
-
-        if self.isCalibrated:
-            _ = self.plot_calibration(ax=ax2)
-        else:
-            _ = self.plot_whist(ax=ax2)
-
-        self.plot_qerror(ax=ax3)
-
-
-
-class SelfOrganizingMap(_som_base):
-
-    def __init__(self, dims=(10, 10, 3), eta=.8, nh=5, n_iter=100,
-                 metric='euclidean', mode='incremental', init_distr='simplex',
-                 seed=None):
-
-        super().__init__(dims, eta, nh, n_iter, metric, mode, init_distr, seed)
-
-        #
-        # TODO: Implement mechanism to choose nh_function
-        #
-        self._neighbourhood = self.nh_gaussian_L2
-
-    def _incremental_update(self, data_set, c_eta, c_nhr):
-        total_qE = 0
-        for fv in data_set:
-            bm_units, c_qE = self.get_winners(fv)
-            total_qE += c_qE
-
-            # update activation map
-            self.whist[bm_units] += 1
-            self.trajectories.append(bm_units)
-
-            # get bmu's multi index
-            bmu_midx = _np.unravel_index(bm_units, self.shape)
-
-            # calculate neighbourhood over bmu given current radius
-            c_nh = self._neighbourhood(bmu_midx, c_nhr)
-
-            # update lattice
-            self.weights += c_eta * c_nh * (fv - self.weights)
-        self.quantization_error.append(total_qE)
-
-
-    def _batch_update(self, data_set, c_nhr):
-        # get bmus for vector in data_set
-        bm_units, total_qE = self.get_winners(data_set)
-        self.quantization_error.append(total_qE)
-
-        # get bmu's multi index
-        bmu_midx = _np.unravel_index(bm_units, self.shape)
-
-        w_nh = _np.zeros((self.n_N, 1))
-        w_lat = _np.zeros((self.n_N, self.dw))
-
-        for bx, by, fv in zip(*bmu_midx, data_set):
-            # TODO:  Find a way for faster nh computation
-            c_nh = self._neighbourhood((bx, by), c_nhr)
-            w_nh += c_nh
-            w_lat += c_nh * fv
-
-        self.weights = w_lat / w_nh
-
-
-    def train_batch(self, data, verbose=False):
-        """Feed the whole data set to the network and update once
-           after each iteration.
-
-        Args:
-            data:    Input data set.
-            verbose: Print verbose messages if True.
-        """
-        # main loop
-        for (c_iter, c_nhr) in \
-            zip(range(self.n_iter),
-                _utilities.decrease_linear(self.init_nhr, self.n_iter)):
-
-            if verbose:
-                print(c_iter, end=' ')
-
-            self._batch_update(data, c_nhr)
-
-
-    def train_minibatch(self, data, verbose=False):
-        raise NotImplementedError
-
-    def train_incremental(self, data, verbose=False):
-        """Randomly feed the data to the network and update after each
-           data item.
-
-        Args:
-            data:     Input data set.
-            verbose:  Print verbose messages if True.
-        """
-        # main loop
-        for (c_iter, c_eta, c_nhr) in \
-            zip(range(self.n_iter),
-                _utilities.decrease_linear(self.init_eta, self.n_iter, _defaults.final_eta),
-                _utilities.decrease_expo(self.init_nhr, self.n_iter, _defaults.final_nhr)):
-
-            if verbose:
-                print('iter: {:2} -- eta: {:<5} -- nh: {:<6}' \
-                 .format(c_iter, _np.round(c_eta, 4), _np.round(c_nhr, 5)))
-
-            # always shuffle data
-            self._incremental_update(_np.random.permutation(data), c_eta, c_nhr)
-
-
-    def fit(self, data, verbose=False):
-        """Train the SOM on the given data set."""
-
-        if self.mode == 'incremental':
-            self.train_incremental(data, verbose)
-
-        elif self.mode == 'batch':
-            self.train_batch(data, verbose)
-
-
-    def predict(self, data):
-        """Predict a class label for each item in input data. SOM needs to be
-        calibrated in order to predict class labels.
-        """
-        if self.isCalibrated:
-            midx = self.transform(data)
-            return self._cmap[midx]
-        else:
-            raise AttributeError('SOM is not calibrated.')
-
-
-#from apollon.hmm.poisson_hmm import hmm_distance
-
-class DotSom(_som_base):
-    def __init__(self, dims=(10, 10, 3), eta=.8, nh=8, n_iter=10,
-                 metric='euclidean', mode=None, init_distr='uniform', seed=None):
-        """ This SOM assumes a stationary PoissonHMM on each unit. The weight vector
-        represents the HMMs distribution parameters in the following order
-        [lambda1, ..., lambda_m, gamma_11, ... gamma_mm]
-
-        Args:
-            dims    (tuple) dx, dy, m
-        """
-        super().__init__(dims, eta, nh, n_iter, metric, mode, init_distr, seed)
-        self._neighbourhood = self.nh_gaussian_L2
-
-    def get_winners(self, data, argax=1):
-        """Get the best matching neurons for every vector in data.
-
-        Args:
-            data:  Input data set
-            argax: Axis used for minimization 1=x, 0=y.
-
-        Returns:
-            Indices of bmus and min dists.
-        """
-        # TODO: if the distance between an input vector and more than one
-        #       lattice neuron is the same, choose the winner randomly.
-
-        d = _np.inner(data, self.weights)
-        return _np.argmax(d), 0
-
-
-
-    def fit(self, data, verbose=True):
-        for (c_iter, c_eta, c_nhr) in \
-            zip(range(self.n_iter),
-                _utilities.decrease_linear(self.init_eta, self.n_iter, _defaults.final_eta),
-                _utilities.decrease_expo(self.init_nhr, self.n_iter, _defaults.final_nhr)):
-
-            if verbose:
-                print('iter: {:2} -- eta: {:<5} -- nh: {:<6}' \
-                 .format(c_iter, _np.round(c_eta, 4), _np.round(c_nhr, 5)))
-
-            # always shuffle data
-            self._incremental_update(_np.random.permutation(data), c_eta, c_nhr)
-
-
-    def _incremental_update(self, data_set, c_eta, c_nhr):
-        total_qE = 0
-        for fv in data_set:
-            bm_units, c_qE = self.get_winners(fv)
-            total_qE += c_qE
-
-            # update activation map
-            self.whist[bm_units] += 1
-
-            # get bmu's multi index
-            bmu_midx = _np.unravel_index(bm_units, self.shape)
-
-            # calculate neighbourhood over bmu given current radius
-            c_nh = self._neighbourhood(bmu_midx, c_nhr)
-
-            # update lattice
-            u = self.weights + c_eta * fv
-            self.weights = u / _np.linalg.norm(u)
-
-        self.quantization_error.append(total_qE)
diff --git a/apollon/som/utilities.py b/apollon/som/utilities.py
deleted file mode 100644
index e17d903bcb49723ce482bc04ef3b5cd5f840a6a4..0000000000000000000000000000000000000000
--- a/apollon/som/utilities.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""apollon/som/uttilites.py
-
-(c) Michael Blaß, 2016
-
-Utilites for self.organizing maps.
-
-Functions:
-    activation_map    Plot activation map
-    distance_map      Plot a distance map
-    distance_map3d    Plot a 3d distance map
-"""
-
-import matplotlib.pyplot as _plt
-from mpl_toolkits.mplot3d import Axes3D
-import numpy as _np
-from scipy.spatial import distance as _distance
-
-import apollon.som.topologies as _topologies
-
-
-def activation_map(som, **kwargs):
-    ax = _plt.gca()
-    am = som.activation_map.reshape(som.shape[:2])
-    ax.imshow(_np.flipud(am), vmin=0, vmax=som.activation_map.max(), **kwargs)
-
-
-def decrease_linear(start, step, stop=1):
-    '''Linearly decrease `start` in `step` steps to `stop`.'''
-    if step < 1 or not isinstance(step, int):
-        raise ValueError('Param `step` must be int >= 1.')
-    elif step == 1:
-        yield start
-    else:
-        a = (stop - start) / (step-1)
-        for x in range(step):
-            yield a * x + start
-
-
-def decrease_expo(start, step, stop=1):
-    '''Exponentially decrease `start` in `step` steps to `stop`.'''
-    if step < 1 or not isinstance(step, int):
-        raise ValueError('Param `step` must be int >= 1.')
-    elif step == 1:
-        yield start
-    else:
-        b = _np.log(stop / start) / (step-1)
-        for x in range(step):
-            yield start * _np.exp(b*x)
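-
-
-# Editor's sketch (illustrative): both generators hit the same endpoints.
-# >>> list(decrease_linear(10, 5, 2))
-# [10.0, 8.0, 6.0, 4.0, 2.0]
-# >>> [round(float(x), 3) for x in decrease_expo(10, 5, 2)]
-# [10.0, 6.687, 4.472, 2.991, 2.0]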
-
-
-def umatrix(weights, dxy, metric='euclidean'):
-    """ Compute unified distance matrix.
-
-    Params:
-        weights (ndarray)    SOM weights matrix.
-        dxy (tuple)          Map dimensions (dx, dy).
-        metric (str)         Metric to use.
-
-    Return:
-        (ndarray)    unified distance matrix.
-    """
-    out = _np.empty(dxy, dtype='float64')
-
-    for i, mi in enumerate(_np.ndindex(dxy)):
-        nh_flat_idx = _topologies.vn_neighbourhood(*mi, *dxy, flat=True)
-        p = weights[i][None]
-        nh = weights[nh_flat_idx]
-        out[mi] = _distance.cdist(p, nh).sum() / len(nh)
-
-    return out / out.max()
diff --git a/apollon/tools.py b/apollon/tools.py
deleted file mode 100644
index cabb8c2eb11ae2ffe663941c577b0538c566f65c..0000000000000000000000000000000000000000
--- a/apollon/tools.py
+++ /dev/null
@@ -1,251 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""
-apollon/tools.py -- Common tool library.
-
-Functions:
-    assert_array        Raise if array does not match given params.
-
-    L1_Norm             Compute L1_Norm.
-    normalize           Return _normalized version of input.
-    in2out              Create a save path from an input path.
-    offdiag             Access to off-diagonal elements of square array.
-    rowdiag             kth array diagonal sorted by array's rows.
-    scale               Scale array between bounds.
-    smooth_stat         Return smoothed input.
-    standardize         Scale to zero mean and unit standard deviation.
-    time_stamp          Return time stamp.
-    within              Test whether val is in window.
-    within_any          Test whether val is in any of windows.
-    array2d_fsum        Sum array entries with machine precision.
-"""
-import datetime as _dt
-import math as _math
-from typing import Any, Tuple
-
-import numpy as _np
-
-from . import _defaults
-from . types import Array as _Array
-
-
-def assert_array(arr: _Array, ndim: int, size: int,     # pylint: disable=R0913
-                 lower_bound: float = -_np.inf,
-                 upper_bound: float = _np.inf,
-                 name: str = 'arr'):
-    """Raise an error if shape of `arr` does not match given arguments.
-
-    Args:
-        arr    (np.ndarray)    Array to test.
-        ndim   (int)           Expected number of dimensions.
-        size   (int)           Expected total number of elements.
-        lower_bound (float)    Lower bound for array elements.
-        upper_bound (float)    Upper bound for array elements.
-
-    Raises:
-        ValueError
-    """
-    if arr.ndim != ndim:
-        raise ValueError(('Number of dimensions of {} does not match. '
-                          'Expected {}, got {}.\n').format(name, ndim, arr.ndim))
-
-    if arr.size != size:
-        raise ValueError(('Size of {} does not match. Expected '
-                          '{}, got {}.\n').format(name, size, arr.size))
-
-    if _np.any(arr < lower_bound):
-        raise ValueError(('Elements of {} must '
-                          'be >= {}.'.format(name, lower_bound)))
-
-    if _np.any(arr > upper_bound):
-        raise ValueError(('Elements of {} must '
-                          'be <= {}.'.format(name, upper_bound)))
-
-
-def jsonify(inp: Any):
-    """Returns a representation of ``inp`` that can be serialized to JSON.
-
-    This method passes through Python objects of type dict, list, str, int
-    float, True, False, and None. Tuples will be converted to list by the JSON
-    encoder. Numpy arrays will be converted to list using thier .to_list() method.
-    On all other types, the method will try to call str() and raises
-    on error.
-
-    Args:
-        inp:    Input to be jsonified.
-
-    Returns:
-        Jsonified  input.
-    """
-    valid_types = (dict, list, tuple, str, int, float)
-    valid_vals = (True, False, None)
-
-    xx = [isinstance(inp, v_type) for v_type in valid_types]
-    yy = [inp is v_vals for v_vals in valid_vals]
-
-    if any(xx) or any(yy):
-        return inp
-
-    if isinstance(inp, _np.ndarray):
-        return inp.tolist()
-
-    return str(inp)
-
-
-#TODO Move to better place
-def L1_Norm(arr: _Array) -> float:
-    """Compute the L_1 norm of input vector `x`.
-
-    This implementation is generally faster than np.norm(arr, ord=1).
-    """
-    return _np.abs(arr).sum(axis=0)
-
-
-def normalize(arr, mode='array'):
-    """Normalize an arbitrary array_like.
-
-    Params:
-        arr   (numerical array-like) Input signal.
-        mode  (str) Normalization mode:
-                    'array' -> (default) Normalize whole array.
-                    'rows'  -> Normalize each row separately.
-                    'cols'  -> Normalize each col separately.
-    Return:
-        (array) Normalized input.
-    """
-
-    arr = _np.atleast_1d(arr)
-
-    if mode == 'array':
-        return _normalize(arr)
-
-    if mode == 'rows':
-        return _np.vstack([_normalize(row) for row in arr])
-
-    if mode == 'cols':
-        return _np.hstack([_normalize(col[:, None]) for col in arr.T])
-
-    raise ValueError('Unknown normalization mode')
-
-
-# TODO: This normalizes in [0, 1]; for audio we need [-1, 1]
-def _normalize(arr):
-    """Normalize array."""
-    arr_min = arr.min()
-    arr_max = arr.max()
-    return (arr - arr_min) / (arr_max - arr_min)
-
-
-def assert_and_pass(assert_func, arg):
-    """Call `assert_func` with `arg` and return `arg`. Additionally allow arg to be None."""
-    if arg is not None:
-        assert_func(arg)
-    return arg
-
-
-def rowdiag(v, k=0):
-    """Get or set k'th diagonal of square matrix.
-
-    Get the k'th diagonal of a square matrix sorted by rows
-    or construct a square matrix with the elements of v as the
-    main diagonal of the second and third dimension.
-
-    Params:
-        v    (array) Square matrix.
-        k    (int) Index of the diagonal.
-
-    Return:
-        (1d-array) Values of diagonal.
-    """
-    return _np.diag(v, k)[:, None]
-
-
-def scale(x, new_min=0, new_max=1, axis=-1):
-    """Scale `x` between `new_min` and `new_max`.
-
-    Params:
-        x         (np.array)          Array to be scaled.
-        new_min   (real numerical)    Lower bound.
-        new_max   (real numerical)    Upper bound.
-
-    Return:
-        (np.ndarray)    Array of transformed values.
-    """
-    xmax = x.max(axis=axis, keepdims=True)
-    xmin = x.min(axis=axis, keepdims=True)
-
-    a = (x-xmin) / (xmax - xmin)
-    y = a * (new_max - new_min) + new_min
-
-    return y
-
-
-def smooth_stat(sig):
-    """Smooth the signal based on its mean and standard deviation.
-
-    Params:
-        sig    (array-like) Input signal.
-
-    Return:
-        (ndarray) smoothed input signal.
-    """
-    out = []
-    sig_mean = sig.mean()
-    sig_std = sig.std()
-    for i in sig:
-        if i < sig_mean - sig_std or i > sig_mean + sig_std:
-            out.append(i)
-        else:
-            out.append(sig_mean)
-
-    return _np.array(out)
-
-
-def standardize(x: _np.ndarray) -> _np.ndarray:
-    """Retrun z-transformed values of x.
-
-    Params:
-        x    (array) Input values
-
-    Return:
-        (array) z-transformed values
-    """
-    return (x - x.mean(axis=0)) / x.std(axis=0)
-
-
-def time_stamp() -> str:
-    """Return default time stamp."""
-    return _dt.datetime.now().strftime(_defaults.TIME_STAMP_FMT)
-
-
-def within(val: float, bounds: Tuple[float, float]) -> bool:
-    """Return True if x is in window.
-
-    Args:
-        val (float)    Value to test.
-
-    Retrns:
-        (bool)    True if ``val`` is within ``bounds``.
-    """
-    return bounds[0] <= val <= bounds[1]
-
-
-def within_any(val: float, windows: _Array) -> bool:
-    """Return True if x is in any of the given windows"""
-    a = windows[:, 0] <= val
-    b = val <= windows[:, 1]
-    c = _np.logical_and(a, b)
-
-    return _np.any(c)
-
-
-def array2d_fsum(arr: _Array, axis: int = 0) -> _Array:
-    """Return math.fsum along the specifyed axis."""
-    if axis == 0:
-        vals = arr.T
-    else:
-        vals = arr
-
-    return _np.array([_math.fsum(ax_slice) for ax_slice in vals])
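
The deleted tools are thin NumPy wrappers. A short usage sketch, with outputs worked out by hand from the definitions above:

import math

import numpy as np

x = np.array([2.0, 4.0, 6.0])

# scale(): min-max transform into [new_min, new_max]
a = (x - x.min()) / (x.max() - x.min())
print(a * (5 - 1) + 1)                                           # [1. 3. 5.]

# within_any(): test a value against rows of (lower, upper) windows
windows = np.array([[0.0, 1.0], [3.5, 4.5]])
print(np.any((windows[:, 0] <= 4.0) & (4.0 <= windows[:, 1])))   # True

# array2d_fsum(axis=0) is math.fsum over each column: exact where
# naive summation of ten 0.1s accumulates floating-point error
arr = np.full((10, 3), 0.1)
print([math.fsum(col) for col in arr.T])                         # [1.0, 1.0, 1.0]
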
diff --git a/apollon/types.py b/apollon/types.py
deleted file mode 100644
index a0ecc6a0d332b44a6bf0c6f6e9bd8a69483b52a3..0000000000000000000000000000000000000000
--- a/apollon/types.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
-
-"""
-"""
-import pathlib
-from typing import (Any, Dict, Generator, Iterable, List, Optional, Tuple, Union)
-import numpy as _np    # type: ignore
-
-Array = _np.ndarray    # pylint: disable = C0103
-
-ArrayOrStr = Union[Array, str]
-IterOrNone = Union[Iterable, None]
-
-ParamsType = Dict[str, Any]
-ParameterSet = Optional[ParamsType]
-ParserType = Tuple[ParamsType, List[str]]
-PathType = Union[str, pathlib.Path]
-PathGen = Generator[PathType, None, None]
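
The aliases only abbreviate common annotations. A sketch of how they were typically used; load_weights is a hypothetical function for illustration only:

import pathlib
from typing import Union

import numpy as np

Array = np.ndarray
PathType = Union[str, pathlib.Path]

def load_weights(path: PathType) -> Array:
    """Hypothetical loader; only illustrates the alias usage."""
    return np.load(pathlib.Path(path))
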
diff --git a/audio/beat_5ch.wav b/audio/beat_5ch.wav
new file mode 100644
index 0000000000000000000000000000000000000000..f27f26b668a7564616253feee261ade22b0f6102
Binary files /dev/null and b/audio/beat_5ch.wav differ
diff --git a/apollon/datasets/earthquakes.data b/datasets/earthquakes.data
similarity index 100%
rename from apollon/datasets/earthquakes.data
rename to datasets/earthquakes.data
diff --git a/apollon/datasets/earthquakes.md b/datasets/earthquakes.md
similarity index 100%
rename from apollon/datasets/earthquakes.md
rename to datasets/earthquakes.md
diff --git a/docs/Makefile b/docs/Makefile
index 69fe55ecfa9aade66e1412aef0ee7d04a9bcde86..5a1a12e83a9ec29d77f9715dc73e98a93c88b30d 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -7,6 +7,9 @@ SPHINXBUILD   = sphinx-build
 SOURCEDIR     = source
 BUILDDIR      = build
 
+clean:
+	rm -rf build/* source/generated/api/*
+
 # Put it first so that "make" without argument is like "make help".
 help:
 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@@ -16,4 +19,4 @@ help:
 # Catch-all target: route all unknown targets to Sphinx using the new
 # "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
 %: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 566c4674c0ec9cf4ab0f08b3d31dfaa19b631c2d..24fd1a73ca842bc4f075944899f8d1d4ca041ab1 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,6 +1,4 @@
-numpy
-scipy
-matplotlib
-soundfile
-sphinxcontrib_apidoc
-https://gitlab.rrz.uni-hamburg.de/bal7668/hmm/-/archive/master/hmm-master.zip
\ No newline at end of file
+numpy==1.20
+sphinx==3.5.1
+sphinx-rtd-theme==0.5.1
+sphinxcontrib-apidoc==0.3.0
diff --git a/apollon/hmm/poisson/__init__.py b/docs/source/_static/.gitkeep
similarity index 100%
rename from apollon/hmm/poisson/__init__.py
rename to docs/source/_static/.gitkeep
diff --git a/docs/source/clt.rst b/docs/source/clt.rst
deleted file mode 100644
index 591e323d21260251e6a3081c94b288d89aaf1f24..0000000000000000000000000000000000000000
--- a/docs/source/clt.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-Command line tools
-==================
-
-Apollon comes with a set of command line tools for those who do not want or
-do not need to work with the API. These utilities provide access to the most
-common use cases, that is, extracting features, training HMMs, training SOMs.
-
-The command line tools, however, cannot replace the API completely. Many
-things, like setting HMM hyperparameters, are not possible at the moment.
-
-All command line tools are invoked using the master command ``apollon``. Each
-use case is implemented as a subcommand.
-
-.. function:: apollon TRACK_FILE FEATURE_PATH [-m --mstates] [-o --outpath] 
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 83a41508e46100285fad462685a64a1495b363d0..ea543be4779aae038c0bbc998abded986e1832f0 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -1,3 +1,5 @@
+import sphinx_rtd_theme
+
 # Configuration file for the Sphinx documentation builder.
 #
 # This file only contains a selection of the most common options. For a full
@@ -10,42 +12,39 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../../src'))
 
-master_doc = 'index'    # needed for read the docs
 
 # -- Project information -----------------------------------------------------
 
-project = 'Apollon'
-copyright = '2019, Michael Blaß'
+project = 'apollon'
 author = 'Michael Blaß'
+copyright = '2019, Michael Blaß'
 
 # The full version, including alpha/beta/rc tags
+version = '0.1'
 release = '0.1.3'
 
+master_doc = 'index'
+
 
 # -- General configuration ---------------------------------------------------
+source_suffix = {'.rst': 'restructuredtext'}
+language = 'en'
+#nitpicky = True
+numfig = True
+pygments_style = 'sphinx'
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['sphinxcontrib.apidoc', 'sphinx.ext.napoleon']
-
-# apidoc config
-apidoc_module_dir = '../../apollon/'
-apidoc_output_dir = 'reference'
-apidoc_separate_modules = True
-apidoc_module_first = True
-apidoc_full = True
-
-# napoleaon config
-napoleon_numpy_docstring = False
-napoleon_google_docstring = True
-napoleon_use_param = True
-napoleon_use_rtype = True
-napoleon_use_keyword = True
+extensions = [
+    'sphinx.ext.autosummary',
+    'sphinx.ext.napoleon',
+    'sphinxcontrib.apidoc',
+    'sphinx_rtd_theme']
 
 
 # Add any paths that contain templates here, relative to this directory.
@@ -54,7 +53,7 @@ templates_path = ['_templates']
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = []
+exclude_patterns = ['_build', '.DS_Store']
 
 
 # -- Options for HTML output -------------------------------------------------
@@ -68,3 +67,30 @@ html_theme = 'sphinx_rtd_theme'
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
 html_static_path = ['_static']
+
+
+# -- Options for autosummary ------------------------------------------------
+#
+
+
+
+# -- Options for apidoc -----------------------------------------------------
+#
+apidoc_module_dir = '../../src/apollon'
+apidoc_output_dir = 'generated/api'
+apidoc_separate_modules = True
+apidoc_module_first = True
+apidoc_full = True
+
+
+# -- Options for Napoleon ---------------------------------------------------
+#
+napoleon_google_docstring = True
+napoleon_numpy_docstring = False
+napoleon_include_init_with_doc = False
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = False
+napoleon_use_ivar = True
+napoleon_use_param = True
+napoleon_use_keyword = True
+napoleon_use_rtype = True
diff --git a/docs/source/download.rst b/docs/source/download.rst
new file mode 100644
index 0000000000000000000000000000000000000000..65a72583bfb3e56849f9f4012ee6a6f3de3918b4
--- /dev/null
+++ b/docs/source/download.rst
@@ -0,0 +1,11 @@
+****************************************
+Download
+****************************************
+You can either download the source code from the `apollon GitHub repository`_
+or clone it directly with
+
+.. code-block:: Bash
+
+   git clone https://github.com/teagum/apollon.git
+
+.. _apollon GitHub repository: https://github.com/teagum/apollon.git
diff --git a/docs/source/framework.rst b/docs/source/framework.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f7741e49ed5b808300d3c9034e0c8cdfed0cdc9c
--- /dev/null
+++ b/docs/source/framework.rst
@@ -0,0 +1,23 @@
+****************************************
+Framework
+****************************************
+
+.. _fw-fe:
+
+Audio Feature Extraction
+========================================
+Extract some of the most common low-level audio features.
+
+
+.. _fw-hmm:
+
+Hidden Markov Model
+========================================
+Estimate Poisson-distributed Hidden Markov Models.
+
+
+.. _fw-som:
+
+Self-Organizing Map
+========================================
+Train some self-organizing maps.
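
The three stages correspond to the pipeline in the removed scripts/make_hmm further down in this diff. A condensed sketch of that pipeline; the import paths are taken from the old script and are not verified against the new src/ layout, and 'track.wav' is a placeholder:

from apollon import segment
from apollon.audio import load_audio
from apollon.hmm import PoissonHmm
from apollon.onsets import FluxOnsetDetector
from apollon.signal.spectral import Spectrum

snd = load_audio('track.wav')                          # 1. feature extraction
onsets = FluxOnsetDetector(snd.data, snd.fps)
segs = segment.by_onsets(snd.data, 2**11, onsets.index())
feat = Spectrum(segs, snd.fps, window='hamming').centroid().round().astype(int)

mod = PoissonHmm(feat, 4)                              # 2. Poisson HMM
mod.fit(feat)
# 3. fitted models are then compared by means of a self-organizing map
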
diff --git a/docs/source/reference/apollon.aplot.rst b/docs/source/generated/api/apollon.aplot.rst
similarity index 56%
rename from docs/source/reference/apollon.aplot.rst
rename to docs/source/generated/api/apollon.aplot.rst
index caf67e66c9de81b92ab5a7b58eba0fb1f92d0980..82e0150e959172c02ad9137fc0e9e3b38f36922d 100644
--- a/docs/source/reference/apollon.aplot.rst
+++ b/docs/source/generated/api/apollon.aplot.rst
@@ -2,6 +2,6 @@ apollon.aplot module
 ====================
 
 .. automodule:: apollon.aplot
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.audio.rst b/docs/source/generated/api/apollon.audio.rst
similarity index 56%
rename from docs/source/reference/apollon.audio.rst
rename to docs/source/generated/api/apollon.audio.rst
index 64efcb2b3a10d870384b5290479f22573bf7c528..032df5fa65ade3b46cc19f46a1be1f5a8bd9350e 100644
--- a/docs/source/reference/apollon.audio.rst
+++ b/docs/source/generated/api/apollon.audio.rst
@@ -2,6 +2,6 @@ apollon.audio module
 ====================
 
 .. automodule:: apollon.audio
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.container.rst b/docs/source/generated/api/apollon.container.rst
new file mode 100644
index 0000000000000000000000000000000000000000..29eb7275f7fbeb2f7bb06e8d951e11845c74a5f9
--- /dev/null
+++ b/docs/source/generated/api/apollon.container.rst
@@ -0,0 +1,7 @@
+apollon.container module
+========================
+
+.. automodule:: apollon.container
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.datasets.rst b/docs/source/generated/api/apollon.datasets.rst
similarity index 58%
rename from docs/source/reference/apollon.datasets.rst
rename to docs/source/generated/api/apollon.datasets.rst
index fcca8082ac27fa2ba8f8a85e390e2ce71f9b03b1..d0a604155df3850f40fd8e63bba6ccc19d749cbd 100644
--- a/docs/source/reference/apollon.datasets.rst
+++ b/docs/source/generated/api/apollon.datasets.rst
@@ -2,6 +2,6 @@ apollon.datasets module
 =======================
 
 .. automodule:: apollon.datasets
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.fractal.rst b/docs/source/generated/api/apollon.fractal.rst
similarity index 58%
rename from docs/source/reference/apollon.fractal.rst
rename to docs/source/generated/api/apollon.fractal.rst
index 8a5ae9ed4baeeecc8afe92af011f37fbf5021c28..013847aa9ce6c95fd39951ca6b3b435b40c80876 100644
--- a/docs/source/reference/apollon.fractal.rst
+++ b/docs/source/generated/api/apollon.fractal.rst
@@ -2,6 +2,6 @@ apollon.fractal module
 ======================
 
 .. automodule:: apollon.fractal
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.hmm.poisson.rst b/docs/source/generated/api/apollon.hmm.poisson.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7831d9b77a4453351603324ff36cab20fc83d512
--- /dev/null
+++ b/docs/source/generated/api/apollon.hmm.poisson.rst
@@ -0,0 +1,7 @@
+apollon.hmm.poisson module
+==========================
+
+.. automodule:: apollon.hmm.poisson
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.hmm.rst b/docs/source/generated/api/apollon.hmm.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8829e76a6851f3766b08e1ea2e11b33e5d9c2399
--- /dev/null
+++ b/docs/source/generated/api/apollon.hmm.rst
@@ -0,0 +1,16 @@
+apollon.hmm package
+===================
+
+.. automodule:: apollon.hmm
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+   :maxdepth: 4
+
+   apollon.hmm.poisson
+   apollon.hmm.utilities
diff --git a/docs/source/generated/api/apollon.hmm.utilities.rst b/docs/source/generated/api/apollon.hmm.utilities.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6fd1d6b2392a95e528d9b7fa3eb99502ae607680
--- /dev/null
+++ b/docs/source/generated/api/apollon.hmm.utilities.rst
@@ -0,0 +1,7 @@
+apollon.hmm.utilities module
+============================
+
+.. automodule:: apollon.hmm.utilities
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.io.io.rst b/docs/source/generated/api/apollon.io.io.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c2609f923ec1e4e9e4258bfe123c7040cc4a1681
--- /dev/null
+++ b/docs/source/generated/api/apollon.io.io.rst
@@ -0,0 +1,7 @@
+apollon.io.io module
+====================
+
+.. automodule:: apollon.io.io
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.io.json.rst b/docs/source/generated/api/apollon.io.json.rst
new file mode 100644
index 0000000000000000000000000000000000000000..287a240b9edbbf710da800aa9ec3bfc3b89cc1a8
--- /dev/null
+++ b/docs/source/generated/api/apollon.io.json.rst
@@ -0,0 +1,7 @@
+apollon.io.json module
+======================
+
+.. automodule:: apollon.io.json
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.io.rst b/docs/source/generated/api/apollon.io.rst
new file mode 100644
index 0000000000000000000000000000000000000000..297d236e7c1b25736fad3c1fecaea20e008ebc0e
--- /dev/null
+++ b/docs/source/generated/api/apollon.io.rst
@@ -0,0 +1,16 @@
+apollon.io package
+==================
+
+.. automodule:: apollon.io
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+   :maxdepth: 4
+
+   apollon.io.io
+   apollon.io.json
diff --git a/docs/source/reference/apollon.onsets.rst b/docs/source/generated/api/apollon.onsets.rst
similarity index 57%
rename from docs/source/reference/apollon.onsets.rst
rename to docs/source/generated/api/apollon.onsets.rst
index 7aa82bb90e7fda362cbbcc567191456fb5b54d82..5f4a444311d11c1ad155dce41c18e8da6731147e 100644
--- a/docs/source/reference/apollon.onsets.rst
+++ b/docs/source/generated/api/apollon.onsets.rst
@@ -2,6 +2,6 @@ apollon.onsets module
 =====================
 
 .. automodule:: apollon.onsets
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.rst b/docs/source/generated/api/apollon.rst
similarity index 65%
rename from docs/source/reference/apollon.rst
rename to docs/source/generated/api/apollon.rst
index 22613bb8a73716ec5ec1033227db1e42005e808c..f445d6d3928343869583aa241ccac3ad21465443 100644
--- a/docs/source/reference/apollon.rst
+++ b/docs/source/generated/api/apollon.rst
@@ -2,33 +2,33 @@ apollon package
 ===============
 
 .. automodule:: apollon
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
 
 Subpackages
 -----------
 
 .. toctree::
+   :maxdepth: 4
 
-    apollon.commands
-    apollon.hmm
-    apollon.signal
-    apollon.som
+   apollon.hmm
+   apollon.io
+   apollon.signal
+   apollon.som
 
 Submodules
 ----------
 
 .. toctree::
+   :maxdepth: 4
 
-   apollon.analyses
    apollon.aplot
    apollon.audio
+   apollon.container
    apollon.datasets
    apollon.fractal
-   apollon.io
    apollon.onsets
    apollon.segment
    apollon.tools
    apollon.types
-
diff --git a/docs/source/reference/apollon.segment.rst b/docs/source/generated/api/apollon.segment.rst
similarity index 58%
rename from docs/source/reference/apollon.segment.rst
rename to docs/source/generated/api/apollon.segment.rst
index 938324d4d44bfe566e3da5dc89d866178655ed83..2bae755a7e30270ee43c93d60d6a50ce2e4d02fa 100644
--- a/docs/source/reference/apollon.segment.rst
+++ b/docs/source/generated/api/apollon.segment.rst
@@ -2,6 +2,6 @@ apollon.segment module
 ======================
 
 .. automodule:: apollon.segment
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.signal.container.rst b/docs/source/generated/api/apollon.signal.container.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9c6b0f3004619a9ca16de2b837e065b2c3bcbdae
--- /dev/null
+++ b/docs/source/generated/api/apollon.signal.container.rst
@@ -0,0 +1,7 @@
+apollon.signal.container module
+===============================
+
+.. automodule:: apollon.signal.container
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.signal.critical_bands.rst b/docs/source/generated/api/apollon.signal.critical_bands.rst
similarity index 68%
rename from docs/source/reference/apollon.signal.critical_bands.rst
rename to docs/source/generated/api/apollon.signal.critical_bands.rst
index af36c5ae335c8743238e588fc7933fa3b1f8fde0..9f64c1526cddcbca0f8d0d937ec81620ff02d422 100644
--- a/docs/source/reference/apollon.signal.critical_bands.rst
+++ b/docs/source/generated/api/apollon.signal.critical_bands.rst
@@ -2,6 +2,6 @@ apollon.signal.critical\_bands module
 =====================================
 
 .. automodule:: apollon.signal.critical_bands
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.signal.features.rst b/docs/source/generated/api/apollon.signal.features.rst
similarity index 64%
rename from docs/source/reference/apollon.signal.features.rst
rename to docs/source/generated/api/apollon.signal.features.rst
index 354c9c30bbb197fdb384bf97ec4ce56a058ba554..d5e161b790dc2462046eaf5121a4f3aea604acc6 100644
--- a/docs/source/reference/apollon.signal.features.rst
+++ b/docs/source/generated/api/apollon.signal.features.rst
@@ -2,6 +2,6 @@ apollon.signal.features module
 ==============================
 
 .. automodule:: apollon.signal.features
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.signal.filter.rst b/docs/source/generated/api/apollon.signal.filter.rst
similarity index 62%
rename from docs/source/reference/apollon.signal.filter.rst
rename to docs/source/generated/api/apollon.signal.filter.rst
index fbaaee915cbb56bd3c3a200e0a6869c917a477dd..7b6f7fa47c2df5b625e75c52c89c09ecffb93d22 100644
--- a/docs/source/reference/apollon.signal.filter.rst
+++ b/docs/source/generated/api/apollon.signal.filter.rst
@@ -2,6 +2,6 @@ apollon.signal.filter module
 ============================
 
 .. automodule:: apollon.signal.filter
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.signal.rst b/docs/source/generated/api/apollon.signal.rst
similarity index 72%
rename from docs/source/reference/apollon.signal.rst
rename to docs/source/generated/api/apollon.signal.rst
index 32fcbab0acd1bf603c42c1edf1b4a038d486514a..577552920adcd82ff2da11b436611410288ea229 100644
--- a/docs/source/reference/apollon.signal.rst
+++ b/docs/source/generated/api/apollon.signal.rst
@@ -2,18 +2,19 @@ apollon.signal package
 ======================
 
 .. automodule:: apollon.signal
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
 
 Submodules
 ----------
 
 .. toctree::
+   :maxdepth: 4
 
+   apollon.signal.container
    apollon.signal.critical_bands
    apollon.signal.features
    apollon.signal.filter
    apollon.signal.spectral
    apollon.signal.tools
-
diff --git a/docs/source/reference/apollon.signal.spectral.rst b/docs/source/generated/api/apollon.signal.spectral.rst
similarity index 64%
rename from docs/source/reference/apollon.signal.spectral.rst
rename to docs/source/generated/api/apollon.signal.spectral.rst
index f398387c2c4f467053b3df29f9cff157781cd182..4dc66ef52e019c590b8fd08feec21b66bd11ad39 100644
--- a/docs/source/reference/apollon.signal.spectral.rst
+++ b/docs/source/generated/api/apollon.signal.spectral.rst
@@ -2,6 +2,6 @@ apollon.signal.spectral module
 ==============================
 
 .. automodule:: apollon.signal.spectral
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.signal.tools.rst b/docs/source/generated/api/apollon.signal.tools.rst
similarity index 62%
rename from docs/source/reference/apollon.signal.tools.rst
rename to docs/source/generated/api/apollon.signal.tools.rst
index b2fc3fb813bdbd3e78657cda9bca09ff4fc46816..ba4471a11aa7322b27d21d33754bace663ec633e 100644
--- a/docs/source/reference/apollon.signal.tools.rst
+++ b/docs/source/generated/api/apollon.signal.tools.rst
@@ -2,6 +2,6 @@ apollon.signal.tools module
 ===========================
 
 .. automodule:: apollon.signal.tools
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.som.datasets.rst b/docs/source/generated/api/apollon.som.datasets.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9fa46f21370c8b67062af219a6fee9370c04ddd4
--- /dev/null
+++ b/docs/source/generated/api/apollon.som.datasets.rst
@@ -0,0 +1,7 @@
+apollon.som.datasets module
+===========================
+
+.. automodule:: apollon.som.datasets
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.som.defaults.rst b/docs/source/generated/api/apollon.som.defaults.rst
similarity index 62%
rename from docs/source/reference/apollon.som.defaults.rst
rename to docs/source/generated/api/apollon.som.defaults.rst
index 2fe2ffcab44035b0caef2d8d77d0c00886a748ad..f3a7b5278a9702cf849fc34e854c68ab2e556988 100644
--- a/docs/source/reference/apollon.som.defaults.rst
+++ b/docs/source/generated/api/apollon.som.defaults.rst
@@ -2,6 +2,6 @@ apollon.som.defaults module
 ===========================
 
 .. automodule:: apollon.som.defaults
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.som.neighbors.rst b/docs/source/generated/api/apollon.som.neighbors.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d658db39f373fbec2bd03f7eb9ff4db259e283b0
--- /dev/null
+++ b/docs/source/generated/api/apollon.som.neighbors.rst
@@ -0,0 +1,7 @@
+apollon.som.neighbors module
+============================
+
+.. automodule:: apollon.som.neighbors
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/generated/api/apollon.som.plot.rst b/docs/source/generated/api/apollon.som.plot.rst
new file mode 100644
index 0000000000000000000000000000000000000000..08fcd8ddc0664cc8f731f8e6f0cc233ea5ec7b60
--- /dev/null
+++ b/docs/source/generated/api/apollon.som.plot.rst
@@ -0,0 +1,7 @@
+apollon.som.plot module
+=======================
+
+.. automodule:: apollon.som.plot
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.som.rst b/docs/source/generated/api/apollon.som.rst
similarity index 59%
rename from docs/source/reference/apollon.som.rst
rename to docs/source/generated/api/apollon.som.rst
index ec8deb1a5178d0699cf56ee66ff09b9e4907dbbf..5d31d062a4bacb1a6f7cd4667141d9fa04faed4e 100644
--- a/docs/source/reference/apollon.som.rst
+++ b/docs/source/generated/api/apollon.som.rst
@@ -2,17 +2,20 @@ apollon.som package
 ===================
 
 .. automodule:: apollon.som
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
 
 Submodules
 ----------
 
 .. toctree::
+   :maxdepth: 4
 
+   apollon.som.datasets
    apollon.som.defaults
+   apollon.som.neighbors
+   apollon.som.plot
    apollon.som.som
    apollon.som.topologies
    apollon.som.utilities
-
diff --git a/docs/source/reference/apollon.som.som.rst b/docs/source/generated/api/apollon.som.som.rst
similarity index 58%
rename from docs/source/reference/apollon.som.som.rst
rename to docs/source/generated/api/apollon.som.som.rst
index cae42e9208112dde5c2b69d726a11f29fa2c1b36..492bf9acc2b984e528ef40572b138f83665f84bc 100644
--- a/docs/source/reference/apollon.som.som.rst
+++ b/docs/source/generated/api/apollon.som.som.rst
@@ -2,6 +2,6 @@ apollon.som.som module
 ======================
 
 .. automodule:: apollon.som.som
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.som.topologies.rst b/docs/source/generated/api/apollon.som.topologies.rst
similarity index 63%
rename from docs/source/reference/apollon.som.topologies.rst
rename to docs/source/generated/api/apollon.som.topologies.rst
index 800f123376fe4dd367a4a42e4bb003715631ff60..5385bb3af337a4ce69e41f395d93ac54121cf289 100644
--- a/docs/source/reference/apollon.som.topologies.rst
+++ b/docs/source/generated/api/apollon.som.topologies.rst
@@ -2,6 +2,6 @@ apollon.som.topologies module
 =============================
 
 .. automodule:: apollon.som.topologies
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.som.utilities.rst b/docs/source/generated/api/apollon.som.utilities.rst
similarity index 62%
rename from docs/source/reference/apollon.som.utilities.rst
rename to docs/source/generated/api/apollon.som.utilities.rst
index 16f42c75cfb45d5b0ca011703b1d1e838bd4f0dc..f8d0c2f3f20165c5a3ab85fd67d81032757b2715 100644
--- a/docs/source/reference/apollon.som.utilities.rst
+++ b/docs/source/generated/api/apollon.som.utilities.rst
@@ -2,6 +2,6 @@ apollon.som.utilities module
 ============================
 
 .. automodule:: apollon.som.utilities
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.tools.rst b/docs/source/generated/api/apollon.tools.rst
similarity index 56%
rename from docs/source/reference/apollon.tools.rst
rename to docs/source/generated/api/apollon.tools.rst
index ac6d46ab5c0e40d396f3a4d342ed680c467a5276..45ef3e4c6d8e306f8b759a111bac3d3f8e7c86d1 100644
--- a/docs/source/reference/apollon.tools.rst
+++ b/docs/source/generated/api/apollon.tools.rst
@@ -2,6 +2,6 @@ apollon.tools module
 ====================
 
 .. automodule:: apollon.tools
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/apollon.types.rst b/docs/source/generated/api/apollon.types.rst
similarity index 56%
rename from docs/source/reference/apollon.types.rst
rename to docs/source/generated/api/apollon.types.rst
index 19c2581e82361fb7c976e08e13214d0ae0965fc0..2ceb8dcf2a5aa318cb0ae1ad873924168b9d7680 100644
--- a/docs/source/reference/apollon.types.rst
+++ b/docs/source/generated/api/apollon.types.rst
@@ -2,6 +2,6 @@ apollon.types module
 ====================
 
 .. automodule:: apollon.types
-    :members:
-    :undoc-members:
-    :show-inheritance:
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/reference/modules.rst b/docs/source/generated/api/modules.rst
similarity index 100%
rename from docs/source/reference/modules.rst
rename to docs/source/generated/api/modules.rst
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 52c35fd1650f5eda3c64d8b307ee96cefc6316e9..8a1922029cdb39bb75e3e508c86ed77978190a22 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -5,10 +5,10 @@
 
 Welcome to the documentation of the apollon feature extraction framework!
 =========================================================================
-*apollon* is a feature extraction and modelling frame work for music data
+*apollon* is a feature extraction and modeling framework for music data
 analysis. It handles low-level audio feature extraction, their aggregation
-using Hidden Markov models, and comparison by means of the Self-Organizing Map.
-See the :doc:`starters` chapter for gentle introduction to the mentioned
+using Hidden Markov models, and comparison by means of the self-organizing map.
+See the :doc:`framework` chapter for a gentle introduction to the mentioned
 concepts.
 
 Contents
@@ -17,8 +17,10 @@ Contents
 .. toctree::
    :maxdepth: 2
 
-   installation
-   starters
-   clt 
+   download
+   install
+   framework
+   generated/api/modules
+
 * :ref:`genindex`
 * :ref:`modindex`
diff --git a/docs/source/install.rst b/docs/source/install.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a3e9a5b1a0f392378f826e89343c1a9ca4c906d5
--- /dev/null
+++ b/docs/source/install.rst
@@ -0,0 +1,33 @@
+Installation
+***************************************
+apollon can be installed on GNU/Linux, macOS, and Windows. The installation
+process is similar on each of these platforms. Note, however, that apollon
+contains CPython extension modules, which GNU/Linux and Windows users have to
+compile locally. If you work on one of those platforms, please make sure that
+a C compiler is set up on your machine; otherwise the installation will fail.
+For macOS, a precompiled wheel is provided for the latest version only.
+
+
+Install using pip
+=======================================
+The Python package manager can automatically download and install
+apollon from PyPI. Simply run the following command from your terminal:
+
+.. code-block:: Bash
+
+   python3 -m pip install apollon
+
+
+Install from source
+=======================================
+You can also install and compile apollon directly from its sources in three
+steps:
+
+* Download the apollon source code
+* Open a terminal and navigate to the apollon root directory
+* Install and compile with the following command
+
+.. code-block:: Bash
+
+   python3 -m pip install .
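
Either way, a quick smoke test from Python, assuming the package exposes a version string (the release number below is taken from docs/source/conf.py):

import apollon
print(apollon.__version__)    # expected: 0.1.3
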
diff --git a/docs/source/installation.rst b/docs/source/installation.rst
deleted file mode 100644
index bdb722a4457b0649c7022a7f5d8628bb96ae3105..0000000000000000000000000000000000000000
--- a/docs/source/installation.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-Before you order
-================
-
-1. Download
------------
-Download the repository from the gitlab server using
-``git clone git@gitlab.rrz.uni-hamburg.de:bla7667/apollon.git``
-
-2. Installation
-----------------
-To install apollon, navigate to its repository's source directory and install
-using pip:
-``
-cd path/to/apollon
-pip install .
-``
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
deleted file mode 100644
index 3d0e131cafec9374bd500220453055f6fa6cea55..0000000000000000000000000000000000000000
--- a/docs/source/modules.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-apollon
-=======
-
-.. toctree::
-   :maxdepth: 4
-
diff --git a/docs/source/reference/apollon.analyses.rst b/docs/source/reference/apollon.analyses.rst
deleted file mode 100644
index a925627d2cf1dc48d1e23ed536c70ce0016e7613..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.analyses.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.analyses module
-=======================
-
-.. automodule:: apollon.analyses
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/reference/apollon.commands.apollon_export.rst b/docs/source/reference/apollon.commands.apollon_export.rst
deleted file mode 100644
index 26c7857ac0adc6421a2a0055125731d2d2a03fae..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.commands.apollon_export.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.commands.apollon\_export module
-=======================================
-
-.. automodule:: apollon.commands.apollon_export
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/reference/apollon.commands.apollon_features.rst b/docs/source/reference/apollon.commands.apollon_features.rst
deleted file mode 100644
index 35168847949b740d6f1781406cb372600634cd1b..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.commands.apollon_features.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.commands.apollon\_features module
-=========================================
-
-.. automodule:: apollon.commands.apollon_features
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/reference/apollon.commands.apollon_hmm.rst b/docs/source/reference/apollon.commands.apollon_hmm.rst
deleted file mode 100644
index fd3bc55bd5dc968222c8474c83739dcf7ee0d026..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.commands.apollon_hmm.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.commands.apollon\_hmm module
-====================================
-
-.. automodule:: apollon.commands.apollon_hmm
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/reference/apollon.commands.apollon_onsets.rst b/docs/source/reference/apollon.commands.apollon_onsets.rst
deleted file mode 100644
index c18836a56eae02f9cd13d856da71e2b9dfe3abc7..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.commands.apollon_onsets.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.commands.apollon\_onsets module
-=======================================
-
-.. automodule:: apollon.commands.apollon_onsets
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/reference/apollon.commands.rst b/docs/source/reference/apollon.commands.rst
deleted file mode 100644
index ae032a3ded23f66ff05bfcc77e1d88ced7049504..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.commands.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-apollon.commands package
-========================
-
-.. automodule:: apollon.commands
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-Submodules
-----------
-
-.. toctree::
-
-   apollon.commands.apollon_export
-   apollon.commands.apollon_features
-   apollon.commands.apollon_hmm
-   apollon.commands.apollon_onsets
-
diff --git a/docs/source/reference/apollon.hmm.graph.grapher.rst b/docs/source/reference/apollon.hmm.graph.grapher.rst
deleted file mode 100644
index e8fa67791a674b9be024aa6e8ec452b40b366016..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.hmm.graph.grapher.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.hmm.graph.grapher module
-================================
-
-.. automodule:: apollon.hmm.graph.grapher
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/reference/apollon.hmm.graph.rst b/docs/source/reference/apollon.hmm.graph.rst
deleted file mode 100644
index de6bccda87995bbab0c58a907329e5d8fab35ba8..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.hmm.graph.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-apollon.hmm.graph package
-=========================
-
-.. automodule:: apollon.hmm.graph
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-Submodules
-----------
-
-.. toctree::
-
-   apollon.hmm.graph.grapher
-
diff --git a/docs/source/reference/apollon.hmm.hmm_utilities.rst b/docs/source/reference/apollon.hmm.hmm_utilities.rst
deleted file mode 100644
index eb8c41c4246d57e4ced66c7a069b90b605dddcbe..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.hmm.hmm_utilities.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.hmm.hmm\_utilities module
-=================================
-
-.. automodule:: apollon.hmm.hmm_utilities
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/reference/apollon.hmm.poisson.poisson_core.rst b/docs/source/reference/apollon.hmm.poisson.poisson_core.rst
deleted file mode 100644
index ec4d0df4018688d8e737c020bb11f495209e664f..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.hmm.poisson.poisson_core.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.hmm.poisson.poisson\_core module
-========================================
-
-.. automodule:: apollon.hmm.poisson.poisson_core
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/reference/apollon.hmm.poisson.poisson_hmm.rst b/docs/source/reference/apollon.hmm.poisson.poisson_hmm.rst
deleted file mode 100644
index 71942c4914051678eba2cf557e5e1bdd1b533bcf..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.hmm.poisson.poisson_hmm.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.hmm.poisson.poisson\_hmm module
-=======================================
-
-.. automodule:: apollon.hmm.poisson.poisson_hmm
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/reference/apollon.hmm.poisson.rst b/docs/source/reference/apollon.hmm.poisson.rst
deleted file mode 100644
index 35231b2491b271a3fb99f5f1ceed3e047b0172f8..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.hmm.poisson.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-apollon.hmm.poisson package
-===========================
-
-.. automodule:: apollon.hmm.poisson
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-Submodules
-----------
-
-.. toctree::
-
-   apollon.hmm.poisson.poisson_core
-   apollon.hmm.poisson.poisson_hmm
-
diff --git a/docs/source/reference/apollon.hmm.rst b/docs/source/reference/apollon.hmm.rst
deleted file mode 100644
index 963eaab8eb14494a6f47468856d9dcfe0f413e63..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.hmm.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-apollon.hmm package
-===================
-
-.. automodule:: apollon.hmm
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-Subpackages
------------
-
-.. toctree::
-
-    apollon.hmm.graph
-    apollon.hmm.poisson
-
-Submodules
-----------
-
-.. toctree::
-
-   apollon.hmm.hmm_utilities
-
diff --git a/docs/source/reference/apollon.io.rst b/docs/source/reference/apollon.io.rst
deleted file mode 100644
index b888376390ef920ecb3d1a10e12699199f380417..0000000000000000000000000000000000000000
--- a/docs/source/reference/apollon.io.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-apollon.io module
-=================
-
-.. automodule:: apollon.io
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/docs/source/starters.rst b/docs/source/starters.rst
deleted file mode 100644
index e6b01928d957ae5c8ccf0b2d6b376a520cb9bc1e..0000000000000000000000000000000000000000
--- a/docs/source/starters.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-Starters
-========
-
-1. Audio Feature Extraction
-----------------------------
-Extract some of the most common low-level audio features.
-
-2. Hidden Markov Model
------------------------
-Estimate Poisson-distributed Hidden Markov Models.
-
-3. Self-Organizing Map
------------------------
-Train some self-organizing maps.
diff --git a/include/cdim.h b/include/cdim.h
new file mode 100644
index 0000000000000000000000000000000000000000..2716d3e8a00b6f376a476d0614f4a7361509073e
--- /dev/null
+++ b/include/cdim.h
@@ -0,0 +1,58 @@
+#ifndef CDIM_H
+#define CDIM_H
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+
+/** Condensed distance matrix of delay embedding
+ */
+void
+delay_embedding_dists (const double *inp,
+                       const size_t  n_vectors,
+                       const size_t  delay,
+                       const size_t  m_dim,
+                             double *dists);
+
+void
+comsar_fractal_embedding (const double *x,
+                          const size_t  N_max,
+                          const size_t  m_dim,
+                          const size_t  delay,
+                          const size_t  n_dist,
+                                double *dist,
+                                double *d_min,
+                                double *d_max);
+
+void
+comsar_fractal_correlation_sum (const size_t n_radius,
+                                const double *radius,
+                                const size_t n_dist, const double *dist,
+                                const size_t N_max, double *Cr);
+
+int
+comsar_fractal_csum (const double *sig,
+                     const size_t  n_sig,
+                     const double *radius,
+                     const size_t  n_radius,
+                     const size_t  m_dim,
+                     const size_t  delay,
+                           double *Cr);
+
+int
+comsar_fractal_cdim (const double *x,
+                     const size_t  N,
+                     const size_t  n_radius,
+                     const size_t  m_dim,
+                     const size_t  delay,
+                           double *Cr);
+
+double
+corr_dim_bader (const short  *snd,
+                const size_t  delay,
+                const size_t  m_dim,
+                const size_t  n_bins,
+                const size_t  slope_points);
+
+#endif  /* CDIM_H */
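
These declarations cover the classic Grassberger–Procaccia procedure: delay-embed the signal, collect pairwise distances of the embedded vectors, and count the fraction of pairs within growing radii. A NumPy/SciPy sketch of the same idea (a pure-Python stand-in, not the comsar implementation itself):

import numpy as np
from scipy.spatial.distance import pdist

def correlation_sum(sig, radii, m_dim=2, delay=1):
    # Delay embedding: row t is (sig[t], sig[t+delay], ..., sig[t+(m_dim-1)*delay]).
    n_vectors = len(sig) - (m_dim - 1) * delay
    emb = np.stack([sig[i * delay:i * delay + n_vectors] for i in range(m_dim)],
                   axis=1)
    dists = pdist(emb)    # condensed distance matrix, cf. delay_embedding_dists()
    # C(r) is the fraction of pairs closer than r; the slope of log C(r)
    # over log r estimates the correlation dimension.
    return np.array([(dists < r).mean() for r in radii])
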
diff --git a/include/correlogram.h b/include/correlogram.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7850aa389fd60901bb2fe06fa287e79011eb093
--- /dev/null
+++ b/include/correlogram.h
@@ -0,0 +1,26 @@
+#ifndef CORRELOGRAM_H
+#define CORRELOGRAM_H
+
+#include <math.h>
+#include <stdio.h>
+
+
+double
+corrcoef (const double *x,
+          const double *y,
+          const size_t  n);
+
+int
+correlogram_delay (const double *sig,
+                   const size_t *delays,
+                   const size_t  wlen,
+                   const size_t *dims,
+                         double *cgram);
+
+int
+correlogram (const double *sig,
+             const size_t  wlen,
+             const size_t *dims,
+                   double *cgram);
+
+#endif  /* CORRELOGRAM_H */
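
Judging by name and signature, corrcoef() computes Pearson's r over two equal-length windows, and the correlogram fills a grid of such coefficients for varying delays. A NumPy equivalent, stated as an assumption about the C code for orientation only:

import numpy as np

def corrcoef(x, y):
    # Pearson correlation coefficient of two equal-length windows.
    return np.corrcoef(x, y)[0, 1]

rng = np.random.default_rng(0)
sig = rng.standard_normal(256)
print(round(float(corrcoef(sig, sig)), 3))    # 1.0: identical windows correlate perfectly
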
diff --git a/include/distance.h b/include/distance.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7f63c37262fc1c87119de0934fac771ce91f04c
--- /dev/null
+++ b/include/distance.h
@@ -0,0 +1,15 @@
+#ifndef DISTANCE_H
+#define DISTANCE_H
+
+#include <math.h>
+#include <stdlib.h>
+
+/** Hellinger distance for stochastic vectors.
+ */
+int
+hellinger (const double *pva,
+           const double *pvb,
+           const size_t  len,
+                 double *dist);
+
+#endif  /* DISTANCE_H */
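
For stochastic vectors p and q, the Hellinger distance is H(p, q) = ||sqrt(p) - sqrt(q)||_2 / sqrt(2), bounded in [0, 1]. A NumPy equivalent of the declared routine:

import numpy as np

def hellinger(pva, pvb):
    # Inputs are probability vectors: non-negative, summing to one.
    return np.sqrt(np.sum((np.sqrt(pva) - np.sqrt(pvb)) ** 2)) / np.sqrt(2)

print(hellinger(np.array([1.0, 0.0]), np.array([0.0, 1.0])))    # 1.0, the maximum
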
diff --git a/mypy.ini b/mypy.ini
index 30d58100aeff5e025ea943cbef972c7c47f217d3..f655fc3cb037b89596cd03725a3685ece18f7a54 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -3,11 +3,11 @@
 [mypy-matplotlib,matplotlib.pyplot]
 ignore_missing_imports = True
 
-[mypy-numpy]
-ignore_missing_imports = True
-
 [mypy-scipy,scipy.signal,scipy.signal.windows,scipy.spatial,scipy.special]
 ignore_missing_imports = True
 
 [mypy-soundfile]
 ignore_missing_imports = True
+
+[mypy-jsonschema]
+ignore_missing_imports = True
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..dfad784d58054f1b49db8f42dbfd83700a01610b
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+	'setuptools',
+	'wheel',
+	'numpy']
+build-backend = "setuptools.build_meta"
diff --git a/scripts/apollon b/scripts/apollon
index 7bc3f0be4b7319f2d202f3f93cc5cf20243b3e3f..d269b3729d391fdab16d46d4d93dc604b3247421 100755
--- a/scripts/apollon
+++ b/scripts/apollon
@@ -14,7 +14,7 @@ import apollon
 from apollon import commands
 
 
-_valid_subcommand = ('features', 'onsets', 'hmm', 'som', 'export')
+_valid_subcommand = ('features', 'onsets', 'hmm', 'som', 'export', 'position')
 
 
 def _parse_cml(argv):
@@ -26,6 +26,7 @@ def _parse_cml(argv):
     subparsers = parser.add_subparsers()
     sp_features = _create_subparser_features(subparsers)
     sp_hmm = _create_subparser_hmm(subparsers)
+    sp_position = _create_subparser_position(subparsers)
 
     return parser.parse_args(argv[1:])
 
@@ -50,7 +51,10 @@ def _create_subparser_features(subparsers):
     sp_features.add_argument(
         '-o', '--outpath', action='store', help='Output file path.')
 
-    sp_features.add_argument('file', type=str, nargs=1)
+    sp_features.add_argument(
+        'files', type=str, nargs='+',
+        help='Audio files.')
+
     sp_features.set_defaults(func=commands.apollon_features.main)
 
     return sp_features
@@ -79,6 +83,30 @@ def _create_subparser_hmm(subparsers):
     return sp_hmm
 
 
+def _create_subparser_position(subparsers):
+    sp_position = subparsers.add_parser('position',
+                        help='Compute map coordinates of the inputs.')
+
+    sp_position.add_argument(
+        'som_file', type=str, action='store',
+        help='Path to SOM file.')
+
+    sp_position.add_argument(
+        'objective_files', type=str, action='store', nargs='+',
+        help='Path to objective files.')
+
+    sp_position.add_argument(
+        '--rt', action='store_true', default=False,
+        help='Compute SOM position for rhythm track.')
+
+    sp_position.add_argument(
+        '--tt', action='store_true', default=False,
+        help='Compute SOM position for timbre track.')
+
+    sp_position.set_defaults(func=commands.apollon_position.main)
+    return sp_position
+
+
 def main(argv=None):
     if argv is None:
         argv = sys.argv
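
The subcommand wiring above relies on argparse's set_defaults(func=...) pattern: each subparser stores its handler, and main() only has to call args.func(args). A self-contained sketch of the pattern (handler and file names hypothetical):

import argparse
import sys

def position_main(args):
    print('would map', args.objective_files, 'onto', args.som_file)
    return 0

parser = argparse.ArgumentParser(prog='apollon')
subparsers = parser.add_subparsers()
sp_position = subparsers.add_parser('position')
sp_position.add_argument('som_file', type=str)
sp_position.add_argument('objective_files', type=str, nargs='+')
sp_position.set_defaults(func=position_main)

args = parser.parse_args(['position', 'model.som', 'a.json', 'b.json'])
sys.exit(args.func(args))
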
diff --git a/scripts/drawtpm b/scripts/drawtpm
deleted file mode 100755
index 5e8ba73766695b4a8c4e7726f89c152d330a5469..0000000000000000000000000000000000000000
--- a/scripts/drawtpm
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python3
-
-import numpy as np
-from optparse import OptionParser
-from pathlib import Path
-import sys
-
-from apollon.hmm import graph
-from apollon.io import load
-
-
-def main(argv=None):
-    """Draw the tpm of a HMM as directed graph.
-    """
-    if argv is None:
-        argv = sys.argv
-
-    opts, args = _parse_cmd(argv)
-    in_file = Path(args[0])
-
-    tpm = load(in_file)
-    fig, ax, img = graph.draw_matrix(tpm.round(3).astype(float))
-    graph.save_hmmfig(fig, in_file.stem+'_matrix.jpg')
-    #fig, ax, img = graph.draw_network(tpm.round(3).astype(float))
-    #graph.save_hmmfig(fig, in_file.stem+'_network.jpg')
-
-    return 0
-
-
-def _parse_cmd(argv):
-    usage = 'Usage: %prog [OPTIONS] path_to_hmm'
-    parser = OptionParser(usage=usage)
-
-    parser.add_option('-v', '--verbose', action='store_true',
-                      dest='verbose', default=False,
-                      help='Be a little more verbose.')
-
-    opts, args = parser.parse_args()
-
-    if len(args) != 1:
-        print('Wrong number of arguments.')
-        parser.print_help(sys.stderr)
-        sys.exit(1)
-
-    return opts, args
-
-
-if __name__ == "__main__":
-    sys.exit(main())
-
-
-
diff --git a/scripts/get_som_winner b/scripts/get_som_winner
deleted file mode 100755
index 46f49e80629aac205bc108bfce01b507387602eb..0000000000000000000000000000000000000000
--- a/scripts/get_som_winner
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-"""get_som_winner.py
-
-(c) Michael Blaß, 2016
-
-Return index of winning neuron given a SOM and a HMM.
-"""
-
-
-import sys
-from optparse import OptionParser
-import pathlib
-
-from numpy import atleast_2d
-from numpy import unravel_index
-
-from apollon import io
-
-
-def main():
-
-    usage = 'usage: %prog [OPTIONS] som_file hmm_file'
-    parser = OptionParser(usage=usage)
-    parser.add_option('-f', '--flat', action='store_true',
-                      help='return flat index')
-
-    (opts, args) = parser.parse_args()
-    if len(args) != 2:
-        print('Specify exactly two arguments (som_file, hmm_file).')
-        sys.exit(1)
-
-    som_file = pathlib.Path(args[0])
-    hmm_file = pathlib.Path(args[1])
-
-    if som_file.exists() and str(som_file).endswith('.som'):
-        if hmm_file.exists() and str(hmm_file).endswith('.hmm'):
-            som = io.load(str(som_file))
-            hmm = io.load(str(hmm_file))
-        else:
-            raise FileNotFoundError('File {} not found or is no valid HMM.'
-                                    .format(args[1]))
-    else:
-        raise FileNotFoundError('File {} not found or is no valid SOM.'
-                                .format(args[0]))
-
-    foo = som.get_winners(hmm.reshape(16))[0]
-    if opts.flat:
-        print(foo)
-    else:
-        x, y = unravel_index(foo, som.shape[:2])
-        print("{},{}".format(x, y))
-
-if __name__ == "__main__":
-    sys.exit(main())
-
diff --git a/scripts/make_hmm b/scripts/make_hmm
deleted file mode 100755
index 62be9935e73c6745a2866a3f5f64f7469c4037f9..0000000000000000000000000000000000000000
--- a/scripts/make_hmm
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python3
-
-"""make_hmm.py
-
-(c) Michael Blaß, 2016
-
-Train a PoissonHmm 
-"""
-
-import pathlib
-from optparse import OptionParser
-import sys
-
-
-from apollon import segment
-from apollon.audio import load_audio
-from apollon.onsets import FluxOnsetDetector
-from apollon.signal.spectral import Spectrum
-from apollon.hmm import PoissonHmm
-from apollon.io import save
-
-
-def main():
-
-    def verbose_msg(s):
-        if opts.verbose:
-            print(s)
-
-    usage = 'usage: %prog [OPTIONS] path_to_wav'
-    parser = OptionParser(usage=usage)
-    parser.add_option('-v', '--verbose', action='store_true',
-                      help='enable verbose mode')
-    (opts, args) = parser.parse_args()
-
-    if len(args) == 0:
-        print('Path to .wav-file not specified.')
-        sys.exit(1)
-
-    snd = load_audio(args[0])
-    onsets = FluxOnsetDetector(snd.data, snd.fps)
-    segs = segment.by_onsets(snd.data, 2**11, onsets.index())
-    spctr = Spectrum(segs, snd.fps, window='hamming')
-    feat = spctr.centroid().round().astype(int)
-
-    mod = PoissonHmm(feat, 4, verbose=False)
-    mod.fit(feat)
-
-    # save model
-    out_fname = snd.file.stem + '.hmm'
-    save(mod.params.gamma_, out_fname)
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/setup.cfg b/setup.cfg
index 592b2f66d7dbf67075f7176b55e287e7889a61b2..bbe4c5452dbebe04f4ed43ead21532193fd2c14d 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,32 +1,50 @@
 [metadata]
-name = apollon 
-version = 0.1.2.2
-description = Music analysis based on HMM and SOM
+name = apollon
+version = 0.1.2.1
+description = Feature extraction framework for content-based music analysis.
 long_description = file: README.md
-licence = GPL-3.0
+license = BSD 3-Clause License
 author = Michael Blaß
-author_email = michael.blass@uni-hamburg.de
-keywords = hmm, som, apollon, comsar
+author_email = mblass@posteo.net
+keywords = music, analysis, feature extraction
+project_urls =
+	Source code = https://gitlab.rrz.uni-hamburg.de/bal7668/apollon
+	Bug Tracker = https://gitlab.rrz.uni-hamburg.de/bal7668/apollon/-/issues
 
 classifiers =
+	License :: OSI Approved :: BSD License
 	Programming Language :: Python :: 3
-	Programming Language :: Python :: 3.5
-	Programming Language :: Python :: 3.6
 	Programming Language :: Python :: 3.7
+	Programming Language :: Python :: 3.8
+	Programming Language :: Python :: 3.9
 	Programming Language :: Python :: 3 :: Only
-	Programming Language :: Python :: Implementation :: CPython	
+	Programming Language :: Python :: Implementation :: CPython
 	Topic :: Scientific/Engineering
+	Topic :: Multimedia :: Sound/Audio :: Analysis
 	Intended Audience :: Science/Research
+	Intended Audience :: Information Technology
 
 [options]
 zip_safe = False
-include_package_data = True
-packages = find:
-scripts = scripts/drawtpm, scripts/make_hmm, scripts/get_som_winner, scripts/apollon
+package_dir =
+	=src
+
+packages = find_namespace:
+scripts = scripts/apollon
+
+python_requires = >=3.7
 install_requires = 
-	numpy            >= "15.0.0"
-	scipy            >= "0.19.0"
-	soundfile        >= "0.10.2"
-	matplotlib       >= "2"
-	setuptools       >= "40.0.0"
-	networkx
+	numpy           >= 1.20
+	jsonschema      >= 3.2.0
+	scipy           >= 0.19.0
+	matplotlib      >= 2
+	pandas          >= 0.20
+	soundfile       >= 0.10.2
+	chainsaddiction >= 0.1.2
+
+[options.packages.find]
+where = src
+
+[options.package_data]
+apollon = schema/*.schema.json
diff --git a/setup.py b/setup.py
index 563d6152e7b07cd55061ee9748c42be6c64b3a92..2188a7ea9366f3ed37031b39b0f6b29d5a7d5545 100644
--- a/setup.py
+++ b/setup.py
@@ -1,14 +1,19 @@
-#!/usr/bin/env  python3
-
 from setuptools import setup, Extension
 from setuptools.config import read_configuration
-from numpy.distutils.misc_util import get_numpy_include_dirs
+import numpy as np
 
 
 config = read_configuration('./setup.cfg')
 
-# Extension modules
-psycho_features = Extension('apollon.signal.roughness', sources=['apollon/signal/roughness.c'])
+ext_features = Extension('_features',
+    sources = ['src/apollon/signal/cdim.c',
+               'src/apollon/signal/correlogram.c',
+               'src/apollon/signal/_features_module.c'],
+    include_dirs = ['include', np.get_include()])
+
+ext_som_dist = Extension('_distance',
+        sources = ['src/apollon/som/distance.c',
+                   'src/apollon/som/_distance_module.c'],
+        include_dirs = ['include', np.get_include()])
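+
+# Build sketch: 'python setup.py build_ext --inplace' compiles both
+# extensions against the project headers in 'include' and the numpy C API.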
 
-setup(include_dirs =  get_numpy_include_dirs(),
-      ext_modules  = [psycho_features])
+setup(ext_modules = [ext_features, ext_som_dist])
diff --git a/apollon/__init__.py b/src/apollon/__init__.py
similarity index 76%
rename from apollon/__init__.py
rename to src/apollon/__init__.py
index fc502eafc1ec6b4f34ea6e6fc0214c183e754697..e98db76439ebfe5db09eaca2efd60e85e080ab44 100644
--- a/apollon/__init__.py
+++ b/src/apollon/__init__.py
@@ -1,9 +1,9 @@
 # Licensed under the terms of the BSD-3-Clause license.
 # Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+# mblass@posteo.net
 
 """
-apollon/__init__.py -- Main package initialization.
+Apollon feature extraction framework.
 """
 
 import os as _os
diff --git a/apollon/_defaults.py b/src/apollon/_defaults.py
similarity index 68%
rename from apollon/_defaults.py
rename to src/apollon/_defaults.py
index 2332a2107c64be708cbd630b0694ed0d6b243508..d7c6d4934563c7d6200dd47e50e0430228c65725 100644
--- a/apollon/_defaults.py
+++ b/src/apollon/_defaults.py
@@ -1,12 +1,21 @@
 # Licensed under the terms of the BSD-3-Clause license.
 # Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+# mblass@posteo.net
 
 """
 apollon/_defaults.py --  Default definitions
 """
+import pathlib
 
-TIME_STAMP_FMT = '%Y-%m-%d %H:%M:%S'
+from . import APOLLON_PATH
+
+
+SCHEMA_EXT = '.schema.json'
+SCHEMA_DIR_PATH = pathlib.Path(APOLLON_PATH).parent.joinpath('schema')
+
+DATE_TIME = '%Y-%m-%d %H:%M:%S'
+
+SPL_REF = 2e-5
 
 PP_SIGNAL = {'linewidth': 1, 'linestyle': 'solid', 'color': 'k', 'alpha': .5,
              'zorder': 0}
diff --git a/apollon/aplot.py b/src/apollon/aplot.py
similarity index 74%
rename from apollon/aplot.py
rename to src/apollon/aplot.py
index 3eba53a18da2585559dbc7e91ed8abfc941dba45..baed8592c9bd0cdc9bc38d1434d3794d43366965 100644
--- a/apollon/aplot.py
+++ b/src/apollon/aplot.py
@@ -1,9 +1,10 @@
-# Licensed under the terms of the BSD-3-Clause license.
-# Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+"""apollon/aplot.py
 
-"""
-aplot.py -- General plotting routines.
+General plotting routines.
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß
+mblass@posteo.net
 
 Functions:
     fourplot            Create a four-plot of a time signal.
@@ -12,8 +13,7 @@ Functions:
     onest_decoding      Plot decoded onsets over a signal.
     signal              Plot a time domain signal.
 """
-
-from typing import Optional, Tuple
+from typing import Iterable, Optional, Tuple, Union
 
 import matplotlib.pyplot as _plt
 import matplotlib.cm as _cm
@@ -22,32 +22,52 @@ from scipy import stats as _stats
 
 from . import _defaults
 from . import tools as _tools
-from . types import Array as _Array
+from . types import Array as _Array, Axis
 
 
 Limits = Optional[Tuple[int, int]]
 MplFig = Optional[_plt.Figure]
 FigSize = Tuple[float, float]
 SubplotPos = Optional[Tuple[int, int, int]]
+Axes = Union[Axis, Iterable[Axis]]
 
 
-def _nice_spines(ax, offset: int = 10) -> None:
+def outward_spines(axs: Axes, offset: float = 10.0) -> None:
     """Display only left and bottom spine and displace them.
 
+    Args:
+        axs:     Axis or iterable of axes.
+        offset:  Move the spines ``offset`` pixels in the negative direction.
+
     Note:
         Increasing ``offset`` may break the layout. Since the spine is moved,
-        so is the axis label, which is in turn forced out of the figure's bounds.
+        so is the axis label, which is in turn forced out of the figure's
+        bounds.
+    """
+    for ax in _np.atleast_1d(axs).ravel():
+        ax.spines['left'].set_position(('outward', offset))
+        ax.spines['bottom'].set_position(('outward', offset))
+        ax.spines['top'].set_visible(False)
+        ax.spines['right'].set_visible(False)
+        ax.xaxis.set_ticks_position('bottom')
+        ax.yaxis.set_ticks_position('left')
+
+
+def center_spines(axs: Axes,
+                  intersect: Tuple[float, float] = (0.0, 0.0)) -> None:
+    """Display axes in crosshair fashion.
 
     Args:
-        ax:        Axes to be modified.
-        offset:    Move the spines ``offset`` pixels in the negative direction.
+        axs:        Axis or iterable of axes.
+        intersect:  Coordinate of axes' intersection point.
     """
-    ax.spines['left'].set_position(('outward', offset))
-    ax.spines['bottom'].set_position(('outward', offset))
-    ax.spines['top'].set_visible(False)
-    ax.spines['right'].set_visible(False)
-    ax.xaxis.set_ticks_position('bottom')
-    ax.yaxis.set_ticks_position('left')
+    for ax in _np.atleast_1d(axs).ravel():
+        ax.spines['left'].set_position(('axes', intersect[0]))
+        ax.spines['bottom'].set_position(('axes', intersect[1]))
+        ax.spines['top'].set_visible(False)
+        ax.spines['right'].set_visible(False)
+        ax.xaxis.set_ticks_position('bottom')
+        ax.yaxis.set_ticks_position('left')
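+
+# Usage sketch (hypothetical): given ``fig, axs = _plt.subplots(2, 2)``,
+# ``outward_spines(axs)`` offsets the spines of all four axes at once.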
 
 
 def _new_axis(spines: str = 'nice', fig: MplFig = None, sp_pos: SubplotPos = None,
@@ -101,8 +121,8 @@ def _new_axis_3d(fig: MplFig = None, **kwargs) -> tuple:
     return fig, ax_3d
 
 
-def signal(values: _Array, fps: int = None, time_scale: str = 'seconds', **kwargs) -> tuple:
-    """Plot time series with constant sampling interval
+def signal(values: _Array, fps: int = None, **kwargs) -> tuple:
+    """Plot time series with constant sampling interval.
 
     Args:
         values:        Values of the time series.
@@ -113,15 +133,15 @@ def signal(values: _Array, fps: int = None, time_scale: str = 'seconds', **kwarg
         Figure and axes.
     """
     fig, ax = _new_axis(**kwargs)
-    domain = _np.arange(values.size, dtype=float)
+    domain = _np.arange(values.size, dtype='float64')
 
-    if time_scale == 'seconds':
-        domain /= fps
-        ax.set_xlabel('t [s]')
-        ax.set_ylabel(r'x[$t$]')
-    else:
+    if fps is None:
         ax.set_xlabel('n [samples]')
         ax.set_ylabel(r'x[$n$]')
+    else:
+        domain /= float(fps)
+        ax.set_xlabel('t [s]')
+        ax.set_ylabel(r'x[$t$]')
 
     ax.plot(domain, values, **_defaults.PP_SIGNAL)
 
@@ -212,18 +232,21 @@ def marginal_distr(train_data: _Array, state_means: _Array, stat_dist: _Array, b
     return ax
 
 
-def onsets(odf: _Array, onset_index: _Array, **kwargs) -> tuple:
+def onsets(sig, ons, **kwargs) -> tuple:
     """Indicate onsets on a time series.
 
     Args:
-        odf:            Onset detection function or an arbitrary time series.
-        onset_index:    Onset indices relative to ``odf``.
+        sig:    Input to onset detection.
+        ons:    Onset detector instance.
 
     Returns:
         Figure and axes.
     """
-    fig, ax = signal(odf, fps=None, **kwargs)
-    ax.vlines(onset_index, -1, 1, **_defaults.PP_ONSETS)
+    fig, ax = signal(sig.data, fps=None, **kwargs)
+    odf_domain = _np.linspace(ons.n_perseg // 2, ons.hop_size * ons.odf.size,
+                              ons.odf.size)
+    ax.plot(odf_domain, ons.odf/ons.odf.max(), alpha=.8, lw=2)
+    ax.vlines(ons.index(), -1, 1, colors='C1', lw=2, alpha=.8)
     return fig, ax
 
 
diff --git a/src/apollon/audio.py b/src/apollon/audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e1bb969584b3de66c0e1cb63b8681e6600cc0eb
--- /dev/null
+++ b/src/apollon/audio.py
@@ -0,0 +1,162 @@
+"""apollon/audio.py -- Wrapper classes for audio data.
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß, mblass@posteo.net
+
+Classes:
+    AudioFile   Representation of an audio file.
+
+Functions:
+    fti16        Cast float to int16.
+    load_audio   Load .wav file.
+"""
+import hashlib
+import pathlib
+
+import matplotlib.pyplot as plt
+import numpy as np
+import soundfile as _sf
+
+from . signal import tools as _ast
+from . types import Array, PathType
+
+
+class AudioFile:
+    """Representation of an audio file."""
+    def __init__(self, path: PathType) -> None:
+        """Load an audio file.
+
+        Args:
+            path:   Path to file.
+        """
+        self._path = pathlib.Path(path)
+        self._file = _sf.SoundFile(self.path)
+
+    @property
+    def data(self) -> Array:
+        """Return audio data as array."""
+        return self.read()
+
+    @property
+    def file_name(self) -> str:
+        """Return source file name."""
+        return self._path.name
+
+    @property
+    def hash(self) -> str:
+        """Compute sha256 hash."""
+        obj = hashlib.sha256(self.data.tobytes())
+        return obj.hexdigest()
+
+    @property
+    def n_channels(self) -> int:
+        """Return number of channels."""
+        return self._file.channels
+
+    @property
+    def n_frames(self) -> int:
+        """Return number of frames."""
+        return self._file.frames
+
+    @property
+    def fps(self) -> int:
+        """Return sample rate."""
+        return self._file.samplerate
+
+    @property
+    def path(self) -> str:
+        """Return path of audio file."""
+        return str(self._path)
+
+    @property
+    def shape(self) -> tuple:
+        """Return (n_frames, n_channels)."""
+        return self.n_frames, self.n_channels
+
+
+    # @property
+    # def source_id(self) -> SourceId:
+    #     """Return source id built from file name and content hash."""
+    #     return SourceId(self._path.name.split('.')[0], self.hash)
+
+    def close(self) -> None:
+        """Close the file."""
+        self._file.close()
+
+    def plot(self) -> None:
+        """Plot audio as wave form."""
+        fig = plt.figure(figsize=(14, 7))
+        ax1 = fig.add_subplot(1, 1, 1)
+        ax1.plot(self.data)
+
+    def __str__(self):
+        return "<{}, {} kHz, {:.3} s>" \
+               .format(self._path.name, self.fps/1000, self.n_frames/self.fps)
+
+    def __repr__(self):
+        return self.__str__()
+
+    def __len__(self):
+        return self.n_frames
+
+    def read(self, n_frames: int = None, offset: int = None, norm: bool = False,
+             mono: bool = True, dtype: str = 'float64') -> Array:
+        # pylint: disable=too-many-arguments
+        """Read from audio file.
+
+        Args:
+            n_frames:  Number of frames to read.
+                       If negative, file is read until EOF.
+            offset:    Start position for reading.
+            norm:      If ``True``, normalize the data.
+            mono:      If ``True``, mixdown all channels.
+            dtype:     Dtype of output array.
+
+        Returns:
+            Two-dimensional numpy array of shape (n_frames, n_channels).
+        """
+        n_frames = -1 if n_frames is None else n_frames
+        offset = offset or 0
+        if offset >= 0:
+            self._file.seek(offset)
+            data = self._read(n_frames, dtype=dtype)
+        else:
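+            # Negative offset: zero-pad the start of the output so it
+            # still has ``n_frames`` rows (assumes a non-negative
+            # ``n_frames`` in this branch).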
+            data = np.zeros((n_frames, self.n_channels))
+            n_to_read = offset + n_frames
+            if n_to_read > 0:
+                self._file.seek(0)
+                data[-n_to_read:] = self._read(n_to_read, dtype=dtype)
+
+        if mono and self.n_channels > 1:
+            data = data.sum(axis=1, keepdims=True) / self.n_channels
+        if norm:
+            data = _ast.normalize(data)
+        return data
+
+    def _read(self, n_frames: int, dtype: str = 'float64') -> Array:
+        return self._file.read(n_frames, dtype=dtype, always_2d=True,
+                               fill_value=0)
+
+
+def fti16(inp: Array) -> Array:
+    """Cast audio loaded as float to int16.
+
+    Args:
+        inp:    Input array of dtype float64.
+
+    Returns:
+        Array of dtype int16.
+    """
+    return np.clip(np.floor(inp*2**15), -2**15, 2**15-1).astype('int16')
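+
+# Example values (assuming full-scale float input):
+#     fti16(np.array([-1.0, 0.0, 0.5]))
+#     -> array([-32768, 0, 16384], dtype=int16)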
+
+
+def load_audio(path: PathType) -> AudioFile:
+    """Load an audio file.
+
+    Args:
+        path:  Path to audio file.
+
+    Return:
+        Audio file representation.
+    """
+    return AudioFile(path)
diff --git a/src/apollon/container.py b/src/apollon/container.py
new file mode 100644
index 0000000000000000000000000000000000000000..af849cc13e19570134c28b6a3e8bfc027cb05a04
--- /dev/null
+++ b/src/apollon/container.py
@@ -0,0 +1,182 @@
+""" apollon/container.py -- Container Classes.
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß
+mblass@posteo.net
+
+Classes:
+    FeatureSpace
+    Params
+"""
+import csv
+from dataclasses import dataclass, asdict
+import json
+import pathlib
+import sys
+from typing import (Any, ClassVar, Dict, List, Optional, Tuple, Type, TypeVar)
+
+import jsonschema
+
+from . import io
+from . io.json import ArrayEncoder
+from . types import Schema, PathType
+
+
+GenericParams = TypeVar('GenericParams', bound='Params')
+
+@dataclass
+class Params:
+    """Parameter base class."""
+    _schema: ClassVar[Schema] = {}
+
+    @property
+    def schema(self) -> dict:
+        """Returns the serialization schema."""
+        return self._schema
+
+    @classmethod
+    def from_dict(cls: Type[GenericParams], instance: dict) -> GenericParams:
+        """Construct Params from dictionary"""
+        return cls(**instance)
+
+    def to_dict(self) -> dict:
+        """Returns parameters as dictionary."""
+        return asdict(self)
+
+    def to_json(self, path: PathType) -> None:
+        """Write parameters to JSON file.
+
+        Args:
+            path:  File path.
+        """
+        instance = self.to_dict()
+        jsonschema.validate(instance, self.schema, jsonschema.Draft7Validator)
+        with pathlib.Path(path).open('w') as fobj:
+            json.dump(instance, fobj)
+
+
+
+class NameSpace:
+    """Simple name space object."""
+    def __init__(self, **kwargs):
+        for key, val in kwargs.items():
+            if isinstance(val, dict):
+                val = FeatureSpace(**val)
+            self.__dict__[key] = val
+
+
+class FeatureSpace(NameSpace):
+    """Container class for feature vectors."""
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def update(self, key: str, val: Any) -> None:
+        """Update the set of parameters.
+
+        Args:
+            key:  Field name.
+            val:  Field value.
+        """
+        self.__dict__[key] = val
+
+    def items(self) -> List[Tuple[str, Any]]:
+        """Provides the FeatureSpace's items.
+
+        Returns:
+            List of (key, value) pairs.
+        """
+        return list(self.__dict__.items())
+
+    def keys(self) -> List[str]:
+        """Provides the FeatureSpace's keys.
+
+        Returns:
+            List of keys.
+        """
+        return list(self.__dict__.keys())
+
+    def values(self) -> List[Any]:
+        """Provides the FeatureSpace's values.
+
+        Returns:
+            List of values.
+        """
+        return list(self.__dict__.values())
+
+    def as_dict(self) -> Dict[str, Any]:
+        """Returns the FeatureSpace converted to a dict."""
+        flat_dict = {}
+        for key, val in self.__dict__.items():
+            try:
+                flat_dict[key] = val.as_dict()
+            except AttributeError:
+                flat_dict[key] = val
+        return flat_dict
+
+    def to_csv(self, path: str = None) -> None:
+        """Write FeatureSpace to csv file.
+
+        If ``path`` is ``None``, comma separated values are written to
+        stdout.
+
+        Args:
+            path:  Output file path.
+        """
+        features = {}
+        for name, space in self.items():
+            try:
+                features.update({feat: val for feat, val in space.items()})
+            except AttributeError:
+                features.update({name: space})
+
+        field_names = ['']
+        field_names.extend(features.keys())
+
+        if path is None:
+            csv_writer = csv.DictWriter(sys.stdout, delimiter=',', fieldnames=field_names)
+            self._write(csv_writer, features)
+        else:
+            with open(path, 'w', newline='') as csv_file:
+                csv_writer = csv.DictWriter(csv_file, delimiter=',', fieldnames=field_names)
+                self._write(csv_writer, features)
+
+    def __getitem__(self, key):
+        return self.__dict__[key]
+
+    @staticmethod
+    def _write(csv_writer, features):
+        csv_writer.writeheader()
+
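+        # Emit one row per index until the first feature column runs out
+        # of elements.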
+        i = 0
+        while True:
+            try:
+                row = {key: val[i] for key, val in features.items()}
+                row[''] = i
+                csv_writer.writerow(row)
+                i += 1
+            except IndexError:
+                break
+
+    def to_json(self, path: str = None) -> Optional[str]:
+        """Convert FeatureSpace to JSON.
+
+        If ``path`` is ``None``, this method returns the data of the
+        ``FeatureSpace`` as a JSON-formatted string. Otherwise, the data
+        is written to ``path``.
+
+        Args:
+            path:  Output file path.
+
+        Returns:
+            FeatureSpace as JSON-formatted string if ``path`` is ``None``,
+            else ``None``.
+        """
+        if path is None:
+            return json.dumps(self.as_dict(), cls=ArrayEncoder)
+
+        with open(path, 'w') as json_file:
+            json.dump(self.as_dict(), json_file, cls=ArrayEncoder)
+
+        return None
diff --git a/apollon/datasets.py b/src/apollon/datasets.py
similarity index 96%
rename from apollon/datasets.py
rename to src/apollon/datasets.py
index ec6544cd749fe62f282d2f2ace3c948223c1ccaf..6b0ef30f81e315aad9f92e2fc58b198ee5dd0476 100644
--- a/apollon/datasets.py
+++ b/src/apollon/datasets.py
@@ -1,6 +1,6 @@
 # Licensed under the terms of the BSD-3-Clause license.
 # Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+# mblass@posteo.net
 
 """
 datasets.py -- Load test data sets.
diff --git a/src/apollon/fractal.py b/src/apollon/fractal.py
new file mode 100644
index 0000000000000000000000000000000000000000..c38ee4fb2490d9dd5692331dfc1d92a4e30cb5e6
--- /dev/null
+++ b/src/apollon/fractal.py
@@ -0,0 +1,138 @@
+# Licensed under the terms of the BSD-3-Clause license.
+# Copyright (C) 2019 Michael Blaß
+# mblass@posteo.net
+
+"""apollon/fractal.py
+
+Tools for estimating fractal dimensions.
+
+Functions:
+    delay_embedding     Delay embedding of a signal.
+    embedding_dists     Pairwise distances of delayed vectors.
+    embedding_entropy   Information entropy of an embedding.
+    lorenz_attractor    Simulate Lorenz system.
+"""
+from typing import Tuple
+
+import numpy as np
+from scipy import stats
+from scipy.spatial import distance
+
+from . types import Array
+
+
+def log_histogram_bin_edges(dists, n_bins: int, default: float = None):
+    """Compute histogram bin edges that are equidistant in log space.
+
+    Args:
+        dists:    Pairwise distances.
+        n_bins:   Number of bins.
+        default:  Unused; reserved as explicit lower bound.
+
+    Returns:
+        Array of ``n_bins`` + 1 bin edges.
+    """
+    lower_bound = dists.min()
+    upper_bound = dists.max()
+
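+    # A zero lower bound is invalid in log space. Fall back to the
+    # smallest non-zero difference, then the smallest non-zero distance,
+    # then machine epsilon.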
+    if lower_bound == 0:
+        lower_bound = np.absolute(np.diff(dists)).min()
+
+    if lower_bound == 0:
+        sd_it = iter(np.sort(dists))
+        while not lower_bound:
+            lower_bound = next(sd_it)
+
+    if lower_bound == 0:
+        lower_bound = np.finfo('float64').eps
+
+    return np.geomspace(lower_bound, upper_bound, n_bins+1)
+
+
+def delay_embedding(inp: Array, delay: int, m_dim: int) -> Array:
+    """Compute a delay embedding of ``inp``.
+
+    This method makes a hard cut at the upper bound of ``inp`` and
+    does not perform zero padding to match the input size.
+
+    Params:
+        inp:   One-dimensional input vector.
+        delay: Vector delay in samples.
+        m_dim: Number of embedding dimensions.
+
+    Returns:
+        Two-dimensional delay embedding array whose nth row is the
+        vector delayed by n * ``delay`` samples.
+    """
+    max_idx = inp.size - ((m_dim-1)*delay)
+    emb_vects = np.empty((max_idx, m_dim))
+    for i in range(max_idx):
+        emb_vects[i] = inp[i:i+m_dim*delay:delay]
+    return emb_vects
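+
+# Shape sketch: an input of size N yields (N - (m_dim-1)*delay) rows;
+# e.g. delay_embedding(np.arange(10), 2, 3) has six rows, the first
+# being [0, 2, 4].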
+
+
+def embedding_dists(inp: Array, delay: int, m_dim: int,
+                    metric: str = 'euclidean') -> Array:
+    """Perform a delay embedding and return the pairwise distances
+    of the delayed vectors.
+
+    The returned vector is the flattened upper triangle of the distance
+    matrix.
+
+    Params:
+        inp:    One-dimensional input vector.
+        delay:  Vector delay in samples.
+        m_dim:  Number of embedding dimensions.
+        metric: Metric to use.
+
+    Returns:
+        Flattened upper triangle of the distance matrix.
+    """
+    emb_vects = delay_embedding(inp, delay, m_dim)
+    return distance.pdist(emb_vects, metric)
+
+
+def embedding_entropy(emb: Array, n_bins: int) -> Array:
+    """Compute the information entropy from an embedding.
+
+    Params:
+        emb:     Input embedding.
+        n_bins:  Number of bins per dimension.
+
+    Returns:
+        Entropy of the embedding.
+    """
+    counts, _ = np.histogramdd(emb, bins=n_bins)
+    return stats.entropy(counts.flatten())
+
+
+def __lorenz_system(x, y, z, s, r, b):
+    """Compute the derivatives of the Lorenz system of coupled
+       differential equations.
+
+    Params:
+        x, y, z    (float) Current system state.
+        s, r, b    (float) System parameters.
+
+    Return:
+        xyz_dot    (array) Derivatives of current system state.
+    """
+    xyz_dot = np.array([s * (y - x),
+                        x * (r - z) - y,
+                        x * y - b * z])
+    return xyz_dot
+
+
+def lorenz_attractor(n, sigma=10, rho=28, beta=8/3,
+                     init_xyz=(0., 1., 1.05), dt=0.01):
+    """Simulate a Lorenz system with given parameters.
+
+    Params:
+        n        (int)   Number of data points to generate.
+        sigma    (float) System parameter.
+        rho      (float) System parameter.
+        beta     (float) System parameter.
+        init_xyz (tuple) Initial system state.
+        dt       (float) Step size.
+
+    Return:
+        xyz    (array) System states.
+    """
+    xyz = np.empty((n, 3))
+    xyz[0] = init_xyz
+
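+    # Forward-Euler integration: advance the state along the current
+    # derivative with step size dt.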
+    for i in range(n-1):
+        xyz_prime = __lorenz_system(*xyz[i], sigma, rho, beta)
+        xyz[i+1] = xyz[i] + xyz_prime * dt
+
+    return xyz
diff --git a/apollon/signal/__init__.py b/src/apollon/hmm/__init__.py
similarity index 100%
rename from apollon/signal/__init__.py
rename to src/apollon/hmm/__init__.py
diff --git a/apollon/hmm/poisson/poisson_hmm.py b/src/apollon/hmm/poisson.py
similarity index 89%
rename from apollon/hmm/poisson/poisson_hmm.py
rename to src/apollon/hmm/poisson.py
index dfdfe90c649759a37a4b78ac0a3f0ee8a7abc415..afaa2c796ef862af709c547cfa2ced80f04f6fdb 100644
--- a/apollon/hmm/poisson/poisson_hmm.py
+++ b/src/apollon/hmm/poisson.py
@@ -1,6 +1,6 @@
 """
 poisson_hmm.py -- HMM with Poisson-distributed state dependent process.
-Copyright (C) 2018  Michael Blaß <michael.blass@uni-hamburg.de>
+Copyright (C) 2018  Michael Blaß <mblass@posteo.net>
 
 Functions:
     to_txt                  Serializes model to text file.
@@ -13,21 +13,19 @@ Classes:
     PoissonHMM              HMM with univariat Poisson-distributed states.
 """
 
-#import json as _json
-#import pathlib as _pathlib
 import typing as _typing
 import warnings as _warnings
 
 import numpy as _np
 
-import chains_addiction as _ca
+import chainsaddiction as _ca
 
 import apollon
 from apollon import types as _at
-from apollon import io as _io
+import apollon.io.io as aio
 from apollon.types import Array as _Array
 from apollon import tools as _tools
-from apollon.hmm import hmm_utilities as _utils
+import apollon.hmm.utilities as ahu
 
 
 class PoissonHmm:
@@ -150,7 +148,7 @@ class _HyperParams:
 
         self.gamma_dp = _tools.assert_and_pass(self._assert_dirichlet_param, gamma_dp)
         self.delta_dp = _tools.assert_and_pass(self._assert_dirichlet_param, delta_dp)
-        self.fill_diag = _tools.assert_and_pass(_utils.assert_st_val, fill_diag)
+        self.fill_diag = _tools.assert_and_pass(ahu.assert_st_val, fill_diag)
 
         self.init_lambda_meth = self._assert_lambda(init_lambda)
         self.init_gamma_meth = self._assert_gamma(init_gamma, gamma_dp, fill_diag)
@@ -176,7 +174,7 @@ class _HyperParams:
             TypeError
         """
         if isinstance(_lambda, str):
-            if _lambda not in _utils.StateDependentMeansInitializer.methods:
+            if _lambda not in ahu.StateDependentMeansInitializer.methods:
                 raise ValueError('Unrecognized initialization method `{}`'.format(_lambda))
 
         elif isinstance(_lambda, _np.ndarray):
@@ -206,7 +204,7 @@ class _HyperParams:
         """
         if isinstance(_gamma, str):
 
-            if _gamma not in _utils.TpmInitializer.methods:
+            if _gamma not in ahu.TpmInitializer.methods:
                 raise ValueError('Unrecognized initialization method `{}`'.format(_gamma))
 
             if _gamma == 'dirichlet' and gamma_dp is None:
@@ -218,7 +216,7 @@ class _HyperParams:
                                   '`uniform` for parameter `gamma`.'))
 
         elif isinstance(_gamma, _np.ndarray):
-            _utils.assert_st_matrix(_gamma)
+            ahu.assert_st_matrix(_gamma)
         else:
             raise TypeError(('Unrecognized type of argument `init_gamma`. Expected `str` or '
                              '`numpy.ndarray`, got {}.\n').format(type(_gamma)))
@@ -241,7 +239,7 @@ class _HyperParams:
         """
         if isinstance(_delta, str):
 
-            if _delta not in _utils.StartDistributionInitializer.methods:
+            if _delta not in ahu.StartDistributionInitializer.methods:
                 raise ValueError('Unrecognized initialization method `{}`'.format(_delta))
 
             if _delta == 'dirichlet' and delta_dp is None:
@@ -249,7 +247,7 @@ class _HyperParams:
                                   '`dirichlet` for parameter `delta`.'))
 
         elif isinstance(_delta, _np.ndarray):
-            _utils.assert_st_vector(_delta)
+            ahu.assert_st_vector(_delta)
 
         else:
             raise TypeError(('Unrecognized type of argument `init_delta`. Expected `str` or '
@@ -307,14 +305,17 @@ class _InitParams:
         if isinstance(hy_params.init_lambda_meth, _np.ndarray):
             return hy_params.init_lambda_meth.copy()
 
+        if hy_params.init_lambda_meth == 'hist':
+            return ahu.StateDependentMeansInitializer.hist(X, hy_params.m_states)
+
         if hy_params.init_lambda_meth == 'linear':
-            return _utils.StateDependentMeansInitializer.linear(X, hy_params.m_states)
+            return ahu.StateDependentMeansInitializer.linear(X, hy_params.m_states)
 
         if hy_params.init_lambda_meth == 'quantile':
-            return _utils.StateDependentMeansInitializer.quantile(X, hy_params.m_states)
+            return ahu.StateDependentMeansInitializer.quantile(X, hy_params.m_states)
 
         if hy_params.init_lambda_meth == 'random':
-            return _utils.StateDependentMeansInitializer.random(X, hy_params.m_states)
+            return ahu.StateDependentMeansInitializer.random(X, hy_params.m_states)
 
         raise ValueError("Unknown init method or init_lambda_meth is not an array.")
 
@@ -326,13 +327,13 @@ class _InitParams:
             return hy_params.init_gamma_meth.copy()
 
         if hy_params.init_gamma_meth == 'dirichlet':
-            return _utils.TpmInitializer.dirichlet(hy_params.m_states, hy_params.gamma_dp)
+            return ahu.TpmInitializer.dirichlet(hy_params.m_states, hy_params.gamma_dp)
 
         if hy_params.init_gamma_meth == 'softmax':
-            return _utils.TpmInitializer.softmax(hy_params.m_states)
+            return ahu.TpmInitializer.softmax(hy_params.m_states)
 
         if hy_params.init_gamma_meth == 'uniform':
-            return _utils.TpmInitializer.uniform(hy_params.m_states, hy_params.fill_diag)
+            return ahu.TpmInitializer.uniform(hy_params.m_states, hy_params.fill_diag)
 
         raise ValueError("Unknown init method or init_gamma_meth is not an array.")
 
@@ -342,22 +343,22 @@ class _InitParams:
             return hy_params.init_delta_meth.copy()
 
         if hy_params.init_delta_meth == 'dirichlet':
-            return _utils.StartDistributionInitializer.dirichlet(hy_params.m_states,
+            return ahu.StartDistributionInitializer.dirichlet(hy_params.m_states,
                                                                  hy_params.delta_dp)
 
         if hy_params.init_delta_meth == 'softmax':
-            return _utils.StartDistributionInitializer.softmax(hy_params.m_states)
+            return ahu.StartDistributionInitializer.softmax(hy_params.m_states)
 
         if hy_params.init_delta_meth == 'stationary':
-            return _utils.StartDistributionInitializer.stationary(self.gamma_)
+            return ahu.StartDistributionInitializer.stationary(self.gamma_)
 
         if hy_params.init_delta_meth == 'uniform':
-            return _utils.StartDistributionInitializer.uniform(hy_params.m_states)
+            return ahu.StartDistributionInitializer.uniform(hy_params.m_states)
 
         raise ValueError("Unknown init method or init_delta_meth is not an array.")
 
     def __str__(self):
-        with _io.array_print_opt(precision=4, suppress=True):
+        with aio.array_print_opt(precision=4, suppress=True):
             out = 'Initial Lambda:\n{}\n\nInitial Gamma:\n{}\n\nInitial Delta:\n{}\n'
             out = out.format(*self.__dict__.values())
         return out
@@ -391,7 +392,7 @@ class Params:
         self.delta_ = delta_
 
     def __str__(self):
-        with _io.array_print_opt(precision=4, suppress=True):
+        with aio.array_print_opt(precision=4, suppress=True):
             out = 'Lambda:\n{}\n\nGamma:\n{}\n\nDelta:\n{}\n'
             out = out.format(*self.__dict__.values())
         return out
diff --git a/apollon/hmm/hmm_utilities.py b/src/apollon/hmm/utilities.py
similarity index 94%
rename from apollon/hmm/hmm_utilities.py
rename to src/apollon/hmm/utilities.py
index b3286b45a5ddcb8984037d39d2d5f1b8c7fc0809..b1cba4f91f498494c41d49e020d7ed2f9dfa0409 100644
--- a/apollon/hmm/hmm_utilities.py
+++ b/src/apollon/hmm/utilities.py
@@ -1,6 +1,6 @@
 # Licensed under the terms of the BSD-3-Clause license.
 # Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+# mblass@posteo.net
 
 """
 Functions:
@@ -120,12 +120,31 @@ def assert_st_val(val: float):
 class StateDependentMeansInitializer:
     """Initializer methods for state-dependent vector of means."""
 
-    methods = ('linear', 'quantile', 'random')
+    methods = ('hist', 'linear', 'quantile', 'random')
+
+    @staticmethod
+    def hist(data: _np.ndarray, m_states: int) -> _np.ndarray:
+        """Initialize state-dependent means based on a histogram of ``data``.
+
+        The histogram is calculated with ten bins. The centers of the
+        ``m_states`` most frequent bins are returned as estimates of lambda.
+
+        Args:
+            data:     Input data.
+            m_states: Number of states.
+
+        Returns:
+            Lambda estimates.
+        """
+        frqs, bin_edges = _np.histogram(data, bins=10)
+        bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
+        return _np.sort(bin_centers[frqs.argsort()[::-1]][:m_states])
+
 
     @staticmethod
     def linear(X: _np.ndarray, m: int) -> _np.ndarray:
         """Initialize state-dependent means with `m` linearily spaced values
-        from ]min(data), max(data)[.
+        from [min(data), max(data)].
 
             Args:
                 X    (np.ndarray)   Input data.
@@ -134,8 +153,7 @@ class StateDependentMeansInitializer:
             Returns:
                 (np.ndarray)    Initial state-dependent means of shape (m, ).
         """
-        bordered_space = _np.linspace(X.min(), X.max(), m+2)
-        return bordered_space[1:-1]
+        return _np.linspace(X.min(), X.max(), m)
 
 
     @staticmethod
diff --git a/src/apollon/io/__init__.py b/src/apollon/io/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..741ec778b116109c38c113d1b97cbb748459c78b
--- /dev/null
+++ b/src/apollon/io/__init__.py
@@ -0,0 +1 @@
+from . import json
diff --git a/src/apollon/io/io.py b/src/apollon/io/io.py
new file mode 100644
index 0000000000000000000000000000000000000000..6623b6be97f600d8cd8dd70b02ed8286644d9fd6
--- /dev/null
+++ b/src/apollon/io/io.py
@@ -0,0 +1,212 @@
+"""apollon/io/io.py -- General I/O functionality.
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß, mblass@posteo.net
+
+Classes:
+    PoissonHmmEncoder       JSON encoder for PoissonHmm.
+    WavFileAccessControl    Descriptor for .wav file attributes.
+
+Functions:
+    array_print_opt         Set format for printing numpy arrays.
+    generate_outpath        Compute path for feature output.
+    load_from_npy           Load an array from numpy binary format.
+    load_from_pickle        Load pickled data.
+    repath                  Change path but keep file name.
+    save_to_npy             Save an array in numpy binary format.
+    save_to_pickle          Pickle some data.
+"""
+from contextlib import contextmanager as _contextmanager
+import pathlib
+import pickle
+from typing import Any, Optional
+
+import numpy as np
+
+from .. types import Array, PathType
+from . json import ArrayEncoder
+
+def generate_outpath(in_path: PathType,
+                     out_path: Optional[PathType],
+                     suffix: str = None) -> PathType:
+    """Generates file paths for feature and HMM output files.
+
+    If ``out_path`` is ``None``, the basename of ``in_path`` is taken
+    with the extension replaced by ``suffix``.
+
+    Args:
+        in_path:   Path to file under analysis.
+        out_path:  Commandline argument.
+        suffix:    File extension.
+
+    Returns:
+        Valid output path.
+    """
+    in_path = pathlib.Path(in_path)
+    if suffix is None:
+        default_fname = '{}'.format(in_path.stem)
+    else:
+        default_fname = '{}.{}'.format(in_path.stem, suffix)
+
+    if out_path is None:
+        out_path = pathlib.Path(default_fname)
+    else:
+        out_path = pathlib.Path(out_path)
+        if not out_path.suffix:
+            out_path = out_path.joinpath(default_fname)
+        if not out_path.parent.is_dir():
+            msg = f'Error. Path "{out_path.parent!s}" does not exist.'
+            raise ValueError(msg)
+    return out_path
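+
+# Example (hypothetical paths): generate_outpath('audio/song.wav', None,
+# 'json') returns pathlib.Path('song.json').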
+
+class PoissonHmmEncoder(ArrayEncoder):
+    """JSON encoder for PoissonHmm.
+    """
+    def default(self, o):
+        """Custom default JSON encoder. Properly handles <class 'PoissonHMM'>.
+
+        Note: Falls back to ``ArrayEncoder`` for all types that do not implement
+        a ``to_dict()`` method.
+
+        Params:
+            o (any)  Object to encode.
+
+        Returns:
+            (dict)
+        """
+        if isinstance(o, HMM):
+            items = {}
+            for attr in o.__slots__:
+                try:
+                    items[attr] = getattr(o, attr).to_dict()
+                except AttributeError:
+                    items[attr] = getattr(o, attr)
+            return items
+        return ArrayEncoder.default(self, o)
+
+class WavFileAccessControl:
+    """Control initialization and access to the ``file`` attribute of
+    class ``AudioData``.
+
+    This assures that the path indeed points to an existing .wav file.
+    Otherwise an error is raised. The path is saved as an absolute path
+    and the attribute is read-only.
+    """
+
+    def __init__(self):
+        """Initialize the attribute store."""
+        self.__attribute = {}
+
+    def __get__(self, obj, objtype):
+        return self.__attribute[obj]
+
+    def __set__(self, obj, file_name):
+        if obj not in self.__attribute.keys():
+            _path = pathlib.Path(file_name).resolve()
+            if _path.exists():
+                if _path.is_file():
+                    if _path.suffix == '.wav':
+                        self.__attribute[obj] = _path
+                    else:
+                        raise IOError('`{}` is not a .wav file.'
+                                      .format(file_name))
+                else:
+                    raise IOError('`{}` is not a file.'.format(file_name))
+            else:
+                raise FileNotFoundError('`{}` does not exist.'
+                                        .format(file_name))
+        else:
+            raise AttributeError('File name cannot be changed.')
+
+    def __delete__(self, obj):
+        del self.__attribute[obj]
+
+
+@_contextmanager
+def array_print_opt(*args, **kwargs):
+    """Set print format for numpy arrays.
+
+    Thanks to unutbu:
+    https://stackoverflow.com/questions/2891790/how-to-pretty-print-a-
+    numpy-array-without-scientific-notation-and-with-given-pre
+    """
+    std_options = np.get_printoptions()
+    np.set_printoptions(*args, **kwargs)
+    try:
+        yield
+    finally:
+        np.set_printoptions(**std_options)
+
+
+def load_from_pickle(path: PathType) -> Any:
+    """Load a pickled file.
+
+    Args:
+        path:  Path to file.
+
+    Returns:
+        Unpickled object
+    """
+    path = pathlib.Path(path)
+    with path.open('rb') as file:
+        data = pickle.load(file)
+    return data
+
+
+def repath(current_path: PathType, new_path: PathType,
+           ext: Optional[str] = None) -> PathType:
+    """Change the path and keep the file name. Optionally change the
+    extension, too.
+
+    Args:
+        current_path:  The path to change.
+        new_path:      The new path.
+        ext:           Change file extension if ``ext`` is not None.
+
+    Returns:
+        New path.
+    """
+    current_path = pathlib.Path(current_path)
+    new_path = pathlib.Path(new_path)
+    if ext is None:
+        new_path = new_path.joinpath(current_path.name)
+    else:
+        ext = ext if ext.startswith('.') else '.' + ext
+        new_path = new_path.joinpath(current_path.stem + ext)
+    return new_path
+
+
+def save_to_pickle(data: Any, path: PathType) -> None:
+    """Pickles data to path.
+
+    Args:
+        data:  Pickleable object.
+        path:  Path to save the file.
+    """
+    path = pathlib.Path(path)
+    with path.open('wb') as file:
+        pickle.dump(data, file)
+
+
+def save_to_npy(data: Array, path: PathType) -> None:
+    """Save an array to numpy binary format without using pickle.
+
+    Args:
+        data:  Numpy array.
+        path:  Path to save the file.
+    """
+    path = pathlib.Path(path)
+    with path.open('wb') as file:
+        np.save(file, data, allow_pickle=False)
+
+
+def load_from_npy(path: PathType) -> Array:
+    """Load data from numpy's binary format.
+
+    Args:
+        path:  File path.
+
+    Returns:
+        Data as numpy array.
+    """
+    path = pathlib.Path(path)
+    with path.open('rb') as file:
+        data = np.load(file, allow_pickle=False)
+    return data
+
diff --git a/src/apollon/io/json.py b/src/apollon/io/json.py
new file mode 100644
index 0000000000000000000000000000000000000000..8161fd9a4923ad79690ea8286a8e8060151579ad
--- /dev/null
+++ b/src/apollon/io/json.py
@@ -0,0 +1,156 @@
+"""apollon/io/json.py -- General JSON IO.
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2020 Michael Blaß, mblass@posteo.net
+
+Classes:
+    ArrayEncoder
+
+Functions:
+    dump
+    decode_ndarray
+    encode_ndarray
+    load
+    validate_ndarray
+"""
+import json
+import pathlib
+import pkg_resources
+from typing import Any, Union
+
+import jsonschema
+import numpy as np
+
+from .. import APOLLON_PATH
+from .. _defaults import SCHEMA_DIR_PATH, SCHEMA_EXT
+from .. types import Array, PathType
+
+
+def load_schema(schema_name: str) -> dict:
+    """Load a JSON schema.
+
+    This function searches within apollon's own schema repository.
+    If a schema is found, it is additionally validated against Draft 7.
+
+    Args:
+        schema_name:  Name of schema. Must be file name without extension.
+
+    Returns:
+        Schema instance.
+
+    Raises:
+        IOError
+    """
+    schema_path = 'schema/' + schema_name + SCHEMA_EXT
+    if pkg_resources.resource_exists('apollon', schema_path):
+        schema = pkg_resources.resource_string('apollon', schema_path)
+        schema = json.loads(schema)
+        jsonschema.Draft7Validator.check_schema(schema)
+        return schema
+    raise IOError(f'Schema ``{schema_name}`` not found.')
+
+
+def dump(obj: Any, path: PathType) -> None:
+    """Write ``obj`` to JSON file.
+
+    This function can handle numpy arrays.
+
+    Args:
+        obj:   Object to be encoded.
+        path:  Output file path.
+    """
+    path = pathlib.Path(path)
+    with path.open('w') as json_file:
+        json.dump(obj, json_file, cls=ArrayEncoder)
+
+
+def load(path: PathType):
+    """Load JSON file.
+
+    Args:
+        path: Path to file.
+
+    Returns:
+        Decoded JSON data; encoded ndarrays are restored as numpy arrays.
+    """
+    path = pathlib.Path(path)
+    with path.open('r') as fobj:
+        return json.load(fobj, object_hook=_ndarray_hook)
+
+
+def validate_ndarray(encoded_arr: dict) -> bool:
+    """Check whether ``encoded_arr`` is a valid instance of
+    ``ndarray.schema.json``.
+
+    Args:
+        encoded_arr:  Instance to validate.
+
+    Returns:
+        ``True``, if instance is valid.
+    """
+    return _NDARRAY_VALIDATOR.is_valid(encoded_arr)
+
+
+def decode_ndarray(instance: dict) -> Array:
+    """Decode numerical numpy arrays from a JSON data stream.
+
+    Args:
+        instance:  Instance of ``ndarray.schema.json``.
+
+    Returns:
+        Numpy array.
+    """
+    _NDARRAY_VALIDATOR.validate(instance)
+    return np.array(instance['data'], dtype=instance['__dtype__'])
+
+
+def encode_ndarray(arr: Array) -> dict:
+    """Transform a numpy array to a JSON-serializable dict.
+
+    Array must have a numerical dtype. Datetime objects are currently
+    not supported.
+
+    Args:
+        arr:  Numpy ndarray.
+
+    Returns:
+        JSON-serializable dict adhering to ``ndarray.schema.json``.
+    """
+    return {'__ndarray__': True, '__dtype__': arr.dtype.str,
+            'data': arr.tolist()}
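+
+# Round-trip sketch (assuming a numerical array):
+#     enc = encode_ndarray(np.arange(3))
+#     assert np.array_equal(decode_ndarray(enc), np.arange(3))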
+
+
+def _ndarray_hook(inp: dict) -> Union[Array, dict]:
+    try:
+        return decode_ndarray(inp)
+    except jsonschema.ValidationError:
+        return inp
+
+
+class ArrayEncoder(json.JSONEncoder):
+    # pylint: disable=E0202
+    # Issue: False positive for E0202 (method-hidden) #414
+    # https://github.com/PyCQA/pylint/issues/414
+    """Encode np.ndarrays to JSON.
+
+    Simply set the ``cls`` parameter of the dump method to this class.
+    """
+    def default(self, inp: Any) -> Any:
+        """Custom JSON encoder for numpy arrays. Other types are passed
+        on to ``JSONEncoder.default``.
+
+        Args:
+            inp:  Object to encode.
+
+        Returns:
+            JSON-serializable dictionary.
+        """
+        if isinstance(inp, Array):
+            return encode_ndarray(inp)
+        return json.JSONEncoder.default(self, inp)
+
+
+_NDARRAY_VALIDATOR = jsonschema.Draft7Validator(load_schema('ndarray'))
diff --git a/src/apollon/onsets.py b/src/apollon/onsets.py
new file mode 100644
index 0000000000000000000000000000000000000000..b297f865cbb7f5e89744a78ac1f654f96cb4c809
--- /dev/null
+++ b/src/apollon/onsets.py
@@ -0,0 +1,287 @@
+"""
+apollon/onsets.py -- Onset detection routines.
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß
+mblass@posteo.net
+
+Classes:
+    OnsetDetector           Base class for onset detection.
+    EntropyOnsetDetector    Onset detection based on phase space entropy estimation.
+    FluxOnsetDetector       Onset detection based on spectral flux.
+
+Functions:
+    peak_picking            Identify local peaks in time series.
+    evaluate_onsets         Evaluation of onset detection results given ground truth.
+"""
+from dataclasses import dataclass
+from typing import Dict, Optional, Tuple, Type, TypeVar
+
+import mir_eval as _me
+import numpy as np
+import pandas as pd
+import scipy.signal as _sps
+
+from . container import Params
+from . io import io
+from . signal import features
+from . signal import tools as _ast
+from . signal.spectral import Stft, StftParams
+from . import fractal as _fractal
+from . import segment as aseg
+from . types import Array, PathType
+
+
+T = TypeVar('T')
+
+
+@dataclass
+class PeakPickingParams(Params):
+    n_before: int
+    n_after: int
+    alpha: float
+    delta: float
+
+
+@dataclass
+class FluxOnsetDetectorParams(Params):
+    stft_params: StftParams
+    pp_params: PeakPickingParams
+
+
+
+pp_params = {'n_before': 10, 'n_after': 10, 'alpha': .1, 'delta': .1}
+
+class OnsetDetector:
+    """Onset detection base class.
+    """
+    def __init__(self) -> None:
+        self._odf: Optional[pd.DataFrame] = None
+        self._peaks: Optional[np.ndarray] = None
+
+    @property
+    def odf(self) -> pd.DataFrame:
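+        """Return the onset detection function as data frame."""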
+        return self._odf
+
+    @property
+    def onsets(self) -> pd.DataFrame:
+        """Returns the index of each detected onset.
+
+        The resulting data frame has two columns:
+        `frame` is the number of the center frame of the segment in which
+        the onset was detected.
+
+        `time` is the time difference between the center frame of the segment
+        in which the onset was detected and the start of the audio signal.
+
+        The data frame index represents the segments.
+
+        Returns:
+            Index of each onset as data frame.
+        """
+        return self._odf.iloc[self._peaks][['frame', 'time']]
+
+    @property
+    def params(self):
+        """Return initial parameters."""
+        return self._params
+
+    def detect(self, inp: Array) -> None:
+        """Detect onsets."""
+        self._odf = self._compute_odf(inp)
+        self._peaks = self._ppkr.detect(self._odf['value'].to_numpy().squeeze())
+
+    def to_csv(self, path: PathType) -> None:
+        """Serialize odf in csv format.
+
+        Args:
+            path: Path to save location.
+        """
+        self.odf.to_csv(path)
+
+    def to_json(self, path: PathType) -> None:
+        """Serialize odf in JSON format.
+
+        Args:
+            path: Path to save location.
+        """
+        self.odf.to_json(path)
+
+    def to_pickle(self, path: PathType) -> None:
+        """Serialize object to pickle file.
+
+        Args:
+            path: Path to save location.
+        """
+        io.save_to_pickle(self, path)
+
+    def plot(self, mode: str = 'time') -> None:
+        """Plot odf against time or index.
+
+        Args:
+            mode:  Either `time`, or `index`.
+        """
+        raise NotImplementedError
+
+
+class EntropyOnsetDetector(OnsetDetector):
+    """Detect onsets based on entropy maxima.
+    """
+    def __init__(self, fps: int, m_dims: int = 3, delay: int = 10,
+                 bins: int = 10, n_perseg: int = 1024, n_overlap: int = 512,
+                 pp_params: Optional[dict] = None) -> None:
+        """Detect onsets as local maxima of information entropy of
+        consecutive windows.
+
+        Be sure to set ``n_perseg`` and ``n_overlap`` according to the
+        sampling rate of the input signal.
+
+        Params:
+            fps:         Sample rate.
+            m_dims:      Embedding dimension.
+            delay:       Embedding delay.
+            bins:        Boxes per axis.
+            n_perseg:    Length of segments in samples.
+            n_overlap:   Number of overlapping samples per segment.
+            pp_params:   Keyword args for peak picking.
+        """
+        super().__init__()
+        self.fps = fps
+        self.m_dims = m_dims
+        self.bins = bins
+        self.delay = delay
+        self.cutter = aseg.Segmentation(n_perseg, n_overlap)
+
+        if pp_params:
+            self._ppkr = FilterPeakPicker(**pp_params)
+        else:
+            self._ppkr = FilterPeakPicker()
+
+    def _compute_odf(self, inp: Array) -> Array:
+        """Compute onset detection function as the information entropy of
+        ``m_dims``-dimensional delay embedding per segment.
+
+        Args:
+            inp:  Audio data.
+
+        Returns:
+            Onset detection function.
+        """
+        segs = self.cutter.transform(inp)
+        odf = np.empty((segs.n_segs, 3))
+        for i, seg in enumerate(segs):
+            emb = _fractal.delay_embedding(seg.squeeze(), self.delay, self.m_dims)
+            odf[i, 0] = segs.center(i)
+            odf[i, 1] = odf[i, 0] / self.fps
+            odf[i, 2] = _fractal.embedding_entropy(emb, self.bins)
+        odf[:, 2] = np.maximum(odf[:, 2], odf[:, 2].mean())
+        return pd.DataFrame(data=odf, columns=['frame', 'time', 'value'])
+
+
+class FluxOnsetDetector(OnsetDetector):
+    """Onset detection based on spectral flux.
+    """
+    def __init__(self, fps: int, window: str = 'hamming', n_perseg: int = 1024,
+                 n_overlap: int = 512, pp_params: Optional[dict] = None) -> None:
+        """Detect onsets as local maxima in the energy difference of
+        consecutive stft time steps.
+
+        Args:
+            fps:        Sample rate.
+            window:     Name of window function.
+            n_perseg:   Samples per segment.
+            n_overlap:  Number of overlapping samples per segment.
+            pp_params:  Keyword args for peak picking.
+        """
+        super().__init__()
+        self._stft = Stft(fps, window, n_perseg, n_overlap)
+        if pp_params:
+            self._ppkr = FilterPeakPicker(**pp_params)
+        else:
+            self._ppkr = FilterPeakPicker()
+
+    def _compute_odf(self, inp: Array) -> Array:
+        """Onset detection function based on spectral flux.
+
+        Args:
+            inp:  Audio data.
+
+        Returns:
+            Onset detection function.
+        """
+        sxx = self._stft.transform(inp)
+        flux = features.spectral_flux(sxx.abs, total=True)
+        times = sxx.times.squeeze()
+        odf = {'frame': (times * sxx.params.fps).astype(int),
+               'time': times,
+               'value': np.maximum(flux.squeeze(), flux.mean())}
+        return pd.DataFrame(odf)
+
+
+class FilterPeakPicker:
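+    """Pick local maxima from a time series using adaptive thresholds."""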
+    def __init__(self, n_after: int = 10, n_before: int = 10,
+                 alpha: float = .1, delta: float=.1) -> None:
+        self.n_after = n_after
+        self.n_before = n_before
+        self.alpha = alpha
+        self.delta = delta
+
+    def detect(self, inp: Array) -> Array:
+        """Pick local maxima from a numerical time series.
+
+        Pick local maxima from the onset detection function ``inp``, which
+        is assumed to be a one-dimensional array. Typically, ``inp`` is the
+        spectral flux per time step. Window lengths and thresholds are taken
+        from the instance attributes ``n_before``, ``n_after``, ``alpha``,
+        and ``delta``.
+
+        Args:
+            inp:  Onset detection function, e.g., spectral flux.
+
+        Return:
+            Peak indices.
+        """
+        g = [0]
+        out = []
+
+        for n, val in enumerate(inp):
+            # set local window
+            idx = np.arange(n-self.n_before, n+self.n_after+1, 1)
+            window = np.take(inp, idx, mode='clip')
+
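+            # cond1: local maximum of the window; cond2: exceeds the window
+            # mean by at least delta; cond3: exceeds the smoothed threshold
+            # g from the previous step.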
+            cond1 = np.all(val >= window)
+            cond2 = val >= (np.mean(window) + self.delta)
+
+            foo = max(val, self.alpha*g[n] + (1-self.alpha)*val)
+            g.append(foo)
+            cond3 = val >= foo
+
+            if cond1 and cond2 and cond3:
+                out.append(n)
+
+        return np.array(out)
+
+
+def evaluate_onsets(targets: Dict[str, np.ndarray],
+                    estimates: Dict[str, np.ndarray]) -> np.ndarray:
+    """Evaluate onset detection performance.
+
+    This function uses the mir_eval package for evaluation.
+
+    Params:
+        targets:    Ground truth onset times, with dict keys being file
+                    names, and values being target onset time codes in ms.
+        estimates:  Estimated onset times, with dict keys being file names,
+                    and values being the estimated onset time codes in ms.
+
+    Returns:
+        Array with one row per file, holding precision, recall, and
+        f-measure.
+    """
+    out = []
+    for name, tvals in targets.items():
+        od_eval = _me.onset.evaluate(tvals, estimates[name])
+        out.append(list(od_eval.values()))
+    return np.array(out)
diff --git a/src/apollon/schema/corrdim.schema.json b/src/apollon/schema/corrdim.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..914c2589139b0f269404d337d4740702e228090f
--- /dev/null
+++ b/src/apollon/schema/corrdim.schema.json
@@ -0,0 +1,30 @@
+{
+    "$schema": "http://json-schema.org/draft-07/schema",
+    "type": "object",
+    "title": "cdim root",
+    "description": "Correlation Dimension parameters",
+    "required": ["delay", "m_dim", "n_bins", "scaling_size"],
+    "additionalProperties": false,
+    "properties": {
+        "delay": {
+            "$id": "#properties/delay",
+            "description": "Length of delay vectors",
+            "type": "integer"
+        },
+        "m_dim": {
+            "$id": "#properties/m_dim",
+            "description": "Number of embedding dimensions",
+            "type": "integer"
+        },
+        "n_bins": {
+            "$id": "#properties/n_bins",
+            "description": "Number of histogram bins (radius)",
+            "type": "integer"
+        },
+        "scaling_size": {
+            "$id": "#properties/scaling_size",
+            "description": "Length of scaling size",
+            "type": "integer"
+        }
+    }
+}
diff --git a/src/apollon/schema/corrgram.schema.json b/src/apollon/schema/corrgram.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..d170a9f917e611fbe595b013acd3151d2192dc6f
--- /dev/null
+++ b/src/apollon/schema/corrgram.schema.json
@@ -0,0 +1,25 @@
+{
+    "$schema": "http://json-schema.org/draft-07/schema",
+    "type": "object",
+    "title": "cgram root",
+    "description": "Correlogram parameters",
+    "required": ["wlen", "n_delay", "total"],
+    "additionalProperties": false,
+    "properties": {
+        "wlen": {
+            "$id": "#properties/wlen",
+            "description": "Length of correlation window",
+            "type": "integer"
+        },
+        "n_delay": {
+            "$id": "#properties/n_delay",
+            "description": "Number of correlation lags",
+            "type": "integer"
+        },
+        "total": {
+            "$id": "#properties/total",
+            "description": "Sum correlogram",
+            "type": "boolean"
+        }
+    }
+}
diff --git a/src/apollon/schema/dft_params.schema.json b/src/apollon/schema/dft_params.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..e7bbea047c45403320804948c8bb777294e1ccb8
--- /dev/null
+++ b/src/apollon/schema/dft_params.schema.json
@@ -0,0 +1,28 @@
+{
+    "$schema": "http://json-schema.org/draft-07/schema",
+    "type": "object",
+    "title": "DFT params, root",
+    "description": "Parameter for Discrete Fourier Transform",
+    "required": ["fps","window", "n_fft"],
+    "additionalProperties": false,
+    "properties": {
+        "fps": {
+            "$id": "#properties/fps",
+            "description": "Sample rate",
+            "type": "integer"
+        },
+        "window": {
+            "$id": "#properties/window",
+            "description": "Name of window function",
+            "type": "string"
+        },
+        "n_fft": {
+            "$id": "#properties/n_fft",
+            "description": "FFT length",
+            "anyOf": [
+                {"type": "integer"},
+                {"type": "null"}
+            ]
+        }
+    }
+}
diff --git a/src/apollon/schema/ndarray.schema.json b/src/apollon/schema/ndarray.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d7190874d2abb9276bd43d88b7ae8513ea69e10
--- /dev/null
+++ b/src/apollon/schema/ndarray.schema.json
@@ -0,0 +1,25 @@
+{
+    "$schema": "http://json-schema.org/draft-07/schema",
+    "type": "object",
+    "title": "ndarray root schema",
+    "description": "The root schema comprises the entire JSON document.",
+    "required": ["__ndarray__", "__dtype__", "data"],
+    "additionalProperties": false,
+    "properties": {
+        "__ndarray__": {
+            "$id": "#/properties/__ndarray__",
+            "description": "ndarray indicator",
+            "type": "boolean"
+        },
+        "__dtype__": {
+            "$id": "#/properties/__dtype__",
+            "description": "Data type descriptor",
+            "type": "string"
+        },
+        "data": {
+            "$id": "#/properties/data",
+            "description": "Actual array elements",
+            "type": "array"
+        }
+    }
+}
diff --git a/src/apollon/schema/stft_params.schema.json b/src/apollon/schema/stft_params.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..6a44853569476a26752d93e52be773d6915d4856
--- /dev/null
+++ b/src/apollon/schema/stft_params.schema.json
@@ -0,0 +1,54 @@
+{
+    "$schema": "http://json-schema.org/draft-07/schema",
+    "type": "object",
+    "required": [
+        "fps",
+        "window",
+        "n_fft",
+        "n_perseg",
+        "n_overlap",
+        "extend",
+        "pad"
+    ],
+    "additionalProperties": false,
+    "properties": {
+        "fps": {
+            "$id": "#properties/fps",
+            "description": "Sample rate",
+            "type": "integer"
+        },
+        "window": {
+            "$id": "#properties/window",
+            "description": "Name of window function",
+            "type": "string"
+        },
+        "n_fft": {
+            "$id": "#properties/n_fft",
+            "description": "FTT length",
+            "anyOf": [
+                {"type": "integer"},
+                {"type": "null"}
+            ]
+        },
+        "n_perseg": {
+            "$id": "#properties/n_perseg",
+            "description": "Number of samples per segment",
+            "type": "integer"
+        },
+        "n_overlap": {
+            "$id": "#properties/n_overlap",
+            "description": "Number of overlapping samples per segment",
+            "type": "integer"
+        },
+        "extend": {
+            "$id": "#properties/extend",
+            "description": "Extend half-window before and after signal",
+            "type": "boolean"
+        },
+        "pad": {
+            "$id": "properties/pad",
+            "description": "Zero padding",
+            "type": "boolean"
+        }
+    }
+}
diff --git a/src/apollon/segment.py b/src/apollon/segment.py
new file mode 100644
index 0000000000000000000000000000000000000000..c536ac7b6e60bd319c9245e9310b6e034b5e6580
--- /dev/null
+++ b/src/apollon/segment.py
@@ -0,0 +1,475 @@
+"""
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß, mblass@posteo.net
+"""
+from dataclasses import dataclass
+from typing import ClassVar, Generator, Tuple, Union
+
+import numpy as _np
+from numpy.lib.stride_tricks import as_strided
+
+from . audio import AudioFile
+from . container import Params
+from . signal.tools import zero_padding as _zero_padding
+from . types import Array, Schema
+
+
+@dataclass
+class LazySegmentParams:
+    """Encapsulates segmentation parameters."""
+    n_perseg: int
+    n_overlap: int
+    norm: bool = False
+    mono: bool = True
+    expand: bool = True
+    dtype: str = 'float64'
+
+
+SEGMENTATION_PARAMS = {
+    "type": "object",
+    "properties": {
+        "n_perseg": {"type": "integer"},
+        "n_overlap": {"type": "integer"},
+        "extend": {"anyOf": [{"type": "boolean"}, {"type": "integer"}]},
+        "pad": {"anyOf": [{"type": "boolean"}, {"type": "integer"}]}
+    }
+}
+
+
+@dataclass
+class SegmentationParams(Params):
+    """Parameters for Segmentation."""
+    _schema: ClassVar[Schema] = SEGMENTATION_PARAMS
+    n_perseg: int = 512
+    n_overlap: int = 256
+    extend: Union[bool, int] = True
+    pad: Union[bool, int] = True
+
+
+@dataclass
+class Segment:
+    """Encapsulates audio segment data and meta data."""
+    idx: int
+    start: int
+    stop: int
+    center: int
+    n_frames: int
+    data: _np.ndarray
+
+
+class Segments:
+    """Segement"""
+    def __init__(self, params: SegmentationParams, segs: _np.ndarray) -> None:
+        self._segs = segs
+        self._params = params
+        if self._params.extend:
+            self._offset = 0
+        else:
+            self._offset = self._params.n_perseg // 2
+
+    @property
+    def data(self) -> Array:
+        """Return the raw segment data array."""
+        return self._segs
+
+    @property
+    def n_segs(self) -> int:
+        """Total number of segments."""
+        return self._segs.shape[1]
+
+    @property
+    def n_perseg(self) -> int:
+        """Number of samples per segment."""
+        return self._params.n_perseg
+
+    @property
+    def n_overlap(self) -> int:
+        """Number of overlapping samples between segments."""
+        return self._params.n_overlap
+
+    @property
+    def step(self) -> int:
+        """Hop size in samples."""
+        return self._params.n_perseg - self._params.n_overlap
+
+    @property
+    def params(self) -> SegmentationParams:
+        """Parameter set used to compute this instance."""
+        return self._params
+
+    def center(self, seg_idx) -> int:
+        """Return the center of segment ``seg_idx`` as frame number
+        of the original signal.
+
+        Args:
+            seg_idx:  Segment index.
+
+        Returns:
+            Center frame index.
+        """
+        if not (0 <= seg_idx < self.n_segs):
+            raise IndexError('Requested index out of range.')
+        return seg_idx * self.step + self._offset
+
+    def bounds(self, seg_idx) -> Tuple[int, int]:
+        """Return the frame numbers of the lower and upper bound
+        of segment ``seg_idx``. Lower bound index is inclusive,
+        upper bound index is exclusive.
+
+        Args:
+            seg_idx:  Segment index.
+
+        Returns:
+            Lower and upper bound frame index.
+        """
+        if not (0 <= seg_idx < self.n_segs):
+            raise IndexError('Requested index out of range.')
+        lob = self.center(seg_idx) - self._params.n_perseg // 2
+        upb = lob + self._params.n_perseg
+        return lob, upb
+
+    def get(self, seg_idx) -> Segment:
+        """Retrun segment ``seg_idx`` wrapped in an ``Segment`` object.
+
+        Args:
+            seg_idx:  Segment index.
+
+        Returns:
+            Segment ``seg_idx``.
+        """
+        return Segment(seg_idx, *self.bounds(seg_idx), self.center(seg_idx),
+                       self._params.n_perseg, self[seg_idx])
+
+    def __iter__(self) -> Generator[_np.ndarray, None, None]:
+        for seg in self._segs.T:
+            yield _np.expand_dims(seg, 1)
+
+    def __getitem__(self, key) -> _np.ndarray:
+        out = self._segs[:, key]
+        if out.ndim < 2:
+            return _np.expand_dims(out, 1)
+        return out
+
+    def __repr__(self) -> str:
+        return f'Segments(params={self._params!s}, segs={self._segs!s})'
+
+    def __str__(self) -> str:
+        return f'<n_segs: {self.n_segs}, len_seg: {self._params.n_perseg}>'
+
+
+class Segmentation:
+    """Segementation"""
+    def __init__(self, n_perseg: int, n_overlap: int, extend: bool = True,
+                pad: bool = True) -> None:
+        """Subdivide input array.
+
+        Args:
+            n_perseg:  Samples per segment.
+            n_overlap: Overlap in samples.
+            extend:    Extend a half window at start and end.
+            pad:       Pad extension.
+        """
+        if n_perseg > 0:
+            self.n_perseg = n_perseg
+        else:
+            msg = (f'Argument to ``n_perseg`` must be greater than '
+                   f'zero.\nFound ``n_perseg`` = {n_perseg}.')
+            raise ValueError(msg)
+
+        if 0 < n_overlap < n_perseg:
+            self.n_overlap = n_overlap
+        else:
+            msg = (f'Argument to ``n_overlap`` must be greater than '
+                   f'zero and less than ``n_perseg``.\nFound '
+                   f'``n_perseg`` = {self.n_perseg} and ``n_overlap`` '
+                   f'= {n_overlap}.')
+            raise ValueError(msg)
+
+        self._extend = extend
+        self._pad = pad
+        self._ext_len = 0
+        self._pad_len = 0
+
+    def transform(self, data: _np.ndarray) -> Segments:
+        """Apply segmentation.
+
+        Input array must be either one-, or two-dimensional.
+        If ``data`` is two-dimensional, it must be of shape
+        (n_elements, 1).
+
+        Args:
+            data:  Input array.
+
+        Returns:
+            ``Segments`` object.
+        """
+        self._validate_data_shape(data)
+        self._validate_nps(data.shape[0])
+        n_frames = data.shape[0]
+        step = self.n_perseg - self.n_overlap
+
+        if self._extend:
+            self._ext_len = self.n_perseg // 2
+
+        if self._pad:
+            self._pad_len = (-(n_frames-self.n_perseg) % step) % self.n_perseg
+
+        data = _np.pad(data.squeeze(), (self._ext_len, self._ext_len+self._pad_len))
+        new_shape = data.shape[:-1] + ((data.shape[-1] - self.n_overlap) // step, self.n_perseg)
+        new_strides = data.strides[:-1] + (step * data.strides[-1], data.strides[-1])
+        segs = as_strided(data, new_shape, new_strides, writeable=False).T
+        params = SegmentationParams(self.n_perseg, self.n_overlap,
+                                    self._extend, self._pad)
+        return Segments(params, segs)
+
+    def _validate_nps(self, n_frames: int) -> None:
+        if self.n_perseg > n_frames:
+            msg = (f'Input data length ({n_frames}) incompatible with '
+                   f'parameter ``n_perseg`` = {self.n_perseg}. ``n_perseg`` '
+                   f'must be less than or equal to the input data length.')
+            raise ValueError(msg)
+
+    def _validate_data_shape(self, data: _np.ndarray) -> None:
+        if not (0 < data.ndim < 3):
+            msg = (f'Input array must have one or two dimensions.\n'
+                   f'Found ``data.shape`` = {data.shape}.')
+        elif data.ndim == 2 and data.shape[1] != 1:
+            msg = (f'Two-dimensional input arrays can only have one '
+                   f'column.\nFound ``data.shape`` = {data.shape}.')
+        else:
+            return None
+        raise ValueError(msg)
+
+
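+# A minimal usage sketch of the segmentation API (sizes are illustrative):
+#
+#     >>> import numpy as np
+#     >>> sig = np.random.randn(44100)
+#     >>> segs = Segmentation(n_perseg=1024, n_overlap=512).transform(sig)
+#     >>> segs.data.shape[0]    # each column holds one 1024-sample segment
+#     1024
+
+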
+class LazySegments:
+    """Read segments from audio file."""
+    def __init__(self, snd: AudioFile, n_perseg: int, n_overlap: int,
+                 norm: bool = False, mono: bool = True,
+                 expand: bool = True, dtype: str = 'float64') -> None:
+        """Compute equal-sized segments.
+
+        Args:
+            snd:        An ``AudioFile`` instance to read from.
+            n_perseg:   Number of samples per segment.
+            n_overlap:  Size of segment overlap in samples.
+            norm:       Normalize each segment separately.
+            mono:       If ``True`` mixdown all channels.
+            expand:     Start segmentation at -n_perseg//2.
+            dtype:      Dtype of output array.
+        """
+        self._snd = snd
+        self.n_perseg = n_perseg
+        self.n_overlap = n_overlap
+        self.expand = expand
+        self.n_segs = int(_np.ceil(self._snd.n_frames / n_overlap))
+        if expand:
+            self.n_segs += 1
+            self.offset = -self.n_perseg // 2
+        else:
+            self.n_segs -= 1
+            self.offset = 0
+        self.step = self.n_perseg - self.n_overlap
+        self.norm = norm
+        self.mono = mono
+        self.dtype = dtype
+
+    def compute_bounds(self, seg_idx):
+        if seg_idx < 0:
+            raise IndexError('Expected positive integer for ``seg_idx``. '
+                             f'Got {seg_idx}.')
+        if seg_idx >= self.n_segs:
+            raise IndexError(f'You requested segment {seg_idx}, but there '
+                             f'are only {self.n_segs} segments.')
+        start = seg_idx * self.n_overlap + self.offset
+        return start, start + self.n_perseg
+
+    def read_segment(self, seg_idx: int, norm: bool = None,
+                     mono: bool = None, dtype: str = None):
+        norm = self.norm if norm is None else norm
+        mono = self.mono if mono is None else mono
+        dtype = self.dtype if dtype is None else dtype
+        offset = seg_idx * self.n_overlap + self.offset
+        return self._snd.read(self.n_perseg, offset, norm, mono, dtype)
+
+    def loc(self, seg_idx: int, norm: bool = None,
+            mono: bool = None, dtype: str = None) -> Segment:
+        """Locate segment by index.
+
+        Args:
+            seg_idx:  Segment index.
+            norm:     If ``True``, normalize each segment separately.
+                      Falls back to ``self.norm``.
+            mono:     If ``True`` mixdown all channels.
+                      Falls back to ``self.mono``.
+            dtype:    Output dtype. Falls back to ``self.dtype``.
+
+        Returns:
+            Segment number ``seg_idx``.
+        """
+        start, stop = self.compute_bounds(seg_idx)
+        data = self.read_segment(seg_idx, norm, mono, dtype)
+        return Segment(seg_idx, start, stop, (start+stop)//2,
+                       self.n_perseg, data)
+
+    def __getitem__(self, key):
+        return self.loc(key)
+
+    def __iter__(self):
+        for i in range(self.n_segs):
+            yield self.__getitem__(i)
+
+    def iter_data(self):
+        """Yield the data of successive segments."""
+        for _ in range(self.n_segs):
+            yield self._snd.read(self.n_perseg)
+
+    def iter_bounds(self):
+        for i in range(self.n_segs):
+            yield self.compute_bounds(i)
+
+
+def _by_samples(x: Array, n_perseg: int) -> Array:
+    """Split ``x`` into segments of lenght ``n_perseg`` samples.
+
+    This function automatically applies zero padding for inputs that cannot be
+    split evenly.
+
+    Args:
+        x:         One-dimensional input array.
+        n_perseg:  Length of segments in samples.
+
+    Returns:
+        Two-dimensional array of segments.
+    """
+    if not isinstance(n_perseg, int):
+        raise TypeError('Param ``n_perseg`` must be of type int.')
+
+    if n_perseg < 1:
+        raise ValueError('``n_perseg`` out of range. '
+                         'Expected 1 <= n_perseg.')
+
+    fit_size = int(_np.ceil(x.size / n_perseg) * n_perseg)
+    n_ext = fit_size - x.size
+    x = _zero_padding(x, n_ext)
+    return x.reshape(-1, n_perseg)
+
+
+def _by_samples_with_hop(x: Array, n_perseg: int, hop_size: int) -> Array:
+    """Split `x` into segments of lenght `n_perseg` samples. Move the
+    extraction window `hop_size` samples.
+
+    This function automatically applies zero padding for inputs that cannot be
+    split evenly.
+
+    Args:
+        x:         One-dimensional input array.
+        n_perseg:  Length of segments in samples.
+        hop_size:  Hop size in samples.
+
+    Returns:
+        Two-dimensional array of segments.
+    """
+    if not (isinstance(n_perseg, int) and isinstance(hop_size, int)):
+        raise TypeError('Params must be of type int.')
+
+    if not 1 < n_perseg <= x.size:
+        raise ValueError('n_perseg out of range. '
+                         'Expected 1 < n_perseg <= len(x).')
+
+    if hop_size < 1:
+        raise ValueError('hop_size out of range. Expected hop_size >= 1.')
+
+    n_hops = (x.size - n_perseg) // hop_size + 1
+    n_segs = n_hops
+
+    if (x.size - n_perseg) % hop_size != 0 and n_perseg > hop_size:
+        n_segs += 1
+
+    fit_size = hop_size * n_hops + n_perseg
+    n_ext = fit_size - x.size
+    x = _zero_padding(x, n_ext)
+
+    out = _np.empty((n_segs, n_perseg), dtype=x.dtype)
+    for i in range(n_segs):
+        off = i * hop_size
+        out[i] = x[off:off+n_perseg]
+    return out
+
+
+def by_samples(x: Array, n_perseg: int, hop_size: int = 0) -> Array:
+    """Segment the input into n segments of length n_perseg and move the
+    window `hop_size` samples.
+
+    This function automatically applies zero padding for inputs that cannot be
+    split evenly.
+
+    If `hop_size` is less than one, it is reset to `n_perseg`.
+
+    Overlap in percent is calculated as ov = hop_size / n_perseg * 100.
+
+    Args:
+        x           One-dimensional input array.
+        n_perseg    Length of segments in samples.
+        hop_size    Hop size in samples. If < 1, hop_size = n_perseg.
+
+    Returns:
+        Two-dimensional array of segments.
+    """
+    if hop_size < 1:
+        return _by_samples(x, n_perseg)
+    else:
+        return _by_samples_with_hop(x, n_perseg, hop_size)
+
+
+def by_ms(x: Array, fps: int, ms_perseg: int, hop_size: int = 0) -> Array:
+    """Segment the input into n segments of length ms_perseg and move the
+    window `hop_size` milliseconds.
+
+    This function automatically applies zero padding for inputs that cannot be
+    split evenly.
+
+    If `hop_size` is less than one, it is reset to `n_perseg`.
+
+    Overlap in percent is calculated as ov = hop_size / n_perseg * 100.
+
+    Args:
+        x           One-dimensional input array.
+        fs          Sampling frequency.
+        n_perseg    Length of segments in milliseconds.
+        hop_size    Hop size in milliseconds. If < 1, hop_size = n_perseg.
+
+    Returns:
+        Two-dimensional array of segments.
+        """
+    n_perseg = fps * ms_perseg // 1000
+    hop_size = fps * hop_size // 1000
+    return by_samples(x, n_perseg, hop_size)
+
+
+def by_onsets(x: Array, n_perseg: int, ons_idx: Array, off: int = 0
+              ) -> Array:
+    """Split input `x` into len(ons_idx) segments of length `n_perseg`.
+
+    Extraction windos start at `ons_idx[i]` + `off`.
+
+    Args:
+        x           One-dimensional input array.
+        n_perseg    Length of segments in samples.
+        ons_idx     One-dimensional array of onset positions.
+        off         Length of offset.
+
+    Returns:
+        Two-dimensional array of shape (len(ons_idx), n_perseg).
+    """
+    n_ons = ons_idx.size
+    out = _np.empty((n_ons, n_perseg), dtype=x.dtype)
+
+    for i, idx in enumerate(ons_idx):
+        pos = idx + off
+        if pos < 0:
+            pos = 0
+        elif pos >= x.size:
+            pos = x.size - 1
+
+        if pos + n_perseg >= x.size:
+            buff = x[pos:]
+            out[i] = _zero_padding(buff, n_perseg-buff.size)
+        else:
+            out[i] = x[pos:pos+n_perseg]
+    return out
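+
+
+# A minimal usage sketch (the padding behaviour follows the rules
+# documented above):
+#
+#     >>> x = _np.arange(10.0)
+#     >>> by_samples(x, 4)            # no hop -> non-overlapping, padded
+#     array([[0., 1., 2., 3.],
+#            [4., 5., 6., 7.],
+#            [8., 9., 0., 0.]])
+#     >>> by_samples(x, 4, 2).shape   # hop of 2 -> 50 % overlap
+#     (4, 4)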
diff --git a/src/apollon/signal/__init__.py b/src/apollon/signal/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b69af25637ea22f1f00af8d6462e58fa96c1db0
--- /dev/null
+++ b/src/apollon/signal/__init__.py
@@ -0,0 +1,15 @@
+"""
+========================================
+Signal processing tools
+========================================
+
+Audio features
+========================================
+
+.. currentmodule:: apollon.signal.features
+
+.. autosummary::
+
+    cdim
+    spectral_centroid
+"""
diff --git a/src/apollon/signal/_features_module.c b/src/apollon/signal/_features_module.c
new file mode 100644
index 0000000000000000000000000000000000000000..c2b41be2a4a7a1e4d8329a9923b2c4559ba65787
--- /dev/null
+++ b/src/apollon/signal/_features_module.c
@@ -0,0 +1,237 @@
+#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION
+#define PY_ARRAY_UNIQUE_SYMBOL comsar_NP_ARRAY_API
+
+#include <Python.h>
+#include <numpy/arrayobject.h>
+#include "correlogram.h"
+#include "cdim.h"
+
+/* Compute the correlogram of an audio signal.
+ *
+ * Params:
+ *      signal      One-dimensional input signal
+ *      delays      Array of window delays in samples
+ *      wlen        Length of window in samples
+ *      max_offset  Number of window positions
+ *
+ * Return:  2d array
+ */
+
+static PyObject *
+apollon_correlogram_delay (PyObject* self, PyObject* args)
+{
+    int      success    = 0;
+    npy_intp window_len = 0;
+    npy_intp max_offset = 0;
+    npy_intp dims[]    = {0, 0};
+
+    PyObject *op_signal = NULL;
+    PyObject *op_delays = NULL;
+
+    PyArrayObject *arr_signal = NULL;
+    PyArrayObject *arr_delays = NULL;
+    PyArrayObject *arr_corr   = NULL;
+
+    if (!PyArg_ParseTuple (args, "OOkk", &op_signal, &op_delays, &window_len, &max_offset))
+    {
+        return NULL;
+    }
+
+    arr_signal = (PyArrayObject *) PyArray_ContiguousFromAny (op_signal, NPY_DOUBLE, 1, 1);
+    if (arr_signal == NULL)
+    {
+        PyErr_SetString (PyExc_RuntimeError, "Could not convert signal array.\n");
+        return NULL;
+    }
+
+    arr_delays = (PyArrayObject *) PyArray_ContiguousFromAny (op_delays, NPY_LONG, 1, 1);
+    if (arr_delays == NULL)
+    {
+        PyErr_SetString (PyExc_RuntimeError, "Could not convert delays array.\n");
+        return NULL;
+    }
+
+    dims[0] = PyArray_SIZE (arr_delays);
+    dims[1] = max_offset;
+    arr_corr = (PyArrayObject *) PyArray_NewFromDescr (&PyArray_Type,
+                                                PyArray_DescrFromType (NPY_DOUBLE),
+                                                2, dims, NULL, NULL, 0, NULL);
+    if (arr_corr == NULL)
+    {
+        PyErr_SetString (PyExc_MemoryError, "Could not allocate correlogram.\n");
+        return NULL;
+    }
+
+    success = correlogram_delay (
+                (double *) PyArray_DATA (arr_signal),
+                (size_t *) PyArray_DATA (arr_delays),
+                (size_t) window_len,
+                (size_t *) dims,
+                PyArray_DATA (arr_corr));
+
+    if (success == 0)
+    {
+        PyErr_SetString (PyExc_ValueError, "Correlogram failed.");
+        return NULL;
+    }
+
+    return (PyObject *) arr_corr;
+}
+
+
+static PyObject *
+apollon_correlogram (PyObject* self, PyObject* args)
+{
+    int      success    = 0;
+    npy_intp window_len = 0;
+    npy_intp max_delay  = 0;
+    npy_intp dims[]     = {0, 0};
+
+    PyObject      *op_signal  = NULL;
+    PyArrayObject *arr_signal = NULL;
+    PyArrayObject *arr_corr   = NULL;
+
+    if (!PyArg_ParseTuple (args, "Okk", &op_signal, &window_len, &max_delay))
+    {
+        return NULL;
+    }
+
+    arr_signal = (PyArrayObject *) PyArray_ContiguousFromAny (op_signal, NPY_DOUBLE, 1, 1);
+    if (arr_signal == NULL)
+    {
+        PyErr_SetString (PyExc_RuntimeError, "Could not convert signal array.\n");
+        return NULL;
+    }
+
+    dims[0] = max_delay;
+    dims[1] = PyArray_SIZE (arr_signal) - window_len - max_delay;
+
+    arr_corr = (PyArrayObject *) PyArray_NewFromDescr (
+                &PyArray_Type, PyArray_DescrFromType (NPY_DOUBLE),
+                2, dims, NULL, NULL, 0, NULL);
+
+    if (arr_corr == NULL)
+    {
+        PyErr_SetString (PyExc_MemoryError, "Could not allocate correlogram.\n");
+        return NULL;
+    }
+
+    success = correlogram ((double *) PyArray_DATA (arr_signal),
+                (size_t) window_len, (size_t *) dims, PyArray_DATA (arr_corr));
+
+    if (success == 0)
+    {
+        PyErr_SetString (PyExc_ValueError, "Correlogram failed..");
+        Py_RETURN_NONE;
+    }
+
+    return (PyObject *) arr_corr;
+}
+
+
+static PyObject *
+apollon_delay_embedding_dists (PyObject *self, PyObject *args)
+{
+    PyObject *inp = NULL;
+    npy_intp  delay = 0;
+    npy_intp  m_dim = 0;
+
+    if (!PyArg_ParseTuple (args, "Okk", &inp, &delay, &m_dim))
+    {
+        return NULL;
+    }
+
+    PyArrayObject *arr_inp = (PyArrayObject *) PyArray_FROM_OTF (inp,
+            NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
+    if (arr_inp == NULL)
+    {
+        PyErr_SetString (PyExc_RuntimeError, "Could not convert input arrays.\n");
+        return NULL;
+    }
+
+    npy_intp n_vectors = PyArray_SIZE (arr_inp) - ((m_dim -1) * delay);
+    npy_intp n_dists = n_vectors * (n_vectors - 1) / 2;
+
+    PyArrayObject *dists = (PyArrayObject *) PyArray_ZEROS (1, &n_dists,
+            NPY_DOUBLE, 0);
+    if (dists == NULL)
+    {
+        PyErr_SetString (PyExc_MemoryError, "Could not allocate distance array.\n");
+        return NULL;
+    }
+
+    delay_embedding_dists (PyArray_DATA (arr_inp), (size_t) n_vectors,
+            (size_t) delay, (size_t) m_dim, PyArray_DATA (dists));
+    
+    return (PyObject *) dists;
+}
+
+
+static PyObject *
+apollon_cdim_bader (PyObject *self, PyObject *args)
+{
+    PyObject *op_snd = NULL;
+    npy_intp  delay;
+    npy_intp  m_dim;
+    npy_intp  n_bins;
+    npy_intp  scaling_size;
+
+    if (!PyArg_ParseTuple (args, "Okkkk", &op_snd, &delay, &m_dim,
+                &n_bins, &scaling_size))
+    {
+        return NULL;
+    }
+
+    PyArrayObject *arr_snd = (PyArrayObject *) PyArray_FROM_OTF (op_snd,
+            NPY_INT16, NPY_ARRAY_IN_ARRAY);
+
+    if (arr_snd == NULL)
+    {
+        PyErr_SetString (PyExc_RuntimeError, "Could not convert input arrays.\n");
+        return NULL;
+    }
+
+    double cdim = corr_dim_bader (PyArray_DATA (arr_snd), (size_t) delay,
+            (size_t) m_dim, (size_t) n_bins, (size_t) scaling_size);
+
+    if (cdim < 0)
+    {
+        PyErr_SetString (PyExc_ValueError, "cdim_bader failed.\n");
+        return NULL;
+    }
+    return PyFloat_FromDouble (cdim);
+}
+
+
+
+static PyMethodDef
+Features_Methods[] = {
+    {"correlogram_delay", apollon_correlogram_delay, METH_VARARGS,
+        "correlogram (signal, delays, wlen, off_max)"},
+    {"correlogram", apollon_correlogram, METH_VARARGS,
+        "correlogram (signal, wlen, delay_max)"},
+    {"emb_dists", apollon_delay_embedding_dists, METH_VARARGS,
+        "emb_dists(inp, delay, m_dim)"},
+    {"cdim_bader", apollon_cdim_bader, METH_VARARGS,
+     "cdim_bader (snd, delay, m_dim, n_bins, scaling_size)\n"
+     "Estimate the correlation dimension Bader-style."},
+    {NULL, NULL, 0, NULL}
+};
+
+static struct PyModuleDef
+_features_module = {
+    PyModuleDef_HEAD_INIT,
+    "_features",
+    NULL,
+    -1,
+    Features_Methods
+};
+
+PyMODINIT_FUNC
+PyInit__features(void) {
+    import_array();
+    return PyModule_Create (&_features_module);
+}
diff --git a/src/apollon/signal/cdim.c b/src/apollon/signal/cdim.c
new file mode 100644
index 0000000000000000000000000000000000000000..db91be5657c3973a96549a61d98650095b48de82
--- /dev/null
+++ b/src/apollon/signal/cdim.c
@@ -0,0 +1,283 @@
+#include "cdim.h"
+
+void
+delay_embedding_dists (const double *inp,
+                       const size_t  n_vectors,
+                       const size_t  delay,
+                       const size_t  m_dim,
+                             double *dists)
+{
+    
+    for (size_t i = 0; i < n_vectors - 1; i++)
+    {
+        for (size_t j = i + 1; j < n_vectors; j++)
+        {
+            size_t flat_idx = i * n_vectors + j - i*(i+1)/2 - i - 1;
+            for (size_t m = 0; m < m_dim; m++)
+            {
+                dists[flat_idx] += pow (inp[i+m*delay] - inp[j+m*delay], 2);
+            }
+            dists[flat_idx] = sqrt (dists[flat_idx]);
+        }
+    }
+}
+
+void
+comsar_fractal_embedding (const double *x,
+                          const size_t  N_max,
+                          const size_t  m_dim,
+                          const size_t  delay,
+                          const size_t  n_dist,
+                                double *dist,
+                                double *d_min,
+                                double *d_max)
+{
+    /* embedding */
+    for (size_t i = 0; i < m_dim; i++)
+    {
+        /* distance matrix */
+        for (size_t j = 0; j < N_max-1; j++)
+        {
+            for (size_t k = j+1, idx = 0; k < N_max; k++)
+            {
+                idx = j * N_max + k - j * (j+1) / 2 - j - 1;
+                dist[idx] += pow (x[i+j*delay] - x[i+k*delay], 2);
+            }
+        }   /* END distance matrix */
+    }   /* END embedding */
+
+    *d_min = sqrt (dist[0]);
+    *d_max = sqrt (dist[0]);
+    for (size_t i = 0; i < n_dist; i++)
+    {
+        dist[i] = sqrt (dist[i]);
+        if (dist[i] < *d_min) *d_min = dist[i];
+        if (dist[i] > *d_max) *d_max = dist[i];
+    }
+}
+
+
+void
+comsar_fractal_correlation_sum (const size_t  n_radius,
+                                const double *radius,
+                                const size_t  n_dist,
+                                const double *dist,
+                                const size_t  N_max,
+                                      double *Cr)
+{
+    for (size_t i = 0; i < n_radius; i++)
+    {
+        Cr[i] = 0;
+        for (size_t j = 0; j < n_dist; j++)
+        {
+            if (dist[j] < radius[i])
+            {
+                Cr[i] += 1.0;
+            }
+        }
+        Cr[i] *= 2.0 / ((double) N_max * ((double) N_max - 1.0));
+    }
+}
+
+
+int
+comsar_fractal_csum (const double *sig,
+                     const size_t  n_sig,
+                     const double *radius,
+                     const size_t  n_radius,
+                     const size_t  m_dim,
+                     const size_t  delay,
+                           double *Cr)
+{
+    const size_t  N_max  = (n_sig - m_dim) / delay + 1;
+    const size_t  n_dist = N_max * (N_max - 1) / 2;
+          double *dist   = NULL;
+          double  d_min  = 0;
+          double  d_max  = 0;
+
+    dist = calloc (n_dist, sizeof (double));
+    if (dist == NULL)
+    {
+        fprintf (stderr, "Out of memory while allocating `dist` in correlation sum.");
+        return -1;
+    }
+
+    comsar_fractal_embedding (sig, N_max, m_dim, delay, n_dist, dist, &d_min, &d_max);
+    comsar_fractal_correlation_sum (n_radius, radius, n_dist, dist, N_max, Cr);
+
+    free (dist);
+    return 0;
+}
+
+
+int
+comsar_fractal_cdim (const double *x,
+                     const size_t  N,
+                     const size_t  n_radius,
+                     const size_t  m_dim,
+                     const size_t  delay,
+                           double *Cr)
+{
+    const size_t  N_max  = (N - m_dim) / delay + 1;
+    const size_t  n_dist = N_max * (N_max - 1) / 2;
+          double *dist   = NULL;
+          double  d_min  = 0;
+          double  d_max  = 0;
+          double *radius = NULL;
+
+    dist = calloc (n_dist, sizeof (double));
+    if (dist == NULL)
+    {
+        fprintf (stderr, "Out of memory while allocating `dist` in cdim");
+        return -1;
+    }
+
+    radius = calloc (n_radius, sizeof (double));
+    if (radius == NULL)
+    {
+        fprintf (stderr, "Out of memory while allocating `radius` in cdim");
+        free (dist);
+        return -1;
+    }
+
+    comsar_fractal_embedding (x, N_max, m_dim, delay, n_dist, dist, &d_min, &d_max);
+
+    /* Log-spaced radii from d_min up to d_max */
+    double ld_min = log (d_min);
+    double ld_max = log (d_max);
+    double lr_inc = (ld_max - ld_min) / (double) (n_radius - 1);
+
+    for (size_t i = 0; i < n_radius; i++)
+    {
+        radius[i] = exp (ld_min + i * lr_inc);
+    }
+
+    comsar_fractal_correlation_sum (n_radius, radius, n_dist, dist, N_max, Cr);
+
+    free (dist);
+    free (radius);
+    return 0;
+}
+
+/* Compute an estimate of the correlation dimension Bader-style.
+ *
+ * This implementation is an improvement over the original Bader-style
+ * algorithm ONLY IN TERMS OF SPEED and readability.
+ *
+ * Beyond that, the algorithm has several issues that are addressed where
+ * they occur.
+ */
+double
+corr_dim_bader (const short *snd, const size_t delay, const size_t m_dim,
+        const size_t n_bins, const size_t scaling_size)
+{
+    /* arbitrarily set boundary condition for distance matrix computation */
+    const size_t bound = 10;
+
+    /* arbitrarily set number of samples to consume from the input array.
+     * If the input array has fewer than ``n_samples`` frames, the behaviour
+     * of this function is undefined. */
+    const size_t n_samples = 2400;
+
+    size_t n_dists = (n_samples-bound) * (n_samples-bound+1) / 2;
+    double dist_min = 1.0;
+    double dist_max = 0.0;
+
+    double *dists = calloc (n_dists, sizeof (double));
+    size_t *corr_hist = calloc (n_bins, sizeof (size_t));
+    size_t *corr_sums = calloc (n_bins, sizeof (size_t));
+
+    if (corr_hist == NULL || corr_sums == NULL || dists == NULL) {
+        fprintf (stderr, "Failed to allocate memory.");
+        return -1.0;
+    }
+
+    /* The block below is intended to compute the distances in an
+     * `m_dim`-dimensional delay embedding by traversing the condensed
+     * distance matrix of the embedded vectors.
+     *
+     * It does, however, compute the distances in the upper right triangle
+     * of the distance matrix. As a consequence, the values on the main
+     * diagonal of the distance matrix are computed even though they equal
+     * 0 by definition. Moreover, the remaining distances are computed
+     * twice, i. e., the vectors at (n, m) and (m, n) are both evaluated.
+     * Additionally, many other distances are omitted.
+     */
+    for (size_t i = 0, cnt = 0; i < n_samples-bound; i++)
+    {
+        for (size_t j = 0; j < n_samples-bound-i; j++)
+        {
+            for (size_t m = 0; m < m_dim; m++)
+            {
+                double diff = (double) (snd[i+m*delay] - snd[i+j+m*delay]);
+                dists[cnt] += diff * diff;
+            }
+            dists[cnt] = sqrt (dists[cnt]);
+            if (dists[cnt] > dist_max)
+            {
+                dist_max = dists[cnt];
+            }
+            cnt++;
+        }
+    }
+
+    size_t bin_spacing = (size_t) (dist_max / 1000.0);
+    size_t step_size   = bin_spacing == 0 ? 1 : bin_spacing;
+    for (size_t i = 0; i < n_dists; i++)
+    {
+        if (dists[i] < dist_min)
+        {
+            corr_hist[0]++;
+        }
+        else
+        {
+            size_t idx = ((size_t) dists[i] - dist_min) / step_size;
+            if (idx + 2 < n_bins)
+            {
+                corr_hist[idx+1]++;
+            }
+        }
+    }
+
+    /* Compute the correlation sum as the cumulative sum over
+     * the correlation histogram `corr_hist`.
+     * Note: an earlier version used the loop condition `j < i`, which
+     * skipped the first index and omitted the last, so that `corr_sums[0]`
+     * was always 0. The condition `j < i+1` used below is correct. */
+    for (size_t i = 0; i < n_bins; i++)
+    {
+        for (size_t j = 0; j < i+1; j++)
+        {
+            corr_sums[i] += corr_hist[j];
+        }
+    }
+
+    /* Find the bin with the most points in it and its index */
+    size_t max_pts = 0;
+    size_t max_bin = 0;
+    for (size_t i = 0; i < (size_t) ((double) n_bins * 3. / 5.); i++)
+    {
+        if (corr_hist[i] > max_pts)
+        {
+            max_pts = corr_hist[i];
+            max_bin = i;
+        }
+    }
+
+    /* Compute the slope */
+    double x1 = log ((double) (max_bin * step_size) + (double) dist_min);
+    double x2 = log ((double) ((max_bin + scaling_size) * step_size) + (double) dist_min);
+    double y1 = log ((double) corr_sums[max_bin] / (double) n_dists);
+    double y2 = log ((double) corr_sums[max_bin+scaling_size] / (double) n_dists);
+    free (dists);
+    free (corr_hist);
+    free (corr_sums);
+
+    return (y2 - y1) / (x2 - x1);
+}
diff --git a/src/apollon/signal/container.py b/src/apollon/signal/container.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc56062e797d84cc455c93cd2385a1629636e116
--- /dev/null
+++ b/src/apollon/signal/container.py
@@ -0,0 +1,51 @@
+"""apollon/spectral/container.py
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß, mblass@posteo.net
+"""
+from dataclasses import dataclass, asdict
+from typing import ClassVar, Optional
+
+from .. import io
+from .. container import Params
+from .. segment import SegmentationParams
+from .. types import PathType, Schema
+
+
+@dataclass
+class DftParams(Params):
+    """Parameter set for Discrete Fourier Transform."""
+    _schema: ClassVar[Schema] = io.json.load_schema('dft_params')
+    fps: int
+    window: str = 'hamming'
+    n_fft: Optional[int] = None
+
+
+@dataclass
+class StftParams(Params):
+    """Parameter set for spectral transforms."""
+    _schema: ClassVar[Schema] = io.json.load_schema('stft_params')
+    fps: int
+    window: str
+    n_fft: Optional[int] = None
+    n_perseg: Optional[int] = None
+    n_overlap: Optional[int] = None
+    extend: Optional[bool] = None
+    pad: Optional[bool] = None
+
+
+@dataclass
+class CorrDimParams(Params):
+    """Parameter set for correlation dimension estimation."""
+    _schema: ClassVar[Schema] = io.json.load_schema('corrdim')
+    delay: int
+    m_dim: int
+    n_bins: int
+    scaling_size: int
+
+
+@dataclass
+class CorrGramParams(Params):
+    """Parameter set for correlograms."""
+    _schema: ClassVar[Schema] = io.json.load_schema('corrgram')
+    wlen: int
+    n_delay: int
+    total: bool = True
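+
+
+# A minimal usage sketch (assuming the ``Params`` base class adds no
+# required constructor arguments of its own):
+#
+#     >>> params = DftParams(fps=44100)
+#     >>> params.window
+#     'hamming'
+#     >>> asdict(params)['n_fft'] is None
+#     True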
diff --git a/src/apollon/signal/correlogram.c b/src/apollon/signal/correlogram.c
new file mode 100644
index 0000000000000000000000000000000000000000..90c2300905a4547e20694b14a865b93839fe94b3
--- /dev/null
+++ b/src/apollon/signal/correlogram.c
@@ -0,0 +1,78 @@
+#include "correlogram.h"
+
+double
+corrcoef (const double *x,
+          const double *y,
+          const size_t  n)
+{
+    double s_x    = 0.0;
+    double s_y    = 0.0;
+    double s_xy   = 0.0;
+    double s_sq_x = 0.0;
+    double s_sq_y = 0.0;
+    double cov    = 0.0;
+    double ms_x   = 0.0;
+    double ms_y   = 0.0;
+    double p_std  = 0.0;
+
+    for (size_t i = 0; i < n; i++)
+    {
+        double  xi = *(x+i);
+        double  yi = *(y+i);
+
+        s_x    += xi;
+        s_y    += yi;
+        s_xy   += xi * yi;
+        s_sq_x += xi * xi;
+        s_sq_y += yi * yi;
+    }
+
+    cov   = s_xy - s_x * s_y / n;
+    ms_x  = s_x * s_x / n;
+    ms_y  = s_y * s_y / n;
+    p_std = sqrt (s_sq_x - ms_x) * sqrt (s_sq_y - ms_y);
+
+    if (p_std == 0.0)
+    {
+        return 0.0;
+    }
+    return cov / p_std;
+}
+
+
+int
+correlogram_delay (const double *sig,
+                   const size_t *delays,
+                   const size_t  wlen,
+                   const size_t *dims,
+                         double *cgram)
+{
+    for (size_t i = 0; i < dims[0]; i++)
+    {
+        for (size_t t = 0; t < dims[1]; t++)
+        {
+            double crr = corrcoef (sig+t, sig+t+delays[i], wlen);
+            cgram[i*dims[1]+t] = crr > 0.0F ? pow (crr, 4) : 0.0F;
+        }
+    }
+    return 1;
+}
+
+
+int
+correlogram (const double *sig,
+             const size_t  wlen,
+             const size_t *dims,
+                   double *cgram)
+{
+    for (size_t delay = 1; delay < dims[0]; delay++)
+    {
+        for (size_t off = 0; off < dims[1]; off++)
+        {
+            double crr = corrcoef (sig+off, sig+off+delay, wlen);
+            size_t idx = (delay-1) * dims[1] + off;
+            cgram[idx] = crr > 0.0F ? pow (crr, 4) : 0.0F;
+        }
+    }
+    return 1;
+}
diff --git a/apollon/signal/critical_bands.py b/src/apollon/signal/critical_bands.py
similarity index 81%
rename from apollon/signal/critical_bands.py
rename to src/apollon/signal/critical_bands.py
index 770a3952f3bed775f7cf980a0c1046fb847a9c82..725fccf631e7103ad6f19094f8d288fe20930625 100644
--- a/apollon/signal/critical_bands.py
+++ b/src/apollon/signal/critical_bands.py
@@ -1,6 +1,6 @@
 # Licensed under the terms of the BSD-3-Clause license.
 # Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+# mblass@posteo.net
 
 import numpy as _np
 from scipy.signal.windows import get_window as _get_window
@@ -8,7 +8,8 @@ from scipy.signal.windows import get_window as _get_window
 from .. types import Array as _Array
 from .. import tools as _tools
 
-def frq2cbr(frq) -> _Array:
+
+def frq2cbr(frq: _Array) -> _Array:
     """Transform frquencies in Hz to critical band rates in Bark.
 
     Args:
@@ -25,10 +26,10 @@ def level(cbi: _Array):
     """Compute the critical band level L_G from critical band intensities I_G.
 
     Args:
-        cbi (ndarray)    Critical band intensities.
+        cbi:    Critical band intensities.
 
     Returns:
-        (nddarray)    Critical band levels.
+        Critical band levels.
     """
     ref = 10e-12
     return 10.0 * _np.log10(_np.maximum(cbi, ref) / ref)
@@ -41,15 +42,15 @@ def specific_loudness(cbr: _Array):
     should be scaled in critical band levels.
 
     Args:
-        cbr (ndarray)    Critical band rate spectrum.
+        cbr:    Critical band rate spectrum.
 
     Returns:
-        (ndarray)    Specific loudness.
+        Specific loudness.
     """
     return _np.power(level(cbr), 0.23)
 
 
-def total_loudness(cbr: _Array):
+def total_loudness(cbr: _Array) -> _Array:
     """Compute the totals loudness of critical band rate spectra.
 
     The total loudness is the sum of the specific loudnesses. The spectra
@@ -61,20 +62,20 @@ def total_loudness(cbr: _Array):
     Returns:
         (ndarray)    Total loudness.
     """
-    return _tools.array2d_fsum(specific_loudness(cbr), axis=0)
+    return _tools.fsum(specific_loudness(cbr), axis=0)
 
 
-def filter_bank(frqs):
+def filter_bank(frqs: _Array) -> _Array:
     """Return a critical band rate scaled filter bank.
 
     Each filter is triangular, with lower and upper cutoff frequencies
     set to the lower and upper bound of the given critical band rate.
 
     Args:
-        frqs (ndarray)    Frequency axis in Hz.
+        frqs:    Frequency axis in Hz.
 
     Returns:
-        (ndarray)    Bark scaled filter bank.
+        Bark scaled filter bank.
     """
     n_bands = 24
     z_frq = frq2cbr(frqs)
@@ -87,7 +88,6 @@ def filter_bank(frqs):
         idx = _np.logical_and(lo <= z_frq, z_frq < hi)
         n = idx.sum()
         filter_bank[lo, idx] = _get_window('triang', n, False)
-
     return filter_bank
 
 
@@ -97,14 +97,13 @@ def weight_factor(z):
     This is an improved version of Peeters (2004), section 8.1.3.
 
     Args:
-        z (ndarray)    Critical band rate.
+        z: Critical band rate.
 
     Returns:
-        (ndarray)    Wheighting factor.
+        Weighting factor.
     """
     base = _np.ones_like(z, dtype='float64')
     slope = 0.066 * _np.exp(0.171 * _np.atleast_1d(z))
-
     return _np.maximum(base, slope)
 
 
@@ -119,7 +118,7 @@ def sharpness(cbr_spctrm: _Array) -> _Array:
         (ndarray)    Sharpness for each time instant of the cbr_spctrm
     """
     loud_specific = _np.maximum(specific_loudness(cbr_spctrm), _np.finfo('float64').eps)
-    loud_total = _tools.array2d_fsum(loud_specific, axis=0)
+    loud_total = _tools.fsum(loud_specific, keepdims=True)
 
     z = _np.arange(1, 25)
     return ((z * weight_factor(z)) @ cbr_spctrm) / loud_total
diff --git a/src/apollon/signal/features.py b/src/apollon/signal/features.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe3f79637905f2aebfa79b14ddc192b7cb3602ae
--- /dev/null
+++ b/src/apollon/signal/features.py
@@ -0,0 +1,453 @@
+"""
+Audio feature extraction routines.
+
+|  Licensed under the terms of the BSD-3-Clause license.
+|  Copyright (C) 2019 Michael Blaß
+|  mblass[at]posteo[dot]net
+"""
+
+import numpy as _np
+from scipy.signal import hilbert as _hilbert
+from typing import Optional
+
+import _features
+from . import tools as _sigtools
+from .. import segment as _segment
+from .. import tools
+from .. types import Array as _Array
+from . import critical_bands as _cb
+from .. audio import fti16
+from .. import _defaults
+
+
+def cdim(inp: _Array, delay: int, m_dim: int, n_bins: int = 1000,
+         scaling_size: int = 10, mode: str = 'bader') -> _Array:
+    # pylint: disable = too-many-arguments
+    r"""Compute an estimate of the correlation dimension ``inp``.
+
+    This function implements the Grassberger-Procaccia algorithm
+    [Grassberger1983]_ to compute the correlation sum
+
+    .. math::
+        \hat C(r) = \frac{2}{N(N-1)} \sum_{i<j}
+        \Theta (r - \| \boldsymbol{x}_i - \boldsymbol{x}_j \|)
+
+    from a time delay embedding of ``inp``.
+
+    If ``mode`` is set to 'bader', the input array must have at least
+    2400 elements. Otherwise, the result is undefined.
+
+    Args:
+        inp:           Input array.
+        delay:         Embedding delay in samples.
+        m_dim:         Number of embedding dimensions.
+        n_bins:        Number of bins.
+        scaling_size:  Size of the scaling region in bins.
+        mode:          Use 'bader' for the original algorithm; 'blass' is
+                       reserved but not yet implemented.
+
+    Returns:
+        Array of correlation dimension estimates.
+
+    Raises:
+        ValueError
+
+    .. [Grassberger1983] P. Grassberger, and I. Procaccia, "Measuring the strangeness of strange attractors,"  *Physica 9d*, pp. 189-208.
+    """
+    if inp.ndim != 2:
+        raise ValueError('Input array must be two-dimensional.')
+
+    if mode == 'bader':
+        cdim_func = _features.cdim_bader
+        if inp.dtype != 'int16':
+            inp = fti16(inp)
+    elif mode == 'blass':
+        raise NotImplementedError
+        # cdim_func = fractal.cdim
+    else:
+        raise ValueError(f'Unknown mode "{mode}". Expected either "bader", '
+                         'or "blass"')
+    out = _np.zeros(inp.shape[1])
+    for i, seg in enumerate(inp.T):
+        out[i] = _np.nan_to_num(cdim_func(seg, delay, m_dim, n_bins,
+                                          scaling_size))
+    return _np.expand_dims(out, 0)
+
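+# A minimal usage sketch (parameter values are illustrative, not
+# recommendations; the estimate itself depends on the input):
+#
+#     >>> t = _np.arange(44100) / 44100
+#     >>> sig = _np.sin(2*_np.pi*440*t)
+#     >>> segs = _np.stack([sig, sig, sig], axis=1)    # segments as columns
+#     >>> cdim(segs, delay=14, m_dim=80).shape
+#     (1, 3)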
+
+def correlogram(inp: _Array, wlen: int, n_delay: int,
+                total: bool = False) -> _Array:
+    r"""Windowed autocorrelation of ``inp``.
+
+    This function estimates autocorrelation functions between ``wlen``-sized
+    windows of the input, separated by ``n_delay`` samples [Granqvist2003]_.
+    The autocorrelation :math:`r_{m, n}` is given by
+
+    .. math::
+        r_{m, n} = \frac{\sum_{k=m}^{m+w-1} (x_k - \overline x_m)(x_{k+n} -
+        \overline x_{m+n})}
+        {\sqrt{\sum_{k=m}^{m+w-1}(x_k - \overline x_m)^2
+        \sum_{k=m}^{m+w-1}(x_{k+n} - \overline x_{m+n})^2}} \,,
+
+    where :math:`\overline x_m` is the local mean
+
+    .. math::
+        \overline x_m = \frac{\sum_{i=m}^{m+w-1} x_i}{w} \,.
+
+    Args:
+        inp:        One-dimensional input signal.
+        wlen:       Length of the autocorrelation window.
+        n_delay:    Number of delays.
+        total:      Sum the correlogram along its first axis.
+
+    Returns:
+        Two-dimensional array in which each column is an auto-correlation
+        function.
+
+    .. [Granqvist2003] S. Granqvist, B. Hammarberg, "The correlogram: a visual display of periodicity," *JASA,* 114, pp. 2934.
+    """
+    if not isinstance(inp, _np.ndarray):
+        raise TypeError(f'Argument ``inp`` is of type {type(inp)}. It has '
+                        'to be a numpy array.')
+
+    if inp.ndim != 2:
+        raise ValueError('Input must be two-dimensional.')
+
+    out = _np.zeros((inp.shape[1], n_delay, inp.shape[0]-wlen-n_delay))
+    for i, seg in enumerate(inp.T):
+        out[i] = _features.correlogram(seg, wlen, n_delay)
+    if total is True:
+        return out.sum(axis=(1, 2)) / _np.prod(out.shape[1:])
+    return out
+
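+# A minimal usage sketch (shapes only; each column of ``inp`` is treated as
+# a separate signal):
+#
+#     >>> sig = _np.random.randn(2000, 1)
+#     >>> correlogram(sig, wlen=256, n_delay=64).shape
+#     (1, 64, 1680)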
+
+def energy(sig: _Array) -> _Array:
+    """Total energy of time domain signal.
+
+    Args:
+        sig:  Time domain signal.
+
+    Returns:
+        Energy along the first axis.
+    """
+    if not _np.isfinite(sig).all():
+        raise ValueError('Input ``sig`` contains NaNs or infinite values.')
+    return _np.sum(_np.square(_np.abs(sig)), axis=0, keepdims=True)
+
+
+def frms(bins: _Array, n_sig: int, window: Optional[str] = None) -> _Array:
+    """Root meann square of signal energy estimate in the spectral domain.
+
+    Args:
+        bins:    DFT bins.
+        n_sig:   Size of original signal.
+        window:  Window function applied to original signal.
+
+    Returns:
+        Estimate of signal energy along first axis.
+    """
+    vals = bins * n_sig
+    if n_sig % 2:
+        vals /= 2
+    else:
+        vals[:-1] /= 2
+    rms_ = _np.sqrt(2*energy(vals)) / n_sig
+    if window:
+        rms_ /= rms(getattr(_np, window)(n_sig))
+    return rms_
+
+
+def rms(sig: _Array) -> _Array:
+    """Root mean square of time domain signal.
+
+    Args:
+        sig:  Time domain signal
+
+    Returns:
+        RMS of signal along first axis.
+    """
+    return _np.sqrt(_np.mean(_np.square(_np.abs(sig)), axis=0, keepdims=True))
+
+
+def spectral_centroid(frqs: _Array, amps: _Array) -> _Array:
+    r"""Estimate the spectral centroid frequency.
+
+    Spectral centroid is always computed along the second axis of ``amps``.
+
+    Args:
+        frqs:   Nx1 array of DFT frequencies.
+        amps:   NxM array of absolute values of DFT bins.
+
+    Returns:
+        1xM array of spectral centroids.
+
+    Note:
+        The spectral centroid frequency :math:`f_C` is computed as
+        the expectation of a spectral distribution:
+
+        .. math::
+            f_C = \sum_{i=0}^{N} f_i p(i) \,,
+
+        where :math:`f_i` is the center frequency, and :math:`p(i)` the
+        relative amplitude of the :math:`i` th DFT bin.
+    """
+    return tools.fsum(frqs*_power_distr(amps), axis=0, keepdims=True)
+
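+# A small worked example: with equal amplitude in two bins at 100 Hz and
+# 300 Hz, the centroid lands halfway between them:
+#
+#     >>> frqs = _np.array([[100.0], [200.0], [300.0]])
+#     >>> amps = _np.array([[1.0], [0.0], [1.0]])
+#     >>> spectral_centroid(frqs, amps)
+#     array([[200.]])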
+
+def spectral_spread(frqs: _Array, bins: _Array,
+                    centroids: Optional[_Array] = None) -> _Array:
+    """Estimate spectral spread.
+
+    Spectral Spread is always computed along the second axis of ``bins``.
+    This function computes the square root of the spectral spread.
+
+    Args:
+        frqs:   Nx1 array of DFT frequencies.
+        bins:   NxM array of DFT bin values.
+        centroids:  Optional precomputed spectral centroids.
+
+    Returns:
+        Square root of spectral spread.
+
+    Note:
+        Spectral Spread :math:`f_s` is computed as
+
+        .. math::
+            f_S = \sum_{i=0}^N (f_i - f_C)^2 p(i) \,,
+
+        where :math:`f_i` is the center frequency, and :math:`p(i)` the
+        relative amplitude of the :math:`i` th DFT bin. :math:`f_C` is the
+        spectral centroid frequency.
+    """
+    if centroids is None:
+        centroids = spectral_centroid(frqs, bins)
+    deviation = _np.power(frqs-centroids, 2)
+    return _np.sqrt(tools.fsum(deviation*_power_distr(bins), axis=0,
+                               keepdims=True))
+
+
+def spectral_skewness(frqs: _Array, bins: _Array,
+                      centroids: Optional[_Array] = None,
+                      spreads: Optional[_Array] = None) -> _Array:
+    r"""Estimate the spectral skewness.
+
+    Args:
+        frqs:   Frequency array.
+        bins:   Absolute values of DFT bins.
+        centroids:  Precomputed spectral centroids.
+        spreads:    Precomputed spectral spreads.
+
+    Returns:
+        Array of spectral skewness values.
+
+    Note:
+        The spectral skewness :math:`S_S` is calculated by
+
+        .. math::
+            S_S = \sum_{i=0}^N \frac{(f_i-f_C)^3}{\sigma^3} p(i) \,,
+
+        where :math:`f_i` is the center frequency, and :math:`p(i)` the
+        relative amplitude of the :math:`i` th DFT bin. :math:`f_C` is the
+        spectral centroid frequency, and :math:`\sigma = \sqrt{f_S}.`
+    """
+    # Third standardized moment of the spectral distribution (see Note).
+    if centroids is None:
+        centroids = spectral_centroid(frqs, bins)
+    if spreads is None:
+        spreads = spectral_spread(frqs, bins, centroids)
+    deviation = _np.power(frqs-centroids, 3)
+    return tools.fsum(deviation*_power_distr(bins), axis=0,
+                      keepdims=True) / _np.power(spreads, 3)
+
+def spectral_kurtosis(frqs: _Array, bins: _Array,
+                      centroids: Optional[_Array] = None,
+                      spreads: Optional[_Array] = None) -> _Array:
+    r"""Estimate spectral kurtosis.
+
+    Args:
+        frqs:   Frequency array.
+        bins:   Absolute values of DFT bins.
+        centroids:  Precomputed spectral centroids.
+        spreads:    Precomputed spectral spreads.
+
+    Returns:
+        Array of spectral kurtosis values.
+
+    Note:
+        Spectral kurtosis is calculated by
+
+        .. math::
+            S_K = \sum_{i=0}^N \frac{(f_i-f_C)^4}{\sigma^4} p(i) \,,
+
+        where :math:`f_i` is the center frequency, and :math:`p(i)` the
+        relative amplitude of the :math:`i` th DFT bin. :math:`f_C` is the
+        spectral centroid frequency, and :math:`\sigma = \sqrt{f_S}.`
+    """
+    # Fourth standardized moment of the spectral distribution (see Note).
+    if centroids is None:
+        centroids = spectral_centroid(frqs, bins)
+    if spreads is None:
+        spreads = spectral_spread(frqs, bins, centroids)
+    deviation = _np.power(frqs-centroids, 4)
+    return tools.fsum(deviation*_power_distr(bins), axis=0,
+                      keepdims=True) / _np.power(spreads, 4)
+
+
+def spectral_flux(inp: _Array, delta: float = 1.0,
+                  total: bool = True) -> _Array:
+    r"""Estimate the spectral flux
+
+    Args:
+        inp:    Input data. Each row is assumed to contain DFT bins.
+        delta:  Sample spacing.
+        total:  Accumulate over first axis.
+
+    Returns:
+        Array of Spectral flux.
+
+    Note:
+        Spectral flux is computed by
+
+        .. math::
+            SF(i) = \sum_{j=0}^k H(|X_{i,j}| - |X_{i-1,j}|) \,,
+
+        where :math:`X_{i,j}` is the :math:`j` th frequency bin of the :math:`i`
+        th spectrum :math:`X` of a spectrogram :math:`\boldsymbol X`.
+    """
+    inp = _np.atleast_2d(inp).astype('float64')
+    out = _np.maximum(_np.gradient(inp, delta, axis=-1), 0)
+    if total:
+        return out.sum(axis=0, keepdims=True)
+    return out
+
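+# A small worked example: the rectified gradient keeps only rising bin
+# magnitudes (central differences at interior points):
+#
+#     >>> spectral_flux(_np.array([[0.0, 1.0, 2.0, 1.0]]), total=False)
+#     array([[1., 1., 0., 0.]])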
+
+def fspl(amps: _Array, total: bool = False,
+         ref: Optional[float] = None) -> _Array:
+    """Computes sound pressure level from spectrum.
+
+    The values of ``amp`` are assumed to be magnitudes of DFT bins.
+
+    The reference pressure defaults to the human hearing threshold of 20 μPa.
+
+    Args:
+        amps:     Amplitude values.
+        total:    If True, returns the total spl over all values. In case
+                  ``amp`` is two-dimensional, the first axis is aggregated.
+        ref:      Custom reference value.
+
+    Returns:
+        Sound pressure level of ``amp``.
+    """
+    if ref is None:
+        ref = _defaults.SPL_REF
+
+    vals = _np.power(amps/ref, 2)
+    if total:
+        vals = vals.sum(axis=0, keepdims=True)
+    vals = _np.maximum(1.0, vals)
+    return 10.0*_np.log10(vals)
+
+
+def fsplc(frqs: _Array, amps: _Array, total: bool = False,
+          ref: Optional[float] = None) -> _Array:
+    """Apply C-weighting and compute the sound pressure level.
+
+    Args:
+        frqs:    Center frequency of DFT band.
+        amps:    Magnitude of DFT band.
+        total:   If True, return the total SPL over all values.
+        ref:     Reference value for p_0.
+
+    Returns:
+        C-weighted sound pressure level.
+    """
+    return fspl(_sigtools.c_weighting(frqs)*amps, total, ref)
+
+def spl(inp: _Array, ref=_defaults.SPL_REF):
+    """Computes the average sound pressure level of time domain signal.
+
+    Args:
+        inp:  Time domain signal.
+        ref:  Reference level.
+
+    Returns:
+        Average sound pressure level.
+    """
+    # Clip to the smallest positive float: ``log10`` with a ``where`` mask
+    # leaves the masked output uninitialized.
+    level = _np.maximum(rms(inp)/ref, _np.finfo('float64').tiny)
+    return 20 * _np.log10(level)
+
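+# A small worked example (assuming ``_defaults.SPL_REF`` is 20 µPa, the
+# threshold referenced above): a full-scale sine has an RMS of 1/sqrt(2):
+#
+#     >>> t = _np.linspace(0.0, 1.0, 44100, endpoint=False)
+#     >>> sine = _np.sin(2*_np.pi*1000.0*t)[:, None]
+#     >>> spl(sine)    # 20*log10((1/sqrt(2))/2e-5), roughly 91 dB
+#     array([[90.96...]])
+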
+def log_attack_time(inp: _Array, fps: int, ons_idx: _Array,
+                    wlen: float = 0.05) -> _Array:
+    """Estimate the attack time of each onset and return its logarithm.
+
+    This function estimates the attack time as the duration between the
+    onset and the local maxima of the magnitude of the Hilbert transform
+    of the local window.
+
+    Args:
+        inp:      Input signal.
+        fps:      Sampling frequency.
+        ons_idx:  Sample indices of onsets.
+        wlen:     Local window length in seconds.
+
+    Returns:
+        Logarithm of the attack time.
+    """
+    wlen = int(fps * wlen)
+    segs = _segment.by_onsets(inp, wlen, ons_idx)
+    attack_time = _np.absolute(_hilbert(segs)).argmax(axis=1) / fps
+    attack_time[attack_time == 0.0] = 1.0
+    return _np.log(attack_time)
+
+
+def loudness(frqs: _Array, bins: _Array) -> _Array:
+    """Calculate a measure for the perceived loudness from a spectrogram.
+
+    Args:
+        frqs:   Frequency axis.
+        bins:   Magnitude spectrogram.
+
+    Returns:
+        Estimate of the total loudness.
+    """
+    cbrs = _cb.filter_bank(frqs) @ bins
+    return _cb.total_loudness(cbrs)
+
+
+def roughness_helmholtz(d_frq: float, bins: _Array, frq_max: float,
+                        total: bool = True) -> _Array:
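+    """Estimate auditory roughness based on Helmholtz' beating model.
+
+    Args:
+        d_frq:    Frequency resolution of ``bins``.
+        bins:     Magnitude spectrogram.
+        frq_max:  Upper frequency bound of the roughness kernel.
+        total:    If True, accumulate over the frequency axis.
+
+    Returns:
+        Roughness estimate per segment.
+    """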
+    kernel = _roughness_kernel(d_frq, frq_max)
+    out = _np.empty((kernel.size, bins.shape[1]))
+    for i, bin_slice in enumerate(bins.T):
+        out[:, i] = _np.correlate(bin_slice, kernel, mode='same')
+
+    if total:
+        out = out.sum(axis=0, keepdims=True)
+    return out
+
+
+def sharpness(frqs: _Array, bins: _Array) -> _Array:
+    """Calculate a measure for the perception of auditory sharpness from a
+    spectrogram.
+
+    Args:
+        frqs:    Frequencies.
+        bins:    DFT magnitudes.
+
+    Returns:
+        Sharpness.
+    """
+    cbrs = _cb.filter_bank(frqs.squeeze()) @ bins
+    return _cb.sharpness(cbrs)
+
+
+def _power_distr(bins: _Array) -> _Array:
+    """Computes the spectral energy distribution.
+
+    Args:
+        bins:    NxM array of DFT bins.
+
+    Returns:
+        NxM array of spectral densities.
+    """
+    total_power = tools.fsum(bins, axis=0, keepdims=True)
+    total_power[total_power == 0] = 1
+    return bins / total_power
+
+
+def _roughness_kernel(frq_res: float, frq_max: float) -> _Array:
+    """Compute the convolution kernel for roughness computation.
+
+    Args:
+        frq_res:    Frequency resolution.
+        frq_max:    Frequency bound.
+
+    Returns:
+        Weight for each frequency below ``frq_max``.
+    """
+    frm = 33.5
+    bin_idx = int(_np.round(frq_max/frq_res))
+    norm = frm * _np.exp(-1)
+    base = _np.abs(_np.arange(-bin_idx, bin_idx+1)) * frq_res
+    return base / norm * _np.exp(-base/frm)
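+
+# Sketch: the kernel is unimodal and peaks at ``frm`` (33.5 Hz), where
+# base/norm * exp(-base/frm) reduces to e * e**-1 == 1.
+#
+#     >>> kern = _roughness_kernel(0.5, 300.0)
+#     >>> round(float(kern.max()), 6)
+#     1.0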
diff --git a/apollon/signal/filter.py b/src/apollon/signal/filter.py
similarity index 97%
rename from apollon/signal/filter.py
rename to src/apollon/signal/filter.py
index 1b8966a2e12f9848d047ff2de91fe7d95282c9b2..f86c4dd0a71170c65e9de04ad4a2876151fe4340 100644
--- a/apollon/signal/filter.py
+++ b/src/apollon/signal/filter.py
@@ -1,6 +1,6 @@
 # Licensed under the terms of the BSD-3-Clause license.
 # Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+# mblass@posteo.net
 
 """
 """
diff --git a/src/apollon/signal/spectral.py b/src/apollon/signal/spectral.py
new file mode 100644
index 0000000000000000000000000000000000000000..17530ef9e530aa1d94921c6bcdca08f1b49ebef0
--- /dev/null
+++ b/src/apollon/signal/spectral.py
@@ -0,0 +1,277 @@
+"""apollon/signal/spectral.py
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß, mblass@posteo.net
+
+Provide easy access to frequency spectra obtained by the DFT.
+
+Classes:
+    Spectrum
+    Spectrogram
+    Dft
+    Stft
+    StftSegments
+
+Functions:
+    fft:  One-sided fast Fourier transform for real input.
+"""
+from typing import Any, Union
+
+import matplotlib.pyplot as _plt
+import numpy as np
+import scipy.signal as _sps
+
+from .. segment import Segmentation, Segments
+from .. types import Array, Optional
+from . container import Params, DftParams, StftParams
+
+
+def fft(sig, window: Optional[str] = None, n_fft: Optional[int] = None,
+        norm: bool = True):
+    """Compute the Discrete Fouier Transform for real input.
+
+    This is a simple wrapper around ``numpy.fft.rfft``. The input signal
+    must be two-dimensional. The FFT is performed along the first axis.
+
+    Args:
+        sig:     Two-dimensional input array.
+        n_fft:   FFT length in samples.
+        window:  Name of window function.
+        norm:    If True, scale such that a sinusoidal signal with unit
+                 amplitude has unit amplitude in the spectrum.
+
+    Returns:
+        FFT bins.
+
+    Raises:
+        ValueError
+    """
+    if sig.ndim != 2:
+        raise ValueError(f'Input array has {sig.ndim} dimensions. However,'
+                         ' ``fft`` expects two-dimensional array.')
+    n_sig = sig.shape[0]
+    if n_fft is None:
+        n_fft = n_sig
+
+    if window is None:
+        window = 'rect'
+
+    win = np.expand_dims(_sps.get_window(window, n_sig), 1)
+    bins = np.fft.rfft(sig*win, n_fft, axis=0)
+
+    if norm:
+        bins = bins / np.sqrt(np.square(win.sum())) * 2
+
+    return bins
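+
+# Usage sketch: with ``norm=True`` a unit-amplitude sine maps to a spectral
+# peak of 1.0 (fps and frequency below are arbitrary choices).
+#
+#     >>> fps = 9000
+#     >>> t = np.arange(fps)[:, None] / fps
+#     >>> sig = np.sin(2 * np.pi * 1000 * t)
+#     >>> round(float(np.abs(fft(sig)).max()), 6)
+#     1.0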
+
+
+class TransformResult:
+    """Base class for transformation results."""
+    def __init__(self, params: Any, bins: np.ndarray) -> None:
+        self._params = params
+        self._bins = bins
+
+    @property
+    def abs(self) -> Array:
+        """Compute magnitude spectrum."""
+        return self.__abs__()
+
+    @property
+    def bins(self) -> Array:
+        """Raw FFT bins."""
+        return self._bins
+
+    @property
+    def d_frq(self) -> Union[float, None]:
+        """Retrun the frequency resolution."""
+        return self._params.fps / self._n_fft
+
+    @property
+    def frqs(self) -> Array:
+        """Frequency axis."""
+        return np.fft.rfftfreq(self._n_fft,
+                               1.0/self._params.fps).reshape(-1, 1)
+
+    @property
+    def params(self) -> DftParams:
+        """Initial parameters."""
+        return self._params
+
+    @property
+    def phase(self):
+        """Compute phase spectrum."""
+        if self._bins is None:
+            return None
+        return np.angle(self._bins)
+
+    @property
+    def power(self):
+        """Compute power spectrum."""
+        return np.square(self.__abs__())
+
+    @property
+    def centroid(self):
+        """Compute spectral centroid."""
+        return np.multiply(self.abs, self.frqs).sum() / self.abs.sum()
+
+    @property
+    def _n_fft(self) -> int:
+        """Compute the FFT length considering ``n_fft`` was ``None``."""
+        if self._params.n_fft is None:
+            n_fft = self._inp_size
+        else:
+            n_fft = self._params.n_fft
+        return n_fft
+
+    def __abs__(self) -> Array:
+        return np.absolute(self._bins)
+
+    def __getitem__(self, key) -> Array:
+        return self._bins[key]
+
+    def __len__(self) -> int:
+        return self._bins.shape[0]
+
+
+class Spectrum(TransformResult):
+    """FFT Spectrum."""
+    def __init__(self, params: DftParams, bins: np.ndarray,
+                 inp_size: int) -> None:
+        """Representation of DFT bins with frequency axis.
+
+        Args:
+            bins:      DFT bins.
+            params:    DFT parameters.
+            inp_size:  Length of original signal.
+        """
+        if not isinstance(params, Params):
+            raise TypeError('Expected type ``Params``')
+        if not isinstance(bins, np.ndarray):
+            raise TypeError('Expected numpy array.')
+        super().__init__(params, bins)
+        self._inp_size = inp_size
+
+    def plot(self, fmt='-') -> None:
+        """Plot the spectrum."""
+        _plt.plot(self.frqs, self.abs, fmt)
+
+    def __repr__(self) -> str:
+        return f'Spectrum({self._params})'
+
+
+class Spectrogram(TransformResult):
+    """Result of Short Time Fourier Transform."""
+    def __init__(self, params: StftParams, bins: np.ndarray,
+                 inp_size: int) -> None:
+        """Representation of DFT bins with time and frequency axis.
+
+        Args:
+            params:    Set of params.
+            bins:      FFT bins.
+            inp_size:  Length of the time domain signal.
+        """
+        super().__init__(params, bins)
+        self._inp_size = inp_size
+
+    @property
+    def n_segments(self) -> int:
+        """Return number of segments."""
+        return self._bins.shape[1]
+
+    @property
+    def index(self) -> Array:
+        """Center index regarding original signal per bin."""
+        if self._params.extend:
+            offset = 0
+        else:
+            offset = self._params.n_perseg // 2
+        return (offset + np.arange(self._bins.shape[1]) *
+                (self._params.n_perseg - self._params.n_overlap))
+
+    @property
+    def times(self) -> Array:
+        """Time axis."""
+        return self.index / self._params.fps
+
+    def __repr__(self) -> str:
+        return f'Spectrogram({self._params})'
+
+
+class SpectralTransform:
+    """Base class for spectral transforms."""
+    def __init__(self, params: Params):
+        """SpectralTransform base class.
+
+        Args:
+            params:  Parameter object.
+        """
+        self._params = params
+
+    def transform(self, data: np.ndarray):
+        """Transform ``data`` to spectral domain."""
+
+    @property
+    def params(self) -> Params:
+        """Return parameters."""
+        return self._params
+
+
+class Dft(SpectralTransform):
+    """Discrete Fourier Transform."""
+    def __init__(self, fps: int, window: str,
+                 n_fft: Optional[int] = None) -> None:
+        """Create a new spectrum.
+
+        Args:
+            fps:     Sample rate.
+            window:  Name of window function.
+            n_fft:   FFT length.
+        """
+        super().__init__(DftParams(fps, window, n_fft))
+
+    def transform(self, data: np.ndarray) -> Spectrum:
+        """Transform ``data`` to spectral domain."""
+        bins = fft(data, self.params.window, self.params.n_fft)
+        return Spectrum(self._params, bins, data.shape[0])
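+
+# Usage sketch (assumes ``sig`` shaped (n, 1) as in the sketch after ``fft``):
+#
+#     >>> dft = Dft(fps=9000, window='hamming')
+#     >>> spec = dft.transform(sig)
+#     >>> spec.frqs.shape == spec.bins.shape
+#     True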
+
+
+class Stft(SpectralTransform):
+    """Short Time Fourier Transform of AudioFile."""
+    def __init__(self, fps: int, window: str,
+                 n_perseg: int, n_overlap: int,
+                 n_fft: Optional[int] = None, extend: bool = True,
+                 pad: bool = True) -> None:
+        """Create a new spectrogram.
+
+        Args:
+            params:  Initial parameters
+        """
+        super().__init__(StftParams(fps, window, n_fft, n_perseg,
+                                    n_overlap, extend, pad))
+        self._cutter = Segmentation(self.params.n_perseg, self.params.n_overlap,
+                                    self.params.extend, self.params.pad)
+
+    def transform(self, data: np.ndarray) -> Spectrogram:
+        """Transform ``data`` to spectral domain."""
+        segs = self._cutter.transform(data)
+        bins = fft(segs.data, self.params.window, self.params.n_fft)
+        return Spectrogram(self._params, bins, segs.params.n_perseg)
+
+
+class StftSegments(SpectralTransform):
+    """Short Time Fourier Transform on already segmented audio."""
+    def __init__(self, fps: int, window: str,
+                 n_fft: Optional[int] = None) -> None:
+        """Create a new ``Spectrogram`` from ``Segments``.
+
+        Args:
+            fps:     Sample rate.
+            window:  Name of window function.
+            n_fft:   FFT length.
+        """
+        super().__init__(StftParams(fps, window, n_fft))
+
+    def transform(self, segments: Segments) -> Spectrogram:
+        """Transform ``data`` to spectral domain."""
+        for key, val in segments.params.to_dict().items():
+            setattr(self.params, key, val)
+        bins = fft(segments.data, self.params.window, self.params.n_fft)
+        return Spectrogram(self.params, bins, segments.params.n_perseg)
diff --git a/src/apollon/signal/tools.py b/src/apollon/signal/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..218c9f7863491308c2b9466359b0cd973e1fae6d
--- /dev/null
+++ b/src/apollon/signal/tools.py
@@ -0,0 +1,280 @@
+"""apollon/signal/tools.py
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß
+mblass@posteo.net
+
+Functions:
+    acf                 Normalized autocorrelation.
+    acf_pearson         Normalized Pearson acf.
+    corr_coef_pearson   Correlation coefficient after Pearson.
+    c_weighting         C-weighting for SPL.
+    freq2mel            Transform frequency to mel.
+    limit               Limit dynamic range.
+    mel2freq            Transform mel to frequency.
+    frq2bark            Transform frequency to Bark scale.
+    maxamp              Maximal amplitude of signal.
+    minamp              Minimal amplitude of signal.
+    normalize           Scale data between -1.0 and 1.0.
+    noise               Generate white noise.
+    sinusoid            Generate sinusoidal signal.
+    zero_padding        Append array with zeros.
+    trim_spectrogram    Trim spectrogram to a frequency range.
+"""
+
+import numpy as np
+from scipy import stats
+
+from .. import _defaults
+from .. types import Array, Optional, Sequence, Union
+
+
+def acf(inp: Array) -> Array:
+    """Normalized estimate of the autocorrelation function of ``inp``
+    by means of cross correlation.
+
+    Args:
+        inp:  One-dimensional input array.
+
+    Returns:
+        Autocorrelation function for all positive lags.
+    """
+    N = len(inp)
+    norm = inp @ inp
+    out = np.empty(N)
+    out[0] = 1
+    for lag in range(1, N):
+        pre = inp[:-lag]
+        post = inp[lag:]
+        prod = pre @ post
+        if prod == 0:
+            out[lag] = 0
+        else:
+            out[lag] = prod / norm
+    return out
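+
+# Usage sketch: the zeroth lag of the normalized acf is 1 by construction.
+#
+#     >>> sig = np.sin(np.linspace(0, 4 * np.pi, 64))
+#     >>> acf(sig)[0]
+#     1.0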
+
+
+def acf_pearson(inp_sig):
+    """Normalized estimate of the autocorrelation function of `inp_sig`
+       by means of the Pearson correlation coefficient."""
+
+    N = len(inp_sig)
+    out = np.empty(N-1)
+    out[0] = 1
+    for lag in range(1, N-1):
+        pre = inp_sig[:-lag]
+        post = inp_sig[lag:]
+        prod = corr_coef_pearson(pre, post)
+        if prod == 0:
+            out[lag] = 0
+        else:
+            out[lag] = prod
+    return out
+
+
+def corr_coef_pearson(x_sig: Array, y_sig: Array) -> float:
+    """Fast perason correlation coefficient."""
+    x_dtr = x_sig - np.mean(x_sig)
+    y_dtr = y_sig - np.mean(y_sig)
+    r_xy = np.convolve(x_dtr, y_dtr[::-1], mode='valid')
+    r_xx_yy = (x_dtr @ x_dtr) * (y_dtr @ y_dtr)
+    return r_xy / np.sqrt(r_xx_yy)
+
+
+def c_weighting(frqs: Array) -> Array:
+    """C-weighhting for SPL.
+
+    Args:
+        frq:    Frequencies.
+
+    Returns:
+        Weight for DFT bin with center frequency ``frq``.
+    """
+    aaa = 148693636.0
+    bbb = 424.36
+    sqf = np.power(frqs, 2)
+    return np.divide(aaa*sqf, (sqf+aaa)*(sqf+bbb))
+
+
+def freq2mel(frqs):
+    """Transforms Hz to Mel-Frequencies.
+
+    Params:
+        frqs:  Frequency in Hz.
+
+    Return:
+        Frequency transformed to Mel scale.
+    """
+    frqs = np.atleast_1d(frqs)
+    return 1125 * np.log(1 + frqs / 700)
+
+
+def limit(inp: Array, ldb: Optional[float] = None,
+          udb: Optional[float] = None) -> Array:
+    """Limit the dynamic range of ``inp`` to [``ldb``, ``udb``].
+
+    Boundaries are given in dB SPL.
+
+    Args:
+        inp:  DFT bin magnitudes.
+        ldb:  Lower clip boundary in decibels.
+        udb:  Upper clip boundary in decibels.
+
+    Returns:
+        Copy of ``inp`` with values clipped.
+    """
+    if ldb is None:
+        lth = 0.0
+    elif isinstance(ldb, (int, float)):
+        lth = amp(ldb)
+    else:
+        msg = ('Argument to ``ldb`` must be of type ``int`` or ``float``.\n'
+               f'Found {type(ldb)}.')
+        raise TypeError(msg)
+
+    if udb is None:
+        uth = inp.max()
+    elif isinstance(udb, (int, float)):
+        uth = amp(udb)
+    else:
+        msg = ('Argument to ``udb`` must be of type ``int`` or ``float``.\n'
+               f'Found {type(udb)}.')
+        raise TypeError(msg)
+
+    low = np.where(inp < lth, 0.0, inp)
+    return np.minimum(low, uth)
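+
+# Usage sketch: clip magnitudes to [20 dB, 80 dB] SPL, i.e. [2e-4, 2e-1] Pa
+# assuming the default 20 μPa reference.
+#
+#     >>> limit(np.array([1e-5, 1e-3, 1.0]), ldb=20, udb=80)
+#     array([0.   , 0.001, 0.2  ])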
+
+
+def mel2freq(zfrq):
+    """Transforms Mel-Frequencies to Hzfrq.
+
+    Args:
+        zfrq:  Mel-Frequency.
+
+    Returns:
+        Frequency in Hz.
+    """
+    zfrq = np.atleast_1d(zfrq)
+    return 700 * (np.exp(zfrq / 1125) - 1)
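+
+# Usage sketch: ``freq2mel`` and ``mel2freq`` are mutual inverses.
+#
+#     >>> round(float(mel2freq(freq2mel(440.0))), 6)
+#     440.0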
+
+
+def maxamp(sig):
+    """Maximal absolute elongation within the signal.
+
+    Params:
+        sig    (array-like) Input signal.
+
+    Return:
+        (scalar) Maximal amplitude.
+    """
+    return np.max(np.absolute(sig))
+
+
+def minamp(sig):
+    """Minimal absolute displacement within the signal.
+
+    Params:
+        sig    (array-like) Input signal.
+
+    Return:
+        (scalar) Minimal amplitude.
+    """
+    return np.min(np.absolute(sig))
+
+
+def noise(level, n=9000):
+    """Generate withe noise.
+
+    Params:
+        level       (float) Noise level as standard deviation of a gaussian.
+        n           (int) Length of noise signal in samples.
+
+    Return:
+        (ndarray)   White noise signal.
+    """
+    return stats.norm.rvs(0, level, size=n)
+
+
+def normalize(sig):
+    """Normlize a signal to [-1.0, 1.0].
+
+    Params:
+        sig (np.nadarray)    Signal to normalize.
+
+    Return:
+        (np.ndarray) Normalized signal.
+    """
+    return sig / np.max(np.absolute(sig), axis=0)
+
+
+def sinusoid(frqs: Union[Sequence, Array, int, float],
+             amps: Union[Sequence, Array, int, float] = 1,
+             fps: int = 9000, length: float = 1.0,
+             noise: Optional[float] = None, comps: bool = False) -> Array:
+    """Generate sinusoidal signal.
+
+    Args:
+        frqs:    Component frequencies.
+        amps:    Amplitude of each component in ``frqs``. If ``amps`` is a
+                 scalar, each component of ``frqs`` is scaled according to
+                 ``amps``. If ``amps`` is an iterable, each frequency is
+                 scaled by the respective amplitude.
+        fps:     Sample rate.
+        length:  Length of signal in seconds.
+        noise:   Add gaussian noise with standard deviation ``noise`` to each
+                 sinusoidal component.
+        comps:   If True, return the components of the signal,
+                 else return the sum.
+
+    Return:
+        Array of signals.
+    """
+    frqs_: Array = np.atleast_1d(frqs)
+    amps_: Array = np.atleast_1d(amps)
+
+    if frqs_.shape == amps_.shape or amps_.size == 1:
+        txs = np.arange(fps*length)[:, None] / fps
+        sig = np.sin(2*np.pi*txs*frqs_) * amps_
+    else:
+        raise ValueError(f'Shape of ``frqs`` ({frqs_.shape}) differs from '
+                         f'shape of ``amps`` ({amps_.shape}).')
+    if noise:
+        sig += stats.norm.rvs(0, noise, size=sig.shape)
+
+    if comps:
+        return sig
+    return sig.sum(axis=1, keepdims=True)
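+
+# Usage sketch: a two-component test tone, one column per component.
+#
+#     >>> sig = sinusoid([440, 880], [1.0, 0.5], fps=9000, comps=True)
+#     >>> sig.shape
+#     (9000, 2)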
+
+
+def amp(spl: Union[Array, int, float],
+        ref: float = _defaults.SPL_REF) -> Union[Array, float]:
+    """Computes amplitudes form sound pressure level.
+
+    The reference pressure defaults to the human hearing
+    treshold of 20 μPa.
+
+    Args:
+        spl:    Sound pressure level.
+
+    Returns:
+        DFT magnituds.
+    """
+    return np.power(10.0, 0.05*spl) * ref
+
+
+def zero_padding(sig: Array, n_pad: int,
+                 dtype: Optional[Union[str, np.dtype]] = None) -> Array:
+    """Append n zeros to signal. `sig` must be 1D array.
+
+    Args:
+        sig:    Array to be padded.
+        n_pad:  Number of zeros to be appended.
+
+    Returns:
+        Zero-padded input signal.
+    """
+    if dtype is None:
+        dtype = sig.dtype
+    container = np.zeros(sig.size+n_pad, dtype=dtype)
+    container[:sig.size] = sig
+    return container
diff --git a/apollon/som/__init__.py b/src/apollon/som/__init__.py
similarity index 79%
rename from apollon/som/__init__.py
rename to src/apollon/som/__init__.py
index a37e02c85b0149874636241b9502d6a73ed97025..f8c589c88bc3ad91b52312846ccdd80d34aae0ad 100644
--- a/apollon/som/__init__.py
+++ b/src/apollon/som/__init__.py
@@ -1,6 +1,6 @@
 # Licensed under the terms of the BSD-3-Clause license.
 # Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+# mblass@posteo.net
 
 """apollon/som/__init__.py
 """
diff --git a/src/apollon/som/_distance_module.c b/src/apollon/som/_distance_module.c
new file mode 100644
index 0000000000000000000000000000000000000000..fa4fce02d9bb3d65f62d7b1059342ad214b5568a
--- /dev/null
+++ b/src/apollon/som/_distance_module.c
@@ -0,0 +1,164 @@
+#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION
+#define PY_ARRAY_UNIQUE_SYMBOL comsar_NP_ARRAY_API
+#if !defined(__clang__) && defined(__GNUC__) && defined(__GNUC_MINOR__)
+#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)
+#pragma GCC optimize("tree-vectorize")
+#pragma GCC optimize("unsafe-math-optimizations")
+#pragma GCC optimize("unroll-loops")
+#pragma GCC diagnostic warning "-Wall"
+#endif
+#endif
+#include <Python.h>
+#include <numpy/arrayobject.h>
+#include "distance.h"
+
+
+/* Compute the Hellinger distance for two one-dimensional arrays.
+ *
+ * Params:
+ *      inp_a   One-dimensional array.
+ *      inp_b   One-dimensional array.
+ *  Returns:
+ *      float
+ */
+static PyObject *
+apollon_som_distance_hellinger (PyObject* self, PyObject* args)
+{
+    int       status     = 0;
+    npy_intp  n_elem     = 0;
+    double    dist       = 0.0;
+    PyObject *op_prob_a  = NULL;
+    PyObject *op_prob_b  = NULL;
+
+    PyArrayObject *prob_a = NULL;
+    PyArrayObject *prob_b = NULL;
+
+    if (!PyArg_ParseTuple (args, "OO", &op_prob_a, &op_prob_b))
+    {
+        return NULL;
+    }
+
+    prob_a = (PyArrayObject *) PyArray_ContiguousFromAny (op_prob_a, NPY_DOUBLE, 1, 1);
+    if (prob_a == NULL)
+    {
+        PyErr_SetString (PyExc_RuntimeError, "Could not convert first input.\n");
+        return NULL;
+    }
+
+    prob_b = (PyArrayObject *) PyArray_ContiguousFromAny (op_prob_b, NPY_DOUBLE, 1, 1);
+    if (prob_b == NULL)
+    {
+        PyErr_SetString (PyExc_RuntimeError, "Could not convert second input.\n");
+        return NULL;
+    }
+
+    n_elem = PyArray_SIZE (prob_a);
+    status = hellinger (
+                (double *) PyArray_DATA (prob_a),
+                (double *) PyArray_DATA (prob_b),
+                (size_t) n_elem,
+                &dist);
+
+    if (status < 0)
+    {
+        PyErr_SetString (PyExc_ValueError, "hellinger failed.");
+        return NULL;
+    }
+
+    return Py_BuildValue("d", dist);
+}
+
+
+/* Compute the Hellinger distance for stochastic matrices
+ *
+ * Params:
+ *      stma   One-dimensional array.
+ *      stmb   One-dimensional array.
+ *  Returns:
+ *      Numpy array of floats.
+ */
+static PyObject *
+apollon_som_distance_hellinger_stm (PyObject* self, PyObject* args)
+{
+    int      status = 0;
+    npy_intp len    = 0;
+    npy_intp stride = 0;
+    PyObject *op_stma = NULL;
+    PyObject *op_stmb = NULL;
+    PyArrayObject *stma  = NULL;
+    PyArrayObject *stmb  = NULL;
+    PyArrayObject *dists = NULL;
+
+    if (!PyArg_ParseTuple (args, "OO", &op_stma, &op_stmb))
+    {
+        return NULL;
+    }
+
+    stma = (PyArrayObject *) PyArray_ContiguousFromAny (op_stma, NPY_DOUBLE, 1, 1);
+    if (stma == NULL)
+    {
+        PyErr_SetString (PyExc_RuntimeError, "Could not convert first input.\n");
+        return NULL;
+    }
+
+    stmb = (PyArrayObject *) PyArray_ContiguousFromAny (op_stmb, NPY_DOUBLE, 1, 1);
+    if (stmb == NULL)
+    {
+        PyErr_SetString (PyExc_RuntimeError, "Could not convert second input.\n");
+        return NULL;
+    }
+
+    len = PyArray_SIZE (stma);
+    stride = (npy_intp) sqrt ((double) len);
+    dists = (PyArrayObject *) PyArray_ZEROS(1, &stride, NPY_DOUBLE, 0);
+    if (dists == NULL)
+    {
+        PyErr_SetString (PyExc_MemoryError, "Could not allocate output array.\n");
+        return NULL;
+    }
+
+    double *dists_ptr = (double *) PyArray_DATA (dists);
+    double *stma_ptr  = (double *) PyArray_DATA (stma);
+    double *stmb_ptr  = (double *) PyArray_DATA (stmb);
+
+    for (npy_intp i = 0; i < stride; i++)
+    {
+        status = hellinger (stma_ptr, stmb_ptr, (size_t) stride, dists_ptr);
+        stma_ptr+=stride;
+        stmb_ptr+=stride;
+        dists_ptr++;
+    }
+
+    if (status < 0)
+    {
+        PyErr_SetString (PyExc_ValueError, "hellinger failed.");
+        return NULL;
+    }
+
+    return (PyObject *) dists;
+}
+
+
+static PyMethodDef
+Distance_Methods[] = {
+    {"hellinger", apollon_som_distance_hellinger, METH_VARARGS,
+        "hellinger (prob_a, prob_b)"},
+    {"hellinger_stm", apollon_som_distance_hellinger_stm, METH_VARARGS,
+        "hellinger (stma, stmb)"},
+    {NULL, NULL, 0, NULL}
+};
+
+static struct PyModuleDef
+_distance_module = {
+    PyModuleDef_HEAD_INIT,
+    "_distance",
+    NULL,
+    -1,
+    Distance_Methods
+};
+
+PyMODINIT_FUNC
+PyInit__distance(void) {
+    import_array();
+    return PyModule_Create (&_distance_module);
+}
diff --git a/src/apollon/som/datasets.py b/src/apollon/som/datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0c194a8d1ca503ab00b74608ce4e23345d41a79
--- /dev/null
+++ b/src/apollon/som/datasets.py
@@ -0,0 +1,42 @@
+"""apollon/som/datasets.py
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß
+mblass@posteo.net
+
+Function for generating test and illustration data sets.
+"""
+from typing import Optional, Tuple
+
+import numpy as np
+from scipy import stats
+
+def norm_circle(n_classes: int, n_per_class: int, class_std: int,
+                center: Tuple[int, int] = (0, 0), radius: int = 5,
+                seed: Optional[int] = None):
+    """Generate ``n_per_class`` samples from ``n_classes`` bivariate normal
+    distributions, each with standard deviation ``class_std``. The means
+    are equidistantly placed on a circle with radius ``radius``.
+
+    Args:
+        n_classes:    Number of classes.
+        n_per_class:  Number of samples in each class.
+        class_std:    Standard deviation for every class.
+        center:       Center of the circle.
+        radius:       Radius of the circle on which the means are placed.
+        seed:         Set the random seed.
+
+    Returns:
+        Data set and target vector.
+    """
+    n_samples = n_classes * n_per_class
+    ang = np.pi * np.linspace(0, 360, n_classes, endpoint=False) / 180
+    xy_pos = np.stack((np.sin(ang), np.cos(ang)), axis=1)
+    xy_pos = xy_pos * radius + np.asarray(center)
+
+    out = np.empty((n_samples, 2))
+    for i, pos in enumerate(xy_pos):
+        idx = slice(i*n_per_class, (i+1)*n_per_class)
+        distr = stats.multivariate_normal(pos, np.sqrt(class_std), seed=seed)
+        out[idx, :] = distr.rvs(n_per_class)
+    return out, np.arange(n_samples) // n_per_class
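+
+# Usage sketch: three Gaussian clusters on a circle of radius 5.
+#
+#     >>> data, labels = norm_circle(3, 50, class_std=1)
+#     >>> data.shape, labels.shape
+#     ((150, 2), (150,))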
diff --git a/apollon/som/defaults.py b/src/apollon/som/defaults.py
similarity index 84%
rename from apollon/som/defaults.py
rename to src/apollon/som/defaults.py
index dedb0416033dca6678b97ddf7ce598d0854a9997..f94c19833d7d42fae568e1ed0a7f5fb1cc9ef68a 100644
--- a/apollon/som/defaults.py
+++ b/src/apollon/som/defaults.py
@@ -1,6 +1,6 @@
 # Licensed under the terms of the BSD-3-Clause license.
 # Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+# mblass@posteo.net
 
 # finale learning rate
 final_eta = .0
diff --git a/src/apollon/som/distance.c b/src/apollon/som/distance.c
new file mode 100644
index 0000000000000000000000000000000000000000..2bc77950f9332accb1e39baacd0f69db2568d7e0
--- /dev/null
+++ b/src/apollon/som/distance.c
@@ -0,0 +1,26 @@
+#if !defined(__clang__) && defined(__GNUC__) && defined(__GNUC_MINOR__)
+#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)
+#pragma GCC optimize("tree-vectorize")
+#pragma GCC optimize("unsafe-math-optimizations")
+#pragma GCC optimize("unroll-loops")
+#pragma GCC diagnostic warning "-Wall"
+#endif
+#endif
+#include <math.h>    /* sqrt */
+#include <stdio.h>
+#include "distance.h"
+
+
+int
+hellinger (const double *pva,
+           const double *pvb,
+           const size_t  n_elements,
+                 double *dist)
+{
+    for (size_t i = 0; i < n_elements; i++)
+    {
+        double diff = sqrt (pva[i]) - sqrt (pvb[i]);
+        *dist += diff * diff;
+    }
+    *dist = sqrt (*dist / 2);
+    return 1;
+}
diff --git a/src/apollon/som/neighbors.py b/src/apollon/som/neighbors.py
new file mode 100644
index 0000000000000000000000000000000000000000..2278de43555dd8e46769d6a1ac11525c293a49ad
--- /dev/null
+++ b/src/apollon/som/neighbors.py
@@ -0,0 +1,158 @@
+# Licensed under the terms of the BSD-3-Clause license.
+# Copyright (C) 2019 Michael Blaß
+# mblass@posteo.net
+
+"""apollon/som/neighbors.py
+
+Neighborhood computations
+
+Functions:
+    gaussian    N-dimensional Gaussian neighborhood.
+"""
+from typing import List, Tuple
+
+import numpy as np
+from scipy.spatial import distance
+
+from .. types import Array
+
+Shape = Tuple[int, int]
+Coord = Tuple[int, int]
+AdIndex = Tuple[List[int], List[int]]
+
+
+def gaussian(grid, center, radius):
+    """Compute n-dimensional Gaussian neighbourhood.
+
+    The Gaussian neighborhood smooths the array.
+
+    Params:
+        grid      Array of n-dimensional indices.
+        center    Index of the neighborhood center.
+        radius    Size of neighborhood.
+    """
+    center = np.atleast_2d(center)
+    dists = distance.cdist(center, grid, metric='sqeuclidean')
+    return np.exp(-dists/(2*radius**2)).T
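+
+# Usage sketch: Gaussian weights on a 3x3 grid, centred on unit (1, 1);
+# the result has one weight per grid point.
+#
+#     >>> grd = np.asarray(list(np.ndindex(3, 3)))
+#     >>> gaussian(grd, (1, 1), radius=1.0).shape
+#     (9, 1)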
+
+
+def mexican(grid, center, radius):
+    """Compute n-dimensional Mexcican hat neighbourhood.
+
+    Mexican hat neighborhood smoothes the array.
+
+    Params:
+        grid      Array of n-dimensional indices.
+        center    Index of the neighborhood center.
+        radius    Size of neighborhood.
+    """
+    center = np.atleast_2d(center)
+    dists = distance.cdist(center, grid, metric='sqeuclidean')
+    return ((1-(dists/radius**2)) * np.exp(-dists/(2*radius**2))).T
+
+
+def star(grid, center, radius):
+    """Compute n-dimensional cityblock neighborhood.
+
+    The cityblock neighborhood is a star-shaped area
+    around ``center``.
+
+    Params:
+        grid      Array of n-dimensional indices.
+        center    Index of the neighborhood center.
+        radius    Size of neighborhood.
+
+    Returns:
+        Array indicating neighbourhood membership.
+    """
+    center = np.atleast_2d(center)
+    dists = distance.cdist(center, grid, 'cityblock')
+    return (dists <= radius).astype(int).T
+
+
+def neighborhood(grid, metric='sqeuclidean'):
+    """Compute n-dimensional cityblock neighborhood.
+
+    The cityblock neighborhood is a star-shaped area
+    around ``center``.
+
+    Params:
+        grid:      Array of n-dimensional indices.
+        metric:    Distance metric.
+
+    Returns:
+        Pairwise distances of map units.
+    """
+    return distance.squareform(distance.pdist(grid, metric))
+
+
+def rect(grid, center, radius):
+    """Compute n-dimensional Chebychev neighborhood.
+
+    The Chebychev neighborhood is a square-shaped area
+    around ``center``.
+
+    Params:
+        grid      Array of n-dimensional indices.
+        center    Index of the neighborhood center.
+        radius    Size of neighborhood.
+
+    Returns:
+        Two-dimensional array indicating neighbourhood membership.
+    """
+    center = np.atleast_2d(center)
+    dists = distance.cdist(center, grid, 'chebychev')
+    return (dists <= radius).astype(int).T
+
+""" NNNNNNNNNEEEEEEEEEEWWWWWW STUFFFFFFFF """
+def gauss_kern(nhb, r):
+    return np.exp(-nhb/(r))
+
+
+def is_neighbour(cra: Array, crb: Array, grid: Array, metric: str) -> Array:
+    """Compute neighbourship between each coordinate in ``units_a`` abd
+    ``units_b`` on ``grid``.
+
+    Args:
+        cra:     (n x 2) array of grid coordinates.
+        crb:     (n x 2) array of grid coordinates.
+        grid:    SOM grid array.
+        metric:  Name of distance metric function.
+
+    Returns:
+        One-dimensional boolean array. ``True`` in position n means that the
+        points ``cra[n]`` and ``crb[n]`` are direct neighbours on ``grid``
+        regarding ``metric``.
+    """
+
+
+def check_bounds(shape: Shape, point: Coord) -> bool:
+    """Return ``True`` if ``point`` is valid index in ``shape``.
+
+    Args:
+        shape:  Shape of two-dimensional array.
+        point:  Two-dimensional coordinate.
+
+    Return:
+        True if ``point`` is within ``shape`` else ``False``.
+    """
+    return (0 <= point[0] < shape[0]) and (0 <= point[1] < shape[1])
+
+
+def direct_rect_nb(shape: Shape, point: Coord) -> AdIndex:
+    """Return the set of direct neighbours of ``point`` given rectangular
+    topology.
+
+    Args:
+        shape:  Shape of two-dimensional array.
+        point:  Two-dimensional coordinate.
+
+    Returns:
+        Advanced index of points in neighbourhood set.
+    """
+    nhb = []
+    for i in range(point[0]-1, point[0]+2):
+        for j in range(point[1]-1, point[1]+2):
+            if check_bounds(shape, (i, j)):
+                nhb.append((i, j))
+    return np.asarray(nhb)
+
diff --git a/src/apollon/som/plot.py b/src/apollon/som/plot.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c0dbeeba12540233cfec1653950011318c7d83c
--- /dev/null
+++ b/src/apollon/som/plot.py
@@ -0,0 +1,279 @@
+"""apollon/som/plot.py
+
+Plotting functions for SOMs.
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß
+mblass@posteo.net
+"""
+
+__all__ = ['cluster_by', 'component', 'hit_counts', 'qerror', 'label_target',
+           'umatrix', 'wire']
+
+from typing import Callable, Optional, Union
+
+import numpy as np
+
+from apollon import tools
+from apollon import aplot
+from apollon.types import Array, Axis
+
+
+def umatrix(ax: Axis, som, outline: bool = False, **kwargs) -> None:
+    """Plot the U-matrix.
+
+    Args:
+        ax:   Axis subplot.
+        som:  SOM instance.
+
+    Note:
+        Figure aspect is set to 'equal'.
+    """
+    props = {
+        'cmap': 'terrain',
+        'levels': 20}
+    props.update(kwargs)
+    _generic_contour(ax, som.umatrix(), outline, **props)
+
+
+def umatrix3d(ax: Axis, som, **kwargs) -> None:
+    """Plot the U-matrix in three dimensions.
+
+    Args:
+        ax:   Axis subplot.
+        som:  SOM instance.
+
+    Note:
+        Figure aspect is set to 'equal'.
+    """
+    props = {
+        'cmap': 'terrain',
+        }
+    props.update(kwargs)
+    ax.plot_surface(*np.mgrid[:som.dx, :som.dy], som.umatrix(), **props)
+
+
+def component(ax: Axis, som, comp: int, outline: bool = False,
+              **kwargs) -> None:
+    """Plot a component plane.
+
+    Args:
+        ax:    Axis subplot.
+        som:   SOM instance.
+        comp:  Component number.
+    """
+    props = {
+        'cmap': 'magma',
+        'levels': 20,}
+    props.update(kwargs)
+    _generic_contour(ax, som.weights[:, comp].reshape(som.shape), outline,
+                     **props)
+
+
+def label_target(ax: Axis, som, data: Array, target: Array, **kwargs) -> None:
+    """Add target labels for each bmu.
+
+    Args:
+        ax:      Axis subplot.
+        som:     SOM instance.
+        data:    Input data.
+        target:  Target labels.
+    """
+    props = {
+        'fontsize': 9,
+        'ha': 'left',
+        'va': 'bottom',
+        }
+    props.update(kwargs)
+
+    bmu = som.match(data)
+    bmu_xy = np.fliplr(np.atleast_2d(bmu)).T
+    for x, y, t in zip(*bmu_xy, target):
+        ax.text(x, y, t, fontdict=props)
+
+
+def qerror(ax: Axis, som, **kwargs) -> None:
+    """Plot quantization error."""
+    props = {
+        'lw': 3,
+        'alpha': .8,
+        }
+    props.update(kwargs)
+    ax.plot(som.quantization_error, **props)
+
+
+def cluster_by(ax: Axis, som, data: Array, target: Array,
+               **kwargs) -> None:
+    """Plot bmu colored by ``traget``.
+
+    Args:
+        ax:      Axis subplot.
+        som:     SOM instance.
+        data:    Input data.
+        target:  Target labels.
+    """
+    props = {
+            's': 50,
+            'c': target,
+            'marker': 'o',
+            }
+    props.update(kwargs)
+    bmu = som.match(data)
+    bmu_xy = np.fliplr(np.atleast_2d(bmu)).T
+    ax.scatter(*bmu_xy, **props)
+
+
+def hit_counts(ax: Axis, som, transform: Optional[Callable] = None,
+               **kwargs) -> None:
+    """Plot the winner histogram.
+
+    Each unit is colored according to the number of times it was bmu.
+
+    Args:
+        ax:    Axis subplot.
+        som:   SOM instance.
+        transform:  Optional callable applied to the counts before plotting.
+    """
+    props = {
+        'interpolation': 'none',
+        'origin': 'lower',
+        'cmap': 'Greys',
+        }
+    props.update(kwargs)
+    data = som.hit_counts.reshape(som.shape)
+    if transform is not None:
+        data = transform(data)
+    ax.imshow(data, **props)
+
+
+def wire(ax: Axis, som,
+         unit_size: Union[int, float, Array] = 100.0,
+         line_width: Union[int, float] = 1.0,
+         highlight: Optional[Array] = None, labels: bool = False,
+         **kwargs) -> None:
+    """Plot the weight vectors of a SOM with two-dimensional feature space.
+
+    Neighbourhood relations are indicated by connecting lines.
+
+    Args:
+        ax:          The axis subplot.
+        som:         SOM instance.
+        unit_size:   Size for each unit.
+        line_width:  Width of the wire lines.
+        highlight:   Index of units to be marked in different color.
+        labels:      If ``True``, attach a box with coordinates to each unit.
+
+    Returns:
+        vlines, hlines, bgmarker, umarker
+    """
+    if isinstance(unit_size, np.ndarray):
+        marker_size = tools.scale(unit_size, 10, 110)
+    elif isinstance(unit_size, (int, float)):
+        marker_size = np.repeat(unit_size, som.n_units)
+    else:
+        msg = ('Argument of parameter ``unit_size`` must be real scalar '
+               'or one-dimensional numpy array.')
+        raise ValueError(msg)
+    marker_size_bg = marker_size + marker_size / 100 * 30
+
+    bg_color: str = 'w'
+    hl_color: str = 'r'
+    unit_color: str = 'k'    # assumed default for the unit markers
+
+    line_props = {
+        'color': 'k',
+        'alpha': 0.7,
+        'lw': 1.0,
+        'zorder': 9,
+        }
+    line_props.update(kwargs)
+
+    # Resolve highlight colors before the marker props are frozen.
+    if highlight is not None:
+        bg_color = np.where(highlight, hl_color, bg_color)
+
+    marker_bg_props = {
+        's': marker_size_bg,
+        'c': bg_color,
+        'edgecolors': 'None',
+        'zorder': 11,
+        }
+
+    marker_props = {
+        's': marker_size,
+        'c': unit_color,
+        'alpha': line_props['alpha'],
+        'edgecolors': 'None',
+        'zorder': 12,
+        }
+
+    rsw = som.weights.reshape(*som.shape, 2)
+    v_wx, v_wy = rsw.T
+    h_wx, h_wy = np.rollaxis(rsw, 1).T
+    vlines = ax.plot(v_wx, v_wy, **line_props)
+    hlines = ax.plot(h_wx, h_wy, **line_props)
+    bgmarker = ax.scatter(v_wx, v_wy, **marker_bg_props)
+    umarker = ax.scatter(v_wx, v_wy, **marker_props)
+
+    font = {'fontsize': 4,
+            'va': 'bottom',
+            'ha': 'center',
+            }
+
+    bbox = {'alpha': 0.7,
+            'boxstyle': 'round',
+            'edgecolor': '#aaaaaa',
+            'facecolor': '#dddddd',
+            'linewidth': .5,
+            }
+
+    if labels is True:
+        for (px, py), (ix, iy) in zip(som.weights, np.ndindex(som.shape)):
+            ax.text(px+1.3, py, f'({ix}, {iy})', font, bbox=bbox, zorder=13)
+    ax.set_aspect('equal')
+    return vlines, hlines, bgmarker, umarker
+
+
+def data_2d(ax: Axis, data: Array, colors: Array,
+            **kwargs) -> None:
+    """Scatter plot a data set with two-dimensional feature space.
+
+    This is just the usual scatter command with some reasonable defaults.
+
+    Args:
+        ax:      The axis subplot.
+        data:    The data set.
+        colors:  Colors for each element in ``data``.
+
+    Returns:
+        PathCollection.
+    """
+    props = {
+        'alpha': 0.2,
+        'c': colors,
+        'cmap': 'plasma',
+        'edgecolors': 'None',
+        's': 10}
+    props.update(kwargs)
+    aplot.outward_spines(ax)
+    _ = ax.scatter(*data.T, **props)
+
+
+def _generic_contour(ax: Axis, data: Array, outline: bool = False,
+                     **kwargs) -> None:
+    """Contour plot.
+
+    Args:
+        ax:    Axis subplot.
+        data:  Two-dimensional array.
+    """
+    sdx, sdy = data.shape
+    overwrites = {
+        'extent': (-0.5, sdy-0.5, -0.5, sdx-0.5),
+        }
+    kwargs.update(overwrites)
+    _ = ax.contourf(data, **kwargs)
+    _ = ax.set_xticks(range(sdy))
+    _ = ax.set_yticks(range(sdx))
+    if outline:
+        ax.contour(data, cmap='Greys_r', alpha=.7)
+    ax.set_aspect('equal')
diff --git a/src/apollon/som/som.py b/src/apollon/som/som.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c570d8042e2c9c47baf2632a0009280772b6c65
--- /dev/null
+++ b/src/apollon/som/som.py
@@ -0,0 +1,419 @@
+# Licensed under the terms of the BSD-3-Clause license.
+# Copyright (C) 2019 Michael Blaß
+# mblass@posteo.net
+from typing import Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+from scipy.spatial import cKDTree
+from scipy.spatial import distance
+
+from apollon.io import io as aio
+from apollon.som import defaults as _defaults
+from . import neighbors as _neighbors
+from . import utilities as asu
+from .. types import Array, Shape, SomDims, Coord
+
+WeightInit = Union[Callable[[Array, Shape], Array], str]
+Metric = Union[Callable[[Array, Array], float], str]
+
+
+class SomGrid:
+    def __init__(self, shape: Shape) -> None:
+        if not all(isinstance(val, int) and val >= 1 for val in shape):
+            raise ValueError('Dimensions must be integers >= 1.')
+        self.shape = shape
+        self.pos = np.asarray(list(np.ndindex(shape)), dtype=int)
+        self.tree = cKDTree(self.pos)
+        self.rows, self.cols = np.indices(shape)
+
+    def nhb_idx(self, point: Coord, radius: float) -> Array:
+        """Compute the neighbourhood within ``radius`` around ``point``.
+
+        Args:
+            point:   Coordinate in a two-dimensional array.
+            radius:  Length of the radius.
+
+        Returns:
+            Array of indices of neighbours.
+        """
+        return np.asarray(self.tree.query_ball_point(point, radius, np.inf))
+
+    def nhb(self, point: Coord, radius: float) -> Array:
+        """Compute neighbourhood within ``radius`` around ``pouint``.
+
+        Args:
+            point:   Coordinate in a two-dimensional array.
+            radius:  Length of the radius.
+
+        Returns:
+            Array of positions of neighbours.
+        """
+        idx = self.nhb_idx(point, radius)
+        return self.pos[idx]
+
+    def __iter__(self):
+        for row, col in zip(self.rows.flat, self.cols.flat):
+            yield row, col
+
+    def rc(self):
+        return self.__iter__()
+
+    def cr(self):
+        for row, col in zip(self.rows.flat, self.cols.flat):
+            yield col, row
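+
+# Usage sketch: ``nhb_idx`` queries the k-d tree with the Chebyshev
+# (max-norm) metric, so a radius of 1 on a 3x3 grid covers all nine units.
+#
+#     >>> sg = SomGrid((3, 3))
+#     >>> len(sg.nhb_idx((1, 1), 1.0))
+#     9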
+
+
+class SomBase:
+    def __init__(self, dims: SomDims, n_iter: int, eta: float,
+                 nhr: float, nh_shape: str, init_weights: WeightInit,
+                 metric: Metric, seed: Optional[float] = None):
+
+        self._grid = SomGrid(dims[:2])
+        self.n_features = dims[2]
+        self._hit_counts = np.zeros(self.n_units)
+        self.n_iter = n_iter
+        self.metric = metric
+        self._qrr = np.zeros(n_iter)
+        self._trr = np.zeros(n_iter)
+        self._weights: Optional[Array] = None
+
+        try:
+            self._neighbourhood = getattr(_neighbors, nh_shape)
+        except AttributeError:
+            raise AttributeError(f'Neighborhood shape {nh_shape} is unknown. '
+                                 'Use one of `gaussian`, `mexican`, `rect`, '
+                                 'or `star`.')
+
+        if eta is None:
+            self.init_eta = None
+        else:
+            if 0 < eta <= 1.:
+                self.init_eta = eta
+            else:
+                raise ValueError(f'Parameter ``eta``={eta} not in '
+                                 'range (0, 1].')
+
+        if nhr >= 1:
+            self.init_nhr = nhr
+        else:
+            raise ValueError('Neighbourhood radius must be >= 1.')
+
+        if seed is not None:
+            np.random.seed(seed)
+
+        if isinstance(init_weights, str):
+            self.init_weights = asu.weight_initializer[init_weights]
+        elif callable(init_weights):
+            self.init_weights = init_weights
+        else:
+            msg = 'Initializer must be string or callable.'
+            raise ValueError(msg)
+
+        self._dists: Optional[Array] = None
+
+    @property
+    def dims(self) -> Tuple[int, int, int]:
+        """Return the SOM dimensions."""
+        return (*self._grid.shape, self.n_features)
+
+    @property
+    def dx(self) -> int:
+        """Return the number of units along the first dimension."""
+        return self._grid.shape[0]
+
+    @property
+    def dy(self) -> int:
+        """Return the number of units along the second dimension."""
+        return self._grid.shape[1]
+
+    @property
+    def dw(self) -> int:
+        """Return the dimension of the weight vectors."""
+        return self.n_features
+
+    @property
+    def n_units(self) -> int:
+        """Return the total number of units on the SOM."""
+        return self.dx * self.dy
+
+    @property
+    def shape(self) -> Shape:
+        """Return the map shape."""
+        return self._grid.shape
+
+    @property
+    def grid(self) -> SomGrid:
+        """Return the grid object."""
+        return self._grid
+
+    @property
+    def dists(self) -> Array:
+        """Return the distance matrix of the grid points."""
+        return self._dists
+
+    @property
+    def weights(self) -> Array:
+        """Return the weight vectors."""
+        return self._weights
+
+    @property
+    def hit_counts(self) -> Array:
+        """Return total hit counts for each SOM unit."""
+        return self._hit_counts
+
+    @property
+    def quantization_error(self) -> Array:
+        """Return quantization error."""
+        return self._qrr
+
+    @property
+    def topographic_error(self) -> Array:
+        """Return topographic error."""
+        return self._trr
+
+    def calibrate(self, data: Array, target: Array) -> Array:
+        """Retrieve the target value of the best matching input data vector
+        for each unit weight vector.
+
+        Args:
+            data:     Input data set.
+            target:  Target labels.
+
+        Returns:
+            Array of target values.
+        """
+        bm_dv, _ = asu.best_match(data, self._weights, self.metric)
+        return target[bm_dv]
+
+    def distribute(self, data: Array) -> Dict[int, List[int]]:
+        """Distribute the vectors of ``data`` on the SOM.
+
+        Indices of vectors in ``data`` are mapped to the index of
+        their best matching unit.
+
+        Args:
+            data:  Input data set.
+
+        Returns:
+            Dictionary with SOM unit indices as keys. Each key maps to a list
+            that holds the indices of rows in ``data``, which best match this
+            key.
+        """
+        return asu.distribute(self.match(data), self.n_units)
+
+    def match_flat(self, data: Array) -> Array:
+        """Return the index of the best matching unit for each vector in
+        ``data``.
+
+        Args:
+            data:  Input data set.
+
+        Returns:
+            Array of SOM unit indices.
+        """
+        bmu, _ = asu.best_match(self._weights, data, self.metric)
+        return bmu
+
+    def match(self, data: Array) -> Array:
+        """Return the multi index of the best matching unit for each vector in
+        ``data``.
+
+        Caution: This function returns the multi index into the array.
+
+        Args:
+            data:  Input data set.
+
+        Returns:
+            Array of SOM unit indices.
+        """
+        bmu = self.match_flat(data)
+        return np.column_stack(np.unravel_index(bmu, self.shape))
+
+    def predict(self, data: Array) -> Array:
+        """Predict the SOM index of the best matching unit
+        for each item in ``data``.
+
+        Args:
+            data:  Input data. Rows are items, columns are features.
+
+        Returns:
+            One-dimensional array of indices.
+        """
+        bmi, _ = asu.best_match(self.weights, data, self.metric)
+        return bmi
+
+    def save(self, path) -> None:
+        """Save som object to file using pickle.
+
+        Args:
+            path: Save SOM to this path.
+        """
+        aio.save_to_pickle(self, path)
+
+    def save_weights(self, path) -> None:
+        """Save weights only.
+
+        Args:
+            path:  File path
+        """
+        aio.save_to_npy(self._weights, path)
+
+    def transform(self, data: Array) -> Array:
+        """Transform each item in ``data`` to feature space.
+
+        This, in principle, returns best matching unit's weight vectors.
+
+        Args:
+            data:  Input data. Rows are items, columns are features.
+
+        Returns:
+            Position of each data item in the feature space.
+        """
+        bmi = self.predict(data)
+        return self.weights[bmi]
+
+
+    def umatrix(self, radius: int = 1, scale: bool = True, norm: bool = True):
+        """Compute U-matrix of SOM instance.
+
+        Args:
+            radius:   Map neighbourhood radius.
+            scale:    If ``True``, scale each U-height by the number of the
+                      associated unit's neighbours.
+            norm:     Normalize U-matrix if ``True``.
+
+        Returns:
+            Unified distance matrix.
+        """
+        u_height = np.empty(self.n_units, dtype='float64')
+        nhd_per_unit = self._grid.nhb_idx(self._grid.pos, radius)
+        for i, nhd_idx in enumerate(nhd_per_unit):
+            cwv = self._weights[[i]]
+            nhd = self._weights[nhd_idx]
+            u_height[i] = distance.cdist(cwv, nhd, self.metric).sum()
+            if scale:
+                u_height[i] /= len(nhd_idx)
+        if norm:
+            umax = u_height.max()
+            if umax == 0:
+                u_height = np.zeros_like(u_height)
+            else:
+                u_height /= umax
+        return u_height.reshape(self.shape)
+
+
+class BatchMap(SomBase):
+    def __init__(self, dims: SomDims, n_iter: int, eta: float, nhr: float,
+                 nh_shape: str = 'gaussian', init_weights: WeightInit  = 'rnd',
+                 metric: Metric = 'euclidean', seed: int = None):
+
+        super().__init__(dims, n_iter, eta, nhr, nh_shape, init_weights, metric,
+                         seed=seed)
+
+
+class IncrementalMap(SomBase):
+    def __init__(self, dims: SomDims, n_iter: int, eta: float, nhr: float,
+                 nh_shape: str = 'gaussian', init_weights: WeightInit = 'rnd',
+                 metric: Metric = 'euclidean', seed: int = None):
+
+        super().__init__(dims, n_iter, eta, nhr, nh_shape, init_weights, metric,
+                         seed=seed)
+
+    def fit(self, train_data, verbose=False, output_weights=False):
+        self._weights = self.init_weights(self.dims, train_data)
+        eta_ = asu.decrease_linear(self.init_eta, self.n_iter, _defaults.final_eta)
+        nhr_ = asu.decrease_expo(self.init_nhr, self.n_iter, _defaults.final_nhr)
+
+        np.random.seed(10)
+        for (c_iter, c_eta, c_nhr) in zip(range(self.n_iter), eta_, nhr_):
+            if verbose:
+                print('iter: {:2} -- eta: {:<5} -- nh: {:<6}' \
+                 .format(c_iter, np.round(c_eta, 4), np.round(c_nhr, 5)))
+
+            for i, fvect in enumerate(np.random.permutation(train_data)):
+                if output_weights:
+                    fname = f'weights/weights_{c_iter:05}_{i:05}.npy'
+                    with open(fname, 'wb') as fobj:
+                        np.save(fobj, self._weights, allow_pickle=False)
+                bmu, err = asu.best_match(self.weights, fvect, self.metric)
+                self._hit_counts[bmu] += 1
+                m_idx = np.atleast_2d(np.unravel_index(bmu, self.shape)).T
+                neighbors = self._neighbourhood(self._grid.pos, m_idx, c_nhr)
+                self._weights += c_eta * neighbors * (fvect - self._weights)
+
+            _, err = asu.best_match(self.weights, train_data, self.metric)
+            self._qrr[c_iter] = err.sum() / train_data.shape[0]
+
+
+class IncrementalKDTReeMap(SomBase):
+    def __init__(self, dims: tuple, n_iter: int, eta: float, nhr: float,
+                 nh_shape: str = 'star2', init_distr: str = 'uniform',
+                 metric: str = 'euclidean', seed: int = None):
+
+        super().__init__(dims, n_iter, eta, nhr, nh_shape, init_distr, metric,
+                         seed=seed)
+
+    def fit(self, train_data, verbose=False):
+        """Fit SOM to input data."""
+        self._weights = self.init_weights(train_data, self.shape)
+        eta_ = asu.decrease_linear(self.init_eta, self.n_iter, _defaults.final_eta)
+        nhr_ = asu.decrease_expo(self.init_nhr, self.n_iter, _defaults.final_nhr)
+        iter_ = range(self.n_iter)
+
+        np.random.seed(10)
+        for (c_iter, c_eta, c_nhr) in zip(iter_, eta_, nhr_):
+            if verbose:
+                print('iter: {:2} -- eta: {:<5} -- nh: {:<6}' \
+                 .format(c_iter, np.round(c_eta, 4), np.round(c_nhr, 5)))
+
+            for fvect in np.random.permutation(train_data):
+                bmu, _ = asu.best_match(self.weights, fvect, self.metric)
+                self._hit_counts[bmu] += 1
+                nh_idx = self._grid.nhb_idx(np.unravel_index(*bmu, self.shape), c_nhr)
+                #dists = _distance.cdist(self._grid.pos[nh_idx], self._grid.pos[bmu])
+                dists = np.ones(nh_idx.shape[0])
+                kern = _neighbors.gauss_kern(dists.ravel(), c_nhr) * c_eta
+                self._weights[nh_idx] += ((fvect - self._weights[nh_idx]) * kern[:, None])
+
+            _, err = asu.best_match(self.weights, train_data, self.metric)
+            self._qrr[c_iter] = err.sum() / train_data.shape[0]
+
+'''
+    def _batch_update(self, data_set, c_nhr):
+        # get bmus for vector in data_set
+        bm_units, total_qE = self.get_winners(data_set)
+        self.quantization_error.append(total_qE)
+
+        # get bmu's multi index
+        bmu_midx = np.unravel_index(bm_units, self.shape)
+
+        w_nh = np.zeros((self.n_units, 1))
+        w_lat = np.zeros((self.n_units, self.dw))
+
+        for bx, by, fv in zip(*bmu_midx, data_set):
+            # TODO:  Find a way for faster nh computation
+            c_nh = self._neighbourhood((bx, by), c_nhr)
+            w_nh += c_nh
+            w_lat += c_nh * fv
+
+        self.weights = w_lat / w_nh
+
+
+    def train_batch(self, data, verbose=False):
+        """Feed the whole data set to the network and update once
+           after each iteration.
+
+        Args:
+            data:    Input data set.
+            verbose: Print verbose messages if True.
+        """
+        # main loop
+        for (c_iter, c_nhr) in \
+            zip(range(self.n_iter),
+                asu.decrease_linear(self.init_nhr, self.n_iter)):
+
+            if verbose:
+                print(c_iter, end=' ')
+
+            self._batch_update(data, c_nhr)
+            '''
diff --git a/apollon/som/topologies.py b/src/apollon/som/topologies.py
similarity index 66%
rename from apollon/som/topologies.py
rename to src/apollon/som/topologies.py
index f8f6e690e893a7f15aa7e48b5b3af2f84cf0dea0..13c0877d45eaa49d134e8fc3ca5a153984642bca 100644
--- a/apollon/som/topologies.py
+++ b/src/apollon/som/topologies.py
@@ -1,6 +1,6 @@
 # Licensed under the terms of the BSD-3-Clause license.
 # Copyright (C) 2019 Michael Blaß
-# michael.blass@uni-hamburg.de
+# mblass@posteo.net
 
 """apollon/som/topologies.py
 
@@ -9,32 +9,10 @@
 Topologies for self-organizing maps.
 
 Functions:
-    rect_neighbourhood    Return rectangular neighbourhood.
     vn_neighbourhood      Return 4-neighbourhood.
 """
 
-
-import numpy as _np
-
-
-def rect_neighbourhood(mat_shape, point, w=1):
-    if point[0] - w < 0:
-        rows1 = 0
-    else:
-        rows1 = point[0] - w
-    rows2 = point[0] + w + 1
-
-    if point[1] - w < 0:
-        cols1 = 0
-    else:
-        cols1 = point[1] - w
-    cols2 = point[1] + w + 1
-
-    mask = _np.ones(mat_shape)
-    mask[rows1:rows2, cols1:cols2] = 0
-    mask[point] = 1
-    out = _np.ma.masked_array(mask, mask=mask)
-    return out
+import numpy as np
 
 
 def vn_neighbourhood(x, y, dx, dy, flat=False):
@@ -67,7 +45,7 @@ def vn_neighbourhood(x, y, dx, dy, flat=False):
         nh.append((x, y+1))
 
     if flat:
-        nh = _np.array(nh)
-        return _np.ravel_multi_index(nh.T, (dx, dy))
+        nh = np.array(nh)
+        return np.ravel_multi_index(nh.T, (dx, dy))
     else:
         return nh
diff --git a/src/apollon/som/utilities.py b/src/apollon/som/utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdd71878b1a31454d8f47192f3c4a1171a3b41dd
--- /dev/null
+++ b/src/apollon/som/utilities.py
@@ -0,0 +1,253 @@
+"""apollon/som/utilites.py
+
+Utilities for self.organizing maps.
+
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß
+mblass@posteo.net
+"""
+import itertools
+from typing import Dict, Iterable, Iterator, List, Optional, Tuple
+
+import numpy as np
+from scipy.spatial import distance as _distance
+from scipy import stats as _stats
+
+from apollon.types import Array, Shape, SomDims
+from apollon import tools
+
+
+def grid_iter(n_rows: int, n_cols: int) -> Iterator[Tuple[int, int]]:
+    """Compute grid indices of an two-dimensional array.
+
+    Args:
+        n_rows:  Number of array rows.
+        n_cols:  Number of array columns.
+
+    Returns:
+        Multi-index iterator.
+    """
+    return itertools.product(range(n_rows), range(n_cols))
+
+
+def grid(n_rows: int, n_cols: int) -> Array:
+    """Compute grid indices of a two-dimensional array.
+
+    Args:
+        n_rows:  Number of array rows.
+        n_cols:  Number of array columns.
+
+    Returns:
+        Two-dimensional array in which each row represents a multi-index.
+    """
+    return np.array(list(grid_iter(n_rows, n_cols)))
+
+
+def decrease_linear(start: float, step: int, stop: float = 1.0
+                    ) -> Iterator[float]:
+    """Linearly decrease ``start`` to ``stop`` in ``step`` steps."""
+    if step < 1 or not isinstance(step, int):
+        raise ValueError('Param `step` must be int >= 1.')
+    elif step == 1:
+        yield start
+    else:
+        a = (stop - start) / (step-1)
+        for x in range(step):
+            yield a * x + start
+
+
+def decrease_expo(start: float, step: int, stop: float = 1.0
+                  ) -> Iterator[float]:
+    """Exponentially decrease ``start`` to ``stop`` in ``step`` steps."""
+    if step < 1 or not isinstance(step, int):
+        raise ValueError('Param `step` must be int >= 1.')
+    elif step == 1:
+        yield start
+    else:
+        b = np.log(stop / start) / (step-1)
+        for x in range(step):
+            yield start * np.exp(b*x)
+
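+# A quick illustration (not part of the library): both schedules
+# interpolate between ``start`` and ``stop`` over ``step`` values.
+#
+#     >>> list(decrease_linear(1.0, 5, 0.0))
+#     [1.0, 0.75, 0.5, 0.25, 0.0]
+#     >>> [round(x, 4) for x in decrease_expo(1.0, 5, 0.01)]
+#     [1.0, 0.3162, 0.1, 0.0316, 0.01]
+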
+"""
+def match(weights: Array, data: Array, kth, metric: str):
+    dists = _distance.cdist(weights, data, metric)
+    min_idx = dists.argpartition(kth, axis=0)
+    min_vals = dists[min_idx]
+    return (min_idx, min_vals)
+"""
+
+def best_match(weights: Array, inp: Array, metric: str):
+    """Compute the best matching unit of ``weights`` for each
+    element in ``inp``.
+
+    If several elements of ``weights`` have the same distance to the
+    current element of ``inp``, the first one is chosen to be the
+    best matching unit.
+
+    Args:
+        weights:    Two-dimensional array of weights, in which each row
+                    represents a unit.
+        inp:        Array of test vectors. If two-dimensional, rows are
+                    assumed to represent observations.
+        metric:     Distance metric to use.
+
+    Returns:
+        Index and error of best matching units.
+    """
+    if weights.ndim != 2:
+        msg = (f'Array ``weights`` has {weights.ndim} dimensions, it '
+               'has to have exactly two dimensions.')
+        raise ValueError(msg)
+
+    if weights.shape[-1] != inp.shape[-1]:
+        msg = (f'Feature dimension of ``weights`` has {weights.shape[-1]} '
+               f'elements, whereas ``inp`` has {inp.shape[-1]} elements. '
+               'However, both dimensions have to match exactly.')
+        raise ValueError(msg)
+
+    inp = np.atleast_2d(inp)
+    if inp.ndim > 2:
+        msg = (f'Array ``inp`` has {inp.ndim} dimensions, it '
+               'has to have one or two dimensions.')
+        raise ValueError(msg)
+
+    dists = _distance.cdist(weights, inp, metric)
+    return dists.argmin(axis=0), dists.min(axis=0)
+
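+# Example (illustrative): the best matching unit is the row of ``weights``
+# closest to the input vector, together with its distance.
+#
+#     >>> w = np.array([[0.0, 0.0], [1.0, 1.0]])
+#     >>> best_match(w, np.array([0.9, 0.9]), 'euclidean')
+#     (array([1]), array([0.14142136]))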
+
+def sample_pca(dims: SomDims, data: Optional[Array] = None, **kwargs) -> Array:
+    """Compute initial SOM weights by sampling from the first two principal
+    components of the input data.
+
+    Args:
+        dims:   Dimensions of SOM.
+        data:   Input data set.
+        adapt:  If ``True``, the largest value of ``shape`` is applied to
+                the principal component with the largest singular value.
+                This orients the map such that the map dimension with the
+                most units coincides with the principal component with the
+                largest variance.
+
+    Returns:
+        Array of SOM weights.
+    """
+    n_rows, n_cols, n_feats = dims
+    if data is None:
+        data = np.random.randint(-100, 100, (300, n_feats))
+    _, vects, trans_data = tools.pca(data, 2)
+    data_limits = np.column_stack((trans_data.min(axis=0),
+                                   trans_data.max(axis=0)))
+    if 'adapt' in kwargs and kwargs['adapt'] is True:
+        shape = sorted((n_rows, n_cols), reverse=True)
+    else:
+        shape = (n_rows, n_cols)
+    dim_x = np.linspace(*data_limits[0], shape[0])
+    dim_y = np.linspace(*data_limits[1], shape[1])
+    grid_x, grid_y = np.meshgrid(dim_x, dim_y)
+    points = np.vstack((grid_x.ravel(), grid_y.ravel()))
+    weights = points.T @ vects + data.mean(axis=0)
+    return weights
+
+
+def sample_rnd(dims: SomDims, data: Optional[Array] = None, **kwargs) -> Array:
+    """Compute initial SOM weights by sampling uniformly from the data space.
+
+    Args:
+        dims:  Dimensions of SOM.
+        data:  Input data set. If ``None``, sample from [-10, 10].
+
+    Returns:
+        Array of SOM weights.
+    """
+    n_rows, n_cols, n_feats = dims
+    n_units = n_rows * n_cols
+    if data is not None:
+        data_limits = np.column_stack((data.min(axis=0), data.max(axis=0)))
+    else:
+        data_limits = np.random.randint(-10, 10, (n_feats, 2))
+        data_limits.sort()
+    weights = [np.random.uniform(*lim, n_units) for lim in data_limits]
+    return np.column_stack(weights)
+
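+# For instance (illustrative), ``sample_rnd((2, 3, 4))`` returns a (6, 4)
+# array whose columns are drawn uniformly from per-feature limits; with no
+# data given, the limits are random integers from [-10, 10].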
+
+def sample_stm(dims: SomDims, data: Optional[Array] = None, **kwargs) -> Array:
+    """Compute initial SOM weights by sampling stochastic matrices from
+    Dirichlet distribution.
+
+    The rows of each n by n stochastic matrix are samples drawn from the
+    Dirichlet distribution, where n is the number of rows and columns of
+    the matrix. The diagonal elements of the matrices are set to twice the
+    probability of the remaining elements.
+    The square root of the weight vector's size must be an integer.
+
+    Args:
+        dims:  Dimensions of SOM.
+        data:  Input data set.
+
+    Returns:
+        Array of SOM weights.
+
+    Notes:
+        Each row of the output array is to be considered a flattened
+        stochastic matrix, such that each run of ``N = sqrt(data.shape[1])``
+        values is a discrete probability distribution forming one row of
+        the matrix.
+    """
+    n_rows, n_cols, n_feats = dims
+    n_states = np.sqrt(n_feats)
+    if n_states != int(n_states):
+        msg = (f'Weight vector with {n_feats} elements is not '
+               'reshapeable to square matrix.')
+        raise ValueError(msg)
+
+    n_states = int(n_states)
+    n_units = n_rows * n_cols
+    alpha = np.random.randint(1, 10, (n_states, n_states))
+    st_matrix = np.hstack([_stats.dirichlet(a).rvs(size=n_units)
+                           for a in alpha])
+    return st_matrix
+
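+# Shape check (illustrative): ``sample_stm((2, 2, 9))`` yields a (4, 9)
+# array; each row reshapes to a 3x3 matrix whose rows each sum to one.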
+
+def sample_hist(dims: SomDims, data: Optional[Array] = None, **kwargs) -> Array:
+    """Sample sum-normalized histograms.
+
+    Args:
+        dims:  Dimensions of SOM.
+        data:  Input data set.
+
+    Returns:
+        Two-dimensional array in which each row is a histogram.
+    """
+    n_rows, n_cols, n_feats = dims
+    return _stats.dirichlet(np.ones(n_feats)).rvs(n_rows*n_cols)
+
+
+def distribute(bmu_idx: Iterable[int], n_units: int
+               ) -> Dict[int, List[int]]:
+    """List training data matches per SOM unit.
+
+    This function assumes that the ith element of ``bmu_idx`` corresponds
+    to the ith vector in an array of input data vectors.
+
+    Empty units result in empty lists.
+
+    Args:
+        bmu_idx:  Indices of best matching units.
+        n_units:  Number of units on the SOM.
+
+    Returns:
+        Dictionary in which the keys represent the flat indices of SOM units.
+        The corresponding value is a list of indices of those training data
+        vectors that have been mapped to this unit.
+    """
+    unit_matches = {i:[] for i in range(n_units)}
+    for data_idx, bmu in enumerate(bmu_idx):
+        unit_matches[bmu].append(data_idx)
+    return unit_matches
+
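+# Example (illustrative):
+#
+#     >>> distribute([0, 2, 2], 4)
+#     {0: [0], 1: [], 2: [1, 2], 3: []}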
+
+weight_initializer = {
+    'rnd': sample_rnd,
+    'stm': sample_stm,
+    'pca': sample_pca,
+    'hist': sample_hist}
diff --git a/src/apollon/tools.py b/src/apollon/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..34b2963603877a00500332b2357797e2456181e8
--- /dev/null
+++ b/src/apollon/tools.py
@@ -0,0 +1,308 @@
+"""
+Common tool library.
+Licensed under the terms of the BSD-3-Clause license.
+
+Copyright (C) 2019 Michael Blaß
+"""
+from datetime import datetime, timezone
+import math as _math
+from typing import Any, Callable, Optional, Tuple
+
+import numpy as np
+
+from . import _defaults
+from .types import Array
+
+
+def pca(data: Array, n_comps: int = 2) -> Tuple[Array, Array, Array]:
+    """Compute a PCA based on ``numpy.linalg.svd``.
+
+    Internally, ``data`` is centered but not scaled.
+
+    Args:
+        data:     Data set.
+        n_comps:  Number of principal components.
+
+    Returns:
+        ``n_comps`` largest singular values,
+        ``n_comps`` largest eigenvectors,
+        transformed input data.
+    """
+    data_centered = (data - data.mean(axis=0))
+    _, vals, vects = np.linalg.svd(data_centered)
+
+    ord_idx = np.flip(vals.argsort())[:n_comps]
+    vals = vals[ord_idx]
+    vects = vects[ord_idx]
+    return vals, vects, data_centered @ vects.T
+
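+# Sanity check (illustrative): for points on the line y = x, the leading
+# eigenvector is (up to sign) [1/sqrt(2), 1/sqrt(2)], the second singular
+# value vanishes, and the transformed data varies only in its first
+# component.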
+
+def assert_array(arr: Array, ndim: int, size: int,     # pylint: disable=R0913
+                 lower_bound: float = -np.inf,
+                 upper_bound: float = np.inf,
+                 name: str = 'arr'):
+    """Raise an error if shape of `arr` does not match given arguments.
+
+    Args:
+        arr:    Array to test.
+        ndim:   Expected number of dimensions.
+        size:   Expected total number of elements.
+        lower_bound:    Lower bound for array elements.
+        upper_bound:    Upper bound for array elements.
+
+    Raises:
+        ValueError
+    """
+    if arr.ndim != ndim:
+        raise ValueError(('Shape of {} does not match. Expected '
+                          '{}, got {}.\n').format(name, ndim, arr.ndim))
+
+    if arr.size != size:
+        raise ValueError(('Size of {} does not match. Expected '
+                          '{}, got {}.\n').format(name, size, arr.size))
+
+    if np.any(arr < lower_bound):
+        raise ValueError(('Elements of {} must '
+                          'be >= {}.'.format(name, lower_bound)))
+
+    if np.any(arr > upper_bound):
+        raise ValueError(('Elements of {} must '
+                          'be <= {}.'.format(name, upper_bound)))
+
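+# Example (illustrative): ``assert_array(np.ones((2, 2)), ndim=1, size=4)``
+# raises ``ValueError('Shape of arr does not match. Expected 1, got 2.')``.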
+
+def jsonify(inp: Any):
+    """Returns a representation of ``inp`` that can be serialized to JSON.
+
+    This method passes through Python objects of type dict, list, str, int
+    float, True, False, and None. Tuples will be converted to list by the JSON
+    encoder. Numpy arrays will be converted to list using thier .to_list() method.
+    On all other types, the method will try to call str() and raises
+    on error.
+
+    Args:
+        inp:    Input to be jsonified.
+
+    Returns:
+        Jsonified input.
+    """
+    valid_types = (dict, list, tuple, str, int, float)
+    valid_vals = (True, False, None)
+
+    xx = [isinstance(inp, v_type) for v_type in valid_types]
+    yy = [inp is v_vals for v_vals in valid_vals]
+
+    if any(xx) or any(yy):
+        return inp
+
+    if isinstance(inp, np.ndarray):
+        return inp.tolist()
+
+    return str(inp)
+
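+# Examples (illustrative): ``jsonify({'a': 1})`` passes the dict through,
+# ``jsonify(np.arange(3))`` returns [0, 1, 2], and any other object is
+# converted with ``str()``.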
+
+#TODO Move to better place
+def L1_Norm(arr: Array) -> float:
+    """Compute the L1 norm of the input array ``arr``.
+
+    This implementation is generally faster than ``np.linalg.norm(arr, ord=1)``.
+    """
+    return np.abs(arr).sum(axis=0)
+
+
+def normalize(arr: Array, mode: str = 'array'):
+    """Normalize an arbitrary array_like.
+
+    Args:
+        arr:    Input signal.
+        mode:   Normalization mode:
+                    'array' -> (default) Normalize whole array.
+                    'rows'  -> Normalize each row separately.
+                    'cols'  -> Normalize each col separately.
+    Returns:
+        Normalized input.
+    """
+
+    arr = np.atleast_1d(arr)
+
+    if mode == 'array':
+        return _normalize(arr)
+
+    if mode == 'rows':
+        return np.vstack([_normalize(row) for row in arr])
+
+    if mode == 'cols':
+        return np.hstack([_normalize(col[:, None]) for col in arr.T])
+
+    raise ValueError('Unknown normalization mode')
+
+
+# TODO: This normalizes in [0, 1]; for audio we need [-1, 1]
+def _normalize(arr: Array) -> Array:
+    """Normalize array."""
+    arr_min = arr.min()
+    arr_max = arr.max()
+    return (arr - arr_min) / (arr_max - arr_min)
+
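+# Example (illustrative): ``normalize(np.array([0.0, 5.0, 10.0]))`` returns
+# ``array([0. , 0.5, 1. ])``.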
+
+def assert_and_pass(func: Callable, arg: Any):
+    """Call ``func``` with ``arg`` and return ``arg``. Additionally allow arg
+    to be ``None``.
+
+    Args:
+        func:   Test function.
+        arg:    Function argument.
+
+    Returns:
+        Result of ``func(arg)``.
+    """
+    if arg is not None:
+        func(arg)
+    return arg
+
+
+def rowdiag(arr: Array, k: int = 0) -> Array:
+    """Get or set ``k`` th diagonal of square matrix.
+
+    Get the ``k`` th diagonal of a square matrix sorted by rows or construct a
+    sqare matrix with the elements of v as the main diagonal of the second and
+    third dimension.
+
+    Args:
+        arr:    Square array.
+        k:      Number of diagonal.
+
+    Returns:
+        Flattened diagonal.
+    """
+    return np.diag(arr, k)[:, None]
+
+
+def scale(arr: Array, new_min: int = 0, new_max: int = 1, axis: int = -1
+          ) -> Array:
+    """Scale ``arr`` between ``new_min`` and ``new_max``.
+
+    Args:
+        arr:        Array to be scaled.
+        new_min:    Lower bound.
+        new_max:    Upper bound.
+        axis:       Axis along which to scale.
+
+    Returns:
+        Array of transformed values with the same shape as ``arr``.
+    """
+    xmax = arr.max(axis=axis, keepdims=True)
+    xmin = arr.min(axis=axis, keepdims=True)
+
+    fact = (arr-xmin) / (xmax - xmin)
+    out = fact * (new_max - new_min) + new_min
+
+    return out
+
+
+def smooth_stat(arr: Array) -> Array:
+    """Smooth the signal based on its mean and standard deviation.
+
+    Args:
+        arr:    Input signal.
+
+    Returns:
+        smoothed input signal.
+    """
+    out = []
+    sig_mean = arr.mean()
+    sig_std = arr.std()
+    for i in arr:
+        if i < sig_mean - sig_std or i > sig_mean + sig_std:
+            out.append(i)
+        else:
+            out.append(sig_mean)
+
+    return np.array(out)
+
+
+def standardize(arr: Array) -> Array:
+    """Retrun z-transformed values of ``arr``.
+
+    Args:
+        arr:    Input array.
+
+    Returns:
+        z-transformed values
+    """
+    return (arr - arr.mean(axis=0)) / arr.std(axis=0)
+
+
+def time_stamp(fmt: Optional[str] = None) -> str:
+    """Report the call time as a UTC time stamp.
+
+    If ``fmt`` is not given, this function returns time stamps
+    in ISO 8601 format.
+
+    Args:
+       fmt:  Format specification.
+
+    Returns:
+        Time stamp according to ``fmt``.
+    """
+    tsp = datetime.now(timezone.utc)
+    if fmt is None:
+        return tsp.isoformat()
+    return tsp.strftime(fmt)
+
+
+def within(val: float, bounds: Tuple[float, float]) -> bool:
+    """Return True if x is in window.
+
+    Args:
+        val:    Value to test.
+
+    Returns:
+       ``True``, if ``val`` is within ``bounds``.
+    """
+    return bounds[0] <= val <= bounds[1]
+
+
+def within_any(val: float, windows: Array) -> bool:
+    """Return True if x is in any of the given windows.
+
+    Args:
+        val:    Value to test.
+        windows: Array of bounds.
+
+    Returns:
+    """
+    a = windows[:, 0] <= val
+    b = val <= windows[:, 1]
+    c = np.logical_and(a, b)
+    return np.any(c)
+
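+# Examples (illustrative): ``within(0.5, (0.0, 1.0))`` is ``True``, and
+# ``within_any(5.0, np.array([[0.0, 1.0], [4.0, 6.0]]))`` is ``True``,
+# since 5.0 falls in the second window.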
+
+def fsum(arr: Array, axis: Optional[int] = None, keepdims: bool = False,
+         dtype: str = 'float64') -> Array:
+    """Return ``math.fsum`` along the specified axis.
+
+    This function supports at most two-dimensional arrays.
+
+    Args:
+        arr:      Input array.
+        axis:     Reduction axis.
+        keepdims: If ``True``, the output will have the same dimensionality
+                  as the input.
+        dtype:    Numpy data type.
+    Returns:
+        Sums along axis.
+    """
+    if axis is None:
+        out = np.float64(_math.fsum(arr.flatten()))
+        if keepdims:
+            out = np.array(out, ndmin=arr.ndim)
+    elif axis == 0:
+        out = np.array([_math.fsum(col) for col in arr.T], dtype=dtype)
+        if keepdims:
+            out = np.expand_dims(out, 0)
+    elif axis == 1:
+        out = np.array([_math.fsum(row) for row in arr], dtype=dtype)
+        if keepdims:
+            out = np.expand_dims(out, 1)
+    else:
+        raise ValueError(f'Axis is {axis}, but must be 0, 1, or None.')
+    return out
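+
+# Example (illustrative): for ``a = np.array([[1.0, 2.0], [3.0, 4.0]])``,
+# ``fsum(a)`` gives 10.0, ``fsum(a, axis=0)`` gives [4.0, 6.0], and
+# ``fsum(a, axis=1)`` gives [3.0, 7.0].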
diff --git a/src/apollon/types.py b/src/apollon/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b0619ad6c4319888f749d1d2317d8f21dead7ca
--- /dev/null
+++ b/src/apollon/types.py
@@ -0,0 +1,27 @@
+"""apollon/types.py -- Collection of static type hints.
+Licensed under the terms of the BSD-3-Clause license.
+Copyright (C) 2019 Michael Blaß
+mblass@posteo.net
+"""
+import pathlib
+from typing import (Any, Collection, Dict, Generator, Iterable, List, Optional,
+                    Sequence, Tuple, Union)
+import numpy as np
+from matplotlib import axes
+
+
+Array = np.ndarray
+ArrayOrStr = Union[Array, str]
+IterOrNone = Union[Iterable, None]
+
+ParamsType = Dict[str, Any]
+PathType = Union[str, pathlib.Path]
+PathGen = Generator[PathType, None, None]
+Schema = Dict[str, Collection[str]]
+
+Shape = Tuple[int, int]
+SomDims = Tuple[int, int, int]
+Coord = Tuple[int, int]
+AdIndex = Tuple[List[int], List[int]]
+
+Axis = axes.Axes
diff --git a/tests/data/bmu_err_euc.npy b/tests/data/bmu_err_euc.npy
new file mode 100644
index 0000000000000000000000000000000000000000..29bb303fa1a1fb167b3bbaec8f9cc53d5172c01b
Binary files /dev/null and b/tests/data/bmu_err_euc.npy differ
diff --git a/tests/data/bmu_idx_euc.npy b/tests/data/bmu_idx_euc.npy
new file mode 100644
index 0000000000000000000000000000000000000000..6a641ea7af59a593b1123f3c6657a59035c5344c
Binary files /dev/null and b/tests/data/bmu_idx_euc.npy differ
diff --git a/tests/data/test_inp.npy b/tests/data/test_inp.npy
new file mode 100644
index 0000000000000000000000000000000000000000..2cc2dccd9c5007c1ecd73c8f91146d9ece5e20e9
Binary files /dev/null and b/tests/data/test_inp.npy differ
diff --git a/tests/data/test_weights.npy b/tests/data/test_weights.npy
new file mode 100644
index 0000000000000000000000000000000000000000..b72f46e2359b4ccf67dc794469e1a08f39af7fb0
Binary files /dev/null and b/tests/data/test_weights.npy differ
diff --git a/tests/environment.py b/tests/environment.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b76cf74ce10e8f9d64fa9d2a2ea46f900cc976
--- /dev/null
+++ b/tests/environment.py
@@ -0,0 +1,3 @@
+import numpy as np
+
+Array = np.ndarray
diff --git a/tests/hmm/test_hmm.py b/tests/hmm/test_hmm.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3c373ac20a4a7a284347775b1bae8c0d9f84c38
--- /dev/null
+++ b/tests/hmm/test_hmm.py
@@ -0,0 +1,38 @@
+"""hmm_test.py
+Unit test for HMM implementation."""
+
+
+import numpy as np
+from scipy.stats import poisson
+import unittest
+
+from apollon.hmm.poisson import PoissonHmm
+
+
+class TestHMM_utilities(unittest.TestCase):
+    def setUp(self):
+        # Arbitrary transition probability matrix
+        self.A = np.array([[1., 0, 0], [.2, .3, .5], [.1, .3, .6]])
+
+        # Wrong number of dimensions
+        self.B1 = np.array([1., 0, 0, 0])
+        self.B2 = np.array([[[1., 0, 0], [.2, .3, .5], [.1, .3, .6]]])
+
+        # Not quadratic
+        self.C1 = np.array([[1., 0, 0], [.2, .3, .5]])
+        self.C2 = np.array([[1.0], [.5, .5], [.2, .8]])
+
+        # Rows do not sum up to one
+        self.D = np.array([[.2, .3, .5], [.5, .4, .2], [1., 0, 0]])
+
+    def test_success(self):
+        mus = [20, 40, 80, 120, 40]
+        m = len(mus)
+        data = np.concatenate([poisson(mu).rvs(30) for mu in mus])
+        hmm = PoissonHmm(data, m)
+        hmm.fit(data)
+        self.assertTrue(hmm.success)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/io/test_json.py b/tests/io/test_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cee0ac63208b9e9c536d1021642d0abc1cba41b
--- /dev/null
+++ b/tests/io/test_json.py
@@ -0,0 +1,12 @@
+import json
+import unittest
+
+from apollon.io.json import load_schema
+
+class TestLoadSchema(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def test_load(self):
+        schema = load_schema('ndarray')
+        self.assertTrue(isinstance(schema, dict))
diff --git a/tests/signal/test_features.py b/tests/signal/test_features.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca6b324167143fed2d84d14a7866c2a88119f40d
--- /dev/null
+++ b/tests/signal/test_features.py
@@ -0,0 +1,96 @@
+import unittest
+import numpy as np
+
+from hypothesis import given, assume
+from hypothesis import strategies as st
+import hypothesis.extra.numpy as htn
+
+from apollon.types import Array
+from apollon.signal import features
+from apollon.signal.spectral import Dft
+from apollon.signal.tools import sinusoid
+from apollon._defaults import SPL_REF
+
+finite_float_arrays = htn.arrays(np.float64,
+        htn.array_shapes(min_dims=2, max_dims=2, min_side=2),
+        elements=st.floats(allow_nan=False, allow_infinity=False))
+
+sample_rates = st.integers(min_value=4, max_value=100000)
+
+@st.composite
+def rates_and_frequencies(draw, elements=sample_rates):
+    fps = draw(elements)
+    frq = draw(st.integers(min_value=1, max_value=fps//2-1))
+    return fps, frq
+
+"""
+class TestCdim(unittest.TestCase):
+    def setUp(self):
+        self.data = sinusoid((300, 600), [.2, .1], fps=3000, noise=None)
+        self.ecr = features.cdim(self.data, delay=14, m_dim=80, n_bins=1000,
+                scaling_size=10, mode='bader')
+
+    def test_cdim_returns_array(self):
+        self.assertTrue(isinstance(self.ecr, Array))
+
+    def test_cdim_gt_zero(self):
+        self.assertTrue(np.all(self.ecr > 0))
+"""
+
+class TestEnergy(unittest.TestCase):
+    @given(finite_float_arrays)
+    def test_energy_positive(self, test_sig):
+        res = features.energy(test_sig) >= 0
+        self.assertTrue(res.all())
+
+
+class TestSpl(unittest.TestCase):
+    def setUp(self):
+        self.lower_bound = np.array([SPL_REF, SPL_REF*0.1])
+        self.range = np.array([SPL_REF+1e-6, 1.0])
+
+    def test_spl_lower_bound(self):
+        cnd = np.all(features.spl(self.lower_bound) == 0)
+        self.assertTrue(cnd)
+
+    def test_spl_range(self):
+        cnd = np.all(features.spl(self.range) > 0)
+        self.assertTrue(cnd)
+
+
+class TestSpectralCentroid(unittest.TestCase):
+    @given(rates_and_frequencies())
+    def test_centroid(self, params):
+        fps, frq = params
+        sig = sinusoid(frq, fps=fps)
+        dft = Dft(fps=fps, window=None)
+        sxx = dft.transform(sig)
+        spc = features.spectral_centroid(sxx.frqs, sxx.power)
+        self.assertAlmostEqual(spc.item(), frq)
+
+
+class TestSpectralSpread(unittest.TestCase):
+    @given(rates_and_frequencies())
+    def test_spread(self, params):
+        fps, frq = params
+        sig = sinusoid(frq, fps=fps)
+        dft = Dft(fps=fps, window=None)
+        sxx = dft.transform(sig)
+        sps = features.spectral_spread(sxx.frqs, sxx.power)
+        self.assertLess(sps.item(), 1.0)
+
+    @given(rates_and_frequencies())
+    def test_spread_with_given_centroid(self, params):
+        fps, frq = params
+        sig = sinusoid(frq, fps=fps)
+        dft = Dft(fps=fps, window=None)
+        sxx = dft.transform(sig)
+        spc = features.spectral_centroid(sxx.frqs, sxx.power)
+        sps = features.spectral_spread(sxx.frqs, sxx.power)
+        sps_wc = features.spectral_spread(sxx.frqs, sxx.power, spc)
+        self.assertEqual(sps.item(), sps_wc.item())
+        self.assertLess(sps.item(), 1.0)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/signal/test_spectral.py b/tests/signal/test_spectral.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bca2558c1379925d19648a8165e0dbb6b30c83b
--- /dev/null
+++ b/tests/signal/test_spectral.py
@@ -0,0 +1,183 @@
+import unittest
+import numpy as np
+import scipy as sp
+
+from hypothesis import given
+from hypothesis.strategies import integers, floats
+from hypothesis.extra.numpy import arrays, array_shapes
+
+from scipy.signal import stft
+
+from apollon.segment import Segmentation
+from apollon.signal.spectral import fft, Dft, Stft, StftSegments
+from apollon.signal.container import StftParams
+from apollon.signal.tools import sinusoid
+
+
+Array = np.ndarray
+
+class TestFft(unittest.TestCase):
+    def setUp(self):
+        self.fps = 9000
+        self.frqs = np.array([440, 550, 660, 880, 1760])
+        self.amps = np.array([1., .5, .25, .1, .05])
+        self.signal = sinusoid(self.frqs, self.amps, fps=self.fps, comps=True)
+
+    def test_input_shape(self):
+        with self.assertRaises(ValueError):
+            fft(np.random.randint(2, 100, (20, 20, 20)))
+
+    def test_window_exists(self):
+        with self.assertRaises(ValueError):
+            fft(self.signal, window='whatever')
+
+    @given(integers(min_value=1, max_value=44100))
+    def test_nfft(self, n_fft):
+        bins = fft(self.signal, n_fft=n_fft)
+        self.assertEqual(bins.shape[0], n_fft//2+1)
+
+    def test_transform(self):
+        bins = np.absolute(fft(self.signal))
+        idx = np.arange(self.frqs.size, dtype=int)
+        self.assertTrue(np.allclose(bins[self.frqs, idx], self.amps))
+
+
+
+class TestStftSegmentsTimes(unittest.TestCase):
+    def setUp(self):
+        self.fps = 9000
+        self.n_perseg = 512
+        self.n_overlap = 256
+        self.amps = np.array([1., .5, .25, .1, .05])
+        self.frqs = np.array([440, 550, 660, 880, 1760])
+        self.signal = sinusoid(self.frqs, self.amps, fps=self.fps)
+        self.stft = StftSegments(self.fps)
+
+    def test_times_extend_pad(self):
+        segmenter = Segmentation(self.n_perseg, self.n_overlap,
+                                 extend=True, pad=True)
+        segs = segmenter.transform(self.signal)
+        sxx = self.stft.transform(segs)
+        frqs, times, bins = stft(self.signal.squeeze(), self.fps, 'hamming',
+                                 self.n_perseg, self.n_overlap,
+                                 boundary='zeros', padded=True)
+        self.assertEqual(sxx.times.size, times.size)
+        self.assertTrue(np.allclose(sxx.times.squeeze(), times))
+
+    def test_times_extend_no_pad(self):
+        segmenter = Segmentation(self.n_perseg, self.n_overlap,
+                                 extend=True, pad=False)
+        segs = segmenter.transform(self.signal)
+        sxx = self.stft.transform(segs)
+        frqs, times, bins = stft(self.signal.squeeze(), self.fps, 'hamming',
+                                 self.n_perseg, self.n_overlap,
+                                 boundary='zeros', padded=False)
+        self.assertEqual(sxx.times.size, times.size)
+        self.assertTrue(np.allclose(sxx.times.squeeze(), times))
+
+    def test_times_no_extend_pad(self):
+        segmenter = Segmentation(self.n_perseg, self.n_overlap,
+                                 extend=False, pad=True)
+        segs = segmenter.transform(self.signal)
+        sxx = self.stft.transform(segs)
+        frqs, times, bins = stft(self.signal.squeeze(), self.fps, 'hamming',
+                                 self.n_perseg, self.n_overlap,
+                                 boundary=None, padded=True)
+        self.assertEqual(sxx.times.size, times.size)
+        self.assertTrue(np.allclose(sxx.times.squeeze(), times))
+
+    def test_times_no_extend_no_pad(self):
+        segmenter = Segmentation(self.n_perseg, self.n_overlap,
+                                 extend=False, pad=False)
+        segs = segmenter.transform(self.signal)
+        sxx = self.stft.transform(segs)
+        frqs, times, bins = stft(self.signal.squeeze(), self.fps, 'hamming',
+                                 self.n_perseg, self.n_overlap,
+                                 boundary=None, padded=False)
+        self.assertEqual(sxx.times.size, times.size)
+        self.assertTrue(np.allclose(sxx.times.squeeze(), times))
+
+
+class TestSpectrum(unittest.TestCase):
+    real_floats = floats(0, 1, allow_nan=False, allow_infinity=False)
+    arr_2d_shapes = array_shapes(min_dims=2, max_dims=2,
+                               min_side=1, max_side=100)
+    float_2d_arrays = arrays(np.float64, arr_2d_shapes,
+                             elements=real_floats)
+
+    @given(float_2d_arrays)
+    def test_abs_is_real(self, inp: Array) -> None:
+        dft = Dft(inp.shape[0], 'hamming', None)
+        spctrm = dft.transform(inp)
+        self.assertTrue(spctrm.abs.dtype.type is np.float64)
+
+    @given(float_2d_arrays)
+    def test_abs_ge_zero(self, inp: Array) -> None:
+        dft = Dft(inp.shape[0], 'hamming', None)
+        spctrm = dft.transform(inp)
+        self.assertTrue(np.all(spctrm.abs>=0))
+
+    @given(float_2d_arrays)
+    def test_d_frq_is_positive_float(self, inp: Array) -> None:
+        dft = Dft(inp.shape[0], 'hamming', None)
+        spctrm = dft.transform(inp)
+        dfrq = spctrm.d_frq
+        self.assertTrue(isinstance(dfrq, float))
+        self.assertTrue(dfrq>0)
+
+    @given(float_2d_arrays)
+    def test_frqs_is_positive_array(self, inp: Array) -> None:
+        dft = Dft(inp.shape[0], 'hamming', None)
+        spctrm = dft.transform(inp)
+        frqs = spctrm.frqs
+        self.assertTrue(isinstance(frqs, np.ndarray))
+        self.assertTrue(frqs.dtype.type is np.float64)
+        self.assertTrue(np.all(frqs>=0))
+
+    @given(float_2d_arrays)
+    def test_phase_within_pi(self, inp: Array) -> None:
+        dft = Dft(inp.shape[0], 'hamming', None)
+        spctrm = dft.transform(inp)
+        phase = spctrm.phase
+        self.assertTrue(phase.dtype.type is np.float64)
+        self.assertTrue(np.all(-np.pi<=phase))
+        self.assertTrue(np.all(phase<=np.pi))
+
+    @given(float_2d_arrays)
+    def test_power_is_positive_array(self, inp: Array) -> None:
+        dft = Dft(inp.shape[0], 'hamming', None)
+        spctrm = dft.transform(inp)
+        power = spctrm.power
+        self.assertTrue(power.dtype.type is np.float64)
+        self.assertTrue(np.all(power>=0.0))
+
+    @given(integers(min_value=1, max_value=10000))
+    def test_n_fft(self, n_samples: int) -> None:
+        sig = np.empty((n_samples, 1))
+        dft = Dft(n_samples, 'hamming', None)
+        y = dft.transform(sig)
+        self.assertEqual(y._n_fft, sig.size)
+
+
+class TestSpectrogram(unittest.TestCase):
+
+    sp_args = {'window': 'hamming', 'nperseg': 512, 'noverlap': 256}
+    ap_args = {'window': 'hamming', 'n_perseg': 512, 'n_overlap': 256}
+
+    @given(integers(min_value=1000, max_value=20000))
+    def test_times(self, fps) -> None:
+        sig = np.random.rand(fps, 1)
+        _, times, _ = sp.signal.stft(sig.squeeze(), fps,
+                                     **TestSpectrogram.sp_args)
+        stft = Stft(fps, **TestSpectrogram.ap_args)
+        sxx = stft.transform(sig)
+        self.assertTrue(np.allclose(times, sxx.times))
+
+    def test_frqs_and_bins_have_same_first_dim(self) -> None:
+        fps = 9000
+        sig = np.random.rand(fps, 1)
+        stft = Stft(fps, **TestSpectrogram.ap_args)
+        sxx = stft.transform(sig)
+        self.assertEqual(sxx.frqs.shape[0], sxx.bins.shape[0])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/signal/test_tools.py b/tests/signal/test_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8e71024002e0a2c33865dbb673ce3c3b0a512b7
--- /dev/null
+++ b/tests/signal/test_tools.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+
+import unittest
+import numpy as np
+
+from apollon._defaults import SPL_REF
+from apollon.signal import features
+from apollon.signal import tools
+
+
+class TestAmp(unittest.TestCase):
+    def setUp(self):
+        self.lower_bound = np.array([SPL_REF, SPL_REF*0.1])
+        self.range = np.array([SPL_REF+1e-6, 1.0])
+
+    def test_amp_lower_bound(self):
+        res = tools.amp(features.spl(self.lower_bound))
+        cnd = np.array_equal(res, np.array([SPL_REF, SPL_REF]))
+        self.assertTrue(cnd)
+
+    def test_amp_range(self):
+        res = tools.amp(features.spl(self.range))
+        cnd = np.allclose(res, self.range)
+        self.assertTrue(cnd)
+
+
+class TestSinusoid(unittest.TestCase):
+    def setUp(self):
+        self.single_frq = 100
+        self.multi_frq = (100, 200, 300)
+        self.single_amp = .3
+        self.multi_amp = (0.5, .3, .2)
+
+    def test_returns_2darray_on_scalar_frq(self):
+        sig = tools.sinusoid(self.single_frq)
+        self.assertTrue(sig.ndim>1)
+
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/som/test_distance.py b/tests/som/test_distance.py
new file mode 100644
index 0000000000000000000000000000000000000000..e80cb9bb0e7445fb8a362a3197106e23839fe6bc
--- /dev/null
+++ b/tests/som/test_distance.py
@@ -0,0 +1,32 @@
+import unittest
+
+import numpy as np
+from scipy.spatial import distance
+
+from apollon.som import _distance as asd
+
+
+class TestHellinger(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def test_unit_distance(self):
+        comp = np.array([[1.0, 0.0, 0.0]])
+        sample = np.array([[0.0, 1.0, 0.0],
+                           [0.0, 0.0, 1.0],
+                           [0.0, 0.5, 0.5]])
+        res = distance.cdist(comp, sample, metric=asd.hellinger)
+        self.assertTrue(np.all(res == 1.))
+
+
+class TestHellinger_stm(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def test_zero_dist_on_eq_dist(self):
+        n_rows = 5
+        sample = np.eye(n_rows).ravel()
+        res = asd.hellinger_stm(sample, sample)
+        self.assertTrue(np.all(res == 0.0))
+
diff --git a/tests/som/test_neighbours.py b/tests/som/test_neighbours.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff645a8ce2ddab2f90b902c5f97862df6df7fe8b
--- /dev/null
+++ b/tests/som/test_neighbours.py
@@ -0,0 +1,17 @@
+import unittest
+
+from hypothesis import strategies as hst
+import numpy as np
+from scipy.spatial import distance
+import scipy as sp
+
+from apollon.som import utilities as asu
+from apollon.som.som import IncrementalMap
+
+
+class TestIsNeighbour(unittest.TestCase):
+    def setUp(self):
+        self.som = IncrementalMap((10, 10, 3), 100, 0.5, 5)
+
+
+
diff --git a/tests/som/test_som.py b/tests/som/test_som.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d6d92a39245b9ac1c5c01052588733cc7be7f5d
--- /dev/null
+++ b/tests/som/test_som.py
@@ -0,0 +1,99 @@
+import unittest
+from typing import Tuple
+
+from hypothesis import given
+import hypothesis.strategies as hst
+import numpy as np
+import scipy as sp
+
+from apollon.som.som import SomBase, SomGrid
+
+SomDim = Tuple[int, int, int]
+dimension = hst.integers(min_value=2, max_value=50)
+som_dims = hst.tuples(dimension, dimension, dimension)
+
+
+class TestSomBase(unittest.TestCase):
+
+    @given(som_dims)
+    def test_dims(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        self.assertEqual(som.dims, dims)
+
+    @given(som_dims)
+    def test_dx(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        self.assertEqual(som.dx, dims[0])
+
+    @given(som_dims)
+    def test_dy(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        self.assertEqual(som.dy, dims[1])
+
+    @given(som_dims)
+    def test_dw(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        self.assertEqual(som.dw, dims[2])
+
+    @given(som_dims)
+    def test_n_units(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        self.assertEqual(som.n_units, dims[0]*dims[1])
+
+    @given(som_dims)
+    def test_shape(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        self.assertEqual(som.shape, (dims[0], dims[1]))
+
+    @given(som_dims)
+    def test_grid(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        self.assertIsInstance(som.grid, SomGrid)
+
+    """
+    @given(som_dims)
+    def test_dists(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        self.assertIsInstance(som.dists, np.ndarray)
+    """
+
+    @given(som_dims)
+    def test_weights(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        self.assertIsNone(som.weights)
+
+    @given(som_dims)
+    def test_match(self, dims: SomDim) -> None:
+        data = np.random.rand(100, dims[2])
+        som = SomBase(dims, 10, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        som._weights = som.init_weights(data, som.shape)
+        self.assertIsInstance(som.match(data), np.ndarray)
+
+    @given(som_dims)
+    def test_umatrix_has_map_shape(self, dims: SomDim) -> None:
+        data = np.random.rand(100, dims[2])
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        som._weights = som.init_weights(data, som.shape)
+        um = som.umatrix()
+        self.assertEqual(um.shape, som.shape)
+
+    @given(som_dims)
+    def test_umatrix_scale(self, dims: SomDim) -> None:
+        som = SomBase(dims, 100, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        som._weights = np.tile(np.arange(som.n_features), (som.n_units, 1))
+        som._weights[:, -1] = np.arange(som.n_units)
+        um = som.umatrix(scale=True, norm=False)
+        self.assertEqual(um[0, 0], um[-1, -1])
+        self.assertEqual(um[0, -1], um[-1, 0])
+
+    @given(som_dims)
+    def test_umatrix_norm(self, dims: SomDim) -> None:
+        data = np.random.rand(100, dims[2])
+        som = SomBase(dims, 10, 0.1, 10, 'gaussian', 'rnd', 'euclidean')
+        som._weights = som.init_weights(data, som.shape)
+        um = som.umatrix(norm=True)
+        self.assertEqual(um.max(), 1.0)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/som/test_utilities.py b/tests/som/test_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8c61e5243c6c942064264bca35ca2bc54368fbd
--- /dev/null
+++ b/tests/som/test_utilities.py
@@ -0,0 +1,78 @@
+import unittest
+
+from hypothesis import strategies as hst
+from hypothesis import given
+import numpy as np
+from scipy.spatial import distance
+
+from apollon.som import utilities as asu
+from apollon.types import SomDims
+
+dimension = hst.integers(min_value=2, max_value=50)
+som_dims = hst.tuples(dimension, dimension, dimension)
+"""
+class TestMatch(unittest.TestCase):
+    def setUp(self) -> None:
+        self.weights = np.random.rand(100, 5)
+        self.data = np.random.rand(200, 5)
+
+    def test_returns_tuple(self) -> None:
+        res = asu.match(self.weights, self.data, 2, 'euclidean')
+        self.assertIsInstance(res, tuple)
+
+    def test_elements_are_arrays(self) -> None:
+        bmu, err = asu.match(self.weights, self.data, 'euclidean')
+        self.assertIsInstance(bmu, np.ndarray)
+        self.assertIsInstance(err, np.ndarray)
+
+    def test_correct_ordering(self) -> None:
+        kth = 5
+        bmu, err = asu.match(self.weights, self.data, 'euclidean')
+        wdists = distance.cdist(self.weights, self.data)
+        kswd = wdists.sort(axis=0)[:kth, :]
+"""
+
+class TestDistribute(unittest.TestCase):
+    def setUp(self) -> None:
+        self.n_units = 400
+        self.bmu = np.random.randint(0, self.n_units, 100)
+
+    def test_returns_dict(self):
+        res = asu.distribute(self.bmu, self.n_units)
+        self.assertIsInstance(res, dict)
+
+
+class TestSampleHist(unittest.TestCase):
+    def setUp(self) -> None:
+        pass
+
+    @given(som_dims)
+    def test_rows_are_stochastic(self, dims: SomDims) -> None:
+        weights = asu.sample_hist(dims)
+        comp = np.isclose(weights.sum(axis=1), 1)
+        self.assertTrue(comp.all())
+
+
+class TestSamplePca(unittest.TestCase):
+    def setUp(self) -> None:
+        pass
+
+    @given(som_dims)
+    def test_weights_shape(self, dims: SomDims) -> None:
+        weights = asu.sample_pca(dims)
+        self.assertEqual(weights.shape, (dims[0]*dims[1], dims[2]))
+"""
+class TestSelfOrganizingMap(unittest.TestCase):
+    def setUp(self):
+        self.weights = np.load('data/test_weights.npy')
+        self.inp = np.load('data/test_inp.npy')
+
+    def test_best_match_computation(self):
+        test_bmu = np.load('data/bmu_idx_euc.npy')
+        test_err = np.load('data/bmu_err_euc.npy')
+        bmu, err = utilities.best_match(self.weights, self.inp, 'euclidean')
+        self.assertTrue(np.array_equiv(test_bmu, bmu))
+        self.assertTrue(np.array_equiv(test_err, err))
+"""
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_audio.py b/tests/test_audio.py
index 2b211358a730bf01a8b5a7dee204da62b60dc9ed..c8f794c06b74d3fc3345797a73993578912dc1bf 100644
--- a/tests/test_audio.py
+++ b/tests/test_audio.py
@@ -1,22 +1,85 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-
 import unittest
-import numpy as _np
+import numpy as np
+
+from apollon.audio import AudioFile
+
+class TestAudioFile(unittest.TestCase):
+    def setUp(self):
+        self.snd = AudioFile('audio/beat.wav')
+
+    def test_path_is_string(self):
+        self.assertIsInstance(self.snd.file_name, str)
+
+    def test_hash(self):
+        snd2 = AudioFile('audio/beat.wav')
+        self.assertEqual(self.snd.hash, snd2.hash)
+
+
+class TestAudioFileReadMono(unittest.TestCase):
+    def setUp(self):
+        self.snd = AudioFile('audio/beat.wav')
+        self.ref = self.snd._file.read(always_2d=True)
 
-from apollon.audio import loadwav
+    def test_read_raw_multi(self):
+        data = self.snd.read(-1, norm=False, mono=False)
+        self.assertTrue(np.array_equal(self.ref, data))
 
+    def test_read_raw_mono(self):
+        ref = self.ref.sum(axis=1, keepdims=True) / self.ref.shape[1]
+        data = self.snd.read(norm=False, mono=True)
+        self.assertTrue(np.array_equal(ref, data))
 
-class Test_ModulAudio(unittest.TestCase):
+    def test_read_norm_multi(self):
+        ref = self.ref / self.ref.max(axis=0, keepdims=True)
+        data = self.snd.read(norm=True, mono=False)
+        self.assertTrue(np.array_equal(ref, data))
+
+    def test_read_norm_mono(self):
+        ref = self.ref.sum(axis=1, keepdims=True) / self.ref.shape[1]
+        ref /= self.ref.max()
+        data = self.snd.read(norm=True, mono=True)
+        self.assertTrue(np.array_equal(ref, data))
+
+    def tearDown(self):
+        self.snd.close()
+
+
+class TestAudioFileReadMultiChannel(unittest.TestCase):
     def setUp(self):
-        self.x = loadwav('/Users/michael/audio/beat.wav')
+        self.snd = AudioFile('audio/beat_5ch.wav')
+        self.ref = self.snd._file.read(always_2d=True)
+
+    def test_read_raw_multi(self):
+        data = self.snd.read(norm=False, mono=False)
+        self.assertTrue(np.array_equal(self.ref, data))
+
+    def test_read_raw_mono(self):
+        ref = self.ref.sum(axis=1, keepdims=True) / self.ref.shape[1]
+        data = self.snd.read(norm=False, mono=True)
+        self.assertTrue(np.array_equal(ref, data))
+
+    def test_read_norm_multi(self):
+        ref = self.ref / self.ref.max(axis=0, keepdims=True)
+        data = self.snd.read(norm=True, mono=False)
+        self.assertTrue(np.array_equal(ref, data))
+
+    def test_read_norm_mono(self):
+        ref = self.ref.sum(axis=1, keepdims=True) / self.ref.shape[1]
+        ref /= self.ref.max()
+        data = self.snd.read(norm=True, mono=True)
+        self.assertTrue(np.array_equal(ref, data))
+
+    def tearDown(self):
+        self.snd.close()
 
-    def test_AudioDataAttributes(self):
-        c = isinstance(self.x.fs, int)
-        self.assertTrue(c)
 
-        c = isinstance(self.x.data, _np.ndarray)
-        self.assertTrue(c)
+"""
+def test_fti16(self):
+    res = fti16(self.snd_mono.data)
+    self.assertTrue(isinstance(res, Array))
+    self.assertTrue(res.dtype == 'int16')
+    self.assertTrue(self.snd_mono.data.shape == res.shape)
+"""
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/tests/test_cdim.c b/tests/test_cdim.c
new file mode 100644
index 0000000000000000000000000000000000000000..68f6aaa9c24e1899bc327dd5153b9dd15ec79aa7
--- /dev/null
+++ b/tests/test_cdim.c
@@ -0,0 +1,49 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+
+void
+delay_embedding_dists (const double *inp,
+                       const size_t  n_vectors,
+                       const size_t  delay,
+                       const size_t  m_dim,
+                             double *dists)
+{
+    
+    for (size_t i = 0; i < n_vectors - 1; i++)
+    {
+        for (size_t j = i + 1; j < n_vectors; j++)
+        {
+            size_t flat_idx = i * n_vectors + j - i*(i+1)/2 - i - 1;
+            for (size_t m = 0; m < m_dim; m++)
+            {
+                dists[flat_idx] += pow (inp[i+m*delay] - inp[j+m*delay], 2);
+            }
+            dists[flat_idx] = sqrt (dists[flat_idx]);
+            printf ("%f\n", dists[flat_idx]);
+        }
+    }
+}
+
+
+int
+main (void) {
+    double sig[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+    size_t delay = 2;
+    size_t m_dim = 3;
+    size_t len_sig = sizeof sig / sizeof (double);
+    size_t n_vectors = len_sig - ((m_dim - 1) * delay);
+    size_t n_dists = n_vectors * (n_vectors-1) / 2;
+    double *dists = calloc (n_dists, sizeof (double));
+
+    printf ("nv: %zu\nndists: %zu\n" , n_vectors, n_dists);
+    delay_embedding_dists (sig, n_vectors, delay, m_dim, dists);
+
+    for (size_t i=0; i<n_dists; i++)
+    {
+        printf ("%f\n", *(dists+i));
+    }
+    free (dists);
+    return 0;
+}
+    
diff --git a/tests/test_container.py b/tests/test_container.py
new file mode 100644
index 0000000000000000000000000000000000000000..a85937b083c96b6213f090e33b7b3734d9bddad2
--- /dev/null
+++ b/tests/test_container.py
@@ -0,0 +1,32 @@
+import os
+import string
+import tempfile
+import unittest
+
+from hypothesis import given
+from hypothesis.strategies import dictionaries, sampled_from, text
+import jsonschema
+
+from apollon.container import Params
+from apollon import io
+
+
+class TestParams(unittest.TestCase):
+    def test_dict(self):
+        params = Params()
+        self.assertTrue(isinstance(params.to_dict(), dict))
+
+
+class TestDumpJSON(unittest.TestCase):
+    keys = text(sampled_from(string.ascii_letters), min_size=1, max_size=10)
+    vals = text(sampled_from(string.ascii_letters), min_size=1, max_size=10)
+    str_dicts = dictionaries(keys, vals)
+
+    @given(str_dicts)
+    def test_dump(self, inp) -> None:
+        handle, path = tempfile.mkstemp(suffix='.json', text=True)
+        io.json.dump(inp, path)
+        res = io.json.load(path)
+        self.assertTrue(all(inp[k] == v for k, v in res.items()))
+        os.close(handle)
+        os.unlink(path)
diff --git a/tests/test_hmm.py b/tests/test_hmm.py
deleted file mode 100644
index 6563c910646f08ccae4d95a8015d168757d8a1ba..0000000000000000000000000000000000000000
--- a/tests/test_hmm.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-
-"""hmm_test.py
-
-(c) Michael Blaß 2016
-
-Unit test for HMM implementation."""
-
-
-import unittest
-import numpy as _np
-
-from apollon.hmm.hmm_base import is_tpm
-
-
-class TestHMM_utilities(unittest.TestCase):
-    def setUp(self):
-        # Arbitrary transition probability matrix
-        self.A = _np.array([[1., 0, 0], [.2, .3, .5], [.1, .3, .6]])
-
-        # Wrong number of dimensions
-        self.B1 = _np.array([1., 0, 0, 0])
-        self.B2 = _np.array([[[1., 0, 0], [.2, .3, .5], [.1, .3, .6]]])
-
-        # Not quadratic
-        self.C1 = _np.array([[1., 0, 0], [.2, .3, .5]])
-        self.C2 = _np.array([[1.0], [.5, .5], [.2, .8]])
-
-        # Rows do not sum up to one
-        self.D = _np.array([[.2, .3, .5], [.5, .4, .2], [1., 0, 0]])
-
-    def test_true_tpm(self):
-        self.assertTrue(is_tpm(self.A), True)
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/test_io.py b/tests/test_io.py
index da0fdd8fe22017d9691251af1df234fa1ffa6b1f..a902cac690f26a6650a8972e312bfe84ef77b24a 100644
--- a/tests/test_io.py
+++ b/tests/test_io.py
@@ -1,37 +1,63 @@
 #!/usr/bin/python3
-# -*- coding: utf-8 -*-
-
+"""apollon/tests/test_io.py
+Test cases for IO module.
+"""
+import json
 from pathlib import Path
 import unittest
 
-from apollon.io import WavFileAccessControl
+import numpy as np
+from hypothesis import given
+import hypothesis.extra.numpy as htn
+
+import apollon.io as aio
 
 
-class Foo:
-    file = WavFileAccessControl()
+class MockFileLoader:
+    file = aio.WavFileAccessControl()
     def __init__(self, fname):
         self.file = fname
 
 
-class Test_ModulIO(unittest.TestCase):
+class TestWavFileAccessControl(unittest.TestCase):
     def setUp(self):
         self.invalid_fname = 34
-
         self.not_existing_file = './xxx.py'
         self.not_a_file = '.'
         self.not_a_wav_file = '../../README.md'
 
-    def test_InvalidFileNames(self):
-
+    def test_invalid_file_names(self):
         with self.assertRaises(TypeError):
-            x = Foo(self.invalid_fname)
+            MockFileLoader(self.invalid_fname)
 
         with self.assertRaises(FileNotFoundError):
-            x = Foo(self.not_existing_file)
+            MockFileLoader(self.not_existing_file)
 
         with self.assertRaises(IOError):
-            x = Foo(self.not_a_file)
-            x = Foo(self.not_a_wav_file)
+            MockFileLoader(self.not_a_file)
+            MockFileLoader(self.not_a_wav_file)
+
+
+class TestEncodeNdarray(unittest.TestCase):
+    @given(htn.arrays(htn.floating_dtypes(), htn.array_shapes()))
+    def test_encode(self, arr):
+        encoded = aio.encode_ndarray(arr)
+        self.assertTrue('__ndarray__' in encoded)
+        self.assertTrue(encoded['__ndarray__'])
+        self.assertTrue('__dtype__' in encoded)
+        self.assertTrue(isinstance(encoded['__dtype__'], str))
+        self.assertTrue('data' in encoded)
+        self.assertTrue(isinstance(encoded['data'], list))
+
+
+class TestDecodeNdarray(unittest.TestCase):
+    @given(htn.arrays(htn.floating_dtypes(), htn.array_shapes()))
+    def test_arrays(self, arr):
+        restored = aio.decode_ndarray(aio.encode_ndarray(arr))
+        self.assertTrue(arr.dtype.type is restored.dtype.type)
+        self.assertTrue(arr.shape == restored.shape)
+        self.assertTrue(np.allclose(arr, restored,
+            rtol=0, atol=0, equal_nan=True))
 
 
 if __name__ == '__main__':
diff --git a/tests/test_onsets.py b/tests/test_onsets.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fcbcb73f0b664a4fab8733220f40c3b36126db5
--- /dev/null
+++ b/tests/test_onsets.py
@@ -0,0 +1,61 @@
+import unittest
+
+import numpy as np
+import pandas as pd
+
+from apollon.audio import AudioFile
+from apollon.onsets import (OnsetDetector, EntropyOnsetDetector,
+        FluxOnsetDetector, FilterPeakPicker)
+
+
+class TestOnsetDetector(unittest.TestCase):
+    def setUp(self):
+        self.osd = OnsetDetector()
+
+    def test_init(self):
+        pass
+
+    def test_to_csv(self):
+        pass
+
+    def test_to_json(self):
+        pass
+
+    def test_to_pickle(self):
+        pass
+
+
+class TestEntropyOnsetDetector(unittest.TestCase):
+    def setUp(self):
+        self.snd = AudioFile('audio/beat.wav')
+        self.osd = EntropyOnsetDetector(self.snd.fps)
+
+    def test_detect(self):
+        self.osd.detect(self.snd.data)
+
+    def test_odf(self):
+        self.osd.detect(self.snd.data)
+        self.assertIsInstance(self.osd.odf, pd.DataFrame)
+
+
+class TestFluxOnsetDetector(unittest.TestCase):
+    def setUp(self):
+        self.snd = AudioFile('audio/beat.wav')
+        self.osd = FluxOnsetDetector(self.snd.fps)
+
+    def test_detect(self):
+        self.osd.detect(self.snd.data)
+
+    def test_odf(self):
+        self.osd.detect(self.snd.data)
+        self.assertIsInstance(self.osd.odf, pd.DataFrame)
+
+
+class TestPeakPicking(unittest.TestCase):
+    def setUp(self):
+        self.picker = FilterPeakPicker()
+        self.data = np.random.randint(0, 100, 100) + np.random.rand(100)
+
+    def test_peaks(self):
+        peaks = self.picker.detect(self.data)
+        self.assertIsInstance(peaks, np.ndarray)
diff --git a/tests/test_schema.py b/tests/test_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..2972d06fedf0df811371c62fdd5e0284c10291ce
--- /dev/null
+++ b/tests/test_schema.py
@@ -0,0 +1,100 @@
+import unittest
+
+import jsonschema
+import numpy as np
+
+from apollon import io
+
+check_schema = jsonschema.Draft7Validator.check_schema
+
+def validate(instance, schema):
+    return jsonschema.validate(instance, schema,
+                               jsonschema.Draft7Validator)
+
+
+
+class TestCorrGramParams(unittest.TestCase):
+    def setUp(self) -> None:
+        self.schema = io.json.load_schema('corrgram')
+        self.corrgram = {'wlen': 100, 'n_delay': 20, 'total': True}
+
+    def test_schema_is_valid(self):
+        check_schema(self.schema)
+
+    def test_fails_on_additional_property(self) -> None:
+        self.corrgram['beer'] = None
+        with self.assertRaises(jsonschema.ValidationError):
+            validate(self.corrgram, self.schema)
+
+    def test_fails_on_total_is_not_bool(self):
+        self.corrgram['total'] = 'this_causes_an_error'
+        with self.assertRaises(jsonschema.ValidationError):
+            validate(self.corrgram, self.schema)
+
+
+class TestCdimParams(unittest.TestCase):
+    def setUp(self) -> None:
+        self.schema = io.json.load_schema('corrdim')
+        self.corrdim = {'delay': 14, 'm_dim': 80, 'n_bins': 1000,
+                        'scaling_size': 10}
+
+    def test_schema_is_valid(self):
+        check_schema(self.schema)
+
+    def test_fails_on_additional_property(self) -> None:
+        self.corrdim['beer'] = None
+        with self.assertRaises(jsonschema.ValidationError):
+            validate(self.corrdim, self.schema)
+
+
+class TestDftParams(unittest.TestCase):
+    def setUp(self) -> None:
+        self.schema = io.json.load_schema('dft_params')
+        self.dft_params = {'fps': 44100, 'window': 'hamming', 'n_fft': None}
+
+    def test_schema_is_valid(self):
+        check_schema(self.schema)
+
+    def test_fails_on_additional_property(self) -> None:
+        self.dft_params['beer'] = None
+        with self.assertRaises(jsonschema.ValidationError):
+            validate(self.dft_params, self.schema)
+
+
+class TestStftParams(unittest.TestCase):
+    def setUp(self) -> None:
+        self.schema = io.json.load_schema('stft_params')
+        self.stft_params = {'fps': 44100, 'window': 'hamming', 'n_fft': None,
+                            'n_perseg': 1024, 'n_overlap': 512, 'extend': True,
+                            'pad': True}
+
+    def test_schema_is_valid(self) -> None:
+        check_schema(self.schema)
+
+    def test_fails_on_additional_property(self) -> None:
+        self.stft_params['beer'] = None
+        with self.assertRaises(jsonschema.ValidationError):
+            validate(self.stft_params, self.schema)
+
+
+class TestNdarray(unittest.TestCase):
+    def setUp(self) -> None:
+        self.schema = io.json.load_schema('ndarray')
+        self.data = np.arange(10.0)
+        self.valid_array = io.json.encode_ndarray(self.data)
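+        # encode_ndarray presumably returns a JSON-ready mapping; the
+        # tests below rely on it exposing the values under a 'data' key.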
+
+    def test_valid_schema(self) -> None:
+        check_schema(self.schema)
+
+    def test_pass_on_array(self) -> None:
+        validate(self.valid_array, self.schema)
+
+    def test_fails_on_additional_property(self) -> None:
+        self.valid_array['a'] = 12
+        with self.assertRaises(jsonschema.ValidationError):
+            validate(self.valid_array, self.schema)
+
+    def test_fails_on_data_is_not_array(self) -> None:
+        self.valid_array['data'] = 12
+        with self.assertRaises(jsonschema.ValidationError):
+            validate(self.valid_array, self.schema)
diff --git a/tests/test_segment.py b/tests/test_segment.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec7fb768516b6ed067bf367448c0cfc2ab388cbe
--- /dev/null
+++ b/tests/test_segment.py
@@ -0,0 +1,153 @@
+"""test_segment.py
+"""
+import unittest
+
+from hypothesis import given
+from hypothesis.strategies import integers, data, composite, SearchStrategy
+from hypothesis.extra.numpy import array_shapes
+import numpy as np
+
+from apollon.audio import AudioFile
+from apollon.segment import Segments, Segmentation, SegmentationParams
+
+
+MAX_NSEGS = 345    # cannot pass instance attribute to method decorator
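+# (@given strategies are built when the class body executes, i.e. before
+# setUp() has run, so they cannot be parametrized with instance attributes.)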
+
+
+def _valid_nfr() -> SearchStrategy:
+    return integers(min_value=2,
+                    max_value=2000000)
+
+
+def _valid_nps(n_frames: int) -> SearchStrategy:
+    return integers(min_value=2,
+                    max_value=n_frames)
+
+
+def _valid_nol(n_perseg: int) -> SearchStrategy:
+    return integers(min_value=1,
+                    max_value=n_perseg-1)
+
+
+@composite
+def valid_nfr_nps_nol(draw) -> tuple:
+    nfr = draw(_valid_nfr())
+    nps = draw(_valid_nps(nfr))
+    nol = draw(_valid_nol(nps))
+    return nfr, nps, nol
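+# Draws are dependent: every generated example satisfies
+# 2 <= nps <= nfr and 1 <= nol < nps, e.g. (nfr=100, nps=16, nol=7).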
+
+
+class TestSegments(unittest.TestCase):
+    def setUp(self) -> None:
+        seg_params = SegmentationParams(1024, 512, extend=True, pad=True)
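+        # Segments takes data of shape (n_perseg, n_segs): here 30 dummy
+        # segments of 1024 samples each (cf. test_extend_false below).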
+        self.segs = Segments(seg_params, np.empty((1024, 30)))
+
+    @given(valid_nfr_nps_nol())
+    def test_bounds_idx0_negative(self, shape) -> None:
+        nfr, nps, nol = shape
+        params = SegmentationParams(nps, nol, extend=True, pad=True)
+        segs = Segments(params, np.empty((nps, nfr)))
+        start, stop = segs.bounds(0)
+        self.assertLess(start, 0)
+        self.assertEqual(start, -(nps//2))    # parens because of floor div
+
+    @given(integers(max_value=-1))
+    def test_center_idx_gteq_zero(self, seg_idx) -> None:
+        with self.assertRaises(IndexError):
+            self.segs.center(seg_idx)
+
+    @given(data())
+    def test_center_idx_lt_nsegs(self, data) -> None:
+        seg_idx = data.draw(integers(min_value=self.segs.n_segs))
+        with self.assertRaises(IndexError):
+            self.segs.center(seg_idx)
+
+    @given(integers(max_value=-1))
+    def test_bounds_idx_gteq_zero(self, seg_idx) -> None:
+        with self.assertRaises(IndexError):
+            self.segs.bounds(seg_idx)
+
+    @given(data())
+    def test_bounds_idx_lt_nsegs(self, data) -> None:
+        seg_idx = data.draw(integers(min_value=self.segs.n_segs))
+        with self.assertRaises(IndexError):
+            self.segs.bounds(seg_idx)
+
+    def test_data(self):
+        seg_data = self.segs.data
+        self.assertIsInstance(seg_data, np.ndarray)
+
+    def test_extend_true(self) -> None:
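+        # extend=True presumably centers the first window on sample zero
+        # (cf. test_bounds_idx0_negative above), so no offset is needed.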
+        self.assertEqual(self.segs._offset, 0)
+
+    def test_extend_false(self) -> None:
+        n_perseg = 1024
+        n_overlap = 512
+        n_segs = 30
+        seg_params = SegmentationParams(n_perseg, n_overlap, extend=False,
+                                        pad=True)
+        segs = Segments(seg_params, np.empty((n_perseg, n_segs)))
+        self.assertEqual(segs._offset, n_perseg//2)
+
+
+class TestSegmentation(unittest.TestCase):
+    """
+       nps -> n_perseg
+       nol -> n_overlap
+       gt  -> greater than
+       lt  -> less than
+       eq  -> equal to
+    """
+    def setUp(self) -> None:
+        self.snd = AudioFile('audio/beat_5ch.wav')
+
+    @given(integers(max_value=0))
+    def test_nps_gt_zero(self, n_perseg) -> None:
+        with self.assertRaises(ValueError):
+            Segmentation(n_perseg, 1)
+
+    @given(data())
+    def test_nps_lteq_nframes(self, data) -> None:
+        n_perseg = data.draw(integers(min_value=self.snd.n_frames+1))
+        cutter = Segmentation(n_perseg, 1)
+        with self.assertRaises(ValueError):
+            cutter.transform(self.snd.data.squeeze())
+
+    @given(data())
+    def test_nol_gt_zero(self, data) -> None:
+        n_perseg = data.draw(self._valid_nps())
+        n_overlap = data.draw(integers(max_value=0))
+        with self.assertRaises(ValueError):
+            Segmentation(n_perseg, n_overlap)
+
+    @given(data())
+    def test_nol_lt_nps(self, data) -> None:
+        n_perseg = data.draw(self._valid_nps())
+        n_overlap = data.draw(integers(min_value=n_perseg))
+        with self.assertRaises(ValueError):
+            Segmentation(n_perseg, n_overlap)
+
+    @given(data())
+    def test_inp_lt_three(self, data) -> None:
+        n_perseg = data.draw(self._valid_nps())
+        n_overlap = data.draw(_valid_nol(n_perseg))
+        inp_shape = data.draw(array_shapes(min_dims=3))
+        inp = np.empty(inp_shape)
+        cutter = Segmentation(n_perseg, n_overlap)
+        with self.assertRaises(ValueError):
+            cutter.transform(inp)
+
+    @given(integers(min_value=2, max_value=1000))
+    def test_inp2d_only_one_col(self, n_cols) -> None:
+        n_frames = 1000
+        n_perseg = 50
+        n_overlap = 10
+        inp = np.empty((n_frames, n_cols))
+        cutter = Segmentation(n_perseg, n_overlap)
+        with self.assertRaises(ValueError):
+            cutter.transform(inp)
+
+    def _valid_nps(self):
+        return _valid_nps(self.snd.n_frames)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_som.py b/tests/test_som.py
deleted file mode 100644
index ac89f7ee55d4f7f9bcc9778cbd077e1abc239cdf..0000000000000000000000000000000000000000
--- a/tests/test_som.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/python3
-
-
-import unittest
-import numpy as np
-import scipy as sp
-
-from apollon.som.som import SelfOrganizingMap
-
-
-class TestSelfOrganizingMap(unittest.TestCase):
-    def setUp(self):
-        N = 100
-
-        m1 = (0, 0)
-        m2 = (10, 15)
-        c1 = ((10, 0), (0, 10))
-        c2 = ((2, 0), (0, 2))
-
-        seg1 = np.random.multivariate_normal(m1, c1, N)
-        seg2 = np.random.multivariate_normal(m2, c2, N)
-
-        self.data = np.vstack((seg1, seg2))
-        self.dims = (10, 10, 2)
-
-    def test_init_random(self):
-        som = SelfOrganizingMap(self.dims, init_distr='uniform')
-        self.assertTrue('weights', True)
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/test_tools.py b/tests/test_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..78dd797048636fae52b02495d3556cff38320c8c
--- /dev/null
+++ b/tests/test_tools.py
@@ -0,0 +1,20 @@
+import unittest
+
+import numpy as np
+from scipy.stats import multivariate_normal
+
+from apollon import tools
+
+
+class TestPca(unittest.TestCase):
+    def setUp(self) -> None:
+        mu = (0, 0)
+        cov = ((10, 0), (0, 12))
+        n = 1000
+        self.data = multivariate_normal(mu, cov).rvs(n)
+
+    def test_output_is_tuple(self):
+        self.assertIsInstance(tools.pca(self.data, 2), tuple)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000000000000000000000000000000000000..beab1f325a1935a4f6c92594e614099b4a752c52
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+requires = numpy >= 1.20
+envlist = py37,py38,py39
+
+[testenv]
+deps =
+	pytest
+	hypothesis
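+
+# A commands entry is assumed here; without one tox installs the package
+# and the deps above but runs no tests:
+commands = pytest {posargs}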