repo stringlengths 2-91 | file stringlengths 14-211 | code stringlengths 0-18.3M | file_length int64 0-18.3M | avg_line_length float64 0-1.36M | max_line_length int64 0-1.36M | extension_type stringclasses 1 value (py) |
---|---|---|---|---|---|---|
OpenBCIPython | OpenBCIPython-master/py_qt/py_local_linear.py | from __future__ import division, absolute_import, print_function
import numpy as np
def local_linear_1d(bw, xdata, ydata, points, kernel, out):
r'''
    We are trying to find the fit at points :math:`x` using a Gaussian kernel.
Given the following definitions:
.. math::
x_0 &=& x-x_i
\begin{array}{rlc|rlc}
w_i &=& \mathcal{K}\left(\frac{x_0}{h}\right) & W &=& \sum_i w_i \\
        X &=& \sum_i w_i x_0 & X_2 &=& \sum_i w_i x_0^2 \\
Y &=& \sum_i w_i y_i & Y_2 &=& \sum_i w_i y_i x_0
\end{array}
The fitted value is given by:
.. math::
        f(x) = \frac{X_2 Y - X Y_2}{W X_2 - X^2}
'''
x0 = points - xdata[:, np.newaxis]
x02 = x0 * x0
    # Note: the 'kernel' argument is not used here; the Gaussian kernel described
    # in the docstring is hard-coded below.
    # wi = kernel(x0 / bw)
    wi = np.exp(-x02 / (2.0 * bw * bw))
X = np.sum(wi * x0, axis=0)
X2 = np.sum(wi * x02, axis=0)
wy = wi * ydata[:, np.newaxis]
Y = np.sum(wy, axis=0)
Y2 = np.sum(wy * x0, axis=0)
W = np.sum(wi, axis=0)
return None, np.divide(X2 * Y - Y2 * X, W * X2 - X * X, out)
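# Hedged usage sketch (not part of the original module): the names xs, ys and grid
# below are illustrative. The 'kernel' argument is ignored by this implementation
# (the Gaussian is hard-coded) and passing out=None lets NumPy allocate the result.
if __name__ == '__main__':
    xs = np.linspace(0.0, 10.0, 200)
    ys = np.sin(xs) + 0.1 * np.random.randn(200)
    grid = np.linspace(0.0, 10.0, 50)
    _, fitted = local_linear_1d(0.5, xs, ys, grid, None, None)
    print(fitted.shape)  # expected: (50,)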
| 1,039 | 25 | 82 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/binning.py | try:
from cy_binning import fast_linbin as fast_bin
except ImportError:
from py_binning import fast_bin
def usePython():
global fast_bin
    from py_binning import fast_bin
def useCython():
global fast_bin
    from cy_binning import fast_linbin as fast_bin
| 274 | 18.642857 | 50 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/loader.py | from __future__ import print_function, absolute_import
import inspect
from path import path
import imp
import sys
import re
bad_chars = re.compile(u'\W')
python_version = sys.version_info
if python_version.major == 2 and python_version.minor == 7:
if sys.platform == 'win32' or sys.platform == 'cygwin':
module_exts = ['.dll']
elif sys.platform == 'darwin':
module_exts = ['.dylib']
else:
module_exts = ['.so']
module_exts += ['.pyx', '.pyc', '.py']
def load_module(pack_name, module_name, search_path):
""" Version for Python 2.7 """
mod_desc = imp.find_module(module_name, [search_path])
return imp.load_module(pack_name, *mod_desc)
elif python_version.major == 3 and python_version.minor >= 3:
from importlib import machinery as ilm
module_exts = ilm.all_suffixes()
module_exts.append('.pyx')
module_exts = module_exts[::-1]
def create_loader(pack_name, filepath):
ext = filepath.ext
if ext in ilm.SOURCE_SUFFIXES:
return ilm.SourceFileLoader(pack_name, str(filepath))
if ext in ilm.BYTECODE_SUFFIXES:
return ilm.SourcelessFileLoader(pack_name, str(filepath))
if ext in ilm.EXTENSION_SUFFIXES:
return ilm.ExtensionFileLoader(pack_name, str(filepath))
if python_version.minor == 3:
def create_module(loader):
" Version for Python 3.3 "
return loader.load_module()
else:
from types import ModuleType
def create_module(loader):
" Version for Python 3.4 or later "
mod = ModuleType(loader.name)
loader.exec_module(mod)
return mod
module_loaders = [ (ilm.EXTENSION_SUFFIXES, ilm.ExtensionFileLoader),
(ilm.SOURCE_SUFFIXES, ilm.SourceFileLoader),
(ilm.BYTECODE_SUFFIXES, ilm.SourcelessFileLoader) ]
def load_module(pack_name, module_name, search_path):
pth = path(search_path) / module_name
for exts, loader_cls in module_loaders:
for ext in exts:
filename = pth + ext
if filename.exists():
loader = loader_cls(pack_name, str(filename))
mod = create_module(loader)
if mod is not None:
return mod
else:
raise ImportError("This module can only be imported with python 2.7 and 3.x where x >= 3")
def load(find_functions, search_path=None):
"""
Load the modules in the search_path.
If search_path is None, then load modules in the same folder as the function looking for them.
"""
caller_module = inspect.getmodule(inspect.stack()[1][0])
system_files = [caller_module.__file__]
module_path = path(caller_module.__file__).abspath().dirname()
sys_files = set()
for f in system_files:
if f.endswith(".pyo") or f.endswith(".pyc"):
f = f[:-3] + "py"
sys_files.add(path(f).abspath())
if search_path is None:
search_path = module_path
else:
search_path = path(search_path).abspath()
fcts = {}
# Search for python, cython and modules
modules = set()
for ext in module_exts:
for f in search_path.files("*" + ext):
if f.basename()[:2] != '__':
module_name = f.namebase
modules.add(module_name)
for module_name in modules:
pack_name = '%s.%s_%s' % (caller_module.__name__,
bad_chars.sub('_', module_path),
module_name)
try:
mod = load_module(pack_name, module_name, search_path)
fcts.update(find_functions(mod))
except ImportError as ex:
print("Warning, cannot import module '{0}' from {1}: {2}"
.format(module_name, caller_module.__name__, ex), file=sys.stderr)
return fcts
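# Hedged usage sketch (not part of the original module): 'load' expects a callable
# that inspects each imported module and returns a dict of the objects to collect.
# The 'kernel_' prefix used below is purely hypothetical.
def _example_find_functions(mod):
    """Collect callables whose names start with the hypothetical prefix 'kernel_'."""
    return dict((name, obj) for name, obj in vars(mod).items()
                if callable(obj) and name.startswith('kernel_'))
if __name__ == '__main__':
    # Scans this file's folder; modules that fail to import are reported on stderr.
    print(load(_example_find_functions))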
| 3,942 | 33.893805 | 98 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/kde_bandwidth.py | from __future__ import division
import numpy as np
from utils import finite
from scipy import fftpack, optimize
from utils import large_float
from compat import irange
def variance_bandwidth(factor, xdata):
r"""
Returns the covariance matrix:
.. math::
\mathcal{C} = \tau^2 cov(X)
where :math:`\tau` is a correcting factor that depends on the method.
"""
data_covariance = np.atleast_2d(np.cov(xdata, rowvar=1, bias=False))
sq_bandwidth = data_covariance * factor * factor
return sq_bandwidth
def silverman_covariance(xdata, model=None):
r"""
The Silverman bandwidth is defined as a variance bandwidth with factor:
.. math::
\tau = \left( n \frac{d+2}{4} \right)^\frac{-1}{d+4}
"""
xdata = np.atleast_2d(xdata)
d, n = xdata.shape
return variance_bandwidth(np.power(n * (d + 2.) / 4.,
-1. / (d + 4.)), xdata)
def scotts_covariance(xdata, model=None):
r"""
The Scotts bandwidth is defined as a variance bandwidth with factor:
.. math::
\tau = n^\frac{-1}{d+4}
"""
xdata = np.atleast_2d(xdata)
d, n = xdata.shape
return variance_bandwidth(np.power(n, -1. / (d + 4.)), xdata)
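# Hedged usage note (not part of the original module): both rules return a squared
# bandwidth (a d x d matrix proportional to the sample covariance), so for a 1D
# sample 'xdata' a scalar bandwidth is obtained as
#     h = np.sqrt(scotts_covariance(xdata))[0, 0]
# and analogously with silverman_covariance.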
def _botev_fixed_point(t, M, I, a2):
l = 7
I = large_float(I)
M = large_float(M)
a2 = large_float(a2)
f = 2 * np.pi ** (2 * l) * np.sum(I ** l * a2 *
np.exp(-I * np.pi ** 2 * t))
for s in irange(l, 1, -1):
K0 = np.prod(np.arange(1, 2 * s, 2)) / np.sqrt(2 * np.pi)
const = (1 + (1 / 2) ** (s + 1 / 2)) / 3
time = (2 * const * K0 / M / f) ** (2 / (3 + 2 * s))
f = 2 * np.pi ** (2 * s) * \
np.sum(I ** s * a2 * np.exp(-I * np.pi ** 2 * time))
return t - (2 * M * np.sqrt(np.pi) * f) ** (-2 / 5)
class botev_bandwidth(object):
"""
    Implementation of the KDE bandwidth selection method outlined in:
Z. I. Botev, J. F. Grotowski, and D. P. Kroese. Kernel density
estimation via diffusion. The Annals of Statistics, 38(5):2916-2957, 2010.
Based on the implementation of Daniel B. Smith, PhD.
The object is a callable returning the bandwidth for a 1D kernel.
"""
def __init__(self, N=None, **kword):
if 'lower' in kword or 'upper' in kword:
print("Warning, using 'lower' and 'upper' for botev bandwidth is "
"deprecated. Argument is ignored")
self.N = N
def __call__(self, data, model):
"""
Returns the optimal bandwidth based on the data
"""
N = 2 ** 10 if self.N is None else int(2 ** np.ceil(np.log2(self.N)))
lower = getattr(model, 'lower', None)
upper = getattr(model, 'upper', None)
if not finite(lower) or not finite(upper):
minimum = np.min(data)
maximum = np.max(data)
span = maximum - minimum
lower = minimum - span / 10 if not finite(lower) else lower
upper = maximum + span / 10 if not finite(upper) else upper
# Range of the data
span = upper - lower
# Histogram of the data to get a crude approximation of the density
weights = model.weights
if not weights.shape:
weights = None
M = len(data)
DataHist, bins = np.histogram(data, bins=N, range=(lower, upper), weights=weights)
DataHist = DataHist / M
DCTData = fftpack.dct(DataHist, norm=None)
I = np.arange(1, N, dtype=int) ** 2
SqDCTData = (DCTData[1:] / 2) ** 2
guess = 0.1
try:
t_star = optimize.brentq(_botev_fixed_point, 0, guess,
args=(M, I, SqDCTData))
except ValueError:
t_star = .28 * N ** (-.4)
return np.sqrt(t_star) * span
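# Hedged usage sketch (not part of the original module): _FakeModel is a hypothetical
# stand-in exposing only the attributes __call__ reads (weights, lower, upper); it
# assumes utils.finite treats +/-inf as non-finite, like np.isfinite.
if __name__ == '__main__':
    class _FakeModel(object):
        weights = np.asarray(1.0)  # zero-dim array -> treated as "no weights"
        lower = -np.inf            # non-finite bounds -> derived from the data
        upper = np.inf
    sample_data = np.random.randn(1000)
    print(botev_bandwidth()(sample_data, _FakeModel()))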
| 3,799 | 30.404959 | 90 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/_kernels_py.py | """
Pure Python implementation of the kernel functions
"""
import numpy as np
from scipy.special import erf
from utils import numpy_trans, numpy_trans_idx
s2pi = np.sqrt(2.0 * np.pi)
s2 = np.sqrt(2.0)
@numpy_trans
def norm1d_pdf(z, out):
"""
Full-python implementation of :py:func:`normal_kernel1d.pdf`
"""
z = np.atleast_1d(z)
if out is None:
out = np.empty(z.shape, dtype=z.dtype)
np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
out /= s2pi
return out
@numpy_trans
def norm1d_cdf(z, out):
"""
Full-python implementation of :py:func:`normal_kernel1d.cdf`
"""
np.divide(z, s2, out)
erf(out, out)
out *= 0.5
out += 0.5
return out
@numpy_trans
def norm1d_pm1(z, out):
"""
Full-python implementation of :py:func:`normal_kernel1d.pm1`
"""
np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
out /= -s2pi
return out
@numpy_trans_idx
def norm1d_pm2(z, out):
"""
Full-python implementation of :py:func:`normal_kernel1d.pm2`
"""
np.divide(z, s2, out)
erf(out, out)
out /= 2
if z.shape:
zz = np.isfinite(z)
sz = z[zz]
out[zz] -= sz * np.exp(-0.5 * sz * sz) / s2pi
elif np.isfinite(z):
out -= z * np.exp(-0.5 * z * z) / s2pi
out += 0.5
return out
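# Hedged check sketch (not part of the original module): the Gaussian kernel above
# should agree with scipy.stats.norm, e.g. (assuming the numpy_trans wrapper
# allocates the output array when 'out' is omitted):
#     from scipy.stats import norm
#     z = np.linspace(-3.0, 3.0, 7)
#     np.allclose(norm1d_pdf(z), norm.pdf(z)) and np.allclose(norm1d_cdf(z), norm.cdf(z))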
tricube_width = np.sqrt(35. / 243)
@numpy_trans_idx
def tricube_pdf(z, out=None):
np.multiply(z, tricube_width, out)
sel = (out > -1) & (out < 1)
out[~sel] = 0
out[sel] = 70. / 81 * (1 - abs(out[sel]) ** 3.) ** 3. * tricube_width
return out
@numpy_trans_idx
def tricube_cdf(z, out=None):
np.multiply(z, tricube_width, out)
sel_down = out <= -1
sel_up = out >= 1
sel_neg = (out < 0) & (~sel_down)
sel_pos = (out >= 0) & (~sel_up)
out[sel_up] = 1
out[sel_down] = 0
out[sel_pos] = 1. / 162 * \
(60 * (out[sel_pos] ** 7) - 7. *
(2 * (out[sel_pos] ** 10) + 15 * (out[sel_pos] ** 4)) +
140 * out[sel_pos] + 81)
out[sel_neg] = 1. / 162 * \
(60 * (out[sel_neg] ** 7) + 7. *
(2 * (out[sel_neg] ** 10) + 15 * (out[sel_neg] ** 4)) +
140 * out[sel_neg] + 81)
return out
@numpy_trans_idx
def tricube_pm1(z, out=None):
np.multiply(z, tricube_width, out)
out[out < 0] = -out[out < 0]
sel = out < 1
out[~sel] = 0
out[sel] = 7 / (3564 * tricube_width) * \
(165 * out[sel] ** 8 - 8 * (5 * out[sel] ** 11 + 33 * out[sel] ** 5) +
220 * out[sel] ** 2 - 81)
return out
@numpy_trans_idx
def tricube_pm2(z, out=None):
np.multiply(z, tricube_width, out)
sel_down = out <= -1
sel_up = out >= 1
sel_neg = (out < 0) & ~sel_down
sel_pos = (out >= 0) & ~sel_up
out[sel_down] = 0
out[sel_up] = 1
out[sel_pos] = 35. / (tricube_width * tricube_width * 486) * \
(4 * out[sel_pos] ** 9 - (out[sel_pos] ** 12 + 6 * out[sel_pos] ** 6) +
4 * out[sel_pos] ** 3 + 1)
out[sel_neg] = 35. / (tricube_width * tricube_width * 486) * \
(4 * out[sel_neg] ** 9 + (out[sel_neg] ** 12 + 6 * out[sel_neg] ** 6) +
4 * out[sel_neg] ** 3 + 1)
return out
epanechnikov_width = 1. / np.sqrt(5.)
@numpy_trans_idx
def epanechnikov_pdf(z, out=None):
np.multiply(z, epanechnikov_width, out)
sel = (out > -1) & (out < 1)
out[~sel] = 0
out[sel] = (.75 * epanechnikov_width) * (1 - out[sel] ** 2)
return out
@numpy_trans_idx
def epanechnikov_cdf(z, out=None):
np.multiply(z, epanechnikov_width, out)
sel_up = out >= 1
sel_down = out <= -1
out[sel_up] = 1
out[sel_down] = 0
sel = ~(sel_up | sel_down)
out[sel] = .25 * (2 + 3 * out[sel] - out[sel] ** 3)
return out
@numpy_trans_idx
def epanechnikov_pm1(z, out=None):
np.multiply(z, epanechnikov_width, out)
sel = (out > -1) & (out < 1)
out[~sel] = 0
out[sel] = -3 / (16 * epanechnikov_width) * \
(1 - 2 * out[sel] ** 2 + out[sel] ** 4)
return out
@numpy_trans_idx
def epanechnikov_pm2(z, out=None):
np.multiply(z, epanechnikov_width, out)
sel_up = out >= 1
sel_down = out <= -1
out[sel_up] = 1
out[sel_down] = 0
sel = ~(sel_up | sel_down)
out[sel] = .25 * (2 + 5 * out[sel] ** 3 - 3 * out[sel] ** 5)
return out
@numpy_trans
def normal_o4_pdf(z, out=None):
norm1d_pdf(z, out)
out *= (3 - z ** 2) / 2
return out
@numpy_trans_idx
def normal_o4_cdf(z, out=None):
norm1d_cdf(z, out)
sel = np.isfinite(z)
out[sel] += z[sel] * norm1d_pdf(z[sel]) / 2
return out
@numpy_trans_idx
def normal_o4_pm1(z, out=None):
norm1d_pdf(z, out)
out -= normal_o4_pdf(z)
out[~np.isfinite(z)] = 0
return out
@numpy_trans_idx
def normal_o4_pm2(z, out=None):
np.power(z, 3, out)
out *= norm1d_pdf(z) / 2
out[~np.isfinite(z)] = 0
return out
@numpy_trans_idx
def epanechnikov_o4_pdf(z, out=None):
np.power(z, 2., out)
out *= -15 / 8.
out += 9. / 8.
out[(z < -1) | (z > 1)] = 0
return out
@numpy_trans_idx
def epanechnikov_o4_cdf(z, out=None):
np.power(z, 3, out)
out *= -5. / 8.
out += (4 + 9 * z) / 8.
out[z > 1] = 1
out[z < -1] = 0
return out
@numpy_trans_idx
def epanechnikov_o4_pm1(z, out=None):
out = np.power(z, 4, out)
out *= -15. / 32.
out += 1. / 32. * (18 * z ** 2 - 3)
out[(z < -1) | (z > 1)] = 0
return out
@numpy_trans_idx
def epanechnikov_o4_pm2(z, out=None):
out = np.power(z, 3, out)
out *= .375
out -= .375 * np.power(z, 5)
out[(z < -1) | (z > 1)] = 0
return out
| 5,587 | 21.995885 | 79 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/bootstrap_workers.py | import sys
import traceback
from compat import irange, izip
nx = ny = 1
shuffled_x = shuffled_y = None
fit_args = ()
fit_kwrds = {}
fit = None
result_array = None
eval_points = None
extra_arrays = None
extra_attrs = None
def initialize_shared(nx, ny, result_array, extra_arrays, shuffled_x,
shuffled_y, eval_points, extra_attrs, fit,
fit_args, fit_kwrds):
initialize(nx, ny, result_array.np, [ea.np for ea in extra_arrays],
shuffled_x.np, shuffled_y.np, eval_points.np, extra_attrs,
fit, fit_args, fit_kwrds)
def initialize(nx, ny, result_array, extra_arrays, shuffled_x, shuffled_y,
eval_points, extra_attrs, fit, fit_args, fit_kwrds):
globals().update(locals())
def bootstrap_result(worker, start_repeats, end_repeats):
#print("Starting worker {} from {} to {}".format(worker, start_repeats, end_repeats))
try:
for i in irange(start_repeats, end_repeats):
#print("Worker {} runs iteration {} with fit: {}".format(worker, i, fit))
new_fit = fit(shuffled_x[..., i % nx, :], shuffled_y[i % ny, :],
*fit_args, **fit_kwrds)
new_fit.fit()
#print("new_fit = {}".format(new_fit))
result_array[i + 1] = new_fit(eval_points)
for ea, attr in izip(extra_arrays, extra_attrs):
ea[i + 1] = getattr(new_fit, attr)
except Exception:
traceback.print_exc(None, sys.stderr)
raise
#print "Worker {} finished".format(worker)
| 1,564 | 33.777778 | 89 | py |
OpenBCIPython | OpenBCIPython-master/visualization/panda_try.py | from scipy.signal import butter, filtfilt
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame, Series
from scipy import signal
fsamp = 250
tsample = 1 / fsamp
f_low = 50
f_high = 1
order = 2
channel_vector = [1,2, 3, 4, 5]
n_ch = len(channel_vector)
df = pd.read_csv("/home/runge/openbci/application.linux64/application.linux64/OpenBCI-RAW-right_strait_up_new.txt")
df = df[channel_vector].dropna(axis=0)
processed_signal = df.copy()
b, a = butter(order, (order * f_low * 1.0) / fsamp * 1.0, btype='low')
for i in range(0, n_ch):
processed_signal.ix[:, i] = np.transpose(filtfilt(b, a, df.ix[:, i]))
b1, a1 = butter(order, (order * f_high * 1.0) / fsamp * 1.0, btype='high')
for i in range(0, n_ch):
processed_signal.ix[:, i] = np.transpose(filtfilt(b1, a1, processed_signal.ix[:, i]))
Wn = (np.array([58.0, 62.0]) / 500 * order).tolist()
b3, a3 = butter(order, Wn, btype='stop')
for i in range(0, n_ch):
processed_signal.ix[:, i] = np.transpose(filtfilt(b3, a3, processed_signal.ix[:, i]))
start = 7000
end = 8000
plt.figure(figsize=(12, 8))
for h in range(0, n_ch):
plt.subplot(5,1,h+1)
# f, Pxx_spec = signal.periodogram(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop',
# scaling='spectrum')
# f, Pxx_spec = signal.welch(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop', 128, scaling='spectrum')
# wavelet = signal.ricker
# widths = np.arange(1, 11)
# cwtmatr = signal.cwt(processed_signal.ix[:, h][start * fsamp:end * fsamp], wavelet, widths)
plt.plot(processed_signal.ix[:, h][start :end])
# plt.semilogy(fsamp, np.sqrt(Pxx_spec))
# plt.ylim([1e-4, 1e1])
plt.show()
# plt.figure()
# plt.semilogy(f, np.sqrt(Pxx_spec))
# plt.ylim([1e-4, 1e1])
# plt.xlabel('frequency [Hz]')
# plt.ylabel('Linear spectrum [V RMS]')
# plt.show()
print("---")
| 1,932 | 31.216667 | 129 | py |
OpenBCIPython | OpenBCIPython-master/visualization/gvgvg.py | from scipy.signal import butter, filtfilt
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame, Series
import neo
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_multitaper
print(__doc__)
sfreq = 250
tsample = 1 / sfreq
f_low = 50
f_high = 1
order = 2
channel_vector = [1, 2, 3, 4, 5]
data = []
ch_types = []
ch_names = []
n_ch = len(channel_vector)
start=0
end=2
df = pd.read_csv("/home/runge/openbci/application.linux64/application.linux64/OpenBCI-RAW-right_strait_up_new.txt")
df = df[channel_vector].dropna(axis=0)
for i in range(0, n_ch):
# dfm[i].ix[:, 0] = ((dfm[i].ix[:,0] - dfm[i].ix[:,0].min(0)) / dfm[i].ix[:,0].ptp(0))
data.append(np.array(df.ix[:,i].tolist()[int(start*sfreq):int(end*sfreq)]))
ch_types.append('mag')
ch_names.append(df.ix[:,i].name)
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
raw = mne.io.RawArray(data, info)
scalings = 'auto'
raw.plot(n_channels=n_ch, scalings=scalings, title='MEG data visualization over time', show=True, block=True)
# Set parameters
# data_path = sample.data_path()
# raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
#
# df1 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/3noise_signal.csv")
# df1 = df1.dropna(axis=0)
#
# df2 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/3noise_reduced_signal.csv")
# df2 = df2.dropna(axis=0)
#
# df3 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/reconstructed_mod.csv")
# df3 = df3.dropna(axis=0)
#
# df4 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2feature_vector.csv")
# df4 = df4.dropna(axis=0)
#
# dfm = []
# dfm.append(df1)
# dfm.append(df2)
# dfm.append(df3)
# dfm.append(df4)
# max_length = len(df3.ix[:,0])
#
# dfm_len = 3
#
# for i in range(0, dfm_len):
# # dfm[i].ix[:, 0] = ((dfm[i].ix[:,0] - dfm[i].ix[:,0].min(0)) / dfm[i].ix[:,0].ptp(0))
# data.append(np.array(dfm[i].ix[:,0].tolist()[0:max_length]))
# ch_types.append('mag')
# ch_names.append(dfm[i].ix[:,0].name)
# info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
# raw = mne.io.RawArray(data, info)
# scalings = 'auto'
# raw.plot(n_channels=dfm_len, scalings=scalings, title='MEG data visualization over time', show=True, block=True)
#
# start = 10
# end = 11
# plt.figure(figsize=(12, 8))
# for h in range(1, dfm_len):
# # plt.subplot(dfm_len-1,1,h)
# # plt.plot(data[h][int(start*sfreq):int(end*sfreq)])
# plt.plot(data[h][int(start*sfreq):int(end*sfreq)])
# plt.show()
#
# start = 30
# end = 50
# number_of_plots = end-start
# plt.figure(figsize=(12, 8))
# for h in range(0, number_of_plots):
# plt.subplot(number_of_plots,1,h+1)
# plt.plot(df4.ix[start+h].tolist())
# plt.show()
# fmin, fmax = 2, 300
# tmin, tmax = 0, 130
# n_fft = 64
#
# # Let's first check out all channel types
# # raw.plot_psd(area_mode='range', tmax=10.0, show=False)
#
# picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
# stim=False)
# picks = picks[:1]
#
#
# f, ax = plt.subplots()
# psds, freqs = psd_multitaper(raw, low_bias=True, tmin=tmin, tmax=tmax,
# fmin=fmin, fmax=fmax, proj=True, picks=picks,
# n_jobs=1)
# psds = 10 * np.log10(psds)
# psds_mean = psds.mean(0)
# psds_std = psds.std(0)
#
# ax.plot(freqs, psds_mean, color='k')
# ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
# color='k', alpha=.5)
# ax.set(title='Multitaper PSD', xlabel='Frequency',
# ylabel='Power Spectral Density (dB)')
# plt.show()
# EpochsArray
# event_id = 1
# events = np.array([[200, 0, event_id],
# [1200, 0, event_id],
# [2000, 0, event_id]])
# epochs_data = np.array([[data[0][:700], data[1][:700]],
# [data[0][1000:1700], data[1][1000:1700]],
# [data[0][1800:2500], data[1][1800:2500]]])
#
# epochs = mne.EpochsArray(epochs_data, info=info, events=events,
# event_id={'arbitrary': 1})
# picks = mne.pick_types(info, meg=True, eeg=False, misc=False)
# epochs.plot(picks=picks, scalings='auto', show=True, block=True)
# ###############################################################################
# # EvokedArray
#
# nave = len(epochs_data) # Number of averaged epochs
# evoked_data = np.mean(epochs_data, axis=0)
#
# evokeds = mne.EvokedArray(evoked_data, info=info, tmin=-0.2,
# comment='Arbitrary', nave=nave)
# evokeds.plot(picks=picks, show=True, units={'mag': '-'},
# titles={'mag': 'sin and cos averaged'})
#
# ###############################################################################
# # Create epochs by windowing the raw data.
#
# # The events are spaced evenly every 1 second.
# duration = 1.
#
# # create a fixed size events array
# # start=0 and stop=None by default
# events = mne.make_fixed_length_events(raw, event_id, duration=duration)
# print(events)
#
# # for fixed size events no start time before and after event
# tmin = 0.
# tmax = 0.99 # inclusive tmax, 1 second epochs
#
# # create :class:`Epochs <mne.Epochs>` object
# epochs = mne.Epochs(raw, events=events, event_id=event_id, tmin=tmin,
# tmax=tmax, baseline=None, verbose=True)
# epochs.plot(scalings='auto', block=True)
#
# ###############################################################################
# # Create overlapping epochs using :func:`mne.make_fixed_length_events` (50 %
# # overlap). This also roughly doubles the amount of events compared to the
# # previous event list.
#
# duration = 0.5
# events = mne.make_fixed_length_events(raw, event_id, duration=duration)
# print(events)
# epochs = mne.Epochs(raw, events=events, tmin=tmin, tmax=tmax, baseline=None,
# verbose=True)
# epochs.plot(scalings='auto', block=True)
#
# ###############################################################################
# # Extracting data from NEO file
#
# # The example here uses the ExampleIO object for creating fake data.
# # For actual data and different file formats, consult the NEO documentation.
# reader = neo.io.ExampleIO('fakedata.nof')
# bl = reader.read(cascade=True, lazy=False)[0]
#
# # Get data from first (and only) segment
# seg = bl.segments[0]
# title = seg.file_origin
#
# ch_names = list()
# data = list()
# for asig in seg.analogsignals:
# # Since the data does not contain channel names, channel indices are used.
# ch_names.append(str(asig.channel_index))
# asig = asig.rescale('V').magnitude
# data.append(asig)
#
# sfreq = int(seg.analogsignals[0].sampling_rate.magnitude)
#
# # By default, the channel types are assumed to be 'misc'.
# info = mne.create_info(ch_names=ch_names, sfreq=sfreq)
#
# raw = mne.io.RawArray(data, info)
# raw.plot(n_channels=4, scalings={'misc': 1}, title='Data from NEO',
# show=True, block=True, clipping='clamp')
| 7,183 | 32.413953 | 115 | py |
OpenBCIPython | OpenBCIPython-master/visualization/try_eog.py | import numpy, scipy, pylab, random
# This script demonstrates how to use band-pass (low-pass)
# filtering to eliminate electrical noise and static
# from signal data!
##################
### PROCESSING ###
##################
xs = numpy.arange(1, 100, .01) # generate Xs (0.00,0.01,0.02,0.03,...,100.0)
signal = sin1 = numpy.sin(xs * .3) # (A)
sin1 = numpy.sin(xs) # (B) sin1
sin2 = numpy.sin(xs * 2.33) * .333 # (B) sin2
sin3 = numpy.sin(xs * 2.77) * .777 # (B) sin3
noise = sin1 + sin2 + sin3 # (C)
static = (numpy.random.random_sample((len(xs))) - .5) * .2 # (D)
sigstat = static + signal # (E)
rawsignal = sigstat + noise # (F)
fft = scipy.fft(rawsignal) # (G) and (H)
bp = fft[:]
for i in range(len(bp)): # (H-red)
if i >= 10: bp[i] = 0
ibp = scipy.ifft(bp) # (I), (J), (K) and (L)
################
### GRAPHING ###
################
h, w = 6, 2
pylab.figure(figsize=(12, 9))
pylab.subplots_adjust(hspace=.7)
pylab.subplot(h, w, 1);
pylab.title("(A) Original Signal")
pylab.plot(xs, signal)
pylab.subplot(h, w, 3);
pylab.title("(B) Electrical Noise Sources (3 Sine Waves)")
pylab.plot(xs, sin1, label="sin1")
pylab.plot(xs, sin2, label="sin2")
pylab.plot(xs, sin3, label="sin3")
pylab.legend()
pylab.subplot(h, w, 5);
pylab.title("(C) Electrical Noise (3 sine waves added together)")
pylab.plot(xs, noise)
pylab.subplot(h, w, 7);
pylab.title("(D) Static (random noise)")
pylab.plot(xs, static)
pylab.axis([None, None, -1, 1])
pylab.subplot(h, w, 9);
pylab.title("(E) Signal + Static")
pylab.plot(xs, sigstat)
pylab.subplot(h, w, 11);
pylab.title("(F) Recording (Signal + Static + Electrical Noise)")
pylab.plot(xs, rawsignal)
pylab.subplot(h, w, 2);
pylab.title("(G) FFT of Recording")
fft = scipy.fft(rawsignal)
pylab.plot(abs(fft))
pylab.text(200, 3000, "signals", verticalalignment='top')
pylab.text(9500, 3000, "static", verticalalignment='top',
horizontalalignment='right')
pylab.subplot(h, w, 4);
pylab.title("(H) Low-Pass FFT")
pylab.plot(abs(fft))
pylab.text(17, 3000, "sin1", verticalalignment='top', horizontalalignment='left')
pylab.text(37, 2000, "sin2", verticalalignment='top', horizontalalignment='center')
pylab.text(45, 3000, "sin3", verticalalignment='top', horizontalalignment='left')
pylab.text(6, 3000, "signal", verticalalignment='top', horizontalalignment='left')
pylab.axvspan(10, 10000, fc='r', alpha=.5)
pylab.axis([0, 60, None, None])
pylab.subplot(h, w, 6)
pylab.title("(I) Inverse FFT")
pylab.plot(ibp)
pylab.subplot(h, w, 8)
pylab.title("(J) Signal vs. iFFT")
pylab.plot(signal, 'k', label="signal", alpha=.5)
pylab.plot(ibp, 'b', label="ifft", alpha=.5)
pylab.subplot(h, w, 10)
pylab.title("(K) Normalized Signal vs. iFFT")
pylab.plot(signal / max(signal), 'k', label="signal", alpha=.5)
pylab.plot(ibp / max(ibp), 'b', label="ifft", alpha=.5)
pylab.subplot(h, w, 12)
pylab.title("(L) Difference / Error")
pylab.plot(signal / max(signal) - ibp / max(ibp), 'k')
pylab.savefig("SIG.png", dpi=200)
pylab.show()
# http://www.swharden.com/wp/2009-01-21-signal-filtering-with-python/ | 3,058 | 29.287129 | 83 | py |
OpenBCIPython | OpenBCIPython-master/visualization/draw_graph_kinect_angles.py | import pandas as pd
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from preprocessing.ssa import SingularSpectrumAnalysis
matplotlib.rc('xtick', labelsize=15)
matplotlib.rc('ytick', labelsize=15)
matplotlib.rc('axes', titlesize=20)
matplotlib.rc('legend', fontsize=20)
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('kinect_angles_reconstructed.pdf')
index=0
window_size=128
fsamp = 1
# kinect__angles = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/train/result/reconstructed_kinect__angles_.csv")
raw_kinect_angle_data = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset2017-5-5_23-55-32new_bycept.csv").ix[:,10:13].dropna()
# raw_kinect_angle_data = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/train/result/reconstructed_kinect__angles_.csv").dropna()
raw_kinect_angle_data = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/train/result/reconstructed_kinect__angles_.csv").dropna()
angle_names = ["wrist", "elbow", "shoulder"]
graph_legend = []
handle_as=[]
labels_as=[]
start = 2100
end = 2600
num_ch = 3
fig = plt.figure(figsize=(20, 14))
fig.subplots_adjust(hspace=.5)
for h in range(0, num_ch):
ax = plt.subplot(num_ch,1,h+1)
axes = plt.gca()
# axes.set_ylim([0, 180])
if(end==0):
end = raw_kinect_angle_data.ix[:, h].shape[0]-1
input_signal = raw_kinect_angle_data.ix[:, h][start * fsamp:end * fsamp]
x = np.arange(start,end,1)
# mean = np.mean(input_signal, axis=0)
# input_signal -= mean
# input_signal=input_signal / np.std(input_signal, axis=0)
l1 = ax.plot(x,input_signal, linewidth=3.0, label="raw signal")
graph_legend.append(l1)
# reconstructed_signal = SingularSpectrumAnalysis(input_signal, window_size, False).execute(1)
# l2 = ax.plot(x,reconstructed_signal, linewidth=3.0, label='reconstructed signal with SSA')
# graph_legend.append(l2)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(angle_names[h])
# leg = plt.legend(handles=handles, labels=labels)
fig.legend(handles=handle_as[0], labels=labels_as[0])
fig.text(0.5, 0.04, 'position', ha='center', fontsize=20)
fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=20)
plt.show()
#
# pp.savefig(bbox_inches='tight')
# pp.close()
| 2,525 | 34.577465 | 149 | py |
OpenBCIPython | OpenBCIPython-master/visualization/plot_objects_from_arrays.py | from scipy.signal import butter, filtfilt
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame, Series
import neo
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_multitaper
print(__doc__)
sfreq = 250
tsample = 1 / sfreq
f_low = 50
f_high = 1
order = 2
channel_vector = [1,2, 3, 4, 5,6,7,8,9,10,11,12]
data = []
ch_types = []
ch_names = []
n_ch = 5
df = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/train/result/raw_reconstructed_signals.csv")
df = df.dropna(axis=0)
# Set parameters
# data_path = sample.data_path()
# raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
for i in range(0,n_ch):
data.append(df.ix[:,i])
ch_types.append('mag')
ch_names.append("kkkk"+str(i))
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
raw = mne.io.RawArray(data, info)
scalings = 'auto'
# Add SSP projection vectors to reduce EOG and ECG artifacts
# projs = read_proj(proj_fname)
# raw.add_proj(projs, remove_existing=True)
raw.plot(n_channels=n_ch, scalings=scalings, title='MEG data visualization over time', show=True, block=True)
# EpochsArray
# event_id = 1
# events = np.array([[200, 0, event_id],
# [1200, 0, event_id],
# [2000, 0, event_id]])
# epochs_data = np.array([[data[0][:700], data[1][:700]],
# [data[0][1000:1700], data[1][1000:1700]],
# [data[0][1800:2500], data[1][1800:2500]]])
#
# epochs = mne.EpochsArray(epochs_data, info=info, events=events,
# event_id={'arbitrary': 1})
# picks = mne.pick_types(info, meg=True, eeg=False, misc=False)
# epochs.plot(picks=picks, scalings='auto', show=True, block=True)
# ###############################################################################
# # EvokedArray
#
# nave = len(epochs_data) # Number of averaged epochs
# evoked_data = np.mean(epochs_data, axis=0)
#
# evokeds = mne.EvokedArray(evoked_data, info=info, tmin=-0.2,
# comment='Arbitrary', nave=nave)
# evokeds.plot(picks=picks, show=True, units={'mag': '-'},
# titles={'mag': 'sin and cos averaged'})
#
# ###############################################################################
# # Create epochs by windowing the raw data.
#
# # The events are spaced evenly every 1 second.
# duration = 1.
#
# # create a fixed size events array
# # start=0 and stop=None by default
# events = mne.make_fixed_length_events(raw, event_id, duration=duration)
# print(events)
#
# # for fixed size events no start time before and after event
# tmin = 0.
# tmax = 0.99 # inclusive tmax, 1 second epochs
#
# # create :class:`Epochs <mne.Epochs>` object
# epochs = mne.Epochs(raw, events=events, event_id=event_id, tmin=tmin,
# tmax=tmax, baseline=None, verbose=True)
# epochs.plot(scalings='auto', block=True)
#
# ###############################################################################
# # Create overlapping epochs using :func:`mne.make_fixed_length_events` (50 %
# # overlap). This also roughly doubles the amount of events compared to the
# # previous event list.
#
# duration = 0.5
# events = mne.make_fixed_length_events(raw, event_id, duration=duration)
# print(events)
# epochs = mne.Epochs(raw, events=events, tmin=tmin, tmax=tmax, baseline=None,
# verbose=True)
# epochs.plot(scalings='auto', block=True)
#
# ###############################################################################
# # Extracting data from NEO file
#
# # The example here uses the ExampleIO object for creating fake data.
# # For actual data and different file formats, consult the NEO documentation.
# reader = neo.io.ExampleIO('fakedata.nof')
# bl = reader.read(cascade=True, lazy=False)[0]
#
# # Get data from first (and only) segment
# seg = bl.segments[0]
# title = seg.file_origin
#
# ch_names = list()
# data = list()
# for asig in seg.analogsignals:
# # Since the data does not contain channel names, channel indices are used.
# ch_names.append(str(asig.channel_index))
# asig = asig.rescale('V').magnitude
# data.append(asig)
#
# sfreq = int(seg.analogsignals[0].sampling_rate.magnitude)
#
# # By default, the channel types are assumed to be 'misc'.
# info = mne.create_info(ch_names=ch_names, sfreq=sfreq)
#
# raw = mne.io.RawArray(data, info)
# raw.plot(n_channels=4, scalings={'misc': 1}, title='Data from NEO',
# show=True, block=True, clipping='clamp')
| 4,672 | 32.141844 | 115 | py |
OpenBCIPython | OpenBCIPython-master/visualization/draw_graph_23_15.py | import json
import pandas as pd
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import butter, filtfilt
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame, Series
from scipy import signal
from preprocessing.preprocessing import PreProcessor
from preprocessing.ssa import SingularSpectrumAnalysis
matplotlib.rc('xtick', labelsize=15)
matplotlib.rc('ytick', labelsize=15)
matplotlib.rc('axes', titlesize=20)
matplotlib.rc('legend', fontsize=20)
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('RAW_EMG_SIGNALS.pdf')
index=0
window_size=16
fsamp = 256
tsample = 1 / fsamp
f_low = 128
f_high = 1
order = 2
channels_names = ["ch1", "ch2", "ch3", "ch4", "ch5"]
n_ch = len(channels_names)
df = pd.read_csv("/home/runge/openbci/application.linux64/application.linux64/OpenBCI-RAW-right_strait_up_new.txt")
df = df[channels_names].dropna(axis=0)
processed_signal = df.copy()
b, a = butter(order, (order * f_low * 1.0) / fsamp * 1.0, btype='low')
for i in range(0, n_ch):
processed_signal.ix[:, i] = np.transpose(filtfilt(b, a, df.ix[:, i]))
b1, a1 = butter(order, (order * f_high * 1.0) / fsamp * 1.0, btype='high')
for i in range(0, n_ch):
processed_signal.ix[:, i] = np.transpose(filtfilt(b1, a1, processed_signal.ix[:, i]))
Wn = (np.array([58.0, 62.0]) / 500 * order).tolist()
b3, a3 = butter(order, Wn, btype='stop')
for i in range(0, n_ch):
processed_signal.ix[:, i] = np.transpose(filtfilt(b3, a3, processed_signal.ix[:, i]))
graph_legend = []
handle_as=[]
labels_as=[]
start = 6000
end = 12000
fsamp = 1
num_ch = len(channels_names)
fig = plt.figure(figsize=(20, 14))
fig.subplots_adjust(hspace=.5)
for h in range(0, num_ch):
ax = plt.subplot(num_ch,1,h+1)
axes = plt.gca()
if(end==0):
end = processed_signal.ix[:, h].shape[0]-1
x = np.arange(start, end, 1)
input_signal = processed_signal.ix[:, h][start * fsamp:end * fsamp]
l4 = ax.plot(x,input_signal, linewidth=1.5, label='Pre-processed Signal')
graph_legend.append(l4)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(channels_names[h])
# leg = plt.legend(handles=handles, labels=labels)
fig.legend(handles=handle_as[0], labels=labels_as[0])
fig.text(0.5, 0.04, 'Sample Count', ha='center', fontsize=20)
fig.text(0.04, 0.5, 'Amplitudes', va='center', rotation='vertical', fontsize=20)
# plt.show()
pp.savefig(bbox_inches='tight')
pp.close()
| 2,666 | 28.307692 | 115 | py |
OpenBCIPython | OpenBCIPython-master/visualization/plot_visualize_raw.py | """
.. _tut_viz_raw:
Visualize Raw data
==================
"""
import os.path as op
import numpy as np
import mne
data_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample')
raw = mne.io.read_raw_fif(op.join(data_path, 'sample_audvis_raw.fif'))
# raw.set_eeg_reference() # set EEG average reference
# events = mne.read_events(op.join(data_path, 'sample_audvis_raw-eve.fif'))
raw.plot(block=True)
# raw.plot(order='selection')
# raw.plot_sensors(kind='3d', ch_type='mag', ch_groups='position')
#
# projs = mne.read_proj(op.join(data_path, 'sample_audvis_eog-proj.fif'))
# raw.add_proj(projs)
# raw.plot_projs_topomap()
#
# raw.plot()
#
# raw.plot_psd(tmax=np.inf, average=False)
#
# # wise spectra of first 30 seconds of the data.
# layout = mne.channels.read_layout('Vectorview-mag')
# layout.plot()
# raw.plot_psd_topo(tmax=30., fmin=5., fmax=60., n_fft=1024, layout=layout)
| 894 | 23.861111 | 75 | py |
OpenBCIPython | OpenBCIPython-master/visualization/plot_compute_raw_data_spectrum.py |
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_multitaper
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
tmin, tmax = 0, 60 # use the first 60s of data
# Setup for reading the raw data (to save memory, crop before loading)
raw = io.read_raw_fif(raw_fname).crop(tmin, tmax).load_data()
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
# Let's first check out all channel types
raw.plot_psd(area_mode='range', tmax=10.0, show=False)
# Now let's focus on a smaller subset:
# Pick MEG magnetometers in the Left-temporal region
selection = read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's just look at the first few channels for demonstration purposes
picks = picks[:4]
plt.figure()
ax = plt.axes()
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks,
show=False)
# And now do the same with SSP applied
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks,
show=False)
# And now do the same with SSP + notch filtering
# Pick all channels for notch since the SSP projection mixes channels together
raw.notch_filter(np.arange(60, 241, 60), n_jobs=1)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks,
show=False)
ax.set_title('Four left-temporal magnetometers')
plt.legend(['Without SSP', 'With SSP', 'SSP + Notch'])
# Alternatively, you may also create PSDs from Raw objects with ``psd_*``
f, ax = plt.subplots()
psds, freqs = psd_multitaper(raw, low_bias=True, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, proj=True, picks=picks,
n_jobs=1)
psds = 10 * np.log10(psds)
psds_mean = psds.mean(0)
psds_std = psds.std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD', xlabel='Frequency',
ylabel='Power Spectral Density (dB)')
plt.show()
| 2,858 | 34.7375 | 116 | py |
OpenBCIPython | OpenBCIPython-master/visualization/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/visualization/draw_graph_channels_pattern.py | import json
from scipy.signal import butter, filtfilt
import pandas as pd
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from pandas.tools.plotting import scatter_matrix
from scipy.interpolate import interp1d
from preprocessing.preprocessing import PreProcessor
from preprocessing.ssa import SingularSpectrumAnalysis
import scipy.linalg as lin
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
matplotlib.rc('axes', titlesize=15)
matplotlib.rc('legend', fontsize=15)
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('channels_reconstructed.pdf')
index=0
window_size=16
fsamp = 250
tsample = 1 / fsamp
f_low = 50
f_high = 1
order = 2
project_file_path = "/home/runge/openbci/git/OpenBCI_Python"
config_file = project_file_path + "/config/config.json"
raw_reconstructed_signals = pd.read_csv(project_file_path+"/build/dataset/train/result/bycept_reconstructed_signals.csv")
# raw_channels_data = pd.read_csv(project_file_path+"/build/dataset/result_bicep.csv").ix[:,2:7].dropna()
# scatter_matrix(raw_channels_data, alpha=0.2, figsize=(6, 6), diagonal='kde')
channels_names = ["ch1", "ch2", "ch3", "ch4", "ch5"]
# channel_vector = [1,2, 3, 4, 5]
# n_ch = len(channel_vector)
# df = pd.read_csv("/home/runge/openbci/application.linux64/application.linux64/OpenBCI-RAW-right_strait_up_new.txt")
# df = df[channel_vector].dropna(axis=0)
#
# processed_signal_channel = df.copy()
#
# b, a = butter(order, (order * f_low * 1.0) / fsamp * 1.0, btype='low')
# for i in range(0, n_ch):
# processed_signal_channel.ix[:, i] = np.transpose(filtfilt(b, a, df.ix[:, i]))
#
# b1, a1 = butter(order, (order * f_high * 1.0) / fsamp * 1.0, btype='high')
# for i in range(0, n_ch):
# processed_signal_channel.ix[:, i] = np.transpose(filtfilt(b1, a1, processed_signal_channel.ix[:, i]))
#
# Wn = (np.array([58.0, 62.0]) / 500 * order).tolist()
# b3, a3 = butter(order, Wn, btype='stop')
# for i in range(0, n_ch):
# processed_signal_channel.ix[:, i] = np.transpose(filtfilt(b3, a3, processed_signal_channel.ix[:, i]))
fsamp = 1
graph_legend = []
handle_as=[]
labels_as=[]
num_ch = len(channels_names)
start = 2100
end = 2600
raw_processed_signal = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/train/result/reconstructed_bycept_kinect__angles_.csv").dropna()
# scatter_matrix(processed_signal, alpha=0.2, figsize=(6, 6), diagonal='kde')
def nomalize_signal(input_signal):
mean = np.mean(input_signal, axis=0)
input_signal -= mean
return input_signal / np.std(input_signal, axis=0)
start_g = 700
end_g = 10000
processed_signal = raw_processed_signal.copy()
nomalized_signal = nomalize_signal(processed_signal)
# mapping = interp1d([-1,1],[0,180])
pattern=np.array(nomalized_signal.ix[:, 1][start :end])
data=np.array(nomalized_signal.ix[:, 1][start_g :end_g])
def create_mats(dat):
    # Build a left-to-right HMM from the template 'dat' (sub-sampled every 'step'
    # samples): A is the state-transition matrix, pA its pseudo-count prior, w the
    # per-state autoregressive emission weights, and K the number of hidden states.
step=5
eps=.1
dat=dat[::step]
K=len(dat)+1
A=np.zeros((K,K))
A[0,1]=1.
pA=np.zeros((K,K))
pA[0,1]=1.
for i in xrange(1,K-1):
A[i,i]=(step-1.+eps)/(step+2*eps)
A[i,i+1]=(1.+eps)/(step+2*eps)
pA[i,i]=1.
pA[i,i+1]=1.
A[-1,-1]=(step-1.+eps)/(step+2*eps)
A[-1,1]=(1.+eps)/(step+2*eps)
pA[-1,-1]=1.
pA[-1,1]=1.
w=np.ones( (K,2) , dtype=np.float)
w[0,1]=dat[0]
w[1:-1,1]=(dat[:-1]-dat[1:])/step
w[-1,1]=(dat[0]-dat[-1])/step
return A,pA,w,K
A,pA,w,K=create_mats(pattern)
eta=5. #precision parameter for the autoregressive portion of the model
lam=.1 #precision parameter for the weights prior
N=1 #number of sequences
M=2 #number of dimensions - the second variable is for the bias term
T=len(data) #length of sequences
x=np.ones( (T+1,M) ) # sequence data (just one sequence)
x[0,1]=1
x[1:,0]=data
#emissions
e=np.zeros( (T,K) )
#residuals
v=np.zeros( (T,K) )
#store the forward and backward recurrences
f=np.zeros( (T+1,K) )
fls=np.zeros( (T+1) )
f[0,0]=1
b=np.zeros( (T+1,K) )
bls=np.zeros( (T+1) )
b[-1,1:]=1./(K-1)
#hidden states
z=np.zeros( (T+1),dtype=np.int )
#expected hidden states
ex_k=np.zeros((T,K))
# expected pairs of hidden states
ex_kk=np.zeros((K,K))
nkk=np.zeros((K,K))
def fwd():
global f,e
for t in xrange(T):
f[t+1,:]=np.dot(f[t,:],A)*e[t,:]
sm=np.sum(f[t+1,:])
fls[t+1]=fls[t]+np.log(sm)
f[t+1,:]/=sm
assert f[t+1,0]==0
def bck():
global b,e
for t in xrange(T-1,-1,-1):
b[t,:]=np.dot(A,b[t+1,:]*e[t,:])
sm=np.sum(b[t,:])
bls[t]=bls[t+1]+np.log(sm)
b[t,:]/=sm
def em_step(xn):
global A,w,eta
global f,b,e,v
global ex_k,ex_kk,nkk
x=xn[:-1] #current data vectors
y=xn[1:,:1] #next data vectors predicted from current
#compute residuals
v=np.dot(x,w.T) # (N,K) <- (N,1) (N,K)
v-=y
e=np.exp(-eta/2*v**2,e)
fwd()
bck()
# compute expected hidden states
for t in xrange(len(e)):
ex_k[t,:]=f[t+1,:]*b[t+1,:]
ex_k[t,:]/=np.sum(ex_k[t,:])
# compute expected pairs of hidden states
for t in xrange(len(f)-1) :
ex_kk=A*f[t,:][:,np.newaxis]*e[t,:]*b[t+1,:]
ex_kk/=np.sum(ex_kk)
nkk+=ex_kk
# max w/ respect to transition probabilities
A=pA+nkk
A/=np.sum(A,1)[:,np.newaxis]
# solve the weighted regression problem for emissions weights
# x and y are from above
for k in xrange(K):
ex=ex_k[:,k][:,np.newaxis]
dx=np.dot(x.T,ex*x)
dy=np.dot(x.T,ex*y)
dy.shape=(2)
w[k,:]=lin.solve(dx+lam*np.eye(x.shape[1]), dy)
#return the probability of the sequence (computed by the forward algorithm)
return fls[-1]
for i in xrange(5):
print em_step(x)
#get rough boundaries by taking the maximum expected hidden state for each position
r=np.arange(len(ex_k))[np.argmax(ex_k,1)<2]
f = np.diff(np.diff(r))
for i in range(0,len(f)):
if(f[i]<=0):
r[i] = 0
#plot
plt.plot(range(T),x[1:,0])
yr=[np.min(x[:,0]),np.max(x[:,0])]
for i in r:
plt.plot([i,i],yr,'-r')
plt.show()
graph_legend = []
fig = plt.figure(figsize=(20, 14))
fig.subplots_adjust(hspace=.5)
x = np.arange(start_g,end_g, 1)
for i in range(0,5):
ax = plt.subplot(810+i+1)
# axes = plt.gca()
l1=ax.plot(range(T),raw_reconstructed_signals.ix[:,i][start_g:end_g], linewidth=1.0,
label="Processed signal with SSA")
graph_legend.append(l1)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(channels_names[i])
for i in r:
plt.plot([i,i],yr,'-r')
for j in range(0,3):
ax = plt.subplot(815+1+j)
# axes = plt.gca()
l1=ax.plot(range(T),raw_processed_signal.ix[:,j][start_g:end_g], linewidth=1.0,
label="Processed signal with SSA")
graph_legend.append(l1)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(channels_names[j])
for i in r:
plt.plot([i,i],yr,'-r')
fig.legend(handles=handle_as[0], labels=labels_as[0])
fig.text(0.5, 0.04, 'position', ha='center', fontsize=10)
fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=10)
plt.show()
# fig = plt.figure(figsize=(20, 14))
# fig.subplots_adjust(hspace=.5)
#
# start_g = 0
# end_g= 200
#
# for i in range(0, 5):
# x = np.arange(start_g, end_g, 1)
# input_signal = raw_reconstructed_signals.ix[:,i][start_g:end_g]
# with open(config_file) as config:
# config = json.load(config)
# config["train_dir_abs_location"] = project_file_path + "/build/dataset/train"
#
# for h in range(0, num_ch):
# preprocessor = PreProcessor(h, None, None, config)
# ax = plt.subplot(num_ch,1,h+1)
#
# axes = plt.gca()
# # axes.set_ylim([0, 180])
# if(end==0):
# end = raw_channels_data.ix[:, h].shape[0]-1
# x = np.arange(start, end, 1)
# input_signal = raw_channels_data.ix[:, h][start * fsamp:end * fsamp]
# # l1 = ax.plot(input_signal, linewidth=3.0, label="raw signal")
# # graph_legend.append(l1)
#
# noise_reducer_signal = preprocessor.apply_noise_reducer_filer(input_signal)
# l2 = ax.plot(x, noise_reducer_signal, linewidth=3.0, label="noise_reducer_signal")
# graph_legend.append(l2)
#
# # normalize_signal = preprocessor.nomalize_signal(noise_reducer_signal)
# # l3 = ax.plot(x, normalize_signal, linewidth=3.0, label="normalize_signal")
# # graph_legend.append(l3)
#
# reconstructed_signal = SingularSpectrumAnalysis(noise_reducer_signal, window_size, False).execute(1)
# l4 = ax.plot(x,reconstructed_signal, linewidth=3.0, label='reconstructed signal with SSA')
# graph_legend.append(l4)
#
# handles, labels = ax.get_legend_handles_labels()
# handle_as.append(handles)
# labels_as.append(labels)
# plt.title(channels_names[h])
# # leg = plt.legend(handles=handles, labels=labels)
#
# fig.legend(handles=handle_as[0], labels=labels_as[0])
# fig.text(0.5, 0.04, 'position', ha='center', fontsize=10)
# fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=10)
# plt.show()
# pp.savefig(bbox_inches='tight')
# pp.close()
# def plot_detected_pattern(self, start=0, end=0, is_raw=False, pattern_start_with=200, pattern_end_with=800):
# if is_raw:
# channel_signals = pd.read_csv(self.dataset_location).ix[:, 2:7].dropna()
# else:
# channel_signals = pd.read_csv(self.config["train_dir_abs_location"]
# + "/result/raw_reconstructed_signalbycepts.csv").dropna()
# kinect_angles = pd.read_csv(self.config["train_dir_abs_location"]
# + "/result/reconstructed_bycept_kinect__angles_.csv").dropna()
# nomalized_signal = self.nomalize_signal(kinect_angles)
# pattern = np.array(nomalized_signal.ix[:, 1][pattern_start_with:pattern_end_with])
#
# if end == 0:
# end = len(nomalized_signal)
#
# data = np.array(nomalized_signal.ix[:, 1][start:end])
# graph_legend = []
# handle_as = []
# labels_as = []
#
# def create_mats(dat):
# step = 5
# eps = .1
# dat = dat[::step]
# K = len(dat) + 1
# A = np.zeros((K, K))
# A[0, 1] = 1.
# pA = np.zeros((K, K))
# pA[0, 1] = 1.
# for i in xrange(1, K - 1):
# A[i, i] = (step - 1. + eps) / (step + 2 * eps)
# A[i, i + 1] = (1. + eps) / (step + 2 * eps)
# pA[i, i] = 1.
# pA[i, i + 1] = 1.
# A[-1, -1] = (step - 1. + eps) / (step + 2 * eps)
# A[-1, 1] = (1. + eps) / (step + 2 * eps)
# pA[-1, -1] = 1.
# pA[-1, 1] = 1.
#
# w = np.ones((K, 2), dtype=np.float)
# w[0, 1] = dat[0]
# w[1:-1, 1] = (dat[:-1] - dat[1:]) / step
# w[-1, 1] = (dat[0] - dat[-1]) / step
# return A, pA, w, K
#
# self.A, pA, w, K = create_mats(pattern)
#
# eta = 5.
# lam = .1
# N = 1
# M = 2
# T = len(data)
# x = np.ones((T + 1, M))
# x[0, 1] = 1
# x[1:, 0] = data
#
# # emissions
# e = np.zeros((T, K))
# # residuals
# v = np.zeros((T, K))
#
# # store the forward and backward recurrences
# f = np.zeros((T + 1, K))
# fls = np.zeros((T + 1))
# f[0, 0] = 1
# b = np.zeros((T + 1, K))
# bls = np.zeros((T + 1))
# b[-1, 1:] = 1. / (K - 1)
#
# # hidden states
# z = np.zeros((T + 1), dtype=np.int)
#
# # expected hidden states
# ex_k = np.zeros((T, K))
#
# # expected pairs of hidden states
# ex_kk = np.zeros((K, K))
# nkk = np.zeros((K, K))
#
# def fwd():
# global f, e
# for t in xrange(T):
# f[t + 1, :] = np.dot(f[t, :], self.A) * e[t, :]
# sm = np.sum(f[t + 1, :])
# fls[t + 1] = fls[t] + np.log(sm)
# f[t + 1, :] /= sm
# assert f[t + 1, 0] == 0
#
# def bck():
# global b, e
# for t in xrange(T - 1, -1, -1):
# b[t, :] = np.dot(self.A, b[t + 1, :] * e[t, :])
# sm = np.sum(b[t, :])
# bls[t] = bls[t + 1] + np.log(sm)
# b[t, :] /= sm
#
# def em_step(xn):
# global self.A, w, eta
# global f, b, e, v
# global ex_k, ex_kk, nkk
#
# x = xn[:-1] # current data vectors
# y = xn[1:, :1] # next data vectors predicted from current
# # compute residuals
# v = np.dot(x, w.T) # (N,K) <- (N,1) (N,K)
# v -= y
# e = np.exp(-eta / 2 * v ** 2, e)
#
# fwd()
# bck()
#
# # compute expected hidden states
# for t in xrange(len(e)):
# ex_k[t, :] = f[t + 1, :] * b[t + 1, :]
# ex_k[t, :] /= np.sum(ex_k[t, :])
#
# # compute expected pairs of hidden states
# for t in xrange(len(f) - 1):
# ex_kk = self.A * f[t, :][:, np.newaxis] * e[t, :] * b[t + 1, :]
# ex_kk /= np.sum(ex_kk)
# nkk += ex_kk
#
# # max w/ respect to transition probabilities
# self.A = pA + nkk
# self.A /= np.sum(A, 1)[:, np.newaxis]
#
# # solve the weighted regression problem for emissions weights
# # x and y are from above
# for k in xrange(K):
# ex = ex_k[:, k][:, np.newaxis]
# dx = np.dot(x.T, ex * x)
# dy = np.dot(x.T, ex * y)
# dy.shape = (2)
# w[k, :] = lin.solve(dx + lam * np.eye(x.shape[1]), dy)
#
# # return the probability of the sequence (computed by the forward algorithm)
# return fls[-1]
#
# for i in xrange(5):
# em_step(x)
#
# # get rough boundaries by taking the maximum expected hidden state for each position
# r = np.arange(len(ex_k))[np.argmax(ex_k, 1) < 2]
# f = np.diff(np.diff(r))
# for i in range(0, len(f)):
# if (f[i] <= 0):
# r[i] = 0
# # plot
# plt.plot(range(T), x[1:, 0])
# yr = [np.min(x[:, 0]), np.max(x[:, 0])]
# for i in r:
# plt.plot([i, i], yr, '-r')
# plt.show()
#
# graph_legend = []
# fig = plt.figure(figsize=(20, 14))
# fig.subplots_adjust(hspace=.5)
# x = np.arange(start, end, 1)
# for i in range(0, 5):
# ax = plt.subplot(810 + i + 1)
# # axes = plt.gca()
# l1 = ax.plot(range(T), channel_signals.ix[:, i][start:end], linewidth=1.0,
# label="Processed signal with SSA")
# graph_legend.append(l1)
# handles, labels = ax.get_legend_handles_labels()
# handle_as.append(handles)
# labels_as.append(labels)
# plt.title(self.channels_names[i])
# for i in r:
# plt.plot([i, i], yr, '-r')
#
# for j in range(0, 3):
# ax = plt.subplot(815 + 1 + j)
# l1 = ax.plot(range(T), kinect_angles.ix[:, j][start:end], linewidth=1.0,
# label="Processed signal with SSA")
# graph_legend.append(l1)
# handles, labels = ax.get_legend_handles_labels()
# handle_as.append(handles)
# labels_as.append(labels)
# plt.title(self.channels_names[j])
# for i in r:
# plt.plot([i, i], yr, '-r')
#
# fig.legend(handles=handle_as[0], labels=labels_as[0])
# fig.text(0.5, 0.04, 'position', ha='center', fontsize=10)
# fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=10)
# plt.show()
| 15,738 | 29.800391 | 153 | py |
OpenBCIPython | OpenBCIPython-master/visualization/draw_graph.py | import pandas as pd
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from preprocessing.ssa import SingularSpectrumAnalysis
matplotlib.rc('xtick', labelsize=15)
matplotlib.rc('ytick', labelsize=15)
matplotlib.rc('axes', titlesize=20)
matplotlib.rc('legend', fontsize=20)
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('kinect_angles_reconstructed.pdf')
index=0
window_size=32
fsamp = 1
df2 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/kincet_anagles/kinecet_angles.csv")
df2 = df2.dropna(axis=0)
angle_names = ["wrist", "elbow", "shoulder"]
graph_legend = []
handle_as=[]
labels_as=[]
start = 350
end = 420
num_ch = 3
fig = plt.figure(figsize=(20, 14))
fig.subplots_adjust(hspace=.5)
for h in range(0, num_ch):
ax = plt.subplot(num_ch,1,h+1)
axes = plt.gca()
# axes.set_ylim([0, 180])
input_signal = df2.ix[:, h][start * fsamp:end * fsamp]
x = np.arange(start,end,1)
# mean = np.mean(input_signal, axis=0)
# input_signal -= mean
# input_signal=input_signal / np.std(input_signal, axis=0)
l1 = ax.plot(x,input_signal, linewidth=3.0, label="raw signal")
graph_legend.append(l1)
reconstructed_signal = SingularSpectrumAnalysis(input_signal, 16, False).execute(1)
l2 = ax.plot(x,reconstructed_signal, linewidth=3.0, label='reconstructed signal with SSA')
# graph_legend.append(l2)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(angle_names[h])
# leg = plt.legend(handles=handles, labels=labels)
fig.legend(handles=handle_as[0], labels=labels_as[0])
fig.text(0.5, 0.04, 'position', ha='center', fontsize=20)
fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=20)
# plt.show()
pp.savefig(bbox_inches='tight')
pp.close()
| 1,971 | 29.338462 | 107 | py |
OpenBCIPython | OpenBCIPython-master/visualization/visualize.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/visualization/draw_graph_channels.py | import json
import pandas as pd
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from preprocessing.preprocessing import PreProcessor
from preprocessing.ssa import SingularSpectrumAnalysis
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
matplotlib.rc('axes', titlesize=15)
matplotlib.rc('legend', fontsize=15)
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('channels_reconstructed.pdf')
index=0
window_size=16
fsamp = 1
project_file_path = "/home/runge/openbci/git/OpenBCI_Python"
config_file = project_file_path + "/config/config.json"
raw_reconstructed_signals = pd.read_csv(project_file_path+"/build/dataset/train/result/raw_reconstructed_signals.csv")
raw_channels_data = pd.read_csv(project_file_path+"/build/dataset2017-5-6_0-0-33new_up.csv").ix[:,2:7].dropna()
# raw_channels_data = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/OpenBCI-RAW-right_strait_up_new.csv").ix[:,1:6].dropna()
channels_names = ["ch1", "ch2", "ch3", "ch4", "ch5"]
graph_legend = []
handle_as=[]
labels_as=[]
start =1100
end = 0
num_ch = len(channels_names)
fig = plt.figure(figsize=(20, 14))
fig.subplots_adjust(hspace=.5)
with open(config_file) as config:
config = json.load(config)
config["train_dir_abs_location"] = project_file_path + "/build/dataset/train"
for h in range(0, num_ch):
preprocessor = PreProcessor(h, None, None, config)
ax = plt.subplot(num_ch,1,h+1)
# axes = plt.gca()
# axes.set_ylim([0, 180])
if(end==0):
end = raw_channels_data.ix[:, h].shape[0]-1
x = np.arange(start, end, 1)
input_signal = raw_channels_data.ix[:, h][start * fsamp:end * fsamp]
# l1 = ax.plot(input_signal, linewidth=1.0, label="raw signal")
# graph_legend.append(l1)
noise_reducer_signal = preprocessor.apply_noise_reducer_filer(input_signal)
l2 = ax.plot(x, noise_reducer_signal, linewidth=3.0, label="noise_reducer_signal")
graph_legend.append(l2)
# normalize_signal = preprocessor.nomalize_signal(noise_reducer_signal)
# l3 = ax.plot(x, normalize_signal, linewidth=3.0, label="normalize_signal")
# graph_legend.append(l3)
# reconstructed_signal = SingularSpectrumAnalysis(noise_reducer_signal, window_size, False).execute(1)
# l4 = ax.plot(x,reconstructed_signal, linewidth=3.0, label='reconstructed signal with SSA')
# graph_legend.append(l4)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(channels_names[h])
# leg = plt.legend(handles=handles, labels=labels)
fig.legend(handles=handle_as[0], labels=labels_as[0])
fig.text(0.5, 0.04, 'position', ha='center', fontsize=10)
fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=10)
plt.show()
# pp.savefig(bbox_inches='tight')
# pp.close()
| 3,104 | 34.284091 | 144 | py |
OpenBCIPython | OpenBCIPython-master/visualization/plot_interpolate_bad_channels.py |
import mne
import os.path as op
from mne.datasets import sample
# print(__doc__)
#
# data_path = sample.data_path()
#
# fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# evoked = mne.read_evokeds(fname, condition='Left Auditory',
# baseline=(None, 0))
#
# # plot with bads
# evoked.plot(exclude=[])
#
# # compute interpolation (also works with Raw and Epochs objects)
# evoked.interpolate_bads(reset_bads=False)
#
# # plot interpolated (previous bads)
# evoked.plot(exclude=[])
data_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample')
raw = mne.io.read_raw_fif(op.join(data_path, 'sample_audvis_raw.fif'))
raw.set_eeg_reference() # set EEG average reference
events = mne.read_events(op.join(data_path, 'sample_audvis_raw-eve.fif'))
| 783 | 28.037037 | 73 | py |
OpenBCIPython | OpenBCIPython-master/visualization/LocalPolynomialRegression.py | from py_qt import bootstrap as bs
import matplotlib.pyplot as plt
from py_qt import npr_methods
import numpy as np
from py_qt import nonparam_regression as smooth
from py_qt import plot_fit
import tensorflow as tf
import requests
def f(x):
return 3*np.cos(x/2) + x**2/5 + 3
xs = np.random.rand(200) * 10
ys = f(xs) + 2*np.random.randn(*xs.shape)
birthdata_url = 'https://www.umass.edu/statdata/statdata/data/lowbwt.dat'
birth_file = requests.get(birthdata_url)
birth_data = birth_file.text.split('\r\n')[5:]
birth_header = [x for x in birth_data[0].split(' ') if len(x) >= 1]
birth_data = [[float(x) for x in y.split(' ') if len(x) >= 1] for y in birth_data[1:] if len(y) >= 1]
# Pull out target variable
y_vals = np.array([x[1] for x in birth_data])
# Pull out predictor variables (not id, not target, and not birthweight)
x_vals = np.array([x[2:9] for x in birth_data])
# Split data into train/test = 80%/20%
train_indices = np.random.choice(len(x_vals), int(round(len(x_vals) * 0.8)), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# Normalize by column (min-max norm)
def normalize_cols(m):
col_max = m.max(axis=0)
col_min = m.min(axis=0)
return (m - col_min) / (col_max - col_min)
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test))
# xs = x_vals_train
# ys = y_vals_train
grid = np.r_[0:10:512j]
k1 = smooth.NonParamRegression(xs, ys, method=npr_methods.LocalPolynomialKernel())
k1.fit()
plt.figure()
# plt.plot(xs, ys, 'o', alpha=0.5, label='Data')
plt.plot(grid, k1(grid), 'k', label='local polynomial fit', linewidth=2)
plt.show()
# grid = np.r_[0:10:512j]
#
# plt.plot(grid, f(grid), 'r--', label='Reference')
# plt.plot(xs, ys, 'o', alpha=0.5, label='Data')
# # plt.legend(loc='best')
# # plt.show()
#
# k0 = smooth.NonParamRegression(xs, ys, method=npr_methods.SpatialAverage())
# k0.fit()
# plt.plot(grid, k0(grid), label="Spatial Averaging", linewidth=2)
# plt.legend(loc='best')
#
# k1 = smooth.NonParamRegression(xs, ys, method=npr_methods.LocalPolynomialKernel(q=1))
# k2 = smooth.NonParamRegression(xs, ys, method=npr_methods.LocalPolynomialKernel(q=2))
#
# k1.fit()
# k2.fit()
#
# plt.figure()
# plt.plot(xs, ys, 'o', alpha=0.5, label='Data')
# plt.plot(grid, k2(grid), 'k', label='quadratic', linewidth=2)
# plt.plot(grid, k1(grid), 'g', label='linear', linewidth=2)
# plt.plot(grid, f(grid), 'r--', label='Target', linewidth=2)
# plt.legend(loc='best')
#
# yopts = k2(xs)
# res = ys - yopts
# plot_fit.plot_residual_tests(xs, yopts, res, 'Local Quadratic')
#
# yopts = k1(xs)
# res = ys - yopts
# plot_fit.plot_residual_tests(xs, yopts, res, 'Local Linear')
# plt.show()
# def fit(xs, ys):
# est = smooth.NonParamRegression(xs, ys, method=npr_methods.LocalPolynomialKernel(q=2))
# est.fit()
# return est
#
# result = bs.bootstrap(fit, xs, ys, eval_points = grid, CI = (95,99))
#
# plt.plot(xs, ys, 'o', alpha=0.5, label='Data')
# plt.plot(grid, result.y_fit(grid), 'r', label="Fitted curve", linewidth=2)
# plt.plot(grid, result.CIs[0][0,0], 'g--', label='95% CI', linewidth=2)
# plt.plot(grid, result.CIs[0][0,1], 'g--', linewidth=2)
# plt.fill_between(grid, result.CIs[0][0,0], result.CIs[0][0,1], color='g', alpha=0.25)
# plt.legend(loc=0)
# plt.show()
| 3,458 | 30.445455 | 101 | py |
OpenBCIPython | OpenBCIPython-master/analyzer/tmp.py | # Visualize an STFT power spectrum
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
# y, sr = librosa.load(librosa.util.example_audio_file())
# plt.figure(figsize=(12, 8))
# number_of_columns = 5
# number_of_rows = 2
#
# D = librosa.stft(y)
# plt.subplot(number_of_columns, number_of_rows, 1)
# librosa.display.specshow(D, y_axis='log')
# plt.colorbar()
# plt.title('Log-frequency power spectrogram')
#
# # Or on a logarithmic scale
# D = np.array(librosa.zero_crossings(y))
# D = np.where(D == True, 10, D)
# D = np.where(D == False, -10, D)
# F=[]
# F.append(D.tolist())
#
# plt.subplot(number_of_columns, number_of_rows, 2)
# librosa.display.specshow(np.array(F))
# plt.colorbar()
# plt.title('Zero-Crossing-Rate')
#
# # Or use a CQT scale
#
# CQT = librosa.cqt(y, sr=sr)
# plt.subplot(number_of_columns, number_of_rows, 3)
# librosa.display.specshow(CQT, y_axis='cqt_note')
# plt.colorbar()
# plt.title('Constant-Q power spectrogram (note)')
#
# plt.subplot(number_of_columns, number_of_rows, 4)
# librosa.display.specshow(CQT, y_axis='cqt_hz')
# plt.colorbar()
# plt.title('Constant-Q power spectrogram (Hz)')
#
# # Draw a chromagram with pitch classes
#
# tonnetz = librosa.feature.tonnetz(y=y, sr=sr)
# plt.subplot(number_of_columns, number_of_rows, 5)
# librosa.display.specshow(tonnetz, y_axis='tonnetz')
# plt.colorbar()
# plt.title('Tonal Centroids (Tonnetz)')
#
# rms = librosa.feature.rmse(y=y)
# plt.subplot(number_of_columns, number_of_rows, 6)
# plt.semilogy(rms.T, label='RMS Energy')
# plt.colorbar()
# plt.title('Root Mean Square Energy')
#
# # Draw time markers automatically
# cent = librosa.feature.spectral_centroid(y=y, sr=sr)
# plt.subplot(number_of_columns, number_of_rows, 7)
# plt.semilogy(cent.T, label='Spectral centroid')
# plt.ylabel('Hz')
# plt.xticks([])
# plt.xlim([0, cent.shape[-1]])
# plt.colorbar()
# plt.title('Spectral centroid')
#
# # Draw a tempogram with BPM markers
# plt.subplot(number_of_columns, number_of_rows, 8)
# Tgram = librosa.feature.tempogram(y=y, sr=sr)
# librosa.display.specshow(Tgram, x_axis='time', y_axis='tempo')
# plt.colorbar()
# plt.title('Tempogram')
# plt.tight_layout()
#
#
# plt.show()
distance = np.array([0.43535290045236669, 0.42654141461933315, 0.41773000255681991, 0.40891999695557635, 0.40011370175151184, 0.39131343663359758, 0.38252154303566893, 0.37374034300584275, 0.36497215917263209, 0.35621801267585679, 0.34747593421300349, 0.33874262441059977, 0.33001335524476083, 0.3212808090951445, 0.31253415076299623, 0.30375467571545162, 0.29279413332463616, 0.28066950263846913, 0.26748473568315551, 0.25329508095999298, 0.23816238276517113, 0.22257064085487269, 0.20686783041366305, 0.19137636417560147, 0.17619725667101774, 0.1613130873077859, 0.14692619820926339, 0.13302604230193524, 0.11948229124115357, 0.10618926662385368, 0.093221416544688129, 0.080759511753566646, 0.068909841268586389, 0.057741789191314258, 0.047528297400111059, 0.038377661783666583, 0.030577245984914559, 0.024229987657192708, 0.019320941451161162, 0.015605339007646183, 0.012769894921379362, 0.010630675021273517, 0.0089375571196414604, 0.0072574453484229992, 0.0056665577901831524, 0.0043394932948313406, 0.0035662888407506171, 0.0034126443670462752, 0.0039818360969491691, 0.0053913811140176788, 0.0073389765787151154, 0.0093547705244976988, 0.011300899362130834, 0.012900677877949721, 0.013524752957730889, 0.014121637252505396, 0.014717515762864768, 0.015325399042876801, 0.015933710924031286, 0.016502856888040599, 0.017002871453495255, 0.017400742126837367, 0.017829183078290511, 0.018753749533680416, 0.020934332637185773, 0.024991827581979334, 0.031421788811820006, 0.040747187563057036, 0.053215524918749042, 0.067617524265106255, 0.083370785924705648, 0.099695199146981744, 0.11689992919236054, 0.134963661581574, 0.15375361935196571, 0.17356293278893636, 0.19408264497823111, 0.21546504892339038, 0.23725339989733626, 0.25875695656690234, 0.28044331230887315, 0.30214780293492705, 0.32366721622267552, 0.34504462156594307, 0.36666007061959555, 0.38890379787610135, 0.41102197506491245, 0.43248705900351403, 0.4529589142264005, 0.47214882975792971, 0.48973001298004715, 0.50302491018108664, 0.51210501675271447, 0.52107731940253832, 0.529938411004651, 0.53870077642282344, 0.54735354397444347, 0.55592687834268262, 0.56440659203044641, 0.57280878565964799, 0.5811498002415344, 0.58942140363062234, 0.58482735683832388, 0.57619597258432675, 0.56756219870123692, 0.55892263588288948, 0.55027269774753662, 0.54160807921309972, 0.5308809126363857, 0.51974139230213912, 0.50874440856793657, 0.49815937245854192, 0.48827197610377121, 0.47859141503352676, 0.46910027064819265, 0.45976941661994469, 0.45058890866217649, 0.44155727244372256, 0.43274265723819233, 0.42408725357847676, 0.41546812439802033, 0.40685551657557978, 0.39825325460116845, 0.38966073114838395, 0.38107465393138651, 0.37249044995929598, 0.36390362384516112, 0.35531120216550049, 0.34671093707706274, 0.33810314995250557, 0.3294914862008711, 0.32087906065044597, 0.31226671688852181, 0.30364581844177974, 0.29500182862252461, 0.28403606246995888, 0.27153179977920489, 0.25778464961804654, 0.24287656772744626, 0.22752864536604098, 0.21203505682134763, 0.19665621838086986, 0.18168856450435636, 0.16756855422543532, 0.15437068213661675, 0.1416600196550514, 0.12910030508099471, 0.11685724018153666, 0.10468678073177597, 0.092575814757584557, 0.080403051307463128, 0.06864018511146644, 0.057556751290241619, 0.047255598674713031, 0.037993178442720092, 0.030210001333835237, 0.023869729986541324, 0.018855332245264606, 0.014982594828002637, 0.012047324366695134, 0.0099043831832178898, 0.0081247137128125559, 0.0065756984406157805, 0.0054776909249845303, 0.0049300201970713092, 
0.0049647773335861371, 0.0055914517299463281, 0.0068565700087189303, 0.008710617433249258, 0.010747814704224134, 0.012761475696567159, 0.014640068460340562, 0.016036925370460201, 0.01650608656845691, 0.016848941113580859, 0.017044662062991409, 0.017062358507574628, 0.016868931928889205, 0.016688326567252366, 0.016828530988248547, 0.017381237721516257, 0.018476351187846053, 0.020954901855918598, 0.025678473275442881, 0.032912038598899844, 0.042840318931119184, 0.056134923025646419, 0.071915639569667789, 0.089363018430540467, 0.1078974579720597, 0.12750269943698384, 0.14845037659636565, 0.17039287997231573, 0.19270731475446001, 0.21496971240896295, 0.23699911756127193, 0.25843043853752162, 0.27916723977377234, 0.29889429559885983, 0.31794416830652417, 0.33656153373388631, 0.35490611407861528, 0.37351232945966456, 0.39262361025775638, 0.41230389094183761, 0.4324393602429884, 0.45252257258573897, 0.47249100297033531, 0.49174927499074123, 0.50892283943548644, 0.51751878540110952, 0.52592945202842301, 0.5341890013003614, 0.54230938489049441, 0.55032130959055992, 0.55823327749295726, 0.56605447504193052, 0.57380377856330866, 0.58147336636802183, 0.58905098117863675, 0.59048689189889325, 0.58025735850311011, 0.57008988468377786, 0.55996157091900589, 0.54997845553147207, 0.54028059892613978, 0.53076523643910067, 0.52141643260408399, 0.5122332197310816, 0.50321403263467301, 0.49434027008410614, 0.48551839712616696, 0.47673211894162176, 0.46797789667334161, 0.4592510804137957, 0.45054720548850258, 0.44186230046241243, 0.43319208166878603, 0.42453072071286679, 0.41587222376451355, 0.40721186793265912, 0.39854499074278882, 0.38986769393436893, 0.38117925214305254, 0.37244040119486505, 0.36353623993741868, 0.35431248303233537, 0.34453610211238472, 0.33397042150421957, 0.32248446807272546, 0.30989532380397972, 0.29609310704926045, 0.28106403842207311, 0.26522510401179045, 0.2489343855755638, 0.23257312027846178, 0.2164388254094374, 0.20083110018693509, 0.18596021380490396, 0.17163030306202878, 0.15762737343182978, 0.14371772690136797, 0.12993747486850818, 0.11603985643520666, 0.10199577334083414, 0.088165907670793478, 0.074946284782225814, 0.062506181636369854, 0.051274092286640677, 0.041409166490116875, 0.033050448269565449, 0.026111250187634199, 0.020375925857880679, 0.015615347783130949, 0.011558005415772009, 0.0080604546952381655, 0.0051648588494875988, 0.0028757615320535576, 0.0012608058602624438, 0.00031945288788812627, 0.0, 0.00030215588092830331, 0.0011053800229415514, 0.0023331691705536883, 0.0038912736972227429, 0.0056807424395955344, 0.0076193618938801572, 0.0095027114613017091, 0.011014866794965357, 0.012179859682251841, 0.012876294702121106, 0.012917727059568751, 0.012825176953454701, 0.012758391043117042, 0.012849972135827415, 0.013170387503607788, 0.013988969371269248, 0.015753883940680769, 0.018870949025161246, 0.023667621239168889, 0.030571800062589104, 0.039821816989419598, 0.051465982741920012, 0.064689919394282924, 0.079161363538066715, 0.094930042188734992, 0.11196980314892914, 0.12981625804802052, 0.1485553989432345, 0.16814483707870759, 0.18815505339559346, 0.2082273818477865, 0.22792538436781143, 0.2474711517989642, 0.26686357345048667, 0.28609720000344807, 0.30557111561606493, 0.3254383953418753, 0.34551631933000526, 0.3659426577153429, 0.38661525455313933, 0.407328346051538, 0.42742795687454643, 0.44661109418819689, 0.46499714015799554, 0.48242007173737494, 0.49863944070197608, 0.51360095351508928, 0.52594231178847872, 0.53485301527847684, 0.54360801995251173, 
0.55224260922967239, 0.56075371348732628, 0.56917099601840004, 0.57751606376269149, 0.58567113051320752, 0.57692026120340623, 0.56818831463966302, 0.55947456280602814, 0.55077744633265879, 0.54208965952813282, 0.53340459365022141, 0.52471991432196508, 0.51603521248414319, 0.50734968514582202, 0.49836723030448077, 0.48831836089417141, 0.47840048243582384, 0.4685072554923489, 0.45863678255393014, 0.44878522677006422, 0.43910862560484731, 0.42967375353725873, 0.42038115571454698, 0.41123732965276538, 0.40232794694511054, 0.39351776249905207, 0.38471296134421745, 0.37591653170936845, 0.36713079564262158, 0.35835807577249051, 0.34959939323879508, 0.34085279716796846, 0.33211467476121259, 0.32337842392549931, 0.31463707233084681, 0.30588363359588899, 0.29624701061788128, 0.28491555390220508, 0.27281107633424234, 0.25975188673046029, 0.24584610554252612, 0.23114918083839428, 0.21603038545403494, 0.20068700758132763, 0.18540809972227409, 0.17051433555630707, 0.156018371306443, 0.14199094128430753, 0.12825591808531661, 0.11472016925833366, 0.10129774374472049, 0.087922075162199007, 0.07495180645503996, 0.062620891499645892, 0.051801650926280203, 0.042637366049437957, 0.035060188041196677, 0.029127223093147799, 0.024549652954513515, 0.021007933158400587, 0.017686840540883233, 0.014242156689934884, 0.010944082031700405, 0.0079925776473869659, 0.0057089358893125507, 0.0043554408197835313, 0.0039980086792299196, 0.0045903293200590866, 0.0058376878952243073, 0.00745798739739167, 0.0091150709901356274, 0.010665170290190089, 0.01187010843759945, 0.012587392969204875, 0.013043490038363222, 0.01349257040215528, 0.013943217541372342, 0.014408055527312096, 0.01491851233844672, 0.01548662504040791, 0.016086505941584502, 0.016711074383355139, 0.017594041828910365, 0.019159668770481919, 0.02177644531886801, 0.025970050832394252, 0.032233892652397095, 0.040907897992209799, 0.052165264721859846, 0.065181896860047453, 0.079545818751863701, 0.095231399477126014, 0.11194545034480534, 0.12967403216701198, 0.14837252288321551, 0.16779419153001521, 0.18786583198682347, 0.20814964599848554, 0.22836391981872098, 0.24844914426150802, 0.2681105777304103, 0.28766660453939735, 0.30711223243683317, 0.32649387882865621, 0.34596405664335622, 0.36572781005257016, 0.38588053581595377, 0.40623699300029481, 0.42659563566193365, 0.44681986892221137, 0.46676285237231024, 0.4860819386379216, 0.50433755457973162, 0.52131204380517204, 0.53506856524251667, 0.54411530777148798, 0.55295124555673458, 0.56160366976531662, 0.5701008837178263, 0.57846186632091512, 0.58669856472155046, 0.59483061153566452, 0.60286304259459822, 0.59612731359882687, 0.58734664288497351, 0.57855548124074618, 0.56975540873454156, 0.56094675569419694, 0.55212814541287669, 0.54330074007494644, 0.53446682956493408, 0.52562853509222141, 0.51619217102198089, 0.50593206731742435, 0.49597830247209052, 0.4863360519852688, 0.47696361807976911, 0.46775658831385852, 0.45870694126688516, 0.44980745408388645, 0.44095750461600602, 0.43212028840592098, 0.42329302400752528, 0.41447497201878913, 0.40566414172716081, 0.39685609790972282, 0.38804640824389236, 0.37923207013148386, 0.3704107146620117, 0.3615779789480788, 0.35272723621697311, 0.34385417084259873, 0.33365337126428307, 0.32258731853553002, 0.31108835561616371, 0.29951231925973437, 0.28708970706159531, 0.27380337410055128, 0.25987088757358812, 0.24533905518389232, 0.23032968459356865, 0.21511927914795537, 0.20012000920960932, 0.18583109254597271, 0.1719984604243841, 0.15882167953523868, 0.14629806736468026, 
0.13445917450840261, 0.12319084426840514, 0.11223248927381424, 0.1015980557019588, 0.09142545599022904, 0.081668480529717452, 0.072468953693248822, 0.063928258534515003, 0.056001753406608033, 0.048743932958326124, 0.041802588232179423, 0.035101522630330222, 0.02867666906626436, 0.02251219113394174, 0.016870086696195132, 0.012203032131833588, 0.0088929844582203432, 0.0070918275961699868, 0.0066711747133233173, 0.0072428804475971774, 0.0083860995258808746, 0.0097614065810391702, 0.011048724774180429, 0.012172465782027605, 0.013128274321987767, 0.013989182312813094, 0.014721581888211779, 0.015091576466864029, 0.015464528204388825, 0.015843217509004082, 0.016231802091579359, 0.016639121429430848, 0.017084739273522918, 0.01773997818260406, 0.018849490040124539, 0.02080475173688074, 0.024087285853489973, 0.029226992633017944, 0.036936697060947266, 0.047929293847597317, 0.060944714746474546, 0.075406174668359888, 0.091046865452827058, 0.10774171520092823, 0.12568804676103465, 0.14438982231918299, 0.16360545206285368, 0.18353321701569614, 0.2038108267883281, 0.22428995565941548, 0.2443536345035382, 0.26404774026123351, 0.28365962358412389, 0.30281715853089847, 0.32168674343537401, 0.34068262113412245, 0.35985662275066926, 0.3794269734313343, 0.39902857174963258, 0.41878371236863016, 0.4383256915569721, 0.4570309241775834, 0.46917024181945122, 0.47765207472638233, 0.48614857539953127, 0.49467892251886364, 0.50328897638303671, 0.51199051319614997, 0.52079830709584352, 0.52968256162607963, 0.53862424449248303, 0.54758326315625216, 0.5565408624603303, 0.56546772135652934, 0.57433955827004624, 0.58313665651367408, 0.59184692530621508, 0.58590296327056635, 0.57735874436604884, 0.56881930412435755, 0.56028298187212466, 0.55174992075622276, 0.54322014447728495, 0.53469330587351305, 0.52616572629077818, 0.51763443796198483, 0.50909689477442011, 0.50055008427260361, 0.49199165320695176, 0.48341923670476783, 0.47483290995240157, 0.46623496209291942, 0.45762773054927841, 0.44901453814194897, 0.44039995287299516, 0.4317884318615568, 0.42318210559850516, 0.41457874400183614, 0.40597102079896424, 0.39734531333183631, 0.38867580804117158, 0.37472035632126111, 0.35798843608007719, 0.34260371465626138, 0.32579378067902565, 0.30790459838355583, 0.28947378877098268, 0.27080437851032618, 0.25219349838428928, 0.2339886216903119, 0.21687547199674909, 0.20079735633247459, 0.18532712876764518, 0.17056064984931954, 0.15642779485123762, 0.1429027923514796, 0.12985611545273426, 0.11727917565508073, 0.10527121276966807, 0.094060285420280915, 0.083539531938186098, 0.073683530113549875, 0.064300287033153908, 0.05543774235636216, 0.047276633804638299, 0.039859099066441653, 0.033293247774456948, 0.027543547557059112, 0.022688873425411496, 0.018621469180613526, 0.01521017312492229, 0.012374412964800189, 0.010091708788589795, 0.0083516795255471361, 0.0071457472086108682, 0.0064886857001648065, 0.0064589633781951495, 0.0072012803639131236, 0.0084592313142127992, 0.0099628815672237539, 0.011459358249358571, 0.012779314543907599, 0.013859552757009555, 0.014831827424233628, 0.015707804235302183, 0.016476629105313972, 0.017124162681032302, 0.017599824248874279, 0.017859156477827234, 0.018044508033644765, 0.018355209094968978, 0.0189437956560667, 0.019852445896942052, 0.02111756378804874, 0.022789968186689318, 0.024977187188072941, 0.027895862745880285, 0.031950372055218557, 0.037588588778261871, 0.04533056233583712, 0.055822234840144924, 0.069968350347510908, 0.087776687537675493, 0.10737468377114787, 0.12821035076580206, 
0.15044469745596642, 0.17362187230229492, 0.19669809143045736, 0.21912315243493788, 0.24068652391295411, 0.26145843360157772, 0.28133441197302655, 0.30004538339492859, 0.31844882312742462, 0.33615129810939109, 0.35334033804825027, 0.37028646056272063, 0.38746091670143112, 0.40453870150700727, 0.42159077576957132, 0.43867880808116239, 0.45623948783429258, 0.47478550214217563, 0.49380275701393744, 0.51312039593731251, 0.53229698140034198, 0.55129315877161866, 0.56978888153128104, 0.58710361032175962, 0.58341425123099588, 0.57479639773828839, 0.56606344615541682, 0.55724171652949916, 0.54835385582575247, 0.53942198763233962, 0.53046261446303877, 0.52148646278510402, 0.51250007279640031, 0.50350876502613329, 0.49451906047773736, 0.4855352746837413, 0.47656089178059291, 0.46755740288318343, 0.4550004363416415, 0.43598191519521617, 0.41988712312866533, 0.40429718421512878, 0.38412957700706341, 0.36194152749150998, 0.34490944708774768, 0.33089537416508286, 0.31843692371717469, 0.30603079364513119, 0.29603205388303006, 0.28729132574620264, 0.27912308644933581, 0.27148434513119607, 0.26432819664136431, 0.25750239837768163, 0.25071881670058394, 0.24356039567950521, 0.2355533354243062, 0.20509731910932, 0.16672867150527124, 0.1341296755877526, 0.11125955554618865, 0.09692701936014482, 0.087328176919445388, 0.080094259688856292, 0.073799464543419666, 0.067944475445671365, 0.062342321861094521, 0.0569805979668234, 0.051942974839642352, 0.047580918705391967, 0.044127615724677841, 0.0418341075180933, 0.040870690190047604, 0.041480708123150085, 0.043897957838945616, 0.048196889426332976, 0.054137563561365593, 0.061440216297928292, 0.069867357712817768, 0.079160725920755168, 0.089016824414138662, 0.099169587281046315, 0.1095928058510479, 0.12022437353255157, 0.13087469640174931, 0.14144293620997725, 0.15190951272018277, 0.16220138318815183, 0.17227253066513942, 0.18214970522282159, 0.19181248461013878, 0.20099418417181689, 0.20942586270019001, 0.21688376406869997, 0.22317193882775607, 0.22807863521418933, 0.23145465177744451, 0.22275170134694525, 0.21167456568841683, 0.20448650060538701, 0.20035195998786937, 0.19873050852960142, 0.19894739292327762, 0.19976454924926443, 0.20136306785054825, 0.20410801332309747, 0.208202158687575, 0.21367928648426432, 0.22080903867324628, 0.22973684920775739, 0.23947452601540703, 0.25015722368718291, 0.26154577923034655, 0.2737333633066949, 0.28648040946590692, 0.29942957200956394, 0.31236907833383776, 0.32522048974438211, 0.33787977387002388, 0.35049395899871205, 0.36298438368904795, 0.37541899208156049, 0.38767793908429993, 0.39988864021492782, 0.41195610077681699, 0.42344726140834626, 0.43476462884645806, 0.44605983265929711, 0.4576986742113151, 0.45586726829610463, 0.44922601441496973, 0.44168452668729835, 0.43387591145471482, 0.42583828888101971, 0.41760581978617944, 0.40926082597855384, 0.4009653400758274, 0.39284487981410626, 0.38504750367582236, 0.37762077199750926, 0.37058335307091395, 0.36392956087081529, 0.35746851899807813, 0.35095083451569509, 0.34413357374603992, 0.3369217350490803, 0.32934274664472091, 0.32147511773380677, 0.31345560073080464, 0.30563707812089186, 0.29831314634318001, 0.29163826314367619, 0.2857196545217443, 0.28041523734778556, 0.27550989908860879, 0.27069205278198899, 0.26562146828333499, 0.26008520921592099, 0.25395035730782478, 0.24722689534425887, 0.24011313516630381, 0.23276222471534966, 0.2254231858576568, 0.21838761404941182, 0.21171650493351718, 0.2053682577636195, 0.19925909011478821, 0.19334632451152212, 0.18753526802757964, 
0.18139200632675026, 0.17463581143641826, 0.16726267053558677, 0.1592823969674432, 0.15083312656761735, 0.14221287388185461, 0.13302046581134577, 0.12397988087238353, 0.11652444872694806, 0.11121051712204368, 0.10801788664069349, 0.10649137286269583, 0.10692295445348236, 0.10970887586207011, 0.11257798368982895, 0.11407552833474931, 0.11437870959615407, 0.11507588655366244, 0.11623556110471292, 0.11833754623617672, 0.12358299844399326, 0.1354359613669962, 0.15561082504716245, 0.18405330343961113, 0.22328937963124634, 0.27501647400376261, 0.32883204162150698, 0.37966690910521178, 0.40563193821719107, 0.40271430308450157, 0.39926529436010466, 0.39541915880995082, 0.39125950000896736, 0.38683634449565146, 0.3821740806972011, 0.37723632322475886, 0.37216271944678503, 0.36703693607250654, 0.36184147516213638, 0.35664499640077069, 0.35115870679770095, 0.34283220688406396, 0.33476770880169132, 0.32656808560457296, 0.31784571460652505, 0.30998989903988661, 0.30349254148780686, 0.29821422662603986, 0.2952734732313389, 0.29378878657366364, 0.29304146771484824, 0.29280987307579681, 0.29294884761108991, 0.29463522395066244, 0.29667192503740564, 0.2986776559958283, 0.30085995623138612, 0.30344545200431589, 0.29031957100029954, 0.28320405268126264, 0.28340007925518212, 0.28384009098514856, 0.28437205864600462, 0.2848820680207374, 0.28526618756878463, 0.28545394325725021, 0.27608205256033602, 0.2710246538612559, 0.26809391189736248, 0.26534429147896643, 0.26422949415775154, 0.26535054160673949, 0.26808620795502736, 0.27094846083018215, 0.27294021055971107, 0.27298688704727608, 0.27291271616871027, 0.27286867861446901, 0.27286642058724797, 0.27292487121503878, 0.27307411858764896, 0.27330852543614587, 0.27357213283170573, 0.27423583772879678, 0.27512977403136363, 0.27646640834786812, 0.28079849986837918, 0.28784971517512037, 0.29128857096087835, 0.29084310546334524, 0.28987632769887078, 0.28697612648899906, 0.27849302709726087, 0.27040182687763575, 0.26313598066475358, 0.25736953519198241, 0.25390598541742998, 0.25252011424599741, 0.25158379281330517, 0.25087691546326274, 0.25016368106446818, 0.24916780826531498, 0.24757336510865122, 0.24533318976705304, 0.24241758801732174, 0.2389234940494453, 0.23500048496638054, 0.22517230687318487, 0.20968403325682883, 0.1954168931013287, 0.18653916776633384, 0.18331583090627679, 0.18215909901887159, 0.18145950323339111, 0.18111948230345445, 0.18111924333514509, 0.1814137460697513, 0.1818535851353055, 0.18229296467977235, 0.18262723505235368, 0.1827770738594422, 0.18265681180591548, 0.1821994799425449, 0.17608807141805072, 0.16872983107947212, 0.16240144702190812, 0.15776702584744981, 0.15464840228225418, 0.15237019406976063, 0.15069387855224797, 0.14957855280129848, 0.14887084600985745, 0.14944410780100323, 0.15119699571390499, 0.1536553381027834, 0.15630072203410425, 0.15934025175622293, 0.16403903814992299, 0.16937677296024786, 0.17377070391008656, 0.17912562063261195, 0.18733224310885299, 0.19879874326035937, 0.21231634467295796, 0.22825017488658014, 0.24753159238992894, 0.25631758902797996, 0.25757325062304964, 0.25869443202710857, 0.25978688224240926, 0.26099281158466298, 0.26238203918045533, 0.26398376588674261, 0.26574486564100647, 0.26766362036726948, 0.26971398165645072, 0.27181052886496732, 0.27387207451181622, 0.27594100769380986, 0.27798838385600128, 0.28017436118818662, 0.2824514682885908, 0.28474846618476674, 0.28705095901637745, 0.28919778908299615, 0.29114421238617272, 0.29274340523878489, 0.2939492708455681])
possion = np.array([3000, 3005, 3010, 3015, 3020, 3025, 3030, 3035, 3040, 3045, 3050, 3055, 3060, 3065, 3070, 3075, 3080, 3085, 3090, 3095, 3100, 3105, 3110, 3115, 3120, 3125, 3130, 3135, 3140, 3145, 3150, 3155, 3160, 3165, 3170, 3175, 3180, 3185, 3190, 3195, 3200, 3205, 3210, 3215, 3220, 3225, 3230, 3235, 3240, 3245, 3250, 3255, 3260, 3265, 3270, 3275, 3280, 3285, 3290, 3295, 3300, 3305, 3310, 3315, 3320, 3325, 3330, 3335, 3340, 3345, 3350, 3355, 3360, 3365, 3370, 3375, 3380, 3385, 3390, 3395, 3400, 3405, 3410, 3415, 3420, 3425, 3430, 3435, 3440, 3445, 3450, 3455, 3460, 3465, 3470, 3475, 3480, 3485, 3490, 3495, 3500, 3505, 3510, 3515, 3520, 3525, 3530, 3535, 3540, 3545, 3550, 3555, 3560, 3565, 3570, 3575, 3580, 3585, 3590, 3595, 3600, 3605, 3610, 3615, 3620, 3625, 3630, 3635, 3640, 3645, 3650, 3655, 3660, 3665, 3670, 3675, 3680, 3685, 3690, 3695, 3700, 3705, 3710, 3715, 3720, 3725, 3730, 3735, 3740, 3745, 3750, 3755, 3760, 3765, 3770, 3775, 3780, 3785, 3790, 3795, 3800, 3805, 3810, 3815, 3820, 3825, 3830, 3835, 3840, 3845, 3850, 3855, 3860, 3865, 3870, 3875, 3880, 3885, 3890, 3895, 3900, 3905, 3910, 3915, 3920, 3925, 3930, 3935, 3940, 3945, 3950, 3955, 3960, 3965, 3970, 3975, 3980, 3985, 3990, 3995, 4000, 4005, 4010, 4015, 4020, 4025, 4030, 4035, 4040, 4045, 4050, 4055, 4060, 4065, 4070, 4075, 4080, 4085, 4090, 4095, 4100, 4105, 4110, 4115, 4120, 4125, 4130, 4135, 4140, 4145, 4150, 4155, 4160, 4165, 4170, 4175, 4180, 4185, 4190, 4195, 4200, 4205, 4210, 4215, 4220, 4225, 4230, 4235, 4240, 4245, 4250, 4255, 4260, 4265, 4270, 4275, 4280, 4285, 4290, 4295, 4300, 4305, 4310, 4315, 4320, 4325, 4330, 4335, 4340, 4345, 4350, 4355, 4360, 4365, 4370, 4375, 4380, 4385, 4390, 4395, 4400, 4405, 4410, 4415, 4420, 4425, 4430, 4435, 4440, 4445, 4450, 4455, 4460, 4465, 4470, 4475, 4480, 4485, 4490, 4495, 4500, 4505, 4510, 4515, 4520, 4525, 4530, 4535, 4540, 4545, 4550, 4555, 4560, 4565, 4570, 4575, 4580, 4585, 4590, 4595, 4600, 4605, 4610, 4615, 4620, 4625, 4630, 4635, 4640, 4645, 4650, 4655, 4660, 4665, 4670, 4675, 4680, 4685, 4690, 4695, 4700, 4705, 4710, 4715, 4720, 4725, 4730, 4735, 4740, 4745, 4750, 4755, 4760, 4765, 4770, 4775, 4780, 4785, 4790, 4795, 4800, 4805, 4810, 4815, 4820, 4825, 4830, 4835, 4840, 4845, 4850, 4855, 4860, 4865, 4870, 4875, 4880, 4885, 4890, 4895, 4900, 4905, 4910, 4915, 4920, 4925, 4930, 4935, 4940, 4945, 4950, 4955, 4960, 4965, 4970, 4975, 4980, 4985, 4990, 4995, 5000, 5005, 5010, 5015, 5020, 5025, 5030, 5035, 5040, 5045, 5050, 5055, 5060, 5065, 5070, 5075, 5080, 5085, 5090, 5095, 5100, 5105, 5110, 5115, 5120, 5125, 5130, 5135, 5140, 5145, 5150, 5155, 5160, 5165, 5170, 5175, 5180, 5185, 5190, 5195, 5200, 5205, 5210, 5215, 5220, 5225, 5230, 5235, 5240, 5245, 5250, 5255, 5260, 5265, 5270, 5275, 5280, 5285, 5290, 5295, 5300, 5305, 5310, 5315, 5320, 5325, 5330, 5335, 5340, 5345, 5350, 5355, 5360, 5365, 5370, 5375, 5380, 5385, 5390, 5395, 5400, 5405, 5410, 5415, 5420, 5425, 5430, 5435, 5440, 5445, 5450, 5455, 5460, 5465, 5470, 5475, 5480, 5485, 5490, 5495, 5500, 5505, 5510, 5515, 5520, 5525, 5530, 5535, 5540, 5545, 5550, 5555, 5560, 5565, 5570, 5575, 5580, 5585, 5590, 5595, 5600, 5605, 5610, 5615, 5620, 5625, 5630, 5635, 5640, 5645, 5650, 5655, 5660, 5665, 5670, 5675, 5680, 5685, 5690, 5695, 5700, 5705, 5710, 5715, 5720, 5725, 5730, 5735, 5740, 5745, 5750, 5755, 5760, 5765, 5770, 5775, 5780, 5785, 5790, 5795, 5800, 5805, 5810, 5815, 5820, 5825, 5830, 5835, 5840, 5845, 5850, 5855, 5860, 5865, 5870, 5875, 5880, 5885, 5890, 5895, 5900, 5905, 5910, 5915, 5920, 5925, 5930, 5935, 5940, 
5945, 5950, 5955, 5960, 5965, 5970, 5975, 5980, 5985, 5990, 5995, 6000, 6005, 6010, 6015, 6020, 6025, 6030, 6035, 6040, 6045, 6050, 6055, 6060, 6065, 6070, 6075, 6080, 6085, 6090, 6095, 6100, 6105, 6110, 6115, 6120, 6125, 6130, 6135, 6140, 6145, 6150, 6155, 6160, 6165, 6170, 6175, 6180, 6185, 6190, 6195, 6200, 6205, 6210, 6215, 6220, 6225, 6230, 6235, 6240, 6245, 6250, 6255, 6260, 6265, 6270, 6275, 6280, 6285, 6290, 6295, 6300, 6305, 6310, 6315, 6320, 6325, 6330, 6335, 6340, 6345, 6350, 6355, 6360, 6365, 6370, 6375, 6380, 6385, 6390, 6395, 6400, 6405, 6410, 6415, 6420, 6425, 6430, 6435, 6440, 6445, 6450, 6455, 6460, 6465, 6470, 6475, 6480, 6485, 6490, 6495, 6500, 6505, 6510, 6515, 6520, 6525, 6530, 6535, 6540, 6545, 6550, 6555, 6560, 6565, 6570, 6575, 6580, 6585, 6590, 6595, 6600, 6605, 6610, 6615, 6620, 6625, 6630, 6635, 6640, 6645, 6650, 6655, 6660, 6665, 6670, 6675, 6680, 6685, 6690, 6695, 6700, 6705, 6710, 6715, 6720, 6725, 6730, 6735, 6740, 6745, 6750, 6755, 6760, 6765, 6770, 6775, 6780, 6785, 6790, 6795, 6800, 6805, 6810, 6815, 6820, 6825, 6830, 6835, 6840, 6845, 6850, 6855, 6860, 6865, 6870, 6875, 6880, 6885, 6890, 6895, 6900, 6905, 6910, 6915, 6920, 6925, 6930, 6935, 6940, 6945, 6950, 6955, 6960, 6965, 6970, 6975, 6980, 6985, 6990, 6995, 7000, 7005, 7010, 7015, 7020, 7025, 7030, 7035, 7040, 7045, 7050, 7055, 7060, 7065, 7070, 7075, 7080, 7085, 7090, 7095, 7100, 7105, 7110, 7115, 7120, 7125, 7130, 7135, 7140, 7145, 7150, 7155, 7160, 7165, 7170, 7175, 7180, 7185, 7190, 7195, 7200, 7205, 7210, 7215, 7220, 7225, 7230, 7235, 7240, 7245, 7250, 7255, 7260, 7265, 7270, 7275, 7280, 7285, 7290, 7295, 7300, 7305, 7310, 7315, 7320, 7325, 7330, 7335, 7340, 7345, 7350, 7355, 7360, 7365, 7370, 7375, 7380, 7385, 7390, 7395, 7400, 7405, 7410, 7415, 7420, 7425, 7430, 7435, 7440, 7445, 7450, 7455, 7460, 7465, 7470, 7475, 7480, 7485, 7490, 7495, 7500, 7505, 7510, 7515, 7520, 7525, 7530, 7535, 7540, 7545, 7550, 7555, 7560, 7565, 7570, 7575, 7580, 7585, 7590, 7595, 7600, 7605, 7610, 7615, 7620, 7625, 7630, 7635, 7640, 7645, 7650, 7655, 7660, 7665, 7670, 7675, 7680, 7685, 7690, 7695, 7700, 7705, 7710, 7715, 7720, 7725, 7730, 7735, 7740, 7745, 7750, 7755, 7760, 7765, 7770, 7775, 7780, 7785, 7790, 7795, 7800, 7805, 7810, 7815, 7820, 7825, 7830, 7835, 7840, 7845, 7850, 7855, 7860, 7865, 7870, 7875, 7880, 7885, 7890, 7895, 7900, 7905, 7910, 7915, 7920, 7925, 7930, 7935, 7940, 7945, 7950, 7955, 7960, 7965, 7970, 7975, 7980, 7985, 7990, 7995])
plt.plot(possion, distance)
plt.show()
| 29,739 | 284.961538 | 21,221 | py |
OpenBCIPython | OpenBCIPython-master/analyzer/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/externals/mne_openbci.py | """Conversion tool from OpenBCI to MNE Raw Class"""
# Authors: Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import warnings
np = None
try:
import numpy as np
except ImportError:
raise ImportError('Numpy is needed to use function.')
mne = None
try:
from mne.utils import verbose, logger
from mne.io.meas_info import create_info
from mne.io.base import _BaseRaw
except ImportError:
raise ImportError('MNE is needed to use function.')
class RawOpenBCI(_BaseRaw):
"""Raw object from OpenBCI file
Parameters
----------
input_fname : str
Path to the OpenBCI file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Default is None.
misc : list or tuple
List of indices that should be designated MISC channels.
        Default is (-3, -2, -1), which are the accelerometer channels.
stim_channel : int | None
The channel index (starting at 0).
If None (default), there will be no stim channel added.
scale : float
The scaling factor for EEG data. Units for MNE are in volts.
OpenBCI data are typically stored in microvolts. Default scale
factor is 1e-6.
sfreq : int
The sampling frequency of the data. OpenBCI defaults are 250 Hz.
missing_tol : int
The tolerance for interpolating missing samples. Default is 1. If the
number of contiguous missing samples is greater than tolerance, then
values are marked as NaN.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, montage=None, eog=None,
misc=(-3, -2, -1), stim_channel=None, scale=1e-6, sfreq=250,
missing_tol=1, preload=True, verbose=None):
bci_info = {'missing_tol': missing_tol, 'stim_channel': stim_channel}
if not eog:
eog = list()
if not misc:
misc = list()
nsamps, nchan = self._get_data_dims(input_fname)
last_samps = [nsamps - 1]
ch_names = ['EEG %03d' % num for num in range(1, nchan + 1)]
ch_types = ['eeg'] * nchan
if misc:
misc_names = ['MISC %03d' % ii for ii in range(1, len(misc) + 1)]
misc_types = ['misc'] * len(misc)
for ii, mi in enumerate(misc):
ch_names[mi] = misc_names[ii]
ch_types[mi] = misc_types[ii]
if eog:
eog_names = ['EOG %03d' % ii for ii in range(len(eog))]
eog_types = ['eog'] * len(eog)
for ii, ei in enumerate(eog):
ch_names[ei] = eog_names[ii]
ch_types[ei] = eog_types[ii]
if stim_channel:
ch_names[stim_channel] = 'STI 014'
ch_types[stim_channel] = 'stim'
# fix it for eog and misc marking
info = create_info(ch_names, sfreq, ch_types, montage)
super(RawOpenBCI, self).__init__(info, last_samps=last_samps,
raw_extras=[bci_info],
filenames=[input_fname],
preload=False, verbose=verbose)
# load data
if preload:
self.preload = preload
logger.info('Reading raw data from %s...' % input_fname)
self._data, _ = self._read_segment()
def _read_segment_file(self, data, idx, offset, fi, start, stop,
cals, mult):
"""Read a chunk of raw data"""
input_fname = self._filenames[fi]
data_ = np.genfromtxt(input_fname, delimiter=',', comments='%',
skip_footer=1)
"""
Dealing with the missing data
-----------------------------
When recording with OpenBCI over Bluetooth, it is possible for some of
the data packets, samples, to not be recorded. This does not happen
often but it poses a problem for maintaining proper sampling periods.
OpenBCI data format combats this by providing a counter on the sample
to know which ones are missing.
Solution
--------
Interpolate the missing samples by resampling the surrounding samples.
1. Find where the missing samples are.
2. Deal with the counter reset (resets after cycling a byte).
3. Resample given the diffs.
4. Insert resampled data in the array using the diff indices
(index + 1).
        5. If the number of missing samples is greater than missing_tol, the
           values are replaced with np.nan.
"""
# counter goes from 0 to 255, maxdiff is 255.
# make diff one like others.
missing_tol = self._raw_extras[fi]['missing_tol']
diff = np.abs(np.diff(data_[:, 0]))
diff = np.mod(diff, 254) - 1
missing_idx = np.where(diff != 0)[0]
missing_samps = diff[missing_idx].astype(int)
if missing_samps.size:
missing_nsamps = np.sum(missing_samps, dtype=int)
missing_cumsum = np.insert(np.cumsum(missing_samps), 0, 0)[:-1]
missing_data = np.empty((missing_nsamps, data_.shape[-1]),
dtype=float)
insert_idx = list()
for idx_, nn, ii in zip(missing_idx, missing_samps,
missing_cumsum):
                missing_data[ii:ii + nn] = np.mean(data_[(idx_, idx_ + 1), :], axis=0)
if nn > missing_tol:
missing_data[ii:ii + nn] *= np.nan
warnings.warn('The number of missing samples exceeded the '
'missing_tol threshold.')
insert_idx.append([idx_] * nn)
insert_idx = np.hstack(insert_idx)
data_ = np.insert(data_, insert_idx, missing_data, axis=0)
# data_ dimensions are samples by channels. transpose for MNE.
data_ = data_[start:stop, 1:].T
data[:, offset:offset + stop - start] = \
np.dot(mult, data_[idx]) if mult is not None else data_[idx]
def _get_data_dims(self, input_fname):
"""Briefly scan the data file for info"""
# raw data formatting is nsamps by nchans + counter
data = np.genfromtxt(input_fname, delimiter=',', comments='%',
skip_footer=1)
diff = np.abs(np.diff(data[:, 0]))
diff = np.mod(diff, 254) - 1
missing_idx = np.where(diff != 0)[0]
missing_samps = diff[missing_idx].astype(int)
nsamps, nchan = data.shape
# add the missing samples
nsamps += sum(missing_samps)
# remove the tracker column
nchan -= 1
del data
return nsamps, nchan
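# Usage sketch (the file name below is hypothetical):
#   raw = read_raw_openbci('OpenBCI-RAW-recording.txt', stim_channel=None)
#   print(raw.info)
#   data, times = raw[:, :]
# read_raw_openbci (defined below) is a thin wrapper around the RawOpenBCI
# constructor above.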
def read_raw_openbci(input_fname, montage=None, eog=None, misc=(-3, -2, -1),
stim_channel=None, scale=1e-6, sfreq=250, missing_tol=1,
preload=True, verbose=None):
"""Raw object from OpenBCI file
Parameters
----------
input_fname : str
Path to the OpenBCI file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Default is None.
misc : list or tuple
List of indices that should be designated MISC channels.
        Default is (-3, -2, -1), which are the accelerometer channels.
stim_channel : str | int | None
The channel name or channel index (starting at 0).
        -1 corresponds to the last channel.
        If None (default), there will be no stim channel added.
scale : float
The scaling factor for EEG data. Units for MNE are in volts.
OpenBCI data are typically stored in microvolts. Default scale
factor is 1e-6.
sfreq : int
The sampling frequency of the data. OpenBCI defaults are 250 Hz.
missing_tol : int
The tolerance for interpolating missing samples. Default is 1. If the
number of contiguous missing samples is greater than tolerance, then
values are marked as NaN.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : Instance of RawOpenBCI
A Raw object containing OpenBCI data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
raw = RawOpenBCI(input_fname=input_fname, montage=montage, eog=eog,
misc=misc, stim_channel=stim_channel, scale=scale,
sfreq=sfreq, missing_tol=missing_tol, preload=preload,
verbose=verbose)
return raw
| 9,549 | 39.987124 | 79 | py |
OpenBCIPython | OpenBCIPython-master/utils/dataset_reader_utils.py | import os
def load_dataset_from_ogg(name, Clip):
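    # Expected layout (inferred from the checks below): <name>/ contains
    # sub-directories whose names start with three digits, e.g. '001-dog/',
    # each holding .ogg clips that are wrapped in the supplied Clip class.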
clips = []
for directory in sorted(os.listdir('{0}/'.format(name))):
directory = '{0}/{1}'.format(name, directory)
if os.path.isdir(directory) and os.path.basename(directory)[0:3].isdigit():
print('Parsing ' + directory)
category = []
for clip in sorted(os.listdir(directory)):
if clip[-3:] == 'ogg':
print ('{0}/{1}'.format(directory, clip))
category.append(Clip('{0}/{1}'.format(directory, clip), 'ogg'))
clips.append(category)
print('All {0} recordings loaded.'.format(name))
return clips
| 679 | 36.777778 | 83 | py |
OpenBCIPython | OpenBCIPython-master/utils/feature_extractor.py |
def _get_frame(audio, index, frame):
if index < 0:
return None
return audio.raw[(index * frame):(index + 1) * frame]
def _get_frame_array(audio, index, frame):
if index < 0:
return None
return audio.data[(index * frame):(index + 1) * frame] | 274 | 26.5 | 58 | py |
OpenBCIPython | OpenBCIPython-master/utils/data_types_utils.py | import tensorflow as tf
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
| 334 | 21.333333 | 73 | py |
OpenBCIPython | OpenBCIPython-master/utils/utils.py | import numpy as np
def get_label(class_number, number_of_class):
label = np.zeros(number_of_class, dtype=np.int)
label[class_number - 1] = 1
return label | 167 | 23 | 51 | py |
OpenBCIPython | OpenBCIPython-master/utils/dataset_writer_utils.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.interactive(False)
import Image
import numpy as np
import tensorflow as tf
import librosa.display
from data_types_utils import _int64_feature, _bytes_feature
def read_tf_recode(config):
reconstructed_clips = []
record_iterator = tf.python_io.tf_record_iterator(path=config.tfrecords_filename)
for string_record in record_iterator:
raw_clip = tf.train.Example()
raw_clip.ParseFromString(string_record)
height = int(raw_clip.features.feature['clip_height'].int64_list.value[0])
width = int(raw_clip.features.feature['clip_width'].int64_list.value[0])
img_string = (raw_clip.features.feature['clip_raw'].bytes_list.value[0])
label = (raw_clip.features.feature['clip_label_raw'].bytes_list.value[0])
img_1d = np.fromstring(img_string, dtype=np.float64)
        label = np.fromstring(label, dtype=np.int64)
reconstructed_clip = img_1d.reshape((height, width, -1))
reconstructed_clip_label = label.reshape((1, config.number_of_class, -1))
reconstructed_clips.append((reconstructed_clip, reconstructed_clip_label))
return reconstructed_clips
def create_sample_from_data(clip, clip_label):
feature_vector = clip.tostring()
clip_label = clip_label.tostring()
return tf.train.Example(features=tf.train.Features(feature={
'clip_height': _int64_feature(1),
'clip_width': _int64_feature(clip.shape[0]),
'clip_raw': _bytes_feature(feature_vector),
'clip_label_raw': _bytes_feature(clip_label)}))
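# Usage sketch (file name is illustrative): serialise one feature vector and its
# one-hot label, then append it to a TFRecord file with the TF 1.x writer API:
#   writer = tf.python_io.TFRecordWriter("train.tfrecords")
#   writer.write(create_sample_from_data(clip, clip_label).SerializeToString())
#   writer.close()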
def create_sample_from_image(clip_filename, clip_label, config):
image_width = int(config["processing"]["train"]["generated_image_width"])
image_height = int(config["processing"]["train"]["generated_image_height"])
image = Image.open(clip_filename)
image = image.resize((image_width, image_height),Image.ANTIALIAS)
image = np.asarray(image)
image = image.flatten()
# np.reshape(image, (-1, image.shape[1] * image.shape[0]))
feature_vector = image.tostring()
clip_label = clip_label.tostring()
clip_raw = tf.train.Example(features=tf.train.Features(feature={
        'clip_height': _int64_feature(image_height),
        'clip_width': _int64_feature(image_width),
'clip_raw': _bytes_feature(feature_vector),
'clip_label_raw': _bytes_feature(clip_label)}))
return clip_raw
def draw_sample_plot_and_save(clip, clip_type, index, config):
result = []
image_width=int(config["processing"]["train"]["generated_image_width"])
image_height=int(config["processing"]["train"]["generated_image_height"])
figure = plt.figure(figsize=(
np.ceil(image_width + image_width * 0.2),
np.ceil(image_height + image_height * 0.2)), dpi=1)
axis = figure.add_subplot(111)
plt.axis('off')
plt.tick_params(axis='both', left='off', top='off', right='off', bottom='off',
labelleft='off',
labeltop='off',
labelright='off', labelbottom='off')
result.append(clip)
result = np.array(result)
librosa.display.specshow(result, sr=int(config["sampling_rate"]), x_axis='time', y_axis='mel', cmap='RdBu_r')
extent = axis.get_window_extent().transformed(figure.dpi_scale_trans.inverted())
clip_filename = "%s%s%s%s" % (config["train_dir_abs_location"], clip_type, str(index), "_.jpg")
plt.savefig(clip_filename, format='jpg', bbox_inches=extent, pad_inches=0)
plt.close(figure)
return clip_filename
def read_and_decode(filename_queue, config):
image_width = int(config["processing"]["train"]["generated_image_width"])
image_height = int(config["processing"]["train"]["generated_image_height"])
number_of_channels = int(config["number_of_channels"])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'clip_height': tf.FixedLenFeature([], tf.int64),
'clip_width': tf.FixedLenFeature([], tf.int64),
'clip_raw': tf.FixedLenFeature([], tf.string),
'clip_label_raw': tf.FixedLenFeature([], tf.string)
})
    image = tf.decode_raw(features['clip_raw'], tf.uint8)
label = tf.decode_raw(features['clip_label_raw'], tf.int64)
image = tf.reshape(image, [1, image_width*image_height*3])
image = tf.cast(image, tf.float32)
label = tf.reshape(label, [1, int(config["processing"]["train"]["number_of_class"])])
return image, label
# def inputs(config):
# with tf.name_scope('input'):
# filename_queue = tf.train.string_input_producer([config.tfrecords_filename],
# num_epochs=config.num_epochs)
# image, label = read_and_decode(filename_queue, config)
# images, sparse_labels = tf.train.shuffle_batch(
# [image, label], batch_size=config.batch_size, num_threads=config.batch_process_threads_num,
# capacity=1000 + 3 * config.batch_size,
# min_after_dequeue=100)
# return images, sparse_labels
| 5,196 | 44.587719 | 113 | py |
OpenBCIPython | OpenBCIPython-master/utils/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/utils/Audio.py | import pydub
import numpy as np
class Audio:
def __init__(self, path=None, file_type=None, is_raw_data=True, data=None):
self.is_raw_data = is_raw_data
if self.is_raw_data:
self.path = path
self.file_type = file_type
else:
self.data = data
self.raw = []
def __enter__(self):
if self.is_raw_data:
if self.file_type == "ogg":
self.data = pydub.AudioSegment.from_ogg(self.path)
self.data = self.data.overlay(pydub.AudioSegment.from_file(self.path))
self.raw = (np.fromstring(self.data._data, dtype="int16") + 0.5) / (0x7FFF + 0.5)
else:
print ("file type is not supported yet... add new file handler to this format..")
return (self)
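    # Usage sketch (the path below is hypothetical):
    #   with Audio('dataset/001-dog/clip.ogg', 'ogg') as clip:
    #       samples = clip.raw  # float samples scaled to roughly [-1, 1]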
def __exit__(self, exception_type, exception_value, traceback):
del self.data
del self.raw | 927 | 34.692308 | 97 | py |
OpenBCIPython | OpenBCIPython-master/utils/data_types.py | import tensorflow as tf
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
| 333 | 22.857143 | 73 | py |
OpenBCIPython | OpenBCIPython-master/lib/dtw.py | from numpy import array, zeros, argmin, inf, equal, ndim
from scipy.spatial.distance import cdist
import pandas as pd
import numpy as np
def dtw(x, y, dist):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure
    Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the warp path.
"""
assert len(x)
assert len(y)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:] # view
for i in range(r):
for j in range(c):
D1[i, j] = dist(x[i], y[j])
C = D1.copy()
for i in range(r):
for j in range(c):
D1[i, j] += min(D0[i, j], D0[i, j+1], D0[i+1, j])
if len(x)==1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1] / sum(D1.shape), C, D1, path
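# Minimal usage sketch for dtw() (values are illustrative):
#   x = [1., 2., 3., 4., 3.]
#   y = [1., 1., 2., 4., 4., 3.]
#   d, cost, acc, path = dtw(x, y, dist=lambda a, b: abs(a - b))
# d is the accumulated cost normalised by len(x) + len(y); path holds the two
# aligned index arrays.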
def fastdtw(x, y, dist):
"""
Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
Instead of iterating through each element and calculating each distance,
this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
:param array x: N1*M array
:param array y: N2*M array
:param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
    If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev',
    'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski',
    'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
    'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
    Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the warp path.
"""
assert len(x)
assert len(y)
if ndim(x)==1:
x = x.reshape(-1,1)
if ndim(y)==1:
y = y.reshape(-1,1)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:]
D0[1:,1:] = cdist(x,y,dist)
C = D1.copy()
for i in range(r):
for j in range(c):
D1[i, j] += min(D0[i, j], D0[i, j+1], D0[i+1, j])
if len(x)==1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1] / sum(D1.shape), C, D1, path
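# fastdtw() takes the same inputs but lets scipy's cdist build the whole cost
# matrix at once, so the distance can also be a metric name (illustrative call):
#   d, cost, acc, path = fastdtw(np.array([1., 2., 3.]), np.array([2., 2., 4.]), 'euclidean')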
def _traceback(D):
i, j = array(D.shape) - 2
p, q = [i], [j]
while ((i > 0) or (j > 0)):
tb = argmin((D[i, j], D[i, j+1], D[i+1, j]))
if (tb == 0):
i -= 1
j -= 1
elif (tb == 1):
i -= 1
else: # (tb == 2):
j -= 1
p.insert(0, i)
q.insert(0, j)
return array(p), array(q)
def nomalize_signal(input_signal):
mean = np.mean(input_signal, axis=0)
input_signal -= mean
return input_signal / np.std(input_signal, axis=0)
if __name__ == '__main__':
# if 0: # 1-D numeric
from sklearn.metrics.pairwise import manhattan_distances
kinect_angle_data = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/train/result/reconstructed_bycept_kinect__angles_.csv").dropna()
nomalized_signal = nomalize_signal(kinect_angle_data)
# mapping = interp1d([-1,1],[0,180])
result=[]
possion=[]
x = np.array(nomalized_signal.ix[:, 1][4400:5000]).tolist()
size = 5000-4400
counter=3000
for i in range(0,1000):
y = np.array(nomalized_signal.ix[:, 1][counter:counter+size]).tolist()
possion.append(counter)
counter+=5
dist_fun = manhattan_distances
dist, cost, acc, path = dtw(x, y, dist_fun)
print dist
result.append(dist)
print result
print possion
# else: # 2-D numeric
# from sklearn.metrics.pairwise import euclidean_distances
# x = [[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [4, 3], [2, 3], [1, 1], [2, 2], [0, 1]]
# y = [[1, 0], [1, 1], [1, 1], [2, 1], [4, 3], [4, 3], [2, 3], [3, 1], [1, 2], [1, 0]]
# dist_fun = euclidean_distances
# vizualize
# from matplotlib import pyplot as plt
# plt.imshow(cost.T, origin='lower', cmap=plt.cm.Reds, interpolation='nearest')
# plt.plot(path[0], path[1], '-o') # relation
# plt.xticks(range(len(x)), x)
# plt.yticks(range(len(y)), y)
# plt.xlabel('x')
# plt.ylabel('y')
# plt.axis('tight')
# plt.title('Minimum distance: {}'.format(dist))
# plt.show()
| 4,748 | 33.165468 | 343 | py |
OpenBCIPython | OpenBCIPython-master/lib/hmm.py | import json
from scipy.signal import butter, filtfilt
import pandas as pd
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from pandas.tools.plotting import scatter_matrix
from scipy.interpolate import interp1d
from preprocessing.preprocessing import PreProcessor
from preprocessing.ssa import SingularSpectrumAnalysis
import scipy.linalg as lin
class HMM:
def __init__(self, config, pattern, data):
self.pattern = pattern
self.data = data
self.A, self.pA, self.w, self.K = self.init()
self.lam = .1 # precision parameter for the weights prior
self.eta = 5. # precision parameter for the autoregressive portion of the model
self.N = 1 # number of sequences
self.M = 2 # number of dimensions - the second variable is for the bias term
self.T = len(self.data) # length of sequences
self.x = np.ones((self.T + 1, self.M)) # sequence data (just one sequence)
self.x[0, 1] = 1
self.x[1:, 0] = self.data
# emissions
self.e = np.zeros((self.T, self.K))
# residuals
self.v = np.zeros((self.T, self.K))
# store the forward and backward recurrences
self.f = np.zeros((self.T + 1, self.K))
self.fls = np.zeros((self.T + 1))
self.f[0, 0] = 1
self.b = np.zeros((self.T + 1, self.K))
self.bls = np.zeros((self.T + 1))
self.b[-1, 1:] = 1. / (self.K - 1)
# hidden states
self.z = np.zeros((self.T + 1), dtype=np.int)
# expected hidden states
self.ex_k = np.zeros((self.T, self.K))
# expected pairs of hidden states
self.ex_kk = np.zeros((self.K, self.K))
self.nkk = np.zeros((self.K, self.K))
self.number_of_iteration = 5
def init(self):
step=5
eps=.1
dat=self.pattern[::step]
K=len(dat)+1
A=np.zeros((K,K))
A[0,1]=1.
pA=np.zeros((K,K))
pA[0,1]=1.
for i in xrange(1,K-1):
A[i,i]=(step-1.+eps)/(step+2*eps)
A[i,i+1]=(1.+eps)/(step+2*eps)
pA[i,i]=1.
pA[i,i+1]=1.
A[-1,-1]=(step-1.+eps)/(step+2*eps)
A[-1,1]=(1.+eps)/(step+2*eps)
pA[-1,-1]=1.
pA[-1,1]=1.
w=np.ones( (K,2) , dtype=np.float)
w[0,1]=dat[0]
w[1:-1,1]=(dat[:-1]-dat[1:])/step
w[-1,1]=(dat[0]-dat[-1])/step
return A,pA,w,K
def fwd(self):
for t in xrange(self.T):
self.f[t+1,:]=np.dot(self.f[t,:],self.A)*self.e[t,:]
sm=np.sum(self.f[t+1,:])
self.fls[t+1]=self.fls[t]+np.log(sm)
self.f[t+1,:]/=sm
assert self.f[t+1,0]==0
def bck(self):
for t in xrange(self.T-1,-1,-1):
self.b[t,:]=np.dot(self.A,self.b[t+1,:]*self.e[t,:])
sm=np.sum(self.b[t,:])
self.bls[t]=self.bls[t+1]+np.log(sm)
self.b[t,:]/=sm
def em_step(self):
x=self.x[:-1] #current data vectors
y=self.x[1:,:1] #next data vectors predicted from current
#compute residuals
v=np.dot(x,self.w.T) # (N,K) <- (N,1) (N,K)
v-=y
self.e=np.exp(-self.eta/2*v**2,self.e)
self.fwd()
self.bck()
# compute expected hidden states
for t in xrange(len(self.e)):
self.ex_k[t,:]=self.f[t+1,:]*self.b[t+1,:]
self.ex_k[t,:]/=np.sum(self.ex_k[t,:])
# compute expected pairs of hidden states
for t in xrange(len(self.f)-1) :
self.ex_kk=self.A*self.f[t,:][:,np.newaxis]*self.e[t,:]*self.b[t+1,:]
self.ex_kk/=np.sum(self.ex_kk)
self.nkk+=self.ex_kk
# max w/ respect to transition probabilities
self.A=self.pA+self.nkk
self.A/=np.sum(self.A,1)[:,np.newaxis]
# solve the weighted regression problem for emissions weights
# x and y are from above
for k in xrange(self.K):
self.ex=self.ex_k[:,k][:,np.newaxis]
self.dx=np.dot(x.T,self.ex*x)
self.dy=np.dot(x.T,self.ex*y)
self.dy.shape=(2)
self.w[k,:]=lin.solve(self.dx+self.lam*np.eye(x.shape[1]), self.dy)
#return the probability of the sequence (computed by the forward algorithm)
return self.fls[-1]
def run_em_algorithm(self):
for i in xrange(self.number_of_iteration):
print self.em_step()
# get rough boundaries by taking the maximum expected hidden state for each position
self.r = np.arange(len(self.ex_k))[np.argmax(self.ex_k, 1) < 4]
self.f = np.diff(np.diff(self.r))
for i in range(0, len(self.f)):
if (self.f[i] <= 0):
self.r[i] = 0
def plot_result(self):
plt.plot(range(self.T), self.x[1:, 0])
self.yr = [np.min(self.x[:, 0]), np.max(self.x[:, 0])]
for i in self.r:
plt.plot([i, i], self.yr, '-r')
plt.show()
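# Usage sketch (commented out; `pattern` is a short 1-D template and `data` the longer
# 1-D signal to segment -- note the `config` argument is currently unused by the class):
# model = HMM(config, pattern, data)
# model.run_em_algorithm()
# model.plot_result()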
| 5,006 | 34.764286 | 92 | py |
OpenBCIPython | OpenBCIPython-master/lib/forward_backword.py | # imports go here
import numpy as np
# functions and classes go here
def fb_alg(A_mat, O_mat, observ):
# set up
k = observ.size
(n, m) = O_mat.shape
prob_mat = np.zeros((n, k))
fw = np.zeros((n, k + 1))
bw = np.zeros((n, k + 1))
# forward part
fw[:, 0] = 1.0 / n
for obs_ind in xrange(k):
f_row_vec = np.matrix(fw[:, obs_ind])
fw[:, obs_ind + 1] = f_row_vec * np.matrix(A_mat) * np.matrix(np.diag(O_mat[:, int(observ[obs_ind])]))
fw[:, obs_ind + 1] = fw[:, obs_ind + 1] / np.sum(fw[:, obs_ind + 1])
# backward part
bw[:, -1] = 1.0
for obs_ind in xrange(k, 0, -1):
b_col_vec = np.matrix(bw[:, obs_ind]).transpose()
bw[:, obs_ind - 1] = (np.matrix(A_mat) *np.matrix(np.diag(O_mat[:, int(observ[obs_ind - 1])])) * b_col_vec).transpose()
bw[:, obs_ind - 1] = bw[:, obs_ind - 1] / np.sum(bw[:, obs_ind - 1])
# combine it
prob_mat = np.array(fw) * np.array(bw)
prob_mat = prob_mat / np.sum(prob_mat, 0)
# get out
return prob_mat, fw, bw
# main script stuff goes here
if __name__ == '__main__':
# the transition matrix
A_mat = np.array([[.6, .4], [.2, .8]])
# the observation matrix
O_mat = np.array([[.5, .5], [.15, .85]])
# sample heads or tails, 0 is heads, 1 is tails
num_obs = 15
observations1 = np.random.randn(num_obs)
observations1[observations1 > 0] = 1
observations1[observations1 <= 0] = 0
p, f, b = fb_alg(A_mat, O_mat, observations1)
print p
# change observations to reflect messed up ratio
observations2 = np.random.random(num_obs)
observations2[observations2 > .85] = 0
observations2[observations2 <= .85] = 1
# majority of the time its tails, now what?
p, f, b = fb_alg(A_mat, O_mat, observations1)
print p
p, f, b = fb_alg(A_mat, O_mat, np.hstack((observations1, observations2)))
print p | 1,897 | 34.811321 | 127 | py |
OpenBCIPython | OpenBCIPython-master/lib/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/preprocessing/kernel_regression.py | """The :mod:`sklearn.kernel_regressor` module implements the Kernel Regressor.
"""
# Author: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.base import BaseEstimator, RegressorMixin
class KernelRegression(BaseEstimator, RegressorMixin):
"""Nadaraya-Watson kernel regression with automatic bandwidth selection.
This implements Nadaraya-Watson kernel regression with (optional) automatic
    bandwidth selection of the kernel via leave-one-out cross-validation. Kernel
regression is a simple non-parametric kernelized technique for learning
a non-linear relationship between input variable(s) and a target variable.
Parameters
----------
    kernel : string or callable, default="rbf"
        Kernel to use. A string must be a valid metric name for
        sklearn.metrics.pairwise.pairwise_kernels; a callable should accept
        two samples and return a floating point number.
    gamma : float or sequence of floats, default=None
        Gamma parameter for the RBF ("bandwidth"), polynomial, exponential,
        chi2 and sigmoid kernels. Interpretation of the default value is left
        to the kernel; see the documentation for sklearn.metrics.pairwise.
        Ignored by other kernels. If a sequence of values is given, the value
        that minimizes the mean-squared error of leave-one-out
        cross-validation is selected.
See also
--------
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None):
self.kernel = kernel
self.gamma = gamma
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values
Returns
-------
self : object
Returns self.
"""
self.X = X
self.y = y
if hasattr(self.gamma, "__iter__"):
self.gamma = self._optimize_gamma(self.gamma)
return self
def predict(self, X):
"""Predict target values for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted target value.
"""
K = pairwise_kernels(self.X, X, metric=self.kernel, gamma=self.gamma)
return (K * self.y[:, None]).sum(axis=0) / K.sum(axis=0)
def _optimize_gamma(self, gamma_values):
# Select specific value of gamma from the range of given gamma_values
# by minimizing mean-squared error in leave-one-out cross validation
mse = np.empty_like(gamma_values, dtype=np.float)
for i, gamma in enumerate(gamma_values):
K = pairwise_kernels(self.X, self.X, metric=self.kernel,
gamma=gamma)
np.fill_diagonal(K, 0) # leave-one-out
Ky = K * self.y[:, np.newaxis]
y_pred = Ky.sum(axis=0) / K.sum(axis=0)
mse[i] = ((y_pred - self.y) ** 2).mean()
return gamma_values[np.nanargmin(mse)]
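# Usage sketch (commented out, synthetic data; passing a sequence for `gamma`
# triggers the leave-one-out bandwidth search inside fit()):
# X = np.sort(5 * np.random.rand(100, 1), axis=0)
# y = np.sin(X).ravel() + 0.1 * np.random.randn(100)
# kr = KernelRegression(kernel="rbf", gamma=np.logspace(-2, 2, 10))
# y_pred = kr.fit(X, y).predict(X)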
| 3,341 | 32.757576 | 79 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/preprocessing.py | from __future__ import print_function
import Queue
import json
import socket
import sys
import threading
import tensorflow as tf
import pandas as pd
from scipy.signal import butter, filtfilt
import librosa
import numpy as np
from processor import Clip
from ssa import SingularSpectrumAnalysis
class PreProcessor(threading.Thread):
def __init__(self, thread_id, input_buffer, output_buffer, config):
threading.Thread.__init__(self)
self.isRun = True
self.config = config
self.thread_id = thread_id
self.lock = threading.Lock()
self.input_buffer = input_buffer
self.output_buffer = output_buffer
self.window_size = int(config["window_size"])
self.sampling_rate = int(config["sampling_rate"])
self.low_frequency = int(config["low_frequency"])
self.high_frequency = int(config["high_frequency"])
self.order = int(config["order"])
self.train_dir = str(config["train_dir_abs_location"])
self.number_of_channels = int(config["number_of_channels"])
self.sampling_time = 1.0 / self.sampling_rate * 1.0
def run(self):
self.processor(self.thread_id)
def nomalize_signal(self, input_signal):
mean = np.mean(input_signal, axis=0)
input_signal -= mean
return input_signal / np.std(input_signal, axis=0)
def processor(self, thread_id, activity_type=None):
noise_signal = self.input_buffer[thread_id]
noise_signal = noise_signal[~np.isnan(noise_signal)]
if activity_type is not None:
train_dir = self.train_dir + "/" + str(thread_id) + "_" + activity_type + "_"
else:
train_dir = self.train_dir + "/" + str(thread_id) + "_"
with open(train_dir + "noise_signal.csv", 'a') as f:
np.savetxt(f, noise_signal, delimiter=',', fmt='%.18e')
noise_reduced_signal = self.apply_noise_reducer_filer(noise_signal)
# noise_reduced_signal = self.nomalize_signal(noise_reduced_signal)
with open(train_dir + "noise_reduced_signal.csv", 'a') as f:
np.savetxt(f, noise_reduced_signal, delimiter=',', fmt='%.18e')
# reconstructed_signal = SingularSpectrumAnalysis(noise_reduced_signal, self.window_size).execute()
# with open(train_dir + "reconstructed_signal.csv", 'a') as f:
# np.savetxt(f, reconstructed_signal, delimiter=',', fmt='%.18e')
# todo uncomment when you running the main process
processed_signal = []
position = 0
for i in range(0, int((noise_reduced_signal.shape[0]) - int(self.config['window_size']) - 1)):
clip = Clip(self.config, buffer=np.array(noise_reduced_signal[position:position + int(self.config['window_size'])].tolist()))
processed_signal.append(clip.get_feature_vector())
position += 1
with open(train_dir + "feature_vector.csv", 'a') as f:
np.savetxt(f, np.array(processed_signal), delimiter=',', fmt='%.18e')
# self.lock.acquire()
# self.output_buffer[thread_id] = reconstructed_signal
# self.lock.release()
    def apply_noise_reducer_filer(self, data):
        data = np.array(data, dtype=float)
        # zero-phase low-pass Butterworth stage derived from the configured low_frequency
        b, a = butter(self.order, (self.order * self.low_frequency * 1.0)
                      / self.sampling_rate * 1.0, btype='low')
        # for i in range(0, self.number_of_channels):
        data = np.transpose(filtfilt(b, a, data))
        # zero-phase high-pass Butterworth stage derived from the configured high_frequency
        b1, a1 = butter(self.order, (self.order * self.high_frequency * 1.0) /
                        self.sampling_rate * 1.0, btype='high')
        # for i in range(0, self.number_of_channels):
        data = np.transpose(filtfilt(b1, a1, data))
        # band-stop stage built from the 58-62 Hz band to suppress mains interference
        Wn = (np.array([58.0, 62.0]) / 500 * self.order).tolist()
        b3, a3 = butter(self.order, Wn, btype='stop')
        for i in range(0, self.number_of_channels):
            data = np.transpose(filtfilt(b3, a3, data))
        # additional narrow band-stop stage with hard-coded normalised band edges
        Wn = [0.05008452488,0.152839]
        b3, a3 = butter(self.order, Wn, btype='stop')
        for i in range(0, self.number_of_channels):
            data = np.transpose(filtfilt(b3, a3, data))
return data
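# Usage sketch (commented out; mirrors how NoiseReducer drives this worker: `config`
# is the parsed config/config.json dict, `input_buffer` holds one window of samples
# per channel and `output_buffer` is the shared result structure):
# worker = PreProcessor(0, input_buffer, output_buffer, config)
# worker.start()
# worker.join()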
| 4,179 | 41.222222 | 137 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/noise_reducer.py | import json
import threading
import numpy as np
import init_buffer as buf
from preprocessing import PreProcessor
# import draw_sample_plot_and_save, create_sample_from_image
# import get_label
from utils.dataset_writer_utils import draw_sample_plot_and_save, create_sample_from_image
from utils.utils import get_label
class NoiseReducer(threading.Thread):
def __init__(self, thread_id, server, writer, config):
threading.Thread.__init__(self)
self.config = config
self.window_size = int(config["window_size"])
self.verbose = eval(config["verbose"])
self.input_data = buf.ring_buffers
self.number_of_threads = int(config["number_of_channels"])
self.feature_vector_size = int(config["feature_vector_size"])
self.train_dir = str(config["train_dir_abs_location"])
self.counter=0
# self.train_dir = str(config["train_dir"])
self.lock = threading.Lock()
self.input_buffer = np.zeros([self.number_of_threads, self.window_size])
self.thread_id = thread_id
self.output_buffer = []
self.is_processing = False
self.server = server
self.writer = writer
self.number_of_class = int(config["processing"]["train"]["number_of_class"])
self.ip = str(config["ip"])
self.port = int(config["port"]) + 5 # adding five offset to secondary udp server
self.overlap_size = int(config["overlap_size"])
def construct_input_buffer(self):
for j in range(0, len(self.input_data)):
try:
self.input_buffer[j] = self.input_data[j].pop_window(self.window_size, self.overlap_size)
except:
print ("Still input buffer is empty... creating random data set...")
self.input_buffer[j] = [i for i in range(0, self.window_size)]
pass
self.input_buffer = np.array(self.input_buffer)
def run(self):
if self.verbose:
print("Starting " + str(self.thread_id))
self.lock.acquire()
self.is_processing = True
self.construct_input_buffer()
self.process_signal()
if self.verbose:
print (self.output_buffer)
self.is_processing = False
self.lock.release()
if self.verbose:
print("Existing " + str(self.thread_id))
def process_signal(self):
self.counter += 1
self.output_buffer = np.zeros([self.input_buffer.shape[0], self.feature_vector_size])
threads = []
thread_list = [i for i in range(0, self.number_of_threads)]
for thread_id in thread_list:
thread = PreProcessor(thread_id, self.input_buffer, self.output_buffer, config=self.config)
thread.start()
threads.append(thread)
for t in threads:
t.join()
# with open(self.train_dir + "/feature_vectors.csv", 'a') as f:
# np.savetxt(f, self.output_buffer, delimiter=',', fmt='%.18e')
clip_label = get_label(1, self.number_of_class)
clip_filename = draw_sample_plot_and_save(self.output_buffer.flatten(), "/channel", self.thread_id, self.config)
sample = create_sample_from_image(clip_filename, clip_label, self.config)
# sample = create_sample_from_data(self.output_buffer.flatten(), class_label)
self.writer.write(sample.SerializeToString())
self.send_noise_data(json.dumps(self.input_buffer.tolist()))
self.send_preprocessed_data(json.dumps(self.output_buffer.tolist()))
# return self.output_buffer
def send_preprocessed_data(self, data):
self.server.sendto(data, (self.ip, self.port))
def send_noise_data(self, data):
self.server.sendto(data, (self.ip, self.port+1))
# project_file_path = "/home/runge/openbci/OpenBCI_Python"
# config_file = "/home/runge/openbci/OpenBCI_Python/config/config.json"
#
# with open(config_file) as config:
# plugin_config = json.load(config)
# buffer_size = int(plugin_config["buffer_size"])
# number_of_channels = int(plugin_config["number_of_channels"])
# buffer_capacity = int(plugin_config["buffer_capacity"])
# tfrecords_filename = project_file_path + str(plugin_config["model"]["tfrecords_filename"])
# lock = threading.Lock()
# server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# ring_buffers = [RingBuffer(buffer_size * 4) for i in range(0, number_of_channels)]
# for k in range(0, number_of_channels):
# for p in range(0, buffer_size*buffer_capacity):
# ring_buffers[k].append(random.randint(1,100))
# writer = tf.python_io.TFRecordWriter(tfrecords_filename)
# noisereducer_thread = NoiseReducer("main thread",ring_buffers,server,lock, writer, plugin_config)
# i = 0
# while i<100:
# if not noisereducer_thread.is_processing:
# print ("------current process-----")
# noisereducer_thread = NoiseReducer("main thread", ring_buffers,server,lock, writer, plugin_config)
# noisereducer_thread.start()
# noisereducer_thread.join()
# i+=1
# writer.close()
| 5,434 | 44.291667 | 120 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/test_udp_client.py | import json
import socket
import sys
import time
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('localhost', 8889)
message = "34,56,89"
message = json.dumps([34,56,89])
try:
while True:
# Send data
print >>sys.stderr, 'sending "%s"' % message
sent = sock.sendto(message, server_address)
        time.sleep(1)
finally:
print >>sys.stderr, 'closing socket'
sock.close() | 466 | 19.304348 | 55 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/server.py | import Queue
import json
import socket
import sys
import threading
import init_buffer as buf
import numpy as np
from RingBuffer import RingBuffer
class UDPServer(threading.Thread):
def __init__(self, threadID, input_buffer, port, receiver_port, ip="localhost", verbose=False):
threading.Thread.__init__(self)
self.threadID = threadID
self.isRun = True
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.ip = ip
self.port = port
self.server_address = (self.ip, self.port)
self.lock = threading.Lock()
self.previous_buffer = [-1,-1,-1]
self.buffer = input_buffer
self.receiver_port = receiver_port
self.verbose = verbose
self.file_descriptor = ""
if self.verbose:
self.file_descriptor = ""
try:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
except:
print ("Error: while starting up udp server")
def run(self):
print ('starting up on %s port %s \n' % self.server_address)
self.call_back_handler()
self.socket.close()
print ("Exiting " + self.threadID)
def get_previous_buffer(self):
return self.previous_buffer
def get_next_point(self):
# self.lock.acquire()
# if not self.buffer.empty():
# data = self.buffer.pop_window(10)
# self.lock.release()
# return data
# else:
# self.lock.release()
return self.previous_buffer
# def retrieve_data(self):
# while self.isRun:
# data, address = self.socket.recvfrom(self.receiver_port)
# data = json.loads(data)
# result = []
# if data:
# self.lock.acquire()
# if self.verbose:
# print (data)
# self.buffer.append(data)
# self.previous_buffer = data
# # result.append(data)
# # result = np.asarray(result)
# # np.savetxt(self.file_descriptor, result, delimiter=',', fmt='%.18e')
# self.lock.release()
def call_back_handler(self):
while self.isRun:
# print "-----------------------------------------------"
data, address = self.socket.recvfrom(self.receiver_port)
data = json.loads(data)
result = []
if data:
# channal_result = buf.channel_data
try:
# self.lock.acquire()
if self.verbose:
print (data)
self.buffer.append(data)
self.previous_buffer = data
# with open("/home/runge/openbci/git/OpenBCI_Python/build/dataset/result_bicep_new.csv", 'a') as f:
# for i in self.previous_buffer:
# channal_result += str(i)
# channal_result += ","
# channal_result[-1].replace(",", "")
# channal_result += '\n'
# f.write(channal_result)
except:
# print (channal_result)
print (self.previous_buffer)
pass
# result.append(data)
# result = np.asarray(result)
# np.savetxt(self.file_descriptor, result, delimiter=',', fmt='%.18e')
# self.lock.release()
# if self.verbose:
# with open("/home/runge/openbci/git/OpenBCI_Python/build/dataset/kincet_anagles/kinect_angles.csv", 'a')\
# as self.file_descriptor:
# self.retrieve_data()
# else:
# self.retrieve_data()
# kinect_angles = RingBuffer(20, dtype=list)
# ip = "0.0.0.0"
# port = 8889
# receiver_port = 4096
# thread = UDPServer("udp_server", kinect_angles, port, receiver_port, ip, False)
# thread.start()
# thread.isRun = True
# thread.join()
| 4,144 | 34.732759 | 123 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/pattern_detection.py | import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
import scipy.linalg as lin
import re
from scipy.signal import butter, filtfilt
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame, Series
from scipy import signal
fsamp = 250
tsample = 1 / fsamp
f_low = 50
f_high = 1
order = 2
channel_vector = [1,2, 3, 4, 5]
n_ch = len(channel_vector)
# df = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/train/result/reconstructed_kinect__angles_.csv")
# df = df[channel_vector].dropna(axis=0)
#
# processed_signal = df.copy()
# b, a = butter(order, (order * f_low * 1.0) / fsamp * 1.0, btype='low')
# for i in range(0, n_ch):
# processed_signal.ix[:, i] = np.transpose(filtfilt(b, a, df.ix[:, i]))
#
# b1, a1 = butter(order, (order * f_high * 1.0) / fsamp * 1.0, btype='high')
# for i in range(0, n_ch):
# processed_signal.ix[:, i] = np.transpose(filtfilt(b1, a1, processed_signal.ix[:, i]))
#
# Wn = (np.array([58.0, 62.0]) / 500 * order).tolist()
# b3, a3 = butter(order, Wn, btype='stop')
# for i in range(0, n_ch):
# processed_signal.ix[:, i] = np.transpose(filtfilt(b3, a3, processed_signal.ix[:, i]))
start = 850
end = 900
processed_signal = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/train/result/reconstructed_kinect__angles_.csv").dropna()
def nomalize_signal(input_signal):
mean = np.mean(input_signal, axis=0)
input_signal -= mean
return input_signal / np.std(input_signal, axis=0)
#
# x = np.arange(start, end, 1)
# plt.plot(x, processed_signal.ix[:,1][start:end])
# plt.show()
processed_signal = nomalize_signal(processed_signal)
# plt.plot(x, processed_signal.ix[:,1][start:end])
# plt.show()
pattern=np.array(processed_signal.ix[:, 1][start :end])
data=np.array(processed_signal.ix[:, 1][750 :1300])
def create_mats(dat):
'''
create
A - an initial transition matrix
pA - pseudocounts for A
w - emission distribution regression weights
K - number of hidden states
'''
step=5 #adjust this to change the granularity of the pattern
eps=.1
dat=dat[::step]
K=len(dat)+1
A=np.zeros( (K,K) )
A[0,1]=1.
pA=np.zeros( (K,K) )
pA[0,1]=1.
for i in xrange(1,K-1):
A[i,i]=(step-1.+eps)/(step+2*eps)
A[i,i+1]=(1.+eps)/(step+2*eps)
pA[i,i]=1.
pA[i,i+1]=1.
A[-1,-1]=(step-1.+eps)/(step+2*eps)
A[-1,1]=(1.+eps)/(step+2*eps)
pA[-1,-1]=1.
pA[-1,1]=1.
w=np.ones( (K,2) , dtype=np.float)
w[0,1]=dat[0]
w[1:-1,1]=(dat[:-1]-dat[1:])/step
w[-1,1]=(dat[0]-dat[-1])/step
return A,pA,w,K
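# Shape check (worked example with the step of 5 set inside create_mats): the 50-sample
# pattern sliced above gives dat = pattern[::5] with 10 entries, so K = 11 hidden
# states, A and pA are (11, 11) and w is (11, 2).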
#initialize stuff
A,pA,w,K=create_mats(pattern)
eta=5. #precision parameter for the autoregressive portion of the model
lam=.1 #precision parameter for the weights prior
N=1 #number of sequences
M=2 #number of dimensions - the second variable is for the bias term
T=len(data) #length of sequences
x=np.ones( (T+1,M) ) # sequence data (just one sequence)
x[0,1]=1
x[1:,0]=data
#emissions
e=np.zeros( (T,K) )
#residuals
v=np.zeros( (T,K) )
#store the forward and backward recurrences
f=np.zeros( (T+1,K) )
fls=np.zeros( (T+1) )
f[0,0]=1
b=np.zeros( (T+1,K) )
bls=np.zeros( (T+1) )
b[-1,1:]=1./(K-1)
#hidden states
z=np.zeros( (T+1),dtype=np.int )
#expected hidden states
ex_k=np.zeros( (T,K) )
# expected pairs of hidden states
ex_kk=np.zeros( (K,K) )
nkk=np.zeros( (K,K) )
def fwd(xn):
global f,e
for t in xrange(T):
f[t+1,:]=np.dot(f[t,:],A)*e[t,:]
sm=np.sum(f[t+1,:])
fls[t+1]=fls[t]+np.log(sm)
f[t+1,:]/=sm
assert f[t+1,0]==0
def bck(xn):
global b,e
for t in xrange(T-1,-1,-1):
b[t,:]=np.dot(A,b[t+1,:]*e[t,:])
sm=np.sum(b[t,:])
bls[t]=bls[t+1]+np.log(sm)
b[t,:]/=sm
def em_step(xn):
global A,w,eta
global f,b,e,v
global ex_k,ex_kk,nkk
x=xn[:-1] #current data vectors
y=xn[1:,:1] #next data vectors predicted from current
#compute residuals
v=np.dot(x,w.T) # (N,K) <- (N,1) (N,K)
v-=y
e=np.exp(-eta/2*v**2,e)
fwd(xn)
bck(xn)
# compute expected hidden states
for t in xrange(len(e)):
ex_k[t,:]=f[t+1,:]*b[t+1,:]
ex_k[t,:]/=np.sum(ex_k[t,:])
# compute expected pairs of hidden states
for t in xrange(len(f)-1):
ex_kk=A*f[t,:][:,np.newaxis]*e[t,:]*b[t+1,:]
ex_kk/=np.sum(ex_kk)
nkk+=ex_kk
# max w/ respect to transition probabilities
A=pA+nkk
A/=np.sum(A,1)[:,np.newaxis]
# solve the weighted regression problem for emissions weights
# x and y are from above
for k in xrange(K):
ex=ex_k[:,k][:,np.newaxis]
dx=np.dot(x.T,ex*x)
dy=np.dot(x.T,ex*y)
dy.shape=(2)
w[k,:]=lin.solve(dx+lam*np.eye(x.shape[1]), dy)
#return the probability of the sequence (computed by the forward algorithm)
return fls[-1]
if __name__=='__main__':
#run the em algorithm
for i in xrange(5):
print em_step(x)
#get rough boundaries by taking the maximum expected hidden state for each position
r=np.arange(len(ex_k))[np.argmax(ex_k,1)<2]
f = np.diff(np.diff(r))
for i in range(0,len(f)):
if(f[i]<=0):
r[i] = 0
#plot
plt.plot(range(T),x[1:,0])
yr=[np.min(x[:,0]),np.max(x[:,0])]
previous_value= r[0]
plt.plot([r[0], r[0]], yr, '-r')
for i in r:
plt.plot([i,i],yr,'-r')
previous_value = i
plt.show()
# def create_mats(dat):
# '''
# create
# A - an initial transition matrix
# pA - pseudocounts for A
# w - emission distribution regression weights
# K - number of hidden states
# '''
# step=5 #adjust this to change the granularity of the pattern
# eps=.1
# dat=dat[::step]
# K=len(dat)+1
# A=np.zeros((K,K))
# A[0,1]=1.
# pA=np.zeros((K,K))
# pA[0,1]=1.
# for i in xrange(1,K-1):
# A[i,i]=(step-1.+eps)/(step+2*eps)
# A[i,i+1]=(1.+eps)/(step+2*eps)
# pA[i,i]=1.
# pA[i,i+1]=1.
# A[-1,-1]=(step-1.+eps)/(step+2*eps)
# A[-1,1]=(1.+eps)/(step+2*eps)
# pA[-1,-1]=1.
# pA[-1,1]=1.
#
# w=np.ones( (K,2) , dtype=np.float)
# w[0,1]=dat[0]
# w[1:-1,1]=(dat[:-1]-dat[1:])/step
# w[-1,1]=(dat[0]-dat[-1])/step
#
# return A,pA,w,K
#
# #initialize stuff
# A,pA,w,K=create_mats(pattern)
#
# eta=10. #precision parameter for the autoregressive portion of the model
# lam=.1 #precision parameter for the weights prior
#
# N=1 #number of sequences
# M=2 #number of dimensions - the second variable is for the bias term
# T=len(data) #length of sequences
#
# x=np.ones( (T+1,M) ) # sequence data (just one sequence)
# x[0,1]=1
# x[1:,0]=data
#
# #emissions
# e=np.zeros( (T,K) )
# #residuals
# v=np.zeros( (T,K) )
#
# #store the forward and backward recurrences
# f=np.zeros( (T+1,K) )
# fls=np.zeros( (T+1) )
# f[0,0]=1
# b=np.zeros( (T+1,K) )
# bls=np.zeros( (T+1) )
# b[-1,1:]=1./(K-1)
#
# #hidden states
# z=np.zeros( (T+1),dtype=np.int )
#
# #expected hidden states
# ex_k=np.zeros( (T,K) )
#
# # expected pairs of hidden states
# ex_kk=np.zeros( (K,K) )
# nkk=np.zeros( (K,K) )
#
# def fwd(xn):
# global f,e
# for t in xrange(T):
# f[t+1,:]=np.dot(f[t,:],A)*e[t,:]
# sm=np.sum(f[t+1,:])
# sm=sm+0.000000000001
# fls[t+1]=fls[t]+np.log(sm)
# f[t+1,:]/=sm
# assert f[t+1,0]==0
#
# def bck(xn):
# global b,e
# for t in xrange(T-1,-1,-1):
# b[t,:]=np.dot(A,b[t+1,:]*e[t,:])
# sm=np.sum(b[t,:])
# sm = sm + 0.000000000001
# bls[t]=bls[t+1]+np.log(sm)
# b[t,:]/=sm
#
# def em_step(xn):
# global A,w,eta
# global f,b,e,v
# global ex_k,ex_kk,nkk
#
# x=xn[:-1] #current data vectors
# y=xn[1:,:1] #next data vectors predicted from current
# #compute residuals
# v=np.dot(x,w.T) # (N,K) <- (N,1) (N,K)
# v-=y
# e=np.exp(-eta/2*v**2,e)
#
# fwd(xn)
# bck(xn)
#
# # compute expected hidden states
# for t in xrange(len(e)):
# try:
# ex_k[t,:]=f[t+1,:]*b[t+1,:]
# ex_k[t,:]/=np.sum(ex_k[t,:])+ 0.0000001
# except :
# print "exception first "+ str(t)
#
# # compute expected pairs of hidden states
# for t in xrange(len(f)-1):
# try:
# ex_kk=A*f[t,:][:,np.newaxis]*e[t,:]*b[t+1,:]
# ex_kk/=np.sum(ex_kk)+ 0.0000001
# nkk+=ex_kk
# except:
# print "exception second " + str(t)
#
# # max w/ respect to transition probabilities
# A=pA+nkk
# A/=np.sum(A,1)[:,np.newaxis]
#
# # solve the weighted regression problem for emissions weights
# # x and y are from above
# for k in xrange(K):
# ex=ex_k[:,k][:,np.newaxis]
# dx=np.dot(x.T,ex*x)
# dy=np.dot(x.T,ex*y)
# dy.shape=(2)
# w[k,:]=lin.solve(dx+lam*np.eye(x.shape[1]), dy)
#
# #return the probability of the sequence (computed by the forward algorithm)
# return fls[-1]
#
# if __name__=='__main__':
# #run the em algorithm
# for i in xrange(20):
# print em_step(x)
#
# #get rough boundaries by taking the maximum expected hidden state for each position
# r=np.arange(len(ex_k))[np.argmax(ex_k,1)<3]
#
# # plot
# plt.plot(range(T),x[1:,0])
#
# yr=[np.min(x[:,0]),np.max(x[:,0])]
# # for i in range(0,10):
# # plt.plot([r[i],r[i]],yr,'-r')
# for i in r:
# plt.plot(i,i,yr,'-r')
# plt.show() | 9,600 | 24.878706 | 142 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/plot_kernel_regression.py | """
========================================================================
Comparison of kernel regression (KR) and support vector regression (SVR)
========================================================================
Toy example of 1D regression using kernel regression (KR) and support vector
regression (SVR). KR provides an efficient way of selecting a kernel's
bandwidth via leave-one-out cross-validation, which is considerably faster
that an explicit grid-search as required by SVR. The main disadvantages are
that it does not support regularization and is not robust to outliers.
"""
from py_qt import bootstrap as bs
import matplotlib.pyplot as plt
from py_qt import npr_methods
import numpy as np
from py_qt import nonparam_regression as smooth
from py_qt import plot_fit
import tensorflow as tf
import requests
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
import matplotlib.pyplot as plt
from kernel_regression import KernelRegression
np.random.seed(0)
def f(x):
return 3*np.cos(x/2) + x**2/5 + 3
xs = np.random.rand(200) * 10
ys = f(xs) + 2*np.random.randn(*xs.shape)
birthdata_url = 'https://www.umass.edu/statdata/statdata/data/lowbwt.dat'
birth_file = requests.get(birthdata_url)
birth_data = birth_file.text.split('\r\n')[5:]
birth_header = [x for x in birth_data[0].split(' ') if len(x) >= 1]
birth_data = [[float(x) for x in y.split(' ') if len(x) >= 1] for y in birth_data[1:] if len(y) >= 1]
# Pull out target variable
y_vals = np.array([x[1] for x in birth_data])
# Pull out predictor variables (not id, not target, and not birthweight)
x_vals = np.array([x[2:9] for x in birth_data])
# Split data into train/test = 80%/20%
train_indices = np.random.choice(len(x_vals), int(round(len(x_vals) * 0.8)), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# Normalize by column (min-max norm)
def normalize_cols(m):
col_max = m.max(axis=0)
col_min = m.min(axis=0)
return (m - col_min) / (col_max - col_min)
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test))
###############################################################################
# Generate sample data
# X = np.sort(5 * np.random.rand(100, 1), axis=0)
# y = np.sin(X).ravel()
X=x_vals_train
y=y_vals_train
###############################################################################
# Add noise to targets
y += 0.5 * (0.5 - np.random.rand(y.size))
###############################################################################
# Fit regression models
svr = GridSearchCV(SVR(kernel='rbf'), cv=5,
param_grid={"C": [1e-1, 1e0, 1e1, 1e2],
"gamma": np.logspace(-2, 2, 10)})
kr = KernelRegression(kernel="rbf", gamma=np.logspace(-2, 2, 10))
t0 = time.time()
y_svr = svr.fit(X, y).predict(X)
print("SVR complexity and bandwidth selected and model fitted in %.3f s" \
% (time.time() - t0))
t0 = time.time()
y_kr = kr.fit(X, y).predict(X)
print("KR including bandwith fitted in %.3f s"% (time.time() - t0))
###############################################################################
# Visualize models
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_kr, c='g', label='Kernel Regression')
plt.plot(X, y_svr, c='r', label='SVR')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Kernel regression versus SVR')
plt.legend()
# Visualize learning curves
plt.figure()
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X, y, train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X, y, train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="Kernel Regression")
plt.yscale("symlog", linthreshy=1e-7)
plt.ylim(-10, -0.01)
plt.xlabel("Training size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| 4,429 | 34.15873 | 101 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/tmp.py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.animation as animation
class SubplotAnimation(animation.TimedAnimation):
def __init__(self):
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 4)
self.t = np.linspace(0, 80, 400)
self.x = np.cos(2 * np.pi * self.t / 10.)
self.y = np.sin(2 * np.pi * self.t / 10.)
self.z = 10 * self.t
ax1.set_xlabel('x')
ax1.set_ylabel('y')
self.line1 = Line2D([], [], color='black')
self.line1a = Line2D([], [], color='red', linewidth=2)
self.line1e = Line2D(
[], [], color='red', marker='o', markeredgecolor='r')
ax1.add_line(self.line1)
ax1.add_line(self.line1a)
ax1.add_line(self.line1e)
ax1.set_xlim(-1, 1)
ax1.set_ylim(-2, 2)
ax1.set_aspect('equal', 'datalim')
ax2.set_xlabel('y')
ax2.set_ylabel('z')
self.line2 = Line2D([], [], color='black')
self.line2a = Line2D([], [], color='red', linewidth=2)
self.line2e = Line2D(
[], [], color='red', marker='o', markeredgecolor='r')
ax2.add_line(self.line2)
ax2.add_line(self.line2a)
ax2.add_line(self.line2e)
ax2.set_xlim(-1, 1)
ax2.set_ylim(0, 800)
ax3.set_xlabel('x')
ax3.set_ylabel('z')
self.line3 = Line2D([], [], color='black')
self.line3a = Line2D([], [], color='red', linewidth=2)
self.line3e = Line2D(
[], [], color='red', marker='o', markeredgecolor='r')
ax3.add_line(self.line3)
ax3.add_line(self.line3a)
ax3.add_line(self.line3e)
ax3.set_xlim(-1, 1)
ax3.set_ylim(0, 800)
animation.TimedAnimation.__init__(self, fig, interval=50, blit=True)
def _draw_frame(self, framedata):
i = framedata
head = i - 1
head_slice = (self.t > self.t[i] - 1.0) & (self.t < self.t[i])
self.line1.set_data(self.x[:i], self.y[:i])
self.line1a.set_data(self.x[head_slice], self.y[head_slice])
self.line1e.set_data(self.x[head], self.y[head])
self.line2.set_data(self.y[:i], self.z[:i])
self.line2a.set_data(self.y[head_slice], self.z[head_slice])
self.line2e.set_data(self.y[head], self.z[head])
self.line3.set_data(self.x[:i], self.z[:i])
self.line3a.set_data(self.x[head_slice], self.z[head_slice])
self.line3e.set_data(self.x[head], self.z[head])
self._drawn_artists = [self.line1, self.line1a, self.line1e,
self.line2, self.line2a, self.line2e,
self.line3, self.line3a, self.line3e]
def new_frame_seq(self):
return iter(range(self.t.size))
def _init_draw(self):
lines = [self.line1, self.line1a, self.line1e,
self.line2, self.line2a, self.line2e,
self.line3, self.line3a, self.line3e]
for l in lines:
l.set_data([], [])
ani = SubplotAnimation()
# ani.save('test_sub.mp4')
plt.show() | 3,192 | 34.087912 | 76 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/czxc.py | # import time, random
# import math
# from collections import deque
#
# import librosa
# import matplotlib.animation as animation
# from matplotlib import pyplot as plt
# import numpy as np
# start = time.time()
#
#
# class RealtimePlot:
# def __init__(self, axes, max_entries=100):
# self.axis_x = deque(maxlen=max_entries)
# self.axis_y = deque(maxlen=max_entries)
# self.axes = axes
# self.max_entries = max_entries
#
# self.lineplot, = axes.plot([], [], "ro-")
# self.axes.set_autoscaley_on(True)
#
# def add(self, x, y):
# self.axis_x.extend(x)
# self.axis_y.extend(y)
# self.lineplot.set_data(self.axis_x, self.axis_y)
# self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
# self.axes.relim()
# self.axes.autoscale_view() # rescale the y-axis
#
# def animate(self, figure, callback, interval=50):
# def wrapper(frame_index):
# self.add(*callback(frame_index))
# self.axes.relim()
# self.axes.autoscale_view() # rescale the y-axis
# return self.lineplot
# animation.FuncAnimation(figure, wrapper, interval=interval)
#
#
# def main():
# fig, axes = plt.subplots()
# display = RealtimePlot(axes)
# display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))
# while True:
# ydata = [random.randint(0, i) * i for i in range(0, 20)]
# # ydata = librosa.amplitude_to_db(librosa.stft(ydata), ref=np.max)
# xdata = [i for i in range(0, len(ydata))]
# display.add(xdata, ydata)
# plt.pause(0.001)
#
#
# if __name__ == "__main__": main()
import random
import time
from matplotlib import pyplot as plt
from matplotlib import animation
class RegrMagic(object):
"""Mock for function Regr_magic()
"""
def __init__(self):
self.x = 0
def __call__(self):
time.sleep(random.random())
self.x += 1
return self.x, random.random()
regr_magic = RegrMagic()
def frames():
while True:
yield regr_magic()
fig = plt.figure()
x = []
y = []
def animate(args):
x.append(args[0])
y.append(args[1])
return plt.plot(x, y, color='g')
anim = animation.FuncAnimation(fig, animate, frames=frames, interval=1000)
plt.show()
| 2,340 | 25.303371 | 92 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/init_buffer.py | ring_buffers = []
max_iteration = 10
channel_data = "" | 54 | 17.333333 | 18 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/analyzer.py | import librosa
import librosa.display
import matplotlib.pyplot as plt
plt.ion()
import numpy as np
import time
from matplotlib import dates
import datetime
class DynamicUpdate():
min_x = 0
max_x = 10
def on_launch(self):
self.figure, self.ax = plt.subplots()
self.lines, = self.ax.plot([],[], 'o')
self.ax.set_autoscaley_on(True)
# self.ax.set_xlim(self.min_x, self.max_x)
self.ax.grid()
def on_running(self, xdata, ydata):
self.lines.set_xdata(xdata)
self.lines.set_ydata(ydata)
self.ax.relim()
self.ax.autoscale_view()
plt.gcf().autofmt_xdate()
librosa.display.specshow(ydata, sr=20, y_axis='linear', x_axis='time')
self.figure.canvas.draw()
self.figure.canvas.flush_events()
def __call__(self):
self.on_launch()
xdata = []
ydata = []
for x in np.arange(0,10,0.5):
ydata = np.array([np.exp(-i**2)+10*np.exp(-(i-7)**2) for i in range(0, 128)])
if(x%2 == 0):
ydata = np.abs(librosa.fmt(ydata, n_fmt=64))
else:
ydata = librosa.amplitude_to_db(librosa.stft(ydata), ref=np.max)
xdata = np.array([i for i in range(0, ydata.size)])
self.on_running(xdata, ydata)
time.sleep(1)
return xdata, ydata
d = DynamicUpdate()
d()
# import matplotlib.pyplot as plt
# y, sr = librosa.load(librosa.util.example_audio_file())
# plt.figure(figsize=(12, 8))
# D = librosa.amplitude_to_db(librosa.stft(y), ref=np.max)
# plt.subplot(4, 2, 1)
# librosa.display.specshow(D, y_axis='linear')
# plt.colorbar(format='%+2.0f dB')
# plt.title('Linear-frequency power spectrogram')
print "---------------"
| 1,750 | 28.677966 | 89 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/processor.py | import json
import os
import seaborn as sb
from features.fft import FFT
from features.generic_type import EMG
from features.mean import Mean
from features.mfcc import MFCC
from features.zcr import ZCR
from manager import FeatureManager
import pandas as pd
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from utils.Audio import Audio
#
# sb.set(style="white", palette="muted")
#
# import random
# random.seed(20150420)
# from scipy.signal import butter, filtfilt
# import numpy as np
# import matplotlib.pyplot as plt
#
# import pandas as pd
# from pandas import DataFrame, Series
# from scipy.signal import butter, filtfilt
# from pandas import DataFrame, Series
# from scipy import signal
#
# class Clip:
#
# def __init__(self, config, buffer=None, filename=None, file_type=None):
# self.is_raw_data = eval(config["is_raw_data"])
# self.frame_size = int(config["window_size"])
# self.sampling_rate = int(config["sampling_rate"])
# # self.project_path = str(config["project_file_path"])
# self.project_path = "/home/runge/openbci/git/OpenBCI_Python"
# feature_config_file = self.project_path + "/features/config/feature_config.json"
# if self.is_raw_data:
# self.filename = os.path.basename(filename)
# self.path = os.path.abspath(filename)
# self.directory = os.path.dirname(self.path)
# self.category = self.directory.split('/')[-1]
# self.audio = Audio(self.path, file_type)
#
# else:
# self.audio = Audio(is_raw_data=self.is_raw_data, data=buffer)
#
# with open(feature_config_file) as feature_config:
# self.feature_config = json.load(feature_config)
# self.feature_config["sampling_rate"] = self.sampling_rate
# self.feature_config["frame_size"] = self.frame_size
# self.feature_config["is_raw_data"] = self.is_raw_data
#
#
# with self.audio as audio:
# self.featureManager = FeatureManager()
#
# self.featureManager.addRegisteredFeatures(FFT(self.audio, self.feature_config), "fft")
# self.featureManager.addRegisteredFeatures(EMG(self.audio, self.feature_config), "emg")
#
# self.featureManager.getRegisteredFeature("fft").compute_fft()
# # self.featureManager.getRegisteredFeature("emg").compute_hurst()
# self.featureManager.getRegisteredFeature("emg").compute_embed_seq()
# self.featureManager.getRegisteredFeature("emg").compute_bin_power()
# # self.featureManager.getRegisteredFeature("emg").compute_pfd()
# # self.featureManager.getRegisteredFeature("emg").compute_hfd()
# # self.featureManager.getRegisteredFeature("emg").compute_hjorth()
# # self.featureManager.getRegisteredFeature("emg").compute_spectral_entropy()
# # self.featureManager.getRegisteredFeature("emg").compute_svd_entropy()
# # self.featureManager.getRegisteredFeature("emg").compute_ap_entropy()
# # self.featureManager.getRegisteredFeature("emg").compute_samp_entropy()
#
#
# self.feature_list = self.featureManager.getRegisteredFeatures()
#
# def __repr__(self):
# return '<{0}/{1}>'.format(self.category, self.filename)
#
# def get_feature_vector(self):
# # self.featureManager.getRegisteredFeature("emg").get_hurst()
# return self.featureManager.getRegisteredFeature("fft").get_logamplitude()
#
# fsamp = 256
# tsample = 1 / fsamp
# f_low = 50
# f_high = 1
# order = 2
# channel_vector = [1,2, 3, 4, 5]
# n_ch = len(channel_vector)
# df = pd.read_csv("/home/runge/openbci/application.linux64/application.linux64/OpenBCI-RAW-right_strait_up_new.txt")
# df = df[channel_vector].dropna(axis=0)
#
# processed_signal = df.copy()
#
# b, a = butter(order, (order * f_low * 1.0) / fsamp * 1.0, btype='low')
# for i in range(0, n_ch):
# processed_signal.ix[:, i] = np.transpose(filtfilt(b, a, df.ix[:, i]))
#
# b1, a1 = butter(order, (order * f_high * 1.0) / fsamp * 1.0, btype='high')
# for i in range(0, n_ch):
# processed_signal.ix[:, i] = np.transpose(filtfilt(b1, a1, processed_signal.ix[:, i]))
#
# Wn = (np.array([58.0, 62.0]) / 500 * order).tolist()
# b3, a3 = butter(order, Wn, btype='stop')
# for i in range(0, n_ch):
# processed_signal.ix[:, i] = np.transpose(filtfilt(b3, a3, processed_signal.ix[:, i]))
#
# project_file_path = "/home/runge/openbci/git/OpenBCI_Python"
# config_file = project_file_path + "/config/config.json"
# with open(config_file) as config:
# config = json.load(config)
#
# start = 0
# end = 400
# plt.figure(figsize=(12, 8))
# for h in range(0, n_ch):
# plt.subplot(n_ch,1,h+1)
# clip = Clip(config, buffer=np.array(processed_signal.ix[:, h][start * fsamp:end * fsamp].tolist()))
# # f, Pxx_spec = signal.periodogram(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop',
# # scaling='spectrum')
#
# # f, Pxx_spec = signal.welch(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop', 128, scaling='spectrum')
# # wavelet = signal.ricker
# # widths = np.arange(1, 11)
# # cwtmatr = signal.cwt(processed_signal.ix[:, h][start * fsamp:end * fsamp], wavelet, widths)
# plt.plot(clip.feature_list.get("emg").get_bin_power())
# # plt.semilogy(fsamp, np.sqrt(Pxx_spec))
# # plt.ylim([1e-4, 1e1])
# plt.show()
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
import json
import os
from features.fft import FFT
from features.generic_type import EMG
from manager import FeatureManager
sb.set(style="white", palette="muted")
import random
random.seed(20150420)
class Clip:
def __init__(self, config, buffer=None, filename=None, file_type=None):
self.is_raw_data = eval(config["is_raw_data"])
self.frame_size = int(config["window_size"])
self.sampling_rate = int(config["sampling_rate"])
# self.project_path = str(config["project_file_path"])
self.project_path = "/home/runge/openbci/git/OpenBCI_Python"
feature_config_file = self.project_path + "/features/config/feature_config.json"
if self.is_raw_data:
self.filename = os.path.basename(filename)
self.path = os.path.abspath(filename)
self.directory = os.path.dirname(self.path)
self.category = self.directory.split('/')[-1]
self.audio = Audio(self.path, file_type)
else:
self.audio = Audio(is_raw_data=self.is_raw_data, data=buffer)
with open(feature_config_file) as feature_config:
self.feature_config = json.load(feature_config)
self.feature_config["sampling_rate"] = self.sampling_rate
self.feature_config["frame_size"] = self.frame_size
self.feature_config["is_raw_data"] = self.is_raw_data
with self.audio as audio:
self.featureManager = FeatureManager()
# self.featureManager.addRegisteredFeatures(FFT(self.audio, self.feature_config), "fft")
self.featureManager.addRegisteredFeatures(EMG(self.audio, self.feature_config), "emg")
# self.featureManager.getRegisteredFeature("fft").compute_fft()
# self.featureManager.getRegisteredFeature("emg").compute_hurst()
self.featureManager.getRegisteredFeature("emg").compute_embed_seq()
# self.featureManager.getRegisteredFeature("emg").compute_bin_power()
# self.featureManager.getRegisteredFeature("emg").compute_pfd()
# self.featureManager.getRegisteredFeature("emg").compute_hfd()
# self.featureManager.getRegisteredFeature("emg").compute_hjorth()
# self.featureManager.getRegisteredFeature("emg").compute_spectral_entropy()
# self.featureManager.getRegisteredFeature("emg").compute_svd_entropy()
# self.featureManager.getRegisteredFeature("emg").compute_ap_entropy()
self.featureManager.getRegisteredFeature("emg").compute_svd_entropy()
self.feature_list = self.featureManager.getRegisteredFeatures()
def __repr__(self):
return '<{0}/{1}>'.format(self.category, self.filename)
def get_feature_vector(self):
return self.featureManager.getRegisteredFeature("emg").get_svd_entropy()
# return self.featureManager.getRegisteredFeature("fft").get_logamplitude()
# return self.featureManager.getRegisteredFeature("fft").get_fft_spectrogram()
# fsamp = 1
#
# channel_vector = [1,2, 3, 4, 5]
# n_ch = len(channel_vector)
#
# # df1 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2noise_signal.csv")
# # df1 = df1.dropna(axis=0)
# #
# # df2 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2noise_reduced_signal.csv")
# # df2 = df2.dropna(axis=0)
# #
# # df3 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2reconstructed_signal.csv")
# # df3 = df3.dropna(axis=0)
# #
# # df4 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2feature_vector.csv")
# # df4 = df4.dropna(axis=0)
# #
# # df = []
# # df.append(df1)
# # df.append(df2)
# # df.append(df3)
# # df.append(df4)
# #
# # processed_signal = df.copy()
#
#
#
# project_file_path = "/home/runge/openbci/git/OpenBCI_Python"
# config_file = project_file_path + "/config/config.json"
# raw_reconstructed_signals = pd.read_csv(project_file_path+"/build/dataset2017-5-5_23-55-32new_bycept.csv").ix[:, 2:7].dropna()
# with open(config_file) as config:
#
# config = json.load(config)
# start = 100
# end = 200
#
# if end == 0:
# end = raw_reconstructed_signals.shape[0]
# x = np.arange(start,end, 1)
# fig = plt.figure(figsize=(10, 15))
# fig.subplots_adjust(hspace=.5)
# index = 1
# for h in range(0, 5):
# processed_signal = []
# ax = plt.subplot(10,2,index)
# input_signal = raw_reconstructed_signals.ix[:,h][start:end]
# ax.plot(x, input_signal)
# position=0
# for i in range(0, int((end-start)-int(config['window_size'])-1)):
# clip = Clip(config, buffer=np.array(input_signal[position:position+int(config['window_size'])].tolist()))
# processed_signal.append(clip.get_feature_vector())
# position+=1
#
# index += 1
# ax = plt.subplot(10, 2, index)
# index += 1
# ax.plot(processed_signal)
#
# # f, Pxx_spec = signal.periodogram(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop',
# # scaling='spectrum')
#
# # f, Pxx_spec = signal.welch(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop', 128, scaling='spectrum')
# # wavelet = signal.ricker
# # widths = np.arange(1, 11)
# # cwtmatr = signal.cwt(processed_signal.ix[:, h][start * fsamp:end * fsamp], wavelet, widths)
# # plt.plot(raw_reconstructed_signals[h].ix[:,0][start * fsamp:end * fsamp])
# # plt.semilogy(fsamp, np.sqrt(Pxx_spec))
# # plt.ylim([1e-4, 1e1])
# plt.show()
| 11,417 | 32.781065 | 135 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/RingBuffer.py | import numpy as np
from collections import Sequence
class RingBuffer(Sequence):
def __init__(self, capacity, dtype=float, allow_overwrite=True):
self._arr = np.zeros(capacity, dtype)
self._left_index = 0
self._right_index = 0
self._capacity = capacity
self._allow_overwrite = allow_overwrite
def _unwrap(self):
return np.concatenate((
self._arr[self._left_index:min(self._right_index, self._capacity)],
self._arr[:max(self._right_index - self._capacity, 0)]
))
def _fix_indices(self):
if self._left_index >= self._capacity:
self._left_index -= self._capacity
self._right_index -= self._capacity
elif self._left_index < 0:
self._left_index += self._capacity
self._right_index += self._capacity
@property
def is_full(self):
return len(self) == self._capacity
# numpy compatibility
def __array__(self):
return self._unwrap()
@property
def dtype(self):
return self._arr.dtype
@property
def shape(self):
return (len(self),) + self._arr.shape[1:]
# these mirror methods from deque
@property
def maxlen(self):
return self._capacity
def append(self, value):
if self.is_full:
if not self._allow_overwrite:
raise IndexError('append to a full RingBuffer with overwrite disabled')
elif not len(self):
return
else:
self._left_index += 1
self._arr[self._right_index % self._capacity] = value
self._right_index += 1
self._fix_indices()
def pop(self):
if len(self) == 0:
raise IndexError("pop from an empty RingBuffer")
self._right_index -= 1
self._fix_indices()
res = self._arr[self._right_index % self._capacity]
return res
def pop_window(self, window_size, overlap):
dataset = []
for i in range(0, window_size):
dataset.append(self.pop())
self._right_index = self._right_index + overlap
self._fix_indices()
return dataset
# implement Sequence methods
def __len__(self):
return self._right_index - self._left_index
def __getitem__(self, item):
return self._unwrap()[item]
def __iter__(self):
return iter(self._unwrap())
# Everything else
def __repr__(self):
return '<RingBuffer of {!r}>'.format(np.asarray(self))
# buffer = RingBuffer(100)
# buffer.append(78)
# print buffer.pop_window(1, 0) | 2,621 | 26.893617 | 87 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/logistic_regression.py | import pandas as pd
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import requests
matplotlib.rc('xtick', labelsize=15)
matplotlib.rc('ytick', labelsize=15)
matplotlib.rc('axes', titlesize=20)
matplotlib.rc('legend', fontsize=20)
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Create graph
sess = tf.Session()
project_file_path = "/home/runge/openbci/git/OpenBCI_Python"
config_file = project_file_path + "/config/config.json"
channel_signals = pd.read_csv(project_file_path
+"/build/dataset/train/result/raw_reconstructed_signals.csv").dropna()
kinect__angles = pd.read_csv(project_file_path+"/build/dataset/train/result/reconstructed_kinect__angles_.csv").dropna()
y_vals = np.array(kinect__angles.ix[:,0])
x_vals = np.array(channel_signals)
# Split data into train/test = 80%/20%
train_indices = np.random.choice(len(x_vals), int(round(len(x_vals) * 0.8)), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# Normalize by column (min-max norm)
def normalize_cols(m):
col_max = m.max(axis=0)
col_min = m.min(axis=0)
return (m - col_min) / (col_max - col_min)
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test))
# Declare batch size
batch_size = 25
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 5], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[5, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
# Declare model operations
model_output = tf.add(tf.matmul(x_data, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_target, logits=model_output))
learningRate = tf.train.exponential_decay(learning_rate=0.0008,
global_step= 1,
decay_steps=x_vals_train.shape[0],
decay_rate= 0.95,
staircase=True)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
# Initialize variables
init = tf.initialize_all_variables()
sess.run(init)
# Actual Prediction
prediction = tf.round(tf.sigmoid(model_output))
predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
# Training loop
loss_vec = []
train_acc = []
test_acc = []
for i in range(15000):
rand_index = np.random.choice(len(x_vals_train), size=batch_size)
rand_x = x_vals_train[rand_index]
rand_y = np.transpose([y_vals_train[rand_index]])
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
loss_vec.append(temp_loss)
temp_acc_train = sess.run(accuracy, feed_dict={x_data: x_vals_train, y_target: np.transpose([y_vals_train])})
train_acc.append(temp_acc_train)
temp_acc_test = sess.run(accuracy, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])})
test_acc.append(temp_acc_test)
if (i + 1) % 300 == 0:
print('Loss = ' + str(temp_loss))
# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Cross Entropy Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Cross Entropy Loss')
plt.show()
# Plot train and test accuracy
plt.plot(train_acc, 'k-', label='Train Set Accuracy')
plt.plot(test_acc, 'r--', label='Test Set Accuracy')
plt.title('Train and Test Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
| 4,069 | 32.636364 | 120 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/preprocessing/manager.py |
class FeatureManager:
def __init__(self):
self.featureList = {}
def __enter__(self):
print "Initializing mfcc calculation..."
def __exit__(self, exc_type, exc_val, exc_tb):
print "Done with calculations..."
def addRegisteredFeatures(self, feature, featureId):
self.featureList[featureId] = feature
def getRegisteredFeatures(self):
return self.featureList
def getRegisteredFeature(self, feature_id):
return self.featureList[feature_id]
| 514 | 23.52381 | 56 | py |
OpenBCIPython | OpenBCIPython-master/preprocessing/ssa.py | import matplotlib.pyplot as plt
from operator import itemgetter
import numpy as np
from scipy.linalg import toeplitz
from numpy import linalg as lin
class SingularSpectrumAnalysis():
def __init__(self, input_signal, window, verbose=False):
self.input_signal = input_signal
self.M = window # window length = embedding dimension
self.N = self.input_signal.size # length of generated time series
self.number_of_lags = self.N - self.M + 1
self.verbose =verbose
self.covariance_matrix = []
self.trajectory_matrix = []
self.eigne_vectors = []
self.eigne_values = []
self.principal_components = []
self.reconstructed_matrix = []
def nomalize(self):
mean = np.mean(self.input_signal, axis=0)
self.input_signal = self.input_signal - mean
self.input_signal = self.input_signal / np.std(self.input_signal, axis=0)
# if self.verbose:
# plt.figure(1)
# plt.subplot(311)
# plt.plot(self.input_signal)
# plt.title('Normalized Values')
# plt.grid(True)
def calculate_trajectory_matrix(self):
self.trajectory_matrix = np.zeros([self.N - self.M + 1, self.M])
for m in range(0, self.N - self.M + 1):
self.trajectory_matrix[m] = self.input_signal[m:self.M + m]
def calculate_covariance_matrix_toeplitz_approach(self):
# _, correlation, _, _ = plt.xcorr(self.input_signal, self.input_signal, maxlags=self.M - 1)
correlation = self.coss_corelation(self.input_signal, self.input_signal, self.M - 1)
self.covariance_matrix = toeplitz(correlation[self.M - 1:2 * self.M - 1])
# if self.verbose:
# plt.subplot(912)
# plt.plot(self.covariance_matrix)
# plt.title('Covariance Matrix: Toeplitz Approach')
# plt.grid(True)
def calculate_covariance_matrix_trajectory_approach(self):
self.covariance_matrix = np.matmul(self.trajectory_matrix.transpose(), self.trajectory_matrix) / (self.N - self.M + 1)
# if self.verbose:
# plt.subplot(913)
# plt.plot(self.covariance_matrix)
# plt.title('Trajectory Matrix')
# plt.grid(True)
def calculate_eigen_vectors_and_values(self):
[self.eigne_values, self.eigne_vectors] = lin.eig(self.covariance_matrix)
indices, sorted_value = zip(*sorted(enumerate(self.eigne_values), key=itemgetter(1), reverse=True))
self.eigne_values = list(sorted_value) # % sort eigenvalues
self.eigne_vectors = np.array(self.eigne_vectors[list(indices)]) # and eigenvectors
# if self.verbose:
# plt.subplot(914)
# plt.plot(np.real(self.eigne_values))
# plt.title('Eigen Values')
# plt.grid(True)
#
# plt.subplot(915)
# plt.plot(np.real(self.eigne_vectors))
# plt.title('Eigen Vectors')
# plt.grid(True)
def calculate_principle_components(self):
self.principal_components = np.matmul(self.trajectory_matrix, self.eigne_vectors)
# if self.verbose:
# plt.subplot(312)
# plt.plot(self.principal_components)
# plt.title('Principle Components')
# plt.grid(True)
def reconstruct_matrix(self):
self.reconstructed_matrix = np.zeros([self.N, self.M])
for m in range(0, self.M):
buf = np.outer(self.principal_components[:, m], self.eigne_vectors[:, m].transpose())
buf = np.array(list(reversed(buf)))
for n in range(0, self.N):
self.reconstructed_matrix[n, m] = np.mean(np.real(np.diag(buf, -(self.N - self.M) + n)))
def coss_corelation(self, x, y, max_lags):
Nx = len(x)
if Nx != len(y):
raise ValueError('x and y must be equal length')
x = np.asarray(x)
y = np.asarray(y)
c = np.correlate(x, y, mode=2)
c /= np.sqrt(np.dot(x, x) * np.dot(y, y))
if max_lags >= Nx or max_lags < 1:
            raise ValueError('max_lags must be strictly positive and < %d' % Nx)
lags = np.arange(-max_lags, max_lags + 1)
c = c[Nx - 1 - max_lags:Nx + max_lags]
return c
def get_reconstructed_signal(self, start=0, end=None):
        if end is None:
end = self.N
reconstructed_final_signal = np.sum(self.reconstructed_matrix[:,start:end], axis=1)
if self.verbose:
# plt.subplot(311)
plt.plot(reconstructed_final_signal,'g')
# plt.title('Reconstructed Signal')
# plt.grid(True)
# plt.subplot(312)
plt.plot(self.input_signal, 'r')
plt.title('Reconstructed Signal and Noise Added Signal')
plt.grid(True)
# plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,
# wspace=0.35)
plt.show()
return reconstructed_final_signal
def execute(self, number_of_pin_com=None):
# self.nomalize()
self.calculate_trajectory_matrix()
self.calculate_covariance_matrix_toeplitz_approach()
self.calculate_eigen_vectors_and_values()
self.calculate_principle_components()
self.reconstruct_matrix()
return self.get_reconstructed_signal(end=number_of_pin_com)
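# Minimal usage sketch (illustrative only): reconstructs a noisy sine wave from
# its first two SSA components. The signal length, window size and component
# count are arbitrary choices; `_ssa_demo` is a hypothetical helper name.
def _ssa_demo():
    t = np.arange(500)
    noisy = np.sin(2 * np.pi * t / 50.0) + 0.3 * np.random.randn(t.size)
    ssa = SingularSpectrumAnalysis(noisy, 20, verbose=False)
    return ssa.execute(number_of_pin_com=2)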
# for j in range(int(len(df2)/window_size)):
# reconstructed_signal = SingularSpectrumAnalysis(df2[index:window_size+index], 16, True).execute(2)
# index += window_size
# with open("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2recon.csv", 'a') as f:
# np.savetxt(f, reconstructed_signal, delimiter=',', fmt='%.18e') | 5,821 | 41.49635 | 126 | py |
OpenBCIPython | OpenBCIPython-master/processing/dtw_testing.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.signal import butter, filtfilt
from lib.dtw import dtw
fsamp = 250
tsample = 1 / fsamp
f_low = 50
f_high = 1
order = 2
channel_vector = [1,2, 3, 4, 5]
n_ch = len(channel_vector)
df = pd.read_csv("/home/runge/openbci/application.linux64/application.linux64/OpenBCI-RAW-right_strait_up_new.txt")
df = df[channel_vector].dropna(axis=0)
processed_signal = df.copy()
b, a = butter(order, (order * f_low * 1.0) / fsamp * 1.0, btype='low')
for i in range(0, n_ch):
processed_signal.ix[:, i] = np.transpose(filtfilt(b, a, df.ix[:, i]))
b1, a1 = butter(order, (order * f_high * 1.0) / fsamp * 1.0, btype='high')
for i in range(0, n_ch):
processed_signal.ix[:, i] = np.transpose(filtfilt(b1, a1, processed_signal.ix[:, i]))
Wn = (np.array([58.0, 62.0]) / 500 * order).tolist()
b3, a3 = butter(order, Wn, btype='stop')
for i in range(0, n_ch):
processed_signal.ix[:, i] = np.transpose(filtfilt(b3, a3, processed_signal.ix[:, i]))
start = 7000
end = 8000
def nomalize_signal(input_signal):
mean = np.mean(input_signal, axis=0)
input_signal -= mean
return input_signal / np.std(input_signal, axis=0)
processed_signal = nomalize_signal(processed_signal)
pattern=np.array(processed_signal.ix[:, 2][start :end]).reshape(-1,1)
data=np.array(processed_signal.ix[:, 2][7000:8000]).reshape(-1,1)
def my_custom_norm(x, y):
return (x * x) + (y * y)
dist, cost, acc, path = dtw(pattern, data, dist=my_custom_norm)
print 'Normalized distance between the two signals:', dist, cost, acc
plt.imshow(acc.T, origin='lower', interpolation='nearest')
plt.plot(cost-acc, 'w')
# plt.xlim((-0.5, acc.shape[0]-0.5))
# plt.ylim((-0.5, acc.shape[1]-0.5))
plt.show()
# x = np.array([0, 0, 1, 1, 2, 4, 2, 1, 2, 0]).reshape(-1, 1)
# y = np.array([1, 1, 1, 2, 2, 2, 2, 3, 2, 0]).reshape(-1, 1)
#
# dist, cost, acc, path = dtw(x, y, dist=my_custom_norm) | 1,935 | 30.225806 | 115 | py |
OpenBCIPython | OpenBCIPython-master/build/filterout_dataset.py | import json
import pandas as pd
import numpy as np
import json
import scipy.linalg as lin
import pandas as pd
import sys
from pandas import DataFrame, Series
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import pandas as pd
properdataset_ranges_bycept =[[1400,7000],[11100, 19500],[26000,36000]]
properdataset_ranges_straight_up =[[1000,8000],[8500, 12500]]
properdataset_ranges = properdataset_ranges_straight_up
# datasource = "/home/runge/openbci/git/OpenBCI_Python/build/dataset2017-5-5_23-55-32new_bycept.csv"
datasource = "/home/runge/openbci/git/OpenBCI_Python/build/dataset2017-5-6_0-0-33new_up.csv"
dataset_filtered_location = "/home/runge/openbci/git/OpenBCI_Python/build/dataset2017-5-5_23-55-32new_straight_up_filttered.csv"
raw_data = pd.read_csv(datasource)
filtered_dataset = pd.DataFrame()
for proper_boundary in properdataset_ranges:
filtered_dataset=filtered_dataset.append(raw_data[proper_boundary[0]:proper_boundary[1]], ignore_index = True)
print filtered_dataset.shape
with open(dataset_filtered_location,'w') as f:
np.savetxt(f, np.array(filtered_dataset), delimiter=',', fmt='%.18e')
| 1,186 | 37.290323 | 128 | py |
dibs | dibs-master/setup.py | from setuptools import setup, find_packages
setup(
name='dibs-lib',
version='1.2.1',
description='DiBS: Differentiable Bayesian Structure Learning',
author='Lars Lorch',
author_email='[email protected]',
url="https://github.com/larslorch/dibs",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=find_packages(),
install_requires=[
'jax>=0.2.25',
'jaxlib>=0.1.59',
'numpy',
'pandas',
'python-igraph',
'imageio',
'jupyter',
'tqdm',
'matplotlib',
]
)
| 688 | 24.518519 | 67 | py |
dibs | dibs-master/dibs/target.py | import jax.numpy as jnp
from jax import random
from dibs.models.graph import ErdosReniDAGDistribution, ScaleFreeDAGDistribution, UniformDAGDistributionRejection
from dibs.graph_utils import graph_to_mat
from dibs.models import LinearGaussian, BGe, DenseNonlinearGaussian
from typing import Any, NamedTuple
class Data(NamedTuple):
""" NamedTuple for structuring simulated synthetic data and their ground
truth generative model
Args:
passed_key (ndarray): ``jax.random`` key passed *into* the function generating this object
n_vars (int): number of variables in model
n_observations (int): number of observations in ``x`` and used to perform inference
n_ho_observations (int): number of held-out observations in ``x_ho``
and elements of ``x_interv`` used for evaluation
g (ndarray): ground truth DAG
theta (Any): ground truth parameters
x (ndarray): i.i.d observations from the model of shape ``[n_observations, n_vars]``
x_ho (ndarray): i.i.d observations from the model of shape ``[n_ho_observations, n_vars]``
x_interv (list): list of (interv dict, i.i.d observations)
"""
passed_key: Any
n_vars: int
n_observations: int
n_ho_observations: int
g: Any
theta: Any
x: Any
x_ho :Any
x_interv: Any
def make_synthetic_bayes_net(*,
key,
n_vars,
graph_dist,
generative_model,
n_observations=100,
n_ho_observations=100,
n_intervention_sets=10,
perc_intervened=0.1,
):
"""
    Returns an instance of :class:`~dibs.target.Data` for evaluation of a method on
a ground truth synthetic causal Bayesian network
Args:
key (ndarray): rng key
n_vars (int): number of variables
graph_dist (Any): graph model object. For example: :class:`~dibs.models.ErdosReniDAGDistribution`
generative_model (Any): BN model object for generating the observations. For example: :class:`~dibs.models.LinearGaussian`
n_observations (int): number of observations generated for posterior inference
n_ho_observations (int): number of held-out observations generated for evaluation
n_intervention_sets (int): number of different interventions considered overall
for generating interventional data
        perc_intervened (float): percentage of nodes intervened upon (clamped to the value 0)
            in an intervention.
Returns:
:class:`~dibs.target.Data`:
synthetic ground truth generative DAG and parameters as well observations sampled from the model
"""
# remember random key
passed_key = key.copy()
# generate ground truth observations
key, subk = random.split(key)
g_gt = graph_dist.sample_G(subk)
g_gt_mat = jnp.array(graph_to_mat(g_gt))
key, subk = random.split(key)
theta = generative_model.sample_parameters(key=subk, n_vars=n_vars)
key, subk = random.split(key)
x = generative_model.sample_obs(key=subk, n_samples=n_observations, g=g_gt, theta=theta)
key, subk = random.split(key)
x_ho = generative_model.sample_obs(key=subk, n_samples=n_ho_observations, g=g_gt, theta=theta)
# 10 random 0-clamp interventions where `perc_interv` % of nodes are intervened on
# list of (interv dict, x)
x_interv = []
for idx in range(n_intervention_sets):
# random intervention
key, subk = random.split(key)
n_interv = jnp.ceil(n_vars * perc_intervened).astype(jnp.int32)
interv_targets = random.choice(subk, n_vars, shape=(n_interv,), replace=False)
interv = {int(k): 0.0 for k in interv_targets}
# observations from p(x | theta, G, interv) [n_samples, n_vars]
key, subk = random.split(key)
x_interv_ = generative_model.sample_obs(key=subk, n_samples=n_observations, g=g_gt, theta=theta, interv=interv)
x_interv.append((interv, x_interv_))
# return and save generated target object
data = Data(
passed_key=passed_key,
n_vars=n_vars,
n_observations=n_observations,
n_ho_observations=n_ho_observations,
g=g_gt_mat,
theta=theta,
x=x,
x_ho=x_ho,
x_interv=x_interv,
)
return data
def make_graph_model(*, n_vars, graph_prior_str, edges_per_node=2):
"""
Instantiates graph model
Args:
n_vars (int): number of variables in graph
graph_prior_str (str): specifier for random graph model; choices: ``er``, ``sf``
edges_per_node (int): number of edges per node (in expectation when applicable)
Returns:
Object representing graph model. For example :class:`~dibs.models.ErdosReniDAGDistribution` or :class:`~dibs.models.ScaleFreeDAGDistribution`
"""
if graph_prior_str == 'er':
graph_dist = ErdosReniDAGDistribution(
n_vars=n_vars,
n_edges_per_node=edges_per_node)
elif graph_prior_str == 'sf':
graph_dist = ScaleFreeDAGDistribution(
n_vars=n_vars,
n_edges_per_node=edges_per_node)
else:
assert n_vars <= 5, "Naive uniform DAG sampling only possible up to 5 nodes"
graph_dist = UniformDAGDistributionRejection(
n_vars=n_vars)
return graph_dist
def make_linear_gaussian_equivalent_model(*, key, n_vars=20, graph_prior_str='sf',
obs_noise=0.1, mean_edge=0.0, sig_edge=1.0, min_edge=0.5, n_observations=100,
n_ho_observations=100):
"""
Samples a synthetic linear Gaussian BN instance
with Bayesian Gaussian equivalent (BGe) marginal likelihood
as inference model to weight each DAG in an MEC equally
By marginalizing out the parameters, the BGe model does not
allow inferring the parameters :math:`\\Theta`.
Args:
key (ndarray): rng key
        n_vars (int): number of variables
n_observations (int): number of iid observations of variables
n_ho_observations (int): number of iid held-out observations of variables
graph_prior_str (str): graph prior (``er`` or ``sf``)
obs_noise (float): observation noise
mean_edge (float): edge weight mean
sig_edge (float): edge weight stddev
min_edge (float): min edge weight enforced by constant shift of sampled parameter
Returns:
        tuple(:class:`~dibs.target.Data`, :class:`~dibs.models.BGe`):
            observations from a linear Gaussian generative process and the BGe inference model
"""
# init models
graph_dist = make_graph_model(n_vars=n_vars, graph_prior_str=graph_prior_str)
generative_model = LinearGaussian(
graph_dist=graph_dist, obs_noise=obs_noise,
mean_edge=mean_edge, sig_edge=sig_edge,
min_edge=min_edge)
inference_model = BGe(graph_dist=graph_dist)
# sample synthetic BN and observations
key, subk = random.split(key)
data = make_synthetic_bayes_net(
key=subk, n_vars=n_vars,
graph_dist=graph_dist,
generative_model=generative_model,
n_observations=n_observations,
n_ho_observations=n_ho_observations)
return data, inference_model
def make_linear_gaussian_model(*, key, n_vars=20, graph_prior_str='sf',
obs_noise=0.1, mean_edge=0.0, sig_edge=1.0, min_edge=0.5, n_observations=100,
n_ho_observations=100):
"""
Samples a synthetic linear Gaussian BN instance
Args:
key (ndarray): rng key
n_vars (int): number of variables
n_observations (int): number of iid observations of variables
n_ho_observations (int): number of iid held-out observations of variables
graph_prior_str (str): graph prior (`er` or `sf`)
obs_noise (float): observation noise
mean_edge (float): edge weight mean
sig_edge (float): edge weight stddev
min_edge (float): min edge weight enforced by constant shift of sampled parameter
Returns:
        tuple(:class:`~dibs.target.Data`, :class:`~dibs.models.LinearGaussian`):
            observations from a linear Gaussian generative process and the linear Gaussian inference model
"""
# init models
graph_dist = make_graph_model(n_vars=n_vars, graph_prior_str=graph_prior_str)
generative_model = LinearGaussian(
graph_dist=graph_dist, obs_noise=obs_noise,
mean_edge=mean_edge, sig_edge=sig_edge,
min_edge=min_edge)
inference_model = LinearGaussian(
graph_dist=graph_dist, obs_noise=obs_noise,
mean_edge=mean_edge, sig_edge=sig_edge,
min_edge=min_edge)
# sample synthetic BN and observations
key, subk = random.split(key)
data = make_synthetic_bayes_net(
key=subk, n_vars=n_vars,
graph_dist=graph_dist,
generative_model=generative_model,
n_observations=n_observations,
n_ho_observations=n_ho_observations)
return data, inference_model
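# Minimal usage sketch (illustrative only): samples a small synthetic linear
# Gaussian task with the helper above. The sizes are arbitrary and
# `_make_linear_gaussian_demo` is a hypothetical helper, not part of the API.
def _make_linear_gaussian_demo():
    key = random.PRNGKey(0)
    data, inference_model = make_linear_gaussian_model(key=key, n_vars=10)
    # observations used for inference have shape [n_observations, n_vars]
    assert data.x.shape == (data.n_observations, data.n_vars)
    return data, inference_model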
def make_nonlinear_gaussian_model(*, key, n_vars=20, graph_prior_str='sf',
obs_noise=0.1, sig_param=1.0, hidden_layers=[5,], n_observations=100,
n_ho_observations=100):
"""
Samples a synthetic nonlinear Gaussian BN instance
where the local conditional distributions are parameterized
by fully-connected neural networks.
Args:
key (ndarray): rng key
n_vars (int): number of variables
n_observations (int): number of iid observations of variables
n_ho_observations (int): number of iid held-out observations of variables
graph_prior_str (str): graph prior (`er` or `sf`)
obs_noise (float): observation noise
sig_param (float): stddev of the BN parameters,
i.e. here the neural net weights and biases
hidden_layers (list): list of ints specifying the hidden layer (sizes)
            of the neural nets parameterizing the local conditionals
Returns:
        tuple(:class:`~dibs.target.Data`, :class:`~dibs.models.DenseNonlinearGaussian`):
            observations from a nonlinear Gaussian generative process and the nonlinear Gaussian inference model
"""
# init models
graph_dist = make_graph_model(n_vars=n_vars, graph_prior_str=graph_prior_str)
generative_model = DenseNonlinearGaussian(
obs_noise=obs_noise, sig_param=sig_param,
hidden_layers=hidden_layers, graph_dist=graph_dist)
inference_model = DenseNonlinearGaussian(
obs_noise=obs_noise, sig_param=sig_param,
hidden_layers=hidden_layers, graph_dist=graph_dist)
# sample synthetic BN and observations
key, subk = random.split(key)
data = make_synthetic_bayes_net(
key=subk, n_vars=n_vars,
graph_dist=graph_dist,
generative_model=generative_model,
n_observations=n_observations,
n_ho_observations=n_ho_observations)
return data, inference_model
| 10,814 | 35.785714 | 149 | py |
dibs | dibs-master/dibs/graph_utils.py | import functools
import igraph as ig
import jax.numpy as jnp
from jax import jit, vmap
@functools.partial(jit, static_argnums=(1,))
def acyclic_constr_nograd(mat, n_vars):
"""
Differentiable acyclicity constraint from Yu et al. (2019)
http://proceedings.mlr.press/v97/yu19a/yu19a.pdf
Args:
mat (ndarray): graph adjacency matrix of shape ``[n_vars, n_vars]``
n_vars (int): number of variables, to allow for ``jax.jit``-compilation
Returns:
constraint value ``[1, ]``
"""
alpha = 1.0 / n_vars
# M = jnp.eye(n_vars) + alpha * mat * mat # [original version]
M = jnp.eye(n_vars) + alpha * mat
M_mult = jnp.linalg.matrix_power(M, n_vars)
h = jnp.trace(M_mult) - n_vars
return h
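# Minimal sanity check (illustrative only): the constraint evaluates to 0 for a
# DAG and to a strictly positive value for a cyclic matrix.
# `_acyclicity_demo` is a hypothetical helper, not part of the module API.
def _acyclicity_demo():
    dag = jnp.array([[0., 1.], [0., 0.]])      # single edge 0 -> 1
    cyclic = jnp.array([[0., 1.], [1., 0.]])   # 2-cycle 0 <-> 1
    return acyclic_constr_nograd(dag, 2), acyclic_constr_nograd(cyclic, 2)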
elwise_acyclic_constr_nograd = jit(vmap(acyclic_constr_nograd, (0, None), 0), static_argnums=(1,))
def graph_to_mat(g):
"""Returns adjacency matrix of ``ig.Graph`` object
Args:
g (igraph.Graph): graph
Returns:
ndarray:
adjacency matrix
"""
return jnp.array(g.get_adjacency().data)
def mat_to_graph(mat):
"""Returns ``ig.Graph`` object for adjacency matrix
Args:
mat (ndarray): adjacency matrix
Returns:
igraph.Graph:
graph
"""
return ig.Graph.Weighted_Adjacency(mat.tolist())
def mat_is_dag(mat):
"""Returns ``True`` iff adjacency matrix represents a DAG
Args:
mat (ndarray): graph adjacency matrix
Returns:
bool:
``True`` iff ``mat`` represents a DAG
"""
G = ig.Graph.Weighted_Adjacency(mat.tolist())
return G.is_dag()
def adjmat_to_str(mat, max_len=40):
"""
Converts binary adjacency matrix to human-readable string
Args:
mat (ndarray): graph adjacency matrix
max_len (int): maximum length of string
Returns:
str:
human readable description of edges in adjacency matrix
"""
edges_mat = jnp.where(mat == 1)
undir_ignore = set() # undirected edges, already printed
def get_edges():
for e in zip(*edges_mat):
u, v = e
# undirected?
if mat[v, u] == 1:
# check not printed yet
if e not in undir_ignore:
undir_ignore.add((v, u))
yield (u, v, True)
else:
yield (u, v, False)
strg = ' '.join([(f'{e[0]}--{e[1]}' if e[2] else
f'{e[0]}->{e[1]}') for e in get_edges()])
if len(strg) > max_len:
return strg[:max_len] + ' ... '
elif strg == '':
return '<empty graph>'
else:
return strg
| 2,654 | 23.813084 | 98 | py |
dibs | dibs-master/dibs/metrics.py | import jax.numpy as jnp
from jax.scipy.special import logsumexp
from dibs.utils.tree import tree_mul, tree_select
from dibs.graph_utils import elwise_acyclic_constr_nograd
from sklearn import metrics as sklearn_metrics
from typing import Any, NamedTuple
class ParticleDistribution(NamedTuple):
""" NamedTuple for structuring sampled particles :math:`(G, \\Theta)` (or :math:`G`)
and their assigned log probabilities
Args:
logp (ndarray): vector of log probabilities or weights of shape ``[M, ]``
g (ndarray): batch of graph adjacency matrix of shape ``[M, d, d]``
theta (ndarray): batch of parameter PyTrees with leading dimension ``M``
"""
logp: Any
g: Any
theta: Any = None
def pairwise_structural_hamming_distance(*, x, y):
"""
Computes pairwise Structural Hamming distance, i.e.
    the number of edge insertions, deletions or flips in order to transform one graph to another.
    This means that edge reversals do not double count, and that getting an undirected edge wrong only counts 1.
Args:
x (ndarray): batch of adjacency matrices [N, d, d]
y (ndarray): batch of adjacency matrices [M, d, d]
Returns:
matrix of shape ``[N, M]`` where elt ``i,j`` is SHD(``x[i]``, ``y[j]``)
"""
# all but first axis is usually used for the norm, assuming that first dim is batch dim
assert(x.ndim == 3 and y.ndim == 3)
# via computing pairwise differences
pw_diff = jnp.abs(jnp.expand_dims(x, axis=1) - jnp.expand_dims(y, axis=0))
pw_diff = pw_diff + pw_diff.transpose((0, 1, 3, 2))
# ignore double edges
pw_diff = jnp.where(pw_diff > 1, 1, pw_diff)
shd = jnp.sum(pw_diff, axis=(2, 3)) / 2
return shd
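# Minimal sanity check (illustrative only): a single reversed edge counts as one
# structural change. `_shd_demo` is a hypothetical helper, not part of the API.
def _shd_demo():
    g1 = jnp.array([[[0, 1], [0, 0]]])   # batch of one graph with edge 0 -> 1
    g2 = jnp.array([[[0, 0], [1, 0]]])   # batch of one graph with edge 1 -> 0
    return pairwise_structural_hamming_distance(x=g1, y=g2)   # [[1.]]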
def expected_shd(*, dist, g):
"""
Computes expected structural hamming distance metric, defined as
:math:`\\text{expected SHD}(p, G^*) := \\sum_G p(G | D) \\text{SHD}(G, G^*)`
Args:
dist (:class:`dibs.metrics.ParticleDistribution`): particle distribution
g (ndarray): ground truth adjacency matrix of shape ``[d, d]``
Returns:
expected SHD ``[1, ]``
"""
n_vars = g.shape[0]
# select acyclic graphs
is_dag = elwise_acyclic_constr_nograd(dist.g, n_vars) == 0
if is_dag.sum() == 0:
# score as "wrong on every edge"
return n_vars * (n_vars - 1) / 2
particles = dist.g[is_dag, :, :]
log_weights = dist.logp[is_dag] - logsumexp(dist.logp[is_dag])
# compute shd for each graph
shds = pairwise_structural_hamming_distance(x=particles, y=g[None]).squeeze(1)
# expected SHD = sum_G p(G) SHD(G)
log_expected_shd, log_expected_shd_sgn = logsumexp(
log_weights, b=shds.astype(log_weights.dtype), axis=0, return_sign=True)
eshd = log_expected_shd_sgn * jnp.exp(log_expected_shd)
return eshd
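# Minimal usage sketch (illustrative only): a two-particle distribution with
# equal weights, scored against a 2-node ground truth DAG; the expected SHD is
# 0.5 here. `_expected_shd_demo` is a hypothetical helper, not part of the API.
def _expected_shd_demo():
    g_true = jnp.array([[0, 1], [0, 0]])
    particles = jnp.array([[[0, 1], [0, 0]],    # matches g_true, SHD 0
                           [[0, 0], [0, 0]]])   # empty graph, SHD 1
    dist = ParticleDistribution(logp=jnp.log(jnp.array([0.5, 0.5])), g=particles)
    return expected_shd(dist=dist, g=g_true)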
def expected_edges(*, dist):
"""
Computes expected number of edges, defined as
:math:`\\text{expected edges}(p) := \\sum_G p(G | D) |\\text{edges}(G)|`
Args:
dist (:class:`dibs.metrics.ParticleDistribution`): particle distribution
Returns:
expected number of edges ``[1, ]``
"""
n_vars = dist.g.shape[-1]
# select acyclic graphs
is_dag = elwise_acyclic_constr_nograd(dist.g, n_vars) == 0
if is_dag.sum() == 0:
# if no acyclic graphs, count the edges of the cyclic graphs; more consistent
n_edges_cyc = dist.g.sum(axis=(-1, -2))
log_expected_edges_cyc, log_expected_edges_cyc_sgn = logsumexp(
dist.logp, b=n_edges_cyc.astype(dist.logp.dtype), axis=0, return_sign=True)
expected_edges_cyc = log_expected_edges_cyc_sgn * jnp.exp(log_expected_edges_cyc)
return expected_edges_cyc
particles = dist.g[is_dag, :, :]
log_weights = dist.logp[is_dag] - logsumexp(dist.logp[is_dag])
# count edges for each graph
n_edges = particles.sum(axis=(-1, -2))
# expected edges = sum_G p(G) edges(G)
log_expected_edges, log_expected_edges_sgn = logsumexp(
log_weights, b=n_edges.astype(log_weights.dtype), axis=0, return_sign=True)
edges = log_expected_edges_sgn * jnp.exp(log_expected_edges)
return edges
def threshold_metrics(*, dist, g):
"""
Computes various threshold metrics (e.g. ROC, precision-recall, ...)
Args:
dist (:class:`dibs.metrics.ParticleDistribution`): sampled particle distribution
g (ndarray): ground truth adjacency matrix of shape ``[d, d]``
Returns:
dict of metrics
"""
n_vars = g.shape[0]
g_flat = g.reshape(-1)
# select acyclic graphs
is_dag = elwise_acyclic_constr_nograd(dist.g, n_vars) == 0
if is_dag.sum() == 0:
# score as random/junk classifier
# for AUROC: 0.5
# for precision-recall: no. true edges/ no. possible edges
return {
'roc_auc': 0.5,
'prc_auc': (g.sum() / (n_vars * (n_vars - 1))).item(),
'ave_prec': (g.sum() / (n_vars * (n_vars - 1))).item(),
}
particles = dist.g[is_dag, :, :]
log_weights = dist.logp[is_dag] - logsumexp(dist.logp[is_dag])
# P(G_ij = 1) = sum_G w_G 1[G = G] in log space
log_edge_belief, log_edge_belief_sgn = logsumexp(
log_weights[..., jnp.newaxis, jnp.newaxis],
b=particles.astype(log_weights.dtype),
axis=0, return_sign=True)
# L1 edge error
p_edge = log_edge_belief_sgn * jnp.exp(log_edge_belief)
p_edge_flat = p_edge.reshape(-1)
# threshold metrics
fpr_, tpr_, _ = sklearn_metrics.roc_curve(g_flat, p_edge_flat)
roc_auc_ = sklearn_metrics.auc(fpr_, tpr_)
precision_, recall_, _ = sklearn_metrics.precision_recall_curve(g_flat, p_edge_flat)
prc_auc_ = sklearn_metrics.auc(recall_, precision_)
ave_prec_ = sklearn_metrics.average_precision_score(g_flat, p_edge_flat)
return {
'fpr': fpr_.tolist(),
'tpr': tpr_.tolist(),
'roc_auc': roc_auc_,
'precision': precision_.tolist(),
'recall': recall_.tolist(),
'prc_auc': prc_auc_,
'ave_prec': ave_prec_,
}
def neg_ave_log_marginal_likelihood(*, dist, eltwise_log_marginal_likelihood, x):
"""
Computes neg. ave log marginal likelihood for a marginal posterior over :math:`G`, defined as
    :math:`\\text{neg. MLL}(p, G^*) := - \\sum_G p(G | D) \\log p(D^{\\text{test}} | G)`
Args:
dist (:class:`dibs.metrics.ParticleDistribution`): particle distribution
eltwise_log_marginal_likelihood (callable):
            function evaluating the marginal log likelihood :math:`p(D | G)` for a batch of graph samples given
a data set of held-out observations;
must satisfy the signature
``[:, d, d], [N, d] -> [:,]``
x (ndarray): held-out observations of shape ``[N, d]``
Returns:
neg. ave log marginal likelihood metric of shape ``[1,]``
"""
n_ho_observations, n_vars = x.shape
# select acyclic graphs
is_dag = elwise_acyclic_constr_nograd(dist.g, n_vars) == 0
if is_dag.sum() == 0:
# score as empty graph only
g = jnp.zeros((1, n_vars, n_vars), dtype=dist.g.dtype)
log_weights = jnp.array([0.0], dtype=dist.logp.dtype)
else:
g = dist.g[is_dag, :, :]
log_weights = dist.logp[is_dag] - logsumexp(dist.logp[is_dag])
log_likelihood = eltwise_log_marginal_likelihood(g, x)
# - sum_G p(G | D) log(p(x | G))
log_score, log_score_sgn = logsumexp(
log_weights, b=log_likelihood, axis=0, return_sign=True)
score = - log_score_sgn * jnp.exp(log_score)
return score
def neg_ave_log_likelihood(*, dist, eltwise_log_likelihood, x):
"""
Computes neg. ave log likelihood for a joint posterior over :math:`(G, \\Theta)`, defined as
    :math:`\\text{neg. LL}(p, G^*) := - \\sum_G \\int_{\\Theta} p(G, \\Theta | D) \\log p(D^{\\text{test}} | G, \\Theta)`
Args:
dist (:class:`dibs.metrics.ParticleDistribution`): particle distribution
eltwise_log_likelihood (callable):
            function evaluating the log likelihood :math:`p(D | G, \\Theta)` for a batch of graph samples given
a data set of held-out observations;
must satisfy the signature
``[:, d, d], PyTree(leading dim :), [N, d] -> [:,]``
x (ndarray): held-out observations of shape ``[N, d]``
Returns:
neg. ave log likelihood metric of shape ``[1,]``
"""
assert dist.theta is not None
n_ho_observations, n_vars = x.shape
# select acyclic graphs
is_dag = elwise_acyclic_constr_nograd(dist.g, n_vars) == 0
if is_dag.sum() == 0:
# score as empty graph only
g = tree_mul(dist.g, 0.0)
theta = tree_mul(dist.theta, 0.0)
log_weights = tree_mul(dist.logp, 0.0)
else:
g = dist.g[is_dag, :, :]
theta = tree_select(dist.theta, is_dag)
log_weights = dist.logp[is_dag] - logsumexp(dist.logp[is_dag])
log_likelihood = eltwise_log_likelihood(g, theta, x)
# - sum_G p(G, theta | D) log(p(x | G, theta))
log_score, log_score_sgn = logsumexp(
log_weights, b=log_likelihood, axis=0, return_sign=True)
score = - log_score_sgn * jnp.exp(log_score)
return score
| 9,279 | 33.243542 | 116 | py |
dibs | dibs-master/dibs/__init__.py | 0 | 0 | 0 | py |
|
dibs | dibs-master/dibs/kernel.py | import jax.numpy as jnp
from dibs.utils.func import squared_norm_pytree
class AdditiveFrobeniusSEKernel:
"""
Squared exponential kernel defined as
:math:`k(Z, Z') = \\text{scale} \\cdot \\exp(- \\frac{1}{h} ||Z - Z'||^2_F )`
Args:
h (float): bandwidth parameter
scale (float): scale parameter
"""
def __init__(self, *, h=20.0, scale=1.0):
self.h = h
self.scale = scale
def eval(self, *, x, y):
"""Evaluates kernel function
Args:
x (ndarray): any shape ``[...]``
y (ndarray): any shape ``[...]``, but same as ``x``
Returns:
kernel value of shape ``[1,]``
"""
return self.scale * jnp.exp(- jnp.sum((x - y) ** 2.0) / self.h)
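# Minimal usage sketch (illustrative only): evaluates the kernel on two latent
# particles that differ by 1 in every entry, giving scale * exp(-30 / h) for the
# shapes below. `_kernel_demo` is a hypothetical helper, not part of the API.
def _kernel_demo():
    kernel = AdditiveFrobeniusSEKernel(h=20.0, scale=1.0)
    z1 = jnp.ones((5, 3, 2))
    z2 = jnp.zeros((5, 3, 2))
    return kernel.eval(x=z1, y=z2)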
class JointAdditiveFrobeniusSEKernel:
"""
Squared exponential kernel defined as
:math:`k((Z, \\Theta), (Z', \\Theta')) = \\text{scale}_z \\cdot \\exp(- \\frac{1}{h_z} ||Z - Z'||^2_F ) + \\text{scale}_{\\theta} \\cdot \\exp(- \\frac{1}{h_{\\theta}} ||\\Theta - \\Theta'||^2_F )`
Args:
h_latent (float): bandwidth parameter for :math:`Z` term
h_theta (float): bandwidth parameter for :math:`\\Theta` term
scale_latent (float): scale parameter for :math:`Z` term
scale_theta (float): scale parameter for :math:`\\Theta` term
"""
def __init__(self, *, h_latent=5.0, h_theta=500.0, scale_latent=1.0, scale_theta=1.0):
self.h_latent = h_latent
self.h_theta = h_theta
self.scale_latent = scale_latent
self.scale_theta = scale_theta
def eval(self, *, x_latent, x_theta, y_latent, y_theta):
"""Evaluates kernel function k(x, y)
Args:
x_latent (ndarray): any shape ``[...]``
x_theta (Any): any PyTree of ``jnp.array`` tensors
y_latent (ndarray): any shape ``[...]``, but same as ``x_latent``
y_theta (Any): any PyTree of ``jnp.array`` tensors, but same as ``x_theta``
Returns:
kernel value of shape ``[1,]``
"""
# compute norm
latent_squared_norm = jnp.sum((x_latent - y_latent) ** 2.0)
theta_squared_norm = squared_norm_pytree(x_theta, y_theta)
# compute kernel
return (self.scale_latent * jnp.exp(- latent_squared_norm / self.h_latent)
+ self.scale_theta * jnp.exp(- theta_squared_norm / self.h_theta))
| 2,413 | 32.068493 | 201 | py |
dibs | dibs-master/dibs/models/nonlinearGaussian.py | import os
import numpy as onp
import jax.numpy as jnp
from jax import vmap
from jax import random
from jax.scipy.stats import norm as jax_normal
from jax.tree_util import tree_map, tree_reduce
from jax.nn.initializers import normal
import jax.example_libraries.stax as stax
from jax.example_libraries.stax import Dense, Sigmoid, LeakyRelu, Relu, Tanh
from dibs.graph_utils import graph_to_mat
from dibs.utils.tree import tree_shapes
def DenseNoBias(out_dim, W_init=normal()):
"""Layer constructor function for a dense (fully-connected) layer _without_ bias"""
def init_fun(rng, input_shape):
output_shape = input_shape[:-1] + (out_dim,)
W = W_init(rng, (input_shape[-1], out_dim))
return output_shape, (W, )
def apply_fun(params, inputs, **kwargs):
W, = params
return jnp.dot(inputs, W)
return init_fun, apply_fun
def makeDenseNet(*, hidden_layers, sig_weight, sig_bias, bias=True, activation='relu'):
"""
Generates functions defining a fully-connected NN
with Gaussian initialized parameters
Args:
hidden_layers (list): list of ints specifying the dimensions of the hidden sizes
sig_weight: std dev of weight initialization
        sig_bias: std dev of bias initialization
        bias: whether to include a bias term in the linear layers
activation: activation function str; choices: `sigmoid`, `tanh`, `relu`, `leakyrelu`
Returns:
stax.serial neural net object
"""
    # features: [hidden_layers[0], hidden_layers[1], ..., hidden_layers[-1], 1]
if activation == 'sigmoid':
f_activation = Sigmoid
elif activation == 'tanh':
f_activation = Tanh
elif activation == 'relu':
f_activation = Relu
elif activation == 'leakyrelu':
f_activation = LeakyRelu
else:
raise KeyError(f'Invalid activation function `{activation}`')
modules = []
if bias:
for dim in hidden_layers:
modules += [
Dense(dim, W_init=normal(stddev=sig_weight),
b_init=normal(stddev=sig_bias)),
f_activation
]
modules += [Dense(1, W_init=normal(stddev=sig_weight),
b_init=normal(stddev=sig_bias))]
else:
for dim in hidden_layers:
modules += [
DenseNoBias(dim, W_init=normal(stddev=sig_weight)),
f_activation
]
modules += [DenseNoBias(1, W_init=normal(stddev=sig_weight))]
return stax.serial(*modules)
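# Minimal usage sketch (illustrative only): builds one of the per-node MLPs,
# initializes its parameters and runs a forward pass. The layer width and input
# dimension are arbitrary; `_dense_net_demo` is a hypothetical helper name.
def _dense_net_demo():
    init_params, forward = makeDenseNet(hidden_layers=[5], sig_weight=1.0, sig_bias=0.1)
    _, theta = init_params(random.PRNGKey(0), (3,))   # input dimension 3
    x = jnp.ones((10, 3))
    return forward(theta, x)                          # shape [10, 1]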
class DenseNonlinearGaussian:
"""
Nonlinear Gaussian BN model corresponding to a nonlinaer structural equation model (SEM)
with additive Gaussian noise.
Each variable distributed as Gaussian with mean parameterized by a dense neural network (MLP)
whose weights and biases are sampled from a Gaussian prior.
The noise variance at each node is equal by default.
Refer to http://proceedings.mlr.press/v108/zheng20a/zheng20a.pdf
Args:
graph_dist: Graph model defining prior :math:`\\log p(G)`. Object *has to implement the method*:
``unnormalized_log_prob_soft``.
For example: :class:`~dibs.graph.ErdosReniDAGDistribution`
or :class:`~dibs.graph.ScaleFreeDAGDistribution`
hidden_layers (list): list of integers specifying the number of layers as well as their widths.
For example: ``[8, 8]`` would correspond to 2 hidden layers with 8 neurons
obs_noise (float, optional): variance of additive observation noise at nodes
sig_param (float, optional): std dev of Gaussian parameter prior
activation (str, optional): identifier for activation function.
Choices: ``sigmoid``, ``tanh``, ``relu``, ``leakyrelu``
"""
def __init__(self, *, graph_dist, hidden_layers, obs_noise=0.1, sig_param=1.0, activation='relu', bias=True):
self.graph_dist = graph_dist
self.n_vars = graph_dist.n_vars
self.obs_noise = obs_noise
self.sig_param = sig_param
self.hidden_layers = hidden_layers
self.activation = activation
self.bias = bias
self.no_interv_targets = jnp.zeros(self.n_vars).astype(bool)
# init single neural net function for one variable with jax stax
self.nn_init_random_params, nn_forward = makeDenseNet(
hidden_layers=self.hidden_layers,
sig_weight=self.sig_param,
sig_bias=self.sig_param,
activation=self.activation,
bias=self.bias)
# [?], [N, d] -> [N,]
self.nn_forward = lambda theta, x: nn_forward(theta, x).squeeze(-1)
# vectorize init and forward functions
self.eltwise_nn_init_random_params = vmap(self.nn_init_random_params, (0, None), 0)
self.double_eltwise_nn_init_random_params = vmap(self.eltwise_nn_init_random_params, (0, None), 0)
self.triple_eltwise_nn_init_random_params = vmap(self.double_eltwise_nn_init_random_params, (0, None), 0)
# [d2, ?], [N, d] -> [N, d2]
self.eltwise_nn_forward = vmap(self.nn_forward, (0, None), 1)
# [d2, ?], [d2, N, d] -> [N, d2]
self.double_eltwise_nn_forward = vmap(self.nn_forward, (0, 0), 1)
def get_theta_shape(self, *, n_vars):
"""Returns tree shape of the parameters of the neural networks
Args:
n_vars (int): number of variables in model
Returns:
PyTree of parameter shape
"""
dummy_subkeys = jnp.zeros((n_vars, 2), dtype=jnp.uint32)
_, theta = self.eltwise_nn_init_random_params(dummy_subkeys, (n_vars, )) # second arg is `input_shape` of NN forward pass
theta_shape = tree_shapes(theta)
return theta_shape
def sample_parameters(self, *, key, n_vars, n_particles=0, batch_size=0):
"""Samples batch of random parameters given dimensions of graph from :math:`p(\\Theta | G)`
Args:
key (ndarray): rng
n_vars (int): number of variables in BN
n_particles (int): number of parameter particles sampled
batch_size (int): number of batches of particles being sampled
Returns:
Parameter PyTree with leading dimension(s) ``batch_size`` and/or ``n_particles``,
dropping either dimension when equal to 0
"""
shape = [d for d in (batch_size, n_particles, n_vars) if d != 0]
subkeys = random.split(key, int(onp.prod(shape))).reshape(*shape, 2)
if len(shape) == 1:
_, theta = self.eltwise_nn_init_random_params(subkeys, (n_vars, ))
elif len(shape) == 2:
_, theta = self.double_eltwise_nn_init_random_params(subkeys, (n_vars, ))
elif len(shape) == 3:
_, theta = self.triple_eltwise_nn_init_random_params(subkeys, (n_vars, ))
else:
raise ValueError(f"invalid shape size for nn param initialization {shape}")
# to float64
prec64 = 'JAX_ENABLE_X64' in os.environ and os.environ['JAX_ENABLE_X64'] == 'True'
theta = tree_map(lambda arr: arr.astype(jnp.float64 if prec64 else jnp.float32), theta)
return theta
def sample_obs(self, *, key, n_samples, g, theta, toporder=None, interv=None):
"""Samples ``n_samples`` observations given graph ``g`` and parameters ``theta``
by doing single forward passes in topological order
Args:
key (ndarray): rng
n_samples (int): number of samples
g (igraph.Graph): graph
theta (Any): parameters
interv (dict): intervention specification of the form ``{intervened node : clamp value}``
Returns:
observation matrix of shape ``[n_samples, n_vars]``
"""
if interv is None:
interv = {}
if toporder is None:
toporder = g.topological_sorting()
n_vars = len(g.vs)
x = jnp.zeros((n_samples, n_vars))
key, subk = random.split(key)
z = jnp.sqrt(self.obs_noise) * random.normal(subk, shape=(n_samples, n_vars))
g_mat = graph_to_mat(g)
# ancestral sampling
        # for simplicity, does d full forward passes, which avoids indexing into the python list of parameters
for j in toporder:
# intervention
if j in interv.keys():
x = x.at[:, j].set(interv[j])
continue
# regular ancestral sampling
parents = g_mat[:, j].reshape(1, -1)
has_parents = parents.sum() > 0
if has_parents:
# [N, d] = [N, d] * [1, d] mask non-parent entries of j
x_msk = x * parents
# [N, d] full forward pass
means = self.eltwise_nn_forward(theta, x_msk)
# [N,] update j only
x = x.at[:, j].set(means[:, j] + z[:, j])
else:
x = x.at[:, j].set(z[:, j])
return x
"""
The following functions need to be functionally pure and @jit-able
"""
def log_prob_parameters(self, *, theta, g):
"""Computes parameter prior :math:`\\log p(\\Theta | G)``
In this model, the prior over weights and biases is zero-centered Gaussian.
Arguments:
theta (Any): parameter pytree
g (ndarray): graph adjacency matrix of shape ``[n_vars, n_vars]``
Returns:
log prob
"""
# compute log prob for each weight
logprobs = tree_map(lambda leaf_theta: jax_normal.logpdf(x=leaf_theta, loc=0.0, scale=self.sig_param), theta)
# mask logprobs of first layer weight matrix [0][0] according to graph
# [d, d, dim_first_layer] = [d, d, dim_first_layer] * [d, d, 1]
if self.bias:
first_weight_logprobs, first_bias_logprobs = logprobs[0]
logprobs[0] = (first_weight_logprobs * g.T[:, :, None], first_bias_logprobs)
else:
first_weight_logprobs, = logprobs[0]
logprobs[0] = (first_weight_logprobs * g.T[:, :, None],)
# sum logprobs of every parameter tensor and add all up
return tree_reduce(jnp.add, tree_map(jnp.sum, logprobs))
def log_likelihood(self, *, x, theta, g, interv_targets):
"""Computes likelihood :math:`p(D | G, \\Theta)`.
In this model, the noise per observation and node is additive and Gaussian.
Arguments:
x (ndarray): observations of shape ``[n_observations, n_vars]``
theta (Any): parameters PyTree
g (ndarray): graph adjacency matrix of shape ``[n_vars, n_vars]``
interv_targets (ndarray): binary intervention indicator vector of shape ``[n_vars, ]``
Returns:
log prob
"""
assert x.shape == interv_targets.shape
# [d2, N, d] = [1, N, d] * [d2, 1, d] mask non-parent entries of each j
all_x_msk = x[None] * g.T[:, None]
        # [N, d2] NN forward passes using the parameters of each node j
all_means = self.double_eltwise_nn_forward(theta, all_x_msk)
# sum scores for all nodes and data
return jnp.sum(
jnp.where(
# [n_observations, n_vars]
interv_targets,
0.0,
# [n_observations, n_vars]
jax_normal.logpdf(x=x, loc=all_means, scale=jnp.sqrt(self.obs_noise))
)
)
"""
Distributions used by DiBS for inference: prior and joint likelihood
"""
def log_graph_prior(self, g_prob):
""" Computes graph prior :math:`\\log p(G)` given matrix of edge probabilities.
This function simply binds the function of the provided ``self.graph_dist``.
Arguments:
g_prob (ndarray): edge probabilities in G of shape ``[n_vars, n_vars]``
Returns:
log prob
"""
return self.graph_dist.unnormalized_log_prob_soft(soft_g=g_prob)
def interventional_log_joint_prob(self, g, theta, x, interv_targets, rng):
"""Computes interventional joint likelihood :math:`\\log p(\\Theta, D | G)``
Arguments:
g (ndarray): graph adjacency matrix of shape ``[n_vars, n_vars]``
theta (Any): parameter PyTree
x (ndarray): observational data of shape ``[n_observations, n_vars]``
interv_targets (ndarray): indicator mask of interventions of shape ``[n_observations, n_vars]``
rng (ndarray): rng; skeleton for minibatching (TBD)
Returns:
log prob of shape ``[1,]``
"""
log_prob_theta = self.log_prob_parameters(g=g, theta=theta)
log_likelihood = self.log_likelihood(g=g, theta=theta, x=x, interv_targets=interv_targets)
return log_prob_theta + log_likelihood
| 12,829 | 36.51462 | 129 | py |
dibs | dibs-master/dibs/models/graph.py | import igraph as ig
import random as pyrandom
import jax.numpy as jnp
from jax import random
from dibs.graph_utils import mat_to_graph, graph_to_mat, mat_is_dag
from dibs.utils.func import zero_diagonal
class ErdosReniDAGDistribution:
"""
Randomly oriented Erdos-Reni random graph model with i.i.d. edge probability.
The pmf is defined as
:math:`p(G) \\propto p^e (1-p)^{\\binom{d}{2} - e}`
where :math:`e` denotes the total number of edges in :math:`G`
and :math:`p` is chosen to satisfy the requirement of sampling ``n_edges_per_node``
edges per node in expectation.
Args:
n_vars (int): number of variables in DAG
n_edges_per_node (int): number of edges sampled per variable in expectation
"""
def __init__(self, n_vars, n_edges_per_node=2):
self.n_vars = n_vars
self.n_edges = n_edges_per_node * n_vars
self.p = self.n_edges / ((self.n_vars * (self.n_vars - 1)) / 2)
def sample_G(self, key, return_mat=False):
"""Samples DAG
Args:
key (ndarray): rng
return_mat (bool): if ``True``, returns adjacency matrix of shape ``[n_vars, n_vars]``
Returns:
``iGraph.graph`` / ``jnp.array``:
DAG
"""
key, subk = random.split(key)
mat = random.bernoulli(subk, p=self.p, shape=(self.n_vars, self.n_vars)).astype(jnp.int32)
# make DAG by zeroing above diagonal; k=-1 indicates that diagonal is zero too
dag = jnp.tril(mat, k=-1)
# randomly permute
key, subk = random.split(key)
P = random.permutation(subk, jnp.eye(self.n_vars, dtype=jnp.int32))
dag_perm = P.T @ dag @ P
if return_mat:
return dag_perm
else:
g = mat_to_graph(dag_perm)
return g
def unnormalized_log_prob_single(self, *, g, j):
"""
        Computes :math:`\\log p(G_j)` up to the normalization constant
        Args:
            g (iGraph.graph): graph
            j (int): node index
Returns:
unnormalized log probability of node family of :math:`j`
"""
parent_edges = g.incident(j, mode='in')
n_parents = len(parent_edges)
return n_parents * jnp.log(self.p) + (self.n_vars - n_parents - 1) * jnp.log(1 - self.p)
def unnormalized_log_prob(self, *, g):
"""
        Computes :math:`\\log p(G)` up to the normalization constant
Args:
g (iGraph.graph): graph
Returns:
unnormalized log probability of :math:`G`
"""
N = self.n_vars * (self.n_vars - 1) / 2.0
E = len(g.es)
return E * jnp.log(self.p) + (N - E) * jnp.log(1 - self.p)
def unnormalized_log_prob_soft(self, *, soft_g):
"""
        Computes :math:`\\log p(G)` up to the normalization constant
where :math:`G` is the matrix of edge probabilities
Args:
soft_g (ndarray): graph adjacency matrix, where entries
may be probabilities and not necessarily 0 or 1
Returns:
unnormalized log probability corresponding to edge probabilities in :math:`G`
"""
N = self.n_vars * (self.n_vars - 1) / 2.0
E = soft_g.sum()
return E * jnp.log(self.p) + (N - E) * jnp.log(1 - self.p)
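# Minimal usage sketch (illustrative only): samples a DAG adjacency matrix from
# the prior above and scores it under the (soft) unnormalized log probability.
# `_er_prior_demo` is a hypothetical helper, not part of the module API.
def _er_prior_demo():
    dist = ErdosReniDAGDistribution(n_vars=10, n_edges_per_node=2)
    g_mat = dist.sample_G(random.PRNGKey(0), return_mat=True)   # [10, 10] in {0, 1}
    return dist.unnormalized_log_prob_soft(soft_g=g_mat)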
class ScaleFreeDAGDistribution:
"""
Randomly-oriented scale-free random graph with power-law degree distribution.
The pmf is defined as
:math:`p(G) \\propto \\prod_j (1 + \\text{deg}(j))^{-3}`
where :math:`\\text{deg}(j)` denotes the in-degree of node :math:`j`
Args:
n_vars (int): number of variables in DAG
n_edges_per_node (int): number of edges sampled per variable
"""
def __init__(self, n_vars, verbose=False, n_edges_per_node=2):
self.n_vars = n_vars
self.n_edges_per_node = n_edges_per_node
self.verbose = verbose
def sample_G(self, key, return_mat=False):
"""Samples DAG
Args:
key (ndarray): rng
return_mat (bool): if ``True``, returns adjacency matrix of shape ``[n_vars, n_vars]``
Returns:
``iGraph.graph`` / ``jnp.array``:
DAG
"""
pyrandom.seed(int(key.sum()))
perm = random.permutation(key, self.n_vars).tolist()
g = ig.Graph.Barabasi(n=self.n_vars, m=self.n_edges_per_node, directed=True).permute_vertices(perm)
if return_mat:
return graph_to_mat(g)
else:
return g
def unnormalized_log_prob_single(self, *, g, j):
"""
        Computes :math:`\\log p(G_j)` up to the normalization constant
        Args:
            g (iGraph.graph): graph
            j (int): node index
Returns:
unnormalized log probability of node family of :math:`j`
"""
parent_edges = g.incident(j, mode='in')
n_parents = len(parent_edges)
return -3 * jnp.log(1 + n_parents)
def unnormalized_log_prob(self, *, g):
"""
        Computes :math:`\\log p(G)` up to the normalization constant
Args:
g (iGraph.graph): graph
Returns:
unnormalized log probability of :math:`G`
"""
return jnp.array([self.unnormalized_log_prob_single(g=g, j=j) for j in range(self.n_vars)]).sum()
def unnormalized_log_prob_soft(self, *, soft_g):
"""
        Computes :math:`\\log p(G)` up to the normalization constant
where :math:`G` is the matrix of edge probabilities
Args:
soft_g (ndarray): graph adjacency matrix, where entries
may be probabilities and not necessarily 0 or 1
Returns:
unnormalized log probability corresponding to edge probabilities in :math:`G`
"""
soft_indegree = soft_g.sum(0)
return jnp.sum(-3 * jnp.log(1 + soft_indegree))
class UniformDAGDistributionRejection:
"""
Naive implementation of a uniform distribution over DAGs via rejection
sampling. This is efficient up to roughly :math:`d = 5`.
Properly sampling a uniformly-random DAG is possible but nontrivial
and not implemented here.
Args:
n_vars (int): number of variables in DAG
"""
def __init__(self, n_vars):
self.n_vars = n_vars
def sample_G(self, key, return_mat=False):
"""Samples DAG
Args:
key (ndarray): rng
return_mat (bool): if ``True``, returns adjacency matrix of shape ``[n_vars, n_vars]``
Returns:
``iGraph.graph`` / ``jnp.array``:
DAG
"""
while True:
key, subk = random.split(key)
mat = random.bernoulli(subk, p=0.5, shape=(self.n_vars, self.n_vars)).astype(jnp.int32)
mat = zero_diagonal(mat)
if mat_is_dag(mat):
if return_mat:
return mat
else:
return mat_to_graph(mat)
def unnormalized_log_prob_single(self, *, g, j):
"""
        Computes :math:`\\log p(G_j)` up to the normalization constant
        Args:
            g (iGraph.graph): graph
            j (int): node index
Returns:
unnormalized log probability of node family of :math:`j`
"""
return jnp.array(0.0)
def unnormalized_log_prob(self, *, g):
"""
        Computes :math:`\\log p(G)` up to the normalization constant
Args:
g (iGraph.graph): graph
Returns:
unnormalized log probability of :math:`G`
"""
return jnp.array(0.0)
def unnormalized_log_prob_soft(self, *, soft_g):
"""
        Computes :math:`\\log p(G)` up to the normalization constant
where :math:`G` is the matrix of edge probabilities
Args:
soft_g (ndarray): graph adjacency matrix, where entries
may be probabilities and not necessarily 0 or 1
Returns:
unnormalized log probability corresponding to edge probabilities in :math:`G`
"""
return jnp.array(0.0)
| 8,119 | 28.314079 | 107 | py |
dibs | dibs-master/dibs/models/linearGaussian.py | import jax.numpy as jnp
from jax import random, vmap
from jax.scipy.stats import norm as jax_normal
from jax.scipy.special import gammaln
from dibs.utils.func import _slogdet_jax
class BGe:
"""
Linear Gaussian BN model corresponding to linear structural equation model (SEM) with additive Gaussian noise.
Uses Normal-Wishart conjugate parameter prior to allow for closed-form marginal likelihood
:math:`\\log p(D | G)` and thus allows inference of the marginal posterior :math:`p(G | D)`
For details on the closed-form expression, refer to
- Geiger et al. (2002): https://projecteuclid.org/download/pdf_1/euclid.aos/1035844981
- Kuipers et al. (2014): https://projecteuclid.org/download/suppdf_1/euclid.aos/1407420013
The default arguments imply commonly-used default hyperparameters for mean and precision
of the Normal-Wishart and assume a diagonal parameter matrix :math:`T`.
Inspiration for the implementation was drawn from
https://bitbucket.org/jamescussens/pygobnilp/src/master/pygobnilp/scoring.py
This implementation uses properties of the determinant to make the computation of the marginal likelihood
``jax.jit``-compilable and ``jax.grad``-differentiable by remaining well-defined for soft relaxations of the graph.
Args:
graph_dist: Graph model defining prior :math:`\\log p(G)`. Object *has to implement the method*:
``unnormalized_log_prob_soft``.
For example: :class:`~dibs.graph.ErdosReniDAGDistribution`
or :class:`~dibs.graph.ScaleFreeDAGDistribution`
mean_obs (ndarray, optional): mean parameter of Normal
alpha_mu (float, optional): precision parameter of Normal
alpha_lambd (float, optional): degrees of freedom parameter of Wishart
"""
def __init__(self, *,
graph_dist,
mean_obs=None,
alpha_mu=None,
alpha_lambd=None,
):
self.graph_dist = graph_dist
self.n_vars = graph_dist.n_vars
self.mean_obs = mean_obs or jnp.zeros(self.n_vars)
self.alpha_mu = alpha_mu or 1.0
self.alpha_lambd = alpha_lambd or (self.n_vars + 2)
assert self.alpha_lambd > self.n_vars + 1
self.no_interv_targets = jnp.zeros(self.n_vars).astype(bool)
def get_theta_shape(self, *, n_vars):
raise NotImplementedError("Not available for BGe score; use `LinearGaussian` model instead.")
def sample_parameters(self, *, key, n_vars, n_particles=0, batch_size=0):
raise NotImplementedError("Not available for BGe score; use `LinearGaussian` model instead.")
def sample_obs(self, *, key, n_samples, g, theta, toporder=None, interv=None):
raise NotImplementedError("Not available for BGe score; use `LinearGaussian` model instead.")
"""
The following functions need to be functionally pure and jax.jit-compilable
"""
def _log_marginal_likelihood_single(self, j, n_parents, g, x, interv_targets):
"""
Computes node-specific score of BGe marginal likelihood. ``jax.jit``-compilable
Args:
j (int): node index for score
n_parents (int): number of parents of node ``j``
g (ndarray): adjacency matrix of shape ``[d, d]
x (ndarray): observations matrix of shape ``[N, d]``
interv_targets (ndarray): intervention indicator matrix of shape ``[N, d]``
Returns:
BGe score for node ``j``
"""
d = x.shape[-1]
small_t = (self.alpha_mu * (self.alpha_lambd - d - 1)) / (self.alpha_mu + 1)
T = small_t * jnp.eye(d)
# mask rows of `x` where j is intervened upon to 0.0 and compute (remaining) number of observations `N`
x = x * (1 - interv_targets[..., j, None])
N = (1 - interv_targets[..., j]).sum()
# covariance matrix of non-intervened rows
x_bar = jnp.where(jnp.isclose(N, 0), jnp.zeros((1, d)), x.sum(axis=0, keepdims=True) / N)
x_center = (x - x_bar) * (1 - interv_targets[..., j, None])
s_N = x_center.T @ x_center # [d, d]
# Kuipers et al. (2014) state `R` wrongly in the paper, using `alpha_lambd` rather than `alpha_mu`
# their supplementary contains the correct term
R = T + s_N + ((N * self.alpha_mu) / (N + self.alpha_mu)) * \
((x_bar - self.mean_obs).T @ (x_bar - self.mean_obs)) # [d, d]
parents = g[:, j]
parents_and_j = (g + jnp.eye(d))[:, j]
log_gamma_term = (
0.5 * (jnp.log(self.alpha_mu) - jnp.log(N + self.alpha_mu))
+ gammaln(0.5 * (N + self.alpha_lambd - d + n_parents + 1))
- gammaln(0.5 * (self.alpha_lambd - d + n_parents + 1))
- 0.5 * N * jnp.log(jnp.pi)
# log det(T_JJ)^(..) / det(T_II)^(..) for default T
+ 0.5 * (self.alpha_lambd - d + 2 * n_parents + 1) *
jnp.log(small_t)
)
log_term_r = (
# log det(R_II)^(..) / det(R_JJ)^(..)
0.5 * (N + self.alpha_lambd - d + n_parents) *
_slogdet_jax(R, parents)
- 0.5 * (N + self.alpha_lambd - d + n_parents + 1) *
_slogdet_jax(R, parents_and_j)
)
# return neutral sum element (0) if no observations (N=0)
return jnp.where(jnp.isclose(N, 0), 0.0, log_gamma_term + log_term_r)
def log_marginal_likelihood(self, *, g, x, interv_targets):
"""Computes BGe marginal likelihood :math:`\\log p(D | G)`` in closed-form;
``jax.jit``-compatible
Args:
g (ndarray): adjacency matrix of shape ``[d, d]``
x (ndarray): observations of shape ``[N, d]``
interv_targets (ndarray): boolean mask of shape ``[N, d]`` of whether or not
a node was intervened upon in a given sample. Intervened nodes are ignored in likelihood computation
Returns:
BGe Score
"""
# indices
_, d = x.shape
nodes_idx = jnp.arange(d)
# number of parents for each node
n_parents_all = g.sum(axis=0)
# sum scores for all nodes [d,]
scores = vmap(self._log_marginal_likelihood_single,
(0, 0, None, None, None), 0)(nodes_idx, n_parents_all, g, x, interv_targets)
return scores.sum(0)
"""
Distributions used by DiBS for inference: prior and marginal likelihood
"""
def log_graph_prior(self, g_prob):
""" Computes graph prior :math:`\\log p(G)` given matrix of edge probabilities.
This function simply binds the function of the provided ``self.graph_dist``.
Arguments:
g_prob (ndarray): edge probabilities in G of shape ``[n_vars, n_vars]``
Returns:
log prob
"""
return self.graph_dist.unnormalized_log_prob_soft(soft_g=g_prob)
def interventional_log_marginal_prob(self, g, _, x, interv_targets, rng):
"""Computes interventional marginal likelihood :math:`\\log p(D | G)`` in closed-form;
``jax.jit``-compatible
To unify the function signatures for the marginal and joint inference classes
:class:`~dibs.inference.MarginalDiBS` and :class:`~dibs.inference.JointDiBS`,
this marginal likelihood is defined with dummy ``theta`` inputs as ``_``,
i.e., like a joint likelihood
Arguments:
g (ndarray): graph adjacency matrix of shape ``[n_vars, n_vars]``.
Entries must be binary and of type ``jnp.int32``
_:
x (ndarray): observational data of shape ``[n_observations, n_vars]``
interv_targets (ndarray): indicator mask of interventions of shape ``[n_observations, n_vars]``
rng (ndarray): rng; skeleton for minibatching (TBD)
Returns:
BGe score of shape ``[1,]``
"""
return self.log_marginal_likelihood(g=g, x=x, interv_targets=interv_targets)
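# Minimal usage sketch (illustrative only): scores a fixed 2-node DAG under the
# BGe marginal likelihood with random observations and no interventions. The
# graph prior choice and `_bge_demo` name are illustrative assumptions only.
def _bge_demo():
    from dibs.models.graph import ErdosReniDAGDistribution
    model = BGe(graph_dist=ErdosReniDAGDistribution(n_vars=2))
    g = jnp.array([[0, 1], [0, 0]])
    x = random.normal(random.PRNGKey(0), shape=(50, 2))
    no_interv = jnp.zeros_like(x, dtype=bool)
    return model.log_marginal_likelihood(g=g, x=x, interv_targets=no_interv)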
class LinearGaussian:
"""
Linear Gaussian BN model corresponding to linear structural equation model (SEM) with additive Gaussian noise.
Each variable distributed as Gaussian with mean being the linear combination of its parents
weighted by a Gaussian parameter vector (i.e., with Gaussian-valued edges).
The noise variance at each node is equal by default, which implies the causal structure is identifiable.
Args:
graph_dist: Graph model defining prior :math:`\\log p(G)`. Object *has to implement the method*:
``unnormalized_log_prob_soft``.
For example: :class:`~dibs.graph.ErdosReniDAGDistribution`
or :class:`~dibs.graph.ScaleFreeDAGDistribution`
obs_noise (float, optional): variance of additive observation noise at nodes
mean_edge (float, optional): mean of Gaussian edge weight
sig_edge (float, optional): std dev of Gaussian edge weight
min_edge (float, optional): minimum linear effect of parent on child
"""
def __init__(self, *, graph_dist, obs_noise=0.1, mean_edge=0.0, sig_edge=1.0, min_edge=0.5):
self.graph_dist = graph_dist
self.n_vars = graph_dist.n_vars
self.obs_noise = obs_noise
self.mean_edge = mean_edge
self.sig_edge = sig_edge
self.min_edge = min_edge
self.no_interv_targets = jnp.zeros(self.n_vars).astype(bool)
def get_theta_shape(self, *, n_vars):
"""Returns tree shape of the parameters of the linear model
Args:
n_vars (int): number of variables in model
Returns:
PyTree of parameter shape
"""
return jnp.array((n_vars, n_vars))
def sample_parameters(self, *, key, n_vars, n_particles=0, batch_size=0):
"""Samples batch of random parameters given dimensions of graph from :math:`p(\\Theta | G)`
Args:
key (ndarray): rng
n_vars (int): number of variables in BN
n_particles (int): number of parameter particles sampled
batch_size (int): number of batches of particles being sampled
Returns:
Parameters ``theta`` of shape ``[batch_size, n_particles, n_vars, n_vars]``, dropping dimensions equal to 0
"""
shape = (batch_size, n_particles, *self.get_theta_shape(n_vars=n_vars))
theta = self.mean_edge + self.sig_edge * random.normal(key, shape=tuple(d for d in shape if d != 0))
theta += jnp.sign(theta) * self.min_edge
return theta
def sample_obs(self, *, key, n_samples, g, theta, toporder=None, interv=None):
"""Samples ``n_samples`` observations given graph ``g`` and parameters ``theta``
Args:
key (ndarray): rng
n_samples (int): number of samples
g (igraph.Graph): graph
theta (Any): parameters
interv (dict): intervention specification of the form ``{intervened node : clamp value}``
Returns:
observation matrix of shape ``[n_samples, n_vars]``
"""
if interv is None:
interv = {}
if toporder is None:
toporder = g.topological_sorting()
x = jnp.zeros((n_samples, len(g.vs)))
key, subk = random.split(key)
z = jnp.sqrt(self.obs_noise) * random.normal(subk, shape=(n_samples, len(g.vs)))
# ancestral sampling
for j in toporder:
# intervention
if j in interv.keys():
x = x.at[:, j].set(interv[j])
continue
# regular ancestral sampling
parent_edges = g.incident(j, mode='in')
parents = list(g.es[e].source for e in parent_edges)
if parents:
mean = x[:, jnp.array(parents)] @ theta[jnp.array(parents), j]
x = x.at[:, j].set(mean + z[:, j])
else:
x = x.at[:, j].set(z[:, j])
return x
"""
The following functions need to be functionally pure and @jit-able
"""
def log_prob_parameters(self, *, theta, g):
"""Computes parameter prior :math:`\\log p(\\Theta | G)``
In this model, the parameter prior is Gaussian.
Arguments:
theta (ndarray): parameter matrix of shape ``[n_vars, n_vars]``
g (ndarray): graph adjacency matrix of shape ``[n_vars, n_vars]``
Returns:
log prob
"""
return jnp.sum(g * jax_normal.logpdf(x=theta, loc=self.mean_edge, scale=self.sig_edge))
def log_likelihood(self, *, x, theta, g, interv_targets):
"""Computes likelihood :math:`p(D | G, \\Theta)`.
In this model, the noise per observation and node is additive and Gaussian.
Arguments:
x (ndarray): observations of shape ``[n_observations, n_vars]``
theta (ndarray): parameters of shape ``[n_vars, n_vars]``
g (ndarray): graph adjacency matrix of shape ``[n_vars, n_vars]``
interv_targets (ndarray): binary intervention indicator vector of shape ``[n_observations, n_vars]``
Returns:
log prob
"""
assert x.shape == interv_targets.shape
# sum scores for all nodes and data
return jnp.sum(
jnp.where(
# [n_observations, n_vars]
interv_targets,
0.0,
# [n_observations, n_vars]
jax_normal.logpdf(x=x, loc=x @ (g * theta), scale=jnp.sqrt(self.obs_noise))
)
)
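        # Masking semantics: entries with interv_targets[n, j] == 1 contribute zero to the sum,
        # so intervened-upon values are excluded from the likelihood of their own node while
        # still acting as parent inputs for downstream nodes via `x @ (g * theta)`.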
"""
Distributions used by DiBS for inference: prior and joint likelihood
"""
def log_graph_prior(self, g_prob):
""" Computes graph prior :math:`\\log p(G)` given matrix of edge probabilities.
This function simply binds the function of the provided ``self.graph_dist``.
Arguments:
g_prob (ndarray): edge probabilities in G of shape ``[n_vars, n_vars]``
Returns:
log prob
"""
return self.graph_dist.unnormalized_log_prob_soft(soft_g=g_prob)
def interventional_log_joint_prob(self, g, theta, x, interv_targets, rng):
"""Computes interventional joint likelihood :math:`\\log p(\\Theta, D | G)``
Arguments:
g (ndarray): graph adjacency matrix of shape ``[n_vars, n_vars]``
theta (ndarray): parameter matrix of shape ``[n_vars, n_vars]``
x (ndarray): observational data of shape ``[n_observations, n_vars]``
interv_targets (ndarray): indicator mask of interventions of shape ``[n_observations, n_vars]``
rng (ndarray): rng; skeleton for minibatching (TBD)
Returns:
log prob of shape ``[1,]``
"""
log_prob_theta = self.log_prob_parameters(g=g, theta=theta)
log_likelihood = self.log_likelihood(g=g, theta=theta, x=x, interv_targets=interv_targets)
return log_prob_theta + log_likelihood
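# Minimal usage sketch (illustration only, guarded so it does not run on import).
# `_UniformGraphPrior` is a hypothetical stand-in exposing only the two members that
# `LinearGaussian` actually touches (`n_vars` and `unnormalized_log_prob_soft`).
if __name__ == "__main__":
    class _UniformGraphPrior:
        n_vars = 3
        @staticmethod
        def unnormalized_log_prob_soft(*, soft_g):
            return 0.0
    _model = LinearGaussian(graph_dist=_UniformGraphPrior())
    _key = random.PRNGKey(0)
    _theta = _model.sample_parameters(key=_key, n_vars=3)              # [3, 3]
    _g = jnp.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])                  # chain 0 -> 1 -> 2
    _x = random.normal(_key, shape=(5, 3))                             # placeholder data
    _interv = jnp.zeros_like(_x).astype(bool)                          # purely observational
    print(_model.interventional_log_joint_prob(_g, _theta, _x, _interv, None))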
| 14,980 | 39.163539 | 119 | py |
dibs | dibs-master/dibs/models/__init__.py | from .graph import ErdosReniDAGDistribution, ScaleFreeDAGDistribution, UniformDAGDistributionRejection
from .linearGaussian import BGe, LinearGaussian
from .nonlinearGaussian import DenseNonlinearGaussian | 204 | 67.333333 | 102 | py |
dibs | dibs-master/dibs/utils/tree.py | import jax.numpy as jnp
from jax.tree_util import tree_flatten, tree_unflatten, tree_map, tree_reduce
from jax import random
def tree_index(pytree, idx):
"""
Indexes pytree leaves and returns resulting pytree
"""
return tree_map(lambda leaf: leaf[idx], pytree)
def tree_select(pytree, bool_mask):
"""
Indexes pytree leading dimension with boolean mask
"""
return tree_map(lambda leaf: leaf[bool_mask, ...], pytree)
def tree_zip_leading(pytree_list):
"""
Converts n pytrees without leading dimension into one pytree with leading dim [n, ...]
"""
return tree_map(lambda *args: jnp.stack([*args]) if len(args) > 1 else tree_expand_leading_by(*args, 1), *pytree_list)
def tree_unzip_leading(pytree, n):
"""
Converts pytree with leading dim [n, ...] into n pytrees without the leading dimension
"""
leaves, treedef = tree_flatten(pytree)
return [
tree_unflatten(treedef, [leaf[i] for leaf in leaves])
for i in range(n)
]
def tree_expand_leading_by(pytree, n):
"""
    Returns pytree with `n` additional leading dimensions of size 1 added to every leaf
"""
return tree_map(lambda leaf: jnp.expand_dims(leaf, axis=tuple(range(n))), pytree)
def tree_shapes(pytree):
"""
Returns pytree with same tree but leaves replaced by original shapes
"""
return tree_map(lambda leaf: jnp.array(leaf.shape), pytree)
def tree_key_split(key, pytree):
"""
Generates one subkey from `key` for each leaf of `pytree` and returns it in tree of shape `pytree`
"""
tree_flat, treedef = tree_flatten(pytree)
subkeys_flat = random.split(key, len(tree_flat))
subkeys_tree = tree_unflatten(treedef, subkeys_flat)
return subkeys_tree
def tree_mul(pytree, c):
"""
Multiplies every leaf of pytree with `c`
"""
return tree_map(lambda leaf: leaf * c, pytree)
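# Minimal sketch (illustration only, guarded so it does not run on import):
# round-trip of the zip/unzip helpers defined above.
if __name__ == "__main__":
    _trees = [{"w": jnp.ones(3)}, {"w": jnp.zeros(3)}]
    _stacked = tree_zip_leading(_trees)          # {"w": array of shape [2, 3]}
    _restored = tree_unzip_leading(_stacked, 2)  # back to a list of two pytrees
    print(_stacked["w"].shape, _restored[0]["w"])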
| 1,897 | 26.911765 | 122 | py |
dibs | dibs-master/dibs/utils/__init__.py | from .func import *
from .tree import *
from .visualize import * | 64 | 20.666667 | 24 | py |
dibs | dibs-master/dibs/utils/func.py | import functools
import jax.numpy as jnp
from jax import jit
from jax.tree_util import tree_map, tree_reduce
def expand_by(arr, n):
"""
Expands jnp.array by n dimensions at the end
Args:
arr: shape [...]
n (int)
Returns:
arr of shape [..., 1, ..., 1] with `n` ones
"""
return jnp.expand_dims(arr, axis=tuple(arr.ndim + j for j in range(n)))
@jit
def sel(mat, mask):
"""
jit/vmap helper function
Args:
mat: [N, d]
mask: [d, ] boolean
Returns:
        [N, d] where the columns of `mat` with `mask` == 1 are kept
        and the columns with `mask` == 0 are set to zero
Example:
mat
1 2 3
4 5 6
7 8 9
mask
1 0 1
out
1 0 3
4 0 6
7 0 9
"""
return jnp.where(mask, mat, 0)
@jit
def leftsel(mat, mask, maskval=0.0):
"""
jit/vmap helper function
Args:
mat: [N, d]
mask: [d, ] boolean
Returns:
        [N, d] where the columns of `mat` with `mask` == 1 are kept and pushed leftmost;
        the remaining columns are filled with `maskval`
Example:
mat
1 2 3
4 5 6
7 8 9
mask
1 0 1
out
1 3 0
4 6 0
7 9 0
"""
valid_indices = jnp.where(mask, jnp.arange(mask.shape[0]), mask.shape[0])
padded_mat = jnp.concatenate([mat, maskval * jnp.ones((mat.shape[0], 1))], axis=1)
padded_valid_mat = padded_mat[:, jnp.sort(valid_indices)]
return padded_valid_mat
@functools.partial(jit, static_argnums=(1,))
def mask_topk(x, topkk):
"""
Returns indices of `topk` entries of `x` in decreasing order
Args:
x: [N, ]
        topkk (int): number of top entries to return
Returns:
array of shape [topk, ]
"""
mask = x.argsort()[-topkk:][::-1]
return mask
def squared_norm_pytree(x, y):
"""Computes squared euclidean norm between two pytrees
Args:
x: PyTree
y: PyTree
Returns:
shape []
"""
diff = tree_map(jnp.subtract, x, y)
squared_norm_ind = tree_map(lambda leaf: jnp.square(leaf).sum(), diff)
squared_norm = tree_reduce(jnp.add, squared_norm_ind)
return squared_norm
def zero_diagonal(g):
"""
Returns the argument matrix with its diagonal set to zero.
Args:
g (ndarray): matrix of shape ``[..., d, d]``
"""
d = g.shape[-1]
return g.at[..., jnp.arange(d), jnp.arange(d)].set(0)
def _slogdet_jax(m, parents):
"""
Log determinant of a submatrix. Made ``jax.jit``-compilable and ``jax.grad``-differentiable
by masking everything but the submatrix and adding a diagonal of ones everywhere else
to obtain the valid determinant
Args:
m (ndarray): matrix of shape ``[d, d]``
parents (ndarray): boolean indicator of parents of shape ``[d, ]``
Returns:
natural log of determinant of submatrix ``m`` indexed by ``parents`` on both dimensions
"""
n_vars = parents.shape[0]
mask = jnp.einsum('...i,...j->...ij', parents, parents)
submat = mask * m + (1 - mask) * jnp.eye(n_vars)
return jnp.linalg.slogdet(submat)[1]
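# Minimal sketch (illustration only, guarded so it does not run on import): the masking trick
# in `_slogdet_jax` reproduces the log-determinant of the sub-matrix selected by `parents`.
if __name__ == "__main__":
    _m = jnp.array([[2.0, 0.5, 0.0],
                    [0.5, 3.0, 0.0],
                    [0.0, 0.0, 9.0]])
    _parents = jnp.array([1.0, 1.0, 0.0])
    # both printed values equal log(2*3 - 0.5*0.5); the excluded 9.0 entry is ignored
    print(_slogdet_jax(_m, _parents), jnp.log(2.0 * 3.0 - 0.5 * 0.5))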
| 3,188 | 20.842466 | 95 | py |
dibs | dibs-master/dibs/utils/visualize.py | import os
import matplotlib.pyplot as plt
import imageio
def visualize_ground_truth(mat, size=4.0):
"""
`mat`: (d, d)
"""
plt.rcParams['figure.figsize'] = [size, size]
fig, ax = plt.subplots(1, 1)
ax.matshow(mat, vmin=0, vmax=1)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.tick_params(axis='both', which='both', length=0)
ax.set_title(r'Ground truth $G^*$', pad=10)
plt.show()
return
def visualize(mats, t, save_path=None, n_cols=7, size=2.5, show=False):
"""
Based on visualization by https://github.com/JannerM/gamma-models/blob/main/gamma/visualization/pendulum.py
`mats` should have shape (N, d, d) and take values in [0,1]
"""
N = mats.shape[0]
n_rows = N // n_cols
if N % n_cols:
n_rows += 1
plt.rcParams['figure.figsize'] = [size * n_cols, size * n_rows]
fig, axes = plt.subplots(n_rows, n_cols)
axes = axes.flatten()
# for j, (ax, mat) in enumerate(zip(axes[:len(mats)], mats)):
for j, ax in enumerate(axes):
if j < len(mats):
# plot matrix of edge probabilities
ax.matshow(mats[j], vmin=0, vmax=1)
ax.tick_params(axis='both', which='both', length=0)
ax.set_title(r'$Z^{('f'{j}'r')}$', pad=3)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.axis('off')
# save
if save_path is not None:
if not os.path.exists(save_path):
os.makedirs(save_path)
plt.savefig(save_path + f'/img{t}.png')
img = imageio.imread(save_path + f'/img{t}.png')
else:
img = None
if show:
plt.show()
plt.close()
return img
| 1,795 | 27.967742 | 111 | py |
dibs | dibs-master/dibs/inference/dibs.py | import jax.numpy as jnp
from jax import vmap, random, grad
from jax.scipy.special import logsumexp
from jax.nn import sigmoid, log_sigmoid
import jax.lax as lax
from jax.tree_util import tree_map
from dibs.graph_utils import acyclic_constr_nograd
from dibs.utils.func import expand_by, zero_diagonal
class DiBS:
"""
This class implements the backbone for DiBS, i.e. all gradient estimators and sampling
components. Any inference method in the DiBS framework should inherit from this class.
Args:
x (ndarray): matrix of shape ``[n_observations, n_vars]`` of i.i.d. observations of the variables
interv_mask (ndarray): binary matrix of shape ``[n_observations, n_vars]`` indicating
whether a given variable was intervened upon in a given sample (intervention = 1, no intervention = 0)
log_graph_prior (callable):
function implementing prior :math:`\\log p(G)` of soft adjacency matrix of
edge probabilities.
For example: :func:`~dibs.graph.ErdosReniDAGDistribution.unnormalized_log_prob_soft`
or usually bound in e.g. :func:`~dibs.graph.LinearGaussian.log_graph_prior`
log_joint_prob (callable):
function implementing joint likelihood :math:`\\log p(\Theta, D | G)`
of parameters and observations given the discrete graph adjacency matrix
            For example: :func:`dibs.models.LinearGaussian.interventional_log_joint_prob`.
When inferring the marginal posterior :math:`p(G | D)` via a closed-form
marginal likelihood :math:`\\log p(D | G)`, the same function signature has to be
satisfied (simply ignoring :math:`\\Theta`)
        alpha_linear (float): slope of linear schedule for inverse temperature :math:`\\alpha`
            of sigmoid in latent graph model :math:`p(G | Z)`
        beta_linear (float): slope of linear schedule for inverse temperature :math:`\\beta`
            of constraint penalty in latent prior :math:`p(Z)`
tau (float): constant Gumbel-softmax temperature parameter
n_grad_mc_samples (int): number of Monte Carlo samples in gradient estimator
for likelihood term :math:`p(\Theta, D | G)`
n_acyclicity_mc_samples (int): number of Monte Carlo samples in gradient estimator
for acyclicity constraint
grad_estimator_z (str): gradient estimator :math:`\\nabla_Z` of expectation over :math:`p(G | Z)`;
choices: ``score`` or ``reparam``
score_function_baseline (float): scale of additive baseline in score function (REINFORCE) estimator;
``score_function_baseline == 0.0`` corresponds to not using a baseline
latent_prior_std (float): standard deviation of Gaussian prior over :math:`Z`; defaults to ``1/sqrt(k)``
"""
def __init__(self, *,
x,
interv_mask,
log_graph_prior,
log_joint_prob,
alpha_linear=0.05,
beta_linear=1.0,
tau=1.0,
n_grad_mc_samples=128,
n_acyclicity_mc_samples=32,
grad_estimator_z='reparam',
score_function_baseline=0.0,
latent_prior_std=None,
verbose=False):
self.x = x
self.interv_mask = interv_mask
self.n_vars = x.shape[-1]
self.log_graph_prior = log_graph_prior
self.log_joint_prob = log_joint_prob
self.alpha = lambda t: (alpha_linear * t)
self.beta = lambda t: (beta_linear * t)
self.tau = tau
self.n_grad_mc_samples = n_grad_mc_samples
self.n_acyclicity_mc_samples = n_acyclicity_mc_samples
self.grad_estimator_z = grad_estimator_z
self.score_function_baseline = score_function_baseline
self.latent_prior_std = latent_prior_std
self.verbose = verbose
"""
Backbone functionality
"""
def particle_to_g_lim(self, z):
"""
Returns :math:`G` corresponding to :math:`\\alpha = \\infty` for particles `z`
Args:
z (ndarray): latent variables ``[..., d, k, 2]``
Returns:
graph adjacency matrices of shape ``[..., d, d]``
"""
u, v = z[..., 0], z[..., 1]
scores = jnp.einsum('...ik,...jk->...ij', u, v)
g_samples = (scores > 0).astype(jnp.int32)
# mask diagonal since it is explicitly not modeled
return zero_diagonal(g_samples)
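        # Equivalent view: in the alpha -> infinity limit, the edge i -> j is present exactly
        # when the inner product u_i . v_j of the latent embeddings is positive.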
def sample_g(self, p, subk, n_samples):
"""
Sample Bernoulli matrix according to matrix of probabilities
Args:
p (ndarray): matrix of probabilities ``[d, d]``
n_samples (int): number of samples
subk (ndarray): rng key
Returns:
an array of matrices sampled according to ``p`` of shape ``[n_samples, d, d]``
"""
n_vars = p.shape[-1]
g_samples = random.bernoulli(
subk, p=p, shape=(n_samples, n_vars, n_vars)).astype(jnp.int32)
# mask diagonal since it is explicitly not modeled
return zero_diagonal(g_samples)
def particle_to_soft_graph(self, z, eps, t):
"""
Gumbel-softmax / concrete distribution using Logistic(0,1) samples ``eps``
Args:
z (ndarray): a single latent tensor :math:`Z` of shape ``[d, k, 2]```
eps (ndarray): random i.i.d. Logistic(0,1) noise of shape ``[d, d]``
t (int): step
Returns:
Gumbel-softmax sample of adjacency matrix [d, d]
"""
scores = jnp.einsum('...ik,...jk->...ij', z[..., 0], z[..., 1])
# soft reparameterization using gumbel-softmax/concrete distribution
# eps ~ Logistic(0,1)
soft_graph = sigmoid(self.tau * (eps + self.alpha(t) * scores))
# mask diagonal since it is explicitly not modeled
return zero_diagonal(soft_graph)
def particle_to_hard_graph(self, z, eps, t):
"""
Bernoulli sample of :math:`G` using probabilities implied by latent ``z``
Args:
z (ndarray): a single latent tensor :math:`Z` of shape ``[d, k, 2]``
eps (ndarray): random i.i.d. Logistic(0,1) noise of shape ``[d, d]``
t (int): step
Returns:
Gumbel-max (hard) sample of adjacency matrix ``[d, d]``
"""
scores = jnp.einsum('...ik,...jk->...ij', z[..., 0], z[..., 1])
# simply take hard limit of sigmoid in gumbel-softmax/concrete distribution
hard_graph = ((eps + self.alpha(t) * scores) > 0.0).astype(jnp.float32)
# mask diagonal since it is explicitly not modeled
return zero_diagonal(hard_graph)
"""
Generative graph model p(G | Z)
"""
def edge_probs(self, z, t):
"""
Edge probabilities encoded by latent representation
Args:
z (ndarray): latent tensors :math:`Z` ``[..., d, k, 2]``
t (int): step
Returns:
edge probabilities of shape ``[..., d, d]``
"""
u, v = z[..., 0], z[..., 1]
scores = jnp.einsum('...ik,...jk->...ij', u, v)
probs = sigmoid(self.alpha(t) * scores)
# mask diagonal since it is explicitly not modeled
return zero_diagonal(probs)
def edge_log_probs(self, z, t):
"""
Edge log probabilities encoded by latent representation
Args:
z (ndarray): latent tensors :math:`Z` ``[..., d, k, 2]``
t (int): step
Returns:
tuple of tensors ``[..., d, d], [..., d, d]`` corresponding to ``log(p)`` and ``log(1-p)``
"""
u, v = z[..., 0], z[..., 1]
scores = jnp.einsum('...ik,...jk->...ij', u, v)
log_probs, log_probs_neg = log_sigmoid(self.alpha(t) * scores), log_sigmoid(self.alpha(t) * -scores)
# mask diagonal since it is explicitly not modeled
# NOTE: this is not technically log(p), but the way `edge_log_probs_` is used, this is correct
return zero_diagonal(log_probs), zero_diagonal(log_probs_neg)
def latent_log_prob(self, single_g, single_z, t):
"""
Log likelihood of generative graph model
Args:
single_g (ndarray): single graph adjacency matrix ``[d, d]``
single_z (ndarray): single latent tensor ``[d, k, 2]``
t (int): step
Returns:
            log likelihood :math:`\\log p(G | Z)` of shape ``[1,]``
"""
# [d, d], [d, d]
log_p, log_1_p = self.edge_log_probs(single_z, t)
# [d, d]
log_prob_g_ij = single_g * log_p + (1 - single_g) * log_1_p
# [1,] # diagonal is masked inside `edge_log_probs`
log_prob_g = jnp.sum(log_prob_g_ij)
return log_prob_g
def eltwise_grad_latent_log_prob(self, gs, single_z, t):
"""
Gradient of log likelihood of generative graph model w.r.t. :math:`Z`
i.e. :math:`\\nabla_Z \\log p(G | Z)`
Batched over samples of :math:`G` given a single :math:`Z`.
Args:
gs (ndarray): batch of graph matrices ``[n_graphs, d, d]``
single_z (ndarray): latent variable ``[d, k, 2]``
t (int): step
Returns:
batch of gradients of shape ``[n_graphs, d, k, 2]``
"""
dz_latent_log_prob = grad(self.latent_log_prob, 1)
return vmap(dz_latent_log_prob, (0, None, None), 0)(gs, single_z, t)
"""
Estimators for scores of log p(theta, D | Z)
"""
def eltwise_log_joint_prob(self, gs, single_theta, rng):
"""
Joint likelihood :math:`\\log p(\\Theta, D | G)` batched over samples of :math:`G`
Args:
gs (ndarray): batch of graphs ``[n_graphs, d, d]``
single_theta (Any): single parameter PyTree
rng (ndarray): for mini-batching ``x`` potentially
Returns:
batch of logprobs of shape ``[n_graphs, ]``
"""
return vmap(self.log_joint_prob, (0, None, None, None, None), 0)(gs, single_theta, self.x, self.interv_mask,
rng)
def log_joint_prob_soft(self, single_z, single_theta, eps, t, subk):
"""
This is the composition of :math:`\\log p(\\Theta, D | G) `
and :math:`G(Z, U)` (Gumbel-softmax graph sample given :math:`Z`)
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
single_theta (Any): single parameter PyTree
eps (ndarray): i.i.d Logistic noise of shape ``[d, d]``
t (int): step
subk (ndarray): rng key
Returns:
logprob of shape ``[1, ]``
"""
soft_g_sample = self.particle_to_soft_graph(single_z, eps, t)
return self.log_joint_prob(soft_g_sample, single_theta, self.x, self.interv_mask, subk)
#
# Estimators for score d/dZ log p(theta, D | Z)
# (i.e. w.r.t the latent embeddings Z for graph G)
#
def eltwise_grad_z_likelihood(self, zs, thetas, baselines, t, subkeys):
"""
Computes batch of estimators for score :math:`\\nabla_Z \\log p(\\Theta, D | Z)`
Selects corresponding estimator used for the term :math:`\\nabla_Z E_{p(G|Z)}[ p(\\Theta, D | G) ]`
and executes it in batch.
Args:
zs (ndarray): batch of latent tensors :math:`Z` ``[n_particles, d, k, 2]``
thetas (Any): batch of parameters PyTree with ``n_particles`` as leading dim
baselines (ndarray): array of score function baseline values of shape ``[n_particles, ]``
Returns:
tuple batch of (gradient estimates, baselines) of shapes ``[n_particles, d, k, 2], [n_particles, ]``
"""
# select the chosen gradient estimator
if self.grad_estimator_z == 'score':
grad_z_likelihood = self.grad_z_likelihood_score_function
elif self.grad_estimator_z == 'reparam':
grad_z_likelihood = self.grad_z_likelihood_gumbel
else:
raise ValueError(f'Unknown gradient estimator `{self.grad_estimator_z}`')
# vmap
return vmap(grad_z_likelihood, (0, 0, 0, None, 0), (0, 0))(zs, thetas, baselines, t, subkeys)
def grad_z_likelihood_score_function(self, single_z, single_theta, single_sf_baseline, t, subk):
"""
Score function estimator (aka REINFORCE) for the score :math:`\\nabla_Z \\log p(\\Theta, D | Z)`
Uses the same :math:`G \\sim p(G | Z)` samples for expectations in numerator and denominator.
This does not use :math:`\\nabla_G \\log p(\\Theta, D | G)` and is hence applicable when
the gradient w.r.t. the adjacency matrix is not defined (as e.g. for the BGe score).
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
single_theta (Any): single parameter PyTree
single_sf_baseline (ndarray): ``[1, ]``
t (int): step
subk (ndarray): rng key
Returns:
tuple of gradient, baseline ``[d, k, 2], [1, ]``
"""
# [d, d]
p = self.edge_probs(single_z, t)
n_vars, n_dim = single_z.shape[0:2]
# [n_grad_mc_samples, d, d]
subk, subk_ = random.split(subk)
g_samples = self.sample_g(p, subk_, self.n_grad_mc_samples)
# same MC samples for numerator and denominator
n_mc_numerator = self.n_grad_mc_samples
n_mc_denominator = self.n_grad_mc_samples
# [n_grad_mc_samples, ]
subk, subk_ = random.split(subk)
logprobs_numerator = self.eltwise_log_joint_prob(g_samples, single_theta, subk_)
logprobs_denominator = logprobs_numerator
# variance_reduction
logprobs_numerator_adjusted = lax.cond(
self.score_function_baseline <= 0.0,
lambda _: logprobs_numerator,
lambda _: logprobs_numerator - single_sf_baseline,
operand=None)
# [d * k * 2, n_grad_mc_samples]
grad_z = self.eltwise_grad_latent_log_prob(g_samples, single_z, t) \
.reshape(self.n_grad_mc_samples, n_vars * n_dim * 2) \
.transpose((1, 0))
# stable computation of exp/log/divide
# [d * k * 2, ] [d * k * 2, ]
log_numerator, sign = logsumexp(a=logprobs_numerator_adjusted, b=grad_z, axis=1, return_sign=True)
# []
log_denominator = logsumexp(logprobs_denominator, axis=0)
# [d * k * 2, ]
stable_sf_grad = sign * jnp.exp(log_numerator - jnp.log(n_mc_numerator) - log_denominator + jnp.log(n_mc_denominator))
# [d, k, 2]
stable_sf_grad_shaped = stable_sf_grad.reshape(n_vars, n_dim, 2)
# update baseline
single_sf_baseline = (self.score_function_baseline * logprobs_numerator.mean(0) +
(1 - self.score_function_baseline) * single_sf_baseline)
return stable_sf_grad_shaped, single_sf_baseline
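        # Note on the estimator: the returned quantity approximates
        #   grad_Z log E_{p(G|Z)}[p(Theta, D | G)]
        #     = E[p(Theta, D | G) * grad_Z log p(G|Z)] / E[p(Theta, D | G)],
        # with both expectations estimated from the same Monte Carlo samples and the ratio
        # evaluated in log-space via `logsumexp`; the optional baseline is subtracted from
        # the numerator log-terms as a variance-reduction heuristic.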
def grad_z_likelihood_gumbel(self, single_z, single_theta, single_sf_baseline, t, subk):
"""
Reparameterization estimator for the score :math:`\\nabla_Z \\log p(\\Theta, D | Z)`
        using the Gumbel-softmax / concrete distribution reparameterization trick.
Uses the same :math:`G \\sim p(G | Z)` samples for expectations in numerator and denominator.
This **does** require a well-defined gradient
:math:`\\nabla_G \\log p(\\Theta, D | G)` and is hence not applicable when
the gradient w.r.t. the adjacency matrix is not defined for Gumbel-relaxations
of the discrete adjacency matrix.
Any (marginal) likelihood expressible as a function of ``g[:, j]`` and ``theta`` ,
e.g. using the vector of (possibly soft) parent indicators as a mask, satisfies this.
Examples are: ``dibs.models.LinearGaussian`` and ``dibs.models.DenseNonlinearGaussian``
See also e.g. http://proceedings.mlr.press/v108/zheng20a/zheng20a.pdf
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
single_theta (Any): single parameter PyTree
single_sf_baseline (ndarray): ``[1, ]``
t (int): step
subk (ndarray): rng key
Returns:
tuple of gradient, baseline ``[d, k, 2], [1, ]``
"""
n_vars = single_z.shape[0]
# same MC samples for numerator and denominator
n_mc_numerator = self.n_grad_mc_samples
n_mc_denominator = self.n_grad_mc_samples
# sample Logistic(0,1) as randomness in reparameterization
subk, subk_ = random.split(subk)
eps = random.logistic(subk_, shape=(self.n_grad_mc_samples, n_vars, n_vars))
# [n_grad_mc_samples, ]
# since we don't backprop per se, it leaves us with the option of having
        # `soft` and `hard` versions for evaluating the non-grad p(.)
subk, subk_ = random.split(subk)
# [d, k, 2], [d, d], [n_grad_mc_samples, d, d], [1,], [1,] -> [n_grad_mc_samples]
logprobs_numerator = vmap(self.log_joint_prob_soft, (None, None, 0, None, None), 0)(single_z, single_theta, eps, t, subk_)
logprobs_denominator = logprobs_numerator
# [n_grad_mc_samples, d, k, 2]
# d/dx log p(theta, D | G(x, eps)) for a batch of `eps` samples
# use the same minibatch of data as for other log prob evaluation (if using minibatching)
# [d, k, 2], [d, d], [n_grad_mc_samples, d, d], [1,], [1,] -> [n_grad_mc_samples, d, k, 2]
grad_z = vmap(grad(self.log_joint_prob_soft, 0), (None, None, 0, None, None), 0)(single_z, single_theta, eps, t, subk_)
# stable computation of exp/log/divide
# [d, k, 2], [d, k, 2]
log_numerator, sign = logsumexp(a=logprobs_numerator[:, None, None, None], b=grad_z, axis=0, return_sign=True)
# []
log_denominator = logsumexp(logprobs_denominator, axis=0)
# [d, k, 2]
stable_grad = sign * jnp.exp(log_numerator - jnp.log(n_mc_numerator) - log_denominator + jnp.log(n_mc_denominator))
return stable_grad, single_sf_baseline
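        # Note on the estimator: same self-normalized ratio as in the score-function version,
        # but grad_Z is obtained by differentiating through the Gumbel-softmax sample G(Z, eps)
        # inside `log_joint_prob_soft`, i.e. a weighted average of
        #   grad_Z log p(Theta, D | G(Z, eps_m))
        # with weights proportional to p(Theta, D | G(Z, eps_m)).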
#
# Estimators for score d/dtheta log p(theta, D | Z)
# (i.e. w.r.t the conditional distribution parameters)
#
def eltwise_grad_theta_likelihood(self, zs, thetas, t, subkeys):
"""
Computes batch of estimators for the score :math:`\\nabla_{\\Theta} \\log p(\\Theta, D | Z)`,
i.e. w.r.t the conditional distribution parameters.
Uses the same :math:`G \\sim p(G | Z)` samples for expectations in numerator and denominator.
This does not use :math:`\\nabla_G \\log p(\\Theta, D | G)` and is hence applicable when
the gradient w.r.t. the adjacency matrix is not defined (as e.g. for the BGe score).
Analogous to ``eltwise_grad_z_likelihood`` but gradient w.r.t :math:`\\Theta` instead of :math:`Z`
Args:
zs (ndarray): batch of latent tensors Z of shape ``[n_particles, d, k, 2]``
thetas (Any): batch of parameter PyTree with ``n_mc_samples`` as leading dim
Returns:
batch of gradients in form of ``thetas`` PyTree with ``n_particles`` as leading dim
"""
return vmap(self.grad_theta_likelihood, (0, 0, None, 0), 0)(zs, thetas, t, subkeys)
def grad_theta_likelihood(self, single_z, single_theta, t, subk):
"""
Computes Monte Carlo estimator for the score :math:`\\nabla_{\\Theta} \\log p(\\Theta, D | Z)`
Uses hard samples of :math:`G`, but a soft reparameterization like for :math:`\\nabla_Z` is also possible.
Uses the same :math:`G \\sim p(G | Z)` samples for expectations in numerator and denominator.
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
single_theta (Any): single parameter PyTree
t (int): step
subk (ndarray): rng key
Returns:
parameter gradient PyTree
"""
# [d, d]
p = self.edge_probs(single_z, t)
# [n_grad_mc_samples, d, d]
g_samples = self.sample_g(p, subk, self.n_grad_mc_samples)
# same MC samples for numerator and denominator
n_mc_numerator = self.n_grad_mc_samples
n_mc_denominator = self.n_grad_mc_samples
# [n_mc_numerator, ]
subk, subk_ = random.split(subk)
logprobs_numerator = self.eltwise_log_joint_prob(g_samples, single_theta, subk_)
logprobs_denominator = logprobs_numerator
# PyTree shape of `single_theta` with additional leading dimension [n_mc_numerator, ...]
# d/dtheta log p(theta, D | G) for a batch of G samples
# use the same minibatch of data as for other log prob evaluation (if using minibatching)
grad_theta_log_joint_prob = grad(self.log_joint_prob, 1)
grad_theta = vmap(grad_theta_log_joint_prob,
(0, None, None, None, None), 0)(g_samples, single_theta, self.x, self.interv_mask, subk_)
# stable computation of exp/log/divide and PyTree compatible
# sums over MC graph samples dimension to get MC gradient estimate of theta
# original PyTree shape of `single_theta`
log_numerator = tree_map(
lambda leaf_theta:
logsumexp(a=expand_by(logprobs_numerator, leaf_theta.ndim - 1), b=leaf_theta, axis=0, return_sign=True)[0],
grad_theta)
# original PyTree shape of `single_theta`
sign = tree_map(
lambda leaf_theta:
logsumexp(a=expand_by(logprobs_numerator, leaf_theta.ndim - 1), b=leaf_theta, axis=0, return_sign=True)[1],
grad_theta)
# []
log_denominator = logsumexp(logprobs_denominator, axis=0)
# original PyTree shape of `single_theta`
stable_grad = tree_map(
lambda sign_leaf_theta, log_leaf_theta:
(sign_leaf_theta * jnp.exp(log_leaf_theta - jnp.log(n_mc_numerator) - log_denominator + jnp.log(n_mc_denominator))),
sign, log_numerator)
return stable_grad
"""
Estimators for score d/dZ log p(Z)
"""
def constraint_gumbel(self, single_z, single_eps, t):
"""
Evaluates continuous acyclicity constraint using
Gumbel-softmax instead of Bernoulli samples
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
single_eps (ndarray): i.i.d. Logistic noise of shape ``[d, d``] for Gumbel-softmax
t (int): step
Returns:
constraint value of shape ``[1,]``
"""
n_vars = single_z.shape[0]
G = self.particle_to_soft_graph(single_z, single_eps, t)
h = acyclic_constr_nograd(G, n_vars)
return h
def grad_constraint_gumbel(self, single_z, key, t):
"""
Reparameterization estimator for the gradient :math:`\\nabla_Z E_{p(G|Z)} [ h(G) ]`
where :math:`h` is the acyclicity constraint penalty function.
Since :math:`h` is differentiable w.r.t. :math:`G`, always uses
the Gumbel-softmax / concrete distribution reparameterization trick.
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
key (ndarray): rng
t (int): step
Returns:
gradient of shape ``[d, k, 2]``
"""
n_vars = single_z.shape[0]
# [n_mc_samples, d, d]
eps = random.logistic(key, shape=(self.n_acyclicity_mc_samples, n_vars, n_vars))
# [n_mc_samples, d, k, 2]
mc_gradient_samples = vmap(grad(self.constraint_gumbel, 0), (None, 0, None), 0)(single_z, eps, t)
# [d, k, 2]
return mc_gradient_samples.mean(0)
def log_graph_prior_particle(self, single_z, t):
"""
Computes :math:`\\log p(G)` component of :math:`\\log p(Z)`,
        i.e. not the constraint or Gaussian prior term, but the DAG belief.
The log prior :math:`\\log p(G)` is evaluated with
edge probabilities :math:`G_{\\alpha}(Z)` given :math:`Z`.
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
t (int): step
Returns:
            log prior graph probability :math:`\\log p(G_{\\alpha}(Z))` of shape ``[1,]``
"""
# [d, d] # masking is done inside `edge_probs`
single_soft_g = self.edge_probs(single_z, t)
# [1, ]
return self.log_graph_prior(single_soft_g)
def eltwise_grad_latent_prior(self, zs, subkeys, t):
"""
Computes batch of estimators for the score :math:`\\nabla_Z \\log p(Z)`
with
:math:`\\log p(Z) = - \\beta(t) E_{p(G|Z)} [h(G)] + \\log \\mathcal{N}(Z) + \\log f(Z)`
where :math:`h` is the acyclicity constraint and `f(Z)` is additional DAG prior factor
computed inside ``dibs.inference.DiBS.log_graph_prior_particle``.
Args:
zs (ndarray): single latent tensor ``[n_particles, d, k, 2]``
subkeys (ndarray): batch of rng keys ``[n_particles, ...]``
Returns:
batch of gradients of shape ``[n_particles, d, k, 2]``
"""
# log f(Z) term
# [d, k, 2], [1,] -> [d, k, 2]
grad_log_graph_prior_particle = grad(self.log_graph_prior_particle, 0)
# [n_particles, d, k, 2], [1,] -> [n_particles, d, k, 2]
grad_prior_z = vmap(grad_log_graph_prior_particle, (0, None), 0)(zs, t)
# constraint term
# [n_particles, d, k, 2], [n_particles,], [1,] -> [n_particles, d, k, 2]
eltwise_grad_constraint = vmap(self.grad_constraint_gumbel, (0, 0, None), 0)(zs, subkeys, t)
return - self.beta(t) * eltwise_grad_constraint \
- zs / (self.latent_prior_std ** 2.0) \
+ grad_prior_z
def visualize_callback(self, ipython=True, save_path=None):
"""Returns callback function for visualization of particles during inference updates
Args:
ipython (bool): set to ``True`` when running in a jupyter notebook
save_path (str): path to save plotted images to
Returns:
callback
"""
from dibs.utils.visualize import visualize
from dibs.graph_utils import elwise_acyclic_constr_nograd as constraint
if ipython:
from IPython import display
def callback(**kwargs):
zs = kwargs["zs"]
gs = kwargs["dibs"].particle_to_g_lim(zs)
probs = kwargs["dibs"].edge_probs(zs, kwargs["t"])
if ipython:
display.clear_output(wait=True)
visualize(probs, save_path=save_path, t=kwargs["t"], show=True)
print(
f'iteration {kwargs["t"]:6d}'
f' | alpha {self.alpha(kwargs["t"]):6.1f}'
f' | beta {self.beta(kwargs["t"]):6.1f}'
f' | #cyclic {(constraint(gs, self.n_vars) > 0).sum().item():3d}'
)
return
return callback
| 27,067 | 38.059163 | 132 | py |
dibs | dibs-master/dibs/inference/svgd.py | import functools
import numpy as onp
import jax
import jax.numpy as jnp
from jax import jit, vmap, random, grad
from jax.tree_util import tree_map
from jax.scipy.special import logsumexp
from jax.example_libraries import optimizers
from dibs.inference.dibs import DiBS
from dibs.kernel import AdditiveFrobeniusSEKernel, JointAdditiveFrobeniusSEKernel
from dibs.metrics import ParticleDistribution
from dibs.utils.func import expand_by
class MarginalDiBS(DiBS):
"""
This class implements Stein Variational Gradient Descent (SVGD) (Liu and Wang, 2016)
for DiBS inference (Lorch et al., 2021) of the marginal DAG posterior :math:`p(G | D)`.
For joint inference of :math:`p(G, \\Theta | D)`, use the analogous class
:class:`~dibs.inference.JointDiBS`.
An SVGD update of tensor :math:`v` is defined as
:math:`\\phi(v) \\propto \\sum_{u} k(v, u) \\nabla_u \\log p(u) + \\nabla_u k(u, v)`
Args:
x (ndarray): observations of shape ``[n_observations, n_vars]``
interv_mask (ndarray, optional): binary matrix of shape ``[n_observations, n_vars]`` indicating
whether a given variable was intervened upon in a given sample (intervention = 1, no intervention = 0)
inference_model: Bayes net inference model defining prior :math:`\\log p(G)`
            and marginal likelihood :math:`\\log p(D | G)` underlying the inferred posterior.
            Object *has to implement two methods*:
            ``log_graph_prior`` and ``interventional_log_marginal_prob``.
Example: :class:`~dibs.models.BGe`
kernel: Class of kernel. *Has to implement the method* ``eval(u, v)``.
Example: :class:`~dibs.kernel.AdditiveFrobeniusSEKernel`
kernel_param (dict): kwargs to instantiate ``kernel``
optimizer (str): optimizer identifier
optimizer_param (dict): kwargs to instantiate ``optimizer``
        alpha_linear (float): slope of linear schedule for inverse temperature :math:`\\alpha`
            of sigmoid in latent graph model :math:`p(G | Z)`
        beta_linear (float): slope of linear schedule for inverse temperature :math:`\\beta`
of constraint penalty in latent prior :math:`p(Z)`
tau (float): constant Gumbel-softmax temperature parameter
n_grad_mc_samples (int): number of Monte Carlo samples in gradient estimator
for likelihood term :math:`p(\Theta, D | G)`
n_acyclicity_mc_samples (int): number of Monte Carlo samples in gradient estimator
for acyclicity constraint
grad_estimator_z (str): gradient estimator :math:`\\nabla_Z` of expectation over :math:`p(G | Z)`;
choices: ``score`` or ``reparam``
score_function_baseline (float): scale of additive baseline in score function (REINFORCE) estimator;
``score_function_baseline == 0.0`` corresponds to not using a baseline
latent_prior_std (float): standard deviation of Gaussian prior over :math:`Z`; defaults to ``1/sqrt(k)``
"""
def __init__(self, *,
x,
inference_model,
interv_mask=None,
kernel=AdditiveFrobeniusSEKernel,
kernel_param=None,
optimizer="rmsprop",
optimizer_param=None,
alpha_linear=1.0,
beta_linear=1.0,
tau=1.0,
n_grad_mc_samples=128,
n_acyclicity_mc_samples=32,
grad_estimator_z="score",
score_function_baseline=0.0,
latent_prior_std=None,
verbose=False):
# handle mutable default args
if kernel_param is None:
kernel_param = {"h": 5.0}
if optimizer_param is None:
optimizer_param = {"stepsize": 0.005}
# handle interv mask in observational case
if interv_mask is None:
interv_mask = jnp.zeros_like(x, dtype=jnp.int32)
# init DiBS superclass methods
super().__init__(
x=x,
interv_mask=interv_mask,
log_graph_prior=inference_model.log_graph_prior,
log_joint_prob=inference_model.interventional_log_marginal_prob,
alpha_linear=alpha_linear,
beta_linear=beta_linear,
tau=tau,
n_grad_mc_samples=n_grad_mc_samples,
n_acyclicity_mc_samples=n_acyclicity_mc_samples,
grad_estimator_z=grad_estimator_z,
score_function_baseline=score_function_baseline,
latent_prior_std=latent_prior_std,
verbose=verbose,
)
self.inference_model = inference_model
# functions for post-hoc likelihood evaluations
self.eltwise_log_marginal_likelihood_observ = vmap(lambda g, x_ho:
inference_model.interventional_log_marginal_prob(g, None, x_ho, jnp.zeros_like(x_ho), None), (0, None), 0)
self.eltwise_log_marginal_likelihood_interv = vmap(lambda g, x_ho, interv_msk_ho:
inference_model.interventional_log_marginal_prob(g, None, x_ho, interv_msk_ho, None), (0, None, None), 0)
self.kernel = kernel(**kernel_param)
if optimizer == 'gd':
self.opt = optimizers.sgd(optimizer_param['stepsize'])
elif optimizer == 'rmsprop':
self.opt = optimizers.rmsprop(optimizer_param['stepsize'])
else:
raise ValueError()
def _sample_initial_random_particles(self, *, key, n_particles, n_dim=None):
"""
Samples random particles to initialize SVGD
Args:
key (ndarray): rng key
n_particles (int): number of particles inferred
n_dim (int): size of latent dimension :math:`k`. Defaults to ``n_vars``, s.t. :math:`k = d`
Returns:
batch of latent tensors ``[n_particles, d, k, 2]``
"""
# default full rank
if n_dim is None:
n_dim = self.n_vars
# like prior
std = self.latent_prior_std or (1.0 / jnp.sqrt(n_dim))
# sample
key, subk = random.split(key)
z = random.normal(subk, shape=(n_particles, self.n_vars, n_dim, 2)) * std
return z
def _f_kernel(self, x_latent, y_latent):
"""
Evaluates kernel
Args:
x_latent (ndarray): latent tensor of shape ``[d, k, 2]``
y_latent (ndarray): latent tensor of shape ``[d, k, 2]``
Returns:
kernel value of shape ``[1, ]``
"""
return self.kernel.eval(x=x_latent, y=y_latent)
def _f_kernel_mat(self, x_latents, y_latents):
"""
Computes pairwise kernel matrix
Args:
x_latents (ndarray): latent tensor of shape ``[A, d, k, 2]``
y_latents (ndarray): latent tensor of shape ``[B, d, k, 2]``
Returns:
kernel values of shape ``[A, B]``
"""
return vmap(vmap(self._f_kernel, (None, 0), 0), (0, None), 0)(x_latents, y_latents)
def _eltwise_grad_kernel_z(self, x_latents, y_latent):
"""
Computes gradient :math:`\\nabla_Z k(Z, Z')` elementwise for each provided particle :math:`Z`
        in batch ``x_latents``
Args:
x_latents (ndarray): batch of latent particles for :math:`Z` of shape ``[n_particles, d, k, 2]``
y_latent (ndarray): single latent particle :math:`Z'` ``[d, k, 2]``
Returns:
batch of gradients of shape ``[n_particles, d, k, 2]``
"""
grad_kernel_z = grad(self._f_kernel, 0)
return vmap(grad_kernel_z, (0, None), 0)(x_latents, y_latent)
def _z_update(self, single_z, kxx, z, grad_log_prob_z):
"""
Computes SVGD update for ``single_z`` particle given the kernel values
``kxx`` and the :math:`d/dZ` gradients of the target density for each of the available particles
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``, which is the Z particle being updated
kxx (ndarray): pairwise kernel values for all particles ``[n_particles, n_particles]``
z (ndarray): all latent particles ``[n_particles, d, k, 2]``
grad_log_prob_z (ndarray): gradients of all Z particles w.r.t
target density of shape ``[n_particles, d, k, 2]``
Returns
transform vector of shape ``[d, k, 2]`` for the particle ``single_z``
"""
# compute terms in sum
weighted_gradient_ascent = kxx[..., None, None, None] * grad_log_prob_z
repulsion = self._eltwise_grad_kernel_z(z, single_z)
# average and negate (for optimizer)
return - (weighted_gradient_ascent + repulsion).mean(axis=0)
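        # This is the standard SVGD transform
        #   phi(z_i) = (1/n) * sum_u [ k(z_u, z_i) * grad_{z_u} log p(z_u, D) + grad_{z_u} k(z_u, z_i) ],
        # returned with a negative sign because `opt_update` performs descent.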
def _parallel_update_z(self, *args):
"""
Vectorizes :func:`~dibs.inference.MarginalDiBS._z_update`
for all available particles in batched first input dim (``single_z``)
Otherwise, same inputs as :func:`~dibs.inference.MarginalDiBS._z_update`.
"""
return vmap(self._z_update, (0, 1, None, None), 0)(*args)
def _svgd_step(self, t, opt_state_z, key, sf_baseline):
"""
Performs a single SVGD step in the DiBS framework, updating all :math:`Z` particles jointly.
Args:
t (int): step
opt_state_z: optimizer state for latent :math:`Z` particles; contains ``[n_particles, d, k, 2]``
key (ndarray): prng key
sf_baseline (ndarray): batch of baseline values of shape ``[n_particles, ]``
in case score function gradient is used
Returns:
the updated inputs ``opt_state_z``, ``key``, ``sf_baseline``
"""
z = self.get_params(opt_state_z) # [n_particles, d, k, 2]
n_particles = z.shape[0]
# d/dz log p(D | z)
key, *batch_subk = random.split(key, n_particles + 1)
dz_log_likelihood, sf_baseline = self.eltwise_grad_z_likelihood(z, None, sf_baseline, t, jnp.array(batch_subk))
# here `None` is a placeholder for theta (in the joint inference case)
# since this is an inherited function from the general `DiBS` class
# d/dz log p(z) (acyclicity)
key, *batch_subk = random.split(key, n_particles + 1)
dz_log_prior = self.eltwise_grad_latent_prior(z, jnp.array(batch_subk), t)
# d/dz log p(z, D) = d/dz log p(z) + log p(D | z)
dz_log_prob = dz_log_prior + dz_log_likelihood
# k(z, z) for all particles
kxx = self._f_kernel_mat(z, z)
# transformation phi() applied in batch to each particle individually
phi_z = self._parallel_update_z(z, kxx, z, dz_log_prob)
# apply transformation
# `x += stepsize * phi`; the phi returned is negated for SVGD
opt_state_z = self.opt_update(t, phi_z, opt_state_z)
return opt_state_z, key, sf_baseline
# this is the crucial @jit
@functools.partial(jit, static_argnums=(0, 2))
def _svgd_loop(self, start, n_steps, init):
return jax.lax.fori_loop(start, start + n_steps, lambda i, args: self._svgd_step(i, *args), init)
def sample(self, *, key, n_particles, steps, n_dim_particles=None, callback=None, callback_every=None):
"""
Use SVGD with DiBS to sample ``n_particles`` particles :math:`G` from the marginal posterior
:math:`p(G | D)` as defined by the BN model ``self.inference_model``
Arguments:
key (ndarray): prng key
n_particles (int): number of particles to sample
steps (int): number of SVGD steps performed
n_dim_particles (int): latent dimensionality :math:`k` of particles :math:`Z = \{ U, V \}`
with :math:`U, V \\in \\mathbb{R}^{k \\times d}`. Default is ``n_vars``
callback: function to be called every ``callback_every`` steps of SVGD.
callback_every: if ``None``, ``callback`` is only called after particle updates have finished
Returns:
batch of samples :math:`G \\sim p(G | D)` of shape ``[n_particles, n_vars, n_vars]``
"""
# randomly sample initial particles
key, subk = random.split(key)
init_z = self._sample_initial_random_particles(key=subk, n_particles=n_particles, n_dim=n_dim_particles)
# initialize score function baseline (one for each particle)
n_particles, _, n_dim, _ = init_z.shape
sf_baseline = jnp.zeros(n_particles)
if self.latent_prior_std is None:
self.latent_prior_std = 1.0 / jnp.sqrt(n_dim)
# maintain updated particles with optimizer state
opt_init, self.opt_update, get_params = self.opt
self.get_params = jit(get_params)
opt_state_z = opt_init(init_z)
"""Execute particle update steps for all particles in parallel using `vmap` functions"""
# faster if for-loop is functionally pure and compiled, so only interrupt for callback
callback_every = callback_every or steps
for t in (range(0, steps, callback_every) if steps else range(0)):
# perform sequence of SVGD steps
opt_state_z, key, sf_baseline = self._svgd_loop(t, callback_every, (opt_state_z, key, sf_baseline))
# callback
if callback:
z = self.get_params(opt_state_z)
callback(
dibs=self,
t=t + callback_every,
zs=z,
)
# retrieve transported particles
z_final = jax.device_get(self.get_params(opt_state_z))
# as alpha is large, we can convert the latents Z to their corresponding graphs G
g_final = self.particle_to_g_lim(z_final)
return g_final
def get_empirical(self, g):
"""
Converts batch of binary (adjacency) matrices into *empirical* particle distribution
where mixture weights correspond to counts/occurrences
Args:
g (ndarray): batch of graph samples ``[n_particles, d, d]`` with binary values
Returns:
:class:`~dibs.metrics.ParticleDistribution`:
particle distribution of graph samples and associated log probabilities
"""
N, _, _ = g.shape
unique, counts = onp.unique(g, axis=0, return_counts=True)
# empirical distribution using counts
logp = jnp.log(counts) - jnp.log(N)
return ParticleDistribution(logp=logp, g=unique)
def get_mixture(self, g):
"""
Converts batch of binary (adjacency) matrices into *mixture* particle distribution,
where mixture weights correspond to unnormalized target (i.e. posterior) probabilities
Args:
g (ndarray): batch of graph samples ``[n_particles, d, d]`` with binary values
Returns:
:class:`~dibs.metrics.ParticleDistribution`:
particle distribution of graph samples and associated log probabilities
"""
N, _, _ = g.shape
# mixture weighted by respective marginal probabilities
eltwise_log_marginal_target = vmap(lambda single_g:
self.log_joint_prob(single_g, None, self.x, self.interv_mask, None), 0, 0)
logp = eltwise_log_marginal_target(g)
logp -= logsumexp(logp)
return ParticleDistribution(logp=logp, g=g)
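# Hypothetical usage sketch (the data `x` and the `bge_model` object are assumptions, not defined here):
#     dibs = MarginalDiBS(x=x, inference_model=bge_model)
#     gs = dibs.sample(key=random.PRNGKey(0), n_particles=20, steps=1000)
#     posterior = dibs.get_mixture(gs)   # particle distribution over the sampled graphs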
class JointDiBS(DiBS):
"""
This class implements Stein Variational Gradient Descent (SVGD) (Liu and Wang, 2016)
    for DiBS inference (Lorch et al., 2021) of the joint DAG-parameter posterior :math:`p(G, \\Theta | D)`.
For marginal inference of :math:`p(G | D)`, use the analogous class
:class:`~dibs.inference.MarginalDiBS`.
An SVGD update of tensor :math:`v` is defined as
:math:`\\phi(v) \\propto \\sum_{u} k(v, u) \\nabla_u \\log p(u) + \\nabla_u k(u, v)`
Args:
x (ndarray): observations of shape ``[n_observations, n_vars]``
interv_mask (ndarray, optional): binary matrix of shape ``[n_observations, n_vars]`` indicating
whether a given variable was intervened upon in a given sample (intervention = 1, no intervention = 0)
inference_model: Bayes net inference model defining prior :math:`\\log p(G)`
            and joint likelihood :math:`\\log p(\\Theta, D | G) = \\log p(\\Theta | G) + \\log p(D | G, \\Theta)`
            underlying the inferred posterior. Object *has to implement two methods*:
            ``log_graph_prior`` and ``interventional_log_joint_prob``.
Example: :class:`~dibs.models.LinearGaussian`
kernel: Class of kernel. *Has to implement the method* ``eval(u, v)``.
Example: :class:`~dibs.kernel.JointAdditiveFrobeniusSEKernel`
kernel_param (dict): kwargs to instantiate ``kernel``
optimizer (str): optimizer identifier
optimizer_param (dict): kwargs to instantiate ``optimizer``
        alpha_linear (float): slope of linear schedule for inverse temperature :math:`\\alpha`
            of sigmoid in latent graph model :math:`p(G | Z)`
        beta_linear (float): slope of linear schedule for inverse temperature :math:`\\beta`
of constraint penalty in latent prior :math:`p(Z)`
tau (float): constant Gumbel-softmax temperature parameter
n_grad_mc_samples (int): number of Monte Carlo samples in gradient estimator
for likelihood term :math:`p(\Theta, D | G)`
n_acyclicity_mc_samples (int): number of Monte Carlo samples in gradient estimator
for acyclicity constraint
grad_estimator_z (str): gradient estimator :math:`\\nabla_Z` of expectation over :math:`p(G | Z)`;
choices: ``score`` or ``reparam``
score_function_baseline (float): scale of additive baseline in score function (REINFORCE) estimator;
``score_function_baseline == 0.0`` corresponds to not using a baseline
latent_prior_std (float): standard deviation of Gaussian prior over :math:`Z`; defaults to ``1/sqrt(k)``
"""
def __init__(self, *,
x,
inference_model,
interv_mask=None,
kernel=JointAdditiveFrobeniusSEKernel,
kernel_param=None,
optimizer="rmsprop",
optimizer_param=None,
alpha_linear=0.05,
beta_linear=1.0,
tau=1.0,
n_grad_mc_samples=128,
n_acyclicity_mc_samples=32,
grad_estimator_z="reparam",
score_function_baseline=0.0,
latent_prior_std=None,
verbose=False):
# handle mutable default args
if kernel_param is None:
kernel_param = {"h_latent": 5.0, "h_theta": 500.0}
if optimizer_param is None:
optimizer_param = {"stepsize": 0.005}
# handle interv mask in observational case
if interv_mask is None:
interv_mask = jnp.zeros_like(x, dtype=jnp.int32)
# init DiBS superclass methods
super().__init__(
x=x,
interv_mask=interv_mask,
log_graph_prior=inference_model.log_graph_prior,
log_joint_prob=inference_model.interventional_log_joint_prob,
alpha_linear=alpha_linear,
beta_linear=beta_linear,
tau=tau,
n_grad_mc_samples=n_grad_mc_samples,
n_acyclicity_mc_samples=n_acyclicity_mc_samples,
grad_estimator_z=grad_estimator_z,
score_function_baseline=score_function_baseline,
latent_prior_std=latent_prior_std,
verbose=verbose,
)
self.inference_model = inference_model
# functions for post-hoc likelihood evaluations
self.eltwise_log_likelihood_observ = vmap(lambda g, theta, x_ho:
inference_model.interventional_log_joint_prob(g, theta, x_ho, jnp.zeros_like(x_ho), None), (0, 0, None), 0)
self.eltwise_log_likelihood_interv = vmap(lambda g, theta, x_ho, interv_msk_ho:
inference_model.interventional_log_joint_prob(g, theta, x_ho, interv_msk_ho, None), (0, 0, None, None), 0)
self.kernel = kernel(**kernel_param)
if optimizer == 'gd':
self.opt = optimizers.sgd(optimizer_param['stepsize'])
elif optimizer == 'rmsprop':
self.opt = optimizers.rmsprop(optimizer_param['stepsize'])
else:
raise ValueError()
def _sample_initial_random_particles(self, *, key, n_particles, n_dim=None):
"""
Samples random particles to initialize SVGD
Args:
key (ndarray): rng key
n_particles (int): number of particles inferred
n_dim (int): size of latent dimension :math:`k`. Defaults to ``n_vars``, s.t. :math:`k = d`
Returns:
batch of latent tensors ``[n_particles, d, k, 2]``
"""
# default full rank
if n_dim is None:
n_dim = self.n_vars
# std like Gaussian prior over Z
std = self.latent_prior_std or (1.0 / jnp.sqrt(n_dim))
# sample from parameter prior
key, subk = random.split(key)
z = random.normal(subk, shape=(n_particles, self.n_vars, n_dim, 2)) * std
key, subk = random.split(key)
theta = self.inference_model.sample_parameters(key=subk, n_particles=n_particles, n_vars=self.n_vars)
return z, theta
def _f_kernel(self, x_latent, x_theta, y_latent, y_theta):
"""
Evaluates kernel
Args:
x_latent (ndarray): latent tensor of shape ``[d, k, 2]``
x_theta (Any): parameter PyTree
y_latent (ndarray): latent tensor of shape ``[d, k, 2]``
y_theta (Any): parameter PyTree
Returns:
kernel value of shape ``[1, ]``
"""
return self.kernel.eval(
x_latent=x_latent, x_theta=x_theta,
y_latent=y_latent, y_theta=y_theta)
def _f_kernel_mat(self, x_latents, x_thetas, y_latents, y_thetas):
"""
Computes pairwise kernel matrix
Args:
x_latents (ndarray): latent tensor of shape ``[A, d, k, 2]``
x_thetas (Any): parameter PyTree with batch size ``A`` as leading dim
y_latents (ndarray): latent tensor of shape ``[B, d, k, 2]``
y_thetas (Any): parameter PyTree with batch size ``B`` as leading dim
Returns:
kernel values of shape ``[A, B]``
"""
return vmap(vmap(self._f_kernel, (None, None, 0, 0), 0),
(0, 0, None, None), 0)(x_latents, x_thetas, y_latents, y_thetas)
def _eltwise_grad_kernel_z(self, x_latents, x_thetas, y_latent, y_theta):
"""
Computes gradient :math:`\\nabla_Z k((Z, \\Theta), (Z', \\Theta'))` elementwise
        for each provided particle :math:`(Z, \\Theta)` in batch (``x_latents``, ``x_thetas``)
Args:
x_latents (ndarray): batch of latent particles for :math:`Z` of shape ``[n_particles, d, k, 2]``
x_thetas (Any): batch of parameter PyTrees for :math:`\\Theta` with leading dim ``n_particles``
y_latent (ndarray): single latent particle :math:`Z'` ``[d, k, 2]``
y_theta (Any): single parameter PyTree for :math:`\\Theta'`
Returns:
batch of gradients of shape ``[n_particles, d, k, 2]``
"""
grad_kernel_z = grad(self._f_kernel, 0)
return vmap(grad_kernel_z, (0, 0, None, None), 0)(x_latents, x_thetas, y_latent, y_theta)
def _eltwise_grad_kernel_theta(self, x_latents, x_thetas, y_latent, y_theta):
"""
Computes gradient :math:`\\nabla_{\\Theta} k((Z, \\Theta), (Z', \\Theta'))` elementwise
        for each provided particle :math:`(Z, \\Theta)` in batch (``x_latents``, ``x_thetas``)
Args:
x_latents (ndarray): batch of latent particles for :math:`Z` of shape ``[n_particles, d, k, 2]``
x_thetas (Any): batch of parameter PyTrees for :math:`\\Theta` with leading dim ``n_particles``
y_latent (ndarray): single latent particle :math:`Z'` ``[d, k, 2]``
y_theta (Any): single parameter PyTree for :math:`\\Theta'`
Returns:
batch of gradient PyTrees with leading dim ``n_particles``
"""
grad_kernel_theta = grad(self._f_kernel, 1)
return vmap(grad_kernel_theta, (0, 0, None, None), 0)(x_latents, x_thetas, y_latent, y_theta)
def _z_update(self, single_z, single_theta, kxx, z, theta, grad_log_prob_z):
"""
Computes SVGD update for ``single_z`` of a particle tuple (``single_z``, ``single_theta``)
particle given the kernel values ``kxx`` and the :math:`d/dZ` gradients of the target density
for each of the available particles
Args:
            single_z (ndarray): single latent tensor ``[d, k, 2]``, which is the :math:`Z` particle being updated
            single_theta (Any): single parameter PyTree, the :math:`\\Theta` particle of the :math:`Z` particle being updated
kxx (ndarray): pairwise kernel values for all particles, of shape ``[n_particles, n_particles]``
z (ndarray): all latent particles ``[n_particles, d, k, 2]``
theta (Any): all theta particles as PyTree with leading dim `n_particles`
grad_log_prob_z (ndarray): gradients of all Z particles w.r.t
target density of shape ``[n_particles, d, k, 2]``
Returns
transform vector of shape ``[d, k, 2]`` for the particle ``single_z``
"""
# compute terms in sum
weighted_gradient_ascent = kxx[..., None, None, None] * grad_log_prob_z
repulsion = self._eltwise_grad_kernel_z(z, theta, single_z, single_theta)
# average and negate (for optimizer)
return - (weighted_gradient_ascent + repulsion).mean(axis=0)
def _parallel_update_z(self, *args):
"""
Vectorizes :func:`~dibs.inference.JointDiBS._z_update`
for all available particles in batched first and second input
dim (``single_z``, ``single_theta``)
Otherwise, same inputs as :func:`~dibs.inference.JointDiBS._z_update`.
"""
return vmap(self._z_update, (0, 0, 1, None, None, None), 0)(*args)
def _theta_update(self, single_z, single_theta, kxx, z, theta, grad_log_prob_theta):
"""
Computes SVGD update for ``single_theta`` of a particle tuple (``single_z``, ``single_theta``)
particle given the kernel values ``kxx`` and the :math:`d/d\\Theta` gradients of the target density
for each of the available particles.
Analogous to :func:`dibs.inference.JointDiBS._z_update` but for updating :math:`\Theta`.
Args:
            single_z (ndarray): single latent tensor ``[d, k, 2]``, the :math:`Z` particle
                associated with the :math:`\\Theta` particle being updated
            single_theta (Any): single parameter PyTree, which is the
                :math:`\\Theta` particle being updated
kxx (ndarray): pairwise kernel values for all particles, of shape ``[n_particles, n_particles]``
z (ndarray): all latent particles ``[n_particles, d, k, 2]``
theta (Any): all theta particles as PyTree with leading dim `n_particles`
            grad_log_prob_theta (Any): gradients of all :math:`\\Theta` particles w.r.t.
                the target density, given as a PyTree with leading dim ``n_particles``
        Returns:
            transform PyTree with the same structure as ``single_theta``
"""
# compute terms in sum
weighted_gradient_ascent = tree_map(
lambda leaf_theta_grad: expand_by(kxx, leaf_theta_grad.ndim - 1) * leaf_theta_grad,
grad_log_prob_theta)
repulsion = self._eltwise_grad_kernel_theta(z, theta, single_z, single_theta)
# average and negate (for optimizer)
return tree_map(
lambda grad_asc_leaf, repuls_leaf: - (grad_asc_leaf + repuls_leaf).mean(axis=0),
weighted_gradient_ascent, repulsion)
def _parallel_update_theta(self, *args):
"""
Vectorizes :func:`~dibs.inference.JointDiBS._theta_update`
for all available particles in batched first and second input
dim (``single_z``, ``single_theta``).
Otherwise, same inputs as :func:`~dibs.inference.JointDiBS._theta_update`.
"""
return vmap(self._theta_update, (0, 0, 1, None, None, None), 0)(*args)
def _svgd_step(self, t, opt_state_z, opt_state_theta, key, sf_baseline):
"""
Performs a single SVGD step in the DiBS framework, updating all :math:`(Z, \\Theta)` particles jointly.
Args:
t (int): step
opt_state_z: optimizer state for latent :math:`Z` particles; contains ``[n_particles, d, k, 2]``
opt_state_theta: optimizer state for parameter :math:`\\Theta` particles;
contains PyTree with ``n_particles`` leading dim
key (ndarray): prng key
sf_baseline (ndarray): batch of baseline values of shape ``[n_particles, ]``
in case score function gradient is used
Returns:
the updated inputs ``opt_state_z``, ``opt_state_theta``, ``key``, ``sf_baseline``
"""
z = self.get_params(opt_state_z) # [n_particles, d, k, 2]
theta = self.get_params(opt_state_theta) # PyTree with `n_particles` leading dim
n_particles = z.shape[0]
# d/dtheta log p(theta, D | z)
key, *batch_subk = random.split(key, n_particles + 1)
dtheta_log_prob = self.eltwise_grad_theta_likelihood(z, theta, t, jnp.array(batch_subk))
# d/dz log p(theta, D | z)
key, *batch_subk = random.split(key, n_particles + 1)
dz_log_likelihood, sf_baseline = self.eltwise_grad_z_likelihood(z, theta, sf_baseline, t, jnp.array(batch_subk))
# d/dz log p(z) (acyclicity)
key, *batch_subk = random.split(key, n_particles + 1)
dz_log_prior = self.eltwise_grad_latent_prior(z, jnp.array(batch_subk), t)
# d/dz log p(z, theta, D) = d/dz log p(z) + log p(theta, D | z)
dz_log_prob = dz_log_prior + dz_log_likelihood
# k((z, theta), (z, theta)) for all particles
kxx = self._f_kernel_mat(z, theta, z, theta)
# transformation phi() applied in batch to each particle individually
phi_z = self._parallel_update_z(z, theta, kxx, z, theta, dz_log_prob)
phi_theta = self._parallel_update_theta(z, theta, kxx, z, theta, dtheta_log_prob)
# apply transformation
# `x += stepsize * phi`; the phi returned is negated for SVGD
opt_state_z = self.opt_update(t, phi_z, opt_state_z)
opt_state_theta = self.opt_update(t, phi_theta, opt_state_theta)
return opt_state_z, opt_state_theta, key, sf_baseline
# this is the crucial @jit
@functools.partial(jit, static_argnums=(0, 2))
def _svgd_loop(self, start, n_steps, init):
return jax.lax.fori_loop(start, start + n_steps, lambda i, args: self._svgd_step(i, *args), init)
def sample(self, *, key, n_particles, steps, n_dim_particles=None, callback=None, callback_every=None):
"""
Use SVGD with DiBS to sample ``n_particles`` particles :math:`(G, \\Theta)` from the joint posterior
:math:`p(G, \\Theta | D)` as defined by the BN model ``self.inference_model``
Arguments:
key (ndarray): prng key
n_particles (int): number of particles to sample
steps (int): number of SVGD steps performed
n_dim_particles (int): latent dimensionality :math:`k` of particles :math:`Z = \{ U, V \}`
with :math:`U, V \\in \\mathbb{R}^{k \\times d}`. Default is ``n_vars``
callback: function to be called every ``callback_every`` steps of SVGD.
callback_every: if ``None``, ``callback`` is only called after particle updates have finished
Returns:
tuple of shape (``[n_particles, n_vars, n_vars]``, ``PyTree``) where ``PyTree`` has leading dimension ``n_particles``:
batch of samples :math:`G, \\Theta \\sim p(G, \\Theta | D)`
"""
# randomly sample initial particles
key, subk = random.split(key)
init_z, init_theta = self._sample_initial_random_particles(key=subk, n_particles=n_particles,
n_dim=n_dim_particles)
# initialize score function baseline (one for each particle)
n_particles, _, n_dim, _ = init_z.shape
sf_baseline = jnp.zeros(n_particles)
if self.latent_prior_std is None:
self.latent_prior_std = 1.0 / jnp.sqrt(n_dim)
# maintain updated particles with optimizer state
opt_init, self.opt_update, get_params = self.opt
self.get_params = jit(get_params)
opt_state_z = opt_init(init_z)
opt_state_theta = opt_init(init_theta)
"""Execute particle update steps for all particles in parallel using `vmap` functions"""
# faster if for-loop is functionally pure and compiled, so only interrupt for callback
callback_every = callback_every or steps
for t in (range(0, steps, callback_every) if steps else range(0)):
# perform sequence of SVGD steps
opt_state_z, opt_state_theta, key, sf_baseline = self._svgd_loop(t, callback_every,
(opt_state_z, opt_state_theta, key,
sf_baseline))
# callback
if callback:
z = self.get_params(opt_state_z)
theta = self.get_params(opt_state_theta)
callback(
dibs=self,
t=t + callback_every,
zs=z,
thetas=theta,
)
# retrieve transported particles
z_final = jax.device_get(self.get_params(opt_state_z))
theta_final = jax.device_get(self.get_params(opt_state_theta))
# as alpha is large, we can convert the latents Z to their corresponding graphs G
g_final = self.particle_to_g_lim(z_final)
return g_final, theta_final
def get_empirical(self, g, theta):
"""
Converts batch of binary (adjacency) matrices and parameters into *empirical* particle distribution
where mixture weights correspond to counts/occurrences
Args:
g (ndarray): batch of graph samples ``[n_particles, d, d]`` with binary values
theta (Any): PyTree with leading dim ``n_particles``
Returns:
:class:`~dibs.metrics.ParticleDistribution`:
particle distribution of graph and parameter samples and associated log probabilities
"""
N, _, _ = g.shape
# since theta continuous, each particle (G, theta) is unique always
logp = - jnp.log(N) * jnp.ones(N)
return ParticleDistribution(logp=logp, g=g, theta=theta)
def get_mixture(self, g, theta):
"""
Converts batch of binary (adjacency) matrices and particles into *mixture* particle distribution,
where mixture weights correspond to unnormalized target (i.e. posterior) probabilities
Args:
g (ndarray): batch of graph samples ``[n_particles, d, d]`` with binary values
theta (Any): PyTree with leading dim ``n_particles``
Returns:
:class:`~dibs.metrics.ParticleDistribution`:
particle distribution of graph and parameter samples and associated log probabilities
"""
N, _, _ = g.shape
# mixture weighted by respective joint probabilities
eltwise_log_joint_target = vmap(lambda single_g, single_theta:
self.log_joint_prob(single_g, single_theta, self.x, self.interv_mask, None),
(0, 0), 0)
logp = eltwise_log_joint_target(g, theta)
logp -= logsumexp(logp)
return ParticleDistribution(logp=logp, g=g, theta=theta)
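# Illustrative sketch (not part of the original DiBS code): how `get_empirical` and
# `get_mixture` weight the same particles -- uniformly vs. by normalised joint
# log-probabilities. The log-probability values below are placeholders.
def _example_particle_weighting():
    n_particles = 4
    # `get_empirical`: every particle receives log(1 / N)
    logp_empirical = -jnp.log(n_particles) * jnp.ones(n_particles)
    # `get_mixture`: unnormalised joint log-probabilities, normalised via logsumexp
    unnormalised = jnp.array([-10.0, -12.0, -11.0, -30.0])
    logp_mixture = unnormalised - logsumexp(unnormalised)
    # both weight vectors sum to one in probability space
    assert jnp.allclose(jnp.exp(logp_empirical).sum(), 1.0)
    assert jnp.allclose(jnp.exp(logp_mixture).sum(), 1.0)
    return logp_empirical, logp_mixture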
| 36,346 | 42.477273 | 130 | py |
dibs | dibs-master/dibs/inference/__init__.py | from .dibs import DiBS
from .svgd import MarginalDiBS, JointDiBS
| 65 | 21 | 41 | py |
dibs | dibs-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('./../../'))
# -- Project information -----------------------------------------------------
project = 'dibs'
copyright = '2021, Lars Lorch'
author = 'Lars Lorch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0'
version = ''
# The full version, including alpha/beta/rc tags.
# release = '1.0'
release = ''
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
todo_include_todos = True
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
htmlhelp_basename = 'dibs-doc'
| 2,914 | 30.684783 | 79 | py |
geometric-js | geometric-js-master/main.py | """Main training module."""
import argparse
import logging
import os
import sys
import torch.optim as optim
from typing import List
from vae import Evaluator, Trainer
from vae.models.losses import LOSSES, RECON_DIST, get_loss_f
from vae.models.vae import VAE
from vae.utils.modelIO import save_model, load_model, load_metadata
from utils.datasets import get_dataloaders, get_img_size, DATASETS
from utils.helpers import (create_safe_directory, get_device, set_seed,
get_n_param)
from utils.visualize import GifTraversalsTraining
RES_DIR = 'results_new'
def parse_arguments(args_to_parse: List):
"""Parse the command line arguments.
Parameters
----------
args_to_parse: list of str
        Arguments to parse (split on whitespace).
"""
description = "PyTorch implementation and evaluation of Variational" + \
"AutoEncoders and metrics."
parser = argparse.ArgumentParser(description=description)
# General options
general = parser.add_argument_group('General options')
general.add_argument('name', type=str,
help="Name of the model for storing and loading.")
general.add_argument('--no-progress-bar',
action='store_true', default=False,
help='Disables progress bar.')
general.add_argument('--no-cuda',
action='store_true', default=False,
                         help='Disables CUDA training, even when a GPU is available.')
general.add_argument('-s', '--seed',
type=int, default=1234,
help='Random seed. `None` for stochastic behaviour.')
# Learning options
training = parser.add_argument_group('Training specific options')
training.add_argument('--checkpoint-every',
type=int, default=10,
                          help='Save the trained model every n epochs.')
training.add_argument('-d', '--dataset',
default='dsprites', choices=DATASETS,
help="Path to training data.")
training.add_argument('-e', '--epochs',
type=int, default=30,
help='Maximum number of epochs to run for.')
training.add_argument('-b', '--batch-size',
type=int, default=64,
help='Batch size for training.')
training.add_argument('--lr', '--learning-rate',
type=float, default=1e-4,
help='Learning rate.')
training.add_argument('--noise',
type=float, default=None,
help='Added noise to input images.')
# Model Options
    model = parser.add_argument_group('Model specific options')
model.add_argument('-z', '--latent-dim',
type=int, default=10,
help='Dimension of the latent variable.')
model.add_argument('-l', '--loss',
default='GJS', choices=LOSSES,
help="Type of VAE loss function to use.")
model.add_argument('-r', '--rec-dist',
default='bernoulli', choices=RECON_DIST,
help="Form of the likelihood ot use for each pixel.")
model.add_argument('-a', '--reg-anneal',
type=float, default=10000,
help="Number of annealing steps for regularisation.")
# Loss Specific Options
GJS = parser.add_argument_group('Geometric Jensen-Shannon parameters')
GJS.add_argument('--GJS-A',
type=float, default=0.5,
help='Skew of geometric-JS (alpha in the paper).')
GJS.add_argument('--GJS-B',
type=float, default=1.0,
help='Weight of the skew geometric-JS.')
GJS.add_argument('--GJS-invA',
type=bool, default=True,
help='Whether to invert alpha.')
betaH = parser.add_argument_group('BetaH parameters')
betaH.add_argument('--betaH-B',
type=float, default=4.0,
help='Weight of the KL (beta in the paper).')
MMD = parser.add_argument_group('MMD parameters')
MMD.add_argument('--MMD-B',
type=float, default=500.0,
help='Weight of the MMD (lambda in the paper).')
betaB = parser.add_argument_group('BetaB parameters')
betaB.add_argument('--betaB-initC',
type=float, default=0.0,
help='Starting annealed capacity.')
betaB.add_argument('--betaB-finC',
type=float, default=25.0,
help='Final annealed capacity.')
betaB.add_argument('--betaB-G',
type=float, default=100,
help='Weight of the KL (gamma in the paper).')
factor = parser.add_argument_group('factor VAE parameters')
factor.add_argument('--factor-G',
type=float, default=6.0,
help='Weight of the TC term (gamma in the paper).')
factor.add_argument('--lr-disc',
type=float, default=5e-5,
help='Learning rate of the discriminator.')
btcvae = parser.add_argument_group('beta-tcvae parameters')
btcvae.add_argument('--btcvae-A',
type=float, default=1.0,
help='Weight of the MI term (alpha in the paper).')
btcvae.add_argument('--btcvae-G',
type=float, default=1.0,
help='Weight of the dim-wise KL (gamma in the paper).')
btcvae.add_argument('--btcvae-B',
type=float, default=6.0,
help='Weight of the TC term (beta in the paper).')
# Learning options
evaluation = parser.add_argument_group('Evaluation options')
evaluation.add_argument('--is-eval-only',
action='store_true', default=False,
help='Whether to only evaluate model `name`.')
evaluation.add_argument('--is-metrics',
action='store_true', default=False,
help='Whether to compute disentanglement metrics.')
args = parser.parse_args(args_to_parse)
print(args)
return args
def main(args: argparse.Namespace):
"""Main train and evaluation function."""
formatter = logging.Formatter(
'%(asctime)s %(levelname)s - %(funcName)s: %(message)s', "%H:%M:%S")
logger = logging.getLogger(__name__)
logger.setLevel("INFO")
stream = logging.StreamHandler()
stream.setLevel("INFO")
stream.setFormatter(formatter)
logger.addHandler(stream)
set_seed(args.seed)
device = get_device(is_gpu=not args.no_cuda)
exp_dir = os.path.join(RES_DIR, args.name)
logger.info(
f"Root directory for saving and loading experiments: {exp_dir}")
if not args.is_eval_only:
create_safe_directory(exp_dir, logger=logger)
if args.loss == "factor":
logger.info(
"FactorVae needs 2 batches per iteration." +
"To replicate this behavior, double batch size and epochs.")
args.batch_size *= 2
args.epochs *= 2
# PREPARES DATA
train_loader = get_dataloaders(args.dataset,
noise=args.noise,
batch_size=args.batch_size,
logger=logger)
logger.info(
f"Train {args.dataset} with {len(train_loader.dataset)} samples")
# PREPARES MODEL
args.img_size = get_img_size(args.dataset) # stores for metadata
model = VAE(args.img_size, args.latent_dim)
logger.info(f'Num parameters in model: {get_n_param(model)}')
# TRAINS
optimizer = optim.Adam(model.parameters(), lr=args.lr)
model = model.to(device)
gif_visualizer = GifTraversalsTraining(model, args.dataset, exp_dir)
loss_f = get_loss_f(args.loss,
n_data=len(train_loader.dataset),
device=device,
**vars(args))
if args.loss in ['tdGJS', 'tGJS']:
loss_optimizer = optim.Adam(loss_f.parameters(), lr=args.lr)
else:
loss_optimizer = None
print(loss_optimizer)
trainer = Trainer(model, optimizer, loss_f,
device=device,
logger=logger,
save_dir=exp_dir,
is_progress_bar=not args.no_progress_bar,
gif_visualizer=gif_visualizer,
loss_optimizer=loss_optimizer,
denoise=args.noise is not None)
trainer(train_loader,
epochs=args.epochs,
checkpoint_every=args.checkpoint_every,)
# SAVE MODEL AND EXPERIMENT INFORMATION
save_model(trainer.model, exp_dir, metadata=vars(args))
# Eval
model = load_model(exp_dir, is_gpu=not args.no_cuda)
metadata = load_metadata(exp_dir)
test_loader = get_dataloaders(metadata["dataset"],
noise=args.noise,
train=False,
batch_size=128,
logger=logger)
loss_f = get_loss_f(args.loss,
n_data=len(test_loader.dataset),
device=device,
**vars(args))
evaluator = Evaluator(model, loss_f,
device=device,
is_metrics=args.is_metrics,
is_train=False,
logger=logger,
save_dir=exp_dir,
is_progress_bar=not args.no_progress_bar,
denoise=args.noise is not None)
evaluator(test_loader)
# Train set also
test_loader = get_dataloaders(metadata["dataset"],
train=True,
batch_size=128,
logger=logger)
loss_f = get_loss_f(args.loss,
n_data=len(test_loader.dataset),
device=device,
**vars(args))
evaluator = Evaluator(model, loss_f,
device=device,
is_metrics=args.is_metrics,
is_train=True,
logger=logger,
save_dir=exp_dir,
is_progress_bar=not args.no_progress_bar)
evaluator(test_loader)
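# Illustrative usage sketch (not part of the original script): the experiment name
# and flag values below are made up, and actually running it assumes the dsprites
# dataset is available locally; it only shows how `parse_arguments` and `main` compose.
def _example_run():
    example_args = parse_arguments(
        ["gjs-dsprites-demo", "--dataset", "dsprites", "--loss", "GJS",
         "--GJS-A", "0.5", "--epochs", "5"])
    main(example_args)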
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main(args)
| 10,961 | 39.750929 | 79 | py |
geometric-js | geometric-js-master/vae/evaluate.py | """Evaluation module."""
import logging
import math
import os
import torch
from collections import defaultdict
from logging import Logger
from functools import reduce
from timeit import default_timer
from tqdm import tqdm, trange
from typing import Any, Dict, Optional, Tuple
from vae.utils.math import log_density_gaussian
from vae.utils.modelIO import save_metadata
TRAIN_EVAL_FILE = "train_eval.log"
TEST_EVAL_FILE = "test_eval.log"
METRICS_FILENAME = "metrics.log"
METRIC_HELPERS_FILE = "metric_helpers.pth"
class Evaluator():
def __init__(self,
model: Any,
loss_f: Any,
device: Optional[torch.device] = torch.device("cpu"),
is_metrics: Optional[bool] = False,
is_train: Optional[bool] = False,
logger: Optional[Logger] = logging.getLogger(__name__),
save_dir: Optional[str] = "results",
is_progress_bar: Optional[bool] = True,
denoise: Optional[bool] = False
) -> None:
self.model = model.to(device)
self.loss_f = loss_f
self.device = device
self.is_metrics = is_metrics
self.is_train = is_train
self.logger = logger
self.logger.info(f"Testing Device: {self.device}")
self.save_dir = save_dir
self.is_progress_bar = is_progress_bar
self.denoise = denoise
def __call__(self,
data_loader: torch.utils.data.DataLoader
) -> Tuple[Dict, Dict]:
start = default_timer()
is_still_training = self.model.training
self.model.eval()
metrics = None
if self.is_metrics:
self.logger.info('Computing metrics...')
metrics, metric_helpers = self.compute_metrics(data_loader)
self.logger.info(f'Metrics: {metrics}')
self.logger.info(f'Metrics: {metric_helpers}')
save_metadata(metrics, self.save_dir, filename=METRICS_FILENAME)
self.logger.info('Computing losses...')
losses = self.compute_losses(data_loader)
self.logger.info(f'Losses: {losses}')
if self.is_train:
save_metadata(losses, self.save_dir, filename=TRAIN_EVAL_FILE)
else:
save_metadata(losses, self.save_dir, filename=TEST_EVAL_FILE)
if is_still_training:
self.model.train()
dt = (default_timer() - start) / 60
self.logger.info(f'Finished evaluating after {dt:.1f} min.')
return metrics, losses
def compute_losses(self, data_loader: torch.utils.data.DataLoader):
storer = defaultdict(list)
total_rec_loss = 0.
if self.denoise:
for noise_data, clean_data in tqdm(
data_loader, leave=False,
disable=not self.is_progress_bar):
noise_data = noise_data.to(self.device)
clean_data = clean_data.to(self.device)
recon_batch, latent_dist, latent_sample = self.model(noise_data)
loss, rec_loss = self.loss_f(
clean_data, recon_batch, latent_dist, self.model.training,
storer, latent_sample=latent_sample)
total_rec_loss += rec_loss.item()
else:
for data, _ in tqdm(data_loader,
leave=False,
disable=not self.is_progress_bar):
data = data.to(self.device)
recon_batch, latent_dist, latent_sample = self.model(data)
loss, rec_loss = self.loss_f(
data, recon_batch, latent_dist, self.model.training,
storer, latent_sample=latent_sample)
total_rec_loss += rec_loss.item()
losses = {k: sum(v) / len(data_loader) for k, v in storer.items()}
losses['recon_loss'] = total_rec_loss / len(data_loader)
return losses
def compute_metrics(self, data_loader):
"""
Compute all the metrics.
Parameters
----------
data_loader: torch.utils.data.DataLoader
"""
try:
lat_sizes = data_loader.dataset.lat_sizes
lat_names = data_loader.dataset.lat_names
except AttributeError:
raise ValueError(
"Dataset needs to have known true factors of variations to" +
"compute the metric. This does not seem to be the case for" +
f" {type(data_loader.__dict__['dataset']).__name__}")
self.logger.info("Computing the empirical distribution q(z|x)...")
samples_zCx, params_zCx = self._compute_q_zCx(data_loader)
len_dataset, latent_dim = samples_zCx.shape
self.logger.info("Estimating marginal entropies...")
# marginal entropy H(z_j)
H_z = self._estimate_latent_entropies(samples_zCx, params_zCx)
# conditional entropy H(z|v)
samples_zCx = samples_zCx.view(*lat_sizes, latent_dim)
params_zCx = tuple(p.view(*lat_sizes, latent_dim) for p in params_zCx)
H_zCv = self._estimate_H_zCv(
samples_zCx, params_zCx, lat_sizes, lat_names)
H_z = H_z.cpu()
H_zCv = H_zCv.cpu()
# I[z_j;v_k] = E[log \sum_x q(z_j|x)p(x|v_k)] + H[z_j]
# = - H[z_j|v_k] + H[z_j]
mut_info = - H_zCv + H_z
sorted_mut_info = \
torch.sort(mut_info, dim=1, descending=True)[0].clamp(min=0)
metric_helpers = {'marginal_entropies': H_z, 'cond_entropies': H_zCv}
mig = self._mutual_information_gap(
sorted_mut_info, lat_sizes, storer=metric_helpers)
metrics = {'MIG': mig.item()}
torch.save(
metric_helpers, os.path.join(self.save_dir, METRIC_HELPERS_FILE))
return metrics, metric_helpers
def _mutual_information_gap(self, sorted_mut_info, lat_sizes, storer=None):
"""
Compute the mutual information gap as in [1].
References
----------
[1] Chen, Tian Qi, et al. "Isolating sources of disentanglement in
variational autoencoders." Advances in Neural Information
Processing Systems. 2018.
"""
# difference between the largest and second largest mutual info
delta_mut_info = sorted_mut_info[:, 0] - sorted_mut_info[:, 1]
# NOTE: currently only works if balanced dataset for every factor of
# variation, then H(v_k) = - |V_k|/|V_k| log(1/|V_k|) = log(|V_k|)
H_v = torch.from_numpy(lat_sizes).float().log()
mig_k = delta_mut_info / H_v
mig = mig_k.mean() # mean over factor of variations
if storer is not None:
storer["mig_k"] = mig_k
storer["mig"] = mig
return mig
def _compute_q_zCx(self, dataloader):
"""
        Compute the empirical distribution of q(z|x).
Parameter
---------
dataloader: torch.utils.data.DataLoader
Batch data iterator.
Return
------
samples_zCx: torch.tensor
Tensor of shape (len_dataset, latent_dim) containing a sample of
q(z|x) for every x in the dataset.
params_zCX: tuple of torch.Tensor
Sufficient statistics q(z|x) for each training example. E.g. for
gaussian (mean, log_var) each of shape : (len_dataset, latent_dim).
"""
len_dataset = len(dataloader.dataset)
latent_dim = self.model.latent_dim
n_suff_stat = 2
q_zCx = torch.zeros(
len_dataset, latent_dim, n_suff_stat, device=self.device)
n = 0
with torch.no_grad():
for x, label in dataloader:
batch_size = x.size(0)
idcs = slice(n, n + batch_size)
q_zCx[idcs, :, 0], q_zCx[idcs, :, 1] = \
self.model.encoder(x.to(self.device))
n += batch_size
params_zCX = q_zCx.unbind(-1)
samples_zCx = self.model.reparameterize(*params_zCX)
return samples_zCx, params_zCX
def _estimate_latent_entropies(self, samples_zCx, params_zCX,
n_samples=10000):
r"""
Estimate :math:`H(z_j) = E_{q(z_j)} [-log q(z_j)]
= E_{p(x)} E_{q(z_j|x)} [-log q(z_j)]`
        using the empirical distribution of :math:`p(x)`.
Note
----
        - the expectation over the empirical distribution is:
:math:`q(z) = 1/N sum_{n=1}^N q(z|x_n)`.
- assume that q(z|x) is factorial i.e.
:math:`q(z|x) = \prod_j q(z_j|x)`.
- computes numerically stable NLL:
:math:`- log q(z) = log N - logsumexp_n=1^N log q(z|x_n)`.
Parameters
----------
samples_zCx: torch.tensor
Tensor of shape (len_dataset, latent_dim) containing a sample of
q(z|x) for every x in the dataset.
params_zCX: tuple of torch.Tensor
Sufficient statistics q(z|x) for each training example. E.g. for
gaussian (mean, log_var) each of shape : (len_dataset, latent_dim).
n_samples: int, optional
Number of samples to use to estimate the entropies.
Return
------
H_z: torch.Tensor
Tensor of shape (latent_dim) - the marginal entropies H(z_j).
"""
len_dataset, latent_dim = samples_zCx.shape
device = samples_zCx.device
H_z = torch.zeros(latent_dim, device=device)
# sample from p(x)
samples_x = torch.randperm(len_dataset, device=device)[:n_samples]
# sample from p(z|x)
samples_zCx = \
samples_zCx.index_select(0, samples_x).view(latent_dim, n_samples)
mini_batch_size = 10
samples_zCx = samples_zCx.expand(len_dataset, latent_dim, n_samples)
mean = params_zCX[0].unsqueeze(-1).expand(
len_dataset, latent_dim, n_samples)
log_var = params_zCX[1].unsqueeze(-1).expand(
len_dataset, latent_dim, n_samples)
log_N = math.log(len_dataset)
with trange(
n_samples, leave=False, disable=not self.is_progress_bar) as t:
for k in range(0, n_samples, mini_batch_size):
# log q(z_j|x) for n_samples
idcs = slice(k, k + mini_batch_size)
log_q_zCx = log_density_gaussian(samples_zCx[..., idcs],
mean[..., idcs],
log_var[..., idcs])
# numerically stable log q(z_j) for n_samples:
# log q(z_j) = -log N + logsumexp_{n=1}^N log q(z_j|x_n)
                # As we don't know q(z) we approximate it with the Monte Carlo
                # expectation of q(z_j|x_n) over x, i.e. fix a single z and look
                # at the probability of every x generating it. n_samples is not
                # used here!
log_q_z = -log_N + torch.logsumexp(log_q_zCx, dim=0)
# H(z_j) = E_{z_j}[- log q(z_j)]
# mean over n_samples (dim 1 because already summed over 0).
H_z += (-log_q_z).sum(1)
t.update(mini_batch_size)
H_z /= n_samples
return H_z
def _estimate_H_zCv(self, samples_zCx, params_zCx, lat_sizes, lat_names):
"""
Estimate conditional entropies :math:`H[z|v]`.
"""
latent_dim = samples_zCx.size(-1)
len_dataset = reduce((lambda x, y: x * y), lat_sizes)
H_zCv = torch.zeros(len(lat_sizes), latent_dim, device=self.device)
for i, (lat_size, lat_name) in enumerate(zip(lat_sizes, lat_names)):
idcs = [slice(None)] * len(lat_sizes)
for j in range(lat_size):
self.logger.info(
f"Estimating conditional entropies for the {j}th value " +
f"of {lat_name}.")
idcs[i] = j
# samples from q(z,x|v)
samples_zxCv = samples_zCx[idcs].contiguous().view(
len_dataset // lat_size, latent_dim)
params_zxCv = tuple(
p[idcs].contiguous().view(
len_dataset // lat_size, latent_dim)
for p in params_zCx)
H_zCv[i] += self._estimate_latent_entropies(
samples_zxCv, params_zxCv) / lat_size
return H_zCv
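# Illustrative worked example (not part of the original module): given a toy matrix
# of mutual informations I[z_j; v_k] already sorted per factor, MIG is the gap
# between the two most informative latents, normalised by the factor entropy
# H(v_k) = log |V_k|, mirroring `_mutual_information_gap` above.
def _example_mig():
    import numpy as np
    lat_sizes = np.array([3, 6])                       # two factors with 3 and 6 values
    sorted_mut_info = torch.tensor([[1.0, 0.2, 0.0],   # rows: factors v_k
                                    [1.5, 1.4, 0.1]])  # cols: latents z_j (sorted)
    delta_mut_info = sorted_mut_info[:, 0] - sorted_mut_info[:, 1]
    H_v = torch.from_numpy(lat_sizes).float().log()
    mig = (delta_mut_info / H_v).mean()
    return mig                                         # ~ (0.8/log 3 + 0.1/log 6) / 2 ~ 0.39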
| 12,490 | 37.198777 | 80 | py |
geometric-js | geometric-js-master/vae/training.py | """Training module."""
import logging
import os
import torch
from collections import defaultdict
from logging import Logger
from timeit import default_timer
from tqdm import trange
from typing import Any, Dict, Optional, Tuple
from vae.utils.modelIO import save_model
TRAIN_LOSSES_LOGFILE = "train_losses.log"
class Trainer():
def __init__(self,
model: Any,
optimizer: torch.optim.Optimizer,
loss_f: Any,
device: Optional[torch.device] = torch.device("cpu"),
logger: Optional[Logger] = logging.getLogger(__name__),
save_dir: Optional[str] = "results",
gif_visualizer: Optional[Any] = None,
is_progress_bar: Optional[bool] = True,
loss_optimizer: Optional[bool] = None,
denoise: Optional[bool] = False,
record_alpha_range: Optional[bool] = False
) -> None:
self.model = model.to(device)
self.optimizer = optimizer
self.loss_f = loss_f
self.device = device
self.logger = logger
self.save_dir = save_dir
self.gif_visualizer = gif_visualizer
self.is_progress_bar = is_progress_bar
self.loss_optimizer = loss_optimizer
self.denoise = denoise
self.record_alpha_range = record_alpha_range
self.losses_logger = LossesLogger(
os.path.join(self.save_dir, TRAIN_LOSSES_LOGFILE))
self.logger.info(f"Training Device: {self.device}")
def __call__(self,
data_loader: torch.utils.data.DataLoader,
epochs: Optional[int] = 10,
checkpoint_every: Optional[int] = 10
) -> None:
start = default_timer()
self.model.train()
for epoch in range(epochs):
storer = defaultdict(list)
avg_rec_loss = self._train_epoch(data_loader, storer, epoch)
storer['recon_loss'] += [avg_rec_loss]
self.logger.info(
f'Epoch: {epoch+1} Avg recon loss / image: {avg_rec_loss:.2f}')
if self.loss_optimizer is not None:
self.losses_logger.log(epoch,
storer,
alpha_parameter=self.loss_f.alpha)
else:
self.losses_logger.log(epoch, storer)
if self.gif_visualizer is not None:
self.gif_visualizer()
if epoch % checkpoint_every == 0:
save_model(
self.model, self.save_dir, filename=f"model-{epoch}.pt")
save_model(self.model, self.save_dir, filename=f"model-{epoch+1}.pt")
if self.gif_visualizer is not None:
self.gif_visualizer.save_reset()
self.model.eval()
dt = (default_timer() - start) / 60
self.logger.info(f'Finished training after {dt:.1f} min.')
def _train_epoch(self,
data_loader: torch.utils.data.DataLoader,
storer: Dict,
epoch: int
) -> float:
epoch_rec_loss = 0.
kwargs = dict(desc=f"Epoch {epoch+1}", leave=False,
disable=not self.is_progress_bar)
with trange(len(data_loader), **kwargs) as t:
if self.denoise:
# If denoising experiment, calculate the reconstruction error
# by comparing the output of the vae decoder with clean
for _, (noisy_data, clean_data) in enumerate(data_loader):
iter_loss, iter_rec_loss = self._train_iteration(
data=noisy_data, storer=storer, clean_data=clean_data)
epoch_rec_loss += iter_rec_loss
t.set_postfix(loss=iter_rec_loss)
t.update()
else:
for _, (data, _) in enumerate(data_loader):
iter_loss, iter_rec_loss = self._train_iteration(
data, storer)
epoch_rec_loss += iter_rec_loss
t.set_postfix(loss=iter_rec_loss)
t.update()
return epoch_rec_loss / len(data_loader)
def _train_iteration(self,
data: torch.Tensor,
storer: Dict,
clean_data: Optional[torch.Tensor] = None
) -> Tuple[float, float]:
batch_size, channel, height, width = data.size()
data_in = data.to(self.device)
if clean_data is not None:
data_out = clean_data.to(self.device)
else:
data_out = data.to(self.device)
try:
# Iterate loss parameters if a loss optimiser is passed:
recon_batch, latent_dist, latent_sample = self.model(data_in)
loss, rec_loss = self.loss_f(
data_out, recon_batch, latent_dist, self.model.training,
storer, record_alpha_range=self.record_alpha_range,
latent_sample=latent_sample)
if self.loss_optimizer is not None:
self.loss_optimizer.zero_grad()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.loss_optimizer is not None:
self.loss_optimizer.step()
with torch.no_grad():
if hasattr(self.loss_f, 'alpha'):
self.loss_f.alpha.clamp_(0, 1)
        except ValueError:
            # For losses that use multiple optimizers (e.g. Factor);
            # `call_optimize` only returns the total loss, so reuse it for the
            # reconstruction-loss logging on this path.
            loss = self.loss_f.call_optimize(
                data, self.model, self.optimizer, storer)
            rec_loss = loss
        return loss.item(), rec_loss.item()
class LossesLogger(object):
def __init__(self, file_path_name):
if os.path.isfile(file_path_name):
os.remove(file_path_name)
self.logger = logging.getLogger("losses_logger")
self.logger.setLevel(1) # always store
file_handler = logging.FileHandler(file_path_name)
file_handler.setLevel(1)
self.logger.addHandler(file_handler)
header = ",".join(["Epoch", "Loss", "Value"])
self.logger.debug(header)
def log(self, epoch, losses_storer,
alpha_parameter=None, mean=None, logvar=None):
for k, v in losses_storer.items():
log_string = ",".join(
str(item) for item in [epoch, k, sum(v) / len(v)])
self.logger.debug(log_string)
if alpha_parameter is not None:
self.logger.debug(f"{epoch},alpha,{alpha_parameter.item()}")
if mean is not None:
for i in range(len(mean)):
self.logger.debug(f"{epoch},mean_{i+1},{mean[i].item()}")
if logvar is not None:
var = logvar.exp()
for i in range(len(var)):
self.logger.debug(f"{epoch},var_{i+1},{var[i].item()}")
| 7,003 | 37.273224 | 79 | py |
geometric-js | geometric-js-master/vae/__init__.py | from vae.evaluate import Evaluator
from vae.training import Trainer
| 68 | 22 | 34 | py |
geometric-js | geometric-js-master/vae/models/losses.py | """Module containing all vae losses."""
import abc
import torch
from torch import optim, Tensor
from torch.nn import functional as F
from typing import Any, Dict, Optional, Tuple
from .discriminator import Discriminator
from vae.utils.math import (log_density_gaussian, log_importance_weight_matrix,
matrix_log_density_gaussian)
LOSSES = ["VAE", "KL", "fwdKL", "GJS", "dGJS",
"tGJS", "tdGJS", "MMD", "betaH", "betaB",
"factor", "btcvae"]
RECON_DIST = ["bernoulli", "laplace", "gaussian"]
def get_loss_f(loss_name, **kwargs_parse):
"""Return the correct loss function given the argparse arguments."""
kwargs_all = dict(rec_dist=kwargs_parse["rec_dist"],
steps_anneal=kwargs_parse["reg_anneal"])
if loss_name == "VAE":
return BetaHLoss(beta=0, **kwargs_all)
elif loss_name == "KL":
return BetaHLoss(beta=1, **kwargs_all)
elif loss_name == "fwdKL":
return BetaHLoss(beta=1, fwd_kl=True, **kwargs_all)
elif loss_name == "GJS":
return GeometricJSLoss(alpha=kwargs_parse["GJS_A"],
beta=kwargs_parse["GJS_B"],
dual=False,
invert_alpha=kwargs_parse["GJS_invA"],
**kwargs_all)
elif loss_name == "dGJS":
return GeometricJSLoss(alpha=kwargs_parse["GJS_A"],
beta=kwargs_parse["GJS_B"],
dual=True,
invert_alpha=kwargs_parse["GJS_invA"],
**kwargs_all)
elif loss_name == "tGJS":
return GeometricJSLossTrainableAlpha(alpha=kwargs_parse["GJS_A"],
beta=kwargs_parse["GJS_B"],
dual=False,
invert_alpha=kwargs_parse["GJS_invA"],
**kwargs_all)
elif loss_name == "tdGJS":
return GeometricJSLossTrainableAlpha(alpha=kwargs_parse["GJS_A"],
beta=kwargs_parse["GJS_B"],
dual=True,
invert_alpha=kwargs_parse["GJS_invA"],
**kwargs_all)
elif loss_name == "MMD":
return MMDLoss(beta=kwargs_parse["MMD_B"],
**kwargs_all)
elif loss_name == "betaH":
return BetaHLoss(beta=kwargs_parse["betaH_B"], **kwargs_all)
elif loss_name == "betaB":
return BetaBLoss(C_init=kwargs_parse["betaB_initC"],
C_fin=kwargs_parse["betaB_finC"],
gamma=kwargs_parse["betaB_G"],
**kwargs_all)
elif loss_name == "factor":
return FactorKLoss(kwargs_parse["device"],
gamma=kwargs_parse["factor_G"],
disc_kwargs=dict(
latent_dim=kwargs_parse["latent_dim"]),
optim_kwargs=dict(
lr=kwargs_parse["lr_disc"], betas=(0.5, 0.9)),
**kwargs_all)
elif loss_name == "btcvae":
return BtcvaeLoss(kwargs_parse["n_data"],
alpha=kwargs_parse["btcvae_A"],
beta=kwargs_parse["btcvae_B"],
gamma=kwargs_parse["btcvae_G"],
**kwargs_all)
else:
assert loss_name not in LOSSES
raise ValueError("Unknown loss : {}".format(loss_name))
class BaseLoss(abc.ABC):
r"""
Base class for losses.
Parameters
----------
record_loss_every: int, optional
Loss record frequency.
rec_dist: {"bernoulli", "gaussian", "laplace"}, optional
Reconstruction distribution of the likelihood on the each pixel.
        Implicitly defines the reconstruction loss. Bernoulli corresponds to a
        binary cross entropy (BCE), Gaussian corresponds to MSE, Laplace
corresponds to L1.
steps_anneal: int, optional
Number of annealing steps where gradually adding the regularisation.
"""
def __init__(self,
record_loss_every: Optional[int] = 938,
rec_dist: Optional[str] = "bernoulli",
steps_anneal: Optional[int] = 0,
**kwargs: Optional[Any]
) -> None:
super().__init__(**kwargs)
self.n_train_steps = 0
self.record_loss_every = record_loss_every
self.rec_dist = rec_dist
self.steps_anneal = steps_anneal
@abc.abstractmethod
def __call__(self,
data: torch.Tensor,
recon_data: torch.Tensor,
latent_dist: Tuple[torch.Tensor, torch.Tensor],
is_train: bool,
storer: Dict,
**kwargs: Optional[Any]
) -> None:
r"""
Calculates loss for a batch of data.
Parameters
----------
data : torch.Tensor
Input data (e.g. batch of images). Shape : (batch_size, n_chan,
height, width).
recon_data : torch.Tensor
Reconstructed data. Shape : (batch_size, n_chan, height, width).
latent_dist : tuple of torch.tensor
sufficient statistics of the latent dimension. E.g. for gaussian
(mean, log_var) each of shape : (batch_size, latent_dim).
is_train : bool
Whether currently in train mode.
storer : dict
Dictionary in which to store important variables for vizualisation.
kwargs:
Loss specific arguments
"""
def _pre_call(self,
is_train: bool,
storer: Dict
) -> Dict:
if is_train:
self.n_train_steps += 1
if not is_train or self.n_train_steps % self.record_loss_every == 0:
storer = storer
else:
storer = None
return storer
class BetaHLoss(BaseLoss):
r"""Compute the Beta-VAE loss as in [1]
References
----------
[1] Higgins, Irina, et al. "beta-vae: Learning basic visual concepts
with a constrained variational framework." (2016).
"""
def __init__(self,
beta: Optional[float] = 4.0,
fwd_kl: Optional[bool] = False,
**kwargs: Optional[Dict]
) -> None:
super().__init__(**kwargs)
self.beta = beta
self.fwd_kl = fwd_kl
def __call__(self,
data: torch.Tensor,
recon_data: torch.Tensor,
latent_dist: Tuple[torch.Tensor, torch.Tensor],
is_train: bool,
storer: Dict,
**kwargs: Optional[Any]
) -> None:
storer = self._pre_call(is_train, storer)
rec_loss = _reconstruction_loss(data, recon_data,
storer=storer,
distribution=self.rec_dist)
mean, logvar = latent_dist
if self.fwd_kl:
kl_loss = _kl_normal_loss(
m_1=torch.zeros_like(mean),
lv_1=torch.zeros_like(logvar),
m_2=mean,
lv_2=logvar,
storer=storer)
else:
kl_loss = _kl_normal_loss(
m_1=mean,
lv_1=logvar,
m_2=torch.zeros_like(mean),
lv_2=torch.zeros_like(logvar),
storer=storer)
loss = rec_loss + self.beta * kl_loss
if storer is not None:
storer['loss'].append(loss.item())
return loss, rec_loss
class BetaBLoss(BaseLoss):
"""
Compute the Beta-VAE loss as in [1]
Parameters
----------
C_init : float, optional
Starting annealed capacity C.
C_fin : float, optional
Final annealed capacity C.
gamma : float, optional
Weight of the KL divergence term.
kwargs:
Additional arguments for `BaseLoss`, e.g. `rec_dist`.
References
----------
[1] Burgess, Christopher P., et al. "Understanding disentangling in
$\beta$-VAE." arXiv preprint arXiv:1804.03599 (2018).
"""
def __init__(self, C_init=0., C_fin=20., gamma=100., **kwargs):
super().__init__(**kwargs)
self.gamma = gamma
self.C_init = C_init
self.C_fin = C_fin
def __call__(self, data, recon_data, latent_dist, is_train, storer,
**kwargs):
storer = self._pre_call(is_train, storer)
rec_loss = _reconstruction_loss(data, recon_data,
storer=storer,
distribution=self.rec_dist)
kl_loss = _kl_normal_loss(*latent_dist, storer)
C = (linear_annealing(
self.C_init, self.C_fin, self.n_train_steps, self.steps_anneal)
if is_train else self.C_fin)
loss = rec_loss + self.gamma * (kl_loss - C).abs()
if storer is not None:
storer['loss'].append(loss.item())
return loss
class FactorKLoss(BaseLoss):
"""
Compute the Factor-VAE loss as per Algorithm 2 of [1]
Parameters
----------
device : torch.device
gamma : float, optional
Weight of the TC loss term. `gamma` in the paper.
discriminator : disvae.discriminator.Discriminator
optimizer_d : torch.optim
kwargs:
Additional arguments for `BaseLoss`, e.g. `rec_dist`.
References
----------
[1] Kim, Hyunjik, and Andriy Mnih. "Disentangling by factorising."
arXiv preprint arXiv:1802.05983 (2018).
"""
def __init__(self, device,
gamma=10.,
disc_kwargs={},
optim_kwargs=dict(lr=5e-5, betas=(0.5, 0.9)),
**kwargs):
super().__init__(**kwargs)
self.gamma = gamma
self.device = device
self.discriminator = Discriminator(**disc_kwargs).to(self.device)
self.optimizer_d = optim.Adam(
self.discriminator.parameters(), **optim_kwargs)
def __call__(self, *args, **kwargs):
raise ValueError("Use `call_optimize` to also train the discriminator")
def call_optimize(self, data, model, optimizer, storer):
storer = self._pre_call(model.training, storer)
# factor-vae split data into 2 batches. In paper they sample 2 batches
batch_size = data.size(dim=0)
half_batch_size = batch_size // 2
data = data.split(half_batch_size)
data1 = data[0]
data2 = data[1]
# Factor VAE Loss
recon_batch, latent_dist, latent_sample1 = model(data1)
rec_loss = _reconstruction_loss(data1, recon_batch,
storer=storer,
distribution=self.rec_dist)
kl_loss = _kl_normal_loss(*latent_dist, storer)
d_z = self.discriminator(latent_sample1)
        # We want log(p_true/p_false). If softmax not logistic regression
# then p_true = exp(logit_true) / Z; p_false = exp(logit_false) / Z
# so log(p_true/p_false) = logit_true - logit_false
tc_loss = (d_z[:, 0] - d_z[:, 1]).mean()
# with sigmoid (bad results) should be
# `tc_loss = (2 * d_z.flatten()).mean()`
anneal_reg = (linear_annealing(
0, 1, self.n_train_steps, self.steps_anneal)
if model.training else 1)
vae_loss = rec_loss + kl_loss + anneal_reg * self.gamma * tc_loss
if storer is not None:
storer['loss'].append(vae_loss.item())
storer['tc_loss'].append(tc_loss.item())
if not model.training:
# don't backprop if evaluating
return vae_loss
# Run VAE optimizer
optimizer.zero_grad()
vae_loss.backward(retain_graph=True)
optimizer.step()
# Discriminator Loss
# Get second sample of latent distribution
latent_sample2 = model.sample_latent(data2)
z_perm = _permute_dims(latent_sample2).detach()
d_z_perm = self.discriminator(z_perm)
# Calculate total correlation loss
        # for cross entropy the target is the class index => needs to be long;
        # class 0 is the target for d_z and class 1 for the permuted samples
ones = torch.ones(
half_batch_size, dtype=torch.long, device=self.device)
zeros = torch.zeros_like(ones)
d_tc_loss = 0.5 * (F.cross_entropy(d_z, zeros) +
F.cross_entropy(d_z_perm, ones))
# with sigmoid would be :
# d_tc_loss = 0.5 * (self.bce(d_z.flatten(), ones) +
# self.bce(d_z_perm.flatten(), 1 - ones))
# TO-DO: check if should also anneal discriminator
# if not becomes too good ???
# d_tc_loss = anneal_reg * d_tc_loss
# Run discriminator optimizer
self.optimizer_d.zero_grad()
d_tc_loss.backward()
self.optimizer_d.step()
if storer is not None:
storer['discrim_loss'].append(d_tc_loss.item())
return vae_loss
class BtcvaeLoss(BaseLoss):
"""
Compute the decomposed KL loss with either minibatch weighted sampling or
minibatch stratified sampling according to [1]
Parameters
----------
n_data: int
Number of data in the training set
alpha : float
Weight of the mutual information term.
beta : float
Weight of the total correlation term.
gamma : float
Weight of the dimension-wise KL term.
is_mss : bool
Whether to use minibatch stratified sampling instead of minibatch
weighted sampling.
kwargs:
Additional arguments for `BaseLoss`, e.g. `rec_dist`.
References
----------
[1] Chen, Tian Qi, et al. "Isolating sources of disentanglement in
variational autoencoders." Advances in Neural Information Processing
Systems. 2018.
"""
def __init__(self, n_data, alpha=1., beta=6., gamma=1., is_mss=True,
**kwargs):
super().__init__(**kwargs)
self.n_data = n_data
self.beta = beta
self.alpha = alpha
self.gamma = gamma
self.is_mss = is_mss # minibatch stratified sampling
def __call__(self, data, recon_batch, latent_dist, is_train, storer,
latent_sample=None):
storer = self._pre_call(is_train, storer)
# batch_size, latent_dim = latent_sample.shape
rec_loss = _reconstruction_loss(data, recon_batch,
storer=storer,
distribution=self.rec_dist)
log_pz, log_qz, log_prod_qzi, log_q_zCx = _get_log_pz_qz_prodzi_qzCx(
latent_sample, latent_dist, self.n_data, is_mss=self.is_mss)
# I[z;x] = KL[q(z,x)||q(x)q(z)] = E_x[KL[q(z|x)||q(z)]]
mi_loss = (log_q_zCx - log_qz).mean()
# TC[z] = KL[q(z)||\prod_i z_i]
tc_loss = (log_qz - log_prod_qzi).mean()
# dw_kl_loss is KL[q(z)||p(z)] instead of usual KL[q(z|x)||p(z))]
dw_kl_loss = (log_prod_qzi - log_pz).mean()
anneal_reg = (linear_annealing(
0, 1, self.n_train_steps, self.steps_anneal)
if is_train else 1)
# total loss
loss = rec_loss + (self.alpha * mi_loss +
self.beta * tc_loss +
anneal_reg * self.gamma * dw_kl_loss)
if storer is not None:
storer['loss'].append(loss.item())
storer['mi_loss'].append(mi_loss.item())
storer['tc_loss'].append(tc_loss.item())
storer['dw_kl_loss'].append(dw_kl_loss.item())
        # computing this for storing and comparison purposes
_ = _kl_normal_loss(*latent_dist, storer)
return loss
class MMDLoss(BaseLoss):
r"""Compute VAE loss with maximum mean discrepancy regularisation.
Parameters
----------
beta : float, optional
Weight of the MMD divergence.
kwargs:
Additional arguments for `BaseLoss`, e.g. `rec_dist`.
"""
def __init__(self, beta=500.0, **kwargs):
super().__init__(**kwargs)
self.beta = beta
def __call__(self, data, recon_data, latent_dist, is_train, storer,
**kwargs):
storer = self._pre_call(is_train, storer)
rec_loss = _reconstruction_loss(data, recon_data,
storer=storer,
distribution=self.rec_dist)
mmd_loss = _mmd_loss(*latent_dist, storer=storer)
loss = rec_loss + self.beta * mmd_loss
if storer is not None:
storer['loss'] += [loss.item()]
return loss, rec_loss
class GeometricJSLoss(BaseLoss):
r"""Compute VAE loss with skew geometric-Jensen-Shannon regularisation [1].
Parameters
----------
alpha : float, optional
Skew of the skew geometric-Jensen-Shannon divergence
beta : float, optional
Weight of the skew g-js divergence.
dual : bool, optional
Whether to use Dual or standard GJS.
invert_alpha : bool, optional
Whether to replace alpha with 1 - a.
kwargs:
Additional arguments for `BaseLoss`, e.g. `rec_dist`.
References
----------
[1] Deasy, Jacob, Nikola Simidjievski, and Pietro Liò.
"Constraining Variational Inference with Geometric Jensen-Shannon Divergence."
Advances in Neural Information Processing Systems 33 (2020).
"""
def __init__(self, alpha=0.5, beta=1.0, dual=True, invert_alpha=True, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.beta = beta
self.dual = dual
self.invert_alpha = invert_alpha
def __call__(self, data, recon_data, latent_dist, is_train, storer,
**kwargs):
storer = self._pre_call(is_train, storer)
rec_loss = _reconstruction_loss(data, recon_data,
storer=storer,
distribution=self.rec_dist)
gjs_loss = _gjs_normal_loss(*latent_dist,
dual=self.dual,
a=self.alpha,
invert_alpha=self.invert_alpha,
storer=storer)
loss = rec_loss + self.beta * gjs_loss
if storer is not None:
storer['loss'] += [loss.item()]
return loss, rec_loss
class GeometricJSLossTrainableAlpha(BaseLoss, torch.nn.Module):
r"""Compute VAE loss with skew geometric-Jensen-Shannon regularisation [1].
References
----------
[1] Deasy, Jacob, Nikola Simidjievski, and Pietro Liò.
"Constraining Variational Inference with Geometric Jensen-Shannon
Divergence." Advances in Neural Information Processing Systems 33
(2020).
"""
def __init__(self,
alpha: Optional[float] = 0.5,
beta: Optional[float] = 1.0,
dual: Optional[bool] = True,
invert_alpha: Optional[bool] = True,
device: Optional[torch.device] = None,
**kwargs: Optional[Dict]
) -> None:
super(GeometricJSLossTrainableAlpha, self).__init__(**kwargs)
self.alpha = torch.nn.Parameter(torch.tensor(alpha))
self.beta = beta
self.dual = dual
self.invert_alpha = invert_alpha
self.device = device
def __call__(self,
data,
recon_data,
latent_dist,
is_train,
storer,
record_alpha_range=False,
**kwargs):
storer = self._pre_call(is_train, storer)
rec_loss = _reconstruction_loss(data, recon_data,
storer=storer,
distribution=self.rec_dist)
gjs_loss = _gjs_normal_loss(*latent_dist,
dual=self.dual,
a=self.alpha,
invert_alpha=self.invert_alpha,
storer=storer,
record_alpha_range=record_alpha_range)
loss = rec_loss + self.beta * gjs_loss
if storer is not None:
storer['loss'] += [loss.item()]
return loss, rec_loss
class GeometricJSLossTrainablePrior(BaseLoss, torch.nn.Module):
r"""Compute VAE loss with skew geometric-Jensen-Shannon regularisation [1].
Parameters
----------
alpha : float, optional
Skew of the skew geometric-Jensen-Shannon divergence
beta : float, optional
Weight of the skew g-js divergence.
dual : bool, optional
Whether to use Dual or standard GJS.
invert_alpha : bool, optional
Whether to replace alpha with 1 - a.
kwargs:
Additional arguments for `BaseLoss`, e.g. `rec_dist`.
References
----------
[1] Deasy, Jacob, Nikola Simidjievski, and Pietro Liò.
"Constraining Variational Inference with Geometric Jensen-Shannon Divergence."
Advances in Neural Information Processing Systems 33 (2020).
"""
def __init__(self, alpha=None, beta=1.0, dual=True, invert_alpha=True, device=None, **kwargs):
super(GeometricJSLossTrainablePrior, self).__init__(**kwargs)
        # `device` may be None; `Tensor.to(None)` is a no-op, so a single
        # assignment per case handles both the CPU default and explicit devices.
        if alpha is not None:
            self.alpha = torch.nn.Parameter(torch.tensor([alpha]).to(device))
        else:
            self.alpha = torch.nn.Parameter(torch.rand(1).to(device))
self.mean_prior = torch.nn.Parameter(torch.zeros(10).to(device))
self.logvar_prior = torch.nn.Parameter(torch.zeros(10).to(device))
self.beta = beta
self.dual = dual
self.invert_alpha = invert_alpha
self.device = device
def __call__(self, data, recon_data, latent_dist, is_train, storer,
**kwargs):
storer = self._pre_call(is_train, storer)
rec_loss = _reconstruction_loss(data, recon_data,
storer=storer,
distribution=self.rec_dist)
gjs_loss = _gjs_normal_loss_train_prior(*latent_dist,
mean_prior=self.mean_prior,
logvar_prior=self.logvar_prior,
dual=self.dual,
a=self.alpha,
invert_alpha=self.invert_alpha,
storer=storer)
loss = rec_loss + self.beta * gjs_loss
if storer is not None:
storer['loss'] += [loss.item()]
return loss, rec_loss
# HELPERS
def _reconstruction_loss(data, recon_data, distribution="bernoulli",
storer=None):
"""
Calculates the per image reconstruction loss for a batch of data.
I.e. negative log likelihood.
Parameters
----------
data : torch.Tensor
Input data (e.g. batch of images). Shape : (batch_size, n_chan,
height, width).
recon_data : torch.Tensor
Reconstructed data. Shape : (batch_size, n_chan, height, width).
distribution : {"bernoulli", "gaussian", "laplace"}
        Distribution of the likelihood on each pixel. Implicitly defines
        the loss: Bernoulli corresponds to a binary cross entropy (BCE) loss
        and is the most commonly used. It has the issue that it doesn't
        penalize (0.1, 0.2) and (0.4, 0.5) in the same way, which might not be
        optimal. Gaussian distribution corresponds to MSE, and is sometimes
        used, but is hard to train because it ends up focusing on only a few
        pixels that are very wrong. Laplace distribution corresponds to L1 and
        partially solves the issue of MSE.
storer : dict
Dictionary in which to store important variables for vizualisation.
Returns
-------
loss : torch.Tensor
Per image cross entropy (i.e. normalized per batch but not pixel and
channel)
"""
batch_size, n_chan, _, _ = recon_data.size()
# is_colored = n_chan == 3
recon_data = torch.clamp(recon_data, 0, 1)
if distribution == "bernoulli":
loss = F.binary_cross_entropy(recon_data, data, reduction="sum")
elif distribution == "gaussian":
# loss in [0,255] space but normalized by 255 to not be too big
# loss = F.mse_loss(recon_data * 255, data * 255, reduction="sum") / 255
loss = F.mse_loss(recon_data, data, reduction="sum")
elif distribution == "laplace":
# loss in [0,255] space but normalized by 255 to not be too big but
# multiply by 255 and divide 255, is the same as not doing anything
# for L1
loss = F.l1_loss(recon_data, data, reduction="sum")
loss = loss * 3
        # empirical value to give similar values to bernoulli => same HP.
loss = loss * (loss != 0) # masking to avoid nan
else:
assert distribution not in RECON_DIST
raise ValueError("Unkown distribution: {}".format(distribution))
loss = loss / batch_size
# if storer is not None:
# storer['recon_loss'] += [loss.item()]
return loss
def _kl_normal_loss(m_1: torch.Tensor,
lv_1: torch.Tensor,
m_2: torch.Tensor,
lv_2: torch.Tensor,
term: Optional[str] = '',
storer: Optional[Dict] = None
) -> torch.Tensor:
"""Calculates the KL divergence between two normal distributions
with diagonal covariance matrices."""
latent_dim = m_1.size(1)
latent_kl = (0.5 * (-1 + (lv_2 - lv_1) + lv_1.exp() / lv_2.exp()
+ (m_2 - m_1).pow(2) / lv_2.exp()).mean(dim=0))
total_kl = latent_kl.sum()
if storer is not None:
storer['kl_loss' + str(term)] += [total_kl.item()]
for i in range(latent_dim):
storer['kl_loss' + str(term) + f'_{i}'] += [latent_kl[i].item()]
return total_kl
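# Illustrative sanity-check sketch (not part of the original module): the closed
# form above should agree with torch.distributions' KL between diagonal Gaussians.
def _example_kl_check():
    from torch.distributions import Normal, kl_divergence
    m_1, lv_1 = torch.randn(4, 3), torch.randn(4, 3)
    m_2, lv_2 = torch.randn(4, 3), torch.randn(4, 3)
    closed_form = _kl_normal_loss(m_1, lv_1, m_2, lv_2)
    reference = kl_divergence(Normal(m_1, (0.5 * lv_1).exp()),
                              Normal(m_2, (0.5 * lv_2).exp())).mean(dim=0).sum()
    assert torch.allclose(closed_form, reference, atol=1e-4)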
def _get_mu_var(m_1, v_1, m_2, v_2, a=0.5, storer=None):
"""Get mean and standard deviation of geometric mean distribution."""
v_a = 1 / ((1 - a) / v_1 + a / v_2)
m_a = v_a * ((1 - a) * m_1 / v_1 + a * m_2 / v_2)
return m_a, v_a
def _gjs_normal_loss(mean, logvar, dual=False, a=0.5, invert_alpha=True,
storer=None, record_alpha_range=False):
var = logvar.exp()
mean_0 = torch.zeros_like(mean)
var_0 = torch.ones_like(var)
if invert_alpha:
mean_a, var_a = _get_mu_var(
mean, var, mean_0, var_0, a=1-a, storer=storer)
else:
mean_a, var_a = _get_mu_var(
mean, var, mean_0, var_0, a=a, storer=storer)
var_a = torch.log(var_a)
var_0 = torch.log(var_0)
var = torch.log(var)
if dual:
kl_1 = _kl_normal_loss(
mean_a, var_a, mean, var, term=1, storer=storer)
kl_2 = _kl_normal_loss(
mean_a, var_a, mean_0, var_0, term=2, storer=storer)
else:
kl_1 = _kl_normal_loss(
mean, var, mean_a, var_a, term=1, storer=storer)
kl_2 = _kl_normal_loss(
mean_0, var_0, mean_a, var_a, term=2, storer=storer)
with torch.no_grad():
_ = _kl_normal_loss(
mean, var, mean_0, var_0, term='rkl', storer=storer)
total_gjs = (1 - a) * kl_1 + a * kl_2
if storer is not None:
storer_label = 'gjs_loss'
if dual:
storer_label += '_dual'
if invert_alpha:
storer_label += '_invert'
storer[storer_label] += [total_gjs.item()]
# Record what the alpha landscape looks like if record_alpha_range
if record_alpha_range:
storer_label = 'gjs_loss'
if dual:
storer_label += '_dual'
if invert_alpha:
storer_label += '_invert'
with torch.no_grad():
for i in range(101):
gjs = _gjs_normal_loss(
mean, logvar,
dual=False, a=i/100, invert_alpha=True, storer=None)
storer[f"storer_label_alpha_test={i/100}"] += [gjs.item()]
return total_gjs
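# Illustrative worked example (not part of the original module): with
# `dual=False, invert_alpha=True`, the skew parameter interpolates between the two
# KL directions -- a=0 recovers the reverse KL(q || p) used by the vanilla VAE and
# a=1 recovers the forward KL(p || q).
def _example_gjs_limits():
    mean, logvar = torch.randn(4, 3), torch.randn(4, 3)
    zeros = torch.zeros_like(mean)
    rev_kl = _kl_normal_loss(mean, logvar, zeros, zeros)
    fwd_kl = _kl_normal_loss(zeros, zeros, mean, logvar)
    gjs_a0 = _gjs_normal_loss(mean, logvar, dual=False, a=0.0, invert_alpha=True)
    gjs_a1 = _gjs_normal_loss(mean, logvar, dual=False, a=1.0, invert_alpha=True)
    assert torch.allclose(gjs_a0, rev_kl, atol=1e-4)
    assert torch.allclose(gjs_a1, fwd_kl, atol=1e-4)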
def _mmd_loss(mean: Tensor,
logvar: Tensor,
storer: Optional[Dict] = None
) -> Tensor:
"""Calculates the maximum mean discrepancy between latent distributions."""
_, latent_dim = mean.shape
z = torch.cat((mean, logvar), axis=1)
true_samples = torch.randn((200, 2 * latent_dim), requires_grad=False)
true_samples = true_samples.to(z.device)
x_kernel = _compute_kernel(true_samples, true_samples)
y_kernel = _compute_kernel(z, z)
xy_kernel = _compute_kernel(true_samples, z)
mmd = x_kernel.mean() + y_kernel.mean() - 2 * xy_kernel.mean()
if storer is not None:
storer['mmd_loss'] += [mmd.item()]
return mmd
def _compute_kernel(x: Tensor, y: Tensor) -> Tensor:
"""Calculate kernel for maximum mean discrepancy loss."""
x_size = x.size(0)
y_size = y.size(0)
dim = x.size(1)
x = x.unsqueeze(1) # (x_size, 1, dim)
y = y.unsqueeze(0) # (1, y_size, dim)
tiled_x = x.expand(x_size, y_size, dim)
tiled_y = y.expand(x_size, y_size, dim)
kernel_input = (tiled_x - tiled_y).pow(2).mean(2) / float(dim)
kernel_output = torch.exp(-kernel_input) # (x_size, y_size)
return kernel_output
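# Illustrative sketch (not part of the original module): the RBF-kernel MMD estimate
# between two batches drawn from the same standard normal should be close to zero.
def _example_mmd():
    x = torch.randn(200, 20)
    y = torch.randn(200, 20)
    mmd = (_compute_kernel(x, x).mean() + _compute_kernel(y, y).mean()
           - 2 * _compute_kernel(x, y).mean())
    return mmd      # small positive value that shrinks as the batch size grows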
def _permute_dims(latent_sample):
"""
Implementation of Algorithm 1 in ref [1]. Randomly permutes the sample from
q(z) (latent_dist) across the batch for each of the latent dimensions (mean
and log_var).
Parameters
----------
latent_sample: torch.Tensor
sample from the latent dimension using the reparameterisation trick
shape : (batch_size, latent_dim).
References
----------
[1] Kim, Hyunjik, and Andriy Mnih. "Disentangling by factorising."
arXiv preprint arXiv:1802.05983 (2018).
"""
perm = torch.zeros_like(latent_sample)
batch_size, dim_z = perm.size()
for z in range(dim_z):
pi = torch.randperm(batch_size).to(latent_sample.device)
perm[:, z] = latent_sample[pi, z]
return perm
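# Illustrative check (not part of the original module): `_permute_dims` shuffles
# each latent dimension independently across the batch, breaking the dependence
# between dimensions that the FactorVAE discriminator is trained to detect.
def _example_permute_dims():
    z = torch.arange(12.0).reshape(4, 3)    # 4 samples, 3 latent dimensions
    z_perm = _permute_dims(z)
    # every column of z_perm is a permutation of the corresponding column of z
    assert torch.equal(z_perm.sort(dim=0)[0], z.sort(dim=0)[0])
    return z_perm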
def linear_annealing(init, fin, step, annealing_steps):
"""Linear annealing of a parameter."""
if annealing_steps == 0:
return fin
assert fin > init
delta = fin - init
annealed = min(init + delta * step / annealing_steps, fin)
return annealed
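# Illustrative worked example (not part of the original module): with init=0, fin=1
# and 10000 annealing steps -- the default warm-up used above -- the annealed value
# grows linearly with the step count and is clipped at `fin`.
def _example_linear_annealing():
    assert linear_annealing(0, 1, step=0, annealing_steps=10000) == 0.0
    assert linear_annealing(0, 1, step=5000, annealing_steps=10000) == 0.5
    assert linear_annealing(0, 1, step=20000, annealing_steps=10000) == 1.0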
# Batch TC specific
# TO-DO: test if mss is better!
def _get_log_pz_qz_prodzi_qzCx(latent_sample, latent_dist, n_data,
is_mss=True):
batch_size, hidden_dim = latent_sample.shape
# calculate log q(z|x)
log_q_zCx = log_density_gaussian(latent_sample, *latent_dist).sum(dim=1)
# calculate log p(z)
# mean and log var is 0
zeros = torch.zeros_like(latent_sample)
log_pz = log_density_gaussian(latent_sample, zeros, zeros).sum(1)
mat_log_qz = matrix_log_density_gaussian(latent_sample, *latent_dist)
if is_mss:
# use stratification
log_iw_mat = log_importance_weight_matrix(
batch_size, n_data).to(latent_sample.device)
mat_log_qz = mat_log_qz + log_iw_mat.view(batch_size, batch_size, 1)
log_qz = torch.logsumexp(mat_log_qz.sum(2), dim=1, keepdim=False)
log_prod_qzi = torch.logsumexp(mat_log_qz, dim=1, keepdim=False).sum(1)
return log_pz, log_qz, log_prod_qzi, log_q_zCx
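# Illustrative usage sketch (not part of the original module): the keyword arguments
# mirror the argparse flags defined in main.py; the tensors are random placeholders.
def _example_get_loss_f():
    loss_f = get_loss_f("GJS",
                        rec_dist="bernoulli", reg_anneal=10000,
                        GJS_A=0.5, GJS_B=1.0, GJS_invA=True)
    data = torch.rand(8, 1, 32, 32)              # fake batch of images in [0, 1]
    recon = torch.rand(8, 1, 32, 32)
    latent_dist = (torch.randn(8, 10), torch.randn(8, 10))
    loss, rec_loss = loss_f(data, recon, latent_dist, is_train=False, storer=None)
    return loss, rec_loss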
| 32,281 | 33.563169 | 98 | py |
geometric-js | geometric-js-master/vae/models/discriminator.py | """Module containing discriminator for FactorVAE."""
import torch.nn as nn
from vae.utils.initialization import weights_init
class Discriminator(nn.Module):
def __init__(self,
neg_slope=0.2,
latent_dim=10,
hidden_units=1000):
"""Discriminator proposed in [1].
Parameters
----------
neg_slope: float
Hyperparameter for the Leaky ReLu
latent_dim : int
Dimensionality of latent variables.
hidden_units: int
Number of hidden units in the MLP
Model Architecture
------------
- 6 layer multi-layer perceptron, each with 1000 hidden units
- Leaky ReLu activations
- Output 2 logits
References:
[1] Kim, Hyunjik, and Andriy Mnih. "Disentangling by factorising."
arXiv preprint arXiv:1802.05983 (2018).
"""
super(Discriminator, self).__init__()
# Activation parameters
self.neg_slope = neg_slope
self.leaky_relu = nn.LeakyReLU(self.neg_slope, True)
# Layer parameters
self.z_dim = latent_dim
self.hidden_units = hidden_units
# theoretically 1 with sigmoid but gives bad results => use 2 and
# softmax
out_units = 2
# Fully connected layers
self.lin1 = nn.Linear(self.z_dim, hidden_units)
self.lin2 = nn.Linear(hidden_units, hidden_units)
self.lin3 = nn.Linear(hidden_units, hidden_units)
self.lin4 = nn.Linear(hidden_units, hidden_units)
self.lin5 = nn.Linear(hidden_units, hidden_units)
self.lin6 = nn.Linear(hidden_units, out_units)
self.reset_parameters()
def forward(self, z):
# Fully connected layers with leaky ReLu activations
z = self.leaky_relu(self.lin1(z))
z = self.leaky_relu(self.lin2(z))
z = self.leaky_relu(self.lin3(z))
z = self.leaky_relu(self.lin4(z))
z = self.leaky_relu(self.lin5(z))
z = self.lin6(z)
return z
def reset_parameters(self):
self.apply(weights_init)
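# Illustrative usage sketch (not part of the original module): the discriminator
# maps a batch of latent samples to two logits whose difference serves as the
# total-correlation estimate in FactorVAE.
def _example_discriminator():
    import torch
    disc = Discriminator(latent_dim=10)
    z = torch.randn(4, 10)
    logits = disc(z)                             # shape (4, 2)
    tc_estimate = (logits[:, 0] - logits[:, 1]).mean()
    return tc_estimate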
| 2,135 | 27.864865 | 78 | py |
geometric-js | geometric-js-master/vae/models/vae.py | """Module containing the main VAE class."""
import torch
import torch.nn as nn
import pdb
from vae.utils.initialization import weights_init
from .decoders import DecoderBurgess, DecoderRezendeViola, IntegrationDecoderCNCVAE
from .encoders import EncoderBurgess, IntegrationEncoderCNCVAE
class VAE(nn.Module):
def __init__(self, img_size, latent_dim, encoding_type=None, dense_size=128):
"""
Class which defines model and forward pass.
Parameters
----------
img_size : tuple of ints
Size of images. E.g. (1, 32, 32) or (3, 64, 64).
"""
super(VAE, self).__init__()
if list(img_size[1:]) not in [[32, 32], [64, 64]]:
raise RuntimeError(
f"{img_size} sized images not supported. Only (None, 32, 32)"
+ "and (None, 64, 64) supported. Build your own architecture "
+ "or reshape images!")
self.latent_dim = latent_dim
self.img_size = img_size
self.num_pixels = self.img_size[1] * self.img_size[2]
if encoding_type == 'IntegrativeCNCVAE':
self.encoder = IntegrationEncoderCNCVAE(
data_size=img_size,
dense_units=dense_size,
latent_dim=latent_dim)
            self.decoder = IntegrationDecoderCNCVAE(
data_size=img_size,
dense_units=dense_size,
latent_dim=latent_dim)
elif encoding_type == 'TamingVAEs':
self.encoder = EncoderBurgess(img_size, self.latent_dim)
self.decoder = DecoderRezendeViola(img_size, self.latent_dim)
else:
self.encoder = EncoderBurgess(img_size, self.latent_dim)
self.decoder = DecoderBurgess(img_size, self.latent_dim)
self.reset_parameters()
def reparameterize(self, mean, logvar):
"""
Samples from a normal distribution using the reparameterization trick.
Parameters
----------
mean : torch.Tensor
Mean of the normal distribution. Shape (batch_size, latent_dim)
logvar : torch.Tensor
Diagonal log variance of the normal distribution.
Shape : (batch_size, latent_dim)
"""
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mean + std * eps
else:
return mean
def forward(self, x):
"""
Forward pass of model.
Parameters
----------
x : torch.Tensor
Batch of data. Shape (batch_size, n_chan, height, width)
"""
latent_dist = self.encoder(x)
latent_sample = self.reparameterize(*latent_dist)
reconstruct = self.decoder(latent_sample)
return reconstruct, latent_dist, latent_sample
def reset_parameters(self):
self.apply(weights_init)
def sample_latent(self, x):
"""
Returns a sample from the latent distribution.
Parameters
----------
x : torch.Tensor
Batch of data. Shape (batch_size, n_chan, height, width)
"""
latent_dist = self.encoder(x)
latent_sample = self.reparameterize(*latent_dist)
return latent_sample
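if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): one forward pass
    # of the default Burgess encoder/decoder on a random batch of 32x32
    # greyscale images.
    model = VAE(img_size=(1, 32, 32), latent_dim=10)
    x = torch.rand(16, 1, 32, 32)
    reconstruction, (mu, logvar), z = model(x)
    print(reconstruction.shape, mu.shape, z.shape)
    # torch.Size([16, 1, 32, 32]) torch.Size([16, 10]) torch.Size([16, 10])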
| 3,294 | 30.682692 | 83 | py |
geometric-js | geometric-js-master/vae/models/__init__.py | 0 | 0 | 0 | py |
|
geometric-js | geometric-js-master/vae/models/decoders.py | """Module containing the decoders."""
import numpy as np
import torch
import torch.nn as nn
import pdb
class DecoderBurgess(nn.Module):
def __init__(self, img_size,
latent_dim=10):
r"""Decoder of the model proposed in [1].
Parameters
----------
img_size : tuple of ints
Size of images. E.g. (1, 32, 32) or (3, 64, 64).
latent_dim : int
Dimensionality of latent output.
Model Architecture (transposed for decoder)
------------
- 4 convolutional layers (each with 32 channels), (4 x 4 kernel),
(stride of 2)
- 2 fully connected layers (each of 256 units)
- Latent distribution:
- 1 fully connected layer of 20 units (log variance and mean for
10 Gaussians)
References:
[1] Burgess, Christopher P., et al. "Understanding disentangling in
$\beta$-VAE." arXiv preprint arXiv:1804.03599 (2018).
"""
super(DecoderBurgess, self).__init__()
# Layer parameters
hid_channels = 32
kernel_size = 4
hidden_dim = 256
self.img_size = img_size
# Shape required to start transpose convs
self.reshape = (hid_channels, kernel_size, kernel_size)
n_chan = self.img_size[0]
self.img_size = img_size
# Fully connected layers
self.lin1 = nn.Linear(latent_dim, hidden_dim)
self.lin2 = nn.Linear(hidden_dim, hidden_dim)
self.lin3 = nn.Linear(hidden_dim, np.product(self.reshape))
# Convolutional layers
cnn_kwargs = dict(stride=2, padding=1)
# If input image is 64x64 do fourth convolution
if self.img_size[1] == self.img_size[2] == 64:
self.convT_64 = nn.ConvTranspose2d(
hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT1 = nn.ConvTranspose2d(
hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT2 = nn.ConvTranspose2d(
hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT3 = nn.ConvTranspose2d(
hid_channels, n_chan, kernel_size, **cnn_kwargs)
def forward(self, z):
batch_size = z.size(0)
# Fully connected layers with ReLu activations
x = torch.relu(self.lin1(z))
x = torch.relu(self.lin2(x))
x = torch.relu(self.lin3(x))
x = x.view(batch_size, *self.reshape)
# Convolutional layers with ReLu activations
if self.img_size[1] == self.img_size[2] == 64:
x = torch.relu(self.convT_64(x))
x = torch.relu(self.convT1(x))
x = torch.relu(self.convT2(x))
# Sigmoid activation for final conv layer
x = torch.sigmoid(self.convT3(x))
return x
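# Usage sketch (not part of the original module): DecoderBurgess maps latent
# codes back to image space, e.g.
#   >>> dec = DecoderBurgess(img_size=(1, 32, 32), latent_dim=10)
#   >>> dec(torch.randn(16, 10)).shape
#   torch.Size([16, 1, 32, 32])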
class DecoderRezendeViola(nn.Module):
def __init__(self, img_size,
latent_dim=10):
r"""Decoder of the model used in [1].
Parameters
----------
img_size : tuple of ints
Size of images. E.g. (1, 32, 32) or (3, 64, 64).
latent_dim : int
Dimensionality of latent output.
Model Architecture (transposed for decoder)
------------
- 4 convolutional layers (each with 32 channels), (4 x 4 kernel),
(stride of 2)
- 2 fully connected layers (each of 256 units)
- Latent distribution:
- 1 fully connected layer of 20 units (log variance and mean for
10 Gaussians)
References:
[1] Danilo Jimenez Rezende and Fabio Viola. Taming vaes, 2018.
"""
super(DecoderRezendeViola, self).__init__()
# Layer parameters
hid_channels = 32
kernel_size = 4
hidden_dim = 256
self.img_size = img_size
# Shape required to start transpose convs
self.reshape = (hid_channels, kernel_size, kernel_size)
n_chan = self.img_size[0]
self.img_size = img_size
# Fully connected layers
self.lin1 = nn.Linear(latent_dim, hidden_dim)
self.lin2 = nn.Linear(hidden_dim, hidden_dim)
self.lin3 = nn.Linear(hidden_dim, np.product(self.reshape))
# Convolutional layers
cnn_kwargs = dict(stride=2, padding=1)
# If input image is 64x64 do fourth convolution
if self.img_size[1] == self.img_size[2] == 64:
self.convT_64 = nn.ConvTranspose2d(
hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT1 = nn.ConvTranspose2d(
hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT2 = nn.ConvTranspose2d(
hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT3 = nn.ConvTranspose2d(
hid_channels, 2 * n_chan, kernel_size, **cnn_kwargs)
def reparameterize(self, mean, logvar):
"""
Samples from a normal distribution using the reparameterization trick.
Parameters
----------
mean : torch.Tensor
Mean of the normal distribution. Shape (batch_size, latent_dim)
logvar : torch.Tensor
Diagonal log variance of the normal distribution.
Shape : (batch_size, latent_dim)
"""
if self.training:
std = torch.exp(0.5 * logvar)
# std = torch.zeros_like(mean) + 0.25
eps = torch.randn_like(std)
return mean + std * eps
else:
return mean
def forward(self, z):
batch_size = z.size(0)
# Fully connected layers with ReLu activations
x = torch.relu(self.lin1(z))
x = torch.relu(self.lin2(x))
x = torch.relu(self.lin3(x))
x = x.view(batch_size, *self.reshape)
# Convolutional layers with ReLu activations
if self.img_size[1] == self.img_size[2] == 64:
x = torch.relu(self.convT_64(x))
x = torch.relu(self.convT1(x))
x = torch.relu(self.convT2(x))
# Sigmoid activation for final conv layer
x = torch.sigmoid(self.convT3(x))
out = self.reparameterize(x[:,0,:,:].view(-1, self.img_size[0],self.img_size[1], self.img_size[2]), x[:,1,:,:].view(-1, self.img_size[0],self.img_size[1], self.img_size[2]))
return out
class IntegrationDecoderCNCVAE(nn.Module):
def __init__(self, data_size, latent_dim=16, dense_units=128):
r"""Encoder of the concatanation VAE [1].
Parameters
----------
data_size : int
Dimensionality of the input data
dense_units : int
Number of units for the dense layer
latent_dim : int
Dimensionality of latent output.
Model Architecture (transposed for decoder)
------------
- 1 fully connected layer with units defined by dense_units
- Latent distribution:
- 1 fully connected layer of latent_dim units (log variance and mean for
10 Gaussians)
References:
[1] Simidjievski, Nikola et al. “Variational Autoencoders for Cancer
Data Integration: Design Principles and Computational Practice.”
Frontiers in genetics vol. 10 1205. 11 Dec. 2019,
doi:10.3389/fgene.2019.01205
"""
super(IntegrationDecoderCNCVAE, self).__init__()
self.data_size = data_size
self.dense_units = dense_units
self.latent_dim = latent_dim
# define decoding layers
self.de_embed = nn.Linear(self.latent_dim, self.dense_units)
        self.decode = nn.Linear(self.dense_units, self.data_size)
def forward(self, z):
hidden = self.de_embed(z)
x = self.decode(hidden)
return x | 7,814 | 33.126638 | 181 | py |
geometric-js | geometric-js-master/vae/models/encoders.py | """Module containing the encoders."""
import numpy as np
import torch
import torch.nn as nn
import pdb
class EncoderBurgess(nn.Module):
def __init__(self, img_size,
latent_dim=10):
r"""Encoder of the model proposed in [1].
Parameters
----------
img_size : tuple of ints
Size of images. E.g. (1, 32, 32) or (3, 64, 64).
latent_dim : int
Dimensionality of latent output.
Model Architecture (transposed for decoder)
------------
- 4 convolutional layers (each with 32 channels), (4 x 4 kernel),
(stride of 2)
- 2 fully connected layers (each of 256 units)
- Latent distribution:
- 1 fully connected layer of 20 units (log variance and mean for
10 Gaussians)
References:
[1] Burgess, Christopher P., et al. "Understanding disentangling in
$\beta$-VAE." arXiv preprint arXiv:1804.03599 (2018).
"""
super(EncoderBurgess, self).__init__()
# Layer parameters
hid_channels = 32
kernel_size = 4
hidden_dim = 256
self.latent_dim = latent_dim
self.img_size = img_size
# Shape required to start transpose convs
self.reshape = (hid_channels, kernel_size, kernel_size)
n_chan = self.img_size[0]
# Convolutional layers
cnn_kwargs = dict(stride=2, padding=1)
self.conv1 = nn.Conv2d(
n_chan, hid_channels, kernel_size, **cnn_kwargs)
self.conv2 = nn.Conv2d(
hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.conv3 = nn.Conv2d(
hid_channels, hid_channels, kernel_size, **cnn_kwargs)
# If input image is 64x64 do fourth convolution
if self.img_size[1] == self.img_size[2] == 64:
self.conv_64 = nn.Conv2d(
hid_channels, hid_channels, kernel_size, **cnn_kwargs)
# Fully connected layers
self.lin1 = nn.Linear(np.product(self.reshape), hidden_dim)
self.lin2 = nn.Linear(hidden_dim, hidden_dim)
# Fully connected layers for mean and variance
self.mu_logvar_gen = nn.Linear(hidden_dim, self.latent_dim * 2)
def forward(self, x):
batch_size = x.size(0)
# Convolutional layers with ReLu activations
x = torch.relu(self.conv1(x))
x = torch.relu(self.conv2(x))
x = torch.relu(self.conv3(x))
if self.img_size[1] == self.img_size[2] == 64:
x = torch.relu(self.conv_64(x))
# Fully connected layers with ReLu activations
x = x.view((batch_size, -1))
x = torch.relu(self.lin1(x))
x = torch.relu(self.lin2(x))
# Fully connected layer for log variance and mean
# Log std-dev in paper (bear in mind)
mu_logvar = self.mu_logvar_gen(x)
mu, logvar = mu_logvar.view(-1, self.latent_dim, 2).unbind(-1)
return mu, logvar
class IntegrationEncoderCNCVAE(nn.Module):
def __init__(self, data_size, dense_units=128, latent_dim=16):
r"""Encoder of the concatanation VAE [1].
Parameters
----------
data_size : int
Dimensionality of the input data
dense_units : int
Number of units for the dense layer
latent_dim : int
Dimensionality of latent output.
Model Architecture (transposed for decoder)
------------
- 1 fully connected layer with units defined by dense_units
- Latent distribution:
- 1 fully connected layer of latent_dim units (log variance and mean for
10 Gaussians)
References:
[1] Simidjievski, Nikola et al. “Variational Autoencoders for Cancer
Data Integration: Design Principles and Computational Practice.”
Frontiers in genetics vol. 10 1205. 11 Dec. 2019,
doi:10.3389/fgene.2019.01205
"""
super(IntegrationEncoderCNCVAE, self).__init__()
self.data_size = data_size
self.dense_units = dense_units
self.latent_dim = latent_dim
# define encoding layers
self.encode = nn.Linear(self.data_size, self.dense_units)
self.embed = nn.Linear(self.dense_units, self.latent_dim * 2)
def forward(self, x):
x = self.encode(x)
z = self.embed(x)
mu, logvar = z.view(-1, self.latent_dim, 2).unbind(-1)
return mu, logvar
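if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): encode a random
    # batch and check the shapes of the posterior parameters.
    enc = EncoderBurgess(img_size=(1, 64, 64), latent_dim=10)
    mu, logvar = enc(torch.rand(8, 1, 64, 64))
    print(mu.shape, logvar.shape)  # torch.Size([8, 10]) torch.Size([8, 10])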
| 4,510 | 32.169118 | 84 | py |
geometric-js | geometric-js-master/vae/utils/math.py | """Math module."""
import math
import torch
def matrix_log_density_gaussian(x, mu, logvar):
"""Calculates log density of a Gaussian for all combination of bacth pairs of
`x` and `mu`. I.e. return tensor of shape `(batch_size, batch_size, dim)`
instead of (batch_size, dim) in the usual log density.
Parameters
----------
x: torch.Tensor
Value at which to compute the density. Shape: (batch_size, dim).
mu: torch.Tensor
Mean. Shape: (batch_size, dim).
logvar: torch.Tensor
Log variance. Shape: (batch_size, dim).
batch_size: int
number of training images in the batch
"""
batch_size, dim = x.shape
x = x.view(batch_size, 1, dim)
mu = mu.view(1, batch_size, dim)
logvar = logvar.view(1, batch_size, dim)
return log_density_gaussian(x, mu, logvar)
def log_density_gaussian(x, mu, logvar):
"""Calculates log density of a Gaussian.
Parameters
----------
x: torch.Tensor or np.ndarray or float
Value at which to compute the density.
mu: torch.Tensor or np.ndarray or float
Mean.
logvar: torch.Tensor or np.ndarray or float
Log variance.
"""
normalization = - 0.5 * (math.log(2 * math.pi) + logvar)
inv_var = torch.exp(-logvar)
log_density = normalization - 0.5 * ((x - mu)**2 * inv_var)
return log_density
def log_importance_weight_matrix(batch_size, dataset_size):
"""
Calculates a log importance weight matrix
Parameters
----------
batch_size: int
number of training images in the batch
dataset_size: int
number of training images in the dataset
"""
N = dataset_size
M = batch_size - 1
strat_weight = (N - M) / (N * M)
W = torch.Tensor(batch_size, batch_size).fill_(1 / M)
W.view(-1)[::M+1] = 1 / N
W.view(-1)[1::M+1] = strat_weight
W[M-1, 0] = strat_weight
return W.log()
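if __name__ == "__main__":
    # Minimal sanity checks (not part of the original module) with a toy batch
    # of 4 samples in a 3-dimensional latent space.
    x = torch.randn(4, 3)
    mu = torch.zeros(4, 3)
    logvar = torch.zeros(4, 3)
    # (batch, dim) log densities under each sample's own Gaussian ...
    print(log_density_gaussian(x, mu, logvar).shape)         # torch.Size([4, 3])
    # ... and (batch, batch, dim) log densities for every pair of rows.
    print(matrix_log_density_gaussian(x, mu, logvar).shape)  # torch.Size([4, 4, 3])
    # Importance weights for minibatch stratified sampling (batch 4, dataset 100).
    print(log_importance_weight_matrix(4, 100).shape)        # torch.Size([4, 4])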
| 1,920 | 24.276316 | 81 | py |
geometric-js | geometric-js-master/vae/utils/modelIO.py | """Model Input/Output module."""
import json
import numpy as np
import os
import re
import torch
from vae.models.vae import VAE
MODEL_FILENAME = "model.pt"
META_FILENAME = "specs.json"
def save_model(model, directory, metadata=None, filename=MODEL_FILENAME):
"""
Save a model and corresponding metadata.
Parameters
----------
model : nn.Module
Model.
directory : str
Path to the directory where to save the data.
metadata : dict
Metadata to save.
"""
device = next(model.parameters()).device
model.cpu()
if metadata is None:
# save the minimum required for loading
metadata = dict(img_size=model.img_size, latent_dim=model.latent_dim)
save_metadata(metadata, directory)
path_to_model = os.path.join(directory, filename)
torch.save(model.state_dict(), path_to_model)
model.to(device) # restore device
def load_metadata(directory, filename=META_FILENAME):
"""Load the metadata of a training directory.
Parameters
----------
directory : string
Path to folder where model is saved. For example './experiments/mnist'.
"""
path_to_metadata = os.path.join(directory, filename)
with open(path_to_metadata) as metadata_file:
metadata = json.load(metadata_file)
return metadata
def save_metadata(metadata, directory, filename=META_FILENAME, **kwargs):
"""Load the metadata of a training directory.
Parameters
----------
metadata:
Object to save
directory: string
Path to folder where to save model. For example './experiments/mnist'.
kwargs:
Additional arguments to `json.dump`
"""
path_to_metadata = os.path.join(directory, filename)
with open(path_to_metadata, 'w') as f:
json.dump(metadata, f, indent=4, sort_keys=True, **kwargs)
def load_model(directory, is_gpu=True, filename=MODEL_FILENAME):
"""Load a trained model.
Parameters
----------
directory : string
Path to folder where model is saved. For example './experiments/mnist'.
is_gpu : bool
        Whether to load on GPU if available.
"""
device = torch.device("cuda" if torch.cuda.is_available() and is_gpu
else "cpu")
path_to_model = os.path.join(directory, MODEL_FILENAME)
metadata = load_metadata(directory)
img_size = metadata["img_size"]
latent_dim = metadata["latent_dim"]
path_to_model = os.path.join(directory, filename)
model = _get_model(img_size, latent_dim, device, path_to_model)
return model
def load_checkpoints(directory, is_gpu=True):
"""Load all chechpointed models.
Parameters
----------
directory : string
Path to folder where model is saved. For example './experiments/mnist'.
is_gpu : bool
        Whether to load on GPU if available.
"""
checkpoints = []
for root, _, filenames in os.walk(directory):
for filename in filenames:
results = re.search(r'.*?-([0-9].*?).pt', filename)
if results is not None:
epoch_idx = int(results.group(1))
model = load_model(root, is_gpu=is_gpu, filename=filename)
checkpoints.append((epoch_idx, model))
return checkpoints
def _get_model(img_size, latent_dim, device, path_to_model):
""" Load a single model.
Parameters
----------
img_size : tuple
Tuple of the number of pixels in the image width and height.
For example (32, 32) or (64, 64).
latent_dim : int
The number of latent dimensions in the bottleneck.
device : str
Either 'cuda' or 'cpu'.
    path_to_model : str
        Full path to the saved model file.
"""
model = VAE(img_size, latent_dim).to(device)
# works with state_dict to make it independent of the file structure
model.load_state_dict(torch.load(path_to_model), strict=False)
model.eval()
return model
def numpy_serialize(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def save_np_arrays(arrays, directory, filename):
"""Save dictionary of arrays in json file."""
save_metadata(arrays, directory,
filename=filename, default=numpy_serialize)
def load_np_arrays(directory, filename):
"""Load dictionary of arrays from json file."""
arrays = load_metadata(directory, filename=filename)
return {k: np.array(v) for k, v in arrays.items()}
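if __name__ == "__main__":
    # Minimal round-trip sketch (not part of the original module): save a
    # freshly initialised model to a temporary directory and load it back.
    import tempfile
    model = VAE(img_size=(1, 32, 32), latent_dim=10)
    with tempfile.TemporaryDirectory() as tmp_dir:
        save_model(model, tmp_dir)
        restored = load_model(tmp_dir, is_gpu=False)
    print(restored.latent_dim)  # 10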
| 4,623 | 25.422857 | 79 | py |
geometric-js | geometric-js-master/vae/utils/initialization.py | """Initialisation module."""
import torch
import torch.nn as nn
def get_activation_name(activation):
"""Given a string or a `torch.nn.modules.activation` return the name of
the activation."""
if isinstance(activation, str):
return activation
mapper = {nn.LeakyReLU: "leaky_relu", nn.ReLU: "relu", nn.Tanh: "tanh",
nn.Sigmoid: "sigmoid", nn.Softmax: "sigmoid"}
for k, v in mapper.items():
if isinstance(activation, k):
            return v
    raise ValueError("Unknown activation type: {}".format(activation))
def get_gain(activation):
"""Given an object of `torch.nn.modules.activation` or an activation name
return the correct gain."""
if activation is None:
return 1
activation_name = get_activation_name(activation)
param = None if activation_name != "leaky_relu" \
else activation.negative_slope
gain = nn.init.calculate_gain(activation_name, param)
return gain
def linear_init(layer, activation="relu"):
"""Initialize a linear layer.
Args:
layer (nn.Linear): parameters to initialize.
activation (`torch.nn.modules.activation` or str, optional) activation
that will be used on the `layer`.
"""
x = layer.weight
if activation is None:
return nn.init.xavier_uniform_(x)
activation_name = get_activation_name(activation)
if activation_name == "leaky_relu":
a = 0 if isinstance(activation, str) else activation.negative_slope
return nn.init.kaiming_uniform_(x, a=a, nonlinearity='leaky_relu')
elif activation_name == "relu":
return nn.init.kaiming_uniform_(x, nonlinearity='relu')
elif activation_name in ["sigmoid", "tanh"]:
return nn.init.xavier_uniform_(x, gain=get_gain(activation))
def weights_init(module):
if isinstance(module, torch.nn.modules.conv._ConvNd):
linear_init(module)
elif isinstance(module, nn.Linear):
linear_init(module)
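if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): re-initialise a
    # small network in place with the rules above and inspect the ReLU gain.
    net = nn.Sequential(nn.Conv2d(1, 8, 3), nn.ReLU(), nn.Linear(8, 2))
    net.apply(weights_init)
    print(get_gain(nn.ReLU()), net[2].weight.std().item())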
| 1,986 | 29.569231 | 78 | py |
geometric-js | geometric-js-master/vae/utils/__init__.py | 0 | 0 | 0 | py |
|
geometric-js | geometric-js-master/utils/datasets.py | """Dataset loading module.
Adapted from: https://github.com/YannDubs/disentangling-vae"""
import abc
import glob
import hashlib
import h5py
import logging
import numpy as np
import os
import subprocess
import tarfile
import torch
import urllib.request
import zipfile
from PIL import Image
from skimage.io import imread
from torch import Tensor
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from tqdm import tqdm
from typing import Any, List, Optional, Tuple
DIR = os.path.abspath(os.path.dirname(__file__))
COLOUR_BLACK = 0
COLOUR_WHITE = 1
DATASETS_DICT = {"mnist": "MNIST",
"fashion": "FashionMNIST",
"nmnist": "NoisyMNIST",
"bmnist": "BinarizedMNIST",
"dsprites": "DSprites",
"celeba": "CelebA",
"chairs": "Chairs"}
DATASETS = list(DATASETS_DICT.keys())
def get_dataset(dataset: str) -> Dataset:
"""Return the correct dataset."""
dataset = dataset.lower()
try:
# eval because stores name as string in order to put it at top of file
return eval(DATASETS_DICT[dataset])
except KeyError:
raise ValueError(f"Unkown dataset: {dataset}")
def get_img_size(dataset: str) -> Tuple:
"""Return the correct image size."""
return get_dataset(dataset).img_size
def get_background(dataset):
"""Return the image background color."""
return get_dataset(dataset).background_color
def get_dataloaders(dataset: str,
train: Optional[bool] = True,
noise: Optional[float] = None,
root: Optional[str] = None,
pin_memory: Optional[bool] = True,
batch_size: Optional[int] = 128,
logger: Optional[Any] = logging.getLogger(__name__),
**kwargs: Any
) -> DataLoader:
"""A generic data loader
dataset : {"mnist", "fashion", "dsprites", "celeba", "chairs"}
Name of the dataset to load
"""
    pin_memory = pin_memory and torch.cuda.is_available()  # only pin if a GPU is available
Dataset = get_dataset(dataset)
# Initialise the dataset class:
if root is None:
if noise == 0.0 or noise is None:
dataset = Dataset(train=train, logger=logger)
else:
dataset = Dataset(train=train, noise=noise, logger=logger)
else:
if noise == 0.0 or noise is None:
dataset = Dataset(train=train, root=root, logger=logger)
else:
dataset = Dataset(train=train, noise=noise, root=root,
logger=logger)
return DataLoader(dataset,
batch_size=batch_size,
shuffle=train,
pin_memory=pin_memory,
**kwargs)
class DisentangledDataset(Dataset, abc.ABC):
"""Base Class for disentangled VAE datasets."""
def __init__(self,
root: str,
transforms_list: Optional[List[Any]] = [],
logger: Optional[Any] = logging.getLogger(__name__)):
self.root = root
self.train_data = os.path.join(root, type(self).files["train"])
self.transforms = transforms.Compose(transforms_list)
self.logger = logger
if not os.path.isdir(root):
self.logger.info(f"Downloading {str(type(self))} ...")
self.download()
self.logger.info("Finished Downloading.")
def __len__(self) -> int:
return len(self.imgs)
@abc.abstractmethod
def __getitem__(self, idx: int) -> torch.Tensor:
pass
@abc.abstractmethod
def download(self):
pass
class DSprites(DisentangledDataset):
"""DSprites Dataset from [1].
Disentanglement test Sprites dataset. Procedurally generated 2D shapes, from 6
disentangled latent factors. This dataset uses 6 latents, controlling the color,
shape, scale, rotation and position of a sprite. All possible variations of
the latents are present. Ordering along dimension 1 is fixed and can be mapped
back to the exact latent values that generated that image. Pixel outputs are
different. No noise added.
Notes
-----
- Link : https://github.com/deepmind/dsprites-dataset/
- hard coded metadata because issue with python 3 loading of python 2
References
----------
[1] Higgins, I., Matthey, L., Pal, A., Burgess, C., Glorot, X., Botvinick,
M., ... & Lerchner, A. (2017). beta-vae: Learning basic visual concepts
with a constrained variational framework. In International Conference
on Learning Representations.
"""
urls = {"train": "https://github.com/deepmind/dsprites-dataset/blob/master/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz?raw=true"}
files = {"train": "dsprite_train.npz"}
lat_names = ('shape', 'scale', 'orientation', 'posX', 'posY')
lat_sizes = np.array([3, 6, 40, 32, 32])
img_size = (1, 64, 64)
background_color = COLOUR_BLACK
lat_values = {
'posX': np.array([0., 0.03225806, 0.06451613, 0.09677419, 0.12903226,
0.16129032, 0.19354839, 0.22580645, 0.25806452,
0.29032258, 0.32258065, 0.35483871, 0.38709677,
0.41935484, 0.4516129, 0.48387097, 0.51612903,
0.5483871, 0.58064516, 0.61290323, 0.64516129,
0.67741935, 0.70967742, 0.74193548, 0.77419355,
0.80645161, 0.83870968, 0.87096774, 0.90322581,
0.93548387, 0.96774194, 1.]),
'posY': np.array([0., 0.03225806, 0.06451613, 0.09677419, 0.12903226,
0.16129032, 0.19354839, 0.22580645, 0.25806452,
0.29032258, 0.32258065, 0.35483871, 0.38709677,
0.41935484, 0.4516129, 0.48387097, 0.51612903,
0.5483871, 0.58064516, 0.61290323, 0.64516129,
0.67741935, 0.70967742, 0.74193548, 0.77419355,
0.80645161, 0.83870968, 0.87096774, 0.90322581,
0.93548387, 0.96774194, 1.]),
'scale': np.array([0.5, 0.6, 0.7, 0.8, 0.9, 1.]),
'orientation': np.array([0., 0.16110732, 0.32221463, 0.48332195,
0.64442926, 0.80553658, 0.96664389, 1.12775121,
1.28885852, 1.44996584, 1.61107316, 1.77218047,
1.93328779, 2.0943951, 2.25550242, 2.41660973,
2.57771705, 2.73882436, 2.89993168, 3.061039,
3.22214631, 3.38325363, 3.54436094, 3.70546826,
3.86657557, 4.02768289, 4.1887902, 4.34989752,
4.51100484, 4.67211215, 4.83321947, 4.99432678,
5.1554341, 5.31654141, 5.47764873, 5.63875604,
5.79986336, 5.96097068, 6.12207799, 6.28318531]),
'shape': np.array([1., 2., 3.]),
'color': np.array([1.])}
def __init__(self,
train: Optional[bool] = True,
root: Optional[str] = os.path.join(DIR, '../data/dsprites/'),
**kwargs: Optional[Any]):
super().__init__(root, [transforms.ToTensor()], **kwargs)
dataset_zip = np.load(self.train_data)
self.imgs = dataset_zip['imgs']
self.lat_values = dataset_zip['latents_values']
def download(self):
os.makedirs(self.root)
subprocess.check_call(["curl", "-L", type(self).urls["train"],
"--output", self.train_data])
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, np.ndarray]:
"""
Return
------
sample : torch.Tensor
Tensor in [0.,1.] of shape `img_size`.
lat_value : np.array
Array of length 6, that gives the value of each factor of variation.
"""
        # stored images are binary with shape (H x W), so multiply by 255 to get
        # pixel values and add a channel dimension
sample = np.expand_dims(self.imgs[idx] * 255, axis=-1)
# ToTensor transforms numpy.ndarray (H x W x C) in the range
# [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
sample = self.transforms(sample)
lat_value = self.lat_values[idx]
return sample, lat_value
class CelebA(DisentangledDataset):
"""CelebA Dataset from [1].
CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset
with more than 200K celebrity images, each with 40 attribute annotations.
The images in this dataset cover large pose variations and background clutter.
CelebA has large diversities, large quantities, and rich annotations, including
10,177 number of identities, and 202,599 number of face images.
Notes
-----
- Link : http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
References
----------
[1] Liu, Z., Luo, P., Wang, X., & Tang, X. (2015). Deep learning face
attributes in the wild. In Proceedings of the IEEE international conference
on computer vision (pp. 3730-3738).
"""
urls = {"train": "https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/celeba.zip"}
files = {"train": "img_align_celeba"}
img_size = (3, 64, 64)
background_color = COLOUR_WHITE
def __init__(self,
train: Optional[bool] = True,
root: Optional[str] = os.path.join(DIR, '../data/celeba'),
**kwargs: Optional[Any]):
super().__init__(root, [transforms.ToTensor()], **kwargs)
self.imgs = glob.glob(self.train_data + '/*')
def download(self):
save_path = os.path.join(self.root, 'celeba.zip')
os.makedirs(self.root)
subprocess.check_call(["curl", "-L", type(self).urls["train"],
"--output", save_path])
hash_code = '00d2c5bc6d35e252742224ab0c1e8fcb'
assert hashlib.md5(open(save_path, 'rb').read()).hexdigest() == hash_code, \
'{} file is corrupted. Remove the file and try again.'.format(save_path)
with zipfile.ZipFile(save_path) as zf:
self.logger.info("Extracting CelebA ...")
zf.extractall(self.root)
os.remove(save_path)
self.logger.info("Resizing CelebA ...")
preprocess(self.train_data, size=type(self).img_size[1:])
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
img_path = self.imgs[idx]
# img values already between 0 and 255
img = imread(img_path)
# put each pixel in [0.,1.] and reshape to (C x H x W)
img = self.transforms(img)
# no label so return 0 (Dataloaders can't return None)
return img, 0
class Chairs(datasets.ImageFolder):
"""Chairs Dataset from [1].
Notes
-----
- Link : https://www.di.ens.fr/willow/research/seeing3Dchairs
References
----------
[1] Aubry, M., Maturana, D., Efros, A. A., Russell, B. C., & Sivic, J. (2014).
Seeing 3d chairs: exemplar part-based 2d-3d alignment using a large dataset
of cad models. In Proceedings of the IEEE conference on computer vision
and pattern recognition (pp. 3762-3769).
"""
urls = {"train": "https://www.di.ens.fr/willow/research/seeing3Dchairs/data/rendered_chairs.tar"}
files = {"train": "chairs_64"}
img_size = (1, 64, 64)
background_color = COLOUR_WHITE
def __init__(self,
train: Optional[bool] = True,
root: Optional[str] = os.path.join(DIR, '../data/chairs'),
logger: Optional[Any] = logging.getLogger(__name__)):
self.root = root
self.train_data = os.path.join(root, type(self).files["train"])
self.transforms = transforms.Compose([transforms.Grayscale(),
transforms.ToTensor()])
self.logger = logger
if not os.path.isdir(root):
self.logger.info("Downloading {} ...".format(str(type(self))))
self.download()
self.logger.info("Finished Downloading.")
super().__init__(self.train_data, transform=self.transforms)
def download(self):
"""Download the dataset."""
save_path = os.path.join(self.root, 'chairs.tar')
os.makedirs(self.root)
subprocess.check_call(["curl", type(self).urls["train"],
"--output", save_path])
self.logger.info("Extracting Chairs ...")
tar = tarfile.open(save_path)
tar.extractall(self.root)
tar.close()
os.rename(os.path.join(self.root, 'rendered_chairs'), self.train_data)
os.remove(save_path)
self.logger.info("Preprocessing Chairs ...")
preprocess(os.path.join(self.train_data, '*/*'), # root/*/*/*.png structure
size=type(self).img_size[1:],
center_crop=(400, 400))
class MNIST(datasets.MNIST):
"""Mnist wrapper. Docs: `datasets.MNIST.`"""
img_size = (1, 32, 32)
background_color = COLOUR_BLACK
def __init__(self, train=True, root=os.path.join(DIR, '../data/mnist'), **kwargs):
super().__init__(root,
train=train,
download=True,
transform=transforms.Compose([
transforms.Pad(2),
transforms.ToTensor()
]))
class FashionMNIST(datasets.FashionMNIST):
"""Fashion Mnist wrapper. Docs: `datasets.FashionMNIST.`"""
img_size = (1, 32, 32)
background_color = COLOUR_BLACK
def __init__(self, train=True,
root=os.path.join(DIR, '../data/fashionMnist'), **kwargs):
super().__init__(root,
train=train,
download=True,
transform=transforms.Compose([
transforms.Pad(2),
transforms.ToTensor()
]))
class NoisyMNIST(Dataset):
"""Noisy MNIST wrapper."""
img_size = (1, 32, 32)
background_color = COLOUR_BLACK
def __init__(self, train=True, noise=None,
root=os.path.join(DIR, '../data/mnist'), **kwargs):
super().__init__()
if train:
mnist_data = torch.load(
os.path.join(root, 'MNIST', 'processed', 'training.pt'))
else:
mnist_data = torch.load(
os.path.join(root, 'MNIST', 'processed', 'test.pt'))
self.x = mnist_data[0]
self.mnist_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Pad(2),
transforms.ToTensor()
])
        self.noise = noise
        if noise is not None:
            self.add_noise = AddGaussianNoise(mean=0.0, std=noise)
self.len = len(self.x)
def __len__(self):
return self.len
def __getitem__(self, idx):
input = self.mnist_transforms(self.x[idx:idx + 1])
if self.noise is not None:
input = self.add_noise(input)
output = self.mnist_transforms(self.x[idx:idx + 1])
return input, output
class BinarizedMNIST(Dataset):
""" Binarized MNIST dataset, proposed in
http://proceedings.mlr.press/v15/larochelle11a/larochelle11a.pdf """
train_file = 'binarized_mnist_train.amat'
val_file = 'binarized_mnist_valid.amat'
test_file = 'binarized_mnist_test.amat'
img_size = (1, 32, 32)
background_color = COLOUR_BLACK
def __init__(self, train=True, root=os.path.join(DIR, '../data/bmnist'),
logger=logging.getLogger(__name__)):
# we ignore transform.
self.root = root
self.train = train # training set or test set
if not self._check_exists():
self.download()
self.data = self._get_data(train=train)
self.mnist_transforms = transforms.Compose([
transforms.Pad(2),
transforms.ToTensor()
])
def __getitem__(self, index):
img = self.data[index]
img = Image.fromarray(img)
img = self.mnist_transforms(img)
# img = transforms.Pad(2)(transforms.ToTensor()(img)).type(torch.FloatTensor)
return img.float(), torch.tensor(-1) # Meaningless tensor instead of target
def __len__(self):
return len(self.data)
def _get_data(self, train=True):
with h5py.File(os.path.join(self.root, 'data.h5'), 'r') as hf:
data = hf.get('train' if train else 'test')
data = np.array(data)
return data
def get_mean_img(self):
return self.data.mean(0).flatten()
def download(self):
if self._check_exists():
return
if not os.path.exists(self.root):
os.makedirs(self.root)
print('Downloading MNIST with fixed binarization...')
for dataset in ['train', 'valid', 'test']:
filename = 'binarized_mnist_{}.amat'.format(dataset)
url = 'http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_{}.amat'.format(dataset)
print('Downloading from {}...'.format(url))
local_filename = os.path.join(self.root, filename)
urllib.request.urlretrieve(url, local_filename)
print('Saved to {}'.format(local_filename))
def filename_to_np(filename):
with open(filename) as f:
lines = f.readlines()
return np.array([[int(i)for i in line.split()] for line in lines]).astype('int8')
train_data = np.concatenate([filename_to_np(os.path.join(self.root, self.train_file)),
filename_to_np(os.path.join(self.root, self.val_file))])
        test_data = filename_to_np(os.path.join(self.root, self.test_file))
with h5py.File(os.path.join(self.root, 'data.h5'), 'w') as hf:
hf.create_dataset('train', data=train_data.reshape(-1, 28, 28))
hf.create_dataset('test', data=test_data.reshape(-1, 28, 28))
print('Done!')
def _check_exists(self):
return os.path.exists(os.path.join(self.root, 'data.h5'))
# HELPERS
def preprocess(root: str,
size: Optional[Tuple[int, int]] = (64, 64),
img_format: Optional[str] = 'JPEG',
center_crop: Optional[Tuple[int, int]] = None
) -> None:
"""Preprocess a folder of images.
Parameters
----------
img_format : string
Format to save the image in. Possible formats:
https://pillow.readthedocs.io/en/3.1.x/handbook/image-file-formats.html.
center_crop : tuple of int
Size (width, height) to center-crop the images.
"""
imgs = []
for ext in [".png", ".jpg", ".jpeg"]:
imgs += glob.glob(os.path.join(root, '*' + ext))
for img_path in tqdm(imgs):
img = Image.open(img_path)
width, height = img.size
        if size is not None and (width != size[1] or height != size[0]):
img = img.resize(size, Image.ANTIALIAS)
if center_crop is not None:
new_width, new_height = center_crop
left = (width - new_width) // 2
top = (height - new_height) // 2
right = (width + new_width) // 2
bottom = (height + new_height) // 2
            img = img.crop((left, top, right, bottom))
img.save(img_path, img_format)
class AddGaussianNoise(object):
def __init__(self,
mean: Optional[float] = 0.0,
std: Optional[float] = 1.0
) -> None:
self.std = std
self.mean = mean
def __call__(self, tensor: Tensor) -> Tensor:
# return tensor + torch.randn(tensor.size()) * self.std + self.mean
# Clamp output so image with noise is still greyscale:
return torch.clamp(tensor + torch.randn(tensor.size()) * self.std + self.mean, 0, 1)
def __repr__(self):
return self.__class__.__name__ + f'(mean={self.mean}, std={self.std})'
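if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): check the noise
    # transform on a fake greyscale image; values stay clamped to [0, 1].
    # A real loader, e.g. get_dataloaders("mnist", batch_size=64), would
    # download the dataset on first use, so it is left out here.
    add_noise = AddGaussianNoise(mean=0.0, std=0.25)
    img = torch.rand(1, 32, 32)
    out = add_noise(img)
    print(add_noise, out.shape, float(out.min()), float(out.max()))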
| 20,300 | 36.181319 | 136 | py |
geometric-js | geometric-js-master/utils/__init__.py | 0 | 0 | 0 | py |
|
geometric-js | geometric-js-master/utils/helpers.py | """Generic helper module."""
import argparse
import numpy as np
import os
import random
import shutil
import torch
def create_safe_directory(directory, logger=None):
"""Create a directory and archive the previous one if already existed."""
if os.path.exists(directory):
if logger is not None:
warn = "Directory {} already exists. Archiving it to {}.zip"
logger.warning(warn.format(directory, directory))
shutil.make_archive(directory, 'zip', directory)
shutil.rmtree(directory)
os.makedirs(directory)
def set_seed(seed):
"""Set all random seeds."""
if seed is not None:
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
# if want pure determinism could uncomment below: but slower
# torch.backends.cudnn.deterministic = True
def get_device(is_gpu=True):
"""Return the correct device"""
return torch.device("cuda" if torch.cuda.is_available() and is_gpu
else "cpu")
def get_model_device(model):
"""Return the device on which a model is."""
return next(model.parameters()).device
def get_n_param(model):
"""Return the number of parameters."""
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
nParams = sum([np.prod(p.size()) for p in model_parameters])
return nParams
def update_namespace_(namespace, dictionary):
    """Update an argparse namespace in place."""
    vars(namespace).update(dictionary)
def check_bounds(value, type=float, lb=-float("inf"), ub=float("inf"),
is_inclusive=True, name="value"):
"""Argparse bound checker"""
value = type(value)
is_in_bound = lb <= value <= ub if is_inclusive else lb < value < ub
if not is_in_bound:
raise argparse.ArgumentTypeError(
f"{name}={value} outside of bounds ({lb},{ub})")
return value
class FormatterNoDuplicate(argparse.ArgumentDefaultsHelpFormatter):
"""Formatter overriding `argparse.ArgumentDefaultsHelpFormatter` to show
`-e, --epoch EPOCH` instead of `-e EPOCH, --epoch EPOCH`
Note
----
- code modified from cPython:
https://github.com/python/cpython/blob/master/Lib/argparse.py
"""
def _format_action_invocation(self, action):
# no args given
if not action.option_strings:
default = self._get_default_metavar_for_positional(action)
metavar, = self._metavar_formatter(action, default)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
for option_string in action.option_strings:
# don't store the DEFAULT
parts.append('%s' % (option_string))
# store DEFAULT for the last one
parts[-1] += ' %s' % args_string
return ', '.join(parts)
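if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): use check_bounds
    # as an argparse type to reject out-of-range learning rates, then seed and
    # pick a device.
    from functools import partial
    parser = argparse.ArgumentParser(formatter_class=FormatterNoDuplicate)
    parser.add_argument("--lr", default=5e-4, help="Learning rate.",
                        type=partial(check_bounds, lb=0.0, ub=1.0, name="lr"))
    args = parser.parse_args(["--lr", "0.001"])
    set_seed(1234)
    print(args.lr, get_device(is_gpu=False))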
| 3,293 | 32.272727 | 77 | py |
geometric-js | geometric-js-master/utils/visualize.py | """Visualisation module.
Adapted from: https://github.com/YannDubs/disentangling-vae"""
import os
import imageio
from PIL import Image
import numpy as np
from scipy import stats
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import make_grid, save_image
from utils.datasets import get_background
from utils.viz_helpers import (read_loss_from_file, add_labels, make_grid_img,
sort_list_by_other, FPS_GIF, concatenate_pad)
TRAIN_FILE = "train_losses.log"
DECIMAL_POINTS = 3
GIF_FILE = "training.gif"
PLOT_NAMES = dict(generate_samples="samples.png",
data_samples="data_samples.png",
reconstruct="reconstruct.png",
traversals="traversals.png",
reconstruct_traverse="reconstruct_traverse.png",
gif_traversals="posterior_traversals.gif",)
class Visualizer():
def __init__(self, model, dataset, model_dir,
save_images=True,
loss_of_interest=None,
display_loss_per_dim=False,
max_traversal=0.475, # corresponds to ~2 for standard normal
upsample_factor=1):
"""
Visualizer is used to generate images of samples, reconstructions,
latent traversals and so on of the trained model.
Parameters
----------
model : disvae.vae.VAE
dataset : str
Name of the dataset.
model_dir : str
The directory that the model is saved to and where the images will
be stored.
save_images : bool, optional
Whether to save images or return a tensor.
loss_of_interest : str, optional
The loss type (as saved in the log file) to order the latent dimensions by and display.
display_loss_per_dim : bool, optional
if the loss should be included as text next to the corresponding latent dimension images.
max_traversal: float, optional
The maximum displacement induced by a latent traversal. Symmetrical
traversals are assumed. If `m>=0.5` then uses absolute value traversal,
if `m<0.5` uses a percentage of the distribution (quantile).
            E.g. for the prior the distribution is a standard normal so `m=0.45`
            corresponds to an absolute value of `1.645` because `2m=90%` of a
            standard normal is between `-1.645` and `1.645`. Note in the case
            of the posterior, the distribution is not standard normal anymore.
        upsample_factor : float, optional
Scale factor to upsample the size of the tensor
"""
self.model = model
self.device = next(self.model.parameters()).device
self.latent_dim = self.model.latent_dim
self.max_traversal = max_traversal
self.save_images = save_images
self.model_dir = model_dir
self.dataset = dataset
self.upsample_factor = upsample_factor
if loss_of_interest is not None:
self.losses = read_loss_from_file(os.path.join(self.model_dir, TRAIN_FILE),
loss_of_interest)
def _get_traversal_range(self, mean=0, std=1):
"""Return the corresponding traversal range in absolute terms."""
max_traversal = self.max_traversal
if max_traversal < 0.5:
max_traversal = (1 - 2 * max_traversal) / 2 # from 0.45 to 0.05
max_traversal = stats.norm.ppf(max_traversal, loc=mean, scale=std) # from 0.05 to -1.645
# symmetrical traversals
return (-1 * max_traversal, max_traversal)
def _traverse_line(self, idx, n_samples, data=None):
"""Return a (size, latent_size) latent sample, corresponding to a traversal
of a latent variable indicated by idx.
Parameters
----------
idx : int
Index of continuous dimension to traverse. If the continuous latent
vector is 10 dimensional and idx = 7, then the 7th dimension
will be traversed while all others are fixed.
n_samples : int
Number of samples to generate.
data : torch.Tensor or None, optional
Data to use for computing the posterior. Shape (N, C, H, W). If
`None` then use the mean of the prior (all zeros) for all other dimensions.
"""
if data is None:
# mean of prior for other dimensions
samples = torch.zeros(n_samples, self.latent_dim)
traversals = torch.linspace(*self._get_traversal_range(), steps=n_samples)
else:
if data.size(0) > 1:
raise ValueError("Every value should be sampled from the same posterior, but {} datapoints given.".format(data.size(0)))
with torch.no_grad():
post_mean, post_logvar = self.model.encoder(data.to(self.device))
samples = self.model.reparameterize(post_mean, post_logvar)
samples = samples.cpu().repeat(n_samples, 1)
post_mean_idx = post_mean.cpu()[0, idx]
post_std_idx = torch.exp(post_logvar / 2).cpu()[0, idx]
            # traverse the Gaussian of the posterior when using quantile ranges
traversals = torch.linspace(*self._get_traversal_range(mean=post_mean_idx,
std=post_std_idx),
steps=n_samples)
for i in range(n_samples):
samples[i, idx] = traversals[i]
return samples
def _save_or_return(self, to_plot, size, filename, is_force_return=False):
"""Create plot and save or return it."""
to_plot = F.interpolate(to_plot, scale_factor=self.upsample_factor)
if size[0] * size[1] != to_plot.shape[0]:
raise ValueError("Wrong size {} for datashape {}".format(size, to_plot.shape))
# `nrow` is number of images PER row => number of col
kwargs = dict(nrow=size[1], pad_value=(1 - get_background(self.dataset)))
if self.save_images and not is_force_return:
filename = os.path.join(self.model_dir, filename)
save_image(to_plot, filename, **kwargs)
else:
return make_grid_img(to_plot, **kwargs)
def _decode_latents(self, latent_samples):
"""Decodes latent samples into images.
Parameters
----------
latent_samples : torch.autograd.Variable
Samples from latent distribution. Shape (N, L) where L is dimension
of latent distribution.
"""
latent_samples = latent_samples.to(self.device)
return self.model.decoder(latent_samples).cpu()
def generate_samples(self, size=(8, 8)):
"""Plot generated samples from the prior and decoding.
Parameters
----------
size : tuple of ints, optional
Size of the final grid.
"""
prior_samples = torch.randn(size[0] * size[1], self.latent_dim)
generated = self._decode_latents(prior_samples)
return self._save_or_return(generated.data, size, PLOT_NAMES["generate_samples"])
def data_samples(self, data, size=(8, 8)):
"""Plot samples from the dataset
Parameters
----------
data : torch.Tensor
Data to be reconstructed. Shape (N, C, H, W)
size : tuple of ints, optional
Size of the final grid.
"""
data = data[:size[0] * size[1], ...]
return self._save_or_return(data, size, PLOT_NAMES["data_samples"])
def reconstruct(self, data, size=(8, 8), is_original=True, is_force_return=False):
"""Generate reconstructions of data through the model.
Parameters
----------
data : torch.Tensor
Data to be reconstructed. Shape (N, C, H, W)
size : tuple of ints, optional
Size of grid on which reconstructions will be plotted. The number
of rows should be even when `is_original`, so that upper
            half contains true data and bottom half contains reconstructions.
is_original : bool, optional
Whether to exclude the original plots.
is_force_return : bool, optional
Force returning instead of saving the image.
"""
if is_original:
if size[0] % 2 != 0:
raise ValueError("Should be even number of rows when showing originals not {}".format(size[0]))
n_samples = size[0] // 2 * size[1]
else:
n_samples = size[0] * size[1]
with torch.no_grad():
originals = data.to(self.device)[:n_samples, ...]
recs, _, _ = self.model(originals)
originals = originals.cpu()
recs = recs.view(-1, *self.model.img_size).cpu()
to_plot = torch.cat([originals, recs]) if is_original else recs
return self._save_or_return(to_plot, size, PLOT_NAMES["reconstruct"],
is_force_return=is_force_return)
def traversals(self,
data=None,
is_reorder_latents=False,
n_per_latent=8,
n_latents=None,
is_force_return=False):
"""Plot traverse through all latent dimensions (prior or posterior) one
by one and plots a grid of images where each row corresponds to a latent
traversal of one latent dimension.
Parameters
----------
        data : torch.Tensor or None, optional
Data to use for computing the latent posterior. If `None` traverses
the prior.
n_per_latent : int, optional
The number of points to include in the traversal of a latent dimension.
I.e. number of columns.
n_latents : int, optional
The number of latent dimensions to display. I.e. number of rows. If `None`
uses all latents.
is_reorder_latents : bool, optional
If the latent dimensions should be reordered or not
is_force_return : bool, optional
Force returning instead of saving the image.
"""
n_latents = n_latents if n_latents is not None else self.model.latent_dim
latent_samples = [self._traverse_line(dim, n_per_latent, data=data)
for dim in range(self.latent_dim)]
decoded_traversal = self._decode_latents(torch.cat(latent_samples, dim=0))
if is_reorder_latents:
n_images, *other_shape = decoded_traversal.size()
n_rows = n_images // n_per_latent
decoded_traversal = decoded_traversal.reshape(n_rows, n_per_latent, *other_shape)
decoded_traversal = sort_list_by_other(decoded_traversal, self.losses)
decoded_traversal = torch.stack(decoded_traversal, dim=0)
decoded_traversal = decoded_traversal.reshape(n_images, *other_shape)
decoded_traversal = decoded_traversal[range(n_per_latent * n_latents), ...]
size = (n_latents, n_per_latent)
sampling_type = "prior" if data is None else "posterior"
filename = "{}_{}".format(sampling_type, PLOT_NAMES["traversals"])
return self._save_or_return(decoded_traversal.data, size, filename,
is_force_return=is_force_return)
def reconstruct_traverse(self, data,
is_posterior=True,
n_per_latent=8,
n_latents=None,
is_show_text=False):
"""
        Creates a figure whose first row shows original images, whose second row
        shows their reconstructions, and whose remaining rows show traversals
        (prior or posterior) of the latent dimensions.
Parameters
----------
data : torch.Tensor
Data to be reconstructed. Shape (N, C, H, W)
n_per_latent : int, optional
The number of points to include in the traversal of a latent dimension.
I.e. number of columns.
n_latents : int, optional
The number of latent dimensions to display. I.e. number of rows. If `None`
uses all latents.
is_posterior : bool, optional
Whether to sample from the posterior.
is_show_text : bool, optional
            Whether to show the KL values next to the traversal rows.
"""
n_latents = n_latents if n_latents is not None else self.model.latent_dim
reconstructions = self.reconstruct(data[:2 * n_per_latent, ...],
size=(2, n_per_latent),
is_force_return=True)
traversals = self.traversals(data=data[0:1, ...] if is_posterior else None,
is_reorder_latents=True,
n_per_latent=n_per_latent,
n_latents=n_latents,
is_force_return=True)
concatenated = np.concatenate((reconstructions, traversals), axis=0)
concatenated = Image.fromarray(concatenated)
if is_show_text:
losses = sorted(self.losses, reverse=True)[:n_latents]
labels = ['orig', 'recon'] + ["KL={:.4f}".format(l) for l in losses]
concatenated = add_labels(concatenated, labels)
filename = os.path.join(self.model_dir, PLOT_NAMES["reconstruct_traverse"])
concatenated.save(filename)
def gif_traversals(self, data, n_latents=None, n_per_gif=15):
"""Generates a grid of gifs of latent posterior traversals where the rows
are the latent dimensions and the columns are random images.
Parameters
----------
        data : torch.Tensor
            Data to use for computing the latent posteriors. The number of datapoints
            (batch size) will determine the number of columns of the grid.
n_latents : int, optional
The number of latent dimensions to display. I.e. number of rows. If `None`
uses all latents.
n_per_gif : int, optional
Number of images per gif (number of traversals)
"""
n_images, _, _, width_col = data.shape
width_col = int(width_col * self.upsample_factor)
all_cols = [[] for c in range(n_per_gif)]
for i in range(n_images):
grid = self.traversals(data=data[i:i + 1, ...], is_reorder_latents=True,
n_per_latent=n_per_gif, n_latents=n_latents,
is_force_return=True)
height, width, c = grid.shape
padding_width = (width - width_col * n_per_gif) // (n_per_gif + 1)
# split the grids into a list of column images (and removes padding)
for j in range(n_per_gif):
all_cols[j].append(grid[:, [(j + 1) * padding_width + j * width_col + i
for i in range(width_col)], :])
pad_values = (1 - get_background(self.dataset)) * 255
all_cols = [concatenate_pad(cols, pad_size=2, pad_values=pad_values, axis=1)
for cols in all_cols]
filename = os.path.join(self.model_dir, PLOT_NAMES["gif_traversals"])
imageio.mimsave(filename, all_cols, fps=FPS_GIF)
class GifTraversalsTraining:
"""Creates a Gif of traversals by generating an image at every training epoch.
Parameters
----------
model : disvae.vae.VAE
dataset : str
Name of the dataset.
model_dir : str
The directory that the model is saved to and where the images will
be stored.
is_reorder_latents : bool, optional
If the latent dimensions should be reordered or not
n_per_latent : int, optional
The number of points to include in the traversal of a latent dimension.
I.e. number of columns.
n_latents : int, optional
The number of latent dimensions to display. I.e. number of rows. If `None`
uses all latents.
kwargs:
Additional arguments to `Visualizer`
"""
def __init__(self, model, dataset, model_dir,
is_reorder_latents=False,
n_per_latent=10,
n_latents=None,
**kwargs):
self.save_filename = os.path.join(model_dir, GIF_FILE)
self.visualizer = Visualizer(model, dataset, model_dir,
save_images=False, **kwargs)
self.images = []
self.is_reorder_latents = is_reorder_latents
self.n_per_latent = n_per_latent
self.n_latents = \
n_latents if n_latents is not None else model.latent_dim
def __call__(self):
"""Generate the next gif image. Should be called after each epoch."""
cached_training = self.visualizer.model.training
self.visualizer.model.eval()
img_grid = self.visualizer.traversals(
data=None, # GIF from prior
is_reorder_latents=self.is_reorder_latents,
n_per_latent=self.n_per_latent,
n_latents=self.n_latents)
self.images.append(img_grid)
if cached_training:
self.visualizer.model.train()
def save_reset(self):
"""Saves GIF and resets the list of images. Call at training end."""
imageio.mimsave(self.save_filename, self.images, fps=FPS_GIF)
self.images = []
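if __name__ == "__main__":
    # Minimal wiring sketch (not part of the original module): render prior
    # samples from an untrained VAE into a temporary directory. A real run
    # would pass a trained model and its experiment directory instead.
    import tempfile
    from vae.models.vae import VAE
    model = VAE(img_size=(1, 32, 32), latent_dim=10)
    with tempfile.TemporaryDirectory() as tmp_dir:
        viz = Visualizer(model, dataset="mnist", model_dir=tmp_dir)
        viz.generate_samples(size=(4, 4))  # writes samples.png into tmp_dir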
| 17,560 | 39.185355 | 136 | py |
geometric-js | geometric-js-master/utils/viz_helpers.py | """Visualisation helper module.
Adapted from: https://github.com/YannDubs/disentangling-vae"""
import imageio
import numpy as np
import pandas as pd
import random
import torch
from PIL import Image, ImageDraw
from torchvision.utils import make_grid
from utils.datasets import get_dataloaders
FPS_GIF = 12
def get_samples(dataset, num_samples, idcs=()):
    """ Generate a number of samples from the dataset.
    Parameters
    ----------
    dataset : str
        The name of the dataset.
    num_samples : int, optional
        The number of samples to load from the dataset
    idcs : sequence of ints, optional
        Indices of images to put at the beginning of the samples.
    """
    # Samples are indexed directly below, so the loader itself does not need
    # any extra arguments (`get_dataloaders` already sets `shuffle`).
    data_loader = get_dataloaders(dataset, batch_size=1)
    # copy to avoid mutating a shared default argument
    idcs = list(idcs)
    idcs += random.sample(range(len(data_loader.dataset)), num_samples - len(idcs))
samples = torch.stack([data_loader.dataset[i][0] for i in idcs], dim=0)
print("Selected idcs: {}".format(idcs))
return samples
def sort_list_by_other(to_sort, other, reverse=True):
"""Sort a list by an other."""
return [el for _, el in sorted(zip(other, to_sort), reverse=reverse)]
# TO-DO: clean
def read_loss_from_file(log_file_path, loss_to_fetch):
""" Read the average KL per latent dimension at the final stage of training from the log file.
Parameters
----------
log_file_path : str
Full path and file name for the log file. For example 'experiments/custom/losses.log'.
loss_to_fetch : str
The loss type to search for in the log file and return. This must be in the exact form as stored.
"""
EPOCH = "Epoch"
LOSS = "Loss"
logs = pd.read_csv(log_file_path)
df_last_epoch_loss = logs[logs.loc[:, EPOCH] == logs.loc[:, EPOCH].max()]
df_last_epoch_loss = df_last_epoch_loss.loc[df_last_epoch_loss.loc[:, LOSS].str.startswith(loss_to_fetch), :]
df_last_epoch_loss.loc[:, LOSS] = df_last_epoch_loss.loc[:, LOSS].str.replace(loss_to_fetch, "").astype(int)
df_last_epoch_loss = df_last_epoch_loss.sort_values(LOSS).loc[:, "Value"]
return list(df_last_epoch_loss)
def add_labels(input_image, labels):
"""Adds labels next to rows of an image.
Parameters
----------
input_image : image
The image to which to add the labels
labels : list
The list of labels to plot
"""
new_width = input_image.width + 100
new_size = (new_width, input_image.height)
new_img = Image.new("RGB", new_size, color='white')
new_img.paste(input_image, (0, 0))
draw = ImageDraw.Draw(new_img)
for i, s in enumerate(labels):
draw.text(xy=(new_width - 100 + 0.005,
int((i / len(labels) + 1 / (2 * len(labels))) * input_image.height)),
text=s,
fill=(0, 0, 0))
return new_img
def make_grid_img(tensor, **kwargs):
"""Converts a tensor to a grid of images that can be read by imageio.
Notes
-----
* from in https://github.com/pytorch/vision/blob/master/torchvision/utils.py
Parameters
----------
tensor (torch.Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
or a list of images all of the same size.
kwargs:
        Additional arguments to `make_grid`.
"""
grid = make_grid(tensor, **kwargs)
img_grid = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0)
img_grid = img_grid.to('cpu', torch.uint8).numpy()
return img_grid
def get_image_list(image_file_name_list):
image_list = []
for file_name in image_file_name_list:
image_list.append(Image.open(file_name))
return image_list
def arr_im_convert(arr, convert="RGBA"):
"""Convert an image array."""
return np.asarray(Image.fromarray(arr).convert(convert))
def plot_grid_gifs(filename, grid_files, pad_size=7, pad_values=255):
"""Take a grid of gif files and merge them in order with padding."""
grid_gifs = [[imageio.mimread(f) for f in row] for row in grid_files]
n_per_gif = len(grid_gifs[0][0])
# convert all to RGBA which is the most general => can merge any image
imgs = [concatenate_pad([concatenate_pad([arr_im_convert(gif[i], convert="RGBA")
for gif in row], pad_size, pad_values, axis=1)
for row in grid_gifs], pad_size, pad_values, axis=0)
for i in range(n_per_gif)]
imageio.mimsave(filename, imgs, fps=FPS_GIF)
def concatenate_pad(arrays, pad_size, pad_values, axis=0):
"""Concatenate lsit of array with padding inbetween."""
pad = np.ones_like(arrays[0]).take(indices=range(pad_size), axis=axis) * pad_values
new_arrays = [pad]
for arr in arrays:
new_arrays += [arr, pad]
new_arrays += [pad]
return np.concatenate(new_arrays, axis=axis)
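if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): pad-concatenate
    # two small greyscale "images" along their width.
    a = np.zeros((4, 4), dtype=np.uint8)
    b = np.full((4, 4), 255, dtype=np.uint8)
    merged = concatenate_pad([a, b], pad_size=2, pad_values=128, axis=1)
    print(merged.shape)  # (4, 16): two 4-wide arrays plus four 2-wide pads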
| 4,928 | 31.427632 | 113 | py |
geometric-js | geometric-js-master/notebooks/comparison.py | """
Code to compare behavior of isotropic Gaussians optimized with respect to
different divergences.
Adapted from:
https://github.com/lucastheis/model-evaluation/blob/master/code/experiments/comparison.py
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as nr
import os
import seaborn as sns
import sys
import theano as th
import theano.sandbox.linalg as tl
import theano.tensor as tt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from time import time
# from notebooks.utils import PlotParams
sys.path.append('./code')
mpl.use('Agg')
def normal(X, m, C):
"""
Evaluates the density of a normal distribution.
@type X: C{TensorVariable}
@param X: matrix storing data points column-wise
@type m: C{ndarray}/C{TensorVariable}
@param m: column vector representing the mean of the Gaussian
@type C: C{ndarray}/C{TensorVariable}
@param C: covariance matrix
@rtype: C{TensorVariable}
@return: density of a Gaussian distribution evaluated at C{X}
"""
Z = X - m
return tt.exp(
- tt.sum(Z * tt.dot(tl.matrix_inverse(C), Z), 0) / 2.
- tt.log(tl.det(C)) / 2.
- m.size / 2. * np.log(2. * np.pi))
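def _normal_numpy_check(X, m, C):
    """Sketch (not used by the experiments): plain-numpy mirror of normal() for
    sanity-checking the Theano graph on small numeric inputs, assuming X is a
    (D, N) array, m a (D, 1) array and C a (D, D) covariance matrix."""
    Z = X - m
    quad = np.sum(Z * np.linalg.solve(C, Z), 0)
    return np.exp(-quad / 2. - np.log(np.linalg.det(C)) / 2.
                  - m.size / 2. * np.log(2. * np.pi))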
def mogaussian(D=2, K=10, N=100000, seed=2, D_max=100):
"""
Creates a random mixture of Gaussians and corresponding samples.
@rtype: C{tuple}
@return: a function representing the density and samples
"""
nr.seed(seed)
# mixture weights
p = nr.dirichlet([.5] * K)
# variances
v = 1. / np.square(nr.rand(K) + 1.)
# means; D_max makes sure that data only depends on seed and not on D
m = nr.randn(D_max, K) * 1.5
m = m[:D]
    # m is a numpy array with entries drawn from N(0, 1.5**2); after slicing it has shape (D, K)
# density function
X = tt.dmatrix('X')
C = [np.eye(D) * _ for _ in v]
def log_p(X):
"""
@type X: C{ndarray}/C{TensorVariable}
@param X: data points stored column-wise
@rtype: C{ndarray}/C{TensorVariable}
"""
if isinstance(X, tt.TensorVariable):
return tt.log(tt.sum([p[i] * normal(X, m[:, [i]], C[i]) for i in range(len(p))], 0))
else:
if log_p.f is None:
Y = tt.dmatrix('Y')
log_p.f = th.function([Y], log_p(Y))
return log_p.f(X)
log_p.f = None
def nonlog_p(X):
"""
@type X: C{ndarray}/C{TensorVariable}
@param X: data points stored column-wise
@rtype: C{ndarray}/C{TensorVariable}
"""
if isinstance(X, tt.TensorVariable):
return tt.sum([p[i] * normal(X, m[:, [i]], C[i]) for i in range(len(p))], 0)
else:
if nonlog_p.f is None:
Y = tt.dmatrix('Y')
nonlog_p.f = th.function([Y], nonlog_p(Y))
return nonlog_p.f(X)
nonlog_p.f = None
# sample data
M = nr.multinomial(N, p)
    data = np.hstack([nr.randn(D, M[i]) * np.sqrt(v[i]) + m[:, [i]] for i in range(len(p))])
data = data[:, nr.permutation(N)]
return nonlog_p, log_p, data
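def _mogaussian_sketch():
    """Sketch (illustrative settings only): build a small 2-D mixture and
    evaluate its log-density on a few of the generated samples."""
    _, log_p, data = mogaussian(D=2, K=5, N=1000, seed=0)
    return log_p(data[:, :10])  # ndarray of shape (10,)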
def ravel(params):
"""
Combine parameters into a long one-dimensional array.
@type params: C{list}
@param params: list of shared variables
@rtype: C{ndarray}
"""
    return np.hstack([p.get_value().ravel() for p in params])
def unravel(params, x):
"""
Extract parameters from an array and insert into shared variables.
@type params: C{list}
@param params: list of shared variables
@type x: C{ndarray}
@param x: parameter values
"""
x = x.ravel()
for param in params:
param.set_value(x[:param.size.eval()].reshape(param.shape.eval()))
x = x[param.size.eval():]
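def _ravel_unravel_roundtrip(params):
    """Sketch (assumes `params` is a list of Theano shared variables): a
    ravel()/unravel() round trip should leave the parameter values unchanged."""
    before = ravel(params)
    unravel(params, before)
    return np.allclose(before, ravel(params))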
def plot(log_q, data, xmin=-5, xmax=7, ymin=-5, ymax=7):
"""
Visualize density (as contour plot) and data samples (as histogram).
"""
if isinstance(log_q, tuple) or isinstance(log_q, list):
A, b = log_q
X = tt.dmatrix('X')
log_q = th.function([X], normal(X, b, np.dot(A, A.T)))
# evaluate density on a grid
xx, yy = np.meshgrid(np.linspace(xmin, xmax, 200), np.linspace(ymin, ymax, 200))
zz = np.exp(log_q(np.asarray([xx.ravel(), yy.ravel()])).reshape(xx.shape))
hh, x, y = np.histogram2d(data[0], data[1], 80, range=[(xmin, xmax), (ymin, ymax)])
sns.set_style('whitegrid')
sns.set_style('ticks')
plt.figure(figsize=(10, 10), dpi=300)
# plt.imshow(hh.T[::-1], extent=[x[0], x[-1], y[0], y[-1]],
# interpolation='nearest', cmap='YlGnBu_r')
# plt.contour(xx, yy, zz, 7, colors='w', alpha=.7)
plt.scatter(data[0], data[1], color='k', marker='.', alpha=0.05)
plt.contour(xx, yy, zz, 5, linewidths=2)
plt.axis('equal')
plt.axis([x[0], x[-1], y[0], y[-1]])
# plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.gcf().tight_layout()
def fit_mmd(data):
"""
Fit isotropic Gaussian by minimizing maximum mean discrepancy.
B{References:}
- A. Gretton et al., I{A Kernel Method for the Two-Sample-Problem}, NIPS, 2007
- Y. Li et al., I{Generative Moment Matching Networks}, ICML, 2015
"""
def gaussian_kernel(x, y, sigma=1.):
return tt.exp(-tt.sum(tt.square(x - y)) / sigma**2)
def mixed_kernel(x, y, sigma=[.5, 1., 2., 4., 8.]):
return tt.sum([gaussian_kernel(x, y, s) for s in sigma])
def gram_matrix(X, Y, kernel):
M = X.shape[0]
N = Y.shape[0]
G, _ = th.scan(
fn=lambda k: kernel(X[k // N], Y[k % N]),
sequences=[tt.arange(M * N)])
return G.reshape([M, N])
# hiddens
Z = tt.dmatrix('Z')
# parameters
b = th.shared(np.mean(data, 1)[None], broadcastable=[True, False])
A = th.shared(np.std(data - b.get_value().T))
# model samples
X = Z * A + b
# data
Y = tt.dmatrix('Y')
M = X.shape[0]
N = Y.shape[0]
Kyy = gram_matrix(Y, Y, mixed_kernel)
Kxy = gram_matrix(X, Y, mixed_kernel)
Kxx = gram_matrix(X, X, mixed_kernel)
MMDsq = tt.sum(Kxx) / M**2 - 2. / (N * M) * tt.sum(Kxy) + tt.sum(Kyy) / N**2
MMD = tt.sqrt(MMDsq)
f = th.function([Z, Y], [MMD, tt.grad(MMD, A), tt.grad(MMD, b)])
# batch size, momentum, learning rate schedule
B = 100
mm = 0.8
kappa = .7
tau = 1.
values = []
try:
for t in range(0, data.shape[1], B):
if t % 10000 == 0:
# reset momentum
dA = 0.
db = 0.
Z = nr.randn(B, data.shape[0])
Y = data.T[t:t + B]
lr = np.power(tau + (t + B) / B, -kappa)
v, gA, gb = f(Z, Y)
dA = mm * dA - lr * gA
db = mm * db - lr * gb
values.append(v)
A.set_value(A.get_value() + dA)
b.set_value(b.get_value() + db)
print('{0:>6} {1:.4f}'.format(t, np.mean(values[-100:])))
except KeyboardInterrupt:
pass
return A.get_value() * np.eye(data.shape[0]), b.get_value().T
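def _mmd_numpy_sketch(X, Y, sigma=1.):
    """Sketch (assumes X is an (M, D) and Y an (N, D) sample array): biased
    estimate of MMD^2 with a single Gaussian kernel, mirroring the objective
    that fit_mmd() builds with Theano."""
    def k(A, B):
        d2 = (np.sum(A ** 2, 1)[:, None] + np.sum(B ** 2, 1)[None]
              - 2. * A @ B.T)
        return np.exp(-d2 / sigma ** 2)
    return k(X, X).mean() + k(Y, Y).mean() - 2. * k(X, Y).mean()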
def js(X, Z, G, log_p, log_q, a=0.0):
return (tt.mean(tt.log(tt.nnet.sigmoid(log_p(X) - log_q(G(Z)))))
+ tt.mean(tt.log(tt.nnet.sigmoid(log_q(G(Z)) - log_p(X))))
+ tt.mean(0.0 * a * G(Z))) # Use dummy vars
def kl(X, Z, G, log_p, log_q, a=0.0):
return (tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(G(Z))))
+ tt.mean(0.0 * a * G(Z))) # Use dummy vars
def rkl(X, Z, G, log_p, log_q, a=0.0):
return (tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(X)))
+ tt.mean(0.0 * a * G(Z))) # Use dummy vars
def gjs(X, Z, G, log_p, log_q, a=0.5):
return ((1 - a) ** 2 * tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(G(Z))))
+ a ** 2 * tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(X))))
def dgjs(X, Z, G, log_p, log_q, a=0.5):
return ((1 - a) ** 2 * tt.mean((tt.exp(log_p(X)) ** a)
* (tt.exp(log_q(G(Z))) ** (1 - a))
* (log_q(G(Z)) - log_p(X)))
+ a ** 2 * tt.mean((tt.exp(log_p(X))) ** a
* (tt.exp(log_q(G(Z))) ** (1 - a))
* (log_p(X) - log_q(G(Z)))))
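def _gjs_numpy_sketch(log_p_vals, log_q_vals, a=0.5):
    """Sketch (assumes log_p_vals and log_q_vals are log-densities evaluated on
    the same batch of samples): plain-numpy mirror of the gjs() objective above,
    handy for checking the Theano graph on small arrays."""
    p, q = np.exp(log_p_vals), np.exp(log_q_vals)
    return ((1 - a) ** 2 * np.mean(p * (log_p_vals - log_q_vals))
            + a ** 2 * np.mean(q * (log_q_vals - log_p_vals)))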
def fit(data, log_p, div='kl', max_epochs=20, alpha=0.5):
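    """Fit an isotropic Gaussian to `data` by SGD on the divergence named by
    `div`: one of 'js', 'kl', 'rkl', 'gjs', 'dgjs', or the trainable-alpha
    variants 'tgjs'/'tdgjs'."""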
D = data.shape[0]
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# Initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None],
broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None],
broadcastable=[False, True])
if div in ['tgjs', 'tdgjs']:
alpha = th.shared(alpha)
def log_q(X):
return (-0.5 * tt.sum(tt.square((X - b) / a), 0)
                - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi))
def G(Z):
return a * Z + b
if div in ['tgjs', 'tdgjs']:
        # 'tgjs'/'tdgjs' reuse the gjs/dgjs losses above with a trainable alpha
        div = eval(div[1:])(X, Z, G, log_p, log_q, a=alpha)
f_div = th.function(
[Z, X],
[div, th.grad(div, a), th.grad(div, b), th.grad(div, alpha)])
else:
div = eval(div)(X, Z, G, log_p, log_q, a=alpha)
f_div = th.function(
[Z, X],
[div, th.grad(div, a), th.grad(div, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = 0.5
    da = 0.0
    db = 0.0
    dalpha = 0.0
    print('{0:>4} {1:.4f}'.format(0, float(f_div(nr.randn(*data.shape), data)[0])))
    for epoch in range(max_epochs):
        values = []
        for t in range(0, data.shape[1], B):  # SGD with momentum
            Z = nr.randn(D, B)
            Y = data[:, t:t + B]
            grads = f_div(Z, Y)
            v, ga, gb = grads[0], grads[1], grads[2]
            da = mm * da - lr * ga
            db = mm * db - lr * gb
            a.set_value(a.get_value() + da)
            b.set_value(b.get_value() + db)
            if len(grads) == 4:
                # trainable skew parameter: update alpha and keep it in [0, 1]
                dalpha = mm * dalpha - lr * grads[3]
                alpha.set_value(float(np.clip(alpha.get_value() + dalpha, 0., 1.)))
            values.append(v)
        # lr /= 2.
        print(f'{epoch+1:>4} {np.mean(values):.4f}')
return a.get_value() * np.eye(D), b.get_value()
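def _fit_sketch():
    """Sketch (illustrative settings only): fit an isotropic Gaussian to mixture
    samples with the dual skew-geometric objective and a fixed alpha."""
    _, log_p, data = mogaussian(D=2, K=5, N=20000, seed=0)
    return fit(data, log_p, div='dgjs', max_epochs=2, alpha=0.3)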
def fit_js(data, log_p, max_epochs=20):
"""
Fit isotropic Gaussian by minimizing Jensen-Shannon divergence.
"""
# data dimensionality
D = data.shape[0]
# data and hidden states
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
# model density
    def log_q(X): return -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
def G(Z): return a * Z + b
# Jensen-Shannon divergence
JSD = (tt.mean(tt.log(tt.nnet.sigmoid(log_p(X) - log_q(G(Z)))))
+ tt.mean(tt.log(tt.nnet.sigmoid(log_q(G(Z)) - log_p(X)))))
JSD = (JSD + np.log(4.)) / 2.
# JSD1 = tt.mean(tt.log(tt.nnet.sigmoid(log_p(X) - log_q(X))))
# JSD2 = tt.mean(tt.log(tt.nnet.sigmoid(log_q(G(Z)) - log_p(G(Z)))))
# JSD = l * JSD1 + (1 - l) * JSD2
# function computing JSD and its gradient
f_jsd = th.function([Z, X], [JSD, th.grad(JSD, a), th.grad(JSD, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .5
da = 0.
db = 0.
try:
# display initial JSD
print('{0:>4} {1:.4f}'.format(0, float(f_jsd(nr.randn(*data.shape), data)[0])))
for epoch in range(max_epochs):
values = []
# stochastic gradient descent
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
v, ga, gb = f_jsd(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
# reduce learning rate
lr /= 2.
# display estimated JSD
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
except KeyboardInterrupt:
pass
return a.get_value() * np.eye(D), b.get_value()
def fit_gjs_train_a(data, log_p, max_epochs=50):
"""Fit isotropic Gaussian by minimizing GJS divergence."""
D = data.shape[0]
# Data and hidden states
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# Initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
alpha = th.shared(0.5)
# Density and divergence
    def log_q(X): return -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
def G(Z): return a * Z + b
gJSD = ((1 - alpha) ** 2 * tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(G(Z))))
+ alpha ** 2 * tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(X))))
# Function computing G-JSD and its gradient
f_gjsd = th.function(
[Z, X],
[gJSD, th.grad(gJSD, a), th.grad(gJSD, b), th.grad(gJSD, alpha)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .002
da = 0.
db = 0.
dalpha = 0.
print('{0:>4} {1:.4f}'.format(
0, float(f_gjsd(nr.randn(*data.shape), data)[0])))
print("Starting training! \n\n")
for epoch in range(max_epochs):
values = []
print(f"Alpha: {alpha.get_value()}")
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
v, ga, gb, galpha = f_gjsd(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
dalpha = mm * dalpha - lr * galpha
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
alpha.set_value(alpha.get_value() + dalpha)
if alpha.get_value() > 1.0:
alpha.set_value(1.0)
elif alpha.get_value() < 0.0:
alpha.set_value(0.0)
# lr /= 2.0
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
return a.get_value() * np.eye(D), b.get_value(), alpha.get_value()
def fit_gjs(data, log_p, max_epochs=20):
"""Fit isotropic Gaussian by minimizing geometric Jensen-Shannon divergence."""
D = data.shape[0]
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
alpha = 0.4
    def log_q(X): return -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
def G(Z): return a * Z + b
# geometric Jensen-Shannon divergence
# JSD = tt.mean(log_p(X) - log_q(X)) \
# + tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(G(Z))))
gJSD = ((1 - alpha) ** 2 * tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(G(Z))))
+ alpha ** 2 * tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(X))))
f_gjsd = th.function([Z, X], [gJSD, th.grad(gJSD, a), th.grad(gJSD, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .5
da = 0.
db = 0.
print('{0:>4} {1:.4f}'.format(0, float(f_gjsd(nr.randn(*data.shape), data)[0])))
for epoch in range(max_epochs):
values = []
print(f"Alpha: {alpha}")
# stochastic gradient descent
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
v, ga, gb = f_gjsd(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
# lr /= 2.
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
return a.get_value() * np.eye(D), b.get_value()
def fit_dgjs(data, log_p, max_epochs=20):
"""
    Fit isotropic Gaussian by minimizing the dual geometric Jensen-Shannon divergence.
"""
# data dimensionality
D = data.shape[0]
# data and hidden states
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
# alpha = th.shared(0.5)
# model density
def q(X): return normal(X, b, a)
    def log_q(X): return -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
def G(Z): return a * Z + b
# geometric Jensen-Shannon divergence
# JSD = tt.mean(log_p(X) - log_q(X)) \
# + tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(G(Z))))
# gJSD = (1 - 0.5) ** 2 * tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(X))) \
# + 0.5 ** 2 * tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(G(Z))))
# gJSD = (1 - alpha) ** 2 * tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(X))) + alpha ** 2 * tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(G(Z))))
alpha = 1.0
# gJSD = (1 - 0.5) ** 2 * tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(G(Z)))) + 0.5 ** 2 * tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(X)))
    gJSD = ((1 - alpha) ** 2 * tt.mean((tt.exp(log_p(X)) ** alpha)
                                       * (tt.exp(log_q(G(Z))) ** (1 - alpha))
                                       * (log_q(G(Z)) - log_p(X)))
            + alpha ** 2 * tt.mean((tt.exp(log_p(X)) ** alpha)
                                   * (tt.exp(log_q(G(Z))) ** (1 - alpha))
                                   * (log_p(X) - log_q(G(Z)))))
# function computing G-JSD and its gradient
# f_gjsd = th.function([Z, X], [gJSD, th.grad(gJSD, a), th.grad(gJSD, b), th.grad(gJSD, alpha)])
f_gjsd = th.function([Z, X], [gJSD, th.grad(gJSD, a), th.grad(gJSD, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .5
da = 0.
db = 0.
dalpha = 0.
try:
# display initial JSD
print('{0:>4} {1:.4f}'.format(0, float(f_gjsd(nr.randn(*data.shape), data)[0])))
for epoch in range(max_epochs):
values = []
print(f"Alpha: {alpha}")
# stochastic gradient descent
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
# v, ga, gb, galpha = f_gjsd(Z, Y)
# da = mm * da - lr * ga
# db = mm * db - lr * gb
# dalpha = mm * dalpha - lr * galpha
v, ga, gb = f_gjsd(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
# reduce learning rate
lr /= 2.
# display estimated JSD
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
except KeyboardInterrupt:
pass
return a.get_value() * np.eye(D), b.get_value()
def fit_kl(data, log_p, max_epochs=20):
"""
    Fit isotropic Gaussian by minimizing the Kullback-Leibler divergence KL(q || p).
"""
# data dimensionality
D = data.shape[0]
# data and hidden states
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
# model density
def q(X): return normal(X, b, a)
    def log_q(X): return -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
def G(Z): return a * Z + b
    # Kullback-Leibler divergence KL(q || p)
KL = tt.mean(0.0 * X) + tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(G(Z))))
    # function computing the KL divergence and its gradient
f_kl = th.function([Z, X], [KL, th.grad(KL, a), th.grad(KL, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .5
da = 0.
db = 0.
try:
# display initial JSD
print('{0:>4} {1:.4f}'.format(0, float(f_kl(nr.randn(*data.shape), data)[0])))
for epoch in range(max_epochs):
# print(f'\nEpoch: {epoch}')
values = []
# stochastic gradient descent
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
v, ga, gb = f_kl(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
# reduce learning rate
lr /= 2.
# display estimated JSD
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
except KeyboardInterrupt:
pass
return a.get_value() * np.eye(D), b.get_value()
def fit_rkl(data, log_p, max_epochs=20):
"""
    Fit isotropic Gaussian by minimizing the Kullback-Leibler divergence KL(p || q).
"""
# data dimensionality
D = data.shape[0]
# data and hidden states
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
# model density
def q(X): return normal(X, b, a)
    def log_q(X): return -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
def G(Z): return a * Z + b
    # Kullback-Leibler divergence KL(p || q)
RKL = tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(X))) + tt.mean(0.0 * Z)
    # function computing the KL divergence and its gradient
f_rkl = th.function([Z, X], [RKL, th.grad(RKL, a), th.grad(RKL, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .5
da = 0.
db = 0.
try:
# display initial JSD
print('{0:>4} {1:.4f}'.format(0, float(f_rkl(nr.randn(*data.shape), data)[0])))
for epoch in range(max_epochs):
values = []
# stochastic gradient descent
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
v, ga, gb = f_rkl(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
# reduce learning rate
lr /= 2.
# display estimated JSD
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
except KeyboardInterrupt:
pass
return a.get_value() * np.eye(D), b.get_value()
def main(argv):
parser = ArgumentParser(
argv[0],
description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--divergence', '-m',
        choices=['mmd', 'kl', 'rkl', 'js', 'gjs', 'dgjs', 'tgjs', 'tdgjs'],
default='kl',
help='Which metric to use.')
parser.add_argument(
'--num_data', '-N', type=int, default=100000,
help='Number of training points.')
parser.add_argument(
'-d', type=int, default=2,
help='Dimension of optimisation problem.')
parser.add_argument(
'-k', type=int, default=10,
help='Number of mixture components.')
parser.add_argument(
'-a', type=float, default=0.5,
help='Initial alpha for skew divergences')
parser.add_argument(
'--seed', '-s', type=int, default=22,
help='Random seed used to generate data.')
parser.add_argument(
'--output', '-o', type=str, default='results/',
help='Where to store results.')
args = parser.parse_args(argv[1:])
if not os.path.exists(args.output):
os.makedirs(args.output)
print('Generating data...')
p, log_p, data = mogaussian(
D=args.d, K=args.k, N=args.num_data, seed=args.seed)
    print(f'Optimizing {args.divergence} divergence...')
    if args.divergence == 'mmd':
        A, b = fit_mmd(data)
    else:
        A, b = fit(data, log_p, div=args.divergence, alpha=args.a)
# if 'KL' in args.metrics:
# A, b = fit_kl(data, log_p)
# if 'RKL' in args.metrics:
# A, b = fit_rkl(data, log_p)
# if 'JS' in args.metrics:
# A, b = fit_js(data, log_p)
# if 'GJS' in args.metrics:
# A, b = fit_gjs(data, log_p)
# if 'tGJS' in args.metrics:
# A, b, alpha = fit_gjs_train_a(data, log_p)
# # np.savetxt(
# # os.path.join(
# # args.output,
# # f'tGJS_d={args.d}_k={args.k}_{args.seed}.png'),
# # np.array(alpha)[None, None])
# if 'dGJS' in args.metrics:
# A, b = fit_dgjs(data, log_p)
# if 'MMD' in args.metrics:
# A, b = fit_mmd(data)
    if args.d == 2:
        plot(log_p, data)
        plt.savefig(os.path.join(args.output, f'{args.seed}_data.png'))
        plot([A, b], data)
        plt.savefig(
            os.path.join(
                args.output,
                f'{args.divergence}_d={args.d}_k={args.k}_{args.seed}.png'))
if __name__ == '__main__':
main(sys.argv)
| 25,970 | 29.024277 | 256 | py |
geometric-js | geometric-js-master/notebooks/comparison_old.py | """
Code to compare behavior of isotropic Gaussians optimized with respect to
different divergences.
Adapted from:
https://github.com/lucastheis/model-evaluation/blob/master/code/experiments/comparison.py
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as nr
import os
import seaborn as sns
import sys
import theano as th
import pdb
import theano.sandbox.linalg as tl
import theano.tensor as tt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from scipy.optimize import minimize
from time import time
sys.path.append('./code')
mpl.use('Agg')
def normal(X, m, C):
"""
Evaluates the density of a normal distribution.
@type X: C{TensorVariable}
@param X: matrix storing data points column-wise
@type m: C{ndarray}/C{TensorVariable}
@param m: column vector representing the mean of the Gaussian
@type C: C{ndarray}/C{TensorVariable}
@param C: covariance matrix
@rtype: C{TensorVariable}
@return: density of a Gaussian distribution evaluated at C{X}
"""
Z = X - m
return tt.exp(
- tt.sum(Z * tt.dot(tl.matrix_inverse(C), Z), 0) / 2.
- tt.log(tl.det(C)) / 2.
- m.size / 2. * np.log(2. * np.pi))
def mogaussian(D=2, K=10, N=100000, seed=2, D_max=100):
"""
Creates a random mixture of Gaussians and corresponding samples.
@rtype: C{tuple}
@return: a function representing the density and samples
"""
nr.seed(seed)
# mixture weights
p = nr.dirichlet([.5] * K)
# variances
v = 1. / np.square(nr.rand(K) + 1.)
# means; D_max makes sure that data only depends on seed and not on D
m = nr.randn(D_max, K) * 1.5
m = m[:D]
# density function
X = tt.dmatrix('X')
C = [np.eye(D) * _ for _ in v]
def log_p(X):
"""
@type X: C{ndarray}/C{TensorVariable}
@param X: data points stored column-wise
@rtype: C{ndarray}/C{TensorVariable}
"""
if isinstance(X, tt.TensorVariable):
return tt.log(tt.sum([p[i] * normal(X, m[:, [i]], C[i]) for i in range(len(p))], 0))
else:
if log_p.f is None:
Y = tt.dmatrix('Y')
log_p.f = th.function([Y], log_p(Y))
return log_p.f(X)
log_p.f = None
def nonlog_p(X):
"""
@type X: C{ndarray}/C{TensorVariable}
@param X: data points stored column-wise
@rtype: C{ndarray}/C{TensorVariable}
"""
if isinstance(X, tt.TensorVariable):
return tt.sum([p[i] * normal(X, m[:, [i]], C[i]) for i in range(len(p))], 0)
else:
if nonlog_p.f is None:
Y = tt.dmatrix('Y')
nonlog_p.f = th.function([Y], nonlog_p(Y))
return nonlog_p.f(X)
nonlog_p.f = None
# sample data
M = nr.multinomial(N, p)
    data = np.hstack([nr.randn(D, M[i]) * np.sqrt(v[i]) + m[:, [i]] for i in range(len(p))])
data = data[:, nr.permutation(N)]
return nonlog_p, log_p, data
def ravel(params):
"""
Combine parameters into a long one-dimensional array.
@type params: C{list}
@param params: list of shared variables
@rtype: C{ndarray}
"""
    return np.hstack([p.get_value().ravel() for p in params])
def unravel(params, x):
"""
Extract parameters from an array and insert into shared variables.
@type params: C{list}
@param params: list of shared variables
@type x: C{ndarray}
@param x: parameter values
"""
x = x.ravel()
for param in params:
param.set_value(x[:param.size.eval()].reshape(param.shape.eval()))
x = x[param.size.eval():]
def plot(log_q, data, xmin=-5, xmax=7, ymin=-5, ymax=7):
"""
Visualize density (as contour plot) and data samples (as histogram).
"""
if isinstance(log_q, tuple) or isinstance(log_q, list):
A, b = log_q
X = tt.dmatrix('X')
log_q = th.function([X], normal(X, b, np.dot(A, A.T)))
# evaluate density on a grid
xx, yy = np.meshgrid(
np.linspace(xmin, xmax, 200),
np.linspace(ymin, ymax, 200))
zz = np.exp(log_q(np.asarray([xx.ravel(), yy.ravel()])).reshape(xx.shape))
hh, x, y = np.histogram2d(data[0], data[1], 80, range=[(xmin, xmax), (ymin, ymax)])
sns.set_style('whitegrid')
sns.set_style('ticks')
plt.figure(figsize=(10, 10), dpi=300)
# plt.imshow(hh.T[::-1], extent=[x[0], x[-1], y[0], y[-1]],
# interpolation='nearest', cmap='YlGnBu_r')
# plt.contour(xx, yy, zz, 7, colors='w', alpha=.7)
plt.scatter(data[0], data[1], color='k', marker='.', alpha=0.05)
plt.contour(xx, yy, zz, 5, linewidths=2)
plt.axis('equal')
plt.axis([x[0], x[-1], y[0], y[-1]])
# plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.gcf().tight_layout()
def fit_mmd(data):
"""
Fit isotropic Gaussian by minimizing maximum mean discrepancy.
B{References:}
- A. Gretton et al., I{A Kernel Method for the Two-Sample-Problem}, NIPS, 2007
- Y. Li et al., I{Generative Moment Matching Networks}, ICML, 2015
"""
def gaussian_kernel(x, y, sigma=1.):
return tt.exp(-tt.sum(tt.square(x - y)) / sigma**2)
def mixed_kernel(x, y, sigma=[.5, 1., 2., 4., 8.]):
return tt.sum([gaussian_kernel(x, y, s) for s in sigma])
def gram_matrix(X, Y, kernel):
M = X.shape[0]
N = Y.shape[0]
G, _ = th.scan(
fn=lambda k: kernel(X[k // N], Y[k % N]),
sequences=[tt.arange(M * N)])
return G.reshape([M, N])
# hiddens
Z = tt.dmatrix('Z')
# parameters
b = th.shared(np.mean(data, 1)[None], broadcastable=[True, False])
A = th.shared(np.std(data - b.get_value().T))
# model samples
X = Z * A + b
# data
Y = tt.dmatrix('Y')
M = X.shape[0]
N = Y.shape[0]
Kyy = gram_matrix(Y, Y, mixed_kernel)
Kxy = gram_matrix(X, Y, mixed_kernel)
Kxx = gram_matrix(X, X, mixed_kernel)
MMDsq = tt.sum(Kxx) / M**2 - 2. / (N * M) * tt.sum(Kxy) + tt.sum(Kyy) / N**2
MMD = tt.sqrt(MMDsq)
f = th.function([Z, Y], [MMD, tt.grad(MMD, A), tt.grad(MMD, b)])
# batch size, momentum, learning rate schedule
B = 100
mm = 0.8
kappa = .7
tau = 1.
values = []
try:
for t in range(0, data.shape[1], B):
if t % 10000 == 0:
# reset momentum
dA = 0.
db = 0.
Z = nr.randn(B, data.shape[0])
Y = data.T[t:t + B]
lr = np.power(tau + (t + B) / B, -kappa)
v, gA, gb = f(Z, Y)
dA = mm * dA - lr * gA
db = mm * db - lr * gb
values.append(v)
A.set_value(A.get_value() + dA)
b.set_value(b.get_value() + db)
print('{0:>6} {1:.4f}'.format(t, np.mean(values[-100:])))
except KeyboardInterrupt:
pass
return A.get_value() * np.eye(data.shape[0]), b.get_value().T
def fit_js(data, log_p, max_epochs=20):
"""
Fit isotropic Gaussian by minimizing Jensen-Shannon divergence.
"""
# data dimensionality
D = data.shape[0]
# data and hidden states
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
# model density
    log_q = lambda X: -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
G = lambda Z: a * Z + b
# Jensen-Shannon divergence
JSD = tt.mean(tt.log(tt.nnet.sigmoid(log_p(X) - log_q(X)))) \
+ tt.mean(tt.log(tt.nnet.sigmoid(log_q(G(Z)) - log_p(G(Z)))))
JSD = (JSD + np.log(4.)) / 2.
# JSD1 = tt.mean(tt.log(tt.nnet.sigmoid(log_p(X) - log_q(X))))
# JSD2 = tt.mean(tt.log(tt.nnet.sigmoid(log_q(G(Z)) - log_p(G(Z)))))
# JSD = l * JSD1 + (1 - l) * JSD2
# function computing JSD and its gradient
f_jsd = th.function([Z, X], [JSD, th.grad(JSD, a), th.grad(JSD, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .5
da = 0.
db = 0.
try:
# display initial JSD
print('{0:>4} {1:.4f}'.format(0, float(f_jsd(nr.randn(*data.shape), data)[0])))
for epoch in range(max_epochs):
values = []
# stochastic gradient descent
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
v, ga, gb = f_jsd(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
# reduce learning rate
lr /= 2.
# display estimated JSD
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
except KeyboardInterrupt:
pass
return a.get_value() * np.eye(D), b.get_value()
def fit_gjs(data, log_p, max_epochs=20):
"""
Fit isotropic Gaussian by minimizing geometric Jensen-Shannon divergence.
"""
# data dimensionality
D = data.shape[0]
# data and hidden states
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
alpha = th.shared(0.5)
# model density
q = lambda X: normal(X, b, a)
    log_q = lambda X: -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
G = lambda Z: a * Z + b
# geometric Jensen-Shannon divergence
# JSD = tt.mean(log_p(X) - log_q(X)) \
# + tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(G(Z))))
# gJSD = (1 - 0.5) ** 2 * tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(X))) \
# + 0.5 ** 2 * tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(G(Z))))
gJSD = (1 - alpha) ** 2 * tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(G(Z)))) \
+ alpha ** 2 * tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(X)))
# function computing G-JSD and its gradient
f_gjsd = th.function([Z, X], [gJSD, th.grad(gJSD, a), th.grad(gJSD, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .5
da = 0.
db = 0.
try:
# display initial JSD
print('{0:>4} {1:.4f}'.format(0, float(f_gjsd(nr.randn(*data.shape), data)[0])))
for epoch in range(max_epochs):
values = []
# stochastic gradient descent
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
v, ga, gb = f_gjsd(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
# reduce learning rate
lr /= 2.
# display estimated JSD
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
except KeyboardInterrupt:
pass
return a.get_value() * np.eye(D), b.get_value()
def fit_kl(data, log_p, max_epochs=20):
"""
    Fit isotropic Gaussian by minimizing the Kullback-Leibler divergence KL(q || p).
"""
# data dimensionality
D = data.shape[0]
# data and hidden states
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
# model density
q = lambda X: normal(X, b, a)
    log_q = lambda X: -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
G = lambda Z: a * Z + b
    # Kullback-Leibler divergence KL(q || p)
KL = tt.mean(0.0 * X) + tt.mean(tt.exp(log_q(G(Z))) * (log_q(G(Z)) - log_p(G(Z))))
    # function computing the KL divergence and its gradient
f_kl = th.function([Z, X], [KL, th.grad(KL, a), th.grad(KL, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .5
da = 0.
db = 0.
try:
# display initial JSD
print('{0:>4} {1:.4f}'.format(0, float(f_kl(nr.randn(*data.shape), data)[0])))
for epoch in range(max_epochs):
values = []
# stochastic gradient descent
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
v, ga, gb = f_kl(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
# reduce learning rate
lr /= 2.
# display estimated JSD
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
except KeyboardInterrupt:
pass
return a.get_value() * np.eye(D), b.get_value()
def fit_rkl(data, log_p, max_epochs=20):
"""
    Fit isotropic Gaussian by minimizing the Kullback-Leibler divergence KL(p || q).
"""
# data dimensionality
D = data.shape[0]
# data and hidden states
X = tt.dmatrix('X')
Z = tt.dmatrix('Z')
nr.seed(int(time() * 1000.) % 4294967295)
idx = nr.permutation(data.shape[1])[:100]
# initialize parameters
b = th.shared(np.mean(data[:, idx], 1)[:, None], broadcastable=(False, True))
a = th.shared(np.std(data[:, idx] - b.get_value(), 1)[:, None], broadcastable=[False, True])
# model density
q = lambda X: normal(X, b, a)
    log_q = lambda X: -0.5 * tt.sum(tt.square((X - b) / a), 0) - D * tt.log(tt.abs_(a)) - D / 2. * np.log(2. * np.pi)
G = lambda Z: a * Z + b
    # Kullback-Leibler divergence KL(p || q)
RKL = tt.mean(tt.exp(log_p(X)) * (log_p(X) - log_q(X))) + tt.mean(0.0 * Z)
    # function computing the KL divergence and its gradient
f_rkl = th.function([Z, X], [RKL, th.grad(RKL, a), th.grad(RKL, b)])
# SGD hyperparameters
B = 200
mm = 0.8
lr = .5
da = 0.
db = 0.
try:
# display initial JSD
print('{0:>4} {1:.4f}'.format(0, float(f_rkl(nr.randn(*data.shape), data)[0])))
for epoch in range(max_epochs):
values = []
# stochastic gradient descent
for t in range(0, data.shape[1], B):
Z = nr.randn(D, B)
Y = data[:, t:t + B]
v, ga, gb = f_rkl(Z, Y)
da = mm * da - lr * ga
db = mm * db - lr * gb
values.append(v)
a.set_value(a.get_value() + da)
b.set_value(b.get_value() + db)
# reduce learning rate
lr /= 2.
# display estimated JSD
print('{0:>4} {1:.4f}'.format(epoch + 1, np.mean(values)))
except KeyboardInterrupt:
pass
return a.get_value() * np.eye(D), b.get_value()
def main(argv):
parser = ArgumentParser(argv[0],
description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--metrics', '-m', choices=['MMD', 'KL', 'RKL', 'JS', 'GJS', ''], nargs='+', default=['KL', 'RKL', 'JS', 'GJS'],
help='Which metrics to include in comparison.')
parser.add_argument('--num_data', '-N', type=int, default=100000,
help='Number of training points.')
parser.add_argument('--seed', '-s', type=int, default=22,
help='Random seed used to generate data.')
parser.add_argument('--output', '-o', type=str, default='results/',
help='Where to store results.')
args = parser.parse_args(argv[1:])
if not os.path.exists(args.output):
os.makedirs(args.output)
print('Generating data...')
D = 3
nonlog_p, log_p, data = mogaussian(D=D, N=args.num_data, seed=args.seed)
if D == 2:
plot(log_p, data)
plt.savefig(os.path.join(args.output, '{0}_data.png'.format(args.seed)))
if 'KL' in args.metrics:
print('Optimizing Kullback-Leibler divergence...')
A, b = fit_kl(data, log_p)
if D == 2:
plot([A, b], data)
plt.savefig(os.path.join(args.output, '{0}_KL.png'.format(args.seed)))
if 'RKL' in args.metrics:
print('Optimizing *reverse* Kullback-Leibler divergence...')
A, b = fit_rkl(data, log_p)
        if D == 2:
            plot([A, b], data)
            plt.savefig(os.path.join(args.output, '{0}_RKL.png'.format(args.seed)))
if 'JS' in args.metrics:
print('Optimizing Jensen-Shannon divergence...')
A, b = fit_js(data, log_p)
        if D == 2:
            plot([A, b], data)
            plt.savefig(os.path.join(args.output, '{0}_JS.png'.format(args.seed)))
if 'GJS' in args.metrics:
print('Optimizing *geometric* Jensen-Shannon divergence...')
A, b = fit_gjs(data, log_p)
        if D == 2:
            plot([A, b], data)
            plt.savefig(os.path.join(args.output, '{0}_GJS.png'.format(args.seed)))
if 'MMD' in args.metrics:
print('Optimizing MMD...')
A, b = fit_mmd(data)
        if D == 2:
            plot([A, b], data)
            plt.savefig(os.path.join(args.output, '{0}_MMD.png'.format(args.seed)))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 17,784 | 26.832551 | 136 | py |
geometric-js | geometric-js-master/notebooks/low_dim_plots.py | import matplotlib.pyplot as plt
import numpy as np
from utils import PlotParams
plotter = PlotParams()
plotter.set_params()
# tGJS dimensions
x = np.arange(1, 11)
y = [0.730, 0.790, 0.688, 0.609, 0.568, 0.547, 0.536, 0.518, 0.511, 0.507]
plt.plot(x, y, 'bo-')
plt.xlabel('Data dimension')
plt.ylabel(r'Final $\alpha$')
plt.show()
# tGJS components
x = np.arange(1, 11)
y = [0.806, 0.807, ]
plt.plot(x[:len(y)], y, 'o-')
plt.xlabel('Number of mixture components')
plt.ylabel(r'Final $\alpha$')
plt.show()
| 487 | 18.52 | 74 | py |
geometric-js | geometric-js-master/notebooks/utils.py | """Utils module for notebooks."""
import matplotlib as mpl
class PlotParams():
def __init__(self, labelsize=14):
self.labelsize = labelsize
def set_params(self):
mpl.rc('font', family='serif', size=15)
# mpl.rc('text', usetex=True)
mpl.rcParams['axes.linewidth'] = 1.3
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.minor.width'] = 1
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['xtick.minor.size'] = 5
mpl.rcParams['ytick.minor.size'] = 5
mpl.rcParams['xtick.labelsize'] = self.labelsize
mpl.rcParams['ytick.labelsize'] = self.labelsize
| 793 | 32.083333 | 56 | py |
geometric-js | geometric-js-master/notebooks/numerical_integrate.py | import abc
import json
import logging
import numpy as np
import os
import seaborn as sns
import sys
import torch
import torch.optim as optim
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from matplotlib import pyplot as plt
from scipy.stats import multivariate_normal
from torch import Tensor
from tqdm import tqdm
from typing import Any, List, Optional, Tuple
sns.set()
sns.set_style('whitegrid')
sns.set_style('ticks')
TRAIN_LOSSES_LOGFILE = "train_losses.log"
class BaseDivergence(abc.ABC):
def __init__(self,
dist_params: List,
sample_size: Optional[int] = 200,
initial_mean: Optional[float] = None,
dimensions: Optional[int] = 2,
**kwargs: Optional[Any]
) -> None:
super().__init__(**kwargs)
if initial_mean is not None:
self.mean = torch.tensor(np.array(initial_mean)[:, None]).float()
self.mean = torch.nn.Parameter(self.mean)
else:
self.mean = torch.nn.Parameter(torch.ones((dimensions, 1)).float())
self.covariance = torch.nn.Parameter(torch.eye(dimensions))
self.dist_params = dist_params
self.dimensions = dimensions
self.sample_size = sample_size
def p(self, X: Tensor) -> Tensor:
"""Additive gaussian mixture model probabilities."""
total_probability = torch.zeros(self.sample_size, 1)
for params in self.dist_params:
mean, covariance, weight = params
mean = torch.tensor(np.array([m for m in mean])[:, None]).float()
covariance = torch.tensor(covariance).float()
probabilities = self.normal(X, mean, covariance)
total_probability += weight * probabilities
return total_probability
def log_p(self, X: Tensor) -> Tensor:
return self.p(X).log()
def q(self, X: Tensor) -> Tensor:
"""Gaussian distribution."""
return self.normal(X, self.mean, self.covariance)
def log_q(self, X: Tensor) -> Tensor:
return self.log_normal(X, self.mean, self.covariance)
def normal(self, X: Tensor, m: Tensor, C: Tensor) -> Tensor:
Z = X - m
return torch.exp(
- torch.sum(Z * torch.matmul(torch.inverse(C), Z), 1) / 2.
- torch.log(torch.det(C)) / 2.
- len(m) / 2. * torch.log(2. * torch.tensor(np.pi)))
def log_normal(self, X: Tensor, m: Tensor, C: Tensor) -> Tensor:
Z = X - m
return (- torch.sum(Z * torch.matmul(torch.inverse(C), Z), 1) / 2.
- torch.log(torch.det(C)) / 2.
- len(m) / 2. * torch.log(2. * torch.tensor(np.pi)))
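    def _density_check(self, n: int = 4) -> Tensor:
        """Sketch (not used in training; assumes the current covariance is
        positive-definite): cross-check normal() against
        torch.distributions.MultivariateNormal on a few random points."""
        X = torch.randn(n, self.dimensions, 1)
        mean = self.mean.detach()
        cov = self.covariance.detach()
        ref = torch.distributions.MultivariateNormal(mean.squeeze(-1), cov)
        ref_pdf = ref.log_prob(X.squeeze(-1)).exp().unsqueeze(-1)
        return self.normal(X, mean, cov) - ref_pdf  # should be ~0 everywhere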
class fwdKL(BaseDivergence, torch.nn.Module):
def __init__(self,
train_data_file_path='./',
**kwargs):
super(fwdKL, self).__init__(**kwargs)
# Logger for storing the parameter values during training:
if not os.path.exists(f"{train_data_file_path}/fwdKL"):
print("Creating folder!!")
os.makedirs(f"{train_data_file_path}/fwdKL")
elif os.path.isfile(
f"{train_data_file_path}/fwdKL/{TRAIN_LOSSES_LOGFILE}"):
os.remove(f"{train_data_file_path}/fwdKL/{TRAIN_LOSSES_LOGFILE}")
file_path = f"{train_data_file_path}/fwdKL/{TRAIN_LOSSES_LOGFILE}"
print(f"Logging file path: {file_path}")
self.logger = logging.getLogger("losses_logger_fwdKL")
self.logger.setLevel(1) # always store
file_handler = logging.FileHandler(file_path)
file_handler.setLevel(1)
self.logger.addHandler(file_handler)
header = ",".join(["Epoch", "Loss", "Value"])
self.logger.debug(header)
def forward(self, X):
return torch.sum(self.log_q(X).exp() * (self.log_q(X) - self.log_p(X)))
def log(self, epoch, av_divergence):
"""Write to the log file."""
self.logger.debug(f"{epoch},av_div_loss,{av_divergence.item()}")
for i, m in enumerate(self.mean.detach().numpy().reshape(-1)):
self.logger.debug(f"{epoch},mean_{i+1},{m}")
for i, var in enumerate(
self.covariance.diag().detach().numpy().reshape(-1)):
self.logger.debug(f"{epoch},var_{i+1},{var}")
class revKL(BaseDivergence, torch.nn.Module):
def __init__(self,
train_data_file_path='./',
**kwargs):
super(revKL, self).__init__(**kwargs)
# Logger for storing the parameter values during training:
if not os.path.exists(f"{train_data_file_path}/revKL"):
print("Creating folder!!")
os.makedirs(f"{train_data_file_path}/revKL")
elif os.path.isfile(
f"{train_data_file_path}/revKL/{TRAIN_LOSSES_LOGFILE}"):
os.remove(f"{train_data_file_path}/revKL/{TRAIN_LOSSES_LOGFILE}")
file_path = f"{train_data_file_path}/revKL/{TRAIN_LOSSES_LOGFILE}"
print(f"Logging file path: {file_path}")
self.logger = logging.getLogger("losses_logger_revKL")
self.logger.setLevel(1) # always store
file_handler = logging.FileHandler(file_path)
file_handler.setLevel(1)
self.logger.addHandler(file_handler)
header = ",".join(["Epoch", "Loss", "Value"])
self.logger.debug(header)
def forward(self, X):
return torch.sum(self.p(X) * (self.log_p(X) - self.log_q(X)))
def log(self, epoch, av_divergence):
"""Write to the log file."""
self.logger.debug(f"{epoch},av_div_loss,{av_divergence.item()}")
for i, m in enumerate(self.mean.detach().numpy().reshape(-1)):
self.logger.debug(f"{epoch},mean_{i+1},{m}")
for i, var in enumerate(self.covariance.diag().detach().numpy().reshape(-1)):
self.logger.debug(f"{epoch},var_{i+1},{var}")
class JS(BaseDivergence, torch.nn.Module):
def __init__(self,
train_data_file_path='./',
**kwargs):
super(JS, self).__init__(**kwargs)
# Logger initialisation:
if not os.path.exists(f"{train_data_file_path}/JS"):
print("Creating folder!!")
os.makedirs(f"{train_data_file_path}/JS")
elif os.path.isfile(f"{train_data_file_path}/JS/{TRAIN_LOSSES_LOGFILE}"):
os.remove(f"{train_data_file_path}/JS/{TRAIN_LOSSES_LOGFILE}")
file_path = f"{train_data_file_path}/JS/{TRAIN_LOSSES_LOGFILE}"
print(f"Logging file path: {file_path}")
self.logger = logging.getLogger("losses_logger_JS")
self.logger.setLevel(1) # always store
file_handler = logging.FileHandler(file_path)
file_handler.setLevel(1)
self.logger.addHandler(file_handler)
header = ",".join(["Epoch", "Loss", "Value"])
self.logger.debug(header)
def forward(self, sample_data):
loss = 0.5 * torch.sum(self.q(sample_data) * (self.log_q(sample_data) \
- (0.5 * self.p(sample_data) + 0.5 * self.q(sample_data)).log())) \
+ 0.5 * torch.sum(self.p(sample_data) * (self.log_p(sample_data)
- (0.5 * self.p(sample_data) + 0.5 * self.q(sample_data)).log()))
return loss
def log(self, epoch, av_divergence):
"""Write to the log file."""
self.logger.debug(f"{epoch},av_div_loss,{av_divergence.item()}")
for i, m in enumerate(self.mean.detach().numpy().reshape(-1)):
self.logger.debug(f"{epoch},mean_{i+1},{m}")
for i, var in enumerate(
self.covariance.diag().detach().numpy().reshape(-1)):
self.logger.debug(f"{epoch},var_{i+1},{var}")
class GJS(BaseDivergence, torch.nn.Module):
def __init__(self,
alpha=0.5,
dual=False,
train_data_file_path='./',
**kwargs):
super(GJS, self).__init__(**kwargs)
self.alpha = alpha
self.dual = dual
# Logger for storing the parameter values during training:
log_folder = f"{train_data_file_path}/{'GJS' if dual == False else 'dGJS'}-A_0={alpha}"
log_file = f"{train_data_file_path}/{'GJS' if dual == False else 'dGJS'}-A_0={alpha}/{TRAIN_LOSSES_LOGFILE}"
print(f"Logging file path: {log_file}")
if not os.path.exists(log_folder):
print("Creating folder!!")
os.makedirs(log_folder)
elif os.path.isfile(log_file):
os.remove(log_file)
self.logger = logging.getLogger("losses_logger_GJS")
self.logger.setLevel(1) # always store
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(1)
self.logger.addHandler(file_handler)
header = ",".join(["Epoch", "Loss", "Value"])
self.logger.debug(header)
def forward(self, sample_data):
qx = self.q(sample_data)
px = self.p(sample_data)
if self.dual:
KL_1 = torch.sum(qx.pow(self.alpha) * px.pow(1 - self.alpha) * (px.log() - qx.log()))
KL_2 = torch.sum(qx.pow(self.alpha) * px.pow(1 - self.alpha) * (qx.log() - px.log()))
loss = ((1 - self.alpha) ** 2) * KL_1 + (self.alpha ** 2) * KL_2
else:
KL_1 = torch.sum(qx * (qx.log() - px.log()))
KL_2 = torch.sum(px * (px.log() - qx.log()))
loss = ((1 - self.alpha) ** 2) * KL_1 + (self.alpha ** 2) * KL_2
return loss
def log(self, epoch, av_divergence):
"""Write to the log file."""
self.logger.debug(f"{epoch},alpha,{self.alpha}")
self.logger.debug(f"{epoch},av_div_loss,{av_divergence.item()}")
for i, m in enumerate(self.mean.detach().numpy().reshape(-1)):
self.logger.debug(f"{epoch},mean_{i+1},{m}")
for i, var in enumerate(self.covariance.diag().detach().numpy().reshape(-1)):
self.logger.debug(f"{epoch},var_{i+1},{var}")
class GJSTrainAlpha(BaseDivergence, torch.nn.Module):
def __init__(self,
alpha: Optional[float] = 0.5,
dual: Optional[bool] = False,
train_data_file_path: Optional[str] = './',
**kwargs: Optional[Any]
) -> None:
super(GJSTrainAlpha, self).__init__(**kwargs)
self.a = torch.nn.Parameter(torch.tensor(alpha))
self.dual = dual
folder_name = f"{'tGJS' if dual == False else 'tdGJS'}-A_0={alpha}"
log_folder = os.path.join(train_data_file_path, folder_name)
log_file = os.path.join(log_folder, TRAIN_LOSSES_LOGFILE)
print(f"Logging file path: {log_file}")
if not os.path.exists(log_folder):
print("Creating folder!!")
os.makedirs(log_folder)
elif os.path.isfile(log_file):
os.remove(log_file)
self.logger = logging.getLogger(f"losses_logger_{folder_name}")
self.logger.setLevel(1) # always store
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(1)
self.logger.addHandler(file_handler)
header = ",".join(["Epoch", "Loss", "Value"])
self.logger.debug(header)
def forward(self, sample_data: torch.Tensor) -> torch.Tensor:
lqx = self.log_q(sample_data)
lpx = self.log_p(sample_data)
if self.dual:
kl_1 = torch.mean(
lqx.exp() ** self.a * lpx.exp() ** (1 - self.a) * (lpx - lqx))
kl_2 = torch.mean(
lqx.exp() ** self.a * lpx.exp() ** (1 - self.a) * (lqx - lpx))
loss = (1 - self.a) ** 2 * kl_1 + self.a ** 2 * kl_2
else:
kl_1 = torch.mean(lqx.exp() * (lpx - lqx))
kl_2 = torch.mean(lpx.exp() * (lpx - lqx))
loss = (1 - self.a) ** 2 * kl_1 + self.a ** 2 * kl_2
return loss
def log(self, epoch, av_divergence):
self.logger.debug(f"{epoch},alpha,{self.a.item()}")
self.logger.debug(f"{epoch},av_div_loss,{av_divergence.item()}")
for i, m in enumerate(self.mean.detach().numpy()):
self.logger.debug(f"{epoch},mean_{i+1},{m}")
for i, var in enumerate(
self.covariance.diag().detach().numpy()):
self.logger.debug(f"{epoch},var_{i+1},{var}")
def get_sample_data(save_loc: str,
seed: Optional[int] = 1234,
dimensions: Optional[int] = 2,
num_gaussians: Optional[int] = 5,
num_samples: Optional[int] = 100000,
save: Optional[bool] = False
) -> Tuple[List, np.ndarray]:
"""
Function which produces a ND add-mixture of Gaussian distribution, with
num_gaussians number of gaussian components, each of which has parameters
radomly generated. The parameters of each component are the mean,
covariance and weight.
Returns
-------
dist_parms : list
Contains the parameters of each component in the mixture distribution.
Each entry in the list is a vector containing the average coordinates,
the covariance matrix and weight of the Gaussan component.
data_sampes : numpy array
Array of shape (num_samples, 2) of data coordinates randomly sampled
from the mixture distribution.
"""
np.random.seed(seed)
p = np.random.dirichlet([0.5] * num_gaussians)
v = 1 / np.square(np.random.rand(num_gaussians) + 1)
C = [np.eye(dimensions) * v_i for v_i in v]
dist_params = [[[np.random.rand() * 6 - 3 for i in range(dimensions)],
covariance, weight] for covariance, weight in zip(C, p)]
data_samples = np.concatenate(
[multivariate_normal.rvs(mean=m,
cov=covariance,
size=max(int(num_samples * weight), 2))
for m, covariance, weight in dist_params])
if save and dimensions == 2:
x, y = np.mgrid[-6:6:.01, -6:6:.01]
pos = np.dstack((x, y))
pdf = np.array([weight * multivariate_normal(mean, covariance).pdf(pos)
for mean, covariance, weight in dist_params])
fig = plt.figure(figsize=(10, 10))
ax2 = fig.add_subplot(111)
ax2.contour(x, y, pdf.sum(axis=0)/pdf.sum(axis=0).sum())
plt.scatter(data_samples[:, 0], data_samples[:, 1], s=0.01, c='k')
if not os.path.exists(f"{save_loc}"):
os.makedirs(f"{save_loc}")
plt.axis('equal')
plt.axis([np.min(data_samples[:, 0]),
np.max(data_samples[:, 0]),
np.min(data_samples[:, 1]),
np.max(data_samples[:, 1])])
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.gcf().tight_layout()
plt.savefig(f"{save_loc}/mixture-distribution.png", dpi=200)
return dist_params, data_samples
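def _get_sample_data_sketch():
    """Sketch (illustrative arguments only): draw 10k points from a random
    3-component 2-D mixture without saving any plot."""
    params, samples = get_sample_data(save_loc='./figs-sketch', seed=0,
                                      num_gaussians=3, num_samples=10000,
                                      save=False)
    return len(params), samples.shape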
def train_model(model,
optimizer,
data_samples,
log_loc,
learn_a: Optional[bool] = False,
dimensions: Optional[int] = 2,
epochs: Optional[int] = 20,
sample_size: Optional[int] = 200,
name: Optional[str] = None,
save_loc: Optional[str] = None,
frequency: Optional[int] = 2
) -> None:
"""
Trains a passed model to learn the parameters of a
multivariate Gaussian distribution to approximate a mixture multivariate
Gaussian.
"""
for e in range(epochs):
divergence = 0
np.random.shuffle(data_samples)
print(f"Epoch {e + 1}")
for i in tqdm(range(int(len(data_samples) / sample_size))):
            batch = data_samples[i * sample_size:(i + 1) * sample_size]
            sample = torch.tensor(batch).view(sample_size, dimensions, 1).float()
loss = model(sample)
optimizer.zero_grad()
loss.backward()
optimizer.step()
divergence += loss.detach()
with torch.no_grad():
if learn_a:
model.a.clamp_(0, 1)
# Enforce the covariance matrix to diagonal:
for i in range(dimensions):
for j in range(dimensions):
if i == j:
continue
else:
model.covariance[i][j] = 0.0
model.log(e, divergence / len(data_samples) / sample_size)
if save_loc is not None and name is not None and dimensions == 2:
x, y = np.mgrid[-6:6:.01, -6:6:.01]
pos = np.dstack((x, y))
pdf = multivariate_normal(
model.mean.detach().numpy().reshape(-1),
model.covariance.detach().numpy()).pdf(pos)
fig = plt.figure(figsize=(10, 10))
ax2 = fig.add_subplot(111)
ax2.contour(x, y, pdf)
plt.scatter(data_samples[:, 0], data_samples[:, 1], s=0.01, c='k')
plt.axis('equal')
plt.axis([
np.min(data_samples[:, 0]),
np.max(data_samples[:, 0]),
np.min(data_samples[:, 1]),
np.max(data_samples[:, 1])])
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.gcf().tight_layout()
plt.savefig(f"{save_loc}/fitted-dist-{name}.png", dpi=200)
# Save learnt parameters:
learnt_dist = {}
if learn_a:
learnt_dist["alpha"] = str(model.a.item())
elif hasattr(model, 'alpha'):
learnt_dist["alpha"] = str(model.alpha)
for i, m in enumerate(model.mean.detach().numpy()):
learnt_dist[f"mean_{i + 1}"] = str(m)
for i, var in enumerate(model.covariance.diag().detach().numpy()):
learnt_dist[f"var_{i+1}"] = str(var)
with open(f'{log_loc}/{name}/final-parameters.txt', 'w') as f:
json.dump(learnt_dist, f)
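def _train_model_sketch():
    """Sketch (illustrative defaults, not part of the reported experiments):
    fit a 2-D Gaussian to a small random mixture by minimising the
    trainable-alpha GJS objective for a single epoch. Paths are arbitrary."""
    params, samples = get_sample_data(save_loc='./figs-sketch', save=False,
                                      num_samples=2000)
    model = GJSTrainAlpha(dist_params=params,
                          train_data_file_path='./logs-sketch')
    opt = optim.Adam(model.parameters(), lr=1e-3)
    train_model(model, opt, samples, log_loc='./logs-sketch', learn_a=True,
                epochs=1, name='tGJS-A_0=0.5')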
def main(argv):
parser = ArgumentParser(argv[0],
description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--metric', '-m',
choices=['fwdKL', 'revKL', 'JS', 'GJS', 'dGJS', 'tGJS', 'tdGJS'],
help='Which metric to optimise.')
parser.add_argument(
'--num-data', '-N', type=int, default=100000,
help='Number of points in the sampled data from the mixture.')
parser.add_argument(
'--sample-size', '-S', type=int, default=200,
help='Number of points in the each training batch.')
parser.add_argument(
'--num-mixture', type=int, default=5,
help='Number of Gaussian components in the mixture distribution.')
parser.add_argument(
'--dimensions', type=int, default=2,
help='Dimensions of the Gaussian distributions being modelled.')
parser.add_argument(
'--seed', '-s', type=int, default=22,
help='Random seed used to generate data.')
parser.add_argument(
'--A0', type=float, default=0.5,
help='Initial value of alpha if training GJS or dGJS.')
parser.add_argument(
'--lr', '--learning-rate', type=float, default=1e-4,
help='Learning rate.')
parser.add_argument(
'--epochs', type=int, default=20,
help='Number of training epochs.')
parser.add_argument(
'--plot-output', type=str, default=os.path.join(os.pardir, 'figs'),
help='Where to store plots produced.')
parser.add_argument(
'--train-log-output', '-o', type=str,
default=os.path.join(os.pardir, 'logs'),
help='Where to store log of data produced during training.')
parser.add_argument(
'--save-mixture-plot', type=bool, default=True,
help='Where to store results.')
args = parser.parse_args(argv[1:])
plot_loc = f"{args.plot_output}/{args.seed}-{args.dimensions}" + \
f"-{args.num_mixture}-{args.lr}-{args.epochs}"
log_loc = f"{args.train_log_output}/{args.seed}-{args.dimensions}" + \
f"-{args.num_mixture}-{args.lr}-{args.epochs}"
name = f"{args.metric}"
if not os.path.exists(plot_loc):
os.makedirs(plot_loc)
if not os.path.exists(log_loc):
os.makedirs(log_loc)
print('Generating data...')
dist_params, data_samples = get_sample_data(dimensions=args.dimensions,
save_loc=plot_loc,
save=True,
seed=args.seed,
num_gaussians=args.num_mixture,
num_samples=args.num_data)
if args.metric == 'tGJS' or args.metric == 'tdGJS':
print('Optimizing trainable-alpha GJS divergence...')
model = GJSTrainAlpha(dist_params=dist_params,
dimensions=args.dimensions,
sample_size=args.sample_size,
dual=args.metric == 'tdGJS',
train_data_file_path=log_loc,
alpha=args.A0)
name = f"{args.metric}-A_0={args.A0}"
if args.metric == 'dGJS' or args.metric == 'GJS':
print('Optimizing constant-alpha GJS divergence...')
model = GJS(dist_params=dist_params,
dimensions=args.dimensions,
sample_size=args.sample_size,
dual=args.metric == 'dGJS',
train_data_file_path=log_loc,
                    alpha=args.A0)
        name = f"{args.metric}-A_0={args.A0}"
if args.metric == 'JS':
print('Optimizing Jensen-Shannon divergence...')
model = JS(dist_params=dist_params,
dimensions=args.dimensions,
sample_size=args.sample_size,
train_data_file_path=log_loc)
if args.metric == 'revKL':
print('Optimizing Reverse Kullback-Leibler divergence...')
model = revKL(dist_params=dist_params,
dimensions=args.dimensions,
sample_size=args.sample_size,
train_data_file_path=log_loc)
if args.metric == 'fwdKL':
print('Optimizing Kullback-Leibler divergence...')
model = fwdKL(dist_params=dist_params,
dimensions=args.dimensions,
sample_size=args.sample_size,
train_data_file_path=log_loc)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
train_model(model,
optimizer,
data_samples,
log_loc=log_loc,
learn_a=args.metric[0] == 't',
dimensions=args.dimensions,
epochs=args.epochs,
sample_size=args.sample_size,
name=name,
save_loc=plot_loc)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 22,844 | 38.387931 | 116 | py |
geometric-js | geometric-js-master/notebooks/__init__.py | 0 | 0 | 0 | py |
|
geometric-js | geometric-js-master/notebooks/generative.py | import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pdb
from tqdm import tqdm
import argparse
import pandas as pd
import sys
BASE_DIR = os.path.dirname(os.getcwd())
sys.path.append(BASE_DIR)
sys.path.append('/home/tam63/geometric-js')
import torch
import scipy.stats
from scipy.stats import norm
from scipy.special import logsumexp
from vae.utils.modelIO import save_model, load_model, load_metadata
from notebooks.utils import PlotParams
# from utils.helpers import (create_safe_directory, get_device, set_seed,
# get_n_param)
TRAIN_MODELS_DIR = "/home/tam63/results/alpha-experiments"
DATA_DIR = "/home/tam63/geometric-js/data"
SAVE_DIR = "/home/tam63/figures/alpha-experiments"
def parse_arguments(args_to_parse):
"""Parse the command line arguments.
Parameters
----------
args_to_parse: list of str
        Arguments to parse (split on whitespace).
"""
description = "PyTorch implementation and evaluation of Variational" + \
"AutoEncoders and metrics."
parser = argparse.ArgumentParser(description=description)
# General options
general = parser.add_argument_group('General options')
general.add_argument('--dataset', type=str, choices=['mnist', 'fashion', 'dsprites'],
help="Name of the dataset being plotted.")
general.add_argument('--divergence', type=str, choices=['dGJS', 'GJS', 'both'],
help="Type of geometric-JS divergence to be plotted on comparison plot.")
general.add_argument('--model-loc', type=str,
help="Location of the trained models to be used to generate plots.")
args = parser.parse_args(args_to_parse)
print(args)
return args
def bootstrap(x, low, high, n_samples):
mu = x.mean()
n = len(x)
X = np.random.choice(x, size=n_samples*n).reshape(n_samples, n)
mu_star = X.mean(axis=1)
d_star = np.sort(mu_star - mu)
return mu, mu + d_star[int(low*n_samples)], mu + d_star[int(high*n_samples)]
def compute_samples(model, data, num_samples, debug=False):
"""
Description
---------------------------------------------------------------
Sample from importance distribution z_samples ~ q(z|X) and
compute p(z_samples), q(z_samples) for importance sampling
Inputs
---------------------------------------------------------------
model : pytorch nn.Module
                    VAE model implemented in PyTorch which has been
trained on the training data corresponding to the
passed test data, which is contained in the variable
'data'.
data : pytorch Tensor
Tensor of shape [batch_size, 1, im_size, im_size],
where im_size is the dimension size of the images used
to train the model, and batch size is the number of
data instances passed, which is therefore also the
number of estimates of the probability distribution
which will be produced.
num_samples : int
For each passed data instance, the probability
distribution p(x|z) will be estimated using a monte
carlo integration with num_samples samples.
    Returns
    ---------------------------------------------------------------
    z_samples, pz, qz : numpy arrays
            Arrays of shape [batch_size, num_samples, latent_dim]
            containing, for each input image, the latent samples drawn
            from the variational posterior q(z|x) (z_samples), together
            with the probability densities of those samples under the
            standard-normal prior (pz) and under the posterior q(z|x)
            itself (qz).
    """
data = data.cuda()
z_mean, z_log_sigma = model.encoder(data)
z_mean = z_mean.cpu().detach().numpy()
z_log_sigma = z_log_sigma.cpu().detach().numpy()
z_samples = []
qz = []
for m, s in zip(z_mean, z_log_sigma):
        # len(m) = len(s) = 10 = size of the latent space dimension
#
# z_vals is num_samples (= 128) samples drawn from the normal
# distribution defined by the mean and std (m[i], s[i])
#
# qz_vals is the normal distribution defined by the samples
# in the vector z_vals
z_vals = [np.random.normal(m[i], np.exp(s[i]), num_samples) for i in range(len(m))]
qz_vals = [norm.pdf(z_vals[i], loc=m[i], scale=np.exp(s[i])) for i in range(len(m))]
z_samples.append(z_vals)
qz.append(qz_vals)
z_samples = np.array(z_samples)
pz = norm.pdf(z_samples)
qz = np.array(qz)
# pdb.set_trace()
    # Reorder each array from [batch, latent_dim, num_samples] to
    # [batch, num_samples, latent_dim] so that axis 1 indexes the samples.
z_samples = np.swapaxes(z_samples, 1, 2)
pz = np.swapaxes(pz, 1, 2)
qz = np.swapaxes(qz, 1, 2)
return z_samples, pz, qz
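# Illustrative shapes (assuming a hypothetical 10-d latent space): for a batch
# of 100 images and num_samples=128, compute_samples returns three arrays of
# shape (100, 128, 10), ready for the importance-sampling estimate below.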
def estimate_logpx_batch(model, data, num_samples, debug=False, digit_size=32):
"""
"""
z_samples, pz, qz = compute_samples(model, data, num_samples)
assert len(z_samples) == len(data)
assert len(z_samples) == len(pz)
assert len(z_samples) == len(qz)
z_samples = torch.tensor(z_samples).float().cuda()
result = []
for i in range(len(data)):
x_predict = model.decoder(z_samples[i]).reshape(-1, digit_size ** 2)
x_predict = x_predict.cpu().detach().numpy()
x_predict = np.clip(x_predict, np.finfo(float).eps, 1. - np.finfo(float).eps)
p_vals = pz[i]
q_vals = qz[i]
# pdb.set_trace()
        datum = data[i].cpu().reshape(digit_size ** 2).numpy()
# \log p(x|z) = Binary cross entropy
logp_xz = np.sum(datum * np.log(x_predict + 1e-9) + (1. - datum) * np.log(1.0 - x_predict + 1e-9), axis=-1)
logpz = np.sum(np.log(p_vals + 1e-9), axis=-1)
logqz = np.sum(np.log(q_vals + 1e-9), axis=-1)
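        # Importance-sampling estimate of the marginal likelihood:
        #   p(x) ~= (1/S) * sum_s p(x|z_s) p(z_s) / q(z_s|x),  z_s ~ q(z|x)
        # hence log p(x) ~= -log S + logsumexp_s( log p(x|z_s) + log p(z_s) - log q(z_s|x) )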
argsum = logp_xz + logpz - logqz
logpx = -np.log(num_samples + 1e-9) + logsumexp(argsum)
result.append(logpx)
return np.array(result)
def estimate_logpx(model, data, num_samples, verbosity=0, digit_size=32):
batches = []
iterations = int(np.ceil(1. * len(data) / 100))
for b in tqdm(range(iterations)):
batch_data = data[b * 100:(b + 1) * 100]
batches.append(estimate_logpx_batch(model, batch_data, num_samples, digit_size=digit_size))
if verbosity and b % max(11 - verbosity, 1) == 0:
print("Batch %d [%d, %d): %.2f" % (b, b * 100, (b+1) * 100, np.mean(np.concatenate(batches))))
log_probs = np.concatenate(batches)
mu, lb, ub = bootstrap(log_probs, 0.025, 0.975, 1000)
return mu, lb, ub
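# Illustrative usage (assumes a trained VAE `model` and a tensor `X_test` of
# shape [N, 1, digit_size, digit_size]; both names are placeholders):
#
#   mu, lb, ub = estimate_logpx(model, X_test, num_samples=128, digit_size=32)
#   print("log p(x) = %.2f (%.2f, %.2f)" % (mu, lb, ub))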
def main(args):
device = 'cuda'
plotter = PlotParams()
plotter.set_params()
DATA_DIR = os.path.join(os.pardir, 'data')
FIG_DIR = os.path.join(os.pardir, 'figs')
RES_DIR = os.path.join(os.pardir, 'results')
# 1) select dataset to load:
if args.dataset == 'dsprites':
X_test = np.load(os.path.join(DATA_DIR, 'dsprites', 'dsprite_train.npz'))['imgs']
X_test = torch.tensor(X_test).unsqueeze(1).float() / 255.0
digit_size = 64
X_test = X_test[:10000]
X_test = X_test.to(device)
elif args.dataset == 'fashion':
X_test = torch.load(os.path.join(DATA_DIR, 'fashionMnist', 'FashionMNIST', 'processed', 'test.pt'))
digit_size = 32
X_test = X_test[0].unsqueeze(1).float() / 255.0
X_test = torch.nn.functional.pad(X_test, pad=(2, 2, 2, 2))
X_test = X_test[:10000]
X_test = X_test.to(device)
elif args.dataset == 'mnist':
X_test = torch.load(os.path.join(DATA_DIR, 'mnist', 'MNIST', 'processed', 'test.pt'))
digit_size = 32
X_test = X_test[0].unsqueeze(1).float() / 255.0
X_test = torch.nn.functional.pad(X_test, pad=(2, 2, 2, 2))
X_test = X_test[:10000]
X_test = X_test.to(device)
# 2) Get the trained alpha dGJS probabilities:
av_a = []
log_probs_lb = []
log_probs_ub = []
log_probs_mu = []
log_probs_best = -np.inf
if args.divergence in ['GJS', 'dGJS']:
divergence = args.divergence
for initial_a in [i/10 for i in range(11)]:
model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/{divergence}-A_0={initial_a}"
model = load_model(model_path)
logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
log_probs_mu += [logpx_mu]
log_probs_lb += [logpx_lb]
log_probs_ub += [logpx_ub]
if logpx_mu > log_probs_best:
model_best = model_path
log_probs_best = logpx_mu
# break
print(model_path)
print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# 3) Get the comparison divergences probabilities:
av_a_i = []
log_probs_lb_i = []
log_probs_ub_i = []
log_probs_mu_i = []
log_probs_best_i = -np.inf
model_names = []
# KL:
model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/KL"
model = load_model(model_path)
logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
log_probs_mu_i += [logpx_mu]
log_probs_lb_i += [logpx_lb]
log_probs_ub_i += [logpx_ub]
model_names.append("KL")
# break
print(model_path)
print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# fwdKL:
model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/fwdKL"
model = load_model(model_path)
logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
log_probs_mu_i += [logpx_mu]
log_probs_lb_i += [logpx_lb]
log_probs_ub_i += [logpx_ub]
model_names.append("fwdKL")
# break
print(model_path)
print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# MMD:
model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/MMD"
model = load_model(model_path)
logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
log_probs_mu_i += [logpx_mu]
log_probs_lb_i += [logpx_lb]
log_probs_ub_i += [logpx_ub]
model_names.append("MMD")
# break
print(model_path)
print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# no-constraint:
# model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/no-constraint"
# model = load_model(model_path)
# logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
# log_probs_mu_i += [logpx_mu]
# log_probs_lb_i += [logpx_lb]
# log_probs_ub_i += [logpx_ub]
# model_names.append("no-constraint")
# print(model_path)
# print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))
# 4) Plot:
fig = plt.figure(figsize=(10, 10))
yerr_bar = np.array(log_probs_ub) - np.array(log_probs_lb)
yerr_bar_i = np.array(log_probs_ub_i) - np.array(log_probs_lb_i)
initial_a = [i/10 for i in range(11)]
plt.errorbar(initial_a, log_probs_mu, yerr=yerr_bar, label=args.divergence)
for i in range(len(model_names)):
plt.errorbar(initial_a, [log_probs_mu_i[i]] * len(initial_a), yerr=[yerr_bar_i[i]] * len(initial_a), label=model_names[i])
plt.xlabel(r'Initial $\alpha$')
plt.ylabel(r'$\log(p_{\theta}(X))$')
plt.legend()
plt.title("Log model evidence vs initial alpha")
plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance.pdf")
plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance.png", dpi=200)
# save tight layout version:
fig = plt.figure(figsize=(10, 10))
yerr_bar = np.array(log_probs_ub) - np.array(log_probs_lb)
yerr_bar_i = np.array(log_probs_ub_i) - np.array(log_probs_lb_i)
initial_a = [i/10 for i in range(11)]
plt.errorbar(initial_a, log_probs_mu, yerr=yerr_bar, label=args.divergence)
for i in range(len(model_names)):
plt.errorbar(initial_a, [log_probs_mu_i[i]] * len(initial_a), yerr=[yerr_bar_i[i]] * len(initial_a), label=model_names[i])
plt.xlabel(r'Initial $\alpha$')
plt.ylabel(r'$\log(p_{\theta}(X))$')
plt.legend()
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance-tight-layout.pdf")
plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance-tight-layout.png", dpi=200)
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main(args) | 13,028 | 34.598361 | 130 | py |
OCDetector | OCDetector-master/EBDetector/TimeDomainFeatures/Conditional-Entropy-master/conditional_entropy.py | from argparse import ArgumentParser, FileType
import os
import numpy as np
def get_args():
    parser = ArgumentParser(description='Program to calculate the period of '
                                        'a star using the Conditional '
                                        'Entropy method')
parser.add_argument('-i', type=str, default=None,
help='Location or file of stellar data')
parser.add_argument('-o', type=str, default=None,
help='Location for output files')
    parser.add_argument('-minp', '--min-period', type=float, default=0.1,
                        help='Minimum period to search '
                             '(default = 0.1)')
    parser.add_argument('-maxp', '--max-period', type=float, default=32.0,
                        help='Maximum period to search '
                             '(default = 32.0)')
    parser.add_argument('-precision', type=float, default=0.0001,
                        help='Step between periods '
                             '(default = 0.0001)')
    parser.add_argument('-p_bins', '--phase-bins', type=int, default=10,
                        help='Number of phase bins '
                             '(default = 10)')
    parser.add_argument('-m_bins', '--mag-bins', type=int, default=5,
                        help='Number of magnitude bins '
                             '(default = 5)')
args = parser.parse_args()
return args
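# Illustrative invocation (the light-curve path is hypothetical; each input
# file is expected to contain at least two whitespace-separated columns,
# time and magnitude):
#
#   python conditional_entropy.py -i lightcurves/ -o results/ \
#       -minp 0.1 -maxp 32.0 -precision 0.0001 -p_bins 10 -m_bins 5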
def get_files(data):
'''
Get the correct path of file or files inside a directory
'''
if os.path.isfile(data):
#if os.path.isfile(os.path.abspath(os.path.basename(data)))
return [data]
#os.chdir(os.path.dirname(os.path.abspath(data)))
#return os.path.basename(data)
#elif os.path.isdir(os.path.join(os.getcwd(),data)):
#path = os.path.join(os.getcwd(),data)
elif os.path.isdir(os.path.abspath(data)):
path = os.path.abspath(data)
os.chdir(path)
list_of_files = os.listdir(path)
return sorted(list_of_files)
def out_dir(out):
'''
check if the output path is relative or not and return the absolute path
'''
if out == None:
return os.getcwd()
elif os.path.isdir(out):
return os.path.abspath(out)
def rephase(data, period, shift=0.0, col=0, copy=True):
'''
transform the time of observations to the phase space
'''
rephased = np.ma.array(data, copy=copy)
rephased[:, col] = get_phase(rephased[:, col], period, shift)
return rephased
def get_phase(time, period, shift=0.0):
'''
divide the time of observations by the period
'''
return (time / period - shift) % 1
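# Worked example: an observation at t = 5.3 d folded on a trial period of
# 2.0 d gives 5.3 / 2.0 = 2.65, so the phase is 2.65 % 1 = 0.65.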
def normalization(data):
'''
Normalize the magnitude of the star
'''
norm = np.ma.copy(data)
norm[:,1] = (norm[:,1] - np.min(norm[:,1])) \
/ (np.max(norm[:,1]) - np.min(norm[:,1]))
return norm
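# For example, magnitudes [12.0, 12.5, 13.0] are rescaled to [0.0, 0.5, 1.0],
# so the magnitude axis always spans the [0, 1] range used by the
# phase-magnitude histogram in cond_entropy below.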
def periods_array(min_period, max_period, precision):
'''
Creates a period array from min_period to max_period with a
step equals to precision
'''
period = np.arange(min_period, max_period+precision, precision)
return period
def cond_entropy(period, data, p_bins=10, m_bins=5):
'''
Compute the conditional entropy for the normalized observations
'''
if period <= 0:
        return np.inf  # non-positive trial period: treat as infinitely bad
r = rephase(data, period)
bins, *_ = np.histogram2d(r[:,0], r[:,1], [p_bins, m_bins],
[[0,1], [0,1]])
size = r.shape[0]
if size > 0:
divided_bins = bins / size
arg_positive = divided_bins > 0
        column_sums = np.sum(divided_bins, axis=1)  # marginal p(phi_i): sum over magnitude bins
column_sums = np.repeat(np.reshape(column_sums,(p_bins,1)),
m_bins, axis=1)
#column_sums = np.repeat(np.reshape(column_sums, (1,-1)),
# p_bins, axis=0)
select_divided_bins = divided_bins[arg_positive]
select_column_sums = column_sums[arg_positive]
A = np.empty((p_bins, m_bins), dtype=float)
        # p(phi_i, m_j) * log( p(phi_i) / p(phi_i, m_j) ) for each occupied bin
A[ arg_positive] = select_divided_bins \
* np.log(select_column_sums / select_divided_bins)
A[~arg_positive] = 0
return np.sum(A)
else:
        return np.inf  # empty light curve: treat as infinitely bad
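# The quantity returned above is the conditional entropy
#
#   H(m|phi) = sum_{i,j} p(phi_i, m_j) * log( p(phi_i) / p(phi_i, m_j) ),
#
# which is minimised when the data are folded on the true period (main()
# selects the trial period with the smallest value). Illustrative usage,
# assuming `norm` is a normalised light curve as produced by normalization()
# and the trial period is arbitrary:
#
#   h = cond_entropy(0.7365, norm, p_bins=10, m_bins=5)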
def main():
args = get_args()
files = get_files(args.i)
out = out_dir(args.o)
ce = []
periods = periods_array(args.min_period, args.max_period,
args.precision)
for star in files:
#print(star)
ent_data = []
ce_period = []
data = np.ma.array(np.loadtxt(star), mask=None, dtype=float)
norm = normalization(data)
for p in periods:
ent_data.append(cond_entropy(p, norm, args.phase_bins,
args.mag_bins))
ce_period.append(p)
np.savetxt(os.path.join(out,
'entropies_'+os.path.basename(star)+'.txt'),
np.dstack((ce_period, ent_data))[0],
fmt='%s')
        right_period = star, ce_period[np.argmin(ent_data)]  # TODO: use os.path.basename(star) here
ce.append(right_period)
np.savetxt(os.path.join(out, 'results.dat'), ce, fmt='%s')
if __name__ == "__main__":
exit(main())
| 4,648 | 26.508876 | 91 | py |