| repo (string, lengths 2–91) | file (string, lengths 14–211) | code (string, lengths 0–18.3M) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–1.36M) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
sgx-lkl-oe_port | sgx-lkl-oe_port/samples/languages/python/src/python-helloworld.py | import numpy as np
print("Confidential Computing using SGX-LKL in Python with NumPy... ")
print(np.arange(10000).reshape(100, 100))
| 134 | 21.5 | 70 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/samples/ml/pytorch/app/sample.py | # Tutorial code from https://pytorch.org/tutorials/beginner/pytorch_with_examples.html#pytorch-nn.
import torch
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Use the nn package to define our model as a sequence of layers. nn.Sequential
# is a Module which contains other Modules, and applies them in sequence to
# produce its output. Each Linear Module computes output from input using a
# linear function, and holds internal Tensors for its weight and bias.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H), torch.nn.ReLU(), torch.nn.Linear(H, D_out),
)
# The nn package also contains definitions of popular loss functions; in this
# case we will use Mean Squared Error (MSE) as our loss function.
loss_fn = torch.nn.MSELoss(reduction="sum")
learning_rate = 1e-4
print("Training...")
for t in range(500):
# Forward pass: compute predicted y by passing x to the model. Module objects
# override the __call__ operator so you can call them like functions. When
# doing so you pass a Tensor of input data to the Module and it produces
# a Tensor of output data.
y_pred = model(x)
# Compute and print loss. We pass Tensors containing the predicted and true
# values of y, and the loss function returns a Tensor containing the
# loss.
loss = loss_fn(y_pred, y)
if t % 100 == 99:
print(t, loss.item())
# Zero the gradients before running the backward pass.
model.zero_grad()
# Backward pass: compute gradient of the loss with respect to all the learnable
# parameters of the model. Internally, the parameters of each Module are stored
# in Tensors with requires_grad=True, so this call will compute gradients for
# all learnable parameters in the model.
loss.backward()
# Update the weights using gradient descent. Each parameter is a Tensor, so
# we can access its gradients like we did before.
with torch.no_grad():
for param in model.parameters():
param -= learning_rate * param.grad
print("Done")
| 2,215 | 37.877193 | 98 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/samples/ml/tensorflow/app/benchmark/mnist_lenet.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible; it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import gzip
import os
import sys
import time
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
WORK_DIRECTORY = "/app/data/mnist"
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64 # original: 64
NUM_EPOCHS = 10 # original: 10
EVAL_BATCH_SIZE = 64 # original: 64
EVAL_FREQUENCY = 10 # Number of steps between evaluations.
FLAGS = None
def data_type():
"""Return the type of the activations, weights, and placeholder variables."""
if FLAGS.use_fp16:
return tf.float16
else:
return tf.float32
def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print("Using file", filename)
with open(filename, "rb") as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
return data
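# A worked example of the rescaling above (assuming standard IDX-format image
# files, whose 16-byte header is skipped by the read(16) call):
# raw pixel 0 -> (0 - 127.5) / 255 = -0.5
# raw pixel 255 -> (255 - 127.5) / 255 = +0.5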
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print("Using file", filename)
with open(filename, "rb") as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
return labels
def fake_data(num_images):
"""Generate a fake dataset that matches the dimensions of MNIST."""
data = numpy.ndarray(
shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS), dtype=numpy.float32
)
labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
for image in xrange(num_images):
label = image % 2
data[image, :, :, 0] = label - 0.5
labels[image] = label
return data, labels
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (
100.0 * numpy.sum(numpy.argmax(predictions, 1) == labels) / predictions.shape[0]
)
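# Example: for predictions of shape (100, NUM_LABELS), if numpy.argmax matches
# the label on 95 of the 100 rows, error_rate returns 100.0 - 95.0 = 5.0 (percent).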
def main(_):
if FLAGS.self_test:
print("Running self-test.")
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download("train-images-idx3-ubyte")
train_labels_filename = maybe_download("train-labels-idx1-ubyte")
test_data_filename = maybe_download("t10k-images-idx3-ubyte")
test_labels_filename = maybe_download("t10k-labels-idx1-ubyte")
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
data_type(), shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
)
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
data_type(), shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
)
# The variables below hold all the trainable weights. They are passed an
# initial value which will be assigned when we call:
# {tf.global_variables_initializer().run()}
conv1_weights = tf.Variable(
tf.truncated_normal(
[5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED,
dtype=data_type(),
)
)
conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 32, 64], stddev=0.1, seed=SEED, dtype=data_type())
)
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED,
dtype=data_type(),
)
)
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
fc2_weights = tf.Variable(
tf.truncated_normal([512, NUM_LABELS], stddev=0.1, seed=SEED, dtype=data_type())
)
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS], dtype=data_type()))
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding="SAME")
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(
relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME"
)
conv = tf.nn.conv2d(pool, conv2_weights, strides=[1, 1, 1, 1], padding="SAME")
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(
relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME"
)
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list()
reshape = tf.reshape(
pool, [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]]
)
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
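# Shape walk-through for the model above (batch size B):
# input (B, 28, 28, 1) -> conv1 'SAME' (B, 28, 28, 32) -> 2x2 pool (B, 14, 14, 32)
# -> conv2 'SAME' (B, 14, 14, 64) -> 2x2 pool (B, 7, 7, 64)
# -> reshape (B, 7*7*64 = 3136) -> fc1 (B, 512) -> fc2 (B, NUM_LABELS),
# which is why fc1_weights has IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64 input units.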
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=train_labels_node, logits=logits
)
)
# L2 regularization for the fully connected parameters.
regularizers = (
tf.nn.l2_loss(fc1_weights)
+ tf.nn.l2_loss(fc1_biases)
+ tf.nn.l2_loss(fc2_weights)
+ tf.nn.l2_loss(fc2_biases)
)
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0, dtype=data_type())
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True,
)
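# With staircase=True this multiplies the base rate by 0.95 once per pass over
# the training set, i.e. lr = 0.01 * 0.95 ** epoch; after 10 epochs it is
# roughly 0.01 * 0.95 ** 10 ~= 0.006.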
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(
loss, global_step=batch
)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(model(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < EVAL_BATCH_SIZE:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
for begin in xrange(0, size, EVAL_BATCH_SIZE):
end = begin + EVAL_BATCH_SIZE
if end <= size:
predictions[begin:end, :] = sess.run(
eval_prediction, feed_dict={eval_data: data[begin:end, ...]}
)
else:
batch_predictions = sess.run(
eval_prediction, feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]}
)
predictions[begin:, :] = batch_predictions[begin - size :, :]
return predictions
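# For the final partial slice, the last EVAL_BATCH_SIZE rows are evaluated and
# only the trailing size - begin rows are kept (begin - size is a negative
# index into batch_predictions), so every example is scored exactly once.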
# Create a local session to run the training.
start_time = time.time()
with tf.Session() as sess:
# Run all the initializers to prepare the trainable parameters.
tf.global_variables_initializer().run()
print("Initialized!")
# Loop through training steps.
for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
# Compute the offset of the current minibatch in the data.
# Note that we could use better randomization across epochs.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
batch_data = train_data[offset : (offset + BATCH_SIZE), ...]
batch_labels = train_labels[offset : (offset + BATCH_SIZE)]
# This dictionary maps the batch data (as a numpy array) to the
# node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data, train_labels_node: batch_labels}
# Run the optimizer to update weights.
sess.run(optimizer, feed_dict=feed_dict)
# print some extra information once we reach the evaluation frequency
if step % EVAL_FREQUENCY == 0:
# fetch some extra nodes' data
l, lr, predictions = sess.run(
[loss, learning_rate, train_prediction], feed_dict=feed_dict
)
print(
"Step %d (epoch %.2f)"
% (step, float(step) * BATCH_SIZE / train_size)
)
print("Minibatch loss: %.3f, learning rate: %.6f" % (l, lr))
print("Minibatch error: %.1f%%" % error_rate(predictions, batch_labels))
print(
"Validation error: %.1f%%"
% error_rate(
eval_in_batches(validation_data, sess), validation_labels
)
)
sys.stdout.flush()
# Finally print the result!
test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
print("Test error: %.1f%%" % test_error)
if FLAGS.self_test:
print("test_error", test_error)
assert test_error == 0.0, "expected 0.0 test_error, got %.2f" % (
test_error,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--use_fp16",
default=False,
help="Use half floats instead of full floats if True.",
action="store_true",
)
parser.add_argument(
"--self_test",
default=False,
action="store_true",
help="True if running a self test.",
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 14,391 | 39.886364 | 88 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/samples/ml/tensorflow/app/benchmark/helloworld.py | import tensorflow as tf
hello = tf.constant("Hello TensorFlow (from an SGX-LKL-OE enclave)!")
sess = tf.Session()
print(sess.run(hello))
| 138 | 22.166667 | 69 | py |
sgx-lkl-oe_port | sgx-lkl-oe_port/samples/ml/tensorflow/app/benchmark/mnist_lenet_eval.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible; it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import gzip
import os
import sys
import time
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
WORK_DIRECTORY = "data"
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64 # original: 64
NUM_EPOCHS = 10 # original: 10
EVAL_BATCH_SIZE = 64 # original: 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
FLAGS = None
def data_type():
"""Return the type of the activations, weights, and placeholder variables."""
if FLAGS.use_fp16:
return tf.float16
else:
return tf.float32
def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print("Extracting", filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
return data
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print("Extracting", filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
return labels
def fake_data(num_images):
"""Generate a fake dataset that matches the dimensions of MNIST."""
data = numpy.ndarray(
shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS), dtype=numpy.float32
)
labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
for image in xrange(num_images):
label = image % 2
data[image, :, :, 0] = label - 0.5
labels[image] = label
return data, labels
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (
100.0 * numpy.sum(numpy.argmax(predictions, 1) == labels) / predictions.shape[0]
)
def main(_):
if FLAGS.self_test:
print("Running self-test.")
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download("train-images-idx3-ubyte.gz")
train_labels_filename = maybe_download("train-labels-idx1-ubyte.gz")
test_data_filename = maybe_download("t10k-images-idx3-ubyte.gz")
test_labels_filename = maybe_download("t10k-labels-idx1-ubyte.gz")
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
data_type(), shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
)
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
data_type(), shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
)
# The variables below hold all the trainable weights. They are passed an
# initial value which will be assigned when we call:
# {tf.global_variables_initializer().run()}
conv1_weights = tf.Variable(
tf.truncated_normal(
[5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED,
dtype=data_type(),
)
)
conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 32, 64], stddev=0.1, seed=SEED, dtype=data_type())
)
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED,
dtype=data_type(),
)
)
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
fc2_weights = tf.Variable(
tf.truncated_normal([512, NUM_LABELS], stddev=0.1, seed=SEED, dtype=data_type())
)
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS], dtype=data_type()))
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding="SAME")
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(
relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME"
)
conv = tf.nn.conv2d(pool, conv2_weights, strides=[1, 1, 1, 1], padding="SAME")
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(
relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME"
)
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list()
reshape = tf.reshape(
pool, [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]]
)
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=train_labels_node, logits=logits
)
)
# L2 regularization for the fully connected parameters.
regularizers = (
tf.nn.l2_loss(fc1_weights)
+ tf.nn.l2_loss(fc1_biases)
+ tf.nn.l2_loss(fc2_weights)
+ tf.nn.l2_loss(fc2_biases)
)
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0, dtype=data_type())
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True,
)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(
loss, global_step=batch
)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(model(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < EVAL_BATCH_SIZE:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
for begin in xrange(0, size, EVAL_BATCH_SIZE):
end = begin + EVAL_BATCH_SIZE
if end <= size:
predictions[begin:end, :] = sess.run(
eval_prediction, feed_dict={eval_data: data[begin:end, ...]}
)
else:
batch_predictions = sess.run(
eval_prediction, feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]}
)
predictions[begin:, :] = batch_predictions[begin - size :, :]
return predictions
# Create a local session to run the training.
start_time = time.time()
num_steps = FLAGS.num_steps
with tf.Session() as sess:
# Run all the initializers to prepare the trainable parameters.
tf.global_variables_initializer().run()
print("Initialized!")
# Loop through training steps.
for step in xrange(num_steps):
# Evaluate the validation error at every step and report the elapsed time.
rate = error_rate(eval_in_batches(validation_data, sess), validation_labels)
elapsed_time = time.time() - start_time
start_time = time.time()
print(
"Step %d in %d, %.2f ms, Validation error: %.1f%%"
% (step, num_steps, 1000 * elapsed_time / EVAL_FREQUENCY, rate)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--use_fp16",
default=False,
help="Use half floats instead of full floats if True.",
action="store_true",
)
parser.add_argument(
"--self_test",
default=False,
action="store_true",
help="True if running a self test.",
)
parser.add_argument("--num_steps", default=100, help="Number of evaluation steps.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 13,130 | 39.279141 | 88 | py |
EIR | EIR-main/main_lincls.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from tqdm import tqdm
from tensorboardX import SummaryWriter
writer = SummaryWriter('runs/lc_mocov2')
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j', '--workers', default=24, type=int, metavar='N',
help='number of data loading workers (default: 24)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=30., type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int,
help='learning rate schedule (when to drop lr by a ratio)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0., type=float,
metavar='W', help='weight decay (default: 0.)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--pretrained', default='', type=str,
help='path to moco pretrained checkpoint')
best_acc1 = 0
best_acc5 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
global best_acc5
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
model.fc = nn.Linear(2048, 100, bias=True)
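# Hard-coded for a ResNet-50 backbone (2048-d pooled features) and a 100-class dataset.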
print(model)
# freeze all layers but the last fc
for name, param in model.named_parameters():
if name not in ['fc.weight', 'fc.bias']:
param.requires_grad = False
# init the fc layer
model.fc.weight.data.normal_(mean=0.0, std=0.01)
model.fc.bias.data.zero_()
# load from pre-trained, before DistributedDataParallel constructor
if args.pretrained:
if os.path.isfile(args.pretrained):
print("=> loading checkpoint '{}'".format(args.pretrained))
checkpoint = torch.load(args.pretrained, map_location="cpu")
# rename moco pre-trained keys
state_dict = checkpoint['state_dict']
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
# remove prefix
state_dict[k[len("module.encoder_q."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
args.start_epoch = 0
msg = model.load_state_dict(state_dict, strict=False)
assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
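# Example of the renaming above: 'module.encoder_q.layer4.1.conv2.weight' becomes
# 'layer4.1.conv2.weight', while keys under 'module.encoder_q.fc.' (the MoCo
# projection head) are deleted, so only the new classifier's fc.weight / fc.bias
# are reported as missing -- exactly what the assert checks.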
print("=> loaded pre-trained model '{}'".format(args.pretrained))
else:
print("=> no checkpoint found at '{}'".format(args.pretrained))
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# optimize only the linear classifier
parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
assert len(parameters) == 2 # fc.weight, fc.bias
optimizer = torch.optim.SGD(parameters, args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1, acc5 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if is_best:
best_acc5 = acc5
if is_best and (not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0)):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
if epoch == args.start_epoch:
sanity_check(model.state_dict(), args.pretrained)
writer.add_scalar('valtop1', acc1, epoch)
writer.add_scalar('valtop5', acc5, epoch)
print('best_acc1: {} --- best_acc5: {}'.format(best_acc1, best_acc5))
f = open('./runs/acc.txt', 'a+')
f.write('lc ddp acc1: {}\n'.format(best_acc1))
f.write('lc ddp acc5: {}\n'.format(best_acc5))
f.close()
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
"""
Switch to eval mode:
Under the protocol of linear classification on frozen features/models,
it is not legitimate to change any part of the pre-trained model.
BatchNorm in train mode may revise running mean/std (even if it receives
no gradient), which are part of the model parameters too.
"""
model.eval()
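# Example of why eval mode matters here: a BatchNorm layer in train mode keeps
# updating its running_mean / running_var buffers even when its parameters have
# requires_grad=False, which would silently alter the frozen backbone.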
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
# print(model)
# print(images.shape) # torch.Size([32, 3, 224, 224])
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
writer.add_scalar('loss', losses.avg, epoch)
writer.add_scalar('top1', top1.avg, epoch)
writer.add_scalar('top5', top5.avg, epoch)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def sanity_check(state_dict, pretrained_weights):
"""
Linear classifier should not change any weights other than the linear layer.
This sanity check asserts nothing wrong happens (e.g., BN stats updated).
"""
print("=> loading '{}' for sanity check".format(pretrained_weights))
checkpoint = torch.load(pretrained_weights, map_location="cpu")
state_dict_pre = checkpoint['state_dict']
for k in list(state_dict.keys()):
# only ignore fc layer
if 'fc.weight' in k or 'fc.bias' in k:
continue
# name in pretrained model
k_pre = 'module.encoder_q.' + k[len('module.'):] \
if k.startswith('module.') else 'module.encoder_q.' + k
assert ((state_dict[k].cpu() == state_dict_pre[k_pre]).all()), \
'{} is changed in linear classifier training.'.format(k)
print("=> sanity check passed.")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
writer.add_scalar('lr', lr, epoch)
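# With the defaults (--lr 30 --schedule 60 80) this gives lr = 30 for epochs
# 0-59, 3.0 for epochs 60-79, and 0.3 from epoch 80 onwards.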
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
writer.close()
| 20,483 | 37.649057 | 96 | py |
EIR | EIR-main/main_moco.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.nn.functional as F
import moco.loader
import moco.builder
from tqdm import tqdm
from tensorboardX import SummaryWriter
writer = SummaryWriter('runs/mocov2')
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
help='number of data loading workers (default: 16)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int,
help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum of SGD solver')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
# moco specific configs:
parser.add_argument('--moco-dim', default=128, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--moco-k', default=65536, type=int,
help='queue size; number of negative keys (default: 65536)')
parser.add_argument('--moco-m', default=0.999, type=float,
help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--moco-t', default=0.07, type=float,
help='softmax temperature (default: 0.07)')
# options for moco v2
parser.add_argument('--mlp', action='store_true',
help='use mlp head')
parser.add_argument('--aug-plus', action='store_true',
help='use moco v2 data augmentation')
parser.add_argument('--cos', action='store_true',
help='use cosine lr schedule')
parser.add_argument('--warmup', default=5, type=int,
help='warm-up epochs (default: 5)')
parser.add_argument('--teacher-t', default=0.04, type=float,
help='softmax temperature (default: 0.04)')
parser.add_argument('--student-t', default=0.1, type=float,
help='softmax temperature (default: 0.1)')
parser.add_argument('--lambda1', default=1.0, type=float,
help='coefficient (default: 1.)')
parser.add_argument('--lambda2', default=2.0, type=float,
help='coefficient (default: 2.)')
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
print("=> creating model '{}'".format(args.arch))
model = moco.builder.MoCo(
models.__dict__[args.arch],
args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.teacher_t, args.student_t, args.mlp)
print(model)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
# comment out the following line for debugging
raise NotImplementedError("Only DistributedDataParallel is supported.")
else:
# AllGather implementation (batch shuffle, queue update, etc.) in
# this code only supports DistributedDataParallel.
raise NotImplementedError("Only DistributedDataParallel is supported.")
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# kl_loss = nn.KLDivLoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if args.aug_plus:
# MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709
augmentation = [
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([moco.loader.GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
else:
# MoCo v1's aug: the same as InstDisc https://arxiv.org/abs/1805.01978
augmentation = [
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomGrayscale(p=0.2),
transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
augmentation_target = [
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
train_dataset = datasets.ImageFolder(
traindir,
moco.loader.TwoCropsTransform(transforms.Compose(augmentation), transforms.Compose(augmentation_target)))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
if (epoch + 1) % 50 == 0:
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, is_best=False, filename='checkpoint_{:04d}.pth.tar'.format(epoch))
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
losses_moco = AverageMeter('Loss_MoCo', ':.4e')
losses_local = AverageMeter('Loss_Local', ':.4e')
losses_global = AverageMeter('Loss_Global', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, losses_moco, losses_global, losses_local, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, _) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images[0] = images[0].cuda(args.gpu, non_blocking=True)
images[1] = images[1].cuda(args.gpu, non_blocking=True)
images[2] = images[2].cuda(args.gpu, non_blocking=True)
# compute output
output, output_local, target, logitsq, logitsk = model(im_q=images[0], im_k=images[1], im_k_w=images[2])
loss_moco = criterion(output, target)
# feature attraction
loss_local = criterion(output_local, target)
# cross-entropy between the key (teacher) and query (student) distributions -> distribution alignment
loss_global = - torch.sum(F.softmax(logitsk, dim=1) * F.log_softmax(logitsq, dim=1), dim=1).mean()
losses_moco.update(loss_moco.item(), images[0].size(0))
losses_global.update(loss_global.item(), images[0].size(0))
losses_local.update(loss_local.item(), images[0].size(0))
loss = loss_moco + args.lambda1 * loss_global + args.lambda2 * loss_local
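# Total objective: the standard contrastive term (loss_moco), plus lambda1 times
# the teacher/student distribution-alignment term (loss_global), plus lambda2
# times the additional contrastive term on output_local ("feature attraction"
# above); with the default flags this is loss_moco + 1.0 * loss_global + 2.0 * loss_local.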
# acc1/acc5 are (K+1)-way contrast classifier accuracy
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images[0].size(0))
top1.update(acc1[0], images[0].size(0))
top5.update(acc5[0], images[0].size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
writer.add_scalar('loss', losses.avg, epoch)
writer.add_scalar('loss_moco', losses_moco.avg, epoch)
writer.add_scalar('loss_global', losses_global.avg, epoch)
writer.add_scalar('loss_local', losses_local.avg, epoch)
writer.add_scalar('train_top1', top1.avg, epoch)
writer.add_scalar('train_top5', top5.avg, epoch)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr
if args.cos: # cosine lr schedule
lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
else: # stepwise lr schedule
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
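# For example, with logits shaped (N, K+1) and integer targets,
# accuracy(output, target, topk=(1, 5)) returns [top1, top5] as one-element
# tensors holding percentages in the 0-100 range.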
if __name__ == '__main__':
main()
writer.close()
| 18,570 | 40.360802 | 113 | py |
EIR | EIR-main/moco/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 71 | 35 | 70 | py |
EIR | EIR-main/moco/builder.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import numpy as np
import copy
class MoCo(nn.Module):
"""
Build a MoCo model with: a query encoder, a key encoder, and a queue
https://arxiv.org/abs/1911.05722
"""
def __init__(self, base_encoder, dim=128, K=65536, m=0.999, T=0.07, TT=0.04, ST=0.1, mlp=False):
"""
dim: feature dimension (default: 128)
K: queue size; number of negative keys (default: 65536)
m: moco momentum of updating key encoder (default: 0.999)
T: softmax temperature (default: 0.07)
"""
super(MoCo, self).__init__()
self.K = K
self.m = m
self.T = T
self.TT = TT
self.ST = ST
# create the encoders
# num_classes is the output fc dimension
self.encoder_q = base_encoder(num_classes=dim)
self.encoder_k = base_encoder(num_classes=dim)
if mlp: # hack: brute-force replacement
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue", torch.randn(dim, K))
self.register_buffer("queue_w", torch.randn(dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.queue_w = nn.functional.normalize(self.queue_w, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys, keys_w):
# gather keys before updating queue
keys = concat_all_gather(keys)
keys_w = concat_all_gather(keys_w)
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.K % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr:ptr + batch_size] = keys.T
self.queue_w[:, ptr:ptr + batch_size] = keys_w.T
ptr = (ptr + batch_size) % self.K # move pointer
self.queue_ptr[0] = ptr
@torch.no_grad()
def _rand_bbox(self, size, lam):
W = size[2]
H = size[3]
cut_rat = np.sqrt(1. - lam)
        cut_w = int(W * cut_rat)
        cut_h = int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
@torch.no_grad()
def _img_interpolation(self, im_q, im_k):
batchSize = im_q.size(0)
noise = torch.randperm(batchSize)
# 0-mixup 1-cutmix
choose = 1
# choose = np.random.randint(2)
ratio = np.random.beta(1.0, 1.0) #####1
if choose == 0: # mixup
im_mix = ratio * im_q + (1 - ratio) * im_k[noise]
else: # cutmix
bbx11, bby11, bbx12, bby12 = self._rand_bbox(im_q.size(), ratio)
im_mix = copy.deepcopy(im_q)
im_mix[:, :, bbx11:bbx12, bby11:bby12] = im_k[noise, :, bbx11:bbx12, bby11:bby12]
ratio = 1 - ((bbx12 - bbx11) * (bby12 - bby11) / (im_q.size(2) * im_q.size(3)))
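            # ratio is re-derived as the fraction of im_q's area left outside
            # the pasted box, so it matches the actual pixel mixing proportion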
return im_mix, noise, ratio
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self, im_q, im_k, im_k_w):
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
# compute query features
im_mix, noise, ratio = self._img_interpolation(im_q, im_k)
q = self.encoder_q(im_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
q_mix = self.encoder_q(im_mix)
q_mix = nn.functional.normalize(q_mix, dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
im_k_w, idx_unshuffle_w = self._batch_shuffle_ddp(im_k_w)
k = self.encoder_k(im_k) # keys: NxC
k_w = self.encoder_k(im_k_w) # keys: NxC
k = nn.functional.normalize(k, dim=1)
k_w = nn.functional.normalize(k_w, dim=1)
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
k_w = self._batch_unshuffle_ddp(k_w, idx_unshuffle_w)
reordered_k = k[noise]
f_mix = ratio * q + (1 - ratio) * reordered_k
f_mix = nn.functional.normalize(f_mix, dim=1)
f_mix = f_mix.detach()
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
# compute distributions
logits_q = torch.einsum('nc,ck->nk', [q, self.queue_w.clone().detach()])
logits_k = torch.einsum('nc,ck->nk', [k_w, self.queue_w.clone().detach()])
logits_q /= self.ST
logits_k /= self.TT
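        # Two temperatures for distribution alignment: the query/student side
        # is softened with ST (0.1 by default) while the key/teacher side uses
        # the sharper TT (0.04 by default), giving a more peaked target.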
## local
l_pos_local = torch.einsum('nc,nc->n', [q_mix, f_mix.detach()]).unsqueeze(-1)
l_neg_local = torch.einsum('nc,ck->nk', [q_mix, self.queue.clone().detach()])
logits_local = torch.cat([l_pos_local, l_neg_local], dim=1)
logits_local /= self.T
# dequeue and enqueue
self._dequeue_and_enqueue(k, k_w)
return logits, logits_local, labels, logits_q, logits_k.detach()
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
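if __name__ == "__main__":
    # Minimal construction sketch, assuming torchvision is installed: wrap a
    # ResNet-50 encoder with the MoCo module above. The forward pass is not
    # exercised here because _batch_shuffle_ddp()/concat_all_gather() need an
    # initialised distributed process group and CUDA devices.
    import torchvision.models as tv_models
    moco_model = MoCo(tv_models.resnet50, dim=128, K=65536, m=0.999,
                      T=0.07, TT=0.04, ST=0.1, mlp=True)
    print(sum(p.numel() for p in moco_model.parameters()))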
| 8,473 | 33.032129 | 104 | py |
EIR | EIR-main/moco/loader.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from PIL import ImageFilter
import random
class TwoCropsTransform:
"""Take two random crops of one image as the query and key."""
def __init__(self, base_transform, target_transform):
self.base_transform = base_transform
self.target_transform = target_transform
def __call__(self, x):
q = self.base_transform(x)
k = self.base_transform(x)
k_w = self.target_transform(x)
return [q, k, k_w]
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
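if __name__ == "__main__":
    # Minimal usage sketch, assuming torchvision and Pillow are installed:
    # build one strong augmentation pipeline, reuse it for the weak/target
    # crop, and apply TwoCropsTransform to a dummy image. The crop size and
    # blur probability below are illustrative values, not prescribed ones.
    from PIL import Image
    from torchvision import transforms
    strong = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(0.2, 1.0)),
        transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    q, k, k_w = TwoCropsTransform(strong, strong)(Image.new("RGB", (256, 256)))
    print(q.shape, k.shape, k_w.shape)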
| 869 | 28 | 79 | py |
DPMs-for-Audio-Data-Augmentation | DPMs-for-Audio-Data-Augmentation-main/Diffusion_Sampling.py | import torch
from tqdm import tqdm_gui,tqdm
from diffusers import DPMSolverMultistepScheduler,get_cosine_schedule_with_warmup,DPMSolverSinglestepScheduler,DDIMScheduler
from utils import ESC
from torch.utils.data import DataLoader
from denoising_diffusion_pytorch import Unet_Conditional
from diffusers.utils import floats_tensor
import matplotlib.pyplot as plt
import datetime
import numpy as np
from timm.models.xception import xception
choose=1
def forward(model,scheduler,image_size=224,batch_size=16,sample_class=1,device='cuda',channels=3):
global choose
sample=torch.randn(batch_size,channels,image_size,image_size).to(device)
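    # Reverse diffusion: starting from pure Gaussian noise, iterate over the
    # scheduler's timesteps; at every step the class-conditional UNet predicts
    # the noise residual and scheduler.step() returns the previous, less noisy
    # sample.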
for i,t in enumerate(tqdm(scheduler.timesteps)):
#print(t.shape)
with torch.no_grad():
#print(sample.shape,t*torch.ones(batch_size).long(),sample_class*torch.ones(batch_size))
if(choose==0):
residual=model(sample,t=t*torch.ones(batch_size).long().to(device),y=sample_class*torch.ones(batch_size).long().to(device))
else:
residual=model(sample,time=t*torch.ones(batch_size).long().to(device),label=sample_class*torch.ones(batch_size).long().to(device))
sample=scheduler.step(residual,t,sample).prev_sample
return sample
device='cuda'
judge=True
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
import os
k_ranges=[2,10]#the hyperparameter k
for k in k_ranges:
os.makedirs("Synthetic_us8k_"+str(k),exist_ok=True)
urbansound=["air_conditioner","car_horn","children_playing","dog_bark","drilling","engine_idling","gun_shot","jackhammer","siren","street_music"]
#model=Unet_Conditional
import random
name="./Diffusion_us8k_dim64/Diffusion_us8k_dim64_90augmentation_3500.pt"#720'
#name="./Diffusion_us8k_dim64/Diffusion_us8k_dim64_1275.pt"
model=torch.load(name).to(device=device)
model.eval()
noise_scheduler=DPMSolverSinglestepScheduler()
print("Name: ",name)
#noise_scheduler.load_config("noise_scheduler.pt/scheduler_config.json")
noise_scheduler.set_timesteps(num_inference_steps=20)
random.seed(1)
judge_net=None
if(judge):
judge_net=torch.load("judge.pt").to(device=device)
judge_net.eval()
print("judge:",judge)
cnt=0
from tqdm import tqdm
for epoch in tqdm(range(1510)):
if(cnt>8750):break
temp=random.randint(0,9)
print("Class:",temp)
image=forward(model,noise_scheduler,128,batch_size=10,sample_class=temp,device=device,channels=3)
# plt.figure(figsize=(10,10))
print(image.max(),image.min())
if(judge==False):
image=image.permute(0,2,3,1).clip(0,1).cpu().detach().numpy()
else:
output=judge_net(image)
for i in range(10):
#print(image.shape)
#plt.subplot(2,5,i+1)
#image[i]=(image[i]-image[i].min())/(image[i].max()-image[i].min())
#plt.imshow(image[i])
# print(image.max(),image.min())
#plt.show()
# plt.subplot(4,5,10+(i+1))
#plt.imshow(image[i].permute(1,2,0).clip(0,1).cpu().detach().numpy()[::-1])
if(judge):
                    a, idx1 = torch.sort(output[i], descending=True)  # descending=False sorts ascending, descending=True sorts descending
idx = idx1[:k]
print("Cnt: ",cnt,name,idx[:k])
#print(output.argmax().cpu().detach().numpy(),temp)
if(temp in idx.cpu().detach().numpy()):
cnt+=1
image_to_save=image.permute(0,2,3,1).clip(0,1).cpu().detach().numpy()[i]
plt.imsave("Synthetic_us8k_"+str(k)+"/"+"0-"+str(temp)+"-"+str(i)+"-"+str(epoch)+".jpg",image_to_save)
else:
plt.imsave("Synthetic_us8k/"+"0-"+str(temp)+"-"+str(i)+"-"+str(epoch)+".jpg",image[i])
| 3,881 | 43.113636 | 149 | py |
DPMs-for-Audio-Data-Augmentation | DPMs-for-Audio-Data-Augmentation-main/DPMs_Training.py | #Training
from diffusers import DPMSolverMultistepScheduler,get_cosine_schedule_with_warmup,DPMSolverSinglestepScheduler
import torch.functional as F
import random
import torch.nn as nn
from tqdm import tqdm_notebook
from tqdm import tqdm
from utils import Unet_Conditional,US8K
from torch.utils.data import DataLoader
from torchvision.transforms import RandAugment
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass
@dataclass
class TrainingConfig:
image_size = 224 # the generated image resolution
train_batch_size = 16
eval_batch_size = 16 # how many images to sample during evaluation
num_epochs = 500
gradient_accumulation_steps = 1
learning_rate = 1e-4
lr_warmup_steps = 500
save_image_epochs = 10
save_model_epochs = 30
mixed_precision = 'fp16' # `no` for float32, `fp16` for automatic mixed precision
output_dir = 'ddpm-butterflies-128' # the model namy locally and on the HF Hub
push_to_hub = False # whether to upload the saved model to the HF Hub
hub_private_repo = False
overwrite_output_dir = True # overwrite the old model when re-running the notebook
seed = 0
config = TrainingConfig()
dataset1=US8K(transform_size=128,train=True,root="Preprocessing_us8k")
dataset2=US8K(transform_size=128,train=True,root="Preprocessing_us8k_augmentation")
urbansound8k=["air_conditioner","car_horn","children_playing","dog_bark","drilling","engine_idling","gun_shot","jackhammer","siren","street_music"]
noise_scheduler=DPMSolverMultistepScheduler(num_train_timesteps=1000)
noise_scheduler.set_timesteps(num_inference_steps=20)
loss=nn.MSELoss()
device='cuda'
model = Unet_Conditional(labels_dim=10,dim=64).to(device)
model=torch.load('./Diffusion_us8k_dim64/Diffusion_us8k_dim64_90augmentation_175.pt')
optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
print(sum(p.numel() for p in model.parameters() if p.requires_grad))
def forward(model,scheduler,config,batch_size=16,sample_class=1,device='cuda'):
sample=torch.randn(batch_size,3,config.image_size,config.image_size).to(device)
for i,t in enumerate(tqdm(scheduler.timesteps)):
#print(t.shape)
with torch.no_grad():
residual=model(sample,t=t*torch.ones(batch_size).long().to(device),label=sample_class*torch.ones(batch_size).long().to(device))
sample=scheduler.step(residual,t,sample).prev_sample
return sample
epoches=3500
for epoch in tqdm_notebook(range(0,epoches)):
if(random.randint(0,20)==0):
dataloader=DataLoader(dataset1, batch_size=30, shuffle=True)
else:
dataloader=DataLoader(dataset2, batch_size=30, shuffle=True)
for data,label in tqdm_notebook(dataloader):
#print(label.shape)
data=data.to(device)
# data=255*data
# data=torch.tensor(data,dtype=torch.uint8)
# data=augmen(data)
# data=data.float()
# data=data/255
label=torch.argmax(label,dim=1).to(device).long()
optimizer.zero_grad()
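        # Epsilon-prediction objective: draw Gaussian noise and a random
        # timestep, corrupt the clean spectrogram with scheduler.add_noise(),
        # and train the conditional UNet to recover the injected noise (MSE).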
noise=torch.randn_like(data)
timesteps=torch.randint(0,noise_scheduler.num_train_timesteps,(data.shape[0],)).to(device)
noisy_image=noise_scheduler.add_noise(data,noise,timesteps)
noise_pred=model(noisy_image,time=timesteps,label=label.long())
loss_val=loss(noise_pred,noise)
loss_val.backward()
optimizer.step()
if(epoch%5==0):
print("Epoch: ",epoch,"Loss: ",loss_val.item())
torch.save(model,f'./Diffusion_us8k_dim64/Diffusion_us8k_dim64_90augmentation_{epoch}.pt')
| 3,610 | 36.614583 | 147 | py |
DPMs-for-Audio-Data-Augmentation | DPMs-for-Audio-Data-Augmentation-main/train_discriminator.py | #Judge Model Training
import torch
import torch.nn as nn
import torch.nn.functional as F
import timm.models as models
import numpy as np
import pandas as pd
from utils import US8K
from timm.models.xception import xception
model=xception(num_classes=10,pretrained=False).cuda()
loss_fn=nn.CrossEntropyLoss(label_smoothing=0.1)
optimizer=torch.optim.AdamW(model.parameters(),lr=0.0001)
train_dataset=US8K(train=True,transform_size=128,all_in=True)
print(len(train_dataset.data))
test_dataset=US8K(train=False,transform_size=128)
epoches=500
print(sum(p.numel() for p in model.parameters() if p.requires_grad))
for epoch in range(epoches):
model.train()
train_loader=torch.utils.data.DataLoader(train_dataset,batch_size=100,shuffle=True)
test_loader=torch.utils.data.DataLoader(test_dataset,batch_size=100,shuffle=True)
accuracy=0
losses_all=0
all_have=0
for data,label in train_loader:
data=data.cuda()
label=label.cuda()
output=model(data)
loss=loss_fn(output,label)
accuracy+=torch.sum(torch.argmax(output,dim=1)==torch.argmax(label,dim=1))
all_have+=data.shape[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses_all+=loss.item()
print('epoch:{},train_loss:{},train_accuracy:{}'.format(epoch,losses_all/all_have,accuracy/all_have))
model.eval()
all_have=0
accuracy=0
losses_all=0
for data,label in test_loader:
data=data.cuda()
label=label.cuda()
output=model(data)
loss=loss_fn(output,label)
all_have+=data.shape[0]
accuracy+=torch.sum(torch.argmax(output,dim=1)==torch.argmax(label,dim=1))
losses_all+=loss.item()
print('epoch:{},test_loss:{},test_accuracy:{}'.format(epoch,losses_all/all_have,accuracy/all_have))
torch.save(model,"judge.pt")
| 1,854 | 34 | 106 | py |
OpenBCIPython | OpenBCIPython-master/user.py | #!/usr/bin/env python2.7
import argparse # new in Python2.7
import os
import time
import string
import atexit
import threading
import logging
import sys
logging.basicConfig(level=logging.ERROR)
from yapsy.PluginManager import PluginManager
# Load the plugins from the plugin directory.
manager = PluginManager()
if __name__ == '__main__':
print ("------------user.py-------------")
parser = argparse.ArgumentParser(description="OpenBCI 'user'")
parser.add_argument('--board', default="cyton",
help="Choose between [cyton] and [ganglion] boards.")
parser.add_argument('-l', '--list', action='store_true',
help="List available plugins.")
parser.add_argument('-i', '--info', metavar='PLUGIN',
help="Show more information about a plugin.")
parser.add_argument('-p', '--port',
help="Port to connect to OpenBCI Dongle " +
"( ex /dev/ttyUSB0 or /dev/tty.usbserial-* ) or AUTO to attempt auto-dection.")
parser.set_defaults(port="AUTO")
# baud rate is not currently used
parser.add_argument('-b', '--baud', default=115200, type=int,
help="Baud rate (not currently used)")
parser.add_argument('--no-filtering', dest='filtering',
action='store_false',
help="Disable notch filtering")
parser.set_defaults(filtering=True)
parser.add_argument('-d', '--daisy', dest='daisy',
action='store_true',
help="Force daisy mode (cyton board)")
parser.add_argument('-x', '--aux', dest='aux',
action='store_true',
help="Enable accelerometer/AUX data (ganglion board)")
# first argument: plugin name, then parameters for plugin
parser.add_argument('-a', '--add', metavar=('PLUGIN', 'PARAM'),
action='append', nargs='+',
help="Select which plugins to activate and set parameters.")
parser.add_argument('--log', dest='log', action='store_true',
help="Log program")
parser.add_argument('--plugins-path', dest='plugins_path', nargs='+',
help="Additional path(s) to look for plugins")
parser.set_defaults(daisy=False, log=False)
args = parser.parse_args()
if not(args.add):
print ("WARNING: no plugin selected, you will only be able to communicate with the board. You should select at least one plugin with '--add [plugin_name]'. Use '--list' to show available plugins or '--info [plugin_name]' to get more information.")
if args.board == "cyton":
print ("Board type: OpenBCI Cyton (v3 API)")
import open_bci_v3 as bci
elif args.board == "ganglion":
print ("Board type: OpenBCI Ganglion")
import open_bci_ganglion as bci
else:
raise ValueError('Board type %r was not recognized. Known are 3 and 4' % args.board)
# Check AUTO port selection, a "None" parameter for the board API
if "AUTO" == args.port.upper():
print("Will try do auto-detect board's port. Set it manually with '--port' if it goes wrong.")
args.port = None
else:
print("Port: ", args.port)
plugins_paths = ["plugins"]
if args.plugins_path:
plugins_paths += args.plugins_path
manager.setPluginPlaces(plugins_paths)
manager.collectPlugins()
# Print list of available plugins and exit
if args.list:
print ("Available plugins:")
for plugin in manager.getAllPlugins():
print ("\t- " + plugin.name)
exit()
# User wants more info about a plugin...
if args.info:
plugin = manager.getPluginByName(args.info)
if plugin == None:
# eg: if an import fail inside a plugin, yapsy skip it
print ("Error: [ " + args.info + " ] not found or could not be loaded. Check name and requirements.")
else:
print (plugin.description)
plugin.plugin_object.show_help()
exit()
print ("\n------------SETTINGS-------------")
print ("Notch filtering:" + str(args.filtering))
# Logging
if args.log:
print ("Logging Enabled: " + str(args.log))
logging.basicConfig(filename="OBCI.log", format='%(asctime)s - %(levelname)s : %(message)s', level=logging.DEBUG)
logging.getLogger('yapsy').setLevel(logging.DEBUG)
logging.info('---------LOG START-------------')
logging.info(args)
else:
print ("user.py: Logging Disabled.")
print ("\n-------INSTANTIATING BOARD-------")
board = bci.OpenBCIBoard(port=args.port,
daisy=args.daisy,
filter_data=args.filtering,
scaled_output=True,
log=args.log,
aux=args.aux)
# Info about effective number of channels and sampling rate
if board.daisy:
print ("Force daisy mode:")
else:
print ("No daisy:")
print (board.getNbEEGChannels(), "EEG channels and", board.getNbAUXChannels(), "AUX channels at", board.getSampleRate(), "Hz.")
print ("\n------------PLUGINS--------------")
# Loop round the plugins and print their names.
print ("Found plugins:")
for plugin in manager.getAllPlugins():
print ("[ " + plugin.name + " ]")
print("\n")
# Fetch plugins, try to activate them, add to the list if OK
plug_list = []
callback_list = []
if args.add:
for plug_candidate in args.add:
# first value: plugin name, then optional arguments
plug_name = plug_candidate[0]
plug_args = plug_candidate[1:]
# Try to find name
plug = manager.getPluginByName(plug_name)
if plug == None:
# eg: if an import fail inside a plugin, yapsy skip it
print ("Error: [ " + plug_name + " ] not found or could not be loaded. Check name and requirements.")
else:
print ("\nActivating [ " + plug_name + " ] plugin...")
if not plug.plugin_object.pre_activate(plug_args, sample_rate=board.getSampleRate(), eeg_channels=board.getNbEEGChannels(), aux_channels=board.getNbAUXChannels(), imp_channels=board.getNbImpChannels()):
print ("Error while activating [ " + plug_name + " ], check output for more info.")
else:
print ("Plugin [ " + plug_name + "] added to the list")
plug_list.append(plug.plugin_object)
callback_list.append(plug.plugin_object)
if len(plug_list) == 0:
fun = None
else:
fun = callback_list
def cleanUp():
board.disconnect()
print ("Deactivating Plugins...")
for plug in plug_list:
plug.deactivate()
print ("User.py exiting...")
atexit.register(cleanUp)
print ("--------------INFO---------------")
print ("User serial interface enabled...\n\
View command map at http://docs.openbci.com.\n\
Type /start to run (/startimp for impedance \n\
checking, if supported) -- and /stop\n\
before issuing new commands afterwards.\n\
Type /exit to exit. \n\
Board outputs are automatically printed as: \n\
% <tab> message\n\
$$$ signals end of message")
print("\n-------------BEGIN---------------")
# Init board state
# s: stop board streaming; v: soft reset of the 32-bit board (no effect with 8bit board)
s = 'sv'
# Tell the board to enable or not daisy module
if board.daisy:
s = s + 'C'
else:
s = s + 'c'
# d: Channels settings back to default
s = s + 'd'
while(s != "/exit"):
# Send char and wait for registers to set
if (not s):
pass
elif("help" in s):
print ("View command map at: \
http://docs.openbci.com/software/01-OpenBCI_SDK.\n\
For user interface: read README or view \
https://github.com/OpenBCI/OpenBCI_Python")
elif board.streaming and s != "/stop":
print ("Error: the board is currently streaming data, please type '/stop' before issuing new commands.")
else:
# read silently incoming packet if set (used when stream is stopped)
flush = False
if('/' == s[0]):
s = s[1:]
                rec = False  # current command is recognized or not
if("T:" in s):
                lapse = int(s[s.find("T:")+2:])
rec = True
elif("t:" in s):
                lapse = int(s[s.find("t:")+2:])
rec = True
else:
lapse = -1
if('startimp' in s):
if board.getBoardType() == "cyton":
print ("Impedance checking not supported on cyton.")
else:
board.setImpedance(True)
if(fun != None):
# start streaming in a separate thread so we could always send commands in here
boardThread = threading.Thread(target=board.start_streaming, args=(fun, lapse))
boardThread.daemon = True # will stop on exit
try:
boardThread.start()
except:
raise
else:
print ("No function loaded")
rec = True
elif("start" in s):
board.setImpedance(False)
if(fun != None):
# start streaming in a separate thread so we could always send commands in here
boardThread = threading.Thread(target=board.start_streaming, args=(fun, lapse))
boardThread.daemon = True # will stop on exit
try:
boardThread.start()
except:
raise
else:
print ("No function loaded")
rec = True
elif('test' in s):
test = int(s[s.find("test")+4:])
board.test_signal(test)
rec = True
elif('stop' in s):
board.stop()
rec = True
flush = True
if rec == False:
print("Command not recognized...")
elif s:
for c in s:
if sys.hexversion > 0x03000000:
board.ser_write(bytes(c, 'utf-8'))
else:
board.ser_write(bytes(c))
time.sleep(0.100)
line = ''
time.sleep(0.1) #Wait to see if the board has anything to report
        # The Cyton nicely returns incoming packets -- here supposedly messages -- whereas the Ganglion prints incoming ASCII messages by itself
if board.getBoardType() == "cyton":
while board.ser_inWaiting():
c = board.ser_read().decode('utf-8', errors='replace') # we're supposed to get UTF8 text, but the board might behave otherwise
line += c
time.sleep(0.001)
if (c == '\n') and not flush:
print('%\t'+line[:-1])
line = ''
elif board.getBoardType() == "ganglion":
while board.ser_inWaiting():
board.waitForNotifications(0.001)
if not flush:
print(line)
# Take user input
#s = input('--> ')
if sys.hexversion > 0x03000000:
s = input('--> ')
else:
s = raw_input('--> ')
| 12,076 | 39.391304 | 255 | py |
OpenBCIPython | OpenBCIPython-master/test_net.py | import argparse
import tensorflow as tf
import time
from loader import DataLoader
from neuralnet.net.cnn.model1.convolutional_network import CNNModel1
from neuralnet.net.cnn.model2.inception_resnet_v2 import CNNModel2
def runTest(loader, cnn_model):
with tf.name_scope('Model'):
model_predicted_output, _ = cnn_model.conv_net(cnn_model.x_input)
# Declare Loss Function (softmax cross entropy)
with tf.name_scope('Loss'):
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=
model_predicted_output,
labels=cnn_model.y_target))
# Define loss and optimizer
with tf.name_scope('AdamOptimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate=cnn_model.learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(model_predicted_output, 1), tf.argmax(cnn_model.y_target, 1))
with tf.name_scope('Accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Create a summary to monitor cost tensor
tf.summary.scalar("loss", cost)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
saver = tf.train.Saver()
# loader.create_one_big_file("ogg")
# Launch the graph
with tf.Session() as sess:
image, label = loader.inputs()
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
# op to write logs to Tensorboard
summary_writer = tf.summary.FileWriter(cnn_model.logs_path, graph=tf.get_default_graph())
ckpt = tf.train.get_checkpoint_state(cnn_model.model_path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("There is a no model which has been saved previously in this directory: %s" % cnn_model.model_path)
step = 1
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
# Keep training until reach max iterations
try:
step = 0
start_time = time.time()
while not coord.should_stop():
# Run training steps or whatever
batch_x, batch_y = sess.run([image, label])
batch_x = batch_x[0:1][0]
batch_y = batch_y[0:1][0]
# Run optimization op (backprop)
_, summary = sess.run([optimizer, merged_summary_op],
feed_dict={cnn_model.x_input: batch_x, cnn_model.y_target: batch_y,
cnn_model.keep_prob: cnn_model.dropout})
summary_writer.add_summary(summary, step * loader.batch_size + step)
loss, acc = sess.run([cost, accuracy], feed_dict={cnn_model.x_input: batch_x,
cnn_model.y_target: batch_y,
cnn_model.keep_prob: 1.})
print("Iter " + str(step * loader.batch_size) + ", Minibatch Loss= {:.6f}".format(
loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
# TODO save the model as you require...
saver.save(sess, cnn_model.model_path, global_step=step)
step += 1
except tf.errors.OutOfRangeError:
print('Done training for %d epochs, %d steps.' % (loader.num_epochs, loader.batch_size))
finally:
coord.request_stop()
coord.join(threads)
save_path = saver.save(sess, cnn_model.model_path)
print("Model saved in file: %s" % save_path)
sess.close()
print("Optimization Finished!")
print("Run the command line:\n" \
"--> tensorboard --logdir=%s " \
"\nThen open http://0.0.0.0:6006/ into your web browser" % cnn_model.logs_path)
project_dir = "/home/runge/openbci/git/OpenBCI_Python"
dataset_dir = "/home/runge/openbci/git/OpenBCI_Python/build/dataset"
loader = DataLoader(project_dir, dataset_dir)
cnn_model = CNNModel2(project_dir, loader.get_train_config())
runTest(loader, cnn_model)
| 4,493 | 42.631068 | 117 | py |
OpenBCIPython | OpenBCIPython-master/test_log.py | import sys; sys.path.append('..') # help python find open_bci_v3.py relative to scripts folder
import open_bci_v3 as bci
import os
import logging
import time
def printData(sample):
#os.system('clear')
print "----------------"
print("%f" %(sample.id))
print sample.channel_data
print sample.aux_data
print "----------------"
if __name__ == '__main__':
port = '/dev/tty.usbserial-DN0096XA'
baud = 115200
logging.basicConfig(filename="test.log",format='%(message)s',level=logging.DEBUG)
logging.info('---------LOG START-------------')
board = bci.OpenBCIBoard(port=port, scaled_output=False, log=True)
#32 bit reset
board.ser.write('v')
time.sleep(0.100)
#connect pins to vcc
board.ser.write('p')
time.sleep(0.100)
#board.start_streaming(printData)
board.print_packets_in() | 798 | 23.212121 | 94 | py |
OpenBCIPython | OpenBCIPython-master/open_bci_ganglion.py | """
Core OpenBCI object for handling connections and samples from the Ganglion board.
Note that the library will print incoming ASCII messages on its own, if any (FIXME, BTW).
EXAMPLE USE:
def handle_sample(sample):
print(sample.channels_data)
board = OpenBCIBoard()
board.start(handle_sample)
TODO: support impedance
TODO: reset board with 'v'?
"""
import struct
import time
import timeit
import atexit
import logging
import numpy as np
import sys
import pdb
import glob
# local bluepy should take precedence
import sys
sys.path.insert(0,"bluepy/bluepy")
from btle import Scanner, DefaultDelegate, Peripheral
SAMPLE_RATE = 200.0 # Hz
scale_fac_uVolts_per_count = 1200 / (8388607.0 * 1.5 * 51.0)
scale_fac_accel_G_per_count = 0.000032
# service for communication, as per docs
BLE_SERVICE = "fe84"
# characteristics of interest
BLE_CHAR_RECEIVE = "2d30c082f39f4ce6923f3484ea480596"
BLE_CHAR_SEND = "2d30c083f39f4ce6923f3484ea480596"
BLE_CHAR_DISCONNECT = "2d30c084f39f4ce6923f3484ea480596"
'''
#Commands for in SDK http://docs.openbci.com/Hardware/08-Ganglion_Data_Forma
command_stop = "s";
command_startBinary = "b";
'''
class OpenBCIBoard(object):
"""
Handle a connection to an OpenBCI board.
Args:
port: MAC address of the Ganglion Board. "None" to attempt auto-detect.
aux: enable on not aux channels (i.e. switch to 18bit mode if set)
impedance: measures impedance when start streaming
timeout: in seconds, if set will try to disconnect / reconnect after a period without new data -- should be high if impedance check
max_packets_to_skip: will try to disconnect / reconnect after too many packets are skipped
baud, filter_data, daisy: Not used, for compatibility with v3
"""
def __init__(self, port=None, baud=0, filter_data=False,
scaled_output=True, daisy=False, log=True, aux=False, impedance=False, timeout=2, max_packets_to_skip=20):
# unused, for compatibility with Cyton v3 API
self.daisy = False
# these one are used
self.log = log # print_incoming_text needs log
self.aux = aux
self.streaming = False
self.timeout = timeout
self.max_packets_to_skip = max_packets_to_skip
self.scaling_output = scaled_output
self.impedance = False
# might be handy to know API
self.board_type = "ganglion"
print("Looking for Ganglion board")
if port == None:
port = self.find_port()
self.port = port # find_port might not return string
self.connect()
self.streaming = False
# number of EEG channels and (optionally) accelerometer channel
self.eeg_channels_per_sample = 4
self.aux_channels_per_sample = 3
self.imp_channels_per_sample = 5
self.read_state = 0
self.log_packet_count = 0
self.packets_dropped = 0
self.time_last_packet = 0
# Disconnects from board when terminated
atexit.register(self.disconnect)
def getBoardType(self):
""" Returns the version of the board """
return self.board_type
def setImpedance(self, flag):
""" Enable/disable impedance measure """
self.impedance = bool(flag)
def connect(self):
""" Connect to the board and configure it. Note: recreates various objects upon call. """
print ("Init BLE connection with MAC: " + self.port)
print ("NB: if it fails, try with root privileges.")
self.gang = Peripheral(self.port, 'random') # ADDR_TYPE_RANDOM
print ("Get mainservice...")
self.service = self.gang.getServiceByUUID(BLE_SERVICE)
print ("Got:" + str(self.service))
print ("Get characteristics...")
self.char_read = self.service.getCharacteristics(BLE_CHAR_RECEIVE)[0]
print ("receive, properties: " + str(self.char_read.propertiesToString()) + ", supports read: " + str(self.char_read.supportsRead()))
self.char_write = self.service.getCharacteristics(BLE_CHAR_SEND)[0]
print ("write, properties: " + str(self.char_write.propertiesToString()) + ", supports read: " + str(self.char_write.supportsRead()))
self.char_discon = self.service.getCharacteristics(BLE_CHAR_DISCONNECT)[0]
print ("disconnect, properties: " + str(self.char_discon.propertiesToString()) + ", supports read: " + str(self.char_discon.supportsRead()))
# set delegate to handle incoming data
self.delegate = GanglionDelegate(self.scaling_output)
self.gang.setDelegate(self.delegate)
# enable AUX channel
if self.aux:
print("Enabling AUX data...")
try:
self.ser_write(b'n')
except Exception as e:
print("Something went wrong while enabling aux channels: " + str(e))
print("Turn on notifications")
# nead up-to-date bluepy, cf https://github.com/IanHarvey/bluepy/issues/53
self.desc_notify = self.char_read.getDescriptors(forUUID=0x2902)[0]
try:
self.desc_notify.write(b"\x01")
except Exception as e:
print("Something went wrong while trying to enable notification: " + str(e))
print("Connection established")
def init_streaming(self):
""" Tell the board to record like crazy. """
try:
if self.impedance:
print("Starting with impedance testing")
self.ser_write(b'z')
else:
self.ser_write(b'b')
except Exception as e:
print("Something went wrong while asking the board to start streaming: " + str(e))
self.streaming = True
self.packets_dropped = 0
self.time_last_packet = timeit.default_timer()
def find_port(self):
"""Detects Ganglion board MAC address -- if more than 1 around, will select first. Needs root privilege."""
print("Try to detect Ganglion MAC address. NB: Turn on bluetooth and run as root for this to work! Might not work with every BLE dongles.")
scan_time = 5
print("Scanning for 5 seconds nearby devices...")
# From bluepy example
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleDiscovery(self, dev, isNewDev, isNewData):
if isNewDev:
print ("Discovered device: " + dev.addr)
elif isNewData:
print ("Received new data from: " + dev.addr)
scanner = Scanner().withDelegate(ScanDelegate())
devices = scanner.scan(scan_time)
nb_devices = len(devices)
if nb_devices < 1:
print("No BLE devices found. Check connectivity.")
return ""
else:
print("Found " + str(nb_devices) + ", detecting Ganglion")
list_mac = []
list_id = []
for dev in devices:
# "Ganglion" should appear inside the "value" associated to "Complete Local Name", e.g. "Ganglion-b2a6"
for (adtype, desc, value) in dev.getScanData():
if desc == "Complete Local Name" and value.startswith("Ganglion"):
list_mac.append(dev.addr)
list_id.append(value)
print("Got Ganglion: " + value + ", with MAC: " + dev.addr)
break
nb_ganglions = len(list_mac)
if nb_ganglions < 1:
print("No Ganglion found ;(")
raise OSError('Cannot find OpenBCI Ganglion MAC address')
if nb_ganglions > 1:
print("Found " + str(nb_ganglions) + ", selecting first")
print("Selecting MAC address " + list_mac[0] + " for " + list_id[0])
return list_mac[0]
def ser_write(self, b):
"""Access serial port object for write"""
self.char_write.write(b)
def ser_read(self):
"""Access serial port object for read"""
return self.char_read.read()
def ser_inWaiting(self):
""" Slightly different from Cyton API, return True if ASCII messages are incoming."""
# FIXME: might have a slight problem with thread because of notifications...
if self.delegate.receiving_ASCII:
# in case the packet indicating the end of the message drops, we use a 1s timeout
if timeit.default_timer() - self.delegate.time_last_ASCII > 2:
self.delegate.receiving_ASCII = False
return self.delegate.receiving_ASCII
def getSampleRate(self):
return SAMPLE_RATE
def getNbEEGChannels(self):
"""Will not get new data on impedance check."""
return self.eeg_channels_per_sample
def getNbAUXChannels(self):
"""Might not be used depending on the mode."""
return self.aux_channels_per_sample
def getNbImpChannels(self):
"""Might not be used depending on the mode."""
return self.imp_channels_per_sample
def start_streaming(self, callback, lapse=-1):
"""
Start handling streaming data from the board. Call a provided callback
for every single sample that is processed
Args:
callback: A callback function -- or a list of functions -- that will receive a single argument of the
OpenBCISample object captured.
"""
if not self.streaming:
self.init_streaming()
start_time = timeit.default_timer()
# Enclose callback funtion in a list if it comes alone
if not isinstance(callback, list):
callback = [callback]
while self.streaming:
# should the board get disconnected and we could not wait for notification anymore, a reco should be attempted through timeout mechanism
try:
# at most we will get one sample per packet
self.waitForNotifications(1./self.getSampleRate())
except Exception as e:
print("Something went wrong while waiting for a new sample: " + str(e))
# retrieve current samples on the stack
samples = self.delegate.getSamples()
self.packets_dropped = self.delegate.getMaxPacketsDropped()
if samples:
self.time_last_packet = timeit.default_timer()
for call in callback:
for sample in samples:
call(sample)
if(lapse > 0 and timeit.default_timer() - start_time > lapse):
self.stop();
if self.log:
self.log_packet_count = self.log_packet_count + 1;
# Checking connection -- timeout and packets dropped
self.check_connection()
def waitForNotifications(self, delay):
""" Allow some time for the board to receive new data. """
self.gang.waitForNotifications(delay)
def test_signal(self, signal):
""" Enable / disable test signal """
if signal == 0:
self.warn("Disabling synthetic square wave")
try:
self.char_write.write(b']')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
elif signal == 1:
self.warn("Eisabling synthetic square wave")
try:
self.char_write.write(b'[')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
else:
self.warn("%s is not a known test signal. Valid signal is 0-1" %(signal))
def set_channel(self, channel, toggle_position):
""" Enable / disable channels """
try:
#Commands to set toggle to on position
if toggle_position == 1:
if channel is 1:
self.ser.write(b'!')
if channel is 2:
self.ser.write(b'@')
if channel is 3:
self.ser.write(b'#')
if channel is 4:
self.ser.write(b'$')
#Commands to set toggle to off position
elif toggle_position == 0:
if channel is 1:
self.ser.write(b'1')
if channel is 2:
self.ser.write(b'2')
if channel is 3:
self.ser.write(b'3')
if channel is 4:
self.ser.write(b'4')
except Exception as e:
print("Something went wrong while setting channels: " + str(e))
"""
Clean Up (atexit)
"""
def stop(self):
print("Stopping streaming...")
self.streaming = False
# connection might be already down here
try:
if self.impedance:
print("Stopping with impedance testing")
self.ser_write(b'Z')
else:
self.ser_write(b's')
except Exception as e:
print("Something went wrong while asking the board to stop streaming: " + str(e))
if self.log:
logging.warning('sent <s>: stopped streaming')
def disconnect(self):
if(self.streaming == True):
self.stop()
print("Closing BLE..")
try:
self.char_discon.write(b' ')
except Exception as e:
print("Something went wrong while asking the board to disconnect: " + str(e))
# should not try to read/write anything after that, will crash
try:
self.gang.disconnect()
except Exception as e:
print("Something went wrong while shutting down BLE link: " + str(e))
logging.warning('BLE closed')
"""
SETTINGS AND HELPERS
"""
def warn(self, text):
if self.log:
#log how many packets where sent succesfully in between warnings
if self.log_packet_count:
logging.info('Data packets received:'+str(self.log_packet_count))
self.log_packet_count = 0;
logging.warning(text)
print("Warning: %s" % text)
def check_connection(self):
""" Check connection quality in term of lag and number of packets drop. Reinit connection if necessary. FIXME: parameters given to the board will be lost."""
# stop checking when we're no longer streaming
if not self.streaming:
return
#check number of dropped packets and duration without new packets, deco/reco if too large
if self.packets_dropped > self.max_packets_to_skip:
self.warn("Too many packets dropped, attempt to reconnect")
self.reconnect()
elif self.timeout > 0 and timeit.default_timer() - self.time_last_packet > self.timeout:
self.warn("Too long since got new data, attempt to reconnect")
            # if error, attempt to reconnect
self.reconnect()
def reconnect(self):
""" In case of poor connection, will shut down and relaunch everything. FIXME: parameters given to the board will be lost."""
self.warn('Reconnecting')
self.stop()
self.disconnect()
self.connect()
self.init_streaming()
class OpenBCISample(object):
"""Object encapulsating a single sample from the OpenBCI board."""
def __init__(self, packet_id, channel_data, aux_data, imp_data):
self.id = packet_id
self.channel_data = channel_data
self.aux_data = aux_data
self.imp_data = imp_data
class GanglionDelegate(DefaultDelegate):
""" Called by bluepy (handling BLE connection) when new data arrive, parses samples. """
def __init__(self, scaling_output = True):
DefaultDelegate.__init__(self)
# holds samples until OpenBCIBoard claims them
self.samples = []
# detect gaps between packets
self.last_id = -1
self.packets_dropped = 0
# save uncompressed data to compute deltas
self.lastChannelData = [0, 0, 0, 0]
# 18bit data got here and then accelerometer with it
self.lastAcceleromoter = [0, 0, 0]
# when the board is manually set in the right mode (z to start, Z to stop), impedance will be measured. 4 channels + ref
self.lastImpedance = [0, 0, 0, 0, 0]
self.scaling_output = scaling_output
# handling incoming ASCII messages
self.receiving_ASCII = False
self.time_last_ASCII = timeit.default_timer()
def handleNotification(self, cHandle, data):
if len(data) < 1:
print('Warning: a packet should at least hold one byte...')
return
self.parse(data)
"""
PARSER:
Parses incoming data packet into OpenBCISample -- see docs. Will call the corresponding parse* function depending on the format of the packet.
"""
def parse(self, packet):
        # bluepy returns INT with python3 and STR with python2
if type(packet) is str:
# convert a list of strings in bytes
unpac = struct.unpack(str(len(packet)) + 'B', "".join(packet))
else:
unpac = packet
start_byte = unpac[0]
# Give the informative part of the packet to proper handler -- split between ID and data bytes
# Raw uncompressed
if start_byte == 0:
self.receiving_ASCII = False
self.parseRaw(start_byte, unpac[1:])
# 18-bit compression with Accelerometer
elif start_byte >= 1 and start_byte <= 100:
self.receiving_ASCII = False
self.parse18bit(start_byte, unpac[1:])
# 19-bit compression without Accelerometer
elif start_byte >=101 and start_byte <= 200:
self.receiving_ASCII = False
self.parse19bit(start_byte-100, unpac[1:])
# Impedance Channel
elif start_byte >= 201 and start_byte <= 205:
self.receiving_ASCII = False
self.parseImpedance(start_byte, packet[1:])
# Part of ASCII -- TODO: better formatting of incoming ASCII
elif start_byte == 206:
print("%\t" + str(packet[1:]))
self.receiving_ASCII = True
self.time_last_ASCII = timeit.default_timer()
# End of ASCII message
elif start_byte == 207:
print("%\t" + str(packet[1:]))
print ("$$$")
self.receiving_ASCII = False
else:
print("Warning: unknown type of packet: " + str(start_byte))
def parseRaw(self, packet_id, packet):
""" Dealing with "Raw uncompressed" """
if len(packet) != 19:
            print('Wrong size for raw data: ' + str(len(packet)) + ' instead of 19 bytes')
return
chan_data = []
# 4 channels of 24bits, take values one by one
for i in range(0,12,3):
chan_data.append(conv24bitsToInt(packet[i:i+3]))
# save uncompressed raw channel for future use and append whole sample
self.pushSample(packet_id, chan_data, self.lastAcceleromoter, self.lastImpedance)
self.lastChannelData = chan_data
self.updatePacketsCount(packet_id)
def parse19bit(self, packet_id, packet):
""" Dealing with "19-bit compression without Accelerometer" """
if len(packet) != 19:
            print('Wrong size for 19-bit compression data: ' + str(len(packet)) + ' instead of 19 bytes')
return
# should get 2 by 4 arrays of uncompressed data
deltas = decompressDeltas19Bit(packet)
# the sample_id will be shifted
delta_id = 1
for delta in deltas:
# convert from packet to sample id
sample_id = (packet_id - 1) * 2 + delta_id
# 19bit packets hold deltas between two samples
# TODO: use more broadly numpy
full_data = list(np.array(self.lastChannelData) - np.array(delta))
# NB: aux data updated only in 18bit mode, send values here only to be consistent
self.pushSample(sample_id, full_data, self.lastAcceleromoter, self.lastImpedance)
self.lastChannelData = full_data
delta_id += 1
self.updatePacketsCount(packet_id)
def parse18bit(self, packet_id, packet):
""" Dealing with "18-bit compression without Accelerometer" """
if len(packet) != 19:
            print('Wrong size for 18-bit compression data: ' + str(len(packet)) + ' instead of 19 bytes')
return
# accelerometer X
if packet_id % 10 == 1:
self.lastAcceleromoter[0] = conv8bitToInt8(packet[18])
# accelerometer Y
elif packet_id % 10 == 2:
self.lastAcceleromoter[1] = conv8bitToInt8(packet[18])
# accelerometer Z
elif packet_id % 10 == 3:
self.lastAcceleromoter[2] = conv8bitToInt8(packet[18])
# deltas: should get 2 by 4 arrays of uncompressed data
deltas = decompressDeltas18Bit(packet[:-1])
# the sample_id will be shifted
delta_id = 1
for delta in deltas:
# convert from packet to sample id
sample_id = (packet_id - 1) * 2 + delta_id
# 19bit packets hold deltas between two samples
# TODO: use more broadly numpy
full_data = list(np.array(self.lastChannelData) - np.array(delta))
self.pushSample(sample_id, full_data, self.lastAcceleromoter, self.lastImpedance)
self.lastChannelData = full_data
delta_id += 1
self.updatePacketsCount(packet_id)
def parseImpedance(self, packet_id, packet):
""" Dealing with impedance data. packet: ASCII data. NB: will take few packet (seconds) to fill"""
if packet[-2:] != "Z\n":
print('Wrong format for impedance check, should be ASCII ending with "Z\\n"')
# convert from ASCII to actual value
imp_value = int(packet[:-2])
# from 201 to 205 codes to the right array size
self.lastImpedance[packet_id- 201] = imp_value
self.pushSample(packet_id - 200, self.lastChannelData, self.lastAcceleromoter, self.lastImpedance)
def pushSample(self, sample_id, chan_data, aux_data, imp_data):
""" Add a sample to inner stack, setting ID and dealing with scaling if necessary. """
if self.scaling_output:
chan_data = list(np.array(chan_data) * scale_fac_uVolts_per_count)
aux_data = list(np.array(aux_data) * scale_fac_accel_G_per_count)
sample = OpenBCISample(sample_id, chan_data, aux_data, imp_data)
self.samples.append(sample)
def updatePacketsCount(self, packet_id):
"""Update last packet ID and dropped packets"""
if self.last_id == -1:
self.last_id = packet_id
self.packets_dropped = 0
return
# ID loops every 101 packets
if packet_id > self.last_id:
self.packets_dropped = packet_id - self.last_id - 1
else:
self.packets_dropped = packet_id + 101 - self.last_id - 1
self.last_id = packet_id
if self.packets_dropped > 0:
print("Warning: dropped " + str(self.packets_dropped) + " packets.")
def getSamples(self):
""" Retrieve and remove from input_buffer last samples. """
unstack_samples = self.samples
self.samples = []
return unstack_samples
def getMaxPacketsDropped(self):
""" While processing last samples, how many packets were dropped?"""
# TODO: return max value of the last samples array?
return self.packets_dropped
"""
DATA conversion, for the most part courtesy of OpenBCI_NodeJS_Ganglion
"""
def conv24bitsToInt(unpacked):
""" Convert 24bit data coded on 3 bytes to a proper integer """
if len(unpacked) != 3:
raise ValueError("Input should be 3 bytes long.")
# FIXME: quick'n dirty, unpack wants strings later on
literal_read = struct.pack('3B', unpacked[0], unpacked[1], unpacked[2])
#3byte int in 2s compliment
    if (unpacked[0] > 127):
pre_fix = bytes(bytearray.fromhex('FF'))
else:
pre_fix = bytes(bytearray.fromhex('00'))
literal_read = pre_fix + literal_read;
#unpack little endian(>) signed integer(i) (makes unpacking platform independent)
myInt = struct.unpack('>i', literal_read)[0]
return myInt
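# Quick sanity examples: conv24bitsToInt([0x00, 0x00, 0x01]) == 1 and
# conv24bitsToInt([0xFF, 0xFF, 0xFF]) == -1 (two's complement sign extension).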
def conv19bitToInt32(threeByteBuffer):
""" Convert 19bit data coded on 3 bytes to a proper integer (LSB bit 1 used as sign). """
if len(threeByteBuffer) != 3:
raise ValueError("Input should be 3 bytes long.")
prefix = 0;
# if LSB is 1, negative number, some hasty unsigned to signed conversion to do
if threeByteBuffer[2] & 0x01 > 0:
prefix = 0b1111111111111;
return ((prefix << 19) | (threeByteBuffer[0] << 16) | (threeByteBuffer[1] << 8) | threeByteBuffer[2]) | ~0xFFFFFFFF
else:
return (prefix << 19) | (threeByteBuffer[0] << 16) | (threeByteBuffer[1] << 8) | threeByteBuffer[2]
def conv18bitToInt32(threeByteBuffer):
""" Convert 18bit data coded on 3 bytes to a proper integer (LSB bit 1 used as sign) """
if len(threeByteBuffer) != 3:
        raise ValueError("Input should be 3 bytes long.")
prefix = 0;
# if LSB is 1, negative number, some hasty unsigned to signed conversion to do
if threeByteBuffer[2] & 0x01 > 0:
prefix = 0b11111111111111;
return ((prefix << 18) | (threeByteBuffer[0] << 16) | (threeByteBuffer[1] << 8) | threeByteBuffer[2]) | ~0xFFFFFFFF
else:
return (prefix << 18) | (threeByteBuffer[0] << 16) | (threeByteBuffer[1] << 8) | threeByteBuffer[2]
def conv8bitToInt8(byte):
""" Convert one byte to signed value """
if byte > 127:
return (256-byte) * (-1)
else:
return byte
def decompressDeltas19Bit(buffer):
"""
Called to when a compressed packet is received.
input_buffer: Just the data portion of the sample. So 19 bytes.
return {Array} - An array of deltas of shape 2x4 (2 samples per packet and 4 channels per sample.)
"""
if len(buffer) != 19:
raise ValueError("Input should be 19 bytes long.")
receivedDeltas = [[0, 0, 0, 0],[0, 0, 0, 0]]
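    # 19 data bytes = 152 bits, i.e. exactly 8 deltas of 19 bits each
    # (2 samples x 4 channels), unpacked below by hand-rolled bit slicing.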
# Sample 1 - Channel 1
miniBuf = [
(buffer[0] >> 5),
((buffer[0] & 0x1F) << 3 & 0xFF) | (buffer[1] >> 5),
((buffer[1] & 0x1F) << 3 & 0xFF) | (buffer[2] >> 5)
]
receivedDeltas[0][0] = conv19bitToInt32(miniBuf)
# Sample 1 - Channel 2
miniBuf = [
(buffer[2] & 0x1F) >> 2,
(buffer[2] << 6 & 0xFF) | (buffer[3] >> 2),
(buffer[3] << 6 & 0xFF) | (buffer[4] >> 2)
]
receivedDeltas[0][1] = conv19bitToInt32(miniBuf)
# Sample 1 - Channel 3
miniBuf = [
((buffer[4] & 0x03) << 1 & 0xFF) | (buffer[5] >> 7),
((buffer[5] & 0x7F) << 1 & 0xFF) | (buffer[6] >> 7),
((buffer[6] & 0x7F) << 1 & 0xFF) | (buffer[7] >> 7)
]
receivedDeltas[0][2] = conv19bitToInt32(miniBuf)
# Sample 1 - Channel 4
miniBuf = [
((buffer[7] & 0x7F) >> 4),
((buffer[7] & 0x0F) << 4 & 0xFF) | (buffer[8] >> 4),
((buffer[8] & 0x0F) << 4 & 0xFF) | (buffer[9] >> 4)
]
receivedDeltas[0][3] = conv19bitToInt32(miniBuf)
# Sample 2 - Channel 1
miniBuf = [
((buffer[9] & 0x0F) >> 1),
(buffer[9] << 7 & 0xFF) | (buffer[10] >> 1),
(buffer[10] << 7 & 0xFF) | (buffer[11] >> 1)
]
receivedDeltas[1][0] = conv19bitToInt32(miniBuf)
# Sample 2 - Channel 2
miniBuf = [
((buffer[11] & 0x01) << 2 & 0xFF) | (buffer[12] >> 6),
(buffer[12] << 2 & 0xFF) | (buffer[13] >> 6),
(buffer[13] << 2 & 0xFF) | (buffer[14] >> 6)
]
receivedDeltas[1][1] = conv19bitToInt32(miniBuf)
# Sample 2 - Channel 3
miniBuf = [
((buffer[14] & 0x38) >> 3),
((buffer[14] & 0x07) << 5 & 0xFF) | ((buffer[15] & 0xF8) >> 3),
((buffer[15] & 0x07) << 5 & 0xFF) | ((buffer[16] & 0xF8) >> 3)
]
receivedDeltas[1][2] = conv19bitToInt32(miniBuf)
# Sample 2 - Channel 4
miniBuf = [(buffer[16] & 0x07), buffer[17], buffer[18]]
receivedDeltas[1][3] = conv19bitToInt32(miniBuf)
return receivedDeltas;
def decompressDeltas18Bit(buffer):
"""
Called to when a compressed packet is received.
    input_buffer: Just the data portion of the sample, minus the trailing accelerometer byte. So 18 bytes.
return {Array} - An array of deltas of shape 2x4 (2 samples per packet and 4 channels per sample.)
"""
if len(buffer) != 18:
raise ValueError("Input should be 18 bytes long.")
receivedDeltas = [[0, 0, 0, 0],[0, 0, 0, 0]]
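    # 18 data bytes = 144 bits, i.e. exactly 8 deltas of 18 bits each
    # (2 samples x 4 channels); the dropped 19th byte holds accelerometer data.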
# Sample 1 - Channel 1
miniBuf = [
(buffer[0] >> 6),
((buffer[0] & 0x3F) << 2 & 0xFF) | (buffer[1] >> 6),
((buffer[1] & 0x3F) << 2 & 0xFF) | (buffer[2] >> 6)
]
receivedDeltas[0][0] = conv18bitToInt32(miniBuf);
# Sample 1 - Channel 2
miniBuf = [
(buffer[2] & 0x3F) >> 4,
(buffer[2] << 4 & 0xFF) | (buffer[3] >> 4),
(buffer[3] << 4 & 0xFF) | (buffer[4] >> 4)
]
receivedDeltas[0][1] = conv18bitToInt32(miniBuf);
# Sample 1 - Channel 3
miniBuf = [
(buffer[4] & 0x0F) >> 2,
(buffer[4] << 6 & 0xFF) | (buffer[5] >> 2),
(buffer[5] << 6 & 0xFF) | (buffer[6] >> 2)
]
receivedDeltas[0][2] = conv18bitToInt32(miniBuf);
# Sample 1 - Channel 4
miniBuf = [
(buffer[6] & 0x03),
buffer[7],
buffer[8]
]
receivedDeltas[0][3] = conv18bitToInt32(miniBuf);
# Sample 2 - Channel 1
miniBuf = [
(buffer[9] >> 6),
((buffer[9] & 0x3F) << 2 & 0xFF) | (buffer[10] >> 6),
((buffer[10] & 0x3F) << 2 & 0xFF) | (buffer[11] >> 6)
]
receivedDeltas[1][0] = conv18bitToInt32(miniBuf);
# Sample 2 - Channel 2
miniBuf = [
(buffer[11] & 0x3F) >> 4,
(buffer[11] << 4 & 0xFF) | (buffer[12] >> 4),
(buffer[12] << 4 & 0xFF) | (buffer[13] >> 4)
]
receivedDeltas[1][1] = conv18bitToInt32(miniBuf);
# Sample 2 - Channel 3
miniBuf = [
(buffer[13] & 0x0F) >> 2,
(buffer[13] << 6 & 0xFF) | (buffer[14] >> 2),
(buffer[14] << 6 & 0xFF) | (buffer[15] >> 2)
]
receivedDeltas[1][2] = conv18bitToInt32(miniBuf);
# Sample 2 - Channel 4
miniBuf = [
(buffer[15] & 0x03),
buffer[16],
buffer[17]
]
receivedDeltas[1][3] = conv18bitToInt32(miniBuf);
return receivedDeltas;
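# --- Illustrative sketch (not part of the original module) ---
# The conv19bitToInt32/conv18bitToInt32 helpers used above are assumed to read
# miniBuf as a big-endian two's-complement value; a minimal equivalent for the
# 19-bit case could look like the hypothetical helper below (shown only to
# clarify the unpacking, the real helpers live elsewhere in this module):
def _example_conv19bit_to_int32(miniBuf):
    # assemble the three bytes (3 + 8 + 8 bits) into one unsigned value
    raw = (miniBuf[0] << 16) | (miniBuf[1] << 8) | miniBuf[2]
    if raw & 0x40000:    # sign bit of a 19-bit integer
        raw -= 1 << 19   # sign-extend into a negative Python int
    return raw
# e.g. _example_conv19bit_to_int32([0x07, 0xFF, 0xFF]) == -1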
| 28,106 | 33.487117 | 161 | py |
OpenBCIPython | OpenBCIPython-master/csv_collect_and_publish_test.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.interactive(False)
import threading
import time
import os
import pandas as pd
from plugins.csv_collect_and_publish import PluginCSVCollectAndPublish
import preprocessing.init_buffer as buf
class DataFeeder(threading.Thread):
def __init__(self, plugin):
threading.Thread.__init__(self)
self.is_run = True
self.plugin = plugin
def run(self):
position = 0
while self.is_run:
index_buffer = 0
for i in range(0, plugin.number_of_channels):
buf.ring_buffers[index_buffer].append(float(str(df.ix[:, index_buffer][position])))
index_buffer += 1
position += 1
if position == df.shape[0]:
self.is_run = False
break
            time.sleep(self.plugin.sampling_time)
project_dir = "/home/runge/openbci/OpenBCI_Python"
plugin = PluginCSVCollectAndPublish()
plugin.activate()
df = pd.read_csv("/home/runge/openbci/application.linux64/application.linux64/OpenBCI-RAW-right_strait_up_new.txt")
df = df[plugin.channel_vector].dropna(axis=0)
data_feeder = DataFeeder(plugin)
data_feeder.start()
plugin.main_thread.start()
print ("running as background process")
# print ("process is done")
| 1,330 | 28.577778 | 115 | py |
OpenBCIPython | OpenBCIPython-master/analyzer.py | from __future__ import print_function
import json
import sys
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from lib.dtw import dtw
matplotlib.rc('xtick', labelsize=15)
matplotlib.rc('ytick', labelsize=15)
matplotlib.rc('axes', titlesize=20)
matplotlib.rc('legend', fontsize=20)
# manager = plt.get_current_fig_manager()
# manager.resize(*manager.window.maxsize())
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.metrics.pairwise import manhattan_distances
from preprocessing.preprocessing import PreProcessor
from preprocessing.ssa import SingularSpectrumAnalysis
class SignalAnalyzer():
def __init__(self, activity_type, project_path, dataset_location):
self.raw_data = pd.read_csv(dataset_location)
self.config_file = project_path + "/config/config.json"
self.raw_data = self.raw_data.ix[:, 0:13].dropna()
self.raw_channel_data = self.raw_data.ix[:, 2:7]
self.raw_kinect_angle_data = self.raw_data.ix[:, 10:13]
self.channel_length = self.raw_channel_data.shape[1]
self.kinect_angle_length = 3
self.angle_names = ["wrist", "elbow", "shoulder"]
self.signal_types = ["noise_signal", "noise_reduced_signal", "feature_vector"]
self.raw_channel_data_set = []
self.output_buffer = []
self.activity_type = activity_type
self.project_path = project_path
self.dataset_location = dataset_location
self.channels_names = ["ch1", "ch2", "ch3", "ch4", "ch5"]
with open(self.config_file) as config:
self.config = json.load(config)
self.config["train_dir_abs_location"] = self.project_path + "/build/dataset/train"
def nomalize_signal(self, input_signal):
mean = np.mean(input_signal, axis=0)
input_signal -= mean
return input_signal / np.std(input_signal, axis=0)
def reconstructed_channel_data(self):
for i in range(0, self.channel_length):
self.raw_channel_data_set.append(self.nomalize_signal(self.raw_channel_data.ix[:, i]))
for i in range(0, self.channel_length):
preprocessor = PreProcessor(i, self.raw_channel_data_set, self.output_buffer, self.config)
preprocessor.processor(i, activity_type=activity_type)
def reconstructed_kinect_signals(self):
kinect_angles = []
for j in range(0, self.kinect_angle_length):
nomalize_signal = self.nomalize_signal(self.raw_kinect_angle_data.ix[:, j])
reconstructed_signal = SingularSpectrumAnalysis(nomalize_signal,
int(self.config["window_size"])) \
.execute(int(self.config["number_of_principle_component"]))
max_value = reconstructed_signal.max(axis=0)
min_value = reconstructed_signal.min(axis=0)
mapping = interp1d([min_value, max_value], [0, 180])
kinect_angles.append(mapping(np.array(reconstructed_signal)))
with open(
project_path + "/build/dataset/train/result/reconstructed_" + activity_type + "_kinect__angles_.csv",
'w') as f:
np.savetxt(f, np.transpose(np.array(kinect_angles)), delimiter=',', fmt='%.18e')
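        # Illustrative sketch (not executed): interp1d builds the linear map from
        # the reconstructed signal's observed range onto joint angles, e.g. with
        # hypothetical bounds:
        #     mapping = interp1d([-2.0, 2.0], [0, 180])
        #     float(mapping(0.0))   # -> 90.0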
def append_channel_data(self):
for i in range(0, len(self.signal_types)):
signal_type = self.signal_types[i]
noise_signals = []
for i in range(0, self.channel_length):
processed_signal = pd.read_csv(str(self.config["train_dir_abs_location"]) + "/" + str(i) + "_" +
activity_type + "_" + signal_type + ".csv")
noise_signals.append(np.array(processed_signal.ix[:, 0]))
with open(str(self.config[
"train_dir_abs_location"]) + "/result/" + activity_type + "_" + signal_type + "s" + ".csv",
'w') as f:
np.savetxt(f, np.transpose(np.array(noise_signals)), delimiter=',', fmt='%.18e')
def plot_signals(self, is_save=False, start=0, end=0, fsamp=1, is_raw=False, is_compare=False):
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
matplotlib.rc('axes', titlesize=15)
matplotlib.rc('legend', fontsize=15)
if is_raw:
raw_channels_data = pd.read_csv(self.dataset_location).ix[:, 2:7].dropna()
else:
raw_channels_data = pd.read_csv(self.config["train_dir_abs_location"]
+ "/result/"+self.activity_type+"_feature_vectors.csv").dropna()
noise_reducer_signal_data = pd.read_csv(self.config["train_dir_abs_location"]
+ "/result/"+self.activity_type+"_noise_reduced_signals.csv").dropna()
self.save_channels = PdfPages('channels_'+self.activity_type+'_reconstructed.pdf')
graph_legend = []
handle_as = []
labels_as = []
num_ch = len(self.channels_names)
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(hspace=.5)
index = 1
num_types = 1
if is_compare:
num_types = 2
for h in range(0, num_ch):
# preprocessor = PreProcessor(h, None, None, self.config)
ax = plt.subplot(num_ch*num_types, num_types, index)
if (end == 0):
end = raw_channels_data.ix[:, h].shape[0] - 1
x = np.arange(start, end, 1)
input_signal = raw_channels_data.ix[:, h][start * fsamp:end * fsamp]
noise_reduced_signal = noise_reducer_signal_data.ix[:, h][start * fsamp:end * fsamp]
l1 = ax.plot(noise_reduced_signal, linewidth=1.0, label="raw signal")
graph_legend.append(l1)
index+=1
if is_compare:
ax = plt.subplot(num_ch * num_types, num_types, index)
l2 = ax.plot(input_signal, linewidth=1.0, label="svd signal")
graph_legend.append(l2)
index += 1
# with open("input.csv", 'w') as f:
# np.savetxt(f, input_signal, delimiter=',', fmt='%.18e')
# noise_reducer_signal = preprocessor.apply_noise_reducer_filer(input_signal)
# l2 = ax.plot(x, noise_reducer_signal, linewidth=3.0, label="noise_reducer_signal")
# graph_legend.append(l2)
# normalize_signal = preprocessor.nomalize_signal(noise_reducer_signal)
# l3 = ax.plot(x, normalize_signal, linewidth=1.0, label="normalize_signal")
# graph_legend.append(l3)
# reconstructed_signal = SingularSpectrumAnalysis(noise_reducer_signal, self.config["window_size"], False).execute(1)
# l4 = ax.plot(x,reconstructed_signal, linewidth=1.0, label='reconstructed signal with SSA')
# graph_legend.append(l4)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(self.channels_names[h])
# leg = plt.legend(handles=handles, labels=labels)
fig.legend(handles=handle_as[0], labels=labels_as[0])
fig.text(0.5, 0.04, 'position', ha='center', fontsize=10)
fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=10)
fig.tight_layout()
if is_save:
self.save_channels.savefig(bbox_inches='tight')
self.save_channels.close()
else:
plt.show()
def plot_kinect_angles(self, is_save=False, start=0, end=0, fsamp=1, is_raw=False):
if is_raw==True:
kinect_angle_data = pd.read_csv(self.dataset_location).ix[:, 10:13].dropna()
else:
kinect_angle_data = pd.read_csv(self.config["train_dir_abs_location"]
+ "/result/reconstructed_"+self.activity_type+"_kinect__angles_.csv").dropna()
graph_legend = []
handle_as = []
labels_as = []
self.save_kinect_anagle = PdfPages(''+self.activity_type+'_kinect_angles_reconstructed.pdf')
num_ch = 3
fig = plt.figure(figsize=(15, 10))
fig.subplots_adjust(hspace=.5)
for h in range(0, num_ch):
ax = plt.subplot(num_ch, 1, h + 1)
if (end == 0):
end = kinect_angle_data.ix[:, h].shape[0] - 1
input_signal = kinect_angle_data.ix[:, h][start * fsamp:end * fsamp]
x = np.arange(start, end, 1)
l1 = ax.plot(x, input_signal, linewidth=1.0, label="raw signal")
graph_legend.append(l1)
# nomalize_signal = self.nomalize_signal(input_signal)
# max_value = reconstructed_signal.max(axis=0)
# min_value = reconstructed_signal.min(axis=0)
# mapping = interp1d([min_value, max_value], [0, 180])
# reconstructed_signal= mapping(np.array(reconstructed_signal))
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(self.angle_names[h])
# leg = plt.legend(handles=handles, labels=labels)
fig.legend(handles=handle_as[0], labels=labels_as[0])
fig.text(0.5, 0.04, 'position', ha='center', fontsize=20)
fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=20)
if is_save:
self.save_kinect_anagle.savefig(bbox_inches='tight')
self.save_kinect_anagle.close()
else:
plt.show()
def apply_dwt(self, nomalized_signal, start, end, pattern_start_at, pattern_end_at, is_apply_dwt, channel_number=1):
if(is_apply_dwt):
pattern = np.array(nomalized_signal.ix[:, channel_number][pattern_start_at:pattern_end_at])
result = []
possion = []
final_result = []
size = pattern_end_at - pattern_start_at
counter = start
for i in range(0, int(np.floor((end-start)/5))):
# for i in range(0, 3):
y = np.array(nomalized_signal.ix[:, channel_number][counter:counter + size]).tolist()
possion.append(counter)
counter += 5
dist, cost, acc, path = dtw(pattern, y, manhattan_distances)
print (dist)
result.append(dist)
final_result.append(result)
final_result.append(possion)
with open(self.config["train_dir_abs_location"] + "/result/"+self.activity_type+"_dwt_result.csv", 'w') as f:
np.savetxt(f, np.transpose(np.array(final_result)), delimiter=',', fmt='%.18e')
return result, possion
else:
dwt_result = pd.read_csv(self.config["train_dir_abs_location"]
+ "/result/"+self.activity_type+"_dwt_result.csv").dropna()
return dwt_result.ix[:,0], dwt_result.ix[:,1]
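        # Illustrative usage (sketch): slide the template taken from
        # [pattern_start_at, pattern_end_at) across the signal in steps of 5
        # samples and keep one DTW distance per offset; low distances mark
        # candidate repetitions of the movement, e.g.
        #     distances, positions = self.apply_dwt(nomalized_signal, 0, end,
        #                                           4400, 5000, True, channel_number=1)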
def plot_kinect_angles_with_activity_signals(self, start=0, end=0, fsamp=1, is_raw=False):
if is_raw:
channels_data = self.nomalize_signal(pd.read_csv(self.dataset_location).ix[:, 2:7].dropna())
kinect_angle_data = pd.read_csv(self.dataset_location).ix[:, 10:13].dropna()
else:
channels_data = pd.read_csv(self.config["train_dir_abs_location"]
+ "/result/"+self.activity_type+"_feature_vectors.csv").dropna()
kinect_angle_data = pd.read_csv(self.config["train_dir_abs_location"]
+ "/result/reconstructed_"+self.activity_type+"_kinect__angles_.csv").dropna()
graph_legend = []
handle_as = []
labels_as = []
fig = plt.figure(figsize=(15, 10))
fig.subplots_adjust(hspace=.5)
if end==0:
end= kinect_angle_data.ix[:, 0].shape[0] - 1
x = np.arange(start, end, 1)
for i in range(0, 5):
ax = plt.subplot(810 + i + 1)
l1 = ax.plot(channels_data.ix[:, i][start:end], linewidth=1.0, label="Processed signal with SSA")
graph_legend.append(l1)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(self.channels_names[i])
for j in range(0, 3):
ax = plt.subplot(815 + 1 + j)
l1 = ax.plot(x, kinect_angle_data.ix[:, j][start:end], linewidth=1.0, label="Processed signal with SSA")
graph_legend.append(l1)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
            plt.title(self.angle_names[j])
fig.legend(handles=handle_as[0], labels=labels_as[0])
fig.text(0.5, 0.04, 'position', ha='center', fontsize=10)
fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=10)
plt.show()
def plot_detected_pattern(self, start=0, end=0, fsamp=1, is_raw=False, pattern_start_at=0, pattern_end_at=200, is_apply_dwt=False, channel_number=1):
if is_raw:
channels_data = pd.read_csv(self.dataset_location).ix[:, 2:7].dropna()
kinect_angle_data = pd.read_csv(self.dataset_location).ix[:, 10:13].dropna()
else:
channels_data = pd.read_csv(self.config["train_dir_abs_location"]
+ "/result/"+self.activity_type+"_feature_vectors.csv").dropna()
kinect_angle_data = pd.read_csv(self.config["train_dir_abs_location"]
+ "/result/reconstructed_"+self.activity_type+"_kinect__angles_.csv").dropna()
nomalized_signal = self.nomalize_signal(kinect_angle_data)
# mapping = interp1d([-1,1],[0,180])
if end==0:
end = nomalized_signal.shape[0] - 1
distance, possion = self.apply_dwt(nomalized_signal, start, end, pattern_start_at, pattern_end_at, is_apply_dwt, channel_number)
_, mintab = self.lowest_point_detect(distance, .3)
if len(mintab)==0:
print ("No patterns were detected...")
return
        indices = np.array(possion)[np.array(mintab[:, 0], dtype=int)]
graph_legend = []
handle_as = []
labels_as = []
fig = plt.figure(figsize=(15, 10))
fig.subplots_adjust(hspace=.5)
x = np.arange(start, end, 1)
for i in range(0, 5):
ax = plt.subplot(810 + i + 1)
l1 = ax.plot(x, self.nomalize_signal(channels_data.ix[:, i][start:end]), linewidth=1.0,
label="Processed signal with SSA")
graph_legend.append(l1)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
plt.title(self.channels_names[i])
for i in indices:
plt.plot([i, i], [2,1], '-r')
for j in range(0, 3):
ax = plt.subplot(815 + 1 + j)
l1 = ax.plot(x, self.nomalize_signal(kinect_angle_data.ix[:, j][start:end]), linewidth=1.0,
label="Processed signal with SSA")
graph_legend.append(l1)
handles, labels = ax.get_legend_handles_labels()
handle_as.append(handles)
labels_as.append(labels)
            plt.title(self.angle_names[j])
for i in indices:
plt.plot([i, i], [2,1], '-r')
fig.legend(handles=handle_as[0], labels=labels_as[0])
fig.text(0.5, 0.04, 'position', ha='center', fontsize=10)
fig.text(0.04, 0.5, 'angle(0-180)', va='center', rotation='vertical', fontsize=10)
plt.show()
def lowest_point_detect(self, v, delta, x=None):
maxtab = []
mintab = []
if x is None:
x = np.arange(len(v))
v = np.asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not np.isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = np.Inf, -np.Inf
mnpos, mxpos = np.NaN, np.NaN
lookformax = True
for i in np.arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return np.array(maxtab), np.array(mintab)
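        # Illustrative usage (sketch): delta is the minimum rise required after a
        # trough before it is accepted as a local minimum, e.g.
        #     _, mintab = self.lowest_point_detect([3, 1, 4, 0, 5], 0.5)
        #     # mintab -> array([[1., 1.], [3., 0.]])  (positions and values)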
def execute(self, is_init=False):
start = 0
end = 0
if is_init:
self.reconstructed_channel_data()
self.reconstructed_kinect_signals()
self.append_channel_data()
# self.plot_kinect_angles(start=start, end=end, is_raw=False)
# self.plot_signals(start=start, end=end, is_raw=True)
self.plot_detected_pattern(pattern_start_at=4400, pattern_end_at=5000, start=start, end=end, is_apply_dwt=True, channel_number=1)
# self.plot_detected_pattern(pattern_start_at=3710, pattern_end_at=3830, start=start, end=end, is_apply_dwt=True, channel_number=1)
#self.plot_kinect_angles_with_activity_signals(start, end, is_raw=False)
project_path = "/home/runge/openbci/git/OpenBCI_Python"
dataset_location = project_path+ "/build/dataset2017-5-5_23-55-32new_straight_up_filttered.csv"
activity_type = "straight_up"
# dataset_location = project_path + "/build/dataset2017-5-5_23-55-32new_bycept_filttered.csv"
# activity_type = "bycept"
signal_analyzer = SignalAnalyzer(activity_type, project_path, dataset_location)
signal_analyzer.execute()
| 18,302 | 44.643392 | 153 | py |
OpenBCIPython | OpenBCIPython-master/processor.py |
import pydub
import os
import seaborn as sb
from manager import FeatureManager
from features.energy import Energy
from features.fft import FFT
from features.mean import Mean
from features.mfcc import MFCC
from features.zcr import ZCR
from utils.Audio import Audio
sb.set(style="white", palette="muted")
import random
random.seed(20150420)
class Clip:
RATE = 44100 # All recordings in ESC are 44.1 kHz
FRAME = 248 # Frame size in samples
def __init__(self, filename, file_type):
self.filename = os.path.basename(filename)
self.path = os.path.abspath(filename)
self.directory = os.path.dirname(self.path)
self.category = self.directory.split('/')[-1]
self.audio = Audio(self.path, file_type)
with self.audio as audio:
self.featureManager = FeatureManager()
self.featureManager.addRegisteredFeatures(MFCC(self.audio,None, 32, self.FRAME, self.RATE), "mfcc")
self.featureManager.addRegisteredFeatures(FFT(self.audio,None,1024, self.FRAME, self.RATE), "fft")
# TODO recheck
# self.featureManager.addRegisteredFeatures(Energy(self.audio,None,self.FRAME, self.RATE), "energy")
self.featureManager.addRegisteredFeatures(ZCR(self.audio,None,self.FRAME, self.RATE), "zcr")
self.featureManager.addRegisteredFeatures(Mean(self.audio, None, self.FRAME, self.RATE), "mean")
self.featureManager.getRegisteredFeature("mfcc").compute_mfcc()
self.featureManager.getRegisteredFeature("fft").compute_fft()
#TODO recheck
# self.featureManager.getRegisteredFeature("energy").compute_energy()
# self.featureManager.getRegisteredFeature("energy").compute_energy_entropy()
self.featureManager.getRegisteredFeature("zcr").compute_zcr()
self.featureManager.getRegisteredFeature("mean").compute_mean()
self.feature_list = self.featureManager.getRegisteredFeatures()
def __repr__(self):
return '<{0}/{1}>'.format(self.category, self.filename)
def get_feature_vector(self):
return self.featureManager.getRegisteredFeature("fft").get_logamplitude()
| 2,233 | 23.282609 | 112 | py |
OpenBCIPython | OpenBCIPython-master/open_bci_v3.py | """
Core OpenBCI object for handling connections and samples from the board.
EXAMPLE USE:
def handle_sample(sample):
print(sample.channel_data)
board = OpenBCIBoard()
board.print_register_settings()
board.start_streaming(handle_sample)
NOTE: If the daisy module is enabled, the callback will occur every two samples, hence "packet_id" will only contain even numbers. As a side effect, the sampling rate will be divided by 2.
FIXME: at the moment we can just force daisy mode, do not check that the module is detected.
TODO: enable impedance
"""
import serial
import struct
import numpy as np
import time
import timeit
import atexit
import logging
import threading
import sys
import pdb
import glob
SAMPLE_RATE = 250.0 # Hz
START_BYTE = 0xA0 # start of data packet
END_BYTE = 0xC0 # end of data packet
ADS1299_Vref = 4.5 #reference voltage for ADC in ADS1299. set by its hardware
ADS1299_gain = 24.0 #assumed gain setting for ADS1299. set by its Arduino code
scale_fac_uVolts_per_count = ADS1299_Vref/float((pow(2,23)-1))/ADS1299_gain*1000000.
scale_fac_accel_G_per_count = 0.002 /(pow(2,4)) #assume set to +/-4G, so 2 mG
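# e.g. with the values above: 4.5 V / (2**23 - 1) / 24 * 1e6 ~= 0.02235 uV per count,
# i.e. full scale (+/-8388607 counts) corresponds to Vref/gain = 4.5/24 ~= +/-187.5 mV.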
'''
#Commands for in SDK http://docs.openbci.com/software/01-Open BCI_SDK:
command_stop = "s";
command_startText = "x";
command_startBinary = "b";
command_startBinary_wAux = "n";
command_startBinary_4chan = "v";
command_activateFilters = "F";
command_deactivateFilters = "g";
command_deactivate_channel = {"1", "2", "3", "4", "5", "6", "7", "8"};
command_activate_channel = {"q", "w", "e", "r", "t", "y", "u", "i"};
command_activate_leadoffP_channel = {"!", "@", "#", "$", "%", "^", "&", "*"}; //shift + 1-8
command_deactivate_leadoffP_channel = {"Q", "W", "E", "R", "T", "Y", "U", "I"}; //letters (plus shift) right below 1-8
command_activate_leadoffN_channel = {"A", "S", "D", "F", "G", "H", "J", "K"}; //letters (plus shift) below the letters below 1-8
command_deactivate_leadoffN_channel = {"Z", "X", "C", "V", "B", "N", "M", "<"}; //letters (plus shift) below the letters below the letters below 1-8
command_biasAuto = "`";
command_biasFixed = "~";
'''
class OpenBCIBoard(object):
"""
Handle a connection to an OpenBCI board.
Args:
port: The port to connect to.
baud: The baud of the serial connection.
daisy: Enable or disable daisy module and 16 chans readings
aux, impedance: unused, for compatibility with ganglion API
"""
def __init__(self, port=None, baud=115200, filter_data=True,
scaled_output=True, daisy=False, aux=False, impedance=False, log=True, timeout=None):
self.log = log # print_incoming_text needs log
self.streaming = False
self.baudrate = baud
self.timeout = timeout
if not port:
port = self.find_port()
self.port = port
# might be handy to know API
self.board_type = "cyton"
print("Connecting to V3 at port %s" %(port))
self.ser = serial.Serial(port= port, baudrate = baud, timeout=timeout)
print("Serial established...")
time.sleep(2)
#Initialize 32-bit board, doesn't affect 8bit board
self.ser.write(b'v');
#wait for device to be ready
time.sleep(1)
self.print_incoming_text()
self.streaming = False
self.filtering_data = filter_data
self.scaling_output = scaled_output
self.eeg_channels_per_sample = 8 # number of EEG channels per sample *from the board*
self.aux_channels_per_sample = 3 # number of AUX channels per sample *from the board*
self.imp_channels_per_sample = 0 # impedance check not supported at the moment
self.read_state = 0
self.daisy = daisy
self.last_odd_sample = OpenBCISample(-1, [], []) # used for daisy
self.log_packet_count = 0
self.attempt_reconnect = False
self.last_reconnect = 0
self.reconnect_freq = 5
self.packets_dropped = 0
#Disconnects from board when terminated
atexit.register(self.disconnect)
def getBoardType(self):
""" Returns the version of the board """
return self.board_type
def setImpedance(self, flag):
""" Enable/disable impedance measure. Not implemented at the moment on Cyton. """
return
def ser_write(self, b):
"""Access serial port object for write"""
self.ser.write(b)
def ser_read(self):
"""Access serial port object for read"""
return self.ser.read()
def ser_inWaiting(self):
"""Access serial port object for inWaiting"""
return self.ser.inWaiting();
def getSampleRate(self):
if self.daisy:
return SAMPLE_RATE/2
else:
return SAMPLE_RATE
def getNbEEGChannels(self):
if self.daisy:
return self.eeg_channels_per_sample*2
else:
return self.eeg_channels_per_sample
def getNbAUXChannels(self):
return self.aux_channels_per_sample
def getNbImpChannels(self):
return self.imp_channels_per_sample
def start_streaming(self, callback, lapse=-1):
"""
Start handling streaming data from the board. Call a provided callback
for every single sample that is processed (every two samples with daisy module).
Args:
callback: A callback function -- or a list of functions -- that will receive a single argument of the
OpenBCISample object captured.
"""
if not self.streaming:
self.ser.write(b'b')
self.streaming = True
start_time = timeit.default_timer()
    # Enclose callback function in a list if it comes alone
if not isinstance(callback, list):
callback = [callback]
#Initialize check connection
self.check_connection()
while self.streaming:
# read current sample
sample = self._read_serial_binary()
# if a daisy module is attached, wait to concatenate two samples (main board + daisy) before passing it to callback
if self.daisy:
# odd sample: daisy sample, save for later
if ~sample.id % 2:
self.last_odd_sample = sample
# even sample: concatenate and send if last sample was the fist part, otherwise drop the packet
elif sample.id - 1 == self.last_odd_sample.id:
# the aux data will be the average between the two samples, as the channel samples themselves have been averaged by the board
avg_aux_data = list((np.array(sample.aux_data) + np.array(self.last_odd_sample.aux_data))/2)
whole_sample = OpenBCISample(sample.id, sample.channel_data + self.last_odd_sample.channel_data, avg_aux_data)
for call in callback:
call(whole_sample)
else:
for call in callback:
call(sample)
if(lapse > 0 and timeit.default_timer() - start_time > lapse):
self.stop();
if self.log:
self.log_packet_count = self.log_packet_count + 1;
"""
PARSER:
Parses incoming data packet into OpenBCISample.
Incoming Packet Structure:
Start Byte(1)|Sample ID(1)|Channel Data(24)|Aux Data(6)|End Byte(1)
0xA0|0-255|8, 3-byte signed ints|3 2-byte signed ints|0xC0
"""
def _read_serial_binary(self, max_bytes_to_skip=3000):
def read(n):
bb = self.ser.read(n)
if not bb:
self.warn('Device appears to be stalled. Quitting...')
sys.exit()
raise Exception('Device Stalled')
sys.exit()
return '\xFF'
else:
return bb
for rep in range(max_bytes_to_skip):
#---------Start Byte & ID---------
if self.read_state == 0:
b = read(1)
if struct.unpack('B', b)[0] == START_BYTE:
if(rep != 0):
self.warn('Skipped %d bytes before start found' %(rep))
rep = 0;
packet_id = struct.unpack('B', read(1))[0] #packet id goes from 0-255
log_bytes_in = str(packet_id);
self.read_state = 1
#---------Channel Data---------
elif self.read_state == 1:
channel_data = []
for c in range(self.eeg_channels_per_sample):
#3 byte ints
literal_read = read(3)
unpacked = struct.unpack('3B', literal_read)
log_bytes_in = log_bytes_in + '|' + str(literal_read);
          #3-byte int in 2s complement (negative iff the first byte's MSB is set)
          if (unpacked[0] > 127):
pre_fix = bytes(bytearray.fromhex('FF'))
else:
pre_fix = bytes(bytearray.fromhex('00'))
literal_read = pre_fix + literal_read;
#unpack little endian(>) signed integer(i) (makes unpacking platform independent)
myInt = struct.unpack('>i', literal_read)[0]
if self.scaling_output:
channel_data.append(myInt*scale_fac_uVolts_per_count)
else:
channel_data.append(myInt)
self.read_state = 2;
#---------Accelerometer Data---------
elif self.read_state == 2:
aux_data = []
for a in range(self.aux_channels_per_sample):
#short = h
acc = struct.unpack('>h', read(2))[0]
log_bytes_in = log_bytes_in + '|' + str(acc);
if self.scaling_output:
aux_data.append(acc*scale_fac_accel_G_per_count)
else:
aux_data.append(acc)
self.read_state = 3;
#---------End Byte---------
elif self.read_state == 3:
val = struct.unpack('B', read(1))[0]
log_bytes_in = log_bytes_in + '|' + str(val);
self.read_state = 0 #read next packet
if (val == END_BYTE):
sample = OpenBCISample(packet_id, channel_data, aux_data)
self.packets_dropped = 0
return sample
else:
self.warn("ID:<%d> <Unexpected END_BYTE found <%s> instead of <%s>"
%(packet_id, val, END_BYTE))
logging.debug(log_bytes_in);
self.packets_dropped = self.packets_dropped + 1
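  # Worked example (illustrative): a channel count arrives as 3 bytes, big-endian,
  # two's complement. b'\xFF\xFF\xFF' gets the b'\xFF' prefix, so
  # struct.unpack('>i', b'\xFF\xFF\xFF\xFF')[0] == -1, which scales to
  # -1 * scale_fac_uVolts_per_count ~= -0.0224 uV.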
"""
Clean Up (atexit)
"""
def stop(self):
print("Stopping streaming...\nWait for input_buffer to flush...")
self.streaming = False
self.ser.write(b's')
if self.log:
logging.warning('sent <s>: stopped streaming')
def disconnect(self):
if(self.streaming == True):
self.stop()
if (self.ser.isOpen()):
print("Closing Serial...")
self.ser.close()
logging.warning('serial closed')
"""
SETTINGS AND HELPERS
"""
def warn(self, text):
if self.log:
      #log how many packets were sent successfully in between warnings
if self.log_packet_count:
logging.info('Data packets received:'+str(self.log_packet_count))
self.log_packet_count = 0;
logging.warning(text)
print("Warning: %s" % text)
def print_incoming_text(self):
"""
When starting the connection, print all the debug data until
we get to a line with the end sequence '$$$'.
"""
line = ''
#Wait for device to send data
time.sleep(1)
if self.ser.inWaiting():
line = ''
c = ''
#Look for end sequence $$$
while '$$$' not in line:
c = self.ser.read().decode('utf-8', errors='replace') # we're supposed to get UTF8 text, but the board might behave otherwise
line += c
print(line);
else:
self.warn("No Message")
def openbci_id(self, serial):
"""
When automatically detecting port, parse the serial return for the "OpenBCI" ID.
"""
line = ''
#Wait for device to send data
time.sleep(2)
if serial.inWaiting():
line = ''
c = ''
#Look for end sequence $$$
while '$$$' not in line:
c = serial.read().decode('utf-8', errors='replace') # we're supposed to get UTF8 text, but the board might behave otherwise
line += c
if "OpenBCI" in line:
return True
return False
def print_register_settings(self):
self.ser.write(b'?')
time.sleep(0.5)
self.print_incoming_text();
#DEBBUGING: Prints individual incoming bytes
def print_bytes_in(self):
if not self.streaming:
self.ser.write(b'b')
self.streaming = True
while self.streaming:
print(struct.unpack('B',self.ser.read())[0]);
'''Incoming Packet Structure:
Start Byte(1)|Sample ID(1)|Channel Data(24)|Aux Data(6)|End Byte(1)
0xA0|0-255|8, 3-byte signed ints|3 2-byte signed ints|0xC0'''
def print_packets_in(self):
    skipped_str = ''
    while self.streaming:
b = struct.unpack('B', self.ser.read())[0];
if b == START_BYTE:
self.attempt_reconnect = False
if skipped_str:
logging.debug('SKIPPED\n' + skipped_str + '\nSKIPPED')
skipped_str = ''
packet_str = "%03d"%(b) + '|';
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + "%03d"%(b) + '|';
#data channels
for i in range(24-1):
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b);
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b) + '|';
#aux channels
for i in range(6-1):
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b);
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b) + '|';
#end byte
b = struct.unpack('B', self.ser.read())[0];
#Valid Packet
if b == END_BYTE:
packet_str = packet_str + '.' + "%03d"%(b) + '|VAL';
print(packet_str)
#logging.debug(packet_str)
#Invalid Packet
else:
packet_str = packet_str + '.' + "%03d"%(b) + '|INV';
#Reset
self.attempt_reconnect = True
else:
print(b)
if b == END_BYTE:
skipped_str = skipped_str + '|END|'
else:
skipped_str = skipped_str + "%03d"%(b) + '.'
if self.attempt_reconnect and (timeit.default_timer()-self.last_reconnect) > self.reconnect_freq:
self.last_reconnect = timeit.default_timer()
self.warn('Reconnecting')
self.reconnect()
def check_connection(self, interval = 2, max_packets_to_skip=10):
# stop checking when we're no longer streaming
if not self.streaming:
return
#check number of dropped packages and establish connection problem if too large
if self.packets_dropped > max_packets_to_skip:
#if error, attempt to reconect
self.reconnect()
# check again again in 2 seconds
threading.Timer(interval, self.check_connection).start()
def reconnect(self):
self.packets_dropped = 0
self.warn('Reconnecting')
self.stop()
time.sleep(0.5)
self.ser.write(b'v')
time.sleep(0.5)
self.ser.write(b'b')
time.sleep(0.5)
self.streaming = True
#self.attempt_reconnect = False
#Adds a filter at 60hz to cancel out ambient electrical noise
def enable_filters(self):
self.ser.write(b'f')
self.filtering_data = True;
def disable_filters(self):
self.ser.write(b'g')
self.filtering_data = False;
def test_signal(self, signal):
""" Enable / disable test signal """
if signal == 0:
self.ser.write(b'0')
self.warn("Connecting all pins to ground")
elif signal == 1:
self.ser.write(b'p')
self.warn("Connecting all pins to Vcc")
elif signal == 2:
self.ser.write(b'-')
self.warn("Connecting pins to low frequency 1x amp signal")
elif signal == 3:
self.ser.write(b'=')
self.warn("Connecting pins to high frequency 1x amp signal")
elif signal == 4:
self.ser.write(b'[')
self.warn("Connecting pins to low frequency 2x amp signal")
elif signal == 5:
self.ser.write(b']')
self.warn("Connecting pins to high frequency 2x amp signal")
else:
self.warn("%s is not a known test signal. Valid signals go from 0-5" %(signal))
def set_channel(self, channel, toggle_position):
""" Enable / disable channels """
#Commands to set toggle to on position
if toggle_position == 1:
if channel is 1:
self.ser.write(b'!')
if channel is 2:
self.ser.write(b'@')
if channel is 3:
self.ser.write(b'#')
if channel is 4:
self.ser.write(b'$')
if channel is 5:
self.ser.write(b'%')
if channel is 6:
self.ser.write(b'^')
if channel is 7:
self.ser.write(b'&')
if channel is 8:
self.ser.write(b'*')
if channel is 9 and self.daisy:
self.ser.write(b'Q')
if channel is 10 and self.daisy:
self.ser.write(b'W')
if channel is 11 and self.daisy:
self.ser.write(b'E')
if channel is 12 and self.daisy:
self.ser.write(b'R')
if channel is 13 and self.daisy:
self.ser.write(b'T')
if channel is 14 and self.daisy:
self.ser.write(b'Y')
if channel is 15 and self.daisy:
self.ser.write(b'U')
if channel is 16 and self.daisy:
self.ser.write(b'I')
#Commands to set toggle to off position
elif toggle_position == 0:
if channel is 1:
self.ser.write(b'1')
if channel is 2:
self.ser.write(b'2')
if channel is 3:
self.ser.write(b'3')
if channel is 4:
self.ser.write(b'4')
if channel is 5:
self.ser.write(b'5')
if channel is 6:
self.ser.write(b'6')
if channel is 7:
self.ser.write(b'7')
if channel is 8:
self.ser.write(b'8')
if channel is 9 and self.daisy:
self.ser.write(b'q')
if channel is 10 and self.daisy:
self.ser.write(b'w')
if channel is 11 and self.daisy:
self.ser.write(b'e')
if channel is 12 and self.daisy:
self.ser.write(b'r')
if channel is 13 and self.daisy:
self.ser.write(b't')
if channel is 14 and self.daisy:
self.ser.write(b'y')
if channel is 15 and self.daisy:
self.ser.write(b'u')
if channel is 16 and self.daisy:
self.ser.write(b'i')
def find_port(self):
# Finds the serial port names
if sys.platform.startswith('win'):
ports = ['COM%s' % (i+1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
ports = glob.glob('/dev/ttyUSB*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.usbserial*')
else:
raise EnvironmentError('Error finding ports on your operating system')
openbci_port = ''
for port in ports:
try:
s = serial.Serial(port= port, baudrate = self.baudrate, timeout=self.timeout)
s.write(b'v')
openbci_serial = self.openbci_id(s)
s.close()
if openbci_serial:
openbci_port = port;
except (OSError, serial.SerialException):
pass
if openbci_port == '':
raise OSError('Cannot find OpenBCI port')
else:
return openbci_port
class OpenBCISample(object):
  """Object encapsulating a single sample from the OpenBCI board. NB: dummy imp for plugin compatibility"""
def __init__(self, packet_id, channel_data, aux_data):
self.id = packet_id
self.channel_data = channel_data
self.aux_data = aux_data
self.imp_data = []
| 19,139 | 30.27451 | 185 | py |
OpenBCIPython | OpenBCIPython-master/plugin_interface.py |
"""
Extends Yapsy IPlugin interface to pass information about the board to plugins.
Fields of interest for plugins:
args: list of arguments passed to the plugins
sample_rate: actual sample rate of the board
eeg_channels: number of EEG
aux_channels: number of AUX channels
If needed, plugins that need to report an error can set self.is_activated to False during activate() call.
NB: because of how the yapsy discovery system works, plugins must inherit using the following syntax in order to use polymorphism (see http://yapsy.sourceforge.net/Advices.html):
import plugin_interface as plugintypes
class PluginExample(plugintypes.IPluginExtended):
...
"""
from yapsy.IPlugin import IPlugin
class IPluginExtended(IPlugin):
# args: passed by command line
def pre_activate(self, args, sample_rate=250, eeg_channels=8, aux_channels=3, imp_channels=0):
self.args = args
self.sample_rate = sample_rate
self.eeg_channels = eeg_channels
self.aux_channels = aux_channels
self.imp_channels = imp_channels
# by default we say that activation was okay -- inherited from IPlugin
self.is_activated = True
self.activate()
# tell outside world if init went good or bad
return self.is_activated
# inherited from IPlugin
def activate(self):
print "Plugin %s activated." % (self.__class__.__name__)
# inherited from IPlugin
def deactivate(self):
print "Plugin %s deactivated." % (self.__class__.__name__)
# plugins that require arguments should implement this method
def show_help(self):
print "I, %s, do not need any parameter." % (self.__class__.__name__)
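# Illustrative sketch (hypothetical plugin, not shipped with the package):
#
#     import plugin_interface as plugintypes
#
#     class PluginPrintChannels(plugintypes.IPluginExtended):
#         def activate(self):
#             print "Streaming at %s Hz with %d EEG channels." % (self.sample_rate, self.eeg_channels)
#
#         # invoked with each OpenBCISample by the acquisition script
#         def __call__(self, sample):
#             print sample.channel_data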
| 1,639 | 33.166667 | 166 | py |
OpenBCIPython | OpenBCIPython-master/open_bci_v_ganglion.py | """
Core OpenBCI object for handling connections and samples from the board.
EXAMPLE USE:
def handle_sample(sample):
print(sample.channels)
board = OpenBCIBoard()
board.print_register_settings()
board.start(handle_sample)
NOTE: If the daisy module is enabled, the callback will occur every two samples, hence "packet_id" will only contain even numbers. As a side effect, the sampling rate will be divided by 2.
FIXME: at the moment we can just force daisy mode, do not check that the module is detected.
"""
import serial
import struct
import numpy as np
import time
import timeit
import atexit
import logging
import threading
import sys
import pdb
SAMPLE_RATE = 256.0 # Hz
START_BYTE = 0xA0 # start of data packet
END_BYTE = 0xC0 # end of data packet
MCP_GAIN = 1
AD8293G_GAIN = 80
scale_fac_uVolts_per_count = 1200000.0/(pow(2,23)*AD8293G_GAIN*MCP_GAIN*1.5)
scale_fac_accel_G_per_count = 0.002 /(pow(2,4)) #assume set to +/-4G, so 2 mG
'''
#Commands for in SDK http://docs.openbci.com/software/01-Open BCI_SDK:
command_stop = "s";
command_startText = "x";
command_startBinary = "b";
command_startBinary_wAux = "n";
command_startBinary_4chan = "v";
command_activateFilters = "F";
command_deactivateFilters = "g";
command_deactivate_channel = {"1", "2", "3", "4", "5", "6", "7", "8"};
command_activate_channel = {"q", "w", "e", "r", "t", "y", "u", "i"};
command_activate_leadoffP_channel = {"!", "@", "#", "$", "%", "^", "&", "*"}; //shift + 1-8
command_deactivate_leadoffP_channel = {"Q", "W", "E", "R", "T", "Y", "U", "I"}; //letters (plus shift) right below 1-8
command_activate_leadoffN_channel = {"A", "S", "D", "F", "G", "H", "J", "K"}; //letters (plus shift) below the letters below 1-8
command_deactivate_leadoffN_channel = {"Z", "X", "C", "V", "B", "N", "M", "<"}; //letters (plus shift) below the letters below the letters below 1-8
command_biasAuto = "`";
command_biasFixed = "~";
'''
class OpenBCIBoard(object):
"""
Handle a connection to an OpenBCI board.
Args:
port: The port to connect to.
baud: The baud of the serial connection.
daisy: Enable or disable daisy module and 16 chans readings
"""
def __init__(self, port=None, baud=115200, filter_data=True,
scaled_output=True, daisy=False, log=True, timeout=None):
if not port:
port = find_port()
if not port:
raise OSError('open_bci_v4.py: Cannot find OpenBCI port')
print("open_bci_v4.py: Connecting to V4 at port %s" % (port))
self.ser = serial.Serial(port=port, baudrate=baud, timeout=timeout)
time.sleep(2)
# Initialize 32-bit board, doesn't affect 8bit board
self.ser.write('v')
print("open_bci_v4.py: Connecting to V4 at port %s" % (port))
# wait for device to be ready
time.sleep(1)
self.log = log # Chip moved this earlier to prevent bombing
self.log_packet_count = 0 # Chip moved this earlier to prevent bombing
self.print_incoming_text()
#print("open_bci_v4.py: setting many defaults...")
self.streaming = False
self.filtering_data = filter_data
self.scaling_output = scaled_output
self.eeg_channels_per_sample = 8 # number of EEG channels per sample *from the board*
self.aux_channels_per_sample = 3 # number of AUX channels per sample *from the board*
self.read_state = 0
self.daisy = daisy
self.last_odd_sample = OpenBCISample(-1, [], []) # used for daisy
self.attempt_reconnect = False
self.last_reconnect = 0
self.reconnect_freq = 5
self.packets_dropped = 0
# Disconnects from board when terminated
atexit.register(self.disconnect)
def getSampleRate(self):
if self.daisy:
return SAMPLE_RATE/2
else:
return SAMPLE_RATE
def getNbEEGChannels(self):
if self.daisy:
return self.eeg_channels_per_sample*2
else:
return self.eeg_channels_per_sample
def getNbAUXChannels(self):
return self.aux_channels_per_sample
def start_streaming(self, callback, lapse=-1):
"""
Start handling streaming data from the board. Call a provided callback
for every single sample that is processed (every two samples with daisy module).
Args:
callback: A callback function -- or a list of functions -- that will receive a single argument of the
OpenBCISample object captured.
"""
if not self.streaming:
self.ser.write('b')
self.streaming = True
start_time = timeit.default_timer()
    # Enclose callback function in a list if it comes alone
if not isinstance(callback, list):
callback = [callback]
#Initialize check connection
self.check_connection()
while self.streaming:
# read current sample
sample = self._read_serial_binary()
# if a daisy module is attached, wait to concatenate two samples (main board + daisy) before passing it to callback
if self.daisy:
# odd sample: daisy sample, save for later
if ~sample.id % 2:
self.last_odd_sample = sample
# even sample: concatenate and send if last sample was the fist part, otherwise drop the packet
elif sample.id - 1 == self.last_odd_sample.id:
# the aux data will be the average between the two samples, as the channel samples themselves have been averaged by the board
avg_aux_data = list((np.array(sample.aux_data) + np.array(self.last_odd_sample.aux_data))/2)
whole_sample = OpenBCISample(sample.id, sample.channel_data + self.last_odd_sample.channel_data, avg_aux_data)
for call in callback:
call(whole_sample)
else:
for call in callback:
call(sample)
if(lapse > 0 and timeit.default_timer() - start_time > lapse):
self.stop();
if self.log:
self.log_packet_count = self.log_packet_count + 1;
"""
PARSER:
Parses incoming data packet into OpenBCISample.
Incoming Packet Structure:
Start Byte(1)|Sample ID(1)|Channel Data(24)|Aux Data(6)|End Byte(1)
0xA0|0-255|8, 3-byte signed ints|3 2-byte signed ints|0xC0
"""
def _read_serial_binary(self, max_bytes_to_skip=3000):
def read(n):
b = self.ser.read(n)
if not b:
self.warn('Device appears to be stalled. Quitting...')
sys.exit()
raise Exception('Device Stalled')
sys.exit()
return '\xFF'
else:
return b
for rep in xrange(max_bytes_to_skip):
#---------Start Byte & ID---------
if self.read_state == 0:
b = read(1)
if struct.unpack('B', b)[0] == START_BYTE:
if(rep != 0):
self.warn('Skipped %d bytes before start found' %(rep))
rep = 0;
packet_id = struct.unpack('B', read(1))[0] #packet id goes from 0-255
log_bytes_in = str(packet_id);
self.read_state = 1
#---------Channel Data---------
elif self.read_state == 1:
channel_data = []
for c in xrange(self.eeg_channels_per_sample):
#3 byte ints
literal_read = read(3)
unpacked = struct.unpack('3B', literal_read)
log_bytes_in = log_bytes_in + '|' + str(literal_read);
          #3-byte int in 2s complement (negative iff the first byte's MSB is set)
          if (unpacked[0] > 127):
pre_fix = '\xFF'
else:
pre_fix = '\x00'
literal_read = pre_fix + literal_read;
#unpack little endian(>) signed integer(i) (makes unpacking platform independent)
myInt = struct.unpack('>i', literal_read)[0]
if self.scaling_output:
channel_data.append(myInt*scale_fac_uVolts_per_count)
else:
channel_data.append(myInt)
self.read_state = 2;
#---------Accelerometer Data---------
elif self.read_state == 2:
aux_data = []
for a in xrange(self.aux_channels_per_sample):
#short = h
acc = struct.unpack('>h', read(2))[0]
log_bytes_in = log_bytes_in + '|' + str(acc);
if self.scaling_output:
aux_data.append(acc*scale_fac_accel_G_per_count)
else:
aux_data.append(acc)
self.read_state = 3;
#---------End Byte---------
elif self.read_state == 3:
val = struct.unpack('B', read(1))[0]
log_bytes_in = log_bytes_in + '|' + str(val);
self.read_state = 0 #read next packet
if (val == END_BYTE):
sample = OpenBCISample(packet_id, channel_data, aux_data)
self.packets_dropped = 0
return sample
else:
self.warn("ID:<%d> <Unexpected END_BYTE found <%s> instead of <%s>"
%(packet_id, val, END_BYTE))
logging.debug(log_bytes_in);
self.packets_dropped = self.packets_dropped + 1
"""
Clean Up (atexit)
"""
def stop(self):
print("Stopping streaming...\nWait for buffer to flush...")
self.streaming = False
self.ser.write('s')
if self.log:
logging.warning('sent <s>: stopped streaming')
def disconnect(self):
if(self.streaming == True):
self.stop()
if (self.ser.isOpen()):
print("Closing Serial...")
self.ser.close()
logging.warning('serial closed')
"""
SETTINGS AND HELPERS
"""
def warn(self, text):
if self.log:
      #log how many packets were sent successfully in between warnings
if self.log_packet_count:
logging.info('Data packets received:'+str(self.log_packet_count))
self.log_packet_count = 0;
logging.warning(text)
print("Warning: %s" % text)
def print_incoming_text(self):
"""
When starting the connection, print all the debug data until
we get to a line with the end sequence '$$$'.
"""
line = ''
#Wait for device to send data
time.sleep(1)
if self.ser.inWaiting():
line = ''
c = ''
#Look for end sequence $$$
while '$$$' not in line:
c = self.ser.read()
line += c
print(line);
else:
self.warn("No Message")
def print_register_settings(self):
self.ser.write('?')
time.sleep(0.5)
    self.print_incoming_text();
#DEBBUGING: Prints individual incoming bytes
def print_bytes_in(self):
if not self.streaming:
self.ser.write('b')
self.streaming = True
while self.streaming:
print(struct.unpack('B',self.ser.read())[0]);
'''Incoming Packet Structure:
Start Byte(1)|Sample ID(1)|Channel Data(24)|Aux Data(6)|End Byte(1)
0xA0|0-255|8, 3-byte signed ints|3 2-byte signed ints|0xC0'''
def print_packets_in(self):
if not self.streaming:
self.ser.write('b')
self.streaming = True
skipped_str = ''
while self.streaming:
b = struct.unpack('B', self.ser.read())[0];
if b == START_BYTE:
self.attempt_reconnect = False
if skipped_str:
logging.debug('SKIPPED\n' + skipped_str + '\nSKIPPED')
skipped_str = ''
packet_str = "%03d"%(b) + '|';
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + "%03d"%(b) + '|';
#data channels
for i in xrange(24-1):
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b);
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b) + '|';
#aux channels
for i in xrange(6-1):
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b);
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b) + '|';
#end byte
b = struct.unpack('B', self.ser.read())[0];
#Valid Packet
if b == END_BYTE:
packet_str = packet_str + '.' + "%03d"%(b) + '|VAL';
print(packet_str)
#logging.debug(packet_str)
#Invalid Packet
else:
packet_str = packet_str + '.' + "%03d"%(b) + '|INV';
#Reset
self.attempt_reconnect = True
else:
print(b)
if b == END_BYTE:
skipped_str = skipped_str + '|END|'
else:
skipped_str = skipped_str + "%03d"%(b) + '.'
if self.attempt_reconnect and (timeit.default_timer()-self.last_reconnect) > self.reconnect_freq:
self.last_reconnect = timeit.default_timer()
self.warn('Reconnecting')
self.reconnect()
def check_connection(self, interval = 2, max_packets_to_skip=10):
#check number of dropped packages and establish connection problem if too large
if self.packets_dropped > max_packets_to_skip:
#if error, attempt to reconect
self.reconnect()
# check again again in 2 seconds
threading.Timer(interval, self.check_connection).start()
def reconnect(self):
self.packets_dropped = 0
self.warn('Reconnecting')
self.stop()
time.sleep(0.5)
self.ser.write('v')
time.sleep(0.5)
self.ser.write('b')
time.sleep(0.5)
self.streaming = True
#self.attempt_reconnect = False
#Adds a filter at 60hz to cancel out ambient electrical noise
def enable_filters(self):
self.ser.write('f')
self.filtering_data = True;
def disable_filters(self):
self.ser.write('g')
self.filtering_data = False;
def test_signal(self, signal):
if signal == 0:
self.ser.write('0')
self.warn("Connecting all pins to ground")
elif signal == 1:
self.ser.write('p')
self.warn("Connecting all pins to Vcc")
elif signal == 2:
self.ser.write('-')
self.warn("Connecting pins to low frequency 1x amp signal")
elif signal == 3:
self.ser.write('=')
self.warn("Connecting pins to high frequency 1x amp signal")
elif signal == 4:
self.ser.write('[')
self.warn("Connecting pins to low frequency 2x amp signal")
elif signal == 5:
self.ser.write(']')
self.warn("Connecting pins to high frequency 2x amp signal")
else:
self.warn("%s is not a known test signal. Valid signals go from 0-5" %(signal))
def set_channel(self, channel, toggle_position):
#Commands to set toggle to on position
if toggle_position == 1:
if channel is 1:
self.ser.write('!')
if channel is 2:
self.ser.write('@')
if channel is 3:
self.ser.write('#')
if channel is 4:
self.ser.write('$')
if channel is 5:
self.ser.write('%')
if channel is 6:
self.ser.write('^')
if channel is 7:
self.ser.write('&')
if channel is 8:
self.ser.write('*')
if channel is 9 and self.daisy:
self.ser.write('Q')
if channel is 10 and self.daisy:
self.ser.write('W')
if channel is 11 and self.daisy:
self.ser.write('E')
if channel is 12 and self.daisy:
self.ser.write('R')
if channel is 13 and self.daisy:
self.ser.write('T')
if channel is 14 and self.daisy:
self.ser.write('Y')
if channel is 15 and self.daisy:
self.ser.write('U')
if channel is 16 and self.daisy:
self.ser.write('I')
#Commands to set toggle to off position
elif toggle_position == 0:
if channel is 1:
self.ser.write('1')
if channel is 2:
self.ser.write('2')
if channel is 3:
self.ser.write('3')
if channel is 4:
self.ser.write('4')
if channel is 5:
self.ser.write('5')
if channel is 6:
self.ser.write('6')
if channel is 7:
self.ser.write('7')
if channel is 8:
self.ser.write('8')
if channel is 9 and self.daisy:
self.ser.write('q')
if channel is 10 and self.daisy:
self.ser.write('w')
if channel is 11 and self.daisy:
self.ser.write('e')
if channel is 12 and self.daisy:
self.ser.write('r')
if channel is 13 and self.daisy:
self.ser.write('t')
if channel is 14 and self.daisy:
self.ser.write('y')
if channel is 15 and self.daisy:
self.ser.write('u')
if channel is 16 and self.daisy:
self.ser.write('i')
class OpenBCISample(object):
  """Object encapsulating a single sample from the OpenBCI board."""
def __init__(self, packet_id, channel_data, aux_data):
self.id = packet_id;
self.channel_data = channel_data;
self.aux_data = aux_data;
| 16,490 | 29.652416 | 185 | py |
OpenBCIPython | OpenBCIPython-master/manager.py |
class FeatureManager:
def __init__(self):
self.featureList = {}
def __enter__(self):
        print "Initializing feature calculation..."
def __exit__(self, exc_type, exc_val, exc_tb):
print "Done with calculations..."
def addRegisteredFeatures(self, feature, featureId):
self.featureList[featureId] = feature
def getRegisteredFeatures(self):
return self.featureList
def getRegisteredFeature(self, feature_id):
return self.featureList[feature_id]
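# Illustrative usage (sketch; `mfcc_feature` stands for any extractor object):
#     manager = FeatureManager()
#     manager.addRegisteredFeatures(mfcc_feature, "mfcc")
#     manager.getRegisteredFeature("mfcc")    # -> mfcc_feature
#     manager.getRegisteredFeatures()         # -> {"mfcc": mfcc_feature}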
| 514 | 23.52381 | 56 | py |
OpenBCIPython | OpenBCIPython-master/loader.py | import os
import matplotlib
from utils import data_types_utils
from utils.dataset_writer_utils import read_and_decode, create_sample_from_image
from utils.utils import get_label
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.interactive(False)
import numpy as np
import tensorflow as tf
import time
import librosa
import librosa.display
import numpy
import json
from PIL import Image
from preprocessing.processor import Clip
class DataLoader:
def __init__(self, project_dir, dataset_dir):
meta_info_file = project_dir + "/config/config.json"
with open(meta_info_file) as data_file:
meta_info = json.load(data_file)
self.conf = meta_info
# TODO separate out common stuff
meta_info = meta_info["processing"]["train"]
self.batch_process_threads_num = int(meta_info["batch_process_threads_num"])
self.project_dir = project_dir
self.dataset_dir = dataset_dir
self.num_epochs = int(meta_info["num_epochs"])
self.batch_size = int(meta_info["batch_size"])
self.train_dir = project_dir + str(meta_info["dir"])
self.train_file = str(meta_info["data_set_name"])
self.tfrecords_filename = project_dir + str(meta_info["tfrecords_filename"])
self.number_of_class = int(meta_info["number_of_class"])
self.generated_image_width = int(meta_info["generated_image_width"])
self.generated_image_height = int(meta_info["generated_image_height"])
self.feature_vector_size = int(self.conf["feature_vector_size"])
self.num_channels = int(self.conf["number_of_channels"])
self.conf["processing"]["train"]["number_of_channels"] = self.num_channels
self.generated_image_dir = project_dir + str(meta_info["generated_image_dir"])
self.sampling_rate = self.conf["sampling_rate"]
# TODO add validate and test initialization
def get_train_config(self):
return self.conf["processing"]["train"]
# def save_plot_clip_overview(self, clip, i):
# with clip.audio as audio:
# figure = plt.figure(figsize=(self.generated_image_width, self.generated_image_height), dpi=1)
# axis = figure.add_subplot(111)
# plt.axis('off')
# plt.tick_params(axis='both', left='off', top='off', right='off', bottom='off', labelleft='off',
# labeltop='off',
# labelright='off', labelbottom='off')
# result = np.array(np.array(clip.feature_list['fft'].get_logamplitude()[0:1]))
# librosa.display.specshow(result, sr=clip.RATE, x_axis='time', y_axis='mel', cmap='RdBu_r')
# extent = axis.get_window_extent().transformed(figure.dpi_scale_trans.inverted())
# clip.filename = self.generated_image_dir + clip.filename + str(i) + str("_.jpg")
# plt.savefig(clip.filename, format='jpg', bbox_inches=extent, pad_inches=0, dpi=1)
# plt.close()
# return clip.filename
# def save_clip_overview(self, categories=5, clips_shown=1, clips=None):
# for c in range(0, categories):
# for i in range(0, clips_shown):
# self.save_plot_clip_overview(clips[c][i], i)
# def create_one_big_file(self, file_type):
# writer = tf.python_io.TFRecordWriter(self.tfrecords_filename)
# for directory in sorted(os.listdir('{0}/'.format(self.dataset_dir))):
# store_location = self.generated_image_dir + directory
# # todo make directory if not created
# directory = '{0}/{1}'.format(self.dataset_dir, directory)
# if os.path.isdir(directory) and os.path.basename(directory)[0:3].isdigit():
# print('Parsing ' + directory)
# for clip in sorted(os.listdir(directory)):
# if clip[-3:] == file_type:
# clip_label, clip_data, rows, _ = self.extracted_sample(directory, clip, file_type)
# for j in range(0, rows - 2):
# clip_filename = self.draw_sample_plot_and_save(clip_data, store_location, clip, j)
# sample = create_sample_from_image(clip_filename, clip_label, self.get_train_config())
# writer.write(sample.SerializeToString())
# writer.close()
# return
# print('All {0} recordings loaded.'.format(self.train_file))
def extracted_sample(self, directory, clip, file_type):
print ('{0}/{1}'.format(directory, clip))
clip_category = ('{0}/{1}'.format(directory, clip), directory.split("/0")[1].
split("-")[0].strip())[1]
clip_data = Clip('{0}/{1}'.format(directory, clip), file_type). \
get_feature_vector()
rows = clip_data.shape[0]
cols = clip_data.shape[1]
clip_label = get_label(int(clip_category), self.number_of_class).tostring()
return clip_label, clip_data, rows, cols
# def draw_sample_plot_and_save(self, raw_data_clip, store_location, clip, index):
# figure = plt.figure(figsize=(
# np.ceil(self.generated_image_width + self.generated_image_width * 0.2),
# np.ceil(self.generated_image_height + self.generated_image_height * 0.2)), dpi=1)
# axis = figure.add_subplot(111)
# plt.axis('off')
# plt.tick_params(axis='both', left='off', top='off', right='off', bottom='off',
# labelleft='off',
# labeltop='off',
# labelright='off', labelbottom='off')
# result = np.array(np.array(raw_data_clip[index:index + 1]))
# librosa.display.specshow(result, sr=self.sampling_rate, x_axis='time', y_axis='mel', cmap='RdBu_r')
# extent = axis.get_window_extent().transformed(figure.dpi_scale_trans.inverted())
# clip_filename = "%s%s%s%s" % (store_location, clip, str(index), "_.jpg")
# plt.savefig(clip_filename, format='jpg', bbox_inches=extent, pad_inches=0)
# plt.close(figure)
# return clip_filename
def inputs(self):
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer([self.tfrecords_filename],
num_epochs=self.num_epochs)
image, label = read_and_decode(filename_queue, self.conf)
images, sparse_labels = tf.train.shuffle_batch(
[image, label], batch_size=self.batch_size, num_threads=self.batch_process_threads_num,
capacity=1000 + 3 * self.batch_size,
min_after_dequeue=100)
return images, sparse_labels
def run_training(self):
with tf.Graph().as_default():
image, label = self.inputs()
with tf.Session() as sess:
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
try:
step = 0
while not coord.should_stop():
start_time = time.time()
while not coord.should_stop():
# Run training steps or whatever
                            example, l = sess.run([image, label])
                            print example
except tf.errors.OutOfRangeError:
print('Done training for %d epochs, %d steps.' % (self.num_epochs, self.batch_size))
finally:
coord.request_stop()
coord.join(threads)
sess.close()
# parser = argparse.ArgumentParser(description='Data set location and project location')
# parser.add_argument('-dataset_dir', nargs=2)
# parser.add_argument('-project_dir', nargs=1)
#
# opts = parser.parse_args()
#
# project_dir = opts.project_dir
# dataset_dir = opts.dataset_dir
# project_dir = "/home/runge/openbci/git/OpenBCI_Python"
# dataset_dir = "/home/runge/openbci/git/OpenBCI_Python/build/dataset"
#
# loader = DataLoader(project_dir, dataset_dir)
# # # # clips_10 = loader.load_dataset_from_ogg('/home/runge/projects/sound_detector/TRAIN-10')
# # loader.create_one_big_file("ogg")
#
# image, label = loader.inputs()
# loader.run_training()
| 8,599 | 47.314607 | 115 | py |
OpenBCIPython | OpenBCIPython-master/dep_analyzer.py | import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from loader import DataLoader
from preprocessing import processor as api
def add_subplot_axes(ax, position):
box = ax.get_position()
position_display = ax.transAxes.transform(position[0:2])
position_fig = plt.gcf().transFigure.inverted().transform(position_display)
x = position_fig[0]
y = position_fig[1]
return plt.gcf().add_axes([x, y, box.width * position[2], box.height * position[3]], axisbg='w')
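# add_subplot_axes converts an axes-relative [x, y, width, height] box into figure
# coordinates (axes -> display -> figure transforms) so the waveform and spectrogram
# panels can be nested inside a single grid cell.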
def plot_clip_overview(clip, ax):
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax_waveform = add_subplot_axes(ax, [0.0, 0.7, 1.0, 0.3])
ax_spectrogram = add_subplot_axes(ax, [0.0, 0.0, 1.0, 0.7])
with clip.audio as audio:
ax_waveform.plot(np.arange(0, len(audio.raw)) / float(api.Clip.RATE), audio.raw)
ax_waveform.get_xaxis().set_visible(False)
ax_waveform.get_yaxis().set_visible(False)
ax_waveform.set_title('{0} \n {1}'.format(clip.category, clip.filename), {'fontsize': 8}, y=1.03)
result = np.array(np.array(clip.feature_list['fft'].get_logamplitude()[0:1]))
# result = np.array(clip.feature_list['mfcc'].get_mel_spectrogram()[0:2])
librosa.display.specshow(result, sr=api.Clip.RATE, x_axis='time', y_axis='mel', cmap='RdBu_r')
ax_spectrogram.get_xaxis().set_visible(False)
ax_spectrogram.get_yaxis().set_visible(False)
def plot_single_clip(clip):
col_names_mfcc = list('MFCC_{}'.format(i) for i in range(np.shape(clip.feature_list["mfcc"].get_mfcc())[1]))
col_names_zcr = list('ZCR_{}'.format(i) for i in range(1))
MFCC = pd.DataFrame(clip.feature_list["mfcc"].get_mfcc()[:, :], columns=col_names_mfcc)
ZCR = pd.DataFrame(clip.feature_list["zcr"].get_zcr()[:], columns=col_names_zcr)
f = plt.figure(figsize=(10, 6))
ax = f.add_axes([0.0, 0.0, 1.0, 1.0])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_frame_on(False)
ax_mfcc = add_subplot_axes(ax, [0.0, 0.0, 1.0, 0.75])
ax_mfcc.set_xlim(-400, 400)
ax_zcr = add_subplot_axes(ax, [0.0, 0.85, 1.0, 0.05])
ax_zcr.set_xlim(0.0, 1.0)
plt.title('Feature distribution across frames of a single clip ({0} : {1})'.format(clip.category, clip.filename), y=1.5)
sb.boxplot(data=MFCC, orient='h', order=list(reversed(MFCC.columns)), ax=ax_mfcc)
sb.boxplot(data=ZCR, orient='h', ax=ax_zcr)
plt.show()
def plot_single_feature_one_clip(feature, title, ax):
sb.despine()
ax.set_title(title, y=1.10)
sb.distplot(feature, bins=20, hist=True, rug=False,
hist_kws={"histtype": "stepfilled", "alpha": 0.5},
kde_kws={"shade": False},
color=sb.color_palette("muted", 4)[2], ax=ax)
def plot_single_feature_all_clips(feature, title, ax):
sb.despine()
ax.set_title(title, y=1.03)
sb.boxplot(feature, vert=False, orient='h', order=list(reversed(feature.columns)), ax=ax)
def plot_single_feature_aggregate(feature, title, ax):
sb.despine()
ax.set_title(title, y=1.03)
sb.distplot(feature, bins=20, hist=True, rug=False,
hist_kws={"histtype": "stepfilled", "alpha": 0.5},
kde_kws={"shade": False},
color=sb.color_palette("muted", 4)[1], ax=ax)
def generate_feature_summary(dataset, category, clip, coefficient):
title = "{0} : {1}".format(dataset[category][clip].category, dataset[category][clip].filename)
MFCC = pd.DataFrame()
aggregate = []
for i in range(0, len(dataset[category])):
MFCC[i] = dataset[category][i].feature_list["mfcc"].get_mfcc()[:, coefficient]
aggregate = np.concatenate([aggregate, dataset[category][i].feature_list["mfcc"].get_mfcc()[:, coefficient]])
f = plt.figure(figsize=(14, 12))
f.subplots_adjust(hspace=0.6, wspace=0.3)
ax1 = plt.subplot2grid((3, 3), (0, 0))
ax2 = plt.subplot2grid((3, 3), (1, 0))
ax3 = plt.subplot2grid((3, 3), (0, 1), rowspan=2)
ax4 = plt.subplot2grid((3, 3), (0, 2), rowspan=2)
ax1.set_xlim(0.0, 0.5)
ax2.set_xlim(-100, 250)
ax4.set_xlim(-100, 250)
plot_single_feature_one_clip(dataset[category][clip].feature_list["zcr"].get_zcr(), 'ZCR distribution across frames'
'\n{0}'.format(title), ax1)
plot_single_feature_one_clip(dataset[category][clip].feature_list["mfcc"].get_mfcc()[:, coefficient],
'MFCC_{0} distribution across frames\n{1}'.format(coefficient, title), ax2)
plot_single_feature_all_clips(MFCC, 'Differences in MFCC_{0} distribution\nbetween clips of {1}'.format(coefficient
,dataset[ category][ clip].category), ax3)
plot_single_feature_aggregate(aggregate,'Aggregate MFCC_{0} distribution\n(bag-of-frames across all clips\nof {1})'.
format(coefficient, dataset[category][clip].category), ax4)
plt.show()
def view_clip_overview(categories = 5, clips_shown = 1):
f, axes = plt.subplots(categories, clips_shown, figsize=(clips_shown * 2, categories * 2), sharex=True, sharey=True)
f.subplots_adjust(hspace=0.35)
for c in range(0, categories):
for i in range(0, clips_shown):
plot_clip_overview(clips_10[c][i], axes[c])
plt.show()
loader = DataLoader('/home/runge/projects/sound_detector/TRAIN-10', "audio_clips_segmentation.tfrecords", 512, 1,1, 2)
clips_10 = loader.load_dataset_from_ogg('/home/runge/projects/sound_detector/TRAIN-10')
# plot_single_clip(clips_10[1][0])
# generate_feature_summary(clips_10, 1, 0, 1)
#view_clip_overview(10,1)
view_clip_overview(10, 1) | 5,842 | 43.603053 | 124 | py |
OpenBCIPython | OpenBCIPython-master/features/energy.py | import librosa
import numpy as np
from utils import feature_extractor as utils
class Energy:
def __init__(self, audio, dependencies=None, frame=2048, sampling_rate=44000):
self.audio = audio
self.dependencies = dependencies
self.frame = frame
self.sampling_rate = sampling_rate
self.frames = int(np.ceil(len(self.audio.data) / 1000.0 * self.sampling_rate / self.frame))
    def __enter__(self):
        print("Initializing energy calculation...")
    def __exit__(self, exc_type, exc_val, exc_tb):
        print("Done with calculations...")
    def compute_energy(self, frame=2048, sampling_rate=44000):
self.energy = []
for i in range(0, self.frames):
current_frame = utils._get_frame(self.audio, i, frame)
self.energy.append(np.sum(current_frame ** 2) / np.float64(len(current_frame)))
self.energy = np.asarray(self.energy)
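    # Energy entropy: each frame is split into numOfShortBlocks sub-windows, their energies
    # are normalized by the total frame energy, and the Shannon entropy of that distribution
    # is taken; low values mean the energy is concentrated in a few sub-windows.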
def compute_energy_entropy(self):
numOfShortBlocks = 10
eps = 0.00000001
self.energy_entropy = []
for i in range(0, self.frames):
current_frame = utils._get_frame(self.audio, i, self.frame)
Eol = np.sum(current_frame ** 2) # total frame energy
L = len(current_frame)
subWinLength = int(np.floor(L / numOfShortBlocks))
if L != subWinLength * numOfShortBlocks:
current_frame = current_frame[0:subWinLength * numOfShortBlocks]
# subWindows is of size [numOfShortBlocks x L]
subWindows = current_frame.reshape(subWinLength, numOfShortBlocks, order='F').copy()
# Compute normalized sub-frame energies:
s = np.sum(subWindows ** 2, axis=0) / (Eol + eps)
# Compute entropy of the normalized sub-frame energies:
entropy = -np.sum(s * np.log2(s + eps))
self.energy_entropy.append(entropy)
self.energy_entropy = np.asarray(self.energy_entropy)
def get_energy(self):
return self.energy
def get_energy_entropy(self):
return self.energy_entropy
| 2,085 | 39.115385 | 99 | py |
OpenBCIPython | OpenBCIPython-master/features/zcr.py | import librosa
import numpy as np
from utils import feature_extractor as utils
class ZCR:
def __init__(self, audio, dependencies=None, frame=2048, sampling_rate=44000):
self.sampling_rate = sampling_rate
self.frame = frame
self.audio = audio
self.dependencies = dependencies
self.frames = int(np.ceil(len(self.audio.data) / 1000.0 * self.sampling_rate / self.frame))
    def __enter__(self):
        print("Initializing zcr calculation...")
    def __exit__(self, exc_type, exc_val, exc_tb):
        print("Done with calculations...")
def compute_zcr(self):
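        # Sign changes are found with np.diff(np.sign(x)); each crossing contributes
        # |diff| == 2, so halving and averaging yields the per-frame zero-crossing rate.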
self.zcr = []
for i in range(0, self.frames):
current_frame = utils._get_frame(self.audio, i, self.frame)
self.zcr.append(np.mean(0.5 * np.abs(np.diff(np.sign(current_frame)))))
self.zcr = np.asarray(self.zcr)
def get_zcr(self):
return self.zcr
| 915 | 30.586207 | 99 | py |
OpenBCIPython | OpenBCIPython-master/features/fft.py | import librosa
import numpy as np
from utils import feature_extractor as utils
class FFT:
def __init__(self, audio, config):
self.audio = audio
self.dependencies = config["fft"]["dependencies"]
self.frame_size = int(config["frame_size"])
self.sampling_rate = int(config["sampling_rate"])
self.number_of_bins = int(config["fft"]["number_of_bins"])
self.is_raw_data = config["is_raw_data"]
self.frames = int(np.ceil(len(self.audio.data) /self.frame_size))
    def __enter__(self):
        print("Initializing fft calculation...")
    def __exit__(self, exc_type, exc_val, exc_tb):
        print("Done with calculations...")
def compute_fft(self):
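        # Per frame: magnitude spectrum |FFT| evaluated on number_of_bins points, plus the
        # squared magnitudes mapped onto a dB-like scale via librosa.logamplitude.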
self.fft = []
self.logamplitude = []
for i in range(0, self.frames):
current_frame = utils._get_frame_array(self.audio, i, self.frame_size)
ps = np.abs(np.fft.fft(current_frame, self.number_of_bins))
self.fft.append(ps)
self.logamplitude.append(librosa.logamplitude(ps ** 2))
self.fft = np.asarray(self.fft)
self.logamplitude = np.asarray(self.logamplitude)
def get_fft_spectrogram(self):
return self.fft
def get_logamplitude(self):
return self.logamplitude
| 1,285 | 31.974359 | 82 | py |
OpenBCIPython | OpenBCIPython-master/features/mean.py | import librosa
import numpy as np
from utils import feature_extractor as utils
class Mean:
def __init__(self, audio, dependencies=None, frame=2048, sampling_rate=44000):
self.audio = audio
self.dependencies = dependencies
self.frame = frame
self.sampling_rate = sampling_rate
self.frames = int(np.ceil(len(self.audio.data) / 1000.0 * self.sampling_rate / self.frame))
    def __enter__(self):
        print("Initializing mean calculation...")
    def __exit__(self, exc_type, exc_val, exc_tb):
        print("Done with calculations...")
def compute_mean(self):
self.mean = []
for i in range(0, self.frames):
current_frame = utils._get_frame(self.audio, i, self.frame)
sum = np.sum(current_frame ** 2) # total frame energy
frame_length = len(current_frame)
self.mean.append(sum/frame_length)
self.mean = np.asarray(self.mean)
def compute_geometric_mean(self):
self.geometric_mean = []
for i in range(0, self.frames):
current_frame = utils._get_frame(self.audio, i, self.frame)
            # Geometric mean of the squared samples; the epsilon guards against log(0).
            self.geometric_mean.append(np.exp(np.mean(np.log(current_frame ** 2 + 1e-10))))
self.geometric_mean = np.asarray(self.geometric_mean) | 1,364 | 36.916667 | 99 | py |
OpenBCIPython | OpenBCIPython-master/features/mfcc.py | import librosa
import numpy as np
from utils import feature_extractor as utils
import matplotlib.pyplot as plt
class MFCC:
def __init__(self, audio, dependencies=None, number_of_mfcc=13, frame=2048, sampling_rate=44000):
self.audio = audio
self.dependencies = dependencies
self.frame = frame
self.sampling_rate = sampling_rate
self.number_of_mfcc = number_of_mfcc
self.frames = int(np.ceil(len(self.audio.data) / 1000.0 * self.sampling_rate / self.frame))
    def __enter__(self):
        print("Initializing mfcc calculation...")
    def __exit__(self, exc_type, exc_val, exc_tb):
        print("Done with calculations...")
def compute_mfcc(self):
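        # Per frame: mel power spectrogram -> log amplitude -> DCT (librosa.feature.mfcc),
        # followed by first- and second-order deltas of the coefficients.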
self.melspectrogram = []
self.logamplitude = []
self.mfcc = []
self.mfcc_delta = []
self.mfcc_delta2 = []
for i in range(0, self.frames-1):
current_frame = utils._get_frame(self.audio, i, self.frame)
# MFCC computation with default settings (2048 FFT window length)
self.melspectrogram.append(librosa.feature.melspectrogram(current_frame, sr=self.sampling_rate,
hop_length=self.frame)[0:,][0:,1])
self.logamplitude.append(librosa.logamplitude(self.melspectrogram[i]))
self.mfcc.append(librosa.feature.mfcc(S=self.logamplitude[i], n_mfcc=self.number_of_mfcc).transpose())
# plt.figure(figsize=(10, 4))
# librosa.display.specshow(self.mfcc[i], x_axis='time')
# plt.colorbar()
# plt.title('MFCC')
# plt.tight_layout()
self.mfcc_delta.append(librosa.feature.delta(self.mfcc[i]))
self.mfcc_delta2.append(librosa.feature.delta(self.mfcc[i], order=2))
self.logamplitude[i]=(self.logamplitude[i].T.flatten()[:, np.newaxis].T)
self.melspectrogram = np.asarray(self.melspectrogram)
self.logamplitude = np.asarray(self.logamplitude)
self.mfcc = np.asarray(self.mfcc)
self.mfcc_delta = np.asarray(self.mfcc_delta)
self.mfcc_delta2 = np.asarray(self.mfcc_delta2)
def get_mel_spectrogram(self):
return self.melspectrogram
def get_log_amplitude(self):
return self.logamplitude
def get_mfcc(self):
return self.mfcc
def get_delta_mfcc(self):
return self.mfcc_delta
def get_delta2_mfcc(self):
return self.mfcc_delta2
| 2,483 | 37.215385 | 114 | py |
OpenBCIPython | OpenBCIPython-master/features/generic_type.py | import librosa
import numpy as np
from utils import feature_extractor as utils
class EMG:
def __init__(self, audio, config):
self.audio = audio
self.dependencies = config["emg"]["dependencies"]
self.frame_size = int(config["frame_size"])
self.sampling_rate = int(config["sampling_rate"])
self.number_of_bins = int(config["emg"]["number_of_bins"])
self.is_raw_data = config["is_raw_data"]
self.time_lag = int(config["emg"]["time_lag"])
self.embedded_dimension = int(config["emg"]["embedded_dimension"])
self.boundary_frequencies = list(config["emg"]["boundary_frequencies"])
self.hfd_parameter = int(config["emg"]["hfd_parameter"])
self.r = int(config["emg"]["r"])
self.frames = int(np.ceil(len(self.audio.data) / self.frame_size))
def __enter__(self):
print ("Initializing emg calculation...")
def __exit__(self, exc_type, exc_val, exc_tb):
print ("Done with calculations...")
def get_current_frame(self, index):
return utils._get_frame_array(self.audio, index, self.frame_size)
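    # Hurst exponent via rescaled-range (R/S) analysis: the slope of log(R/S) against
    # log(window length) measures long-range dependence (about 0.5 for uncorrelated noise).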
def compute_hurst(self):
self.hurst = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
N = current_frame.size
T = np.arange(1, N + 1)
Y = np.cumsum(current_frame)
Ave_T = Y / T
S_T = np.zeros(N)
R_T = np.zeros(N)
for i in range(N):
S_T[i] = np.std(current_frame[:i + 1])
X_T = Y - T * Ave_T[i]
R_T[i] = np.ptp(X_T[:i + 1])
R_S = R_T / S_T
R_S = np.log(R_S)[1:]
n = np.log(T)[1:]
A = np.column_stack((n, np.ones(n.size)))
[m, c] = np.linalg.lstsq(A, R_S)[0]
self.hurst.append(m)
self.hurst = np.asarray(self.hurst)
def get_hurst(self):
return self.hurst
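    # Time-delay embedding: each frame becomes overlapping vectors of length
    # embedded_dimension sampled every time_lag points, built with stride tricks (no copy).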
def compute_embed_seq(self):
self.embed_seq = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
shape = (current_frame.size - self.time_lag * (self.embedded_dimension - 1), self.embedded_dimension)
strides = (current_frame.itemsize, self.time_lag * current_frame.itemsize)
m = np.lib.stride_tricks.as_strided(current_frame, shape=shape, strides=strides)
self.embed_seq.append(m)
self.embed_seq = np.asarray(self.embed_seq)
def get_embed_seq(self):
return self.embed_seq
def compute_bin_power(self):
self.Power_Ratio = []
self.Power = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
C = np.fft.fft(current_frame)
C = abs(C)
Power = np.zeros(len(self.boundary_frequencies) - 1)
for Freq_Index in range(0, len(self.boundary_frequencies) - 1):
Freq = float(self.boundary_frequencies[Freq_Index])
Next_Freq = float(self.boundary_frequencies[Freq_Index + 1])
Power[Freq_Index] = sum(
C[int(np.floor(Freq / self.sampling_rate * len(current_frame))):
int(np.floor(Next_Freq / self.sampling_rate * len(current_frame)))])
self.Power.append(Power)
self.Power_Ratio.append(Power / sum(Power))
self.Power = np.asarray(self.Power)
self.Power_Ratio = np.asarray(self.Power_Ratio)
def get_bin_power(self):
return self.Power
def get_bin_power_ratio(self):
return self.Power_Ratio
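    # Petrosian fractal dimension: count sign changes of the first difference (N_delta)
    # and evaluate log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta))).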
def compute_pfd(self, D=None):
self.pfd = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
if D is None:
D = np.diff(current_frame)
D = D.tolist()
N_delta = 0 # number of sign changes in derivative of the signal
for i in range(1, len(D)):
if D[i] * D[i - 1] < 0:
N_delta += 1
n = len(current_frame)
            m = np.log10(n) / (np.log10(n) + np.log10(n / (n + 0.4 * N_delta)))
self.pfd.append(m)
if self.is_raw_data:
self.pfd = np.asarray(self.pfd)
else:
self.pfd = np.asarray(self.pfd)[0]
def get_pfd(self):
return self.pfd
def compute_hfd(self):
self.hfd = []
for v in range(0, self.frames):
current_frame = self.get_current_frame(v)
L = []
x = []
N = len(current_frame)
for k in range(1, self.hfd_parameter):
Lk = []
for m in range(0, k):
Lmk = 0
for i in range(1, int(np.floor((N - m) / k))):
Lmk += abs(current_frame[m + i * k] - current_frame[m + i * k - k])
Lmk = Lmk * (N - 1) / np.floor((N - m) / float(k)) / k
Lk.append(Lmk)
L.append(np.log(np.mean(Lk)))
x.append([np.log(float(1) / k), 1])
(p, r1, r2, s) = np.linalg.lstsq(x, L)
self.hfd.append(p[0])
if self.is_raw_data:
self.hfd = np.asarray(self.hfd)
else:
self.hfd = np.asarray(self.hfd)[0]
def get_hfd(self):
return self.hfd
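    # Hjorth parameters per frame: the first value approximates mobility
    # (sqrt(power of the first difference / total power)) and the second complexity,
    # which relates the second difference of the signal to the first.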
def compute_hjorth(self, D=None):
self.hjorth = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
if D is None:
D = np.diff(current_frame)
                D = np.concatenate(([current_frame[0]], D))
D = np.array(D)
n = len(current_frame)
M2 = float(sum(D ** 2)) / n
TP = sum(np.array(current_frame) ** 2)
M4 = 0
for i in range(1, len(D)):
M4 += (D[i] - D[i - 1]) ** 2
M4 = M4 / n
m = np.sqrt(M2 / TP), np.sqrt(float(M4) * TP / M2 / M2)
self.hjorth.append(m)
if self.is_raw_data:
self.hjorth = np.asarray(self.hjorth)
else:
self.hjorth = np.asarray(self.hjorth)[0]
def get_hjorth(self):
return self.hjorth
def compute_spectral_entropy(self):
self.spectral_entropy = []
for k in range(0, self.frames):
Power, Power_Ratio = self.get_bin_power()[k], self.get_bin_power_ratio()[k]
Spectral_Entropy = 0
for i in range(0, len(Power_Ratio) - 1):
Spectral_Entropy += Power_Ratio[i] * np.log(Power_Ratio[i])
Spectral_Entropy /= np.log(len(Power_Ratio))
m = -1 * Spectral_Entropy
self.spectral_entropy.append(m)
self.spectral_entropy = np.asarray(self.spectral_entropy)
def get_spectral_entropy(self):
return self.spectral_entropy
def compute_svd_entropy(self, W=None):
self.svd_entropy = []
for k in range(0, self.frames):
if W is None:
Y = self.get_embed_seq()[k]
W = np.linalg.svd(Y, compute_uv=0)
W /= sum(W) # normalize singular values
m = -1 * sum(W * np.log(W))
self.svd_entropy.append(m)
if self.is_raw_data:
self.svd_entropy = np.asarray(self.svd_entropy)
else:
self.svd_entropy = np.asarray(self.svd_entropy)[0]
def get_svd_entropy(self):
return self.svd_entropy
def compute_fisher_info(self, W=None):
self.fisher_info = []
for k in range(0, self.frames):
            if W is None:
                Y = self.get_embed_seq()[k]
                W = np.linalg.svd(Y, compute_uv=0)
                W /= sum(W)  # normalize singular values
m = -1 * sum(W * np.log(W))
self.fisher_info.append(m)
if self.is_raw_data:
self.fisher_info = np.asarray(self.fisher_info)
else:
self.fisher_info = np.asarray(self.fisher_info)[0]
def get_fisher_info(self):
return self.fisher_info
def compute_ap_entropy(self):
self.ap_entropy = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
N = len(current_frame)
Em = self.get_embed_seq()[k]
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = np.max(D, axis=2) <= self.r
Cm = InRange.mean(axis=0)
Dp = np.abs(np.tile(current_frame[self.embedded_dimension:],
(N - self.embedded_dimension, 1)) - np.tile(current_frame[self.embedded_dimension:],
(N - self.embedded_dimension, 1)).T)
Cmp = np.logical_and(Dp <= self.r, InRange[:-1, :-1]).mean(axis=0)
Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))
m = (Phi_m - Phi_mp) / (N - self.embedded_dimension)
self.ap_entropy.append(m)
self.ap_entropy = np.asarray(self.ap_entropy)
def get_ap_entropy(self):
return self.ap_entropy
def compute_samp_entropy(self):
self.samp_entropy = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
N = len(current_frame)
Em = self.get_embed_seq()[k]
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = np.max(D, axis=2) <= self.r
np.fill_diagonal(InRange, 0) # Don't count self-matches
Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
Dp = np.abs(np.tile(current_frame[self.embedded_dimension:], (N - self.embedded_dimension, 1))
- np.tile(current_frame[self.embedded_dimension:], (N - self.embedded_dimension, 1)).T)
Cmp = np.logical_and(Dp <= self.r, InRange[:-1, :-1]).sum(axis=0)
# Uncomment below for old (miscounted) version
# InRange[np.triu_indices(len(InRange))] = 0
# InRange = InRange[:-1,:-2]
# Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
# Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)
# Dp = Dp[:,:-1]
# Cmp = np.logical_and(Dp <= R, InRange).sum(axis=0)
# Avoid taking log(0)
Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))
self.samp_entropy.append(Samp_En)
self.samp_entropy = np.asarray(self.samp_entropy)
def get_samp_entropy(self):
return self.samp_entropy
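    # Detrended fluctuation analysis: integrate the mean-removed frame, split it into boxes
    # of several sizes, remove a linear trend per box, and take the scaling exponent alpha
    # as the slope of log(fluctuation) versus log(box size).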
def compute_dfa(self, Ave=None, L=None):
self.dfa = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
if Ave is None:
Ave = np.mean(current_frame)
Y = np.cumsum(current_frame)
Y -= Ave
if L is None:
L = np.floor(len(current_frame) * 1 /
(2 ** np.array(list(range(4, int(np.log2(len(current_frame))) - 4)))))
F = np.zeros(len(L))
for i in range(0, len(L)):
n = int(L[i]) # for each box length L[i]
if n == 0:
print("time series is too short while the box length is too big")
print("abort")
exit()
for j in range(0, len(current_frame), n): # for each box
if j + n < len(current_frame):
c = list(range(j, j + n))
# coordinates of time in the box
c = np.vstack([c, np.ones(n)]).T
# the value of data in the box
y = Y[j:j + n]
# add residue in this box
F[i] += np.linalg.lstsq(c, y)[1]
F[i] /= ((len(current_frame) / n) * n)
F = np.sqrt(F)
Alpha = np.linalg.lstsq(np.vstack([np.log(L), np.ones(len(L))]).T, np.log(F))[0][0]
self.dfa.append(Alpha)
if self.is_raw_data:
self.dfa = np.asarray(self.dfa)
else:
self.dfa = np.asarray(self.dfa)[0]
def get_dfa(self):
return self.dfa
def compute_permutation_entropy(self):
self.permutation_entropy = []
for k in range(0, self.frames):
PeSeq = []
Em = self.get_embed_seq()[k]
for i in range(0, len(Em)):
r = []
z = []
for j in range(0, len(Em[i])):
z.append(Em[i][j])
for j in range(0, len(Em[i])):
z.sort()
r.append(z.index(Em[i][j]))
z[z.index(Em[i][j])] = -1
PeSeq.append(r)
RankMat = []
while len(PeSeq) > 0:
RankMat.append(PeSeq.count(PeSeq[0]))
x = PeSeq[0]
for j in range(0, PeSeq.count(PeSeq[0])):
PeSeq.pop(PeSeq.index(x))
RankMat = np.array(RankMat)
RankMat = np.true_divide(RankMat, RankMat.sum())
EntropyMat = np.multiply(np.log2(RankMat), RankMat)
PE = -1 * EntropyMat.sum()
self.permutation_entropy.append(PE)
if self.is_raw_data:
self.permutation_entropy = np.asarray(self.permutation_entropy)
else:
self.permutation_entropy = np.asarray(self.permutation_entropy)[0]
def get_permutation_entropy(self):
return self.permutation_entropy
def compute_information_based_similarity(self, y, n):
self.information_based_similarity = []
for v in range(0, self.frames):
current_frame = self.get_current_frame(v)
Wordlist = []
Space = [[0, 0], [0, 1], [1, 0], [1, 1]]
Sample = [0, 1]
if (n == 1):
Wordlist = Sample
if (n == 2):
Wordlist = Space
elif (n > 1):
Wordlist = Space
Buff = []
for k in range(0, n - 2):
Buff = []
for i in range(0, len(Wordlist)):
Buff.append(tuple(Wordlist[i]))
Buff = tuple(Buff)
Wordlist = []
for i in range(0, len(Buff)):
for j in range(0, len(Sample)):
Wordlist.append(list(Buff[i]))
Wordlist[len(Wordlist) - 1].append(Sample[j])
Wordlist.sort()
Input = [[], []]
Input[0] = current_frame
Input[1] = y
SymbolicSeq = [[], []]
for i in range(0, 2):
Encoder = np.diff(Input[i])
for j in range(0, len(Input[i]) - 1):
if (Encoder[j] > 0):
SymbolicSeq[i].append(1)
else:
SymbolicSeq[i].append(0)
Wm = []
# todo fix this and uncomment these lines
# Wm.append(self.get_embed_seq(SymbolicSeq[0], 1, n).tolist())
# Wm.append(embed_seq(SymbolicSeq[1], 1, n).tolist())
Count = [[], []]
for i in range(0, 2):
for k in range(0, len(Wordlist)):
Count[i].append(Wm[i].count(Wordlist[k]))
Prob = [[], []]
for i in range(0, 2):
Sigma = 0
for j in range(0, len(Wordlist)):
Sigma += Count[i][j]
for k in range(0, len(Wordlist)):
Prob[i].append(np.true_divide(Count[i][k], Sigma))
Entropy = [[], []]
for i in range(0, 2):
for k in range(0, len(Wordlist)):
if (Prob[i][k] == 0):
Entropy[i].append(0)
else:
Entropy[i].append(Prob[i][k] * (np.log2(Prob[i][k])))
Rank = [[], []]
Buff = [[], []]
Buff[0] = tuple(Count[0])
Buff[1] = tuple(Count[1])
for i in range(0, 2):
Count[i].sort()
Count[i].reverse()
for k in range(0, len(Wordlist)):
Rank[i].append(Count[i].index(Buff[i][k]))
Count[i][Count[i].index(Buff[i][k])] = -1
IBS = 0
Z = 0
n = 0
for k in range(0, len(Wordlist)):
if ((Buff[0][k] != 0) & (Buff[1][k] != 0)):
F = -Entropy[0][k] - Entropy[1][k]
IBS += np.multiply(np.absolute(Rank[0][k] - Rank[1][k]), F)
Z += F
else:
n += 1
IBS = np.true_divide(IBS, Z)
IBS = np.true_divide(IBS, len(Wordlist) - n)
self.information_based_similarity.append(IBS)
if self.is_raw_data:
self.information_based_similarity = np.asarray(self.information_based_similarity)
else:
self.information_based_similarity = np.asarray(self.information_based_similarity)[0]
def get_information_based_similarity(self):
return self.information_based_similarity
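    # Largest Lyapunov exponent (Rosenstein-style): for every embedded point, find the
    # nearest neighbour at least T steps away, average the log of the divergence between
    # the trajectories, and scale the fitted slope by the sampling rate.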
def compute_LLE(self, T):
self.LLE = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
Em = self.get_embed_seq()
M = len(Em)
A = np.tile(Em, (len(Em), 1, 1))
B = np.transpose(A, [1, 0, 2])
square_dists = (A - B) ** 2 # square_dists[i,j,k] = (Em[i][k]-Em[j][k])^2
D = np.sqrt(square_dists[:, :, :].sum(axis=2)) # D[i,j] = ||Em[i]-Em[j]||_2
# Exclude elements within T of the diagonal
band = np.tri(D.shape[0], k=T) - np.tri(D.shape[0], k=-T - 1)
band[band == 1] = np.inf
neighbors = (D + band).argmin(axis=0) # nearest neighbors more than T steps away
# in_bounds[i,j] = (i+j <= M-1 and i+neighbors[j] <= M-1)
inc = np.tile(np.arange(M), (M, 1))
row_inds = (np.tile(np.arange(M), (M, 1)).T + inc)
col_inds = (np.tile(neighbors, (M, 1)) + inc.T)
in_bounds = np.logical_and(row_inds <= M - 1, col_inds <= M - 1)
# Uncomment for old (miscounted) version
# in_bounds = np.logical_and(row_inds < M - 1, col_inds < M - 1)
            row_inds[~in_bounds] = 0
            col_inds[~in_bounds] = 0
            # neighbor_dists[i,j] = ||Em[i+j]-Em[i+neighbors[j]]||_2
            neighbor_dists = np.ma.MaskedArray(D[row_inds, col_inds], ~in_bounds)
            J = (~neighbor_dists.mask).sum(axis=1)  # number of in-bounds indices by row
# Set invalid (zero) values to 1; log(1) = 0 so sum is unchanged
neighbor_dists[neighbor_dists == 0] = 1
d_ij = np.sum(np.log(neighbor_dists.data), axis=1)
mean_d = d_ij[J > 0] / J[J > 0]
x = np.arange(len(mean_d))
X = np.vstack((x, np.ones(len(mean_d)))).T
[m, c] = np.linalg.lstsq(X, mean_d)[0]
# todo check fs
Lexp = self.sampling_rate * m
self.LLE.append(Lexp)
if self.is_raw_data:
self.LLE = np.asarray(self.LLE)
else:
self.LLE = np.asarray(self.LLE)[0]
def get_LLE(self, tau, n, T):
return self.LLE
| 19,768 | 37.99211 | 116 | py |
OpenBCIPython | OpenBCIPython-master/features/spectral.py | import librosa
import numpy as np
from utils import feature_extractor as utils
class Spectral:
def __init__(self, audio, config):
self.audio = audio
self.dependencies = config["spectral"]["dependencies"]
self.frame_size = int(config["frame_size"])
self.sampling_rate = int(config["sampling_rate"])
self.number_of_bins = int(config["spectral"]["number_of_bins"])
self.is_raw_data = config["is_raw_data"]
self.frames = int(np.ceil(len(self.audio.data) /self.frame_size))
    def __enter__(self):
        print("Initializing spectral calculation...")
    def __exit__(self, exc_type, exc_val, exc_tb):
        print("Done with calculations...")
def get_current_frame(self, index):
return utils._get_frame_array(self.audio, index, self.frame_size)
def compute_hurst(self):
self.hurst = []
for k in range(0, self.frames):
current_frame = self.get_current_frame(k)
N = current_frame.size
T = np.arange(1, N + 1)
Y = np.cumsum(current_frame)
Ave_T = Y / T
S_T = np.zeros(N)
R_T = np.zeros(N)
for i in range(N):
S_T[i] = np.std(current_frame[:i + 1])
X_T = Y - T * Ave_T[i]
R_T[i] = np.ptp(X_T[:i + 1])
R_S = R_T / S_T
R_S = np.log(R_S)[1:]
n = np.log(T)[1:]
A = np.column_stack((n, np.ones(n.size)))
[m, c] = np.linalg.lstsq(A, R_S)[0]
self.hurst.append(m)
self.hurst = np.asarray(self.hurst)
def get_hurst(self):
return self.hurst
| 1,658 | 32.18 | 77 | py |
OpenBCIPython | OpenBCIPython-master/features/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/neuralnet/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/neuralnet/net/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/neuralnet/net/cnn/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/neuralnet/net/cnn/model1/convolutional_network.py |
import numpy as np
import tensorflow as tf
import json
class CNNModel1:
def __init__(self, project_dir, meta_info_data_set):
meta_info_file = project_dir + "/neuralnet/net/cnn/model1/config.json"
with open(meta_info_file) as data_file:
meta_info = json.load(data_file)
self.learning_rate = float(meta_info["net"]["learning_rate"])
self.conv1_features = int(meta_info["net"]["conv1_features"])
self.conv2_features = int(meta_info["net"]["conv2_features"])
self.max_pool_size1 = int(meta_info["net"]["max_pool_size1"])
self.max_pool_size2 = int(meta_info["net"]["max_pool_size2"])
self.fully_connected_size1 = int(meta_info["net"]["fully_connected_size1"])
self.filter_side = int(meta_info["net"]["filter_side"])
self.dropout = int(meta_info["net"]["dropout"])
self.training_iters = int(meta_info["net"]["training_iters"])
self.display_step = int(meta_info["net"]["display_step"])
self.strides_layer1 = int(meta_info["net"]["strides_layer1"])
self.strides_layer2 = int(meta_info["net"]["strides_layer2"])
self.model_path = project_dir + str(meta_info["net"]["model_path"])
self.logs_path = project_dir + str(meta_info["net"]["logs_path"])
self.num_channels = int(meta_info_data_set["num_channels"])
self.n_classes = int(meta_info_data_set["number_of_class"])
self.image_width = int(meta_info_data_set["generated_image_width"])
self.image_height = int(meta_info_data_set["generated_image_height"])
self.feature_vector_size = self.image_height*self.image_width
with tf.name_scope("Dropout"):
self.keep_prob = tf.placeholder(tf.float32)
with tf.name_scope('Weights'):
# Store layers weight & bias
self.weights = {
# 5x5 conv, 1 input, 32 outputs
'wc1': tf.Variable(tf.random_normal([self.filter_side, self.filter_side, self.num_channels, self.conv1_features]), name='Weights_wc1'),
# 5x5 conv, 32 inputs, 64 outputs
'wc2': tf.Variable(tf.random_normal([self.filter_side, self.filter_side, self.conv1_features, self.conv2_features]), name='Weights_wc2'),
                # fully connected, 3*3*conv2_features inputs, fully_connected_size1 outputs
                'wd1': tf.Variable(tf.random_normal([3*3*self.conv2_features, self.fully_connected_size1]), name='Weights_wd1'),
                # fully connected, fully_connected_size1 inputs and outputs
                'wd2': tf.Variable(tf.random_normal([self.fully_connected_size1, self.fully_connected_size1]), name='Weights_wd2'),
# 1024 inputs, 10 outputs (class prediction)
'out': tf.Variable(tf.random_normal([self.fully_connected_size1, self.n_classes]), name='Weights_out')
}
with tf.name_scope('Biases'):
self.biases = {
'bc1': tf.Variable(tf.random_normal([self.conv1_features]), name='bc1'),
'bc2': tf.Variable(tf.random_normal([self.conv2_features]), name='bc2'),
'bd1': tf.Variable(tf.random_normal([self.fully_connected_size1]), name='bd1'),
'bd2': tf.Variable(tf.random_normal([self.fully_connected_size1]), name='bd2'),
'out': tf.Variable(tf.random_normal([self.n_classes]), name='out')
}
# Declare model placeholders
# x_input_shape = (batch_size, image_width, image_height, num_channels)
with tf.name_scope('Inputs'):
self.x_input = tf.placeholder(tf.float32, [None, self.feature_vector_size], name='InputData')
self.y_target = tf.placeholder(tf.int32, [None, self.n_classes], name='LabelData')
# Create some wrappers for simplicity
def conv2d(self, x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(self, x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create accuracy function
def get_accuracy(self, logits, targets):
batch_predictions = np.argmax(logits, axis=1)
num_correct = np.sum(np.equal(batch_predictions, targets))
return (100. * num_correct / batch_predictions.shape[0])
# Initialize Model Operations
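    # conv_net wires two Conv-ReLU-MaxPool stages into two fully connected layers with
    # dropout on the first; the flattened pooling output has to line up with Weights_wd1,
    # which assumes a 3x3 spatial map times conv2_features channels.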
def conv_net(self, input_data):
input_data = tf.reshape(input_data, shape=[-1, self.image_width, self.image_height, self.num_channels])
# First Conv-ReLU-MaxPool Layer
result_of_first_cnn_layer = self.conv2d(input_data, self.weights["wc1"], self.biases["bc1"], strides=self.strides_layer1)
tf.summary.histogram("result_of_first_cnn_layer", result_of_first_cnn_layer)
result_of_first_max_polling_layer = self.maxpool2d(result_of_first_cnn_layer, k=2)
# Second Conv-ReLU-MaxPool Layer
        result_of_second_cnn_layer = self.conv2d(result_of_first_max_polling_layer, self.weights["wc2"], self.biases["bc2"], strides=self.strides_layer2)
tf.summary.histogram("result_of_second_cnn_layer", result_of_second_cnn_layer)
result_of_second_max_polling_layer = self.maxpool2d(result_of_second_cnn_layer, k=2)
# Transform Output into a 1xN layer for next fully connected layer
final_conv_shape = result_of_second_max_polling_layer.get_shape().as_list()
final_shape = final_conv_shape[1] * final_conv_shape[2] * final_conv_shape[3]
flat_output = tf.reshape(result_of_second_max_polling_layer, [-1 , final_shape])
# First Fully Connected Layer
fully_connected1 = tf.add(tf.matmul(flat_output, self.weights["wd1"]), self.biases["bd1"])
fully_connected1 = tf.nn.relu(fully_connected1)
tf.summary.histogram("fully_connected1", fully_connected1)
# Apply Dropout
fully_connected1 = tf.nn.dropout(fully_connected1, self.keep_prob)
# Second Fully Connected Layer
fully_connected2 = tf.add(tf.matmul(fully_connected1, self.weights["wd2"]), self.biases["bd2"])
fully_connected2 = tf.nn.relu(fully_connected2)
tf.summary.histogram("fully_connected2", fully_connected2)
# Output, class prediction
out = tf.add(tf.matmul(fully_connected2, self.weights['out']), self.biases['out'])
        return out, None
| 6,553 | 52.284553 | 153 | py |
OpenBCIPython | OpenBCIPython-master/neuralnet/net/cnn/model1/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/neuralnet/net/cnn/model2/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/neuralnet/net/cnn/model2/inception_resnet_v2.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
slim = tf.contrib.slim
class CNNModel2:
def __init__(self,project_dir, meta_info_data_set):
meta_info_file = project_dir + "/neuralnet/net/cnn/model2/config.json"
with open(meta_info_file) as data_file:
meta_info = json.load(data_file)
self.learning_rate = float(meta_info["net"]["learning_rate"])
self.conv1_features = int(meta_info["net"]["conv1_features"])
self.conv2_features = int(meta_info["net"]["conv2_features"])
self.max_pool_size1 = int(meta_info["net"]["max_pool_size1"])
self.max_pool_size2 = int(meta_info["net"]["max_pool_size2"])
self.fully_connected_size1 = int(meta_info["net"]["fully_connected_size1"])
self.filter_side = int(meta_info["net"]["filter_side"])
self.dropout = int(meta_info["net"]["dropout"])
self.training_iters = int(meta_info["net"]["training_iters"])
self.display_step = int(meta_info["net"]["display_step"])
self.strides_layer1 = int(meta_info["net"]["strides_layer1"])
self.strides_layer2 = int(meta_info["net"]["strides_layer2"])
self.model_path = project_dir + str(meta_info["net"]["model_path"])
self.logs_path = project_dir + str(meta_info["net"]["logs_path"])
self.keep_prob = float(meta_info["net"]["keep_prob"])
self.num_channels = int(meta_info_data_set["number_of_channels"])
self.n_classes = int(meta_info_data_set["number_of_class"])
self.image_width = int(meta_info_data_set["generated_image_width"])
self.image_height = int(meta_info_data_set["generated_image_height"])
self.feature_vector_size = self.image_height*self.image_width*3
with tf.name_scope('Inputs'):
self.x_input = tf.placeholder(tf.float32, [None, self.feature_vector_size], name='InputData')
self.y_target = tf.placeholder(tf.int32, [None, self.n_classes], name='LabelData')
with tf.name_scope("Dropout"):
self.keep_prob = tf.placeholder(tf.float32)
def block35(self, net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat(values=[tower_conv, tower_conv1_1, tower_conv2_2], axis=3)
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def block17(self, net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
scope='Conv2d_0b_1x7')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
scope='Conv2d_0c_7x1')
mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def block8(self, net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
scope='Conv2d_0b_1x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
scope='Conv2d_0c_3x1')
mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def conv_net(self, inputs, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionResnetV2'):
"""Creates the Inception Resnet V2 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
          num_classes: taken from self.n_classes rather than passed as an argument.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
inputs = tf.reshape(self.x_input, shape=[-1, self.image_width, self.image_height, 3])
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
end_points['Conv2d_1a_3x3'] = net
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding='VALID',
scope='Conv2d_2a_3x3')
end_points['Conv2d_2a_3x3'] = net
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
end_points['Conv2d_2b_3x3'] = net
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_3a_3x3')
end_points['MaxPool_3a_3x3'] = net
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding='VALID',
scope='Conv2d_3b_1x1')
end_points['Conv2d_3b_1x1'] = net
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding='VALID',
scope='Conv2d_4a_3x3')
end_points['Conv2d_4a_3x3'] = net
# 35 x 35 x 192
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_5a_3x3')
end_points['MaxPool_5a_3x3'] = net
# 35 x 35 x 320
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
scope='AvgPool_0a_3x3')
tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
scope='Conv2d_0b_1x1')
net = tf.concat( values=[tower_conv, tower_conv1_1,
tower_conv2_2, tower_pool_1], axis=3)
end_points['Mixed_5b'] = net
net = slim.repeat(net, 10, self.block35, scale=0.17)
# 17 x 17 x 1088
with tf.variable_scope('Mixed_6a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 384, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
scope='Conv2d_0b_3x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(values=[tower_conv, tower_conv1_2, tower_pool], axis=3)
end_points['Mixed_6a'] = net
net = slim.repeat(net, 20, self.block17, scale=0.10)
# Auxiliary tower
with tf.variable_scope('AuxLogits'):
aux = slim.avg_pool2d(net, 5, stride=3, padding='VALID',
scope='Conv2d_1a_3x3')
aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')
aux = slim.conv2d(aux, 768, aux.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a_5x5')
aux = slim.flatten(aux)
aux = slim.fully_connected(aux, self.n_classes, activation_fn=None,
scope='Logits')
end_points['AuxLogits'] = aux
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(values=[tower_conv_1, tower_conv1_1,
tower_conv2_2, tower_pool], axis=3)
end_points['Mixed_7a'] = net
net = slim.repeat(net, 9, self.block8, scale=0.20)
net = self.block8(net, activation_fn=None)
net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
end_points['Conv2d_7b_1x1'] = net
with tf.variable_scope('Logits'):
end_points['PrePool'] = net
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a_8x8')
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='Dropout')
end_points['PreLogitsFlatten'] = net
logits = slim.fully_connected(net, self.n_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
def inception_resnet_v2_arg_scope(self, weight_decay=0.00004,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Yields the scope with the default parameters for inception_resnet_v2.
Args:
weight_decay: the weight decay for weights variables.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float added to variance to avoid dividing by zero.
Returns:
a arg_scope with the parameters needed for inception_resnet_v2.
"""
# Set weight_decay for weights in conv2d and fully_connected layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_regularizer=slim.l2_regularizer(weight_decay)):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
}
# Set activation_fn and parameters for batch_norm.
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as scope:
return scope
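# Rough usage sketch (assumes the config JSON and dataset meta-info dict read in __init__):
#   model = CNNModel2(project_dir, meta_info_data_set)
#   with slim.arg_scope(model.inception_resnet_v2_arg_scope()):
#       logits, end_points = model.conv_net(model.x_input, is_training=True)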
| 14,885 | 48.95302 | 109 | py |
OpenBCIPython | OpenBCIPython-master/neuralnet/net/model3/logistic_regression_train_rnn.py | from __future__ import division
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib import layers as tflayers
tf.logging.set_verbosity(tf.logging.INFO)
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import constant_op
class Deviatev1:
def __init__(self, project_location):
self.project_location = project_location
self.project_config = self.project_location + "/config/config.json"
self.column_names = ['ch1', 'ch2', 'ch3']
        self.number_of_features = 5
        self.number_of_labels = 180
        self.num_epochs = 5000
        self.learning_rate = 0.004
self.batch_size = 5
self.keep_prob = 0.9
self.hidden_size = 10
self.num_layers_rnn = 5
self.num_steps = 10
self.dnn_layer_size = 5
self.model_params = {"learning_rate": self.learning_rate, "keep_prob": self.keep_prob
, 'num_steps': self.num_steps, 'num_layers_rnn':self.num_layers_rnn, 'dnn_layer_size': self.dnn_layer_size
, 'number_of_labels': self.number_of_labels }
self.validation_metrics = {
"accuracy":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key="classes"),
"precision":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key="classes"),
"recall":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_recall,
prediction_key="classes")
}
self.test_metrics = {
"accuracy":
tf.contrib.learn.MetricSpec(
metric_fn=tf.metrics.accuracy, prediction_key="classes"),
}
def import_data(self, angle_type):
kinect__angles = pd.read_csv(
self.project_location + "/build/dataset/train/result/reconstructed_bycept_kinect__angles_.csv",
header=None, names=self.column_names).dropna()
channel_signals = pd.read_csv(self.project_location
+ "/build/dataset/train/result/bycept_feature_vectors.csv").dropna()
# kinect__angles = kinect__angles.applymap(lambda x: '%.2f' % x)
y_vals = np.array(kinect__angles.ix[:, angle_type], dtype=np.int32)
x_vals = np.array(channel_signals)
train_presentation = 0.8
test_presentation = 0.8
training_sample_size = int(round(len(x_vals) * train_presentation))
train_indices = np.arange(0,training_sample_size)
test_sample_size = int((len(x_vals)-training_sample_size)*test_presentation)
test_sample_size_end_index = training_sample_size+ test_sample_size
test_indices = np.arange(training_sample_size, test_sample_size_end_index)
validate_indices = np.arange(test_sample_size_end_index, len(x_vals)-1)
self.train_x = self.rnn_data(x_vals[train_indices])
self.train_y = self.rnn_data(y_vals[train_indices], labels=True)
self.test_x = self.rnn_data(x_vals[test_indices])
self.test_y = self.rnn_data(y_vals[test_indices], labels=True)
self.validate_x = self.rnn_data(x_vals[validate_indices])
self.validate_y = self.rnn_data(y_vals[validate_indices], labels=True)
self.kinect_angles = np.array(kinect__angles.ix[:, 0])
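    # rnn_data builds sliding windows of num_steps consecutive rows; when labels=True it
    # keeps only the value that follows each window, so window i is paired with y[i + num_steps].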
def rnn_data(self, data, labels=False):
data = pd.DataFrame(data)
rnn_df = []
for i in range(data.shape[0] - self.num_steps):
if labels:
rnn_df.append(data.iloc[i + self.num_steps].as_matrix())
else:
data_ = data.iloc[i: i + self.num_steps].as_matrix()
rnn_df.append(data_ if len(data_.shape) > 1 else [[i] for i in data_])
return np.array(rnn_df, dtype=np.float32)
def model_fn(self, features, targets, mode, params):
def dnn_layers(input_layers, layers):
if layers and isinstance(layers, dict):
return tflayers.stack(input_layers, tflayers.fully_connected,
layers['layers'],
activation=layers.get('activation'),
dropout=layers.get('dropout'))
elif layers:
return tflayers.stack(input_layers, tflayers.fully_connected, layers)
else:
return input_layers
def lstm_cell():
return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(params["num_steps"], forget_bias=0.0,
state_is_tuple=True),output_keep_prob=params['keep_prob'])
def lstm_forward_cell():
return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(params["num_steps"], forget_bias=0.2,
state_is_tuple=True), output_keep_prob=params['keep_prob'])
def lstm_backword_cell():
return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(params["num_steps"], forget_bias=0.2,
state_is_tuple=True), output_keep_prob=params['keep_prob'])
def _lstm_model(features, targets):
lstm_fw_multicell = tf.contrib.rnn.MultiRNNCell([lstm_forward_cell() for _ in range(params['num_layers_rnn'])],
state_is_tuple=True)
lstm_bw_multicell = tf.contrib.rnn.MultiRNNCell([lstm_backword_cell() for _ in range(params['num_layers_rnn'])],
state_is_tuple=True)
features = tf.unstack(features, num=params["num_steps"], axis=1)
with tf.variable_scope("RNN"):
output, state = tf.contrib.rnn.static_rnn(lstm_fw_multicell, features, dtype=tf.float32)
# output, state = tf.contrib.learn.models.bidirectional_rnn(lstm_fw_multicell, lstm_bw_multicell, features,
# dtype='float32')
# # output = dnn_layers(output[-1], [params['dnn_layer_size'], params['dnn_layer_size']])
first_hidden_layer = tf.contrib.layers.fully_connected(output[-1], num_outputs=5, activation_fn=None)
output = tf.contrib.layers.fully_connected(first_hidden_layer, num_outputs=5, activation_fn=None)
output = self.extract(output, 'input')
labels = self.extract(targets, 'labels')
W = tf.Variable(tf.random_normal([5, 1]), name="Theta")
lambda_val = tf.constant(0.1)
y_predicted = tf.matmul(output, W, name="y_predicted")
for pow_i in range(1, 1):
W = tf.Variable(tf.random_normal([5, 1]), name='weight_%d' % pow_i)
y_predicted = tf.matmul(tf.pow(output, pow_i), W)+ y_predicted
with tf.name_scope('cost') as scope:
# loss = (tf.nn.l2_loss(y_predicted - labels) + lambda_val * tf.nn.l2_loss(W)) / float(self.batch_size)
# loss_summary = tf.summary.scalar('cost', loss)
loss = tf.reduce_sum(tf.pow(y_predicted - labels, 2)) / (self.batch_size - 1)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params["learning_rate"])
# correct_prediction = tf.equal(tf.argmax(train_prediction, 1), train_labels)
# predictions_dict = {"classes":y_predicted}
predictions_dict = {"classes": tf.argmax(input=y_predicted, axis=1, name="angles")}
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(tf.cast(y_predicted, tf.float32), tf.cast(labels, tf.float32))
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions_dict, loss=loss, train_op=train_op
, eval_metric_ops=eval_metric_ops)
return _lstm_model(features, targets)
def extract(self, data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def execute(self):
self.import_data(1)
estimator = tf.contrib.learn.Estimator(model_fn=self.model_fn, params=self.model_params,
model_dir="/home/runge/openbci/git/OpenBCI_Python/neuralnet/net/model3/model",
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=20))
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
self.test_x,
self.test_y,
every_n_steps=50,
metrics=self.validation_metrics,
early_stopping_metric="loss",
early_stopping_metric_minimize=True,
early_stopping_rounds=200)
tensors_to_log = {"classes": "angles"}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=1)
estimator.fit(x=self.train_x, y=self.train_y, steps=500, monitors=[validation_monitor, logging_hook], batch_size=self.batch_size)
test_results = estimator.evaluate(x=self.test_x, y=self.test_y, steps=1)
print("Loss: %s" % test_results["loss"])
print("Root Mean Squared Error: %s" % test_results["rmse"])
# self.validate_x = self.validate_x
predictions = estimator.predict(x=self.validate_x)
for i, p in enumerate(predictions):
print("Prediction %s: %s" % (self.validate_y[i], p["classes"]))
project_location = '/home/runge/openbci/git/OpenBCI_Python'
model = Deviatev1(project_location)
model.execute()
# a smarter learning rate for gradientOptimizer
# learningRate = tf.train.exponential_decay(learning_rate=0.0008,
# global_step= 1,
# decay_steps=train_x.shape[0],
# decay_rate= 0.95,
# staircase=True)
#
# X = tf.placeholder(tf.float32, [None, numFeatures])
# yGold = tf.placeholder(tf.float32, [None, numLabels])
# weights = tf.Variable(tf.random_normal([numFeatures,numLabels],
# mean=0,
# stddev=(np.sqrt(6/numFeatures+
# numLabels+1)),
# name="weights"))
# bias = tf.Variable(tf.random_normal([1,numLabels],
# mean=0,
# stddev=(np.sqrt(6/numFeatures+numLabels+1)),
# name="bias"))
#
#
# init_OP = tf.global_variables_initializer()
# apply_weights_OP = tf.matmul(X, weights, name="apply_weights")
# add_bias_OP = tf.add(apply_weights_OP, bias, name="add_bias")
# activation_OP = tf.nn.sigmoid(add_bias_OP, name="activation")
# cost_OP = tf.nn.l2_loss(activation_OP-yGold, name="squared_error_cost")
# training_OP = tf.train.GradientDescentOptimizer(learningRate).minimize(cost_OP)
#
# epoch_values=[]
# accuracy_values=[]
# cost_values=[]
#
# plt.ion()
# fig = plt.figure()
# ax1 = plt.subplot("211")
# ax1.set_title("TRAINING ACCURACY", fontsize=18)
# ax2 = plt.subplot("212")
# ax2.set_title("TRAINING COST", fontsize=18)
# plt.tight_layout()
#
#
# sess = tf.Session()
# sess.run(init_OP)
#
# correct_predictions_OP = tf.equal(tf.argmax(activation_OP,1),tf.argmax(yGold,1))
# accuracy_OP = tf.reduce_mean(tf.cast(correct_predictions_OP, "float"))
# activation_summary_OP = tf.summary.histogram("output", activation_OP)
# accuracy_summary_OP = tf.summary.scalar("accuracy", accuracy_OP)
# cost_summary_OP = tf.summary.scalar("cost", cost_OP)
# weightSummary = tf.summary.histogram("weights", weights.eval(session=sess))
# biasSummary = tf.summary.histogram("biases", bias.eval(session=sess))
#
# all_summary_OPS = tf.summary.merge_all()
# writer = tf.summary.FileWriter("summary_logs", sess.graph)
#
# # Initialize reporting variables
# cost = 0
# diff = 1
# # Declare batch size
# batch_size = 25
# # Training epochs
# for i in range(numEpochs):
# if i > 1 and diff < .0001:
# print("change in cost %g; convergence."%diff)
# break
# else:
# rand_index = np.random.choice(len(train_x), size=batch_size)
# rand_x = train_x[rand_index]
# rand_y = np.transpose([train_y[rand_index]])
# # Run training step
# step = sess.run(training_OP, feed_dict={X: rand_x, yGold: rand_y})
# # Report occasional stats
# if i % 10 == 0:
# # Add epoch to epoch_values
# epoch_values.append(i)
# # Generate accuracy stats on test data
# summary_results, train_accuracy, newCost = sess.run(
# [all_summary_OPS, accuracy_OP, cost_OP],
# feed_dict={X: rand_x, yGold: rand_y}
# )
# # Add accuracy to live graphing variable
# accuracy_values.append(train_accuracy)
# # Add cost to live graphing variable
# cost_values.append(newCost)
# # Write summary stats to writer
# writer.add_summary(summary_results, i)
# # Re-assign values for variables
# diff = abs(newCost - cost)
# cost = newCost
#
# #generate print statements
# print("step %d, training accuracy %g"%(i, train_accuracy))
# print("step %d, cost %g"%(i, newCost))
# print("step %d, change in cost %g"%(i, diff))
#
# # Plot progress to our two subplots
# accuracyLine, = ax1.plot(epoch_values, accuracy_values)
# costLine, = ax2.plot(epoch_values, cost_values)
# fig.canvas.draw()
# time.sleep(1)
#
# rand_index = np.random.choice(len(test_x), size=len(test_x))
# rand_x = test_x[rand_index]
# rand_y = np.transpose([test_y[rand_index]])
# # How well do we perform on held-out test data?
# print("final accuracy on test set: %s" %str(sess.run(accuracy_OP,
# feed_dict={X: rand_x,
# yGold: rand_y})))
# # Create Saver
# saver = tf.train.Saver()
# # Save variables to .ckpt file
# saver.save(sess, "trained_variables.ckpt")
# sess.close()
#
# # To view tensorboard:
# #1. run: tensorboard --logdir=/path/to/log-directory
# #2. open your browser to http://localhost:6006/
#
#
#
| 14,935 | 40.604457 | 137 | py |
OpenBCIPython | OpenBCIPython-master/neuralnet/net/model3/try_1.py | from __future__ import division
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib import layers as tflayers
tf.logging.set_verbosity(tf.logging.INFO)
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import constant_op
from matplotlib import pyplot as plt
from tensorflow.contrib import learn as tflearn
from tensorflow.contrib import layers as tflayers
class Deviatev1:
def __init__(self, project_location):
self.project_location = project_location
self.project_config = self.project_location + "/config/config.json"
self.column_names = ['ch1', 'ch2', 'ch3']
        self.number_of_features = 5
        self.number_of_labels = 180
        self.num_epochs = 500000
        self.learning_rate = 0.00004
self.batch_size = 2000
self.keep_prob = 0.9
self.hidden_size = 10
self.num_layers_rnn = 10
self.num_steps = 30
self.dnn_layer_size = 10
self.number_of_channels = 5
self.lstm_cell_layes_sizes = [512, 256, 128, 128, 64]
self.model_params = {"learning_rate": self.learning_rate, "keep_prob": self.keep_prob
, 'num_steps': self.num_steps, 'num_layers_rnn':self.num_layers_rnn, 'dnn_layer_size': self.dnn_layer_size
, 'number_of_labels': self.number_of_labels }
self.validation_metrics = {
"accuracy":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key="classes"),
"precision":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key="classes"),
"recall":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_recall,
prediction_key="classes")
}
self.test_metrics = {
"accuracy":
tf.contrib.learn.MetricSpec(
metric_fn=tf.metrics.accuracy, prediction_key="classes"),
}
    def normalize_signal(self, input_signal):
mean = np.mean(input_signal, axis=0, dtype=np.float32)
input_signal -= mean
return input_signal / np.std(input_signal, axis=0)
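    # A small worked example of the z-scoring above (values rounded, purely
    # illustrative): np.array([0., 5., 10.]) has mean 5 and std ~4.08, so it
    # maps to roughly [-1.22, 0., 1.22]. Note that the subtraction mutates the
    # input array in place before the scaled copy is returned.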
def import_data(self, angle_type):
kinect__angles = pd.read_csv(
self.project_location + "/build/dataset/train/result/reconstructed_bycept_kinect__angles_.csv",
header=None, names=self.column_names).dropna()
channel_signals = pd.read_csv(self.project_location
+ "/build/dataset/train/result/bycept_feature_vectors.csv").dropna()
# kinect__angles = kinect__angles.applymap(lambda x: '%.2f' % x)
y_vals = np.array(kinect__angles.ix[:, angle_type], dtype=np.float32)
        y_vals = self.normalize_signal(y_vals)
x_vals = np.array(channel_signals)
train_presentation = 0.8
test_presentation = 0.8
training_sample_size = int(round(len(x_vals) * train_presentation))
train_indices = np.arange(0,training_sample_size)
test_sample_size = int((len(x_vals)-training_sample_size)*test_presentation)
test_sample_size_end_index = training_sample_size+ test_sample_size
test_indices = np.arange(training_sample_size, test_sample_size_end_index)
validate_indices = np.arange(test_sample_size_end_index, len(x_vals)-1)
self.train_x = self.rnn_data(x_vals[train_indices])
self.train_y = self.rnn_data(y_vals[train_indices], labels=True)
self.test_x = self.rnn_data(x_vals[test_indices])
self.test_y = self.rnn_data(y_vals[test_indices], labels=True)
self.validate_x = self.rnn_data(x_vals[validate_indices])
self.validate_y = self.rnn_data(y_vals[validate_indices], labels=True)
self.kinect_angles = np.array(kinect__angles.ix[:, 0])
def rnn_data(self, data, labels=False):
data = pd.DataFrame(data)
if labels is not True:
data = data.ix[:,0:self.number_of_channels]
rnn_df = []
for i in range(data.shape[0] - self.num_steps):
if labels:
rnn_df.append(data.iloc[i + self.num_steps].as_matrix())
else:
data_ = data.iloc[i: i + self.num_steps].as_matrix()
rnn_df.append(data_ if len(data_.shape) > 1 else [[i] for i in data_])
return np.array(rnn_df, dtype=np.float32)
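    # Shape sketch for the windowing above (hedged, using the configured
    # num_steps=30): rnn_data(x) yields roughly [len(x) - 30, 30, channels]
    # feature windows, while rnn_data(y, labels=True) yields the target value
    # immediately following each window, i.e. roughly [len(y) - 30, 1].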
# def model_fn(self, features, targets, mode, params):
#
# def dnn_layers(input_layers, layers):
# if layers and isinstance(layers, dict):
# return tflayers.stack(input_layers, tflayers.fully_connected,
# layers['layers'],
# activation=layers.get('activation'),
# dropout=layers.get('dropout'))
# elif layers:
# return tflayers.stack(input_layers, tflayers.fully_connected, layers)
# else:
# return input_layers
#
# def lstm_cell(number_of_steps):
# return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(number_of_steps, forget_bias=0.0,
# state_is_tuple=True),output_keep_prob=params['keep_prob'])
# def lstm_forward_cell():
# return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(params["num_steps"], forget_bias=0.2,
# state_is_tuple=True), output_keep_prob=params['keep_prob'])
# def lstm_backword_cell():
# return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(params["num_steps"], forget_bias=0.2,
# state_is_tuple=True), output_keep_prob=params['keep_prob'])
#
#
# def _lstm_model(features, targets):
# # batch_size sequences of length 10 with 2 values for each timestep
# features = tf.reshape(features, [-1, self.num_steps, self.number_of_channels])
# # features = tf.unstack(features, num=params["num_steps"], axis=1)
# # Create LSTM cell with state size 256. Could also use GRUCell, ...
# # Note: state_is_tuple=False is deprecated;
# # the option might be completely removed in the future
# # cell = tf.contrib.rnn.LSTMCell(256, state_is_tuple=True)
# multi_cell = tf.contrib.rnn.MultiRNNCell([lstm_cell(256) for _ in range(5)], state_is_tuple=True)
# outputs, state = tf.nn.dynamic_rnn(multi_cell,
# features,
# # sequence_length=[10] * batch_size,
# dtype=tf.float32)
#
# predictions = tf.contrib.layers.fully_connected(state.h,
# num_outputs=128,
# activation_fn=None)
# predictions = tf.contrib.layers.fully_connected(predictions,
# num_outputs=1,
# activation_fn=None)
# loss = tf.reduce_sum(tf.pow(predictions - targets[-1], 2))
#
# # lstm_fw_multicell = tf.contrib.rnn.MultiRNNCell([lstm_forward_cell() for _ in range(params['num_layers_rnn'])],
# # state_is_tuple=True)
# # lstm_bw_multicell = tf.contrib.rnn.MultiRNNCell([lstm_backword_cell() for _ in range(params['num_layers_rnn'])],
# # state_is_tuple=True)
# # features = tf.unstack(features, num=params["num_steps"], axis=1)
# # with tf.variable_scope("RNN"):
# # output, state = tf.contrib.rnn.static_rnn(lstm_fw_multicell, features, dtype=tf.float32)
# # # output, state = tf.contrib.learn.models.bidirectional_rnn(lstm_fw_multicell, lstm_bw_multicell, features,
# # # dtype='float32')
# # # # output = dnn_layers(output[-1], [params['dnn_layer_size'], params['dnn_layer_size']])
# # first_hidden_layer = tf.contrib.layers.fully_connected(output[-1], num_outputs=5, activation_fn=None)
# # output = tf.contrib.layers.fully_connected(first_hidden_layer, num_outputs=5, activation_fn=None)
# #
# # output = self.extract(output, 'input')
# # labels = self.extract(targets, 'labels')
# #
# # W = tf.Variable(tf.random_normal([5, 1]), name="Theta")
# # lambda_val = tf.constant(0.1)
# # y_predicted = tf.matmul(output, W, name="y_predicted")
# #
# # for pow_i in range(1, 1):
# # W = tf.Variable(tf.random_normal([5, 1]), name='weight_%d' % pow_i)
# # y_predicted = tf.matmul(tf.pow(output, pow_i), W)+ y_predicted
# #
# # with tf.name_scope('cost') as scope:
# # # loss = (tf.nn.l2_loss(y_predicted - labels) + lambda_val * tf.nn.l2_loss(W)) / float(self.batch_size)
# # # loss_summary = tf.summary.scalar('cost', loss)
# # loss = tf.reduce_sum(tf.pow(y_predicted - labels, 2)) / (self.batch_size - 1)
# #
# train_op = tf.contrib.layers.optimize_loss(
# loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
# learning_rate=params["learning_rate"])
#
# # correct_prediction = tf.equal(tf.argmax(train_prediction, 1), train_labels)
#
# # predictions_dict = {"classes":y_predicted}
# predictions_dict = {"classes": tf.argmax(input=predictions, axis=1, name="angles")}
#
# eval_metric_ops = {
# "rmse": tf.metrics.root_mean_squared_error(tf.cast(predictions, tf.float32), tf.cast(targets, tf.float32))
# }
#
# return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions_dict, loss=loss, train_op=train_op
# , eval_metric_ops=eval_metric_ops)
#
# return _lstm_model(features, targets)
def model_fn(self, features, targets, mode, params):
def lstm_cell(number_of_steps):
return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(number_of_steps, forget_bias=0.0,
state_is_tuple=True),output_keep_prob=params['keep_prob'])
def dnn_layers(input_layers, layers):
if layers and isinstance(layers, dict):
return tflayers.stack(input_layers, tflayers.fully_connected,
layers['layers'],
activation=layers.get('activation'),
dropout=layers.get('dropout'))
elif layers:
return tflayers.stack(input_layers, tflayers.fully_connected, layers)
else:
return input_layers
def _lstm_model(X, y):
layers = []
layers.append(params)
stacked_lstm = tf.contrib.rnn.MultiRNNCell([lstm_cell(self.lstm_cell_layes_sizes[j])
for j in range(len(self.lstm_cell_layes_sizes))], state_is_tuple=True)
x_ = tf.unstack(X, axis=1, num=self.num_steps)
output, layers = tf.contrib.rnn.static_rnn(stacked_lstm, x_, dtype=tf.float32)
output = dnn_layers(output[-1], [self.dnn_layer_size, self.dnn_layer_size])
prediction, loss = tflearn.models.linear_regression(output, y)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params["learning_rate"])
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(
tf.cast(targets, tf.float32), prediction)
}
predictions_dict = {"classes": prediction}
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
return _lstm_model(features, targets)
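    # Rough data flow of model_fn above (descriptive only): features of shape
    # [batch, num_steps, channels] are unstacked into num_steps tensors of
    # [batch, channels], run through the stacked LSTM cells whose sizes come
    # from self.lstm_cell_layes_sizes, passed through two fully connected
    # layers of width dnn_layer_size, and fed to a linear regression head
    # whose loss is minimized with Adagrad via optimize_loss.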
def extract(self, data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def plot_results(self, predicted_data, true_data):
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.plot(true_data, label='True Data')
plt.plot(predicted_data, label='Prediction')
plt.legend()
plt.show()
def execute(self):
self.import_data(1)
estimator = tf.contrib.learn.Estimator(model_fn=self.model_fn, params=self.model_params,
model_dir="./model",
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=20))
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
self.test_x,
self.test_y,
every_n_steps=50,
metrics=self.validation_metrics,
early_stopping_metric="loss",
early_stopping_metric_minimize=True,
early_stopping_rounds=200000)
tensors_to_log = {"classes": "angles"}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=1000)
estimator.fit(x=self.train_x, y=self.train_y, steps=self.num_epochs, monitors=[validation_monitor], batch_size=self.batch_size)
test_results = estimator.evaluate(x=self.test_x, y=self.test_y, steps=1)
print("Loss: %s" % test_results["loss"])
print("Root Mean Squared Error: %s" % test_results["rmse"])
# self.validate_x = self.validate_x
predictions = estimator.predict(x=self.validate_x)
predictions_rsl=[]
for i, p in enumerate(predictions):
print("Prediction %s: %s" % (self.validate_y[i], p["classes"]))
predictions_rsl.append(p["classes"])
# plot_predicted, = plt.plot(p["classes"], label='predicted')
# plot_test, = plt.plot(y['test'], label='test')
# plt.legend(handles=[plot_predicted])
#
        print(len(predictions_rsl))
        print(len(self.validate_y))
self.plot_results(predictions_rsl, self.validate_y)
project_location = '/home2/geesara/OpenBCI_Python'
model = Deviatev1(project_location)
model.execute()
# a smarter learning rate for gradientOptimizer
# learningRate = tf.train.exponential_decay(learning_rate=0.0008,
# global_step= 1,
# decay_steps=train_x.shape[0],
# decay_rate= 0.95,
# staircase=True)
#
# X = tf.placeholder(tf.float32, [None, numFeatures])
# yGold = tf.placeholder(tf.float32, [None, numLabels])
# weights = tf.Variable(tf.random_normal([numFeatures,numLabels],
# mean=0,
# stddev=(np.sqrt(6/numFeatures+
# numLabels+1)),
# name="weights"))
# bias = tf.Variable(tf.random_normal([1,numLabels],
# mean=0,
# stddev=(np.sqrt(6/numFeatures+numLabels+1)),
# name="bias"))
#
#
# init_OP = tf.global_variables_initializer()
# apply_weights_OP = tf.matmul(X, weights, name="apply_weights")
# add_bias_OP = tf.add(apply_weights_OP, bias, name="add_bias")
# activation_OP = tf.nn.sigmoid(add_bias_OP, name="activation")
# cost_OP = tf.nn.l2_loss(activation_OP-yGold, name="squared_error_cost")
# training_OP = tf.train.GradientDescentOptimizer(learningRate).minimize(cost_OP)
#
# epoch_values=[]
# accuracy_values=[]
# cost_values=[]
#
# plt.ion()
# fig = plt.figure()
# ax1 = plt.subplot("211")
# ax1.set_title("TRAINING ACCURACY", fontsize=18)
# ax2 = plt.subplot("212")
# ax2.set_title("TRAINING COST", fontsize=18)
# plt.tight_layout()
#
#
# sess = tf.Session()
# sess.run(init_OP)
#
# correct_predictions_OP = tf.equal(tf.argmax(activation_OP,1),tf.argmax(yGold,1))
# accuracy_OP = tf.reduce_mean(tf.cast(correct_predictions_OP, "float"))
# activation_summary_OP = tf.summary.histogram("output", activation_OP)
# accuracy_summary_OP = tf.summary.scalar("accuracy", accuracy_OP)
# cost_summary_OP = tf.summary.scalar("cost", cost_OP)
# weightSummary = tf.summary.histogram("weights", weights.eval(session=sess))
# biasSummary = tf.summary.histogram("biases", bias.eval(session=sess))
#
# all_summary_OPS = tf.summary.merge_all()
# writer = tf.summary.FileWriter("summary_logs", sess.graph)
#
# # Initialize reporting variables
# cost = 0
# diff = 1
# # Declare batch size
# batch_size = 25
# # Training epochs
# for i in range(numEpochs):
# if i > 1 and diff < .0001:
# print("change in cost %g; convergence."%diff)
# break
# else:
# rand_index = np.random.choice(len(train_x), size=batch_size)
# rand_x = train_x[rand_index]
# rand_y = np.transpose([train_y[rand_index]])
# # Run training step
# step = sess.run(training_OP, feed_dict={X: rand_x, yGold: rand_y})
# # Report occasional stats
# if i % 10 == 0:
# # Add epoch to epoch_values
# epoch_values.append(i)
# # Generate accuracy stats on test data
# summary_results, train_accuracy, newCost = sess.run(
# [all_summary_OPS, accuracy_OP, cost_OP],
# feed_dict={X: rand_x, yGold: rand_y}
# )
# # Add accuracy to live graphing variable
# accuracy_values.append(train_accuracy)
# # Add cost to live graphing variable
# cost_values.append(newCost)
# # Write summary stats to writer
# writer.add_summary(summary_results, i)
# # Re-assign values for variables
# diff = abs(newCost - cost)
# cost = newCost
#
# #generate print statements
# print("step %d, training accuracy %g"%(i, train_accuracy))
# print("step %d, cost %g"%(i, newCost))
# print("step %d, change in cost %g"%(i, diff))
#
# # Plot progress to our two subplots
# accuracyLine, = ax1.plot(epoch_values, accuracy_values)
# costLine, = ax2.plot(epoch_values, cost_values)
# fig.canvas.draw()
# time.sleep(1)
#
# rand_index = np.random.choice(len(test_x), size=len(test_x))
# rand_x = test_x[rand_index]
# rand_y = np.transpose([test_y[rand_index]])
# # How well do we perform on held-out test data?
# print("final accuracy on test set: %s" %str(sess.run(accuracy_OP,
# feed_dict={X: rand_x,
# yGold: rand_y})))
# # Create Saver
# saver = tf.train.Saver()
# # Save variables to .ckpt file
# saver.save(sess, "trained_variables.ckpt")
# sess.close()
#
# # To view tensorboard:
# #1. run: tensorboard --logdir=/path/to/log-directory
# #2. open your browser to http://localhost:6006/
#
#
#
| 20,083 | 43.433628 | 135 | py |
OpenBCIPython | OpenBCIPython-master/neuralnet/net/model3/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/neuralnet/net/model3/try_0.py | from __future__ import division
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib import layers as tflayers
tf.logging.set_verbosity(tf.logging.INFO)
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import constant_op
class Deviatev1:
def __init__(self, project_location):
self.project_location = project_location
self.project_config = self.project_location + "/config/config.json"
self.column_names = ['ch1', 'ch2', 'ch3']
        self.number_of_features = 5
        self.number_of_labels = 180
        self.num_epochs = 5000
        self.learning_rate = 1e-8
self.batch_size = 5
self.keep_prob = 0.9
self.hidden_size = 10
self.num_layers_rnn = 5
self.num_steps = 5
self.time_steps = 1
self.dnn_layer_size = 5
self.model_params = {"learning_rate": self.learning_rate, "keep_prob": self.keep_prob
, 'num_steps': self.num_steps, 'num_layers_rnn':self.num_layers_rnn, 'dnn_layer_size': self.dnn_layer_size
, 'number_of_labels': self.number_of_labels }
self.validation_metrics = {
"accuracy":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key="classes"),
"precision":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key="classes"),
"recall":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_recall,
prediction_key="classes")
}
self.test_metrics = {
"accuracy":
tf.contrib.learn.MetricSpec(
metric_fn=tf.metrics.accuracy, prediction_key="classes"),
}
def import_data(self, angle_type):
kinect__angles = pd.read_csv(
self.project_location + "/build/dataset/train/result/reconstructed_bycept_kinect__angles_.csv",
header=None, names=self.column_names).dropna()
channel_signals = pd.read_csv(self.project_location
+ "/build/dataset/train/result/bycept_feature_vectors.csv").dropna()
# kinect__angles = kinect__angles.applymap(lambda x: '%.2f' % x)
y_vals = np.array(kinect__angles.ix[:, angle_type], dtype=np.int32)
x_vals = np.array(channel_signals)
train_presentation = 0.8
test_presentation = 0.8
train_indices = np.random.choice(len(x_vals), int(round(len(x_vals) * train_presentation)), replace=False)
rest_of_data_set_x_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
rest_of_data_set_x = x_vals[rest_of_data_set_x_indices]
rest_of_data_set_y_indices = np.array(list(set(range(len(y_vals))) - set(train_indices)))
rest_of_data_set_y = y_vals[rest_of_data_set_y_indices]
test_indices = np.random.choice(len(rest_of_data_set_x),
int(round(len(rest_of_data_set_x_indices) * test_presentation)),
replace=False)
validate_indices = np.array(list(set(range(len(rest_of_data_set_x_indices))) - set(test_indices)))
self.train_x = self.rnn_data(x_vals[train_indices])
self.train_y = self.rnn_data(y_vals[train_indices], labels=True)
self.test_x = self.rnn_data(rest_of_data_set_x[test_indices])
self.test_y = self.rnn_data(rest_of_data_set_y[test_indices], labels=True)
self.validate_x = self.rnn_data(rest_of_data_set_x[validate_indices])
self.validate_y = self.rnn_data(rest_of_data_set_y[validate_indices], labels=True)
self.kinect_angles = np.array(kinect__angles.ix[:, 0])
def rnn_data(self, data, labels=False):
data = pd.DataFrame(data)
rnn_df = []
for i in range(data.shape[0] - self.num_steps):
if labels:
rnn_df.append(data.iloc[i + self.num_steps].as_matrix())
else:
data_ = data.iloc[i: i + self.num_steps].as_matrix()
rnn_df.append(data_ if len(data_.shape) > 1 else [[i] for i in data_])
return np.array(rnn_df, dtype=np.float32)
def model_fn(self, features, targets, mode, params):
def dnn_layers(input_layers, layers):
if layers and isinstance(layers, dict):
return tflayers.stack(input_layers, tflayers.fully_connected,
layers['layers'],
activation=layers.get('activation'),
dropout=layers.get('dropout'))
elif layers:
return tflayers.stack(input_layers, tflayers.fully_connected, layers)
else:
return input_layers
def lstm_cell():
return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(params["num_steps"], forget_bias=0.0,
state_is_tuple=True),output_keep_prob=params['keep_prob'])
def lstm_forward_cell():
return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(params["num_steps"], forget_bias=0.2,
state_is_tuple=True), output_keep_prob=params['keep_prob'])
def lstm_backword_cell():
return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(params["num_steps"], forget_bias=0.2,
state_is_tuple=True), output_keep_prob=params['keep_prob'])
def _lstm_model(features, targets):
lstm_fw_multicell = tf.contrib.rnn.MultiRNNCell([lstm_forward_cell() for _ in range(params['num_layers_rnn'])],
state_is_tuple=True)
lstm_bw_multicell = tf.contrib.rnn.MultiRNNCell([lstm_backword_cell() for _ in range(params['num_layers_rnn'])],
state_is_tuple=True)
# initial_state_fw = lstm_fw_multicell.zero_state(batch_size=self.batch_size, dtype='float32')
# initial_state_bw = lstm_bw_multicell.zero_state(batch_size=self.batch_size, dtype='float32')
# init_state = tf.zeros([params["num_steps"]*2])
# initial_state = cell.zero_state(batch_size, tf.float32)
features = tf.unstack(features, num=params["num_steps"], axis=1)
with tf.variable_scope("RNN"):
# output, state = tf.contrib.rnn.static_rnn(lstm_fw_multicell, features, dtype=tf.float32)
output, state = tf.contrib.learn.models.bidirectional_rnn(lstm_fw_multicell, lstm_bw_multicell, features,
# initial_state_fw=initial_state_fw,
# initial_state_bw=initial_state_bw,
# sequence_length=[5, 5],
dtype='float32')
# targets = tf.one_hot(targets, params['number_of_labels'], 1, 0)
# targets = tf.Variable(tf.random_normal([1]))
# logits = tf.contrib.layers.fully_connected(output[-1], 180)
# loss = tf.contrib.losses.softmax_cross_entropy(logits, targets)
output = dnn_layers(output[-1], [params['dnn_layer_size'], params['dnn_layer_size']])
# # first = tf.contrib.layers.fully_connected(output[-1], num_outputs=300)
# # first = tf.contrib.layers.fully_connected(first, num_outputs=200)
# # first = tf.contrib.layers.fully_connected(first, num_outputs=100)
# # first = tf.contrib.layers.fully_connected(first, num_outputs=50)
# # first = tf.contrib.layers.fully_connected(first, num_outputs=25)
# output = tf.contrib.layers.fully_connected(output, num_outputs=5)
# output = tf.contrib.layers.linear(output, 1)
# output = tf.reshape(output, [-1])
output = self.extract(output, 'input')
labels = self.extract(targets, 'labels')
labels = tf.cast(labels, tf.float32)
# prediction, loss = tf.contrib.learn.models.linear_regression(outputs, labels)
# with tf.variable_scope('softmax'):
# W = tf.get_variable('W', [5, 180])
# b = tf.get_variable('b', [180], initializer=tf.constant_initializer(0.0))
# labels = tf.reshape(labels, [-1])
# logits = tf.matmul(outputs, W) + b
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# prediction = tf.argmax(logits, 1)
# prediction = tf.get_variable('b', [180], initializer=tf.constant_initializer(0.0))
# labels= tf.cast(labels, tf.float32)
# n_observations = 180
# for pow_i in range(1, 5):
# # W = tf.Variable(tf.random_normal([1]), name='weight_%d' % pow_i)
# W = tf.get_variable('weight_%d' % pow_i, [5, 180])
# logits = tf.matmul(tf.pow(outputs, pow_i), W)+ prediction
#
# loss = tf.reduce_sum(tf.pow(prediction - labels, 2)) / (n_observations - 1)
theta = tf.Variable(tf.random_normal([5, 1], stddev=0.01), name="Theta")
lambda_val = tf.constant(0.1)
y_predicted = tf.matmul(output, theta, name="y_predicted")
for pow_i in range(1, 2):
W = tf.Variable(tf.random_normal([5, 1]), name='weight_%d' % pow_i)
y_predicted = tf.matmul(tf.pow(output, pow_i), W)+ y_predicted
with tf.name_scope('cost') as scope:
loss = (tf.nn.l2_loss(y_predicted - labels) + lambda_val * tf.nn.l2_loss(theta)) / float(self.batch_size)
loss_summary = tf.summary.scalar('cost', loss)
# with tf.name_scope("prediction") as scope:
# labels = tf.cast(labels, tf.int32)
# correct_prediction = tf.subtract(tf.cast(1, 'float'), tf.reduce_mean(tf.subtract(y_predicted, labels)), name="angles")
# correct_prediction = tf.reduce_mean(tf.subtract(y_predicted, labels), name="angles")
# with tf.name_scope("accuracy") as scope:
# accuracy = tf.cast(correct_prediction, "float")
# theta = tf.Variable(tf.random_normal([5, 1], stddev=0.01), name="Theta")
# lambda_val = tf.constant(0.1)
# prediction = tf.matmul(output, theta, name="y_predicted")
# with tf.name_scope('cost') as scope:
# # cost_func = tf.multiply(tf.cast(1/(2*batch_size), 'float'), tf.cast(tf.add(polynomial_cost_part, regularization_cost_part), 'float'))
# # cost_func = (tf.nn.l2_loss(prediction - labels) + lambda_val * tf.nn.l2_loss(theta)) / float(self.batch_size)
# # DEPRECATED*** cost_summary = tf.scalar_summary("cost", cost_func)
# loss = (tf.nn.l2_loss(prediction - labels) + lambda_val * tf.nn.l2_loss(theta)) / float(self.batch_size)
# # loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=labels))
# # loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# %% And now we can look at the mean of our network's correct guesses
# train_op = tf.train.AdamOptimizer(params["learning_rate"]).minimize(total_loss)
# logits = tf.contrib.layers.fully_connected(output, 15, activation_fn=None)
# labels = array_ops.reshape(
# constant_op.constant(
# targets, dtype=tf.int32), (-1, 1))
# prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(output, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params["learning_rate"])
# correct_prediction = tf.equal(tf.argmax(train_prediction, 1), train_labels)
predictions_dict = {"classes": tf.argmax(input=y_predicted, axis=1, name="angles")}
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(tf.cast(y_predicted, tf.float32), tf.cast(labels, tf.float32))
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions_dict, loss=loss, train_op=train_op
, eval_metric_ops=eval_metric_ops)
return _lstm_model(features, targets)
def extract(self, data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def execute(self):
self.import_data(1)
estimator = tf.contrib.learn.Estimator(model_fn=self.model_fn, params=self.model_params,
model_dir="/home/runge/openbci/git/OpenBCI_Python/neuralnet/net/model3/model",
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=20))
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
self.test_x,
self.test_y,
every_n_steps=50,
metrics=self.validation_metrics,
early_stopping_metric="loss",
early_stopping_metric_minimize=True,
early_stopping_rounds=200)
tensors_to_log = {"classes": "angles"}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=1)
estimator.fit(x=self.train_x, y=self.train_y, steps=50, monitors=[validation_monitor, logging_hook], batch_size=self.batch_size)
test_results = estimator.evaluate(x=self.test_x, y=self.test_y, steps=1)
print("Loss: %s" % test_results["loss"])
print("Root Mean Squared Error: %s" % test_results["rmse"])
# self.validate_x = self.validate_x
predictions = estimator.predict(x=self.validate_x, as_iterable=True)
for i, p in enumerate(predictions):
print("Prediction %s: %s" % (self.validate_y[i], p["classes"]))
project_location = '/home/runge/openbci/git/OpenBCI_Python'
model = Deviatev1(project_location)
model.execute()
# a smarter learning rate for gradientOptimizer
# learningRate = tf.train.exponential_decay(learning_rate=0.0008,
# global_step= 1,
# decay_steps=train_x.shape[0],
# decay_rate= 0.95,
# staircase=True)
#
# X = tf.placeholder(tf.float32, [None, numFeatures])
# yGold = tf.placeholder(tf.float32, [None, numLabels])
# weights = tf.Variable(tf.random_normal([numFeatures,numLabels],
# mean=0,
# stddev=(np.sqrt(6/numFeatures+
# numLabels+1)),
# name="weights"))
# bias = tf.Variable(tf.random_normal([1,numLabels],
# mean=0,
# stddev=(np.sqrt(6/numFeatures+numLabels+1)),
# name="bias"))
#
#
# init_OP = tf.global_variables_initializer()
# apply_weights_OP = tf.matmul(X, weights, name="apply_weights")
# add_bias_OP = tf.add(apply_weights_OP, bias, name="add_bias")
# activation_OP = tf.nn.sigmoid(add_bias_OP, name="activation")
# cost_OP = tf.nn.l2_loss(activation_OP-yGold, name="squared_error_cost")
# training_OP = tf.train.GradientDescentOptimizer(learningRate).minimize(cost_OP)
#
# epoch_values=[]
# accuracy_values=[]
# cost_values=[]
#
# plt.ion()
# fig = plt.figure()
# ax1 = plt.subplot("211")
# ax1.set_title("TRAINING ACCURACY", fontsize=18)
# ax2 = plt.subplot("212")
# ax2.set_title("TRAINING COST", fontsize=18)
# plt.tight_layout()
#
#
# sess = tf.Session()
# sess.run(init_OP)
#
# correct_predictions_OP = tf.equal(tf.argmax(activation_OP,1),tf.argmax(yGold,1))
# accuracy_OP = tf.reduce_mean(tf.cast(correct_predictions_OP, "float"))
# activation_summary_OP = tf.summary.histogram("output", activation_OP)
# accuracy_summary_OP = tf.summary.scalar("accuracy", accuracy_OP)
# cost_summary_OP = tf.summary.scalar("cost", cost_OP)
# weightSummary = tf.summary.histogram("weights", weights.eval(session=sess))
# biasSummary = tf.summary.histogram("biases", bias.eval(session=sess))
#
# all_summary_OPS = tf.summary.merge_all()
# writer = tf.summary.FileWriter("summary_logs", sess.graph)
#
# # Initialize reporting variables
# cost = 0
# diff = 1
# # Declare batch size
# batch_size = 25
# # Training epochs
# for i in range(numEpochs):
# if i > 1 and diff < .0001:
# print("change in cost %g; convergence."%diff)
# break
# else:
# rand_index = np.random.choice(len(train_x), size=batch_size)
# rand_x = train_x[rand_index]
# rand_y = np.transpose([train_y[rand_index]])
# # Run training step
# step = sess.run(training_OP, feed_dict={X: rand_x, yGold: rand_y})
# # Report occasional stats
# if i % 10 == 0:
# # Add epoch to epoch_values
# epoch_values.append(i)
# # Generate accuracy stats on test data
# summary_results, train_accuracy, newCost = sess.run(
# [all_summary_OPS, accuracy_OP, cost_OP],
# feed_dict={X: rand_x, yGold: rand_y}
# )
# # Add accuracy to live graphing variable
# accuracy_values.append(train_accuracy)
# # Add cost to live graphing variable
# cost_values.append(newCost)
# # Write summary stats to writer
# writer.add_summary(summary_results, i)
# # Re-assign values for variables
# diff = abs(newCost - cost)
# cost = newCost
#
# #generate print statements
# print("step %d, training accuracy %g"%(i, train_accuracy))
# print("step %d, cost %g"%(i, newCost))
# print("step %d, change in cost %g"%(i, diff))
#
# # Plot progress to our two subplots
# accuracyLine, = ax1.plot(epoch_values, accuracy_values)
# costLine, = ax2.plot(epoch_values, cost_values)
# fig.canvas.draw()
# time.sleep(1)
#
# rand_index = np.random.choice(len(test_x), size=len(test_x))
# rand_x = test_x[rand_index]
# rand_y = np.transpose([test_y[rand_index]])
# # How well do we perform on held-out test data?
# print("final accuracy on test set: %s" %str(sess.run(accuracy_OP,
# feed_dict={X: rand_x,
# yGold: rand_y})))
# # Create Saver
# saver = tf.train.Saver()
# # Save variables to .ckpt file
# saver.save(sess, "trained_variables.ckpt")
# sess.close()
#
# # To view tensorboard:
# #1. run: tensorboard --logdir=/path/to/log-directory
# #2. open your browser to http://localhost:6006/
#
#
#
| 19,661 | 45.046838 | 153 | py |
OpenBCIPython | OpenBCIPython-master/neuralnet/utils/cnn_common.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
Additionally it maintains a collection with update_ops that need to be
updated after the ops have been computed, for example to update moving means
and moving variances of batch_norm.
Ops that have different behavior during training or eval have an is_training
parameter. Additionally Ops that contain variables.variable have a trainable
parameter, which control if the ops variables are trainable or not.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.training import moving_averages
# Used to keep the update ops done by batch_norm.
from neuralnet.utils import scopes
from neuralnet.utils import variables
from neuralnet.utils import losses
UPDATE_OPS_COLLECTION = '_update_ops_'
@scopes.add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
moving_vars='moving_vars',
activation=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a Batch Normalization layer.
Args:
inputs: a tensor of size [batch_size, height, width, channels]
or [batch_size, channels].
decay: decay for the moving average.
center: If True, subtract beta. If False, beta is not created and ignored.
scale: If True, multiply by gamma. If False, gamma is
not used. When the next layer is linear (also e.g. ReLU), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
moving_vars: collection to store the moving_mean and moving_variance.
activation: activation function.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
inputs_shape = inputs.get_shape()
with tf.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse):
axis = list(range(len(inputs_shape) - 1))
params_shape = inputs_shape[-1:]
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta = variables.variable('beta',
params_shape,
initializer=tf.zeros_initializer(),
trainable=trainable,
restore=restore)
if scale:
gamma = variables.variable('gamma',
params_shape,
initializer=tf.ones_initializer(),
trainable=trainable,
restore=restore)
# Create moving_mean and moving_variance add them to
# GraphKeys.MOVING_AVERAGE_VARIABLES collections.
moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
moving_mean = variables.variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer(),
trainable=False,
restore=restore,
collections=moving_collections)
moving_variance = variables.variable('moving_variance',
params_shape,
initializer=tf.ones_initializer(),
trainable=False,
restore=restore,
collections=moving_collections)
if is_training:
# Calculate the moments based on the individual batch.
mean, variance = tf.nn.moments(inputs, axis)
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
else:
# Just use the moving_mean and moving_variance.
mean = moving_mean
variance = moving_variance
# Normalize the activations.
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
outputs.set_shape(inputs.get_shape())
if activation:
outputs = activation(outputs)
return outputs
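# A hedged usage sketch (not part of the original file): batch_norm is
# normally reached through the conv2d/fc wrappers below via batch_norm_params,
# e.g.
#
#   net = conv2d(images, 64, [3, 3],
#                batch_norm_params={'decay': 0.99, 'scale': True},
#                is_training=True, scope='conv1')
#
# In training mode the batch moments are used and the moving-average updates
# are added to UPDATE_OPS_COLLECTION, which the training loop is expected to
# run alongside the train op.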
def _two_element_tuple(int_or_tuple):
"""Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
This functions normalizes the input value by always returning a tuple.
Args:
int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.
Returns:
A tuple with 2 values.
Raises:
ValueError: If `int_or_tuple` it not well formed.
"""
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(int_or_tuple)
if isinstance(int_or_tuple, tf.TensorShape):
if len(int_or_tuple) == 2:
return int_or_tuple[0], int_or_tuple[1]
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
'length 2')
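# For example (illustrative): _two_element_tuple(3) -> (3, 3) and
# _two_element_tuple([3, 5]) -> (3, 5).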
@scopes.add_arg_scope
def conv2d(inputs,
num_filters_out,
kernel_size,
stride=1,
padding='SAME',
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
conv2d creates a variable called 'weights', representing the convolutional
kernel, that is convolved with the input. If `batch_norm_params` is None, a
second variable called 'biases' is added to the result of the convolution
operation.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width]
      of the filters. Can be an int if both values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
activation: activation function.
    stddev: standard deviation of the truncated Gaussian weight distribution.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for the batch_norm. If is None don't use it.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
num_filters_in = inputs.get_shape()[-1]
weights_shape = [kernel_h, kernel_w,
num_filters_in, num_filters_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
padding=padding)
if batch_norm_params is not None:
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(conv, **batch_norm_params)
else:
bias_shape = [num_filters_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.bias_add(conv, biases)
if activation:
outputs = activation(outputs)
return outputs
@scopes.add_arg_scope
def fc(inputs,
num_units_out,
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a fully connected layer followed by an optional batch_norm layer.
FC creates a variable called 'weights', representing the fully connected
weight matrix, that is multiplied by the input. If `batch_norm` is None, a
second variable called 'biases' is added to the result of the initial
vector-matrix multiplication.
Args:
inputs: a [B x N] tensor where B is the batch size and N is the number of
input units in the layer.
num_units_out: the number of output units in the layer.
activation: activation function.
stddev: the standard deviation for the weights.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for the batch_norm. If is None don't use it.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
the tensor variable representing the result of the series of operations.
"""
with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse):
num_units_in = inputs.get_shape()[1]
weights_shape = [num_units_in, num_units_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
if batch_norm_params is not None:
outputs = tf.matmul(inputs, weights)
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(outputs, **batch_norm_params)
else:
bias_shape = [num_units_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.xw_plus_b(inputs, weights, biases)
if activation:
outputs = activation(outputs)
return outputs
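# A hedged sketch of composing these wrappers into a small tower (tensor
# names and sizes are illustrative, not taken from the original repo):
#
#   net = conv2d(inputs, 32, [3, 3], scope='conv1')
#   net = max_pool(net, [2, 2], scope='pool1')
#   net = flatten(net)
#   logits = fc(net, 10, activation=None, scope='logits')
#
# max_pool and flatten are defined further down in this module.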
def one_hot_encoding(labels, num_classes, scope=None):
"""Transform numeric labels into onehot_labels.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
scope: Optional scope for name_scope.
Returns:
one hot encoding of the labels.
"""
with tf.name_scope(scope, 'OneHotEncoding', [labels]):
batch_size = labels.get_shape()[0]
indices = tf.expand_dims(tf.range(0, batch_size), 1)
labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
concated = tf.concat(axis=1, values=[indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)
onehot_labels.set_shape([batch_size, num_classes])
return onehot_labels
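# Worked example (illustrative): one_hot_encoding(tf.constant([0, 2, 1]), 3)
# evaluates to
#   [[1., 0., 0.],
#    [0., 0., 1.],
#    [0., 1., 0.]]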
@scopes.add_arg_scope
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Max Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for name_scope.
Returns:
a tensor representing the results of the pooling operation.
Raises:
ValueError: if 'kernel_size' is not a 2-D list
"""
with tf.name_scope(scope, 'MaxPool', [inputs]):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Avg Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for name_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with tf.name_scope(scope, 'AvgPool', [inputs]):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
"""Returns a dropout layer applied to the input.
Args:
inputs: the tensor to pass to the Dropout layer.
keep_prob: the probability of keeping each input unit.
is_training: whether or not the model is in training mode. If so, dropout is
applied and values scaled. Otherwise, inputs is returned.
scope: Optional scope for name_scope.
Returns:
a tensor representing the output of the operation.
"""
if is_training and keep_prob > 0:
with tf.name_scope(scope, 'Dropout', [inputs]):
return tf.nn.dropout(inputs, keep_prob)
else:
return inputs
def flatten(inputs, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
    raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.name_scope(scope, 'Flatten', [inputs]):
return tf.reshape(inputs, [-1, k])
def repeat_op(repetitions, inputs, op, *args, **kwargs):
"""Build a sequential Tower starting from inputs by using an op repeatedly.
It creates new scopes for each operation by increasing the counter.
Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
it will repeat the given op under the following variable_scopes:
conv1/Conv
conv1/Conv_1
conv1/Conv_2
Args:
repetitions: number or repetitions.
inputs: a tensor of size [batch_size, height, width, channels].
op: an operation.
*args: args for the op.
**kwargs: kwargs for the op.
Returns:
a tensor result of applying the operation op, num times.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with tf.variable_scope(scope, 'RepeatOp', [inputs]):
tower = inputs
for _ in range(repetitions):
tower = op(tower, *args, **kwargs)
return tower
| 18,784 | 38.630802 | 80 | py |
OpenBCIPython | OpenBCIPython-master/neuralnet/utils/scopes.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the new arg_scope used for TF-Slim ops.
Allows one to define models much more compactly by eliminating boilerplate
code. This is accomplished through the use of argument scoping (arg_scope).
Example of how to use scopes.arg_scope:
with scopes.arg_scope(ops.conv2d, padding='SAME',
stddev=0.01, weight_decay=0.0005):
net = ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = ops.conv2d(net, 256, [5, 5], scope='conv2')
The first call to conv2d will overwrite padding:
ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
stddev=0.01, weight_decay=0.0005, scope='conv1')
The second call to Conv will use predefined args:
ops.conv2d(inputs, 256, [5, 5], padding='SAME',
stddev=0.01, weight_decay=0.0005, scope='conv2')
Example of how to reuse an arg_scope:
with scopes.arg_scope(ops.conv2d, padding='SAME',
stddev=0.01, weight_decay=0.0005) as conv2d_arg_scope:
net = ops.conv2d(net, 256, [5, 5], scope='conv1')
....
with scopes.arg_scope(conv2d_arg_scope):
net = ops.conv2d(net, 256, [5, 5], scope='conv2')
Example of how to use scopes.add_arg_scope:
@scopes.add_arg_scope
def conv2d(*args, **kwargs)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
from tensorflow.python.framework import ops
_ARGSTACK_KEY = ("__arg_stack",)
_DECORATED_OPS = set()
def _get_arg_stack():
stack = ops.get_collection(_ARGSTACK_KEY)
if stack:
return stack[0]
else:
stack = [{}]
ops.add_to_collection(_ARGSTACK_KEY, stack)
return stack
def _current_arg_scope():
stack = _get_arg_stack()
return stack[-1]
def _add_op(op):
key_op = (op.__module__, op.__name__)
if key_op not in _DECORATED_OPS:
_DECORATED_OPS.add(key_op)
@contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
"""Stores the default arguments for the given set of list_ops.
For usage, please see examples at top of the file.
Args:
list_ops_or_scope: List or tuple of operations to set argument scope for or
      a dictionary containing the current scope. When list_ops_or_scope is a dict,
kwargs must be empty. When list_ops_or_scope is a list or tuple, then
every op in it need to be decorated with @add_arg_scope to work.
**kwargs: keyword=value that will define the defaults for each op in
list_ops. All the ops need to accept the given set of arguments.
Yields:
the current_scope, which is a dictionary of {op: {arg: value}}
Raises:
TypeError: if list_ops is not a list or a tuple.
ValueError: if any op in list_ops has not be decorated with @add_arg_scope.
"""
if isinstance(list_ops_or_scope, dict):
# Assumes that list_ops_or_scope is a scope that is being reused.
if kwargs:
raise ValueError("When attempting to re-use a scope by suppling a"
"dictionary, kwargs must be empty.")
current_scope = list_ops_or_scope.copy()
try:
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
else:
# Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
if not isinstance(list_ops_or_scope, (list, tuple)):
raise TypeError("list_ops_or_scope must either be a list/tuple or reused"
"scope (i.e. dict)")
try:
current_scope = _current_arg_scope().copy()
for op in list_ops_or_scope:
key_op = (op.__module__, op.__name__)
if not has_arg_scope(op):
raise ValueError("%s is not decorated with @add_arg_scope", key_op)
if key_op in current_scope:
current_kwargs = current_scope[key_op].copy()
current_kwargs.update(kwargs)
current_scope[key_op] = current_kwargs
else:
current_scope[key_op] = kwargs.copy()
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
def add_arg_scope(func):
"""Decorates a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
    The decorated function func_with_args().
"""
@functools.wraps(func)
def func_with_args(*args, **kwargs):
current_scope = _current_arg_scope()
current_args = kwargs
key_func = (func.__module__, func.__name__)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
_add_op(func)
return func_with_args
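# A hedged sketch of the decorator in use (my_op is illustrative, not part of
# the original file):
#
#   @add_arg_scope
#   def my_op(inputs, padding='SAME', stddev=0.01):
#     ...
#
#   with arg_scope([my_op], padding='VALID'):
#     my_op(x)               # picks up padding='VALID' from the scope
#     my_op(x, stddev=0.1)   # explicit kwargs still take precedence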
def has_arg_scope(func):
"""Checks whether a func has been decorated with @add_arg_scope or not.
Args:
func: function to check.
Returns:
a boolean.
"""
key_op = (func.__module__, func.__name__)
return key_op in _DECORATED_OPS
| 5,612 | 31.824561 | 80 | py |
OpenBCIPython | OpenBCIPython-master/neuralnet/utils/losses.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for various Neural Network TensorFlow losses.
All the losses defined here add themselves to the LOSSES_COLLECTION
collection.
l1_loss: Define a L1 Loss, useful for regularization, i.e. lasso.
l2_loss: Define a L2 Loss, useful for regularization, i.e. weight decay.
cross_entropy_loss: Define a cross entropy loss using
softmax_cross_entropy_with_logits. Useful for classification.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# In order to gather all losses in a network, the user should use this
# key for get_collection, i.e:
# losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
LOSSES_COLLECTION = '_losses'
def l1_regularizer(weight=1.0, scope=None):
"""Define a L1 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L1Regularizer', [tensor]):
l1_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
return regularizer
def l2_regularizer(weight=1.0, scope=None):
"""Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L2Regularizer', [tensor]):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer
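# A hedged usage sketch (mirrors how the ops wrappers wire this up; the
# variable name and weight value are illustrative):
#
#   reg = l2_regularizer(weight=0.0005)
#   weights = tf.get_variable('weights', [100, 100], regularizer=reg)
#   # ...or call reg(weights) directly to obtain the scaled penalty tensor.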
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
"""Define a L1L2 regularizer.
Args:
weight_l1: scale the L1 loss by this factor.
weight_l2: scale the L2 loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):
weight_l1_t = tf.convert_to_tensor(weight_l1,
dtype=tensor.dtype.base_dtype,
name='weight_l1')
weight_l2_t = tf.convert_to_tensor(weight_l2,
dtype=tensor.dtype.base_dtype,
name='weight_l2')
reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
name='value_l1')
reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor),
name='value_l2')
return tf.add(reg_l1, reg_l2, name='value')
return regularizer
def l1_loss(tensor, weight=1.0, scope=None):
"""Define a L1Loss, useful for regularize, i.e. lasso.
Args:
tensor: tensor to regularize.
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
the L1 loss op.
"""
with tf.name_scope(scope, 'L1Loss', [tensor]):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
def l2_loss(tensor, weight=1.0, scope=None):
"""Define a L2Loss, useful for regularize, i.e. weight decay.
Args:
tensor: tensor to regularize.
weight: an optional weight to modulate the loss.
scope: Optional scope for name_scope.
Returns:
the L2 loss op.
"""
with tf.name_scope(scope, 'L2Loss', [tensor]):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0,
weight=1.0, scope=None):
"""Define a Cross Entropy loss using softmax_cross_entropy_with_logits.
It can scale the loss by weight factor, and smooth the labels.
Args:
    logits: [batch_size, num_classes] logits outputs of the network.
one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels.
label_smoothing: if greater than 0 then smooth the labels.
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
A tensor with the softmax_cross_entropy loss.
"""
logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape())
with tf.name_scope(scope, 'CrossEntropyLoss', [logits, one_hot_labels]):
num_classes = one_hot_labels.get_shape()[-1].value
one_hot_labels = tf.cast(one_hot_labels, logits.dtype)
if label_smoothing > 0:
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
cross_entropy = tf.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits(
logits, one_hot_labels, name='xentropy')
weight = tf.convert_to_tensor(weight,
dtype=logits.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
| 6,398 | 35.565714 | 87 | py |
OpenBCIPython | OpenBCIPython-master/neuralnet/utils/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/neuralnet/utils/variables.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for creating variables in TF-Slim.
The variables module is typically used for defining model variables from the
ops routines (see slim.ops). Such variables are used for training, evaluation
and inference of models.
All the variables created through this module would be added to the
MODEL_VARIABLES collection, if you create a model variable outside slim, it can
be added with slim.variables.add_variable(external_variable, reuse).
Usage:
weights_initializer = tf.truncated_normal_initializer(stddev=0.01)
l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005)
weights = variables.variable('weights',
shape=[100, 100],
initializer=weights_initializer,
regularizer=l2_regularizer,
device='/cpu:0')
biases = variables.variable('biases',
shape=[100],
initializer=tf.zeros_initializer(),
device='/cpu:0')
# More complex example.
net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1')
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
with slim.arg_scope([variables.variable], restore=False):
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3')
# Get all model variables from all the layers.
model_variables = slim.variables.get_variables()
  # Get all model variables from a specific layer, i.e. 'conv1'.
conv1_variables = slim.variables.get_variables('conv1')
# Get all weights from all the layers.
weights = slim.variables.get_variables_by_name('weights')
# Get all bias from all the layers.
biases = slim.variables.get_variables_by_name('biases')
# Get all variables to restore.
# (i.e. only those created by 'conv1' and 'conv2')
variables_to_restore = slim.variables.get_variables_to_restore()
************************************************
* Initializing model variables from a checkpoint
************************************************
# Create some variables.
v1 = slim.variables.variable(name="v1", ..., restore=False)
v2 = slim.variables.variable(name="v2", ...) # By default restore=True
...
# The list of variables to restore should only contain 'v2'.
variables_to_restore = slim.variables.get_variables_to_restore()
restorer = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
# Restore variables from disk.
restorer.restore(sess, "/tmp/model.ckpt")
print("Model restored.")
# Do some work with the model
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Collection containing all the variables created using slim.variables
from neuralnet.utils import scopes
MODEL_VARIABLES = '_model_variables_'
# Collection containing the slim.variables that are created with restore=True.
VARIABLES_TO_RESTORE = '_variables_to_restore_'
def add_variable(var, restore=True):
"""Adds a variable to the MODEL_VARIABLES collection.
Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
Args:
var: a variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
"""
collections = [MODEL_VARIABLES]
if restore:
collections.append(VARIABLES_TO_RESTORE)
for collection in collections:
if var not in tf.get_collection(collection):
tf.add_to_collection(collection, var)
def get_variables(scope=None, suffix=None):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a copied list of variables with scope and suffix.
"""
candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]
if suffix is not None:
candidates = [var for var in candidates if var.op.name.endswith(suffix)]
return candidates
def get_variables_to_restore():
"""Gets the list of variables to restore.
Returns:
a copied list of variables.
"""
return tf.get_collection(VARIABLES_TO_RESTORE)[:]
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and prefix.
"""
return get_variables(scope=scope, suffix=given_name)
def get_unique_variable(name):
"""Gets the variable uniquely identified by that name.
Args:
name: a name that uniquely identifies the variable.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
if not candidates:
    raise ValueError("Couldn't find variable %s" % name)
for candidate in candidates:
if candidate.op.name == name:
return candidate
  raise ValueError('Variable %s does not uniquely identify a variable' % name)
class VariableDeviceChooser(object):
"""Slim device chooser for variables.
When using a parameter server it will assign them in a round-robin fashion.
When not using a parameter server it allows GPU:0 placement otherwise CPU:0.
"""
def __init__(self,
num_parameter_servers=0,
ps_device='/job:ps',
placement='CPU:0'):
"""Initialize VariableDeviceChooser.
Args:
num_parameter_servers: number of parameter servers.
ps_device: string representing the parameter server device.
placement: string representing the placement of the variable either CPU:0
or GPU:0. When using parameter servers forced to CPU:0.
"""
self._num_ps = num_parameter_servers
self._ps_device = ps_device
self._placement = placement if num_parameter_servers == 0 else 'CPU:0'
self._next_task_id = 0
def __call__(self, op):
device_string = ''
if self._num_ps > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_ps
device_string = '%s/task:%d' % (self._ps_device, task_id)
device_string += '/%s' % self._placement
return device_string
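# Illustrative sketch (not in the original file): with two parameter servers
# the chooser cycles variables between '/job:ps/task:0/CPU:0' and
# '/job:ps/task:1/CPU:0'. The `op` argument is unused by __call__, so None is
# passed here purely for demonstration.
def _device_chooser_example():
  chooser = VariableDeviceChooser(num_parameter_servers=2)
  first = chooser(None)   # '/job:ps/task:0/CPU:0'
  second = chooser(None)  # '/job:ps/task:1/CPU:0'
  third = chooser(None)   # back to task:0
  return first, second, third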
# TODO(sguada) Remove once get_variable is able to colocate op.devices.
def variable_device(device, name):
"""Fix the variable device to colocate its ops."""
if callable(device):
var_name = tf.get_variable_scope().name + '/' + name
var_def = tf.NodeDef(name=var_name, op='Variable')
device = device(var_def)
if device is None:
device = ''
return device
@scopes.add_arg_scope
def global_step(device=''):
"""Returns the global step variable.
Args:
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
Returns:
the tensor representing the global step variable.
"""
global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
if global_step_ref:
return global_step_ref[0]
else:
collections = [
VARIABLES_TO_RESTORE,
tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP,
]
# Get the device for the variable.
with tf.device(variable_device(device, 'global_step')):
return tf.get_variable('global_step', shape=[], dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False, collections=collections)
@scopes.add_arg_scope
def variable(name, shape=None, dtype=tf.float32, initializer=None,
regularizer=None, trainable=True, collections=None, device='',
restore=True):
"""Gets an existing variable with these parameters or creates a new one.
  It also adds itself to a group with its name.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the tf.GraphKeys.GLOBAL_VARIABLES
and MODEL_VARIABLES collections.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
# Make sure variables are added to tf.GraphKeys.GLOBAL_VARIABLES and MODEL_VARIABLES
collections += [tf.GraphKeys.GLOBAL_VARIABLES, MODEL_VARIABLES]
# Add to VARIABLES_TO_RESTORE if necessary
if restore:
collections.append(VARIABLES_TO_RESTORE)
# Remove duplicates
collections = set(collections)
# Get the device for the variable.
with tf.device(variable_device(device, name)):
return tf.get_variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections)
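# Illustrative sketch (not in the original file): creating a model variable
# through the wrapper above and then inspecting the bookkeeping collections.
# The name 'demo_weights' and the shape are arbitrary for this example.
def _variable_usage_example():
  demo_weights = variable('demo_weights',
                          shape=[10, 10],
                          initializer=tf.truncated_normal_initializer(stddev=0.01),
                          restore=False)
  # The variable shows up in MODEL_VARIABLES but, because restore=False,
  # not in the VARIABLES_TO_RESTORE collection.
  model_vars = get_variables()
  restore_vars = get_variables_to_restore()
  return demo_weights, model_vars, restore_vars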
| 10,359 | 34.724138 | 86 | py |
OpenBCIPython | OpenBCIPython-master/try/classification.py | import functools
import sets
import tensorflow as tf
def lazy_property(function):
attribute = '_' + function.__name__
@property
@functools.wraps(function)
def wrapper(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return wrapper
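# Illustrative sketch (not part of the original script): lazy_property
# evaluates the wrapped function once, stores the result on the instance, and
# returns the cached value on every later access. `_LazyPropertyDemo` is a
# throwaway class used only for this example.
class _LazyPropertyDemo:
    def __init__(self):
        self.calls = 0
    @lazy_property
    def value(self):
        self.calls += 1
        return self.calls
# d = _LazyPropertyDemo(); d.value; d.value  ->  d.calls stays 1, because the
# second access returns the cached '_value' attribute instead of recomputing.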
class SequenceClassification:
def __init__(self, data, target, dropout, num_hidden=200, num_layers=3):
self.data = data
self.target = target
self.dropout = dropout
self._num_hidden = num_hidden
self._num_layers = num_layers
        # These are lazy properties, so they must be accessed (not called)
        # to build the corresponding graph nodes up front.
        self.prediction
        self.error
        self.optimize
@lazy_property
def prediction(self):
# Recurrent network.
network = tf.contrib.rnn.GRUCell(self._num_hidden)
network = tf.contrib.rnn.DropoutWrapper(
network, output_keep_prob=self.dropout)
network = tf.contrib.rnn.MultiRNNCell([network] * self._num_layers)
output, _ = tf.nn.dynamic_rnn(network, self.data, dtype=tf.float32)
# Select last output.
output = tf.transpose(output, [1, 0, 2])
last = tf.gather(output, int(output.get_shape()[0]) - 1)
# Softmax layer.
weight, bias = self._weight_and_bias(
self._num_hidden, int(self.target.get_shape()[1]))
prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
return prediction
@lazy_property
def cost(self):
cross_entropy = -tf.reduce_sum(self.target * tf.log(self.prediction))
return cross_entropy
@lazy_property
def optimize(self):
learning_rate = 0.003
optimizer = tf.train.RMSPropOptimizer(learning_rate)
return optimizer.minimize(self.cost)
@lazy_property
def error(self):
mistakes = tf.not_equal(
tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
return tf.reduce_mean(tf.cast(mistakes, tf.float32))
@staticmethod
def _weight_and_bias(in_size, out_size):
weight = tf.truncated_normal([in_size, out_size], stddev=0.01)
bias = tf.constant(0.1, shape=[out_size])
return tf.Variable(weight), tf.Variable(bias)
def main():
# We treat images as sequences of pixel rows.
train, test = sets.Mnist()
_, rows, row_size = train.data.shape
num_classes = train.target.shape[1]
data = tf.placeholder(tf.float32, [None, rows, row_size])
target = tf.placeholder(tf.float32, [None, num_classes])
dropout = tf.placeholder(tf.float32)
model = SequenceClassification(data, target, dropout)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(10):
for _ in range(100):
batch = train.sample(10)
sess.run(model.optimize, {
data: batch.data, target: batch.target, dropout: 0.5})
error = sess.run(model.error, {
data: test.data, target: test.target, dropout: 1})
print('Epoch {:2d} error {:3.1f}%'.format(epoch + 1, 100 * error))
if __name__ == '__main__':
main() | 3,108 | 32.430108 | 77 | py |
OpenBCIPython | OpenBCIPython-master/try/reader.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import os
import tensorflow as tf
def _read_words(filename):
with tf.gfile.GFile(filename, "r") as f:
return f.read().decode("utf-8").replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
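# Illustrative sketch (not in the original file): _build_vocab sorts words by
# descending frequency (ties broken alphabetically) and assigns increasing
# ids, so the token stream "the cat the dog" maps to
# {'the': 0, 'cat': 1, 'dog': 2}. The helper below applies the same logic to
# an in-memory token list instead of a file.
def _build_vocab_from_tokens(tokens):
  counter = collections.Counter(tokens)
  count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
  words, _ = list(zip(*count_pairs))
  return dict(zip(words, range(len(words))))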
def _file_to_word_ids(filename, word_to_id):
data = _read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
def ptb_raw_data(data_path=None):
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
return train_data, valid_data, test_data, vocabulary
def ptb_producer(raw_data, batch_size, num_steps, name=None):
with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
data_len = tf.size(raw_data)
batch_len = data_len // batch_size
data = tf.reshape(raw_data[0 : batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
assertion = tf.assert_positive(
epoch_size,
message="epoch_size == 0, decrease batch_size or num_steps")
with tf.control_dependencies([assertion]):
epoch_size = tf.identity(epoch_size, name="epoch_size")
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = tf.strided_slice(data, [0, i * num_steps],
[batch_size, (i + 1) * num_steps])
x.set_shape([batch_size, num_steps])
y = tf.strided_slice(data, [0, i * num_steps + 1],
[batch_size, (i + 1) * num_steps + 1])
y.set_shape([batch_size, num_steps])
return x, y
| 2,885 | 37.48 | 80 | py |
OpenBCIPython | OpenBCIPython-master/try/ablone_model.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom estimator for abalone dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from six.moves import urllib
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
FLAGS = None
tf.logging.set_verbosity(tf.logging.INFO)
# Learning rate for the model
LEARNING_RATE = 0.001
def maybe_download(train_data, test_data, predict_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"http://download.tensorflow.org/data/abalone_train.csv",
train_file.name)
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"http://download.tensorflow.org/data/abalone_test.csv", test_file.name)
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s" % test_file_name)
if predict_data:
predict_file_name = predict_data
else:
predict_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"http://download.tensorflow.org/data/abalone_predict.csv",
predict_file.name)
predict_file_name = predict_file.name
predict_file.close()
print("Prediction data is downloaded to %s" % predict_file_name)
return train_file_name, test_file_name, predict_file_name
def model_fn(features, targets, mode, params):
"""Model function for Estimator."""
# Connect the first hidden layer to input layer
# (features) with relu activation
first_hidden_layer = tf.contrib.layers.relu(features, 10)
# Connect the second hidden layer to first hidden layer with relu
second_hidden_layer = tf.contrib.layers.relu(first_hidden_layer, 10)
# Connect the output layer to second hidden layer (no activation fn)
output_layer = tf.contrib.layers.linear(second_hidden_layer, 1)
# Reshape output layer to 1-dim Tensor to return predictions
predictions = tf.reshape(output_layer, [-1])
predictions_dict = {"ages": predictions}
# Calculate loss using mean squared error
loss = tf.losses.mean_squared_error(targets, predictions)
# Calculate root mean squared error as additional eval metric
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(
tf.cast(targets, tf.float64), predictions)
}
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=params["learning_rate"],
optimizer="SGD")
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
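# Illustrative sketch (not part of the original script): the Estimator below
# is the same construction main() performs, shown in isolation. The `params`
# dict is forwarded to model_fn, which reads params["learning_rate"].
def _build_estimator_example():
  return tf.contrib.learn.Estimator(
      model_fn=model_fn,
      params={"learning_rate": LEARNING_RATE})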
def main(unused_argv):
# Load datasets
abalone_train, abalone_test, abalone_predict = maybe_download(
FLAGS.train_data, FLAGS.test_data, FLAGS.predict_data)
# Training examples
training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
filename=abalone_train, target_dtype=np.int, features_dtype=np.float64)
# Test examples
test_set = tf.contrib.learn.datasets.base.load_csv_without_header(
filename=abalone_test, target_dtype=np.int, features_dtype=np.float64)
# Set of 7 examples for which to predict abalone ages
prediction_set = tf.contrib.learn.datasets.base.load_csv_without_header(
filename=abalone_predict, target_dtype=np.int, features_dtype=np.float64)
# Set model params
model_params = {"learning_rate": LEARNING_RATE}
# Instantiate Estimator
nn = tf.contrib.learn.Estimator(model_fn=model_fn, params=model_params)
# Fit
nn.fit(x=training_set.data, y=training_set.target, steps=5000)
# Score accuracy
ev = nn.evaluate(x=test_set.data, y=test_set.target, steps=1)
print("Loss: %s" % ev["loss"])
print("Root Mean Squared Error: %s" % ev["rmse"])
# Print out predictions
predictions = nn.predict(x=prediction_set.data, as_iterable=True)
for i, p in enumerate(predictions):
print("Prediction %s: %s" % (i + 1, p["ages"]))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--train_data", type=str, default="", help="Path to the training data.")
parser.add_argument(
"--test_data", type=str, default="", help="Path to the test data.")
parser.add_argument(
"--predict_data",
type=str,
default="",
help="Path to the prediction data.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 5,595 | 32.710843 | 85 | py |
OpenBCIPython | OpenBCIPython-master/try/contrib_learn.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import urllib
import numpy as np
import tensorflow as tf
# Data sets
IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
def main():
# If the training and test sets aren't stored locally, download them.
if not os.path.exists(IRIS_TRAINING):
raw = urllib.urlopen(IRIS_TRAINING_URL).read()
with open(IRIS_TRAINING, "w") as f:
f.write(raw)
if not os.path.exists(IRIS_TEST):
raw = urllib.urlopen(IRIS_TEST_URL).read()
with open(IRIS_TEST, "w") as f:
f.write(raw)
# Load datasets.
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TRAINING,
target_dtype=np.int,
features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TEST,
target_dtype=np.int,
features_dtype=np.float32)
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir="/tmp/iris_model")
# Define the training inputs
def get_train_inputs():
x = tf.constant(training_set.data)
y = tf.constant(training_set.target)
return x, y
# Fit model.
classifier.fit(input_fn=get_train_inputs, steps=2000)
# Define the test inputs
def get_test_inputs():
x = tf.constant(test_set.data)
y = tf.constant(test_set.target)
return x, y
# Evaluate accuracy.
accuracy_score = classifier.evaluate(input_fn=get_test_inputs,
steps=1)["accuracy"]
print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
# Classify two new flower samples.
def new_samples():
return np.array([[6.4, 3.2, 4.5, 1.5],[5.8, 3.1, 5.0, 1.7]], dtype=np.float32)
predictions = list(classifier.predict(input_fn=new_samples))
print("New Samples, Class Predictions:{}\n".format(predictions))
if __name__ == "__main__":
main()
| 2,469 | 29.493827 | 82 | py |
OpenBCIPython | OpenBCIPython-master/try/rnn_test.py |
import inspect
import time
import numpy as np
import tensorflow as tf
import reader
import pandas as pd
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None,
"Where the training/test data is stored.")
flags.DEFINE_string("save_path", None,
"Model output directory.")
flags.DEFINE_bool("use_fp16", False,
"Train using 16-bit floats instead of 32bit floats")
FLAGS = flags.FLAGS
def normalize_signal(input_signal):
processed_signal = input_signal
mean = np.mean(processed_signal, axis=0)
processed_signal -= mean
return processed_signal / np.std(processed_signal, axis=0)
def csv_to_numpy_array(filePath, delimiter):
return np.genfromtxt(filePath, delimiter=delimiter, dtype=None)
def import_data():
project_file_path = "/home/runge/openbci/git/OpenBCI_Python"
config_file = project_file_path + "/config/config.json"
channel_signals = pd.read_csv(project_file_path
+ "/build/dataset/train/result/raw_reconstructed_signals.csv").dropna()
column_names = ['ch1', 'ch2', 'ch3']
kinect__angles = pd.read_csv(
project_file_path + "/build/dataset/train/result/reconstructed_kinect__angles_.csv",
header = None, names = column_names).dropna()
# kinect__angles = kinect__angles.applymap(lambda x: '%.2f' % x)
    y_vals = normalize_signal(np.array(kinect__angles.ix[:, 0]))
x_vals = np.array(channel_signals)
train_presentation = 0.8
test_presentation = 0.8
train_indices = np.random.choice(len(x_vals), int(round(len(x_vals) * train_presentation)), replace=False)
rest_of_data_set_x_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
rest_of_data_set_x =x_vals[rest_of_data_set_x_indices]
rest_of_data_set_y_indices = np.array(list(set(range(len(y_vals))) - set(train_indices)))
rest_of_data_set_y = y_vals[rest_of_data_set_y_indices]
test_indices = np.random.choice(len(rest_of_data_set_x), int(round(len(rest_of_data_set_x_indices) * test_presentation)),
replace=False)
validate_indices = np.array(list(set(range(len(rest_of_data_set_x_indices))) - set(test_indices)))
train_x = x_vals[train_indices]
train_y = y_vals[train_indices]
test_x = rest_of_data_set_x[test_indices]
test_y = rest_of_data_set_y[test_indices]
validate_x = rest_of_data_set_x[validate_indices]
validate_y = rest_of_data_set_y[validate_indices]
return train_x,train_y,test_x,test_y,validate_x,validate_y,np.array(kinect__angles.ix[:, 0])
train_x,train_y,test_x,test_y,validate_x,validate_y,y_labels = import_data()
def data_type():
return tf.float32
class PTBInput():
"""The input data."""
def __init__(self, config, data, name=None):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
self.input_data, self.targets = reader.ptb_producer(data, batch_size, num_steps, name=name)
class PTBModel():
"""The PTB model."""
def __init__(self, is_training, config, input_):
self._input = input_
batch_size = input_.batch_size
num_steps = input_.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
def lstm_cell():
if 'reuse' in inspect.getargspec(
tf.contrib.rnn.BasicLSTMCell.__init__).args:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=0.0, state_is_tuple=True,
reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=0.0, state_is_tuple=True)
attn_cell = lstm_cell
if is_training and config.keep_prob < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=config.keep_prob)
cell = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
self._initial_state = cell.zero_state(batch_size, data_type())
with tf.device("/cpu:0"):
embedding = tf.get_variable(
"embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of models/tutorials/rnn/rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# inputs = tf.unstack(inputs, num=num_steps, axis=1)
# outputs, state = tf.contrib.rnn.static_rnn(
# cell, inputs, initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])
softmax_w = tf.get_variable(
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(input_.targets, [-1])],
[tf.ones([batch_size * num_steps], dtype=data_type())])
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
@property
def input(self):
return self._input
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig():
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
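# Illustrative sketch (not part of the original script): main_op() below
# evaluates with batch_size = num_steps = 1; this helper shows that override
# on its own so the intent of eval_config is explicit.
def _make_eval_config():
    eval_config = SmallConfig()
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    return eval_config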
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, (c, h) in enumerate(model.initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
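# Illustrative note (not part of the original script): run_epoch returns
# exp(total_cost / iters), i.e. the average per-step cross entropy mapped to
# perplexity. For example, an average cost of 4.6 nats corresponds to a
# perplexity of roughly exp(4.6) ~= 99.5.
def _perplexity_from_costs(total_cost, total_iters):
    return np.exp(total_cost / total_iters)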
def get_config():
return SmallConfig()
def main_op():
# raw_data = reader.ptb_raw_data("/home/runge/AI/tensorflow/simple-examples/data/")
# train_data, valid_data, test_data, _ = raw_data
train_data, valid_data, test_data = train_x[:,0], train_x[:,0], train_x[:,0]
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
with tf.name_scope("Train"):
train_input = PTBInput(config=config, data=train_data, name="TrainInput")
with tf.variable_scope("Model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config, input_=train_input)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
tf.summary.scalar("Validation Loss", mvalid.cost)
with tf.name_scope("Test"):
test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mtest = PTBModel(is_training=False, config=eval_config, input_=test_input)
sv = tf.train.Supervisor(logdir=FLAGS.save_path)
with sv.managed_session() as session:
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, eval_op=m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid)
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
test_perplexity = run_epoch(session, mtest)
print("Test Perplexity: %.3f" % test_perplexity)
if FLAGS.save_path:
print("Saving model to %s." % FLAGS.save_path)
sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
main_op() | 10,531 | 34.342282 | 125 | py |
OpenBCIPython | OpenBCIPython-master/try/text_classification_character_rnn.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using recurrent neural networks over characters for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
def char_rnn_model(features, target):
"""Character level recurrent neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.one_hot(features, 256, 1, 0)
byte_list = tf.unstack(byte_list, axis=1)
cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)
_, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
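# Illustrative sketch (not part of the original example): the model above
# consumes sequences of byte ids, which is what
# learn.preprocessing.ByteProcessor produces in main(). A string such as
# 'abc' becomes its byte values [97, 98, 99], padded or truncated to
# MAX_DOCUMENT_LENGTH, and each id is then one-hot encoded over 256 symbols
# inside char_rnn_model.
def _byte_ids_example():
  processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
  return np.array(list(processor.transform(['abc'])))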
def main():
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=True)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_rnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
main()
| 2,925 | 30.462366 | 140 | py |
OpenBCIPython | OpenBCIPython-master/try/text_classification.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import encoders
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = encoders.bow_encoder(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def rnn_model(features, target):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for logistic
# regression over output classes.
target = tf.one_hot(target, 15, 1, 0)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
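# Illustrative sketch (not part of the original example): the shape flow
# through rnn_model for a vocabulary of n_words and the constants above is
#   word ids    [batch_size, MAX_DOCUMENT_LENGTH]                  (int ids)
#   embeddings  [batch_size, MAX_DOCUMENT_LENGTH, EMBEDDING_SIZE]
#   word_list   MAX_DOCUMENT_LENGTH tensors of [batch_size, EMBEDDING_SIZE]
#   encoding    [batch_size, EMBEDDING_SIZE]  (last GRU output)
#   logits      [batch_size, 15]
# The helper below just records those shapes for a given batch size.
def _rnn_model_shapes(batch_size):
  return {
      'word_ids': (batch_size, MAX_DOCUMENT_LENGTH),
      'embeddings': (batch_size, MAX_DOCUMENT_LENGTH, EMBEDDING_SIZE),
      'encoding': (batch_size, EMBEDDING_SIZE),
      'logits': (batch_size, 15),
  }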
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
model_fn = bag_of_words_model
classifier = learn.Estimator(model_fn=model_fn)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 4,997 | 33.232877 | 80 | py |
OpenBCIPython | OpenBCIPython-master/try/peakdetect.py | import sys
from numpy import NaN, Inf, arange, isscalar, asarray, array
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
series = np.array(
[0.43535290045236669, 0.42654141461933315, 0.41773000255681991, 0.40891999695557635, 0.40011370175151184,
0.39131343663359758, 0.38252154303566893, 0.37374034300584275, 0.36497215917263209, 0.35621801267585679,
0.34747593421300349, 0.33874262441059977, 0.33001335524476083, 0.3212808090951445, 0.31253415076299623,
0.30375467571545162, 0.29279413332463616, 0.28066950263846913, 0.26748473568315551, 0.25329508095999298,
0.23816238276517113, 0.22257064085487269, 0.20686783041366305, 0.19137636417560147, 0.17619725667101774,
0.1613130873077859, 0.14692619820926339, 0.13302604230193524, 0.11948229124115357, 0.10618926662385368,
0.093221416544688129, 0.080759511753566646, 0.068909841268586389, 0.057741789191314258, 0.047528297400111059,
0.038377661783666583, 0.030577245984914559, 0.024229987657192708, 0.019320941451161162, 0.015605339007646183,
0.012769894921379362, 0.010630675021273517, 0.0089375571196414604, 0.0072574453484229992,
0.0056665577901831524, 0.0043394932948313406, 0.0035662888407506171, 0.0034126443670462752,
0.0039818360969491691, 0.0053913811140176788, 0.0073389765787151154, 0.0093547705244976988,
0.011300899362130834, 0.012900677877949721, 0.013524752957730889, 0.014121637252505396, 0.014717515762864768,
0.015325399042876801, 0.015933710924031286, 0.016502856888040599, 0.017002871453495255, 0.017400742126837367,
0.017829183078290511, 0.018753749533680416, 0.020934332637185773, 0.024991827581979334, 0.031421788811820006,
0.040747187563057036, 0.053215524918749042, 0.067617524265106255, 0.083370785924705648, 0.099695199146981744,
0.11689992919236054, 0.134963661581574, 0.15375361935196571, 0.17356293278893636, 0.19408264497823111,
0.21546504892339038, 0.23725339989733626, 0.25875695656690234, 0.28044331230887315, 0.30214780293492705,
0.32366721622267552, 0.34504462156594307, 0.36666007061959555, 0.38890379787610135, 0.41102197506491245,
0.43248705900351403, 0.4529589142264005, 0.47214882975792971, 0.48973001298004715, 0.50302491018108664,
0.51210501675271447, 0.52107731940253832, 0.529938411004651, 0.53870077642282344, 0.54735354397444347,
0.55592687834268262, 0.56440659203044641, 0.57280878565964799, 0.5811498002415344, 0.58942140363062234,
0.58482735683832388, 0.57619597258432675, 0.56756219870123692, 0.55892263588288948, 0.55027269774753662,
0.54160807921309972, 0.5308809126363857, 0.51974139230213912, 0.50874440856793657, 0.49815937245854192,
0.48827197610377121, 0.47859141503352676, 0.46910027064819265, 0.45976941661994469, 0.45058890866217649,
0.44155727244372256, 0.43274265723819233, 0.42408725357847676, 0.41546812439802033, 0.40685551657557978,
0.39825325460116845, 0.38966073114838395, 0.38107465393138651, 0.37249044995929598, 0.36390362384516112,
0.35531120216550049, 0.34671093707706274, 0.33810314995250557, 0.3294914862008711, 0.32087906065044597,
0.31226671688852181, 0.30364581844177974, 0.29500182862252461, 0.28403606246995888, 0.27153179977920489,
0.25778464961804654, 0.24287656772744626, 0.22752864536604098, 0.21203505682134763, 0.19665621838086986,
0.18168856450435636, 0.16756855422543532, 0.15437068213661675, 0.1416600196550514, 0.12910030508099471,
0.11685724018153666, 0.10468678073177597, 0.092575814757584557, 0.080403051307463128, 0.06864018511146644,
0.057556751290241619, 0.047255598674713031, 0.037993178442720092, 0.030210001333835237, 0.023869729986541324,
0.018855332245264606, 0.014982594828002637, 0.012047324366695134, 0.0099043831832178898, 0.0081247137128125559,
0.0065756984406157805, 0.0054776909249845303, 0.0049300201970713092, 0.0049647773335861371,
0.0055914517299463281, 0.0068565700087189303, 0.008710617433249258, 0.010747814704224134, 0.012761475696567159,
0.014640068460340562, 0.016036925370460201, 0.01650608656845691, 0.016848941113580859, 0.017044662062991409,
0.017062358507574628, 0.016868931928889205, 0.016688326567252366, 0.016828530988248547, 0.017381237721516257,
0.018476351187846053, 0.020954901855918598, 0.025678473275442881, 0.032912038598899844, 0.042840318931119184,
0.056134923025646419, 0.071915639569667789, 0.089363018430540467, 0.1078974579720597, 0.12750269943698384,
0.14845037659636565, 0.17039287997231573, 0.19270731475446001, 0.21496971240896295, 0.23699911756127193,
0.25843043853752162, 0.27916723977377234, 0.29889429559885983, 0.31794416830652417, 0.33656153373388631,
0.35490611407861528, 0.37351232945966456, 0.39262361025775638, 0.41230389094183761, 0.4324393602429884,
0.45252257258573897, 0.47249100297033531, 0.49174927499074123, 0.50892283943548644, 0.51751878540110952,
0.52592945202842301, 0.5341890013003614, 0.54230938489049441, 0.55032130959055992, 0.55823327749295726,
0.56605447504193052, 0.57380377856330866, 0.58147336636802183, 0.58905098117863675, 0.59048689189889325,
0.58025735850311011, 0.57008988468377786, 0.55996157091900589, 0.54997845553147207, 0.54028059892613978,
0.53076523643910067, 0.52141643260408399, 0.5122332197310816, 0.50321403263467301, 0.49434027008410614,
0.48551839712616696, 0.47673211894162176, 0.46797789667334161, 0.4592510804137957, 0.45054720548850258,
0.44186230046241243, 0.43319208166878603, 0.42453072071286679, 0.41587222376451355, 0.40721186793265912,
0.39854499074278882, 0.38986769393436893, 0.38117925214305254, 0.37244040119486505, 0.36353623993741868,
0.35431248303233537, 0.34453610211238472, 0.33397042150421957, 0.32248446807272546, 0.30989532380397972,
0.29609310704926045, 0.28106403842207311, 0.26522510401179045, 0.2489343855755638, 0.23257312027846178,
0.2164388254094374, 0.20083110018693509, 0.18596021380490396, 0.17163030306202878, 0.15762737343182978,
0.14371772690136797, 0.12993747486850818, 0.11603985643520666, 0.10199577334083414, 0.088165907670793478,
0.074946284782225814, 0.062506181636369854, 0.051274092286640677, 0.041409166490116875, 0.033050448269565449,
0.026111250187634199, 0.020375925857880679, 0.015615347783130949, 0.011558005415772009, 0.0080604546952381655,
0.0051648588494875988, 0.0028757615320535576, 0.0012608058602624438, 0.00031945288788812627, 0.0,
0.00030215588092830331, 0.0011053800229415514, 0.0023331691705536883, 0.0038912736972227429,
0.0056807424395955344, 0.0076193618938801572, 0.0095027114613017091, 0.011014866794965357,
0.012179859682251841, 0.012876294702121106, 0.012917727059568751, 0.012825176953454701, 0.012758391043117042,
0.012849972135827415, 0.013170387503607788, 0.013988969371269248, 0.015753883940680769, 0.018870949025161246,
0.023667621239168889, 0.030571800062589104, 0.039821816989419598, 0.051465982741920012, 0.064689919394282924,
0.079161363538066715, 0.094930042188734992, 0.11196980314892914, 0.12981625804802052, 0.1485553989432345,
0.16814483707870759, 0.18815505339559346, 0.2082273818477865, 0.22792538436781143, 0.2474711517989642,
0.26686357345048667, 0.28609720000344807, 0.30557111561606493, 0.3254383953418753, 0.34551631933000526,
0.3659426577153429, 0.38661525455313933, 0.407328346051538, 0.42742795687454643, 0.44661109418819689,
0.46499714015799554, 0.48242007173737494, 0.49863944070197608, 0.51360095351508928, 0.52594231178847872,
0.53485301527847684, 0.54360801995251173, 0.55224260922967239, 0.56075371348732628, 0.56917099601840004,
0.57751606376269149, 0.58567113051320752, 0.57692026120340623, 0.56818831463966302, 0.55947456280602814,
0.55077744633265879, 0.54208965952813282, 0.53340459365022141, 0.52471991432196508, 0.51603521248414319,
0.50734968514582202, 0.49836723030448077, 0.48831836089417141, 0.47840048243582384, 0.4685072554923489,
0.45863678255393014, 0.44878522677006422, 0.43910862560484731, 0.42967375353725873, 0.42038115571454698,
0.41123732965276538, 0.40232794694511054, 0.39351776249905207, 0.38471296134421745, 0.37591653170936845,
0.36713079564262158, 0.35835807577249051, 0.34959939323879508, 0.34085279716796846, 0.33211467476121259,
0.32337842392549931, 0.31463707233084681, 0.30588363359588899, 0.29624701061788128, 0.28491555390220508,
0.27281107633424234, 0.25975188673046029, 0.24584610554252612, 0.23114918083839428, 0.21603038545403494,
0.20068700758132763, 0.18540809972227409, 0.17051433555630707, 0.156018371306443, 0.14199094128430753,
0.12825591808531661, 0.11472016925833366, 0.10129774374472049, 0.087922075162199007, 0.07495180645503996,
0.062620891499645892, 0.051801650926280203, 0.042637366049437957, 0.035060188041196677, 0.029127223093147799,
0.024549652954513515, 0.021007933158400587, 0.017686840540883233, 0.014242156689934884, 0.010944082031700405,
0.0079925776473869659, 0.0057089358893125507, 0.0043554408197835313, 0.0039980086792299196,
0.0045903293200590866, 0.0058376878952243073, 0.00745798739739167, 0.0091150709901356274, 0.010665170290190089,
0.01187010843759945, 0.012587392969204875, 0.013043490038363222, 0.01349257040215528, 0.013943217541372342,
0.014408055527312096, 0.01491851233844672, 0.01548662504040791, 0.016086505941584502, 0.016711074383355139,
0.017594041828910365, 0.019159668770481919, 0.02177644531886801, 0.025970050832394252, 0.032233892652397095,
0.040907897992209799, 0.052165264721859846, 0.065181896860047453, 0.079545818751863701, 0.095231399477126014,
0.11194545034480534, 0.12967403216701198, 0.14837252288321551, 0.16779419153001521, 0.18786583198682347,
0.20814964599848554, 0.22836391981872098, 0.24844914426150802, 0.2681105777304103, 0.28766660453939735,
0.30711223243683317, 0.32649387882865621, 0.34596405664335622, 0.36572781005257016, 0.38588053581595377,
0.40623699300029481, 0.42659563566193365, 0.44681986892221137, 0.46676285237231024, 0.4860819386379216,
0.50433755457973162, 0.52131204380517204, 0.53506856524251667, 0.54411530777148798, 0.55295124555673458,
0.56160366976531662, 0.5701008837178263, 0.57846186632091512, 0.58669856472155046, 0.59483061153566452,
0.60286304259459822, 0.59612731359882687, 0.58734664288497351, 0.57855548124074618, 0.56975540873454156,
0.56094675569419694, 0.55212814541287669, 0.54330074007494644, 0.53446682956493408, 0.52562853509222141,
0.51619217102198089, 0.50593206731742435, 0.49597830247209052, 0.4863360519852688, 0.47696361807976911,
0.46775658831385852, 0.45870694126688516, 0.44980745408388645, 0.44095750461600602, 0.43212028840592098,
0.42329302400752528, 0.41447497201878913, 0.40566414172716081, 0.39685609790972282, 0.38804640824389236,
0.37923207013148386, 0.3704107146620117, 0.3615779789480788, 0.35272723621697311, 0.34385417084259873,
0.33365337126428307, 0.32258731853553002, 0.31108835561616371, 0.29951231925973437, 0.28708970706159531,
0.27380337410055128, 0.25987088757358812, 0.24533905518389232, 0.23032968459356865, 0.21511927914795537,
0.20012000920960932, 0.18583109254597271, 0.1719984604243841, 0.15882167953523868, 0.14629806736468026,
0.13445917450840261, 0.12319084426840514, 0.11223248927381424, 0.1015980557019588, 0.09142545599022904,
0.081668480529717452, 0.072468953693248822, 0.063928258534515003, 0.056001753406608033, 0.048743932958326124,
0.041802588232179423, 0.035101522630330222, 0.02867666906626436, 0.02251219113394174, 0.016870086696195132,
0.012203032131833588, 0.0088929844582203432, 0.0070918275961699868, 0.0066711747133233173,
0.0072428804475971774, 0.0083860995258808746, 0.0097614065810391702, 0.011048724774180429,
0.012172465782027605, 0.013128274321987767, 0.013989182312813094, 0.014721581888211779, 0.015091576466864029,
0.015464528204388825, 0.015843217509004082, 0.016231802091579359, 0.016639121429430848, 0.017084739273522918,
0.01773997818260406, 0.018849490040124539, 0.02080475173688074, 0.024087285853489973, 0.029226992633017944,
0.036936697060947266, 0.047929293847597317, 0.060944714746474546, 0.075406174668359888, 0.091046865452827058,
0.10774171520092823, 0.12568804676103465, 0.14438982231918299, 0.16360545206285368, 0.18353321701569614,
0.2038108267883281, 0.22428995565941548, 0.2443536345035382, 0.26404774026123351, 0.28365962358412389,
0.30281715853089847, 0.32168674343537401, 0.34068262113412245, 0.35985662275066926, 0.3794269734313343,
0.39902857174963258, 0.41878371236863016, 0.4383256915569721, 0.4570309241775834, 0.46917024181945122,
0.47765207472638233, 0.48614857539953127, 0.49467892251886364, 0.50328897638303671, 0.51199051319614997,
0.52079830709584352, 0.52968256162607963, 0.53862424449248303, 0.54758326315625216, 0.5565408624603303,
0.56546772135652934, 0.57433955827004624, 0.58313665651367408, 0.59184692530621508, 0.58590296327056635,
0.57735874436604884, 0.56881930412435755, 0.56028298187212466, 0.55174992075622276, 0.54322014447728495,
0.53469330587351305, 0.52616572629077818, 0.51763443796198483, 0.50909689477442011, 0.50055008427260361,
0.49199165320695176, 0.48341923670476783, 0.47483290995240157, 0.46623496209291942, 0.45762773054927841,
0.44901453814194897, 0.44039995287299516, 0.4317884318615568, 0.42318210559850516, 0.41457874400183614,
0.40597102079896424, 0.39734531333183631, 0.38867580804117158, 0.37472035632126111, 0.35798843608007719,
0.34260371465626138, 0.32579378067902565, 0.30790459838355583, 0.28947378877098268, 0.27080437851032618,
0.25219349838428928, 0.2339886216903119, 0.21687547199674909, 0.20079735633247459, 0.18532712876764518,
0.17056064984931954, 0.15642779485123762, 0.1429027923514796, 0.12985611545273426, 0.11727917565508073,
0.10527121276966807, 0.094060285420280915, 0.083539531938186098, 0.073683530113549875, 0.064300287033153908,
0.05543774235636216, 0.047276633804638299, 0.039859099066441653, 0.033293247774456948, 0.027543547557059112,
0.022688873425411496, 0.018621469180613526, 0.01521017312492229, 0.012374412964800189, 0.010091708788589795,
0.0083516795255471361, 0.0071457472086108682, 0.0064886857001648065, 0.0064589633781951495,
0.0072012803639131236, 0.0084592313142127992, 0.0099628815672237539, 0.011459358249358571,
0.012779314543907599, 0.013859552757009555, 0.014831827424233628, 0.015707804235302183, 0.016476629105313972,
0.017124162681032302, 0.017599824248874279, 0.017859156477827234, 0.018044508033644765, 0.018355209094968978,
0.0189437956560667, 0.019852445896942052, 0.02111756378804874, 0.022789968186689318, 0.024977187188072941,
0.027895862745880285, 0.031950372055218557, 0.037588588778261871, 0.04533056233583712, 0.055822234840144924,
0.069968350347510908, 0.087776687537675493, 0.10737468377114787, 0.12821035076580206, 0.15044469745596642,
0.17362187230229492, 0.19669809143045736, 0.21912315243493788, 0.24068652391295411, 0.26145843360157772,
0.28133441197302655, 0.30004538339492859, 0.31844882312742462, 0.33615129810939109, 0.35334033804825027,
0.37028646056272063, 0.38746091670143112, 0.40453870150700727, 0.42159077576957132, 0.43867880808116239,
0.45623948783429258, 0.47478550214217563, 0.49380275701393744, 0.51312039593731251, 0.53229698140034198,
0.55129315877161866, 0.56978888153128104, 0.58710361032175962, 0.58341425123099588, 0.57479639773828839,
0.56606344615541682, 0.55724171652949916, 0.54835385582575247, 0.53942198763233962, 0.53046261446303877,
0.52148646278510402, 0.51250007279640031, 0.50350876502613329, 0.49451906047773736, 0.4855352746837413,
0.47656089178059291, 0.46755740288318343, 0.4550004363416415, 0.43598191519521617, 0.41988712312866533,
0.40429718421512878, 0.38412957700706341, 0.36194152749150998, 0.34490944708774768, 0.33089537416508286,
0.31843692371717469, 0.30603079364513119, 0.29603205388303006, 0.28729132574620264, 0.27912308644933581,
0.27148434513119607, 0.26432819664136431, 0.25750239837768163, 0.25071881670058394, 0.24356039567950521,
0.2355533354243062, 0.20509731910932, 0.16672867150527124, 0.1341296755877526, 0.11125955554618865,
0.09692701936014482, 0.087328176919445388, 0.080094259688856292, 0.073799464543419666, 0.067944475445671365,
0.062342321861094521, 0.0569805979668234, 0.051942974839642352, 0.047580918705391967, 0.044127615724677841,
0.0418341075180933, 0.040870690190047604, 0.041480708123150085, 0.043897957838945616, 0.048196889426332976,
0.054137563561365593, 0.061440216297928292, 0.069867357712817768, 0.079160725920755168, 0.089016824414138662,
0.099169587281046315, 0.1095928058510479, 0.12022437353255157, 0.13087469640174931, 0.14144293620997725,
0.15190951272018277, 0.16220138318815183, 0.17227253066513942, 0.18214970522282159, 0.19181248461013878,
0.20099418417181689, 0.20942586270019001, 0.21688376406869997, 0.22317193882775607, 0.22807863521418933,
0.23145465177744451, 0.22275170134694525, 0.21167456568841683, 0.20448650060538701, 0.20035195998786937,
0.19873050852960142, 0.19894739292327762, 0.19976454924926443, 0.20136306785054825, 0.20410801332309747,
0.208202158687575, 0.21367928648426432, 0.22080903867324628, 0.22973684920775739, 0.23947452601540703,
0.25015722368718291, 0.26154577923034655, 0.2737333633066949, 0.28648040946590692, 0.29942957200956394,
0.31236907833383776, 0.32522048974438211, 0.33787977387002388, 0.35049395899871205, 0.36298438368904795,
0.37541899208156049, 0.38767793908429993, 0.39988864021492782, 0.41195610077681699, 0.42344726140834626,
0.43476462884645806, 0.44605983265929711, 0.4576986742113151, 0.45586726829610463, 0.44922601441496973,
0.44168452668729835, 0.43387591145471482, 0.42583828888101971, 0.41760581978617944, 0.40926082597855384,
0.4009653400758274, 0.39284487981410626, 0.38504750367582236, 0.37762077199750926, 0.37058335307091395,
0.36392956087081529, 0.35746851899807813, 0.35095083451569509, 0.34413357374603992, 0.3369217350490803,
0.32934274664472091, 0.32147511773380677, 0.31345560073080464, 0.30563707812089186, 0.29831314634318001,
0.29163826314367619, 0.2857196545217443, 0.28041523734778556, 0.27550989908860879, 0.27069205278198899,
0.26562146828333499, 0.26008520921592099, 0.25395035730782478, 0.24722689534425887, 0.24011313516630381,
0.23276222471534966, 0.2254231858576568, 0.21838761404941182, 0.21171650493351718, 0.2053682577636195,
0.19925909011478821, 0.19334632451152212, 0.18753526802757964, 0.18139200632675026, 0.17463581143641826,
0.16726267053558677, 0.1592823969674432, 0.15083312656761735, 0.14221287388185461, 0.13302046581134577,
0.12397988087238353, 0.11652444872694806, 0.11121051712204368, 0.10801788664069349, 0.10649137286269583,
0.10692295445348236, 0.10970887586207011, 0.11257798368982895, 0.11407552833474931, 0.11437870959615407,
0.11507588655366244, 0.11623556110471292, 0.11833754623617672, 0.12358299844399326, 0.1354359613669962,
0.15561082504716245, 0.18405330343961113, 0.22328937963124634, 0.27501647400376261, 0.32883204162150698,
0.37966690910521178, 0.40563193821719107, 0.40271430308450157, 0.39926529436010466, 0.39541915880995082,
0.39125950000896736, 0.38683634449565146, 0.3821740806972011, 0.37723632322475886, 0.37216271944678503,
0.36703693607250654, 0.36184147516213638, 0.35664499640077069, 0.35115870679770095, 0.34283220688406396,
0.33476770880169132, 0.32656808560457296, 0.31784571460652505, 0.30998989903988661, 0.30349254148780686,
0.29821422662603986, 0.2952734732313389, 0.29378878657366364, 0.29304146771484824, 0.29280987307579681,
0.29294884761108991, 0.29463522395066244, 0.29667192503740564, 0.2986776559958283, 0.30085995623138612,
0.30344545200431589, 0.29031957100029954, 0.28320405268126264, 0.28340007925518212, 0.28384009098514856,
0.28437205864600462, 0.2848820680207374, 0.28526618756878463, 0.28545394325725021, 0.27608205256033602,
0.2710246538612559, 0.26809391189736248, 0.26534429147896643, 0.26422949415775154, 0.26535054160673949,
0.26808620795502736, 0.27094846083018215, 0.27294021055971107, 0.27298688704727608, 0.27291271616871027,
0.27286867861446901, 0.27286642058724797, 0.27292487121503878, 0.27307411858764896, 0.27330852543614587,
0.27357213283170573, 0.27423583772879678, 0.27512977403136363, 0.27646640834786812, 0.28079849986837918,
0.28784971517512037, 0.29128857096087835, 0.29084310546334524, 0.28987632769887078, 0.28697612648899906,
0.27849302709726087, 0.27040182687763575, 0.26313598066475358, 0.25736953519198241, 0.25390598541742998,
0.25252011424599741, 0.25158379281330517, 0.25087691546326274, 0.25016368106446818, 0.24916780826531498,
0.24757336510865122, 0.24533318976705304, 0.24241758801732174, 0.2389234940494453, 0.23500048496638054,
0.22517230687318487, 0.20968403325682883, 0.1954168931013287, 0.18653916776633384, 0.18331583090627679,
0.18215909901887159, 0.18145950323339111, 0.18111948230345445, 0.18111924333514509, 0.1814137460697513,
0.1818535851353055, 0.18229296467977235, 0.18262723505235368, 0.1827770738594422, 0.18265681180591548,
0.1821994799425449, 0.17608807141805072, 0.16872983107947212, 0.16240144702190812, 0.15776702584744981,
0.15464840228225418, 0.15237019406976063, 0.15069387855224797, 0.14957855280129848, 0.14887084600985745,
0.14944410780100323, 0.15119699571390499, 0.1536553381027834, 0.15630072203410425, 0.15934025175622293,
0.16403903814992299, 0.16937677296024786, 0.17377070391008656, 0.17912562063261195, 0.18733224310885299,
0.19879874326035937, 0.21231634467295796, 0.22825017488658014, 0.24753159238992894, 0.25631758902797996,
0.25757325062304964, 0.25869443202710857, 0.25978688224240926, 0.26099281158466298, 0.26238203918045533,
0.26398376588674261, 0.26574486564100647, 0.26766362036726948, 0.26971398165645072, 0.27181052886496732,
0.27387207451181622, 0.27594100769380986, 0.27798838385600128, 0.28017436118818662, 0.2824514682885908,
0.28474846618476674, 0.28705095901637745, 0.28919778908299615, 0.29114421238617272, 0.29274340523878489,
0.2939492708455681])
possion = np.array(
[3000, 3005, 3010, 3015, 3020, 3025, 3030, 3035, 3040, 3045, 3050, 3055, 3060, 3065, 3070, 3075, 3080, 3085,
3090, 3095, 3100, 3105, 3110, 3115, 3120, 3125, 3130, 3135, 3140, 3145, 3150, 3155, 3160, 3165, 3170, 3175,
3180, 3185, 3190, 3195, 3200, 3205, 3210, 3215, 3220, 3225, 3230, 3235, 3240, 3245, 3250, 3255, 3260, 3265,
3270, 3275, 3280, 3285, 3290, 3295, 3300, 3305, 3310, 3315, 3320, 3325, 3330, 3335, 3340, 3345, 3350, 3355,
3360, 3365, 3370, 3375, 3380, 3385, 3390, 3395, 3400, 3405, 3410, 3415, 3420, 3425, 3430, 3435, 3440, 3445,
3450, 3455, 3460, 3465, 3470, 3475, 3480, 3485, 3490, 3495, 3500, 3505, 3510, 3515, 3520, 3525, 3530, 3535,
3540, 3545, 3550, 3555, 3560, 3565, 3570, 3575, 3580, 3585, 3590, 3595, 3600, 3605, 3610, 3615, 3620, 3625,
3630, 3635, 3640, 3645, 3650, 3655, 3660, 3665, 3670, 3675, 3680, 3685, 3690, 3695, 3700, 3705, 3710, 3715,
3720, 3725, 3730, 3735, 3740, 3745, 3750, 3755, 3760, 3765, 3770, 3775, 3780, 3785, 3790, 3795, 3800, 3805,
3810, 3815, 3820, 3825, 3830, 3835, 3840, 3845, 3850, 3855, 3860, 3865, 3870, 3875, 3880, 3885, 3890, 3895,
3900, 3905, 3910, 3915, 3920, 3925, 3930, 3935, 3940, 3945, 3950, 3955, 3960, 3965, 3970, 3975, 3980, 3985,
3990, 3995, 4000, 4005, 4010, 4015, 4020, 4025, 4030, 4035, 4040, 4045, 4050, 4055, 4060, 4065, 4070, 4075,
4080, 4085, 4090, 4095, 4100, 4105, 4110, 4115, 4120, 4125, 4130, 4135, 4140, 4145, 4150, 4155, 4160, 4165,
4170, 4175, 4180, 4185, 4190, 4195, 4200, 4205, 4210, 4215, 4220, 4225, 4230, 4235, 4240, 4245, 4250, 4255,
4260, 4265, 4270, 4275, 4280, 4285, 4290, 4295, 4300, 4305, 4310, 4315, 4320, 4325, 4330, 4335, 4340, 4345,
4350, 4355, 4360, 4365, 4370, 4375, 4380, 4385, 4390, 4395, 4400, 4405, 4410, 4415, 4420, 4425, 4430, 4435,
4440, 4445, 4450, 4455, 4460, 4465, 4470, 4475, 4480, 4485, 4490, 4495, 4500, 4505, 4510, 4515, 4520, 4525,
4530, 4535, 4540, 4545, 4550, 4555, 4560, 4565, 4570, 4575, 4580, 4585, 4590, 4595, 4600, 4605, 4610, 4615,
4620, 4625, 4630, 4635, 4640, 4645, 4650, 4655, 4660, 4665, 4670, 4675, 4680, 4685, 4690, 4695, 4700, 4705,
4710, 4715, 4720, 4725, 4730, 4735, 4740, 4745, 4750, 4755, 4760, 4765, 4770, 4775, 4780, 4785, 4790, 4795,
4800, 4805, 4810, 4815, 4820, 4825, 4830, 4835, 4840, 4845, 4850, 4855, 4860, 4865, 4870, 4875, 4880, 4885,
4890, 4895, 4900, 4905, 4910, 4915, 4920, 4925, 4930, 4935, 4940, 4945, 4950, 4955, 4960, 4965, 4970, 4975,
4980, 4985, 4990, 4995, 5000, 5005, 5010, 5015, 5020, 5025, 5030, 5035, 5040, 5045, 5050, 5055, 5060, 5065,
5070, 5075, 5080, 5085, 5090, 5095, 5100, 5105, 5110, 5115, 5120, 5125, 5130, 5135, 5140, 5145, 5150, 5155,
5160, 5165, 5170, 5175, 5180, 5185, 5190, 5195, 5200, 5205, 5210, 5215, 5220, 5225, 5230, 5235, 5240, 5245,
5250, 5255, 5260, 5265, 5270, 5275, 5280, 5285, 5290, 5295, 5300, 5305, 5310, 5315, 5320, 5325, 5330, 5335,
5340, 5345, 5350, 5355, 5360, 5365, 5370, 5375, 5380, 5385, 5390, 5395, 5400, 5405, 5410, 5415, 5420, 5425,
5430, 5435, 5440, 5445, 5450, 5455, 5460, 5465, 5470, 5475, 5480, 5485, 5490, 5495, 5500, 5505, 5510, 5515,
5520, 5525, 5530, 5535, 5540, 5545, 5550, 5555, 5560, 5565, 5570, 5575, 5580, 5585, 5590, 5595, 5600, 5605,
5610, 5615, 5620, 5625, 5630, 5635, 5640, 5645, 5650, 5655, 5660, 5665, 5670, 5675, 5680, 5685, 5690, 5695,
5700, 5705, 5710, 5715, 5720, 5725, 5730, 5735, 5740, 5745, 5750, 5755, 5760, 5765, 5770, 5775, 5780, 5785,
5790, 5795, 5800, 5805, 5810, 5815, 5820, 5825, 5830, 5835, 5840, 5845, 5850, 5855, 5860, 5865, 5870, 5875,
5880, 5885, 5890, 5895, 5900, 5905, 5910, 5915, 5920, 5925, 5930, 5935, 5940, 5945, 5950, 5955, 5960, 5965,
5970, 5975, 5980, 5985, 5990, 5995, 6000, 6005, 6010, 6015, 6020, 6025, 6030, 6035, 6040, 6045, 6050, 6055,
6060, 6065, 6070, 6075, 6080, 6085, 6090, 6095, 6100, 6105, 6110, 6115, 6120, 6125, 6130, 6135, 6140, 6145,
6150, 6155, 6160, 6165, 6170, 6175, 6180, 6185, 6190, 6195, 6200, 6205, 6210, 6215, 6220, 6225, 6230, 6235,
6240, 6245, 6250, 6255, 6260, 6265, 6270, 6275, 6280, 6285, 6290, 6295, 6300, 6305, 6310, 6315, 6320, 6325,
6330, 6335, 6340, 6345, 6350, 6355, 6360, 6365, 6370, 6375, 6380, 6385, 6390, 6395, 6400, 6405, 6410, 6415,
6420, 6425, 6430, 6435, 6440, 6445, 6450, 6455, 6460, 6465, 6470, 6475, 6480, 6485, 6490, 6495, 6500, 6505,
6510, 6515, 6520, 6525, 6530, 6535, 6540, 6545, 6550, 6555, 6560, 6565, 6570, 6575, 6580, 6585, 6590, 6595,
6600, 6605, 6610, 6615, 6620, 6625, 6630, 6635, 6640, 6645, 6650, 6655, 6660, 6665, 6670, 6675, 6680, 6685,
6690, 6695, 6700, 6705, 6710, 6715, 6720, 6725, 6730, 6735, 6740, 6745, 6750, 6755, 6760, 6765, 6770, 6775,
6780, 6785, 6790, 6795, 6800, 6805, 6810, 6815, 6820, 6825, 6830, 6835, 6840, 6845, 6850, 6855, 6860, 6865,
6870, 6875, 6880, 6885, 6890, 6895, 6900, 6905, 6910, 6915, 6920, 6925, 6930, 6935, 6940, 6945, 6950, 6955,
6960, 6965, 6970, 6975, 6980, 6985, 6990, 6995, 7000, 7005, 7010, 7015, 7020, 7025, 7030, 7035, 7040, 7045,
7050, 7055, 7060, 7065, 7070, 7075, 7080, 7085, 7090, 7095, 7100, 7105, 7110, 7115, 7120, 7125, 7130, 7135,
7140, 7145, 7150, 7155, 7160, 7165, 7170, 7175, 7180, 7185, 7190, 7195, 7200, 7205, 7210, 7215, 7220, 7225,
7230, 7235, 7240, 7245, 7250, 7255, 7260, 7265, 7270, 7275, 7280, 7285, 7290, 7295, 7300, 7305, 7310, 7315,
7320, 7325, 7330, 7335, 7340, 7345, 7350, 7355, 7360, 7365, 7370, 7375, 7380, 7385, 7390, 7395, 7400, 7405,
7410, 7415, 7420, 7425, 7430, 7435, 7440, 7445, 7450, 7455, 7460, 7465, 7470, 7475, 7480, 7485, 7490, 7495,
7500, 7505, 7510, 7515, 7520, 7525, 7530, 7535, 7540, 7545, 7550, 7555, 7560, 7565, 7570, 7575, 7580, 7585,
7590, 7595, 7600, 7605, 7610, 7615, 7620, 7625, 7630, 7635, 7640, 7645, 7650, 7655, 7660, 7665, 7670, 7675,
7680, 7685, 7690, 7695, 7700, 7705, 7710, 7715, 7720, 7725, 7730, 7735, 7740, 7745, 7750, 7755, 7760, 7765,
7770, 7775, 7780, 7785, 7790, 7795, 7800, 7805, 7810, 7815, 7820, 7825, 7830, 7835, 7840, 7845, 7850, 7855,
7860, 7865, 7870, 7875, 7880, 7885, 7890, 7895, 7900, 7905, 7910, 7915, 7920, 7925, 7930, 7935, 7940, 7945,
7950, 7955, 7960, 7965, 7970, 7975, 7980, 7985, 7990, 7995])
def peakdet(v, delta, x=None):
    """Detect local maxima and minima of the 1-D sequence v.

    A candidate maximum (minimum) is only accepted once the signal has fallen
    (risen) by at least delta on the far side of it. Returns two arrays of
    (position, value) pairs: maxima first, minima second. Positions come from
    x when it is given, otherwise from the sample index.
    """
    maxtab = []
    mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab)
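
# A minimal sanity check of peakdet on a tiny synthetic signal (added as an
# illustration; the values and the delta of 0.5 are arbitrary, and it assumes the
# numpy names used by peakdet are imported at the top of this file, just as the
# __main__ block below does). With delta=0.5 the bump at index 2 is reported as a
# maximum and the dip at index 4 as a minimum.
import numpy as _np
_demo_signal = [0.0, 0.4, 1.0, 0.3, -0.2, 0.5, 0.1]
_demo_max, _demo_min = peakdet(_demo_signal, 0.5)
assert _np.allclose(_demo_max, [[2, 1.0]])
assert _np.allclose(_demo_min, [[4, -0.2]])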
if __name__ == "__main__":
from matplotlib.pyplot import plot, scatter, show
maxtab, mintab = peakdet(series, .3)
plot(series)
scatter(array(mintab)[:, 0], array(mintab)[:, 1], color='red')
show()
| 30,988 | 98.00639 | 120 | py |
OpenBCIPython | OpenBCIPython-master/try/05_nonlinear_svm.py | # Nonlinear SVM Example
#----------------------------------
#
# This function will illustrate how to
# implement the gaussian kernel on
# the iris dataset.
#
# Gaussian Kernel:
# K(x1, x2) = exp(-gamma * abs(x1 - x2)^2)
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Create graph
sess = tf.Session()
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])
y_vals = np.array([1 if y==0 else -1 for y in iris.target])
class1_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i]==1]
class1_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i]==1]
class2_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i]==-1]
class2_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i]==-1]
# Declare batch size
batch_size = 150
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)
# Create variables for svm
b = tf.Variable(tf.random_normal(shape=[1,batch_size]))
# Gaussian (RBF) kernel
gamma = tf.constant(-25.0)
dist = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
# full pairwise squared distances: ||xi||^2 - 2*xi.xj + ||xj||^2, matching the kernel stated above
sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))
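
# Quick numpy illustration (added, not part of the original example) of the
# identity the kernel above relies on: ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2,
# evaluated for all pairs at once. The tiny 3x2 matrix is arbitrary.
_pts = np.array([[0., 1.], [2., 3.], [4., 5.]])
_norms = np.sum(_pts ** 2, axis=1, keepdims=True)
_pairwise_sq = _norms - 2. * _pts.dot(_pts.T) + _norms.T
_direct = np.array([[np.sum((a - b) ** 2) for b in _pts] for a in _pts])
assert np.allclose(_pairwise_sq, _direct)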
# Compute SVM Model
first_term = tf.reduce_sum(b)
b_vec_cross = tf.matmul(tf.transpose(b), b)
y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)))
loss = tf.negative(tf.subtract(first_term, second_term))
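
# Added illustration (not part of the original recipe): the dual objective built
# above is -( sum(b) - sum_ij b_i*b_j*y_i*y_j*K_ij ). The toy numbers below are
# arbitrary and simply check that the matrix form equals the explicit double sum.
_b = np.array([[0.5, -0.25, 0.75]])
_y = np.array([[1.], [-1.], [1.]])
_K = np.array([[1.0, 0.5, 0.2], [0.5, 1.0, 0.4], [0.2, 0.4, 1.0]])
_matrix_form = -(np.sum(_b) - np.sum(_K * _b.T.dot(_b) * _y.dot(_y.T)))
_double_sum = -(np.sum(_b) - sum(_b[0, i] * _b[0, j] * _y[i, 0] * _y[j, 0] * _K[i, j]
                                 for i in range(3) for j in range(3)))
assert np.isclose(_matrix_form, _double_sum)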
# Gaussian (RBF) prediction kernel
rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1),[-1,1])
rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1),[-1,1])
pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))
prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target),b), pred_kernel)
prediction = tf.sign(prediction_output-tf.reduce_mean(prediction_output))
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.squeeze(prediction), tf.squeeze(y_target)), tf.float32))
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
# Initialize variables
init = tf.initialize_all_variables()
sess.run(init)
# Training loop
loss_vec = []
batch_accuracy = []
for i in range(300):
rand_index = np.random.choice(len(x_vals), size=batch_size)
rand_x = x_vals[rand_index]
rand_y = np.transpose([y_vals[rand_index]])
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
loss_vec.append(temp_loss)
acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x,
y_target: rand_y,
prediction_grid:rand_x})
batch_accuracy.append(acc_temp)
if (i+1)%75==0:
print('Step #' + str(i+1))
print('Loss = ' + str(temp_loss))
# Create a mesh to plot points in
x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1
y_min, y_max = x_vals[:, 1].min() - 1, x_vals[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
np.arange(y_min, y_max, 0.02))
grid_points = np.c_[xx.ravel(), yy.ravel()]
[grid_predictions] = sess.run(prediction, feed_dict={x_data: rand_x,
y_target: rand_y,
prediction_grid: grid_points})
grid_predictions = grid_predictions.reshape(xx.shape)
# Plot points and grid
plt.contourf(xx, yy, grid_predictions, cmap=plt.cm.Paired, alpha=0.8)
plt.plot(class1_x, class1_y, 'ro', label='I. setosa')
plt.plot(class2_x, class2_y, 'kx', label='Non setosa')
plt.title('Gaussian SVM Results on Iris Data')
plt.xlabel('Sepal Length')
plt.ylabel('Petal Width')
plt.legend(loc='lower right')
plt.ylim([-0.5, 3.0])
plt.xlim([3.5, 8.5])
plt.show()
# Plot batch accuracy
plt.plot(batch_accuracy, 'k-', label='Accuracy')
plt.title('Batch Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
| 4,558 | 34.069231 | 123 | py |
OpenBCIPython | OpenBCIPython-master/try/polynomial.py | """Simple tutorial for using TensorFlow to compute polynomial regression.
Parag K. Mital, Jan. 2016"""
# %% Imports
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# %% Let's create some toy data
plt.ion()
n_observations = 100
fig, ax = plt.subplots(1, 1)
xs = np.linspace(-3, 3, n_observations)
ys = np.sin(xs) + np.random.uniform(-0.5, 0.5, n_observations)
ax.scatter(xs, ys)
fig.show()
plt.draw()
# %% tf.placeholders for the input and output of the network. Placeholders are
# variables which we need to fill in when we are ready to compute the graph.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# %% Instead of a single factor and a bias, we'll create a polynomial function
# of different polynomial degrees. We will then learn the influence that each
# degree of the input (X^0, X^1, X^2, ...) has on the final output (Y).
Y_pred = tf.Variable(tf.random_normal([1]), name='bias')
for pow_i in range(1, 5):
W = tf.Variable(tf.random_normal([1]), name='weight_%d' % pow_i)
Y_pred = tf.add(tf.multiply(tf.pow(X, pow_i), W), Y_pred)
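
# The loop above just builds y = bias + w1*x + w2*x^2 + w3*x^3 + w4*x^4 symbolically.
# A plain-Python sketch of the same expansion (added for illustration; the
# coefficients and the evaluation point are arbitrary):
_coeffs = [0.5, 1.0, -0.5, 0.25, -0.1]   # [bias, w1, w2, w3, w4]
_x0 = 2.0
_poly_val = sum(c * _x0 ** p for p, c in enumerate(_coeffs))
assert abs(_poly_val - 0.9) < 1e-6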
# %% Loss function will measure the distance between our observations
# and predictions and average over them.
cost = tf.reduce_sum(tf.pow(Y_pred - Y, 2)) / (n_observations - 1)
# %% if we wanted to add regularization, we could add other terms to the cost,
# e.g. ridge regression has a parameter controlling the amount of shrinkage
# over the norm of activations. the larger the shrinkage, the more robust
# to collinearity.
# cost = tf.add(cost, tf.mul(1e-6, tf.global_norm([W])))
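
# A sketch of the regularization idea described above, written with current op
# names (added for illustration; it is intentionally NOT used in place of `cost`
# below, so the behaviour of the original tutorial is unchanged; W here is simply
# the last weight variable created in the loop above).
ridge_cost = cost + 1e-6 * tf.nn.l2_loss(W)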
# %% Use gradient descent to optimize W,b
# Performs a single step in the negative gradient
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# %% We create a session to use the graph
n_epochs = 1000
with tf.Session() as sess:
# Here we tell tensorflow that we want to initialize all
# the variables in the graph so we can use them
sess.run(tf.initialize_all_variables())
# Fit all training data
prev_training_cost = 0.0
for epoch_i in range(n_epochs):
for (x, y) in zip(xs, ys):
sess.run(optimizer, feed_dict={X: x, Y: y})
training_cost = sess.run(
cost, feed_dict={X: xs, Y: ys})
print(training_cost)
if epoch_i % 100 == 0:
ax.plot(xs, Y_pred.eval(
feed_dict={X: xs}, session=sess),
'k', alpha=epoch_i / n_epochs)
fig.show()
plt.draw()
# Allow the training to quit if we've reached a minimum
if np.abs(prev_training_cost - training_cost) < 0.000001:
break
prev_training_cost = training_cost
ax.set_ylim([-3, 3])
fig.show()
plt.waitforbuttonpress() | 2,772 | 35.012987 | 78 | py |
OpenBCIPython | OpenBCIPython-master/try/try_maltivatiate.py | import tensorflow as tf
import numpy as np
# generate some dataset
DIMENSIONS = 5
DS_SIZE = 5000
TRAIN_RATIO = 0.5 # 50% of the dataset is used for training
_train_size = int(DS_SIZE*TRAIN_RATIO)
_test_size = DS_SIZE - _train_size
f = lambda x: sum(x) # the "true" function: f = 0 + 1*x1 + 1*x2 + 1*x3 ...
noise = lambda: np.random.normal(0,10) # some noise
# training globals
LAMBDA = 1e6 # L2 regularization factor
# generate the dataset, the labels and split into train/test
ds = [[np.random.rand()*1000 for d in range(DIMENSIONS)] for _ in range(DS_SIZE)]
ds = [([1]+x, [f(x)+noise()]) for x in ds] # add x[0]=1 dimension and labels
np.random.shuffle(ds)
train_data, train_labels = zip(*ds[0:_train_size])
test_data, test_labels = zip(*ds[_train_size:])
def normalize_data(matrix):
averages = np.average(matrix,0)
mins = np.min(matrix,0)
maxes = np.max(matrix,0)
ranges = maxes - mins
return ((matrix - averages)/ranges)
def run_regression(X, Y, X_test, Y_test, lambda_value = 0.1, normalize=False, batch_size=10, alpha=1e-8):
x_train = normalize_data(X) if normalize else X
y_train = Y
x_test = X_test
y_test = Y_test
session = tf.Session()
# Calculate number of features for X and Y
x_features_length = len(X[0])
y_features_length = len(Y[0])
# Build Tensorflow graph parts
x = tf.placeholder('float', [None, x_features_length], name="X")
y = tf.placeholder('float', [None, y_features_length], name="Y")
theta = tf.Variable(tf.random_normal([x_features_length, y_features_length], stddev=0.01), name="Theta")
lambda_val = tf.constant(lambda_value)
# Trying to implement this way http://openclassroom.stanford.edu/MainFolder/DocumentPage.php?course=MachineLearning&doc=exercises/ex5/ex5.html
y_predicted = tf.matmul(x, theta, name="y_predicted")
#regularization_cost_part = tf.cast(tf.multiply(lambda_val,tf.reduce_sum(tf.pow(theta,2)), name="regularization_param"), 'float')
#polynomial_cost_part = tf.reduce_sum(tf.pow(tf.subtract(y_predicted, y), 2), name="polynomial_sum")
# Set up some summary info to debug
with tf.name_scope('cost') as scope:
#cost_func = tf.multiply(tf.cast(1/(2*batch_size), 'float'), tf.cast(tf.add(polynomial_cost_part, regularization_cost_part), 'float'))
cost_func = (tf.nn.l2_loss(y_predicted - y)+lambda_val*tf.nn.l2_loss(theta))/float(batch_size)
#DEPRECATED*** cost_summary = tf.scalar_summary("cost", cost_func)
cost_summary = tf.summary.scalar('cost', cost_func)# Add a scalar summary for the snapshot loss.
training_func = tf.train.GradientDescentOptimizer(alpha).minimize(cost_func)
with tf.name_scope("test") as scope:
correct_prediction = tf.subtract(tf.cast(1, 'float'), tf.reduce_mean(tf.subtract(y_predicted, y)))
accuracy = tf.cast(correct_prediction, "float")
#DEPRECATED*** accuracy_summary = tf.scalar_summary("accuracy", accuracy)
#accuracy_summary = tf.summary.scalar("accuracy", accuracy)
saver = tf.train.Saver()
#DEPRECATED*** merged = tf.merge_all_summaries()
merged = tf.summary.merge_all()
#DEPRECATED*** writer = tf.train.SummaryWriter("/tmp/football_logs", session.graph_def)
writer = tf.summary.FileWriter("/tmp/football_logs", session.graph)
#DEPRECATED*** init = tf.initialize_all_variables()
init = tf.global_variables_initializer()
session.run(init)
for i in range(1, (len(x_train)/batch_size)):
session.run(training_func, feed_dict={x: x_train[i*batch_size:i*batch_size+batch_size], y: y_train[i*batch_size:i*batch_size+batch_size]})
if i % batch_size == 0:
print "test accuracy %g"%session.run(accuracy, feed_dict={x: x_test, y: y_test})
#result = session.run([merged, accuracy], feed_dict={x: x_test, y: y_test})
# writer.add_summary(result[0], i)
# print "step %d, training accuracy %g"%(i, result[1])
#writer.flush()
print "final test accuracy %g"%session.run(accuracy, feed_dict={x: x_test, y: y_test})
# save_path = saver.save(session, "/tmp/football.ckpt")
# print "Model saved in file: ", save_path
session.close()
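
# Added illustration (not part of the original script): the cost built inside
# run_regression is (||X.theta - y||^2 / 2 + lambda * ||theta||^2 / 2) / batch_size,
# since tf.nn.l2_loss(t) = sum(t**2) / 2. The toy numbers below just reproduce that
# arithmetic in plain numpy.
_Xt = np.array([[1., 2.], [1., 3.]])
_yt = np.array([[5.], [7.]])
_theta_t = np.array([[1.], [2.]])
_lam_t, _bs_t = 0.1, 2
_resid_t = _Xt.dot(_theta_t) - _yt
_cost_t = (0.5 * np.sum(_resid_t ** 2) + _lam_t * 0.5 * np.sum(_theta_t ** 2)) / float(_bs_t)
assert abs(_cost_t - 0.125) < 1e-9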
run_regression(train_data, train_labels, test_data, test_labels, normalize=False, alpha=1e-8) | 4,287 | 51.292683 | 146 | py |
OpenBCIPython | OpenBCIPython-master/try/try_rnnn.py | from __future__ import division
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib import layers as tflayers
tf.logging.set_verbosity(tf.logging.INFO)
# Create input data
X = np.random.randn(2, 10, 8)
# The second example is of length 6
X[1, 6:] = 0
X_lengths = [10, 6]
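
# Added check (not in the original snippet): every entry of example 1 past its
# stated length of 6 is zero padding, which is exactly what sequence_length tells
# dynamic_rnn to ignore below.
assert np.all(X[1, X_lengths[1]:] == 0)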
cell = tf.contrib.rnn.BasicLSTMCell(num_units=64, state_is_tuple=True)
outputs, last_states = tf.nn.dynamic_rnn(
cell=cell,
dtype=tf.float64,
sequence_length=X_lengths,
inputs=X)
result = tf.contrib.learn.run_n(
{"outputs": outputs, "last_states": last_states},
n=1,
feed_dict=None)
assert result[0]["outputs"].shape == (2, 10, 64)
# Outputs for the second example past length 6 should be 0
assert (result[0]["outputs"][1, 7, :] == np.zeros(cell.output_size)).all() | 898 | 26.242424 | 85 | py |
OpenBCIPython | OpenBCIPython-master/try/logistic_regression.py | from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
iris = datasets.load_iris()
def my_model(features, labels):
"""DNN with three hidden layers."""
# Convert the labels to a one-hot tensor of shape (length of features, 3) and
    # with an on-value of 1 for each one-hot vector of length 3.
labels = tf.one_hot(labels, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10.
features = tf.contrib.layers.stack(features, tf.contrib.layers.fully_connected, [10, 20, 10])
# Create two tensors respectively for prediction and loss.
prediction, loss = (
tf.contrib.learn.models.logistic_regression(features, labels)
)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
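
# Small added illustration (plain Python, no extra imports) of what the
# tf.one_hot(labels, 3, 1, 0) call inside my_model produces: class k becomes a
# length-3 vector with a 1 in position k and 0 elsewhere.
_one_hot_demo = [[1 if k == label else 0 for k in range(3)] for label in (0, 1, 2)]
assert _one_hot_demo == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]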
classifier = tf.contrib.learn.Estimator(model_fn=my_model)
classifier.fit(iris.data, iris.target, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(iris.data, as_iterable=True)]
score = metrics.accuracy_score(iris.target, y_predicted)
print('Accuracy: {0:f}'.format(score)) | 1,253 | 34.828571 | 95 | py |
OpenBCIPython | OpenBCIPython-master/try/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/try/rnn.py | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
num_steps = 5 # number of truncated backprop steps
batch_size = 200
num_classes = 2
state_size = 4
learning_rate = 0.1
def gen_data(size=1000000):
X = np.array(np.random.choice(2, size=(size,)))
Y = []
for i in range(size):
threshold = 0.5
if X[i-3] == 1:
threshold += 0.5
if X[i-8] == 1:
threshold -= 0.25
if np.random.rand() > threshold:
Y.append(0)
else:
Y.append(1)
return X, np.array(Y)
# adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/reader.py
def gen_batch(raw_data, batch_size, num_steps):
raw_x, raw_y = raw_data
data_length = len(raw_x)
# partition raw data into batches and stack them vertically in a data matrix
batch_partition_length = data_length // batch_size
data_x = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
data_y = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
for i in range(batch_size):
data_x[i] = raw_x[batch_partition_length * i:batch_partition_length * (i + 1)]
data_y[i] = raw_y[batch_partition_length * i:batch_partition_length * (i + 1)]
# further divide batch partitions into num_steps for truncated backprop
epoch_size = batch_partition_length // num_steps
for i in range(epoch_size):
x = data_x[:, i * num_steps:(i + 1) * num_steps]
y = data_y[:, i * num_steps:(i + 1) * num_steps]
yield (x, y)
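
# Quick shape check of the batching scheme above (added illustration; the small
# sizes here are arbitrary and independent of the batch_size/num_steps used below).
_bx, _by = next(gen_batch(gen_data(size=1000), 4, 5))
assert _bx.shape == (4, 5) and _by.shape == (4, 5)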
def gen_epochs(n, num_steps):
for i in range(n):
yield gen_batch(gen_data(), batch_size, num_steps)
"""
Placeholders
"""
x = tf.placeholder(tf.int32, [batch_size, num_steps], name='input_placeholder')
y = tf.placeholder(tf.int32, [batch_size, num_steps], name='labels_placeholder')
init_state = tf.zeros([batch_size, state_size])
"""
RNN Inputs
"""
# Turn our x placeholder into a list of one-hot tensors:
# rnn_inputs is a list of num_steps tensors with shape [batch_size, num_classes]
x_one_hot = tf.one_hot(x, num_classes)
rnn_inputs = tf.unstack(x_one_hot, axis=1)
"""
Definition of rnn_cell
This is very similar to the __call__ method on Tensorflow's BasicRNNCell. See:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py#L95
"""
with tf.variable_scope('rnn_cell'):
W = tf.get_variable('W', [num_classes + state_size, state_size])
b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
def rnn_cell(rnn_input, state):
with tf.variable_scope('rnn_cell', reuse=True):
W = tf.get_variable('W', [num_classes + state_size, state_size])
b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
return tf.tanh(tf.matmul(tf.concat([rnn_input, state], 1), W) + b)
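
# Shape bookkeeping for the cell above (added note with a tiny numpy check): the
# concatenated [input, state] row has num_classes + state_size columns, which
# matches the number of rows of W, so the matmul yields a [batch, state_size] state.
_concat_cols = np.concatenate(
    [np.zeros((3, num_classes)), np.zeros((3, state_size))], axis=1).shape[1]
assert _concat_cols == num_classes + state_size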
"""
Adding rnn_cells to graph
This is a simplified version of the "static_rnn" function from Tensorflow's api. See:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn.py#L41
Note: In practice, using "dynamic_rnn" is a better choice than the "static_rnn":
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn.py#L390
"""
state = init_state
rnn_outputs = []
for rnn_input in rnn_inputs:
state = rnn_cell(rnn_input, state)
rnn_outputs.append(state)
final_state = rnn_outputs[-1]
"""
Predictions, loss, training step
Losses is similar to the "sequence_loss"
function from Tensorflow's API, except that here we are using a list of 2D tensors, instead of a 3D tensor. See:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/seq2seq/python/ops/loss.py#L30
"""
#logits and predictions
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [state_size, num_classes])
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
logits = [tf.matmul(rnn_output, W) + b for rnn_output in rnn_outputs]
predictions = [tf.nn.softmax(logit) for logit in logits]
# Turn our y placeholder into a list of labels
y_as_list = tf.unstack(y, num=num_steps, axis=1)
#losses and train_step
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logit) for \
logit, label in zip(logits, y_as_list)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)
"""
Train the network
"""
def train_network(num_epochs, num_steps, state_size=4, verbose=True):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
training_losses = []
for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
training_loss = 0
training_state = np.zeros((batch_size, state_size))
if verbose:
print("\nEPOCH", idx)
for step, (X, Y) in enumerate(epoch):
tr_losses, training_loss_, training_state, _ = \
sess.run([losses,
total_loss,
final_state,
train_step],
feed_dict={x:X, y:Y, init_state:training_state})
training_loss += training_loss_
if step % 100 == 0 and step > 0:
if verbose:
print("Average loss at step", step,
"for last 250 steps:", training_loss/100)
training_losses.append(training_loss/100)
training_loss = 0
return training_losses
# training_losses = train_network(1,num_steps)
# plt.plot(training_losses)
#
# plt.show()
def plot_learning_curve(num_steps, state_size=4, epochs=1):
global losses, total_loss, final_state, train_step, x, y, init_state
tf.reset_default_graph()
g = tf.get_default_graph()
losses, total_loss, final_state, train_step, x, y, init_state = \
basic_rnn.setup_graph(g,
basic_rnn.RNN_config(num_steps=num_steps, state_size=state_size))
res = train_network(epochs, num_steps, state_size=state_size, verbose=False)
plt.plot(res)
# plot_learning_curve depends on a companion basic_rnn module that is not imported
# in this file, so the call is left commented out here.
# plot_learning_curve(num_steps=10, state_size=16, epochs=10)
| 6,302 | 36.076471 | 112 | py |
OpenBCIPython | OpenBCIPython-master/try/estimator_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return const, const, control_flow_ops.group(train_op_1, training_op_2)
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, control_flow_ops.no_op()
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(
expected_features, expected_labels, actual_features, actual_labels, mode):
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.))
def _make_input_fn(features, labels):
def _input_fn():
return {
k: constant_op.constant(v)
for k, v in six.iteritems(features)
}, constant_op.constant(labels)
return _input_fn
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
# self.assertItemsEqual(features.keys(), arg0.keys())
# self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
# self.assertEqual(expected_params, params)
# self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
# self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
# self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
# self.assertEqual(expected_foo, foo)
# self.assertEqual(expected_bar, bar)
# self.assertItemsEqual(features.keys(), arg0.keys())
# self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
# self.assertEqual(expected_params, params)
# self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn, params=expected_params,
config=expected_config)
# self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
# self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
# self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
# self.assertEqual(expected_param, params)
# self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(est.model_dir + '/export', serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
# Lambdas so we have to different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
# We pass empty array and expect it to remain empty after calling
# fit and evaluate. Requires inside to copy this array if any hooks were
# added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
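    # num_epochs=1 bounds the input below so that predict() terminates and
    # yields exactly one prediction per example.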
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertSameElements(
['bogus_lookup', 'feature'],
graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS))
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
test_estimator = EstimatorTest()
test_estimator.testEstimatorParams()
| 44,532 | 37.423641 | 88 | py |
OpenBCIPython | OpenBCIPython-master/plugins/sample_rate.py | import time
import timeit
from threading import Thread
import plugin_interface as plugintypes
# counter for sampling rate
nb_samples_out = -1
# try to ease work for main loop
class Monitor(Thread):
def __init__(self):
Thread.__init__(self)
self.nb_samples_out = -1
# Init time to compute sampling rate
self.tick = timeit.default_timer()
self.start_tick = self.tick
self.polling_interval = 10
def run(self):
while True:
# check FPS + listen for new connections
new_tick = timeit.default_timer()
elapsed_time = new_tick - self.tick
current_samples_out = nb_samples_out
print "--- at t: ", (new_tick - self.start_tick), " ---"
print "elapsed_time: ", elapsed_time
print "nb_samples_out: ", current_samples_out - self.nb_samples_out
sampling_rate = (current_samples_out - self.nb_samples_out) / elapsed_time
print "sampling rate: ", sampling_rate
self.tick = new_tick
self.nb_samples_out = nb_samples_out
time.sleep(self.polling_interval)
class PluginSampleRate(plugintypes.IPluginExtended):
# update counters value
def __call__(self, sample):
global nb_samples_out
nb_samples_out = nb_samples_out + 1
# Instanciate "monitor" thread
def activate(self):
monit = Monitor()
if len(self.args) > 0:
monit.polling_interval = float(self.args[0])
# daemonize thread to terminate it altogether with the main when time will come
monit.daemon = True
monit.start()
def show_help(self):
print "Optional argument: polling_interval -- in seconds, default: 10."
| 1,530 | 27.886792 | 81 | py |
OpenBCIPython | OpenBCIPython-master/plugins/csv_collect_and_publish.py | import json
import socket
import threading
import timeit
import datetime
import tensorflow as tf
from preprocessing.RingBuffer import RingBuffer
from preprocessing.noise_reducer import NoiseReducer
from preprocessing.server import UDPServer
import preprocessing.init_buffer as buf
import plugin_interface as plugintypes
class MainThread(threading.Thread):
def __init__(self, plugin_config, max_iteration=None):
threading.Thread.__init__(self)
self.is_run = True
self.counter = 0
self.lock = threading.Lock()
self.plugin_config = plugin_config
self.max_iteration = max_iteration
self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.number_of_channels = int(self.plugin_config["number_of_channels"])
self.buffer_capacity = int(self.plugin_config["buffer_capacity"])
self.buffer_size = int(self.plugin_config["window_size"])
buf.ring_buffers = [RingBuffer(self.buffer_size * self.buffer_capacity)
for i in range(0, self.number_of_channels)]
self.tfrecords_filename = self.plugin_config["project_file_path"] \
+ str(self.plugin_config["processing"]["train"]["tfrecords_filename"])
self.writer = tf.python_io.TFRecordWriter(self.tfrecords_filename)
self.sampling_rate = int(self.plugin_config["sampling_rate"])
self.sampling_time = 1.0 / self.sampling_rate * 1.0
self.noisereducer_thread = NoiseReducer("main thread", self.server,
self.writer, self.plugin_config)
self.ip = str(self.plugin_config["ip"])
def run(self):
while self.is_run:
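            # Kick off a new NoiseReducer pass only when the previous one has
            # finished and at least one full window of samples is buffered.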
if not self.noisereducer_thread.is_processing and \
buf.ring_buffers[0].__len__() > self.buffer_size:
# if not plugin.noisereducer_thread.is_processing:
print ("next process..." + str(buf.ring_buffers[0].__len__()))
self.noisereducer_thread = NoiseReducer(str(self.counter), self.server,
self.writer, self.plugin_config)
self.noisereducer_thread.start()
self.noisereducer_thread.join()
self.counter += 1
if self.max_iteration is not None and self.counter == self.max_iteration:
self.is_run = False
self.server.close()
self.writer.close()
print ("closing main thread...")
class PluginCSVCollectAndPublish(plugintypes.IPluginExtended):
def init_plugin(self):
config_file = self.project_file_path + "/config/config.json"
with open(config_file) as config:
self.plugin_config = json.load(config)
self.now = datetime.datetime.now()
self.time_stamp = '%d-%d-%d_%d-%d-%d' % (self.now.year, self.now.month, self.now.day, self.now.hour,
self.now.minute, self.now.second)
self.train = eval(self.plugin_config["train"])
self.number_of_channels = int(self.plugin_config["number_of_channels"])
self.train_dir = self.project_file_path + str(self.plugin_config["train_dir"])
self.plugin_config["project_file_path"] = self.project_file_path
self.plugin_config["train_dir_abs_location"] = self.train_dir
self.train_file = self.train_dir + self.time_stamp + "new_up.csv"
self.start_time = timeit.default_timer()
self.delim = ","
self.verbose = eval(self.plugin_config["verbose"])
self.test = eval(self.plugin_config["test"])
self.kinect_angles = RingBuffer(20, dtype=list)
self.channel_vector = list(self.plugin_config["channel_vector"])
self.sampling_rate = int(self.plugin_config["sampling_rate"])
self.sampling_time = 1.0 / self.sampling_rate * 1.0
self.ip = str(self.plugin_config["ip"])
self.port = int(self.plugin_config["port"])
self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.main_thread = MainThread(self.plugin_config, buf.max_iteration)
if self.train:
self.secondary_ip = self.ip
self.secondary_port = int(self.plugin_config["secondary_port"])
self.receiver_port = int(self.plugin_config["receiver_port"])
self.secondary_server = UDPServer("udp_server", self.kinect_angles, self.secondary_port
, self.receiver_port, ip=self.secondary_ip)
def activate(self):
self.project_file_path = "/home/runge/openbci/git/OpenBCI_Python"
print ("stated initializing plugin...")
self.init_plugin()
if self.train:
try:
self.secondary_server.start()
# self.secondary_server.isRun = False
# self.secondary_server.join()
# print ("Selecting raw UDP streaming. IP: ", self.secondary_ip, ", port: ",str(self.secondary_port))
except:
print ("Error while starting udp server...")
self.secondary_server.socket.close()
print ("plugin initialization is completed successfully.")
def send_row_data(self, data):
self.server.sendto(data, (self.ip, self.port))
def deactivate(self):
print ("Closing, CSV saved to:", self.train_file)
self.main_thread.is_run = False
self.server.close()
self.main_thread.join()
return
def show_help(self):
print ("Optional argument: [filename] (default: collect.csv)")
print ("""Optional arguments: [ip [port]]
\t ip: target IP address (default: 'localhost')
\t port: target port (default: 12345)""")
def get_updated_values(self):
return self.secondary_server.get_next_point()
def __call__(self, sample):
t = timeit.default_timer() - self.start_time
if self.verbose:
print("CSV: %f | %d" % (t, sample.id))
kinect_angles = self.get_updated_values()
self.send_row_data(json.dumps(sample.channel_data))
row = ''
row += str(t)
row += self.delim
row += str(sample.id)
row += self.delim
index_buffer = 0
for i in sample.channel_data:
if self.test:
if not (index_buffer >= self.number_of_channels):
buf.ring_buffers[index_buffer].append(float(str(i)))
row += str(i)
row += self.delim
index_buffer += 1
if self.train:
for i in kinect_angles:
row += str(i)
row += self.delim
if self.verbose:
print (kinect_angles)
row[-1].replace(",", "")
row += '\n'
# buf.channel_data = row
with open(self.train_file, 'a') as f:
f.write(row)
# self.main_thread.start()
| 7,100 | 43.38125 | 117 | py |
OpenBCIPython | OpenBCIPython-master/plugins/streamer_lsl.py | # download LSL and pylsl from https://code.google.com/p/labstreaminglayer/
# Eg: ftp://sccn.ucsd.edu/pub/software/LSL/SDK/liblsl-Python-1.10.2.zip
# put in "lib" folder (same level as user.py)
import sys; sys.path.append('lib') # help python find pylsl relative to this example program
from pylsl import StreamInfo, StreamOutlet
import plugin_interface as plugintypes
# Use LSL protocol to broadcast data using one stream for EEG, one stream for AUX, one last for impedance testing (on supported board, if enabled)
class StreamerLSL(plugintypes.IPluginExtended):
# From IPlugin
def activate(self):
eeg_stream = "OpenBCI_EEG"
eeg_id = "openbci_eeg_id1"
aux_stream = "OpenBCI_AUX"
aux_id = "openbci_aux_id1"
imp_stream = "OpenBCI_Impedance"
imp_id = "openbci_imp_id1"
if len(self.args) > 0:
eeg_stream = self.args[0]
if len(self.args) > 1:
eeg_id = self.args[1]
if len(self.args) > 2:
aux_stream = self.args[2]
if len(self.args) > 3:
aux_id = self.args[3]
if len(self.args) > 4:
imp_stream = self.args[4]
if len(self.args) > 5:
imp_id = self.args[5]
# Create a new streams info, one for EEG values, one for AUX (eg, accelerometer) values
print "Creating LSL stream for EEG. Name:", eeg_stream, "- ID:", eeg_id, "- data type: float32.", self.eeg_channels, "channels at", self.sample_rate, "Hz."
info_eeg = StreamInfo(eeg_stream, 'EEG', self.eeg_channels,self.sample_rate,'float32',eeg_id);
# NB: set float32 instead of int16 so as OpenViBE takes it into account
print "Creating LSL stream for AUX. Name:", aux_stream, "- ID:", aux_id, "- data type: float32.", self.aux_channels, "channels at", self.sample_rate, "Hz."
info_aux = StreamInfo(aux_stream, 'AUX', self.aux_channels,self.sample_rate,'float32',aux_id);
# make outlets
self.outlet_eeg = StreamOutlet(info_eeg)
self.outlet_aux = StreamOutlet(info_aux)
if self.imp_channels > 0:
print "Creating LSL stream for Impedance. Name:", imp_stream, "- ID:", imp_id, "- data type: float32.", self.imp_channels, "channels at", self.sample_rate, "Hz."
info_imp = StreamInfo(imp_stream, 'Impedance', self.imp_channels,self.sample_rate,'float32',imp_id);
self.outlet_imp = StreamOutlet(info_imp)
# send channels values
def __call__(self, sample):
self.outlet_eeg.push_sample(sample.channel_data)
self.outlet_aux.push_sample(sample.aux_data)
if self.imp_channels > 0:
self.outlet_imp.push_sample(sample.imp_data)
def show_help(self):
print """Optional arguments: [EEG_stream_name [EEG_stream_ID [AUX_stream_name [AUX_stream_ID [Impedance_steam_name [Impedance_stream_ID]]]]]]
\t Defaults: "OpenBCI_EEG" / "openbci_eeg_id1" and "OpenBCI_AUX" / "openbci_aux_id1" / "OpenBCI_Impedance" / "openbci_imp_id1"."""
| 2,750 | 45.627119 | 164 | py |
OpenBCIPython | OpenBCIPython-master/plugins/udp_server.py | """A server that handles a connection with an OpenBCI board and serves that
data over both a UDP socket server and a WebSocket server.
Requires:
- pyserial
- asyncio
- websockets
"""
import cPickle as pickle
import json
import socket
import plugin_interface as plugintypes
# class PluginPrint(IPlugin):
# # args: passed by command line
# def activate(self, args):
# print "Print activated"
# # tell outside world that init went good
# return True
# def deactivate(self):
# print "Print Deactivated"
# def show_help(self):
# print "I do not need any parameter, just printing stuff."
# # called with each new sample
# def __call__(self, sample):
# sample_string = "ID: %f\n%s\n%s" %(sample.id, str(sample.channel_data)[1:-1], str(sample.aux_data)[1:-1])
# print "---------------------------------"
# print sample_string
# print "---------------------------------"
# # DEBBUGING
# # try:
# # sample_string.decode('ascii')
# # except UnicodeDecodeError:
# # print "Not a ascii-encoded unicode string"
# # else:
# # print sample_string
class UDPServer(plugintypes.IPluginExtended):
def __init__(self, ip='localhost', port=8888):
self.ip = ip
self.port = port
self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def activate(self):
print "udp_server plugin"
print self.args
if len(self.args) > 0:
self.ip = self.args[0]
if len(self.args) > 1:
self.port = int(self.args[1])
# init network
print "Selecting raw UDP streaming. IP: ", self.ip, ", port: ", str(self.port)
self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print "Server started on port " + str(self.port)
def __call__(self, sample):
self.send_data(json.dumps(sample.channel_data))
def send_data(self, data):
self.server.sendto(data, (self.ip, self.port))
# From IPlugin: close sockets, send message to client
def deactivate(self):
self.server.close();
def show_help(self):
print """Optional arguments: [ip [port]]
\t ip: target IP address (default: 'localhost')
\t port: target port (default: 12345)"""
| 2,220 | 26.419753 | 111 | py |
OpenBCIPython | OpenBCIPython-master/plugins/csv_collect.py | import csv
import timeit
import datetime
import plugin_interface as plugintypes
class PluginCSVCollect(plugintypes.IPluginExtended):
def __init__(self, file_name="collect.csv", delim = ",", verbose=False):
now = datetime.datetime.now()
self.time_stamp = '%d-%d-%d_%d-%d-%d'%(now.year,now.month,now.day,now.hour,now.minute,now.second)
self.file_name = self.time_stamp
self.start_time = timeit.default_timer()
self.delim = delim
self.verbose = verbose
def activate(self):
if len(self.args) > 0:
if 'no_time' in self.args:
self.file_name = self.args[0]
else:
self.file_name = self.args[0] + '_' + self.file_name;
if 'verbose' in self.args:
self.verbose = True
self.file_name = self.file_name + '.csv'
print "Will export CSV to:", self.file_name
#Open in append mode
with open(self.file_name, 'a') as f:
f.write('%'+self.time_stamp + '\n')
def deactivate(self):
print "Closing, CSV saved to:", self.file_name
return
def show_help(self):
print "Optional argument: [filename] (default: collect.csv)"
def __call__(self, sample):
t = timeit.default_timer() - self.start_time
#print timeSinceStart|Sample Id
if self.verbose:
print("CSV: %f | %d" %(t,sample.id))
row = ''
row += str(t)
row += self.delim
row += str(sample.id)
row += self.delim
for i in sample.channel_data:
row += str(i)
row += self.delim
for i in sample.aux_data:
row += str(i)
row += self.delim
        # remove the trailing delimiter before terminating the line
        row = row[:-1] + '\n'
with open(self.file_name, 'a') as f:
f.write(row) | 1,546 | 25.220339 | 99 | py |
OpenBCIPython | OpenBCIPython-master/plugins/noise_test.py | import timeit
import numpy as np
import plugin_interface as plugintypes
class PluginNoiseTest(plugintypes.IPluginExtended):
# update counters value
def __call__(self, sample):
# keep tract of absolute value of
self.diff = np.add(self.diff,np.absolute(np.asarray(sample.channel_data)))
self.sample_count = self.sample_count + 1
elapsed_time = timeit.default_timer() - self.last_report
if elapsed_time > self.polling_interval:
channel_noise_power = np.divide(self.diff,self.sample_count)
print (channel_noise_power)
self.diff = np.zeros(self.eeg_channels)
self.last_report = timeit.default_timer()
# # Instanciate "monitor" thread
def activate(self):
# The difference between the ref and incoming signal.
# IMPORTANT: For noise tests, the reference and channel should have the same input signal.
self.diff = np.zeros(self.eeg_channels)
self.last_report = timeit.default_timer()
self.sample_count = 0
self.polling_interval = 1.0
if len(self.args) > 0:
self.polling_interval = float(self.args[0])
def show_help(self):
print "Optional argument: polling_interval -- in seconds, default: 10. \n \
Returns the power of the system noise.\n \
NOTE: The reference and channel should have the same input signal."
| 1,279 | 30.219512 | 92 | py |
OpenBCIPython | OpenBCIPython-master/plugins/streamer_tcp_server.py | from threading import Thread
import socket, select, struct, time
import plugin_interface as plugintypes
# Simple TCP server to "broadcast" data to clients, handling deconnections. Binary format use network endianness (i.e., big-endian), float32
# TODO: does not listen for anything at the moment, could use it to set options
# Handling new client in separate thread
class MonitorStreamer(Thread):
"""Launch and monitor a "Streamer" entity (incoming connections if implemented, current sampling rate)."""
# tcp_server: the TCPServer instance that will be used
def __init__(self, streamer):
Thread.__init__(self)
# bind to Streamer entity
self.server = streamer
def run(self):
# run until we DIE
while True:
# check FPS + listen for new connections
# FIXME: not so great with threads -- use a lock?
# TODO: configure interval
self.server.check_connections()
time.sleep(1)
class StreamerTCPServer(plugintypes.IPluginExtended):
"""
Relay OpenBCI values to TCP clients
Args:
port: Port of the server
ip: IP address of the server
"""
def __init__(self, ip='localhost', port=12345):
# list of socket clients
self.CONNECTION_LIST = []
# connection infos
self.ip = ip
self.port = port
# From IPlugin
def activate(self):
if len(self.args) > 0:
self.ip = self.args[0]
if len(self.args) > 1:
self.port = int(self.args[1])
# init network
print "Selecting raw TCP streaming. IP: ", self.ip, ", port: ", self.port
self.initialize()
# init the daemon that monitors connections
self.monit = MonitorStreamer(self)
self.monit.daemon = True
# launch monitor
self.monit.start()
# the initialize method reads settings and outputs the first header
def initialize(self):
# init server
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# this has no effect, why ?
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# create connection
self.server_socket.bind((self.ip, self.port))
self.server_socket.listen(1)
print "Server started on port " + str(self.port)
# From Streamer, to be called each time we're willing to accept new connections
def check_connections(self):
# First listen for new connections, and new connections only -- this is why we pass only server_socket
read_sockets,write_sockets,error_sockets = select.select([self.server_socket],[],[], 0)
for sock in read_sockets:
# New connection
sockfd, addr = self.server_socket.accept()
self.CONNECTION_LIST.append(sockfd)
print "Client (%s, %s) connected" % addr
# and... don't bother with incoming messages
# From IPlugin: close sockets, send message to client
def deactivate(self):
# close all remote connections
for sock in self.CONNECTION_LIST:
if sock != self.server_socket:
try:
sock.send("closing!\n")
# at this point don't bother if message not sent
except:
continue
sock.close();
# close server socket
self.server_socket.close();
# broadcast channels values to all clients
# as_string: many for debug, send values with a nice "[34.45, 30.4, -38.0]"-like format
def __call__(self, sample, as_string=False):
values=sample.channel_data
# save sockets that are closed to remove them later on
outdated_list = []
for sock in self.CONNECTION_LIST:
# If one error should happen, we remove socket from the list
try:
if as_string:
sock.send(str(values) + "\n")
else:
nb_channels=len(values)
# format for binary data, network endian (big) and float (float32)
packer = struct.Struct('!%sf' % nb_channels)
# convert values to bytes
packed_data = packer.pack(*values)
sock.send(packed_data)
# TODO: should check if the correct number of bytes passed through
except:
                # sometimes (always?) it's only during the second write to a closed socket that an error is raised?
print "Something bad happened, will close socket"
outdated_list.append(sock)
# now we are outside of the main list, it's time to remove outdated sockets, if any
for bad_sock in outdated_list:
print "Removing socket..."
self.CONNECTION_LIST.remove(bad_sock)
# not very costly to be polite
bad_sock.close()
def show_help(self):
print """Optional arguments: [ip [port]]
\t ip: target IP address (default: 'localhost')
\t port: target port (default: 12345)"""
| 4,416 | 32.210526 | 140 | py |
OpenBCIPython | OpenBCIPython-master/plugins/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/plugins/print.py | import plugin_interface as plugintypes
class PluginPrint(plugintypes.IPluginExtended):
def activate(self):
print "Print activated"
# called with each new sample
def __call__(self, sample):
if sample:
# print impedance if supported
if self.imp_channels > 0:
sample_string = "ID: %f\n%s\n%s\n%s" %(sample.id, str(sample.channel_data)[1:-1], str(sample.aux_data)[1:-1], str(sample.imp_data)[1:-1])
else:
sample_string = "ID: %f\n%s\n%s" %(sample.id, str(sample.channel_data)[1:-1], str(sample.aux_data)[1:-1])
print "---------------------------------"
print sample_string
print "---------------------------------"
# DEBBUGING
# try:
# sample_string.decode('ascii')
# except UnicodeDecodeError:
# print "Not a ascii-encoded unicode string"
# else:
# print sample_string
| 843 | 30.259259 | 141 | py |
OpenBCIPython | OpenBCIPython-master/plugins/streamer_osc.py |
# requires pyosc
from OSC import OSCClient, OSCMessage
import plugin_interface as plugintypes
# Use OSC protocol to broadcast data (UDP layer), using "/openbci" stream. (NB. does not check numbers of channel as TCP server)
class StreamerOSC(plugintypes.IPluginExtended):
"""
Relay OpenBCI values to OSC clients
Args:
port: Port of the server
ip: IP address of the server
address: name of the stream
"""
def __init__(self, ip='localhost', port=12345, address="/openbci"):
# connection infos
self.ip = ip
self.port = port
self.address = address
# From IPlugin
def activate(self):
if len(self.args) > 0:
self.ip = self.args[0]
if len(self.args) > 1:
self.port = int(self.args[1])
if len(self.args) > 2:
self.address = self.args[2]
# init network
print "Selecting OSC streaming. IP: ", self.ip, ", port: ", self.port, ", address: ", self.address
self.client = OSCClient()
self.client.connect( (self.ip, self.port) )
# From IPlugin: close connections, send message to client
def deactivate(self):
self.client.send(OSCMessage("/quit") )
# send channels values
def __call__(self, sample):
mes = OSCMessage(self.address)
mes.append(sample.channel_data)
# silently pass if connection drops
try:
self.client.send(mes)
except:
return
def show_help(self):
print """Optional arguments: [ip [port [address]]]
\t ip: target IP address (default: 'localhost')
\t port: target port (default: 12345)
\t address: select target address (default: '/openbci')"""
| 1,547 | 26.157895 | 128 | py |
OpenBCIPython | OpenBCIPython-master/plugins/tmp/new_server.py |
import Queue
import threading
import time
exitFlag = 0
class myThread (threading.Thread):
def __init__(self, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
print "Starting " + self.name
process_data(self.name, self.q)
print "Exiting " + self.name
def process_data(threadName, q):
while not exitFlag:
queueLock.acquire()
if not workQueue.empty():
data = q.get()
queueLock.release()
print "%s processing %s" % (threadName, data)
else:
queueLock.release()
time.sleep(1)
threadList = ["Thread-1", "Thread-2", "Thread-3"]
nameList = ["One", "Two", "Three", "Four", "Five"]
queueLock = threading.Lock()
workQueue = Queue.Queue(10)
threads = []
threadID = 1
# Create new threads
for tName in threadList:
thread = myThread(threadID, tName, workQueue)
thread.start()
threads.append(thread)
threadID += 1
# Fill the queue
queueLock.acquire()
for word in nameList:
workQueue.put(word)
queueLock.release()
# Wait for queue to empty
while not workQueue.empty():
pass
# Notify threads it's time to exit
exitFlag = 1
# Wait for all threads to complete
for t in threads:
t.join()
print "Exiting Main Thread" | 1,350 | 21.516667 | 57 | py |
OpenBCIPython | OpenBCIPython-master/plugins/tmp/client.py | import json
import cPickle as pickle
import socket
import sys
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('localhost', 5678)
message = [45,67,89]
try:
data_string = json.dumps(message)
# Send data
print >>sys.stderr, 'sending "%s"' % data_string
sent = sock.sendto(data_string, server_address)
# # Receive response
# print >>sys.stderr, 'waiting to receive'
# data, server = sock.recv(4096)
# print >>sys.stderr, 'received "%s"' % data
finally:
print >>sys.stderr, 'closing socket'
sock.close() | 593 | 21 | 55 | py |
OpenBCIPython | OpenBCIPython-master/scripts/stream_data.py | import sys; sys.path.append('..') # help python find open_bci_v3.py relative to scripts folder
import open_bci_v3 as bci
import streamer_tcp_server
import time, timeit
from threading import Thread
# Transmit data to the OpenViBE acquisition server, interpolating data (well, sort of) from 250Hz to 256Hz
# Listen to new connections every second using a separate thread.
# NB: Left here for resampling algorithm, prefer the use of user.py.
NB_CHANNELS = 8
# If > 0 will interpolate based on samples count, typically 1.024 to go from 250Hz to 256Hz
SAMPLING_FACTOR = -1.024
# If > 0 will interbolate based on elapsed time
SAMPLING_RATE = 256
SERVER_PORT=12345
SERVER_IP="localhost"
DEBUG=False
# check packet drop
last_id = -1
# counter for sampling rate
nb_samples_in = -1
nb_samples_out = -1
# last seen values for interpolation
last_values = [0] * NB_CHANNELS
# counter to trigger duplications...
leftover_duplications = 0
tick=timeit.default_timer()
# try to ease work for main loop
class Monitor(Thread):
def __init__(self):
Thread.__init__(self)
self.nb_samples_in = -1
self.nb_samples_out = -1
# Init time to compute sampling rate
self.tick = timeit.default_timer()
self.start_tick = self.tick
def run(self):
while True:
# check FPS + listen for new connections
new_tick = timeit.default_timer()
elapsed_time = new_tick - self.tick
current_samples_in = nb_samples_in
current_samples_out = nb_samples_out
print "--- at t: ", (new_tick - self.start_tick), " ---"
print "elapsed_time: ", elapsed_time
print "nb_samples_in: ", current_samples_in - self.nb_samples_in
print "nb_samples_out: ", current_samples_out - self.nb_samples_out
self.tick = new_tick
self.nb_samples_in = nb_samples_in
self.nb_samples_out = nb_samples_out
# time to watch for connection
# FIXME: not so great with threads
server.check_connections()
time.sleep(1)
def streamData(sample):
global last_values
global tick
# check packet skipped
global last_id
# TODO: duplicate packet if skipped to stay sync
if sample.id != last_id + 1:
print "time", tick, ": paquet skipped!"
if sample.id == 255:
last_id = -1
else:
last_id = sample.id
# update counters
global nb_samples_in, nb_samples_out
nb_samples_in = nb_samples_in + 1
# check for duplication, by default 1 (...which is *no* duplication of the one current sample)
global leftover_duplications
# first method with sampling rate and elapsed time (depends on system clock accuracy)
if (SAMPLING_RATE > 0):
# elapsed time since last call, update tick
now = timeit.default_timer()
elapsed_time = now - tick;
# now we have to compute how many times we should send data to keep up with sample rate (oversampling)
leftover_duplications = SAMPLING_RATE * elapsed_time + leftover_duplications - 1
tick = now
# second method with a samplin factor (depends on openbci accuracy)
elif SAMPLING_FACTOR > 0:
leftover_duplications = SAMPLING_FACTOR + leftover_duplications - 1
#print "needed_duplications: ", needed_duplications, "leftover_duplications: ", leftover_duplications
# If we need to insert values, will interpolate between current packet and last one
  # FIXME: because we process packet per packet, this only handles nb_duplications == 1; for more, the interpolation is bad and nothing is sent
if (leftover_duplications > 1):
leftover_duplications = leftover_duplications - 1
interpol_values = list(last_values)
for i in range(0,len(interpol_values)):
# OK, it's a very rough interpolation
interpol_values[i] = (last_values[i] + sample.channel_data[i]) / 2
if DEBUG:
print " --"
print " last values: ", last_values
print " interpolation: ", interpol_values
print " current sample: ", sample.channel_data
# send to clients interpolated sample
#leftover_duplications = 0
server.broadcast_values(interpol_values)
nb_samples_out = nb_samples_out + 1
# send to clients current sample
server.broadcast_values(sample.channel_data)
nb_samples_out = nb_samples_out + 1
# save current values for possible interpolation
last_values = list(sample.channel_data)
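# Illustrative sketch (not part of the original script): the oversampling
# bookkeeping used in streamData, isolated as a pure function. With a 250 Hz
# board and a 256 Hz target, leftover grows by about 0.024 per sample, so
# roughly one sample in 42 is duplicated (interpolated) to keep up. The helper
# name is hypothetical and nothing below calls it.
def _example_needed_duplication(leftover, elapsed_time, target_rate=SAMPLING_RATE):
    # same update rule as streamData: accumulate the fraction of samples owed
    leftover = target_rate * elapsed_time + leftover - 1
    duplicate = leftover > 1
    if duplicate:
        leftover = leftover - 1
    return duplicate, leftover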
if __name__ == '__main__':
# init server
server = streamer_tcp_server.StreamerTCPServer(ip=SERVER_IP, port=SERVER_PORT, nb_channels=NB_CHANNELS)
# init board
port = '/dev/ttyUSB1'
baud = 115200
monit = Monitor()
# daemonize theard to terminate it altogether with the main when time will come
monit.daemon = True
monit.start()
board = bci.OpenBCIBoard(port=port, baud=baud, filter_data=False)
board.startStreaming(streamData)
| 4,823 | 33.457143 | 153 | py |
OpenBCIPython | OpenBCIPython-master/scripts/test.py | import sys; sys.path.append('..') # help python find open_bci_v3.py relative to scripts folder
import open_bci_v3 as bci
import os
import logging
import time
def printData(sample):
#os.system('clear')
print "----------------"
print("%f" %(sample.id))
print sample.channel_data
print sample.aux_data
print "----------------"
if __name__ == '__main__':
port = '/dev/tty.OpenBCI-DN008VTF'
#port = '/dev/tty.OpenBCI-DN0096XA'
baud = 115200
logging.basicConfig(filename="test.log",format='%(asctime)s - %(levelname)s : %(message)s',level=logging.DEBUG)
logging.info('---------LOG START-------------')
board = bci.OpenBCIBoard(port=port, scaled_output=False, log=True)
print("Board Instantiated")
board.ser.write('v')
time.sleep(10)
#board.start_streaming(printData)
board.print_bytes_in()
| 805 | 26.793103 | 112 | py |
OpenBCIPython | OpenBCIPython-master/scripts/udp_server.py | """A server that handles a connection with an OpenBCI board and serves that
data over both a UDP socket server and a WebSocket server.
Requires:
- pyserial
- asyncio
- websockets
"""
import argparse
import cPickle as pickle
import json
import sys; sys.path.append('..') # help python find open_bci_v3.py relative to scripts folder
import open_bci_v3 as open_bci
import socket
parser = argparse.ArgumentParser(
description='Run a UDP server streaming OpenBCI data.')
parser.add_argument(
'--json',
action='store_true',
help='Send JSON data rather than pickled Python objects.')
parser.add_argument(
'--filter_data',
action='store_true',
help='Enable onboard filtering.')
parser.add_argument(
'--host',
help='The host to listen on.',
default='127.0.0.1')
parser.add_argument(
'--port',
help='The port to listen on.',
default='8888')
parser.add_argument(
'--serial',
help='The serial port to communicate with the OpenBCI board.',
default='/dev/tty.usbmodem1421')
parser.add_argument(
'--baud',
help='The baud of the serial connection with the OpenBCI board.',
default='115200')
class UDPServer(object):
def __init__(self, ip, port, json):
self.ip = ip
self.port = port
self.json = json
print "Selecting raw UDP streaming. IP: ", self.ip, ", port: ", str(self.port)
self.server = socket.socket(
socket.AF_INET, # Internet
socket.SOCK_DGRAM)
def send_data(self, data):
self.server.sendto(data, (self.ip, self.port))
def handle_sample(self, sample):
if self.json:
# Just send channel data.
self.send_data(json.dumps(sample.channel_data))
else:
# Pack up and send the whole OpenBCISample object.
self.send_data(pickle.dumps(sample))
args = parser.parse_args()
obci = open_bci.OpenBCIBoard(args.serial, int(args.baud))
if args.filter_data:
obci.filter_data = True
sock_server = UDPServer(args.host, int(args.port), args.json)
obci.start_streaming(sock_server.handle_sample)
| 2,039 | 26.2 | 94 | py |
OpenBCIPython | OpenBCIPython-master/scripts/simple_serial.py | import serial
import struct
import numpy as np
import time
import timeit
import atexit
import logging
import threading
import sys
import pdb
port = '/dev/tty.OpenBCI-DN008VTF'
#port = '/dev/tty.OpenBCI-DN0096XA'
baud = 115200
ser = serial.Serial(port= port, baudrate = baud, timeout = None)
pdb.set_trace() | 307 | 18.25 | 64 | py |
OpenBCIPython | OpenBCIPython-master/scripts/udp_client.py | """A sample client for the OpenBCI UDP server."""
import argparse
import cPickle as pickle
import json
import sys; sys.path.append('..') # help python find open_bci_v3.py relative to scripts folder
import open_bci_v3 as open_bci
import socket
parser = argparse.ArgumentParser(
description='Run a UDP client listening for streaming OpenBCI data.')
parser.add_argument(
'--json',
action='store_true',
help='Handle JSON data rather than pickled Python objects.')
parser.add_argument(
'--host',
help='The host to listen on.',
default='127.0.0.1')
parser.add_argument(
'--port',
help='The port to listen on.',
default='8888')
class UDPClient(object):
def __init__(self, ip, port, json):
self.ip = ip
self.port = port
self.json = json
self.client = socket.socket(
socket.AF_INET, # Internet
socket.SOCK_DGRAM)
self.client.bind((ip, port))
def start_listening(self, callback=None):
while True:
data, addr = self.client.recvfrom(1024)
print("data")
if self.json:
sample = json.loads(data)
# In JSON mode we only recieve channel data.
print data
else:
sample = pickle.loads(data)
# Note that sample is an OpenBCISample object.
print sample.id
print sample.channel_data
args = parser.parse_args()
client = UDPClient(args.host, int(args.port), args.json)
client.start_listening()
| 1,442 | 24.767857 | 94 | py |
OpenBCIPython | OpenBCIPython-master/scripts/socket_client.py | from socketIO_client import SocketIO
def on_sample(*args):
print args
socketIO = SocketIO('10.0.1.194', 8880)
socketIO.on('openbci', on_sample)
socketIO.wait(seconds=10)
| 176 | 18.666667 | 39 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/kde_methods.py | r"""
:Author: Pierre Barbier de Reuille <[email protected]>
This module contains a set of methods to compute univariate KDEs. See the
objects in the :py:mod:`pyqt_fit.kde` module for more details on these methods.
These methods provide various variations on :math:`\hat{K}(x;X,h,L,U)`, the
modified kernel evaluated on the point :math:`x` based on the estimation points
:math:`X`, a bandwidth :math:`h` and on the domain :math:`[L,U]`.
The definitions of the methods rely on the following definitions:
.. math::
\begin{array}{rcl}
a_0(l,u) &=& \int_l^u K(z) dz\\
a_1(l,u) &=& \int_l^u zK(z) dz\\
a_2(l,u) &=& \int_l^u z^2K(z) dz
\end{array}
These definitions correspond to:
- :math:`a_0(l,u)` -- The partial cumulative distribution function
- :math:`a_1(l,u)` -- The partial first moment of the distribution. In
  particular, :math:`a_1(-\infty, \infty)` is the mean of the kernel (i.e. it
  should be 0).
- :math:`a_2(l,u)` -- The partial second moment of the distribution. In
  particular, :math:`a_2(-\infty, \infty)` is the variance of the kernel (i.e.
  it should be close to 1, unless using a higher order kernel).
References:
```````````
.. [1] Jones, M. C. 1993. Simple boundary correction for kernel density
estimation. Statistics and Computing 3: 135--146.
"""
import numpy as np
from scipy import fftpack, integrate, optimize
from compat import irange
from utils import make_ufunc, namedtuple, numpy_trans_idx
from binning import fast_bin
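# Illustrative sketch (not part of the original module): numerically check the
# partial moments a_0, a_1, a_2 defined in the module docstring for the
# Gaussian kernel. Over the full real line they should come out as 1, 0 and 1.
# The helper name is hypothetical and nothing below calls it.
def _example_gaussian_partial_moments(l=-np.inf, u=np.inf):
    def gauss(z):
        return np.exp(-0.5 * z * z) / np.sqrt(2 * np.pi)
    a0 = integrate.quad(gauss, l, u)[0]
    a1 = integrate.quad(lambda z: z * gauss(z), l, u)[0]
    a2 = integrate.quad(lambda z: z * z * gauss(z), l, u)[0]
    return a0, a1, a2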
def generate_grid(kde, N=None, cut=None):
r"""
Helper method returning a regular grid on the domain of the KDE.
:param KDE1D kde: Object describing the KDE computation. The object must
have been fitted!
:param int N: Number of points in the grid
:param float cut: for unbounded domains, how far past the maximum should
the grid extend to, in term of KDE bandwidth
:return: A vector of N regularly spaced points
"""
if N is None:
N = 2 ** 10
if cut is None:
cut = kde.kernel.cut
if kde.lower == -np.inf:
lower = np.min(kde.xdata) - cut * kde.bandwidth
else:
lower = kde.lower
if kde.upper == np.inf:
upper = np.max(kde.xdata) + cut * kde.bandwidth
else:
upper = kde.upper
return np.linspace(lower, upper, N)
def compute_bandwidth(kde):
"""
    Compute the bandwidth and covariance for the model, based on its xdata attribute.
"""
if kde.bandwidth_function:
bw = float(kde.bandwidth_function(kde.xdata, model=kde))
cov = bw * bw
elif kde.covariance_function:
cov = float(kde.covariance_function(kde.xdata, model=kde))
bw = np.sqrt(cov)
else:
return kde.bandwidth, kde.covariance
return bw, cov
class KDE1DMethod(object):
"""
Base class providing a default grid method and a default method for
unbounded evaluation of the PDF and CDF. It also provides default methods
for the other metrics, based on PDF and CDF calculations.
:Note:
- It is expected that all grid methods will return the same grid if
used with the same arguments.
- It is fair to assume all array-like arguments will be at least 1D
arrays.
"""
name = 'unbounded'
def pdf(self, kde, points, out):
"""
Compute the PDF of the estimated distribution.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param ndarray points: Points to evaluate the distribution on
        :param ndarray out: Result object. It must have the same shape as ``points``
:rtype: ndarray
:return: Returns the ``out`` variable, updated with the PDF.
:Default: Direct implementation of the formula for unbounded pdf
computation.
"""
xdata = kde.xdata
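        # Add a trailing axis to points so that (points - xdata) broadcasts,
        # evaluating the kernel for every (evaluation point, sample) pair at once.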
points = points[..., np.newaxis]
bw = kde.bandwidth * kde.lambdas
z = (points - xdata) / bw
kernel = kde.kernel
terms = kernel(z)
terms *= kde.weights / bw
terms.sum(axis=-1, out=out)
out /= kde.total_weights
return out
def fit(self, kde):
"""
Method called by the KDE1D object right after fitting to allow for
one-time calculation.
:param pyqt_fit.kde.KDE1D kde: KDE object being fitted
:Default: Compute the bandwidth and covariance if specified as functions
"""
kde.compute_bandwidth()
def __call__(self, kde, points, out):
"""
Call the :py:meth:`pdf` method.
"""
return self.pdf(kde, points, out)
def cdf(self, kde, points, out):
r"""
Compute the CDF of the estimated distribution, defined as:
.. math::
cdf(x) = P(X \leq x) = \int_l^x p(t) dt
where :math:`l` is the lower bound of the distribution domain and
:math:`p` the density of probability
:param pyqt_fit.kde.KDE1D kde: KDE object
:param ndarray points: Points to evaluate the CDF on
        :param ndarray out: Result object. It must have the same shape as ``points``
:rtype: ndarray
:return: Returns the ``out`` variable, updated with the CDF.
:Default: Direct implementation of the formula for unbounded CDF
computation.
"""
xdata = kde.xdata
points = points[..., np.newaxis]
bw = kde.bandwidth * kde.lambdas
z = (points - xdata) / bw
kernel = kde.kernel
terms = kernel.cdf(z)
terms *= kde.weights
terms.sum(axis=-1, out=out)
out /= kde.total_weights
return out
def icdf(self, kde, points, out):
r"""
Compute the inverse cumulative distribution (quantile) function,
defined as:
.. math::
icdf(p) = \inf\left\{x\in\mathbb{R} : cdf(x) \geq p\right\}
:param pyqt_fit.kde.KDE1D kde: KDE object
:param ndarray points: Points to evaluate the iCDF on
        :param ndarray out: Result object. It must have the same shape as ``points``
:rtype: ndarray
:return: Returns the ``out`` variable, updated with the iCDF.
:Default: First approximate the result using linear interpolation on
the CDF and refine the result numerically using the Newton method.
"""
xs, ys = self.cdf_grid(kde)
coarse_result = np.interp(points, ys, xs, kde.lower, kde.upper)
lower = kde.lower
upper = kde.upper
cdf = self.cdf
pdf_out = np.empty(1, dtype=float)
def pdf(x):
if x <= lower:
return 0
if x >= upper:
return 0
return self.pdf(kde, np.atleast_1d(x), pdf_out)
@make_ufunc()
def find_inverse(p, approx):
if p > 1-1e-10:
return upper
if p < 1e-10:
return lower
if approx >= xs[-1] or approx <= xs[0]:
return approx
cdf_out = np.empty(1, dtype=float)
def f(x):
if x <= lower:
return -p
elif x >= upper:
return 1-p
return cdf(kde, np.atleast_1d(x), cdf_out) - p
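            # Newton refinement: the derivative of the CDF is the PDF, hence fprime=pdf.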
return optimize.newton(f, approx, fprime=pdf, tol=1e-6)
return find_inverse(points, coarse_result, out=out)
def sf(self, kde, points, out):
r"""
Compute the survival function, defined as:
.. math::
sf(x) = P(X \geq x) = \int_x^u p(t) dt = 1 - cdf(x)
:param pyqt_fit.kde.KDE1D kde: KDE object
:param ndarray points: Points to evaluate the survival function on
        :param ndarray out: Result object. It must have the same shape as ``points``
:rtype: ndarray
:return: Returns the ``out`` variable, updated with the survival function.
:Default: Compute explicitly :math:`1 - cdf(x)`
"""
self.cdf(kde, points, out)
out -= 1
out *= -1
return out
def isf(self, kde, points, out):
r"""
Compute the inverse survival function, defined as:
.. math::
isf(p) = \sup\left\{x\in\mathbb{R} : sf(x) \leq p\right\}
:param pyqt_fit.kde.KDE1D kde: KDE object
:param ndarray points: Points to evaluate the iSF on
        :param ndarray out: Result object. It must have the same shape as ``points``
:rtype: ndarray
:return: Returns the ``out`` variable, updated with the inverse survival function.
:Default: Compute :math:`icdf(1-p)`
"""
return self.icdf(kde, 1-points, out)
def hazard(self, kde, points, out):
r"""
Compute the hazard function evaluated on the points.
The hazard function is defined as:
.. math::
h(x) = \frac{p(x)}{sf(x)}
where :math:`p(x)` is the probability density function and
:math:`sf(x)` is the survival function.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param ndarray points: Points to evaluate the hazard function on
        :param ndarray out: Result object. It must have the same shape as ``points``
:rtype: ndarray
:return: Returns the ``out`` variable, updated with the hazard function
:Default: Compute explicitly :math:`pdf(x) / sf(x)`
"""
self.pdf(kde, points, out=out)
sf = np.empty(out.shape, dtype=out.dtype)
self.sf(kde, points, sf)
sf[sf < 0] = 0 # Some methods can produce negative sf
out /= sf
return out
def cumhazard(self, kde, points, out):
r"""
Compute the cumulative hazard function evaluated on the points.
The hazard function is defined as:
.. math::
ch(x) = \int_l^x h(t) dt = -\ln sf(x)
where :math:`l` is the lower bound of the domain, :math:`h` the hazard
function and :math:`sf` the survival function.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param ndarray points: Points to evaluate the cumulative hazard function on
        :param ndarray out: Result object. It must have the same shape as ``points``
:rtype: ndarray
:return: Returns the ``out`` variable, updated with the cumulative hazard function
:Default: Compute explicitly :math:`-\ln sf(x)`
"""
self.sf(kde, points, out)
out[out < 0] = 0 # Some methods can produce negative sf
np.log(out, out=out)
out *= -1
return out
def grid(self, kde, N=None, cut=None):
"""
Evaluate the PDF of the distribution on a regular grid with at least
``N`` elements.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param int N: minimum number of element in the returned grid. Most
methods will want to round it to the next power of 2.
:param float cut: for unbounded domains, how far from the last data
point should the grid go, as a fraction of the bandwidth.
:rtype: (ndarray, ndarray)
:returns: The array of positions the PDF has been estimated on, and the
estimations.
:Default: Evaluate :math:`pdf(x)` on a grid generated using
:py:func:`generate_grid`
"""
N = self.grid_size(N)
g = generate_grid(kde, N, cut)
out = np.empty(g.shape, dtype=float)
return g, self.pdf(kde, g, out)
def cdf_grid(self, kde, N=None, cut=None):
"""
Evaluate the CDF of the distribution on a regular grid with at least
``N`` elements.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param int N: minimum number of element in the returned grid. Most
methods will want to round it to the next power of 2.
:param float cut: for unbounded domains, how far from the last data
point should the grid go, as a fraction of the bandwidth.
:rtype: (ndarray, ndarray)
:returns: The array of positions the CDF has been estimated on, and the
estimations.
:Default: Evaluate :math:`cdf(x)` on a grid generated using
:py:func:`generate_grid`
"""
N = self.grid_size(N)
g = generate_grid(kde, N, cut)
out = np.empty(g.shape, dtype=float)
return g, self.cdf(kde, g, out)
def icdf_grid(self, kde, N=None, cut=None):
"""
Compute the inverse cumulative distribution (quantile) function on
a grid.
:Note: The default implementation is not as good an approximation as
the plain icdf default method.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param int N: minimum number of element in the returned grid. Most
methods will want to round it to the next power of 2.
:param float cut: for unbounded domains, how far from the last data
point should the grid go, as a fraction of the bandwidth.
:rtype: (ndarray, ndarray)
:returns: The array of positions the CDF has been estimated on, and the
estimations.
:Default: Linear interpolation of the inverse CDF on a grid
"""
xs, ys = self.cdf_grid(kde, N, cut)
N = len(xs)
points = np.linspace(0, 1, N)
icdf = np.interp(points, ys, xs, kde.lower, kde.upper)
return points, icdf
def sf_grid(self, kde, N=None, cut=None):
r"""
Compute the survival function on a grid.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param int N: minimum number of element in the returned grid. Most
methods will want to round it to the next power of 2.
:param float cut: for unbounded domains, how far from the last data
point should the grid go, as a fraction of the bandwidth.
:rtype: (ndarray, ndarray)
:returns: The array of positions the survival function has been
estimated on, and the estimations.
:Default: Compute explicitly :math:`1 - cdf(x)`
"""
points, out = self.cdf_grid(kde, N, cut)
out -= 1
out *= -1
return points, out
def isf_grid(self, kde, N=None, cut=None):
"""
Compute the inverse survival function on a grid.
:Note: The default implementation is not as good an approximation as
the plain isf default method.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param int N: minimum number of element in the returned grid. Most
methods will want to round it to the next power of 2.
:param float cut: for unbounded domains, how far from the last data
point should the grid go, as a fraction of the bandwidth.
:rtype: (ndarray, ndarray)
        :returns: The array of probabilities the inverse survival function has
            been estimated on, and the estimations.
:Default: Linear interpolation of the inverse survival function on a grid
"""
xs, ys = self.sf_grid(kde, N, cut)
N = len(xs)
points = np.linspace(0, 1, N)
isf = np.interp(points, ys[::-1], xs[::-1], kde.upper, kde.lower)
return points, isf
def hazard_grid(self, kde, N=None, cut=None):
r"""
Compute the hazard function on a grid.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param int N: minimum number of element in the returned grid. Most
methods will want to round it to the next power of 2.
:param float cut: for unbounded domains, how far from the last data
point should the grid go, as a fraction of the bandwidth.
:rtype: (ndarray, ndarray)
:returns: The array of positions the hazard function has been
estimated on, and the estimations.
:Default: Compute explicitly :math:`pdf(x) / sf(x)`
"""
points, out = self.grid(kde, N, cut)
_, sf = self.sf_grid(kde, N, cut)
sf[sf < 0] = 0 # Some methods can produce negative sf
out /= sf
return points, out
def cumhazard_grid(self, kde, N=None, cut=None):
r"""
        Compute the cumulative hazard function on a grid.
:param pyqt_fit.kde.KDE1D kde: KDE object
:param int N: minimum number of element in the returned grid. Most
methods will want to round it to the next power of 2.
:param float cut: for unbounded domains, how far from the last data
point should the grid go, as a fraction of the bandwidth.
:rtype: (ndarray, ndarray)
:returns: The array of positions the hazard function has been
estimated on, and the estimations.
:Default: Compute explicitly :math:`-\ln sf(x)`
"""
points, out = self.sf_grid(kde, N, cut)
out[out < 0] = 0 # Some methods can produce negative sf
np.log(out, out=out)
out *= -1
return points, out
def __str__(self):
"""
Return the name of the method
"""
return self.name
def numeric_cdf(self, kde, points, out):
"""
Provide a numeric approximation of the CDF based on integrating the pdf
using :py:func:`scipy.integrate.quad`.
"""
pts = points.ravel()
pts[pts < kde.lower] = kde.lower
pts[pts > kde.upper] = kde.upper
ix = pts.argsort()
sp = pts[ix]
pdf_out = np.empty((1,), dtype=float)
def pdf(x):
return self.pdf(kde, np.array([x]), pdf_out)
@make_ufunc()
def comp_cdf(i):
low = kde.lower if i == 0 else sp[i-1]
return integrate.quad(pdf, low, sp[i])[0]
parts = np.empty(sp.shape, dtype=float)
comp_cdf(np.arange(len(sp)), out=parts)
ints = parts.cumsum()
out.put(ix, ints)
return out
def numeric_cdf_grid(self, kde, N=None, cut=None):
"""
Compute the CDF on a grid using a trivial, but fast, numeric
integration of the pdf.
"""
pts, pdf = self.grid(kde, N, cut)
cdf = integrate.cumtrapz(pdf, pts, initial=0)
return pts, cdf
def grid_size(self, N=None):
if N is None:
return 2**10
return N
unbounded = KDE1DMethod()
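# --- Hedged example (not part of the original module) ------------------------
# A standalone sketch of the relations implemented by ``hazard`` and
# ``cumhazard`` above: h(x) = pdf(x) / sf(x) and ch(x) = -ln sf(x).  It uses
# ``scipy.stats.norm`` instead of a fitted KDE so that it runs without data;
# the function name is purely illustrative.
def _example_hazard_relations():
    from scipy import stats
    xs = np.linspace(-2.0, 2.0, 5)
    dist = stats.norm()
    hazard = dist.pdf(xs) / dist.sf(xs)      # same formula as KDE1DMethod.hazard
    cumhazard = -np.log(dist.sf(xs))         # same formula as KDE1DMethod.cumhazard
    return hazard, cumhazard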
class RenormalizationMethod(KDE1DMethod):
r"""
    This method consists in using the normal kernel method, but renormalizing
    it to take into account only the part of the kernel within the domain of
    the density [1]_.
The kernel is then replaced with:
.. math::
\hat{K}(x;X,h,L,U) \triangleq \frac{1}{a_0(u,l)} K(z)
where:
.. math::
z = \frac{x-X}{h} \qquad l = \frac{L-x}{h} \qquad u = \frac{U-x}{h}
"""
name = 'renormalization'
def pdf(self, kde, points, out):
if not kde.bounded:
return KDE1DMethod.pdf(self, kde, points, out)
xdata = kde.xdata
points = points[..., np.newaxis]
bw = kde.bandwidth * kde.lambdas
l = (points - kde.lower) / bw
u = (points - kde.upper) / bw
z = (points - xdata) / bw
kernel = kde.kernel
a1 = (kernel.cdf(l) - kernel.cdf(u))
terms = kernel(z) * ((kde.weights / bw) / a1)
terms.sum(axis=-1, out=out)
out /= kde.total_weights
return out
def cdf(self, kde, points, out):
if not kde.bounded:
return KDE1DMethod.cdf(self, kde, points, out)
return self.numeric_cdf(kde, points, out)
def cdf_grid(self, kde, N=None, cut=None):
if N is None:
N = 2**10
if not kde.bounded or N <= 2**11:
return KDE1DMethod.cdf_grid(self, kde, N, cut)
return self.numeric_cdf_grid(kde, N, cut)
renormalization = RenormalizationMethod()
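# --- Hedged example (not part of the original module) ------------------------
# A standalone sketch of the correction factor used by
# ``RenormalizationMethod.pdf`` above: for an evaluation point ``x``, the kernel
# mass falling inside [lower, upper] is cdf((x-lower)/h) - cdf((x-upper)/h), and
# the estimated density is divided by it.  ``scipy.stats.norm`` stands in for
# the Gaussian kernel; the default arguments are illustrative only.
def _example_renormalization_factor(x=0.5, lower=0.0, upper=np.inf, h=1.0):
    from scipy import stats
    mass_inside = stats.norm.cdf((x - lower) / h) - stats.norm.cdf((x - upper) / h)
    return 1.0 / mass_inside     # ~1.45 for a point half a bandwidth above the bound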
class ReflectionMethod(KDE1DMethod):
r"""
    This method consists in simulating the reflection of the data left and
    right of the boundaries. If one of the boundaries is infinite, then the
    data is not reflected in that direction. To this end, the kernel is
replaced with:
.. math::
\hat{K}(x; X, h, L, U) \triangleq K(z)
+ K\left(\frac{x+X-2L}{h}\right)
+ K\left(\frac{x+X-2U}{h}\right)
where:
.. math::
z = \frac{x-X}{h}
See the :py:mod:`pyqt_fit.kde_methods` for a description of the various
symbols.
    When computing grids, if the bandwidth is constant, the result is computed
    using the DCT (Discrete Cosine Transform).
"""
name = 'reflection'
def pdf(self, kde, points, out):
if not kde.bounded:
return KDE1DMethod.pdf(self, kde, points, out)
xdata = kde.xdata
points = points[..., np.newaxis]
# Make sure points are between the bounds, with reflection if needed
if any(points < kde.lower) or any(points > kde.upper):
span = kde.upper - kde.lower
points = points - (kde.lower + span)
points %= 2*span
points -= kde.lower + span
points = np.abs(points)
bw = kde.bandwidth * kde.lambdas
z = (points - xdata) / bw
z1 = (points + xdata) / bw
L = kde.lower
U = kde.upper
kernel = kde.kernel
terms = kernel(z)
if L > -np.inf:
terms += kernel(z1 - (2 * L / bw))
if U < np.inf:
terms += kernel(z1 - (2 * U / bw))
terms *= kde.weights / bw
terms.sum(axis=-1, out=out)
out /= kde.total_weights
return out
def cdf(self, kde, points, out):
if not kde.bounded:
return KDE1DMethod.cdf(self, kde, points, out)
xdata = kde.xdata
points = points[..., np.newaxis]
# Make sure points are between the bounds, with reflection if needed
if any(points < kde.lower) or any(points > kde.upper):
span = kde.upper - kde.lower
points = points - (kde.lower + span)
points %= 2*span
points -= kde.lower + span
points = np.abs(points)
bw = kde.bandwidth * kde.lambdas
z = (points - xdata) / bw
z1 = (points + xdata) / bw
L = kde.lower
U = kde.upper
kernel = kde.kernel
terms = kernel.cdf(z)
if L > -np.inf:
terms -= kernel.cdf((L - xdata) / bw) # Remove the truncated part on the left
terms += kernel.cdf(z1 - (2 * L / bw)) # Add the reflected part
terms -= kernel.cdf((xdata - L) / bw) # Remove the truncated part from the reflection
if U < np.inf:
terms += kernel.cdf(z1 - (2 * U / bw)) # Add the reflected part
terms *= kde.weights
terms.sum(axis=-1, out=out)
out /= kde.total_weights
return out
def grid(self, kde, N=None, cut=None):
"""
        DCT-based estimation of the KDE, i.e. with reflection boundary
        conditions. This works only for a fixed bandwidth (i.e. lambdas = 1)
        and a Gaussian kernel.
For open domains, the grid is taken with 3 times the bandwidth as extra
space to remove the boundary problems.
"""
if kde.lambdas.shape:
return KDE1DMethod.grid(self, kde, N, cut)
bw = kde.bandwidth * kde.lambdas
data = kde.xdata
N = self.grid_size(N)
if cut is None:
cut = kde.kernel.cut
if kde.lower == -np.inf:
lower = np.min(data) - cut * kde.bandwidth
else:
lower = kde.lower
if kde.upper == np.inf:
upper = np.max(data) + cut * kde.bandwidth
else:
upper = kde.upper
R = upper - lower
# Histogram the data to get a crude first approximation of the density
weights = kde.weights
if not weights.shape:
weights = None
DataHist, mesh = fast_bin(data, lower, upper, N, weights=weights, cyclic=False)
DataHist = DataHist / kde.total_weights
DCTData = fftpack.dct(DataHist, norm=None)
t_star = bw / R
gp = np.arange(N) * np.pi * t_star
smth = kde.kernel.dct(gp)
# Smooth the DCTransformed data using t_star
SmDCTData = DCTData * smth
# Inverse DCT to get density
density = fftpack.idct(SmDCTData, norm=None) / (2 * R)
return mesh, density
def grid_size(self, N=None):
if N is None:
return 2**14
return 2 ** int(np.ceil(np.log2(N)))
reflection = ReflectionMethod()
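# --- Hedged example (not part of the original module) ------------------------
# A standalone sketch of the DCT pipeline used by ``ReflectionMethod.grid``
# above: bin the data, take the DCT of the histogram, damp each frequency by
# the kernel transform, then invert.  ``np.histogram`` stands in for
# ``fast_bin`` and exp(-gp**2 / 2) for the Gaussian kernel's DCT; both are
# simplifications of the module's actual implementation.
def _example_dct_smoothing(data, lower, upper, bw, N=2 ** 10):
    hist, edges = np.histogram(data, bins=N, range=(lower, upper))
    hist = hist / float(len(data))                   # probability mass per bin
    dct_data = fftpack.dct(hist, norm=None)
    R = upper - lower
    gp = np.arange(N) * np.pi * (bw / R)
    density = fftpack.idct(dct_data * np.exp(-0.5 * gp ** 2), norm=None) / (2 * R)
    mesh = 0.5 * (edges[:-1] + edges[1:])            # bin centres
    return mesh, density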
class LinearCombinationMethod(KDE1DMethod):
r"""
This method uses the linear combination correction published in [1]_.
The estimation is done with a modified kernel given by:
.. math::
\hat{K}(x;X,h,L,U) \triangleq \frac{a_2(l,u) - a_1(-u, -l) z}{a_2(l,u)a_0(l,u)
- a_1(-u,-l)^2} K(z)
where:
.. math::
z = \frac{x-X}{h} \qquad l = \frac{L-x}{h} \qquad u = \frac{U-x}{h}
"""
name = 'linear combination'
def pdf(self, kde, points, out):
if not kde.bounded:
return KDE1DMethod.pdf(self, kde, points, out)
xdata = kde.xdata
points = np.atleast_1d(points)[..., np.newaxis]
bw = kde.bandwidth * kde.lambdas
l = (kde.lower - points) / bw
u = (kde.upper - points) / bw
z = (points - xdata) / bw
kernel = kde.kernel
a0 = kernel.cdf(u) - kernel.cdf(l)
a1 = kernel.pm1(-l) - kernel.pm1(-u)
a2 = kernel.pm2(u) - kernel.pm2(l)
denom = a2 * a0 - a1 * a1
upper = a2 - a1 * z
upper /= denom
upper *= (kde.weights / bw) * kernel(z)
upper.sum(axis=-1, out=out)
out /= kde.total_weights
return out
def cdf(self, kde, points, out):
if not kde.bounded:
return KDE1DMethod.cdf(self, kde, points, out)
return self.numeric_cdf(kde, points, out)
def cdf_grid(self, kde, N=None, cut=None):
if N is None:
N = 2**10
if not kde.bounded or N <= 2**11:
return KDE1DMethod.cdf_grid(self, kde, N, cut)
return self.numeric_cdf_grid(kde, N, cut)
linear_combination = LinearCombinationMethod()
class CyclicMethod(KDE1DMethod):
r"""
This method assumes cyclic boundary conditions and works only for closed
boundaries.
The estimation is done with a modified kernel given by:
.. math::
\hat{K}(x; X, h, L, U) \triangleq K(z)
+ K\left(z - \frac{U-L}{h}\right)
+ K\left(z + \frac{U-L}{h}\right)
where:
.. math::
z = \frac{x-X}{h}
    When computing grids, if the bandwidth is constant, the result is computed
    using the FFT.
"""
name = 'cyclic'
def pdf(self, kde, points, out):
if not kde.bounded:
return KDE1DMethod.pdf(self, kde, points, out)
if not kde.closed:
raise ValueError("Cyclic boundary conditions can only be used with "
"closed or un-bounded domains.")
xdata = kde.xdata
points = np.atleast_1d(points)[..., np.newaxis]
# Make sure points are between the bounds
if any(points < kde.lower) or any(points > kde.upper):
points = points - kde.lower
points %= kde.upper - kde.lower
points += kde.lower
bw = kde.bandwidth * kde.lambdas
z = (points - xdata) / bw
L = kde.lower
U = kde.upper
span = (U - L) / bw
kernel = kde.kernel
terms = kernel(z)
terms += kernel(z + span) # Add points to the left
terms += kernel(z - span) # Add points to the right
terms *= kde.weights / bw
terms.sum(axis=-1, out=out)
out /= kde.total_weights
return out
def cdf(self, kde, points, out):
if not kde.bounded:
return KDE1DMethod.cdf(self, kde, points, out)
if not kde.closed:
raise ValueError("Cyclic boundary conditions can only be used with "
"closed or unbounded domains.")
xdata = kde.xdata
points = np.atleast_1d(points)[..., np.newaxis]
# Make sure points are between the bounds
if any(points < kde.lower) or any(points > kde.upper):
points = points - kde.lower
points %= kde.upper - kde.lower
points += kde.lower
bw = kde.bandwidth * kde.lambdas
z = (points - xdata) / bw
L = kde.lower
U = kde.upper
span = (U - L) / bw
kernel = kde.kernel
terms = kernel.cdf(z)
terms -= kernel.cdf((L - xdata) / bw) # Remove the parts left of the lower bound
terms += kernel.cdf(z + span) # Repeat on the left
terms -= kernel.cdf((L - xdata) / bw + span) # Remove parts left of lower bounds
terms += kernel.cdf(z - span) # Repeat on the right
terms *= kde.weights
terms.sum(axis=-1, out=out)
out /= kde.total_weights
return out
def grid(self, kde, N=None, cut=None):
"""
        FFT-based estimation of the KDE, i.e. with cyclic boundary
        conditions. This works only for closed domains, a fixed bandwidth
        (i.e. lambdas = 1) and a Gaussian kernel.
"""
if kde.lambdas.shape:
return KDE1DMethod.grid(self, kde, N, cut)
if kde.bounded and not kde.closed:
raise ValueError("Error, cyclic boundary conditions require "
"a closed or un-bounded domain.")
bw = kde.bandwidth * kde.lambdas
data = kde.xdata
N = self.grid_size(N)
lower = kde.lower
upper = kde.upper
if upper == np.inf:
lower = np.min(data) - cut * kde.bandwidth
upper = np.max(data) + cut * kde.bandwidth
R = upper - lower
weights = kde.weights
if not weights.shape:
weights = None
DataHist, mesh = fast_bin(data, lower, upper, N, weights=weights, cyclic=True)
DataHist = DataHist / kde.total_weights
FFTData = np.fft.rfft(DataHist)
t_star = (2 * bw / R)
gp = np.arange(len(FFTData)) * np.pi * t_star
smth = kde.kernel.fft(gp)
SmoothFFTData = FFTData * smth
density = np.fft.irfft(SmoothFFTData, len(DataHist)) / (mesh[1] - mesh[0])
return mesh, density
def cdf_grid(self, kde, N=None, cut=None):
if kde.lambdas.shape:
return KDE1DMethod.cdf_grid(self, kde, N, cut)
if not kde.closed:
raise ValueError("Error, cyclic boundary conditions require "
"a closed domain.")
N = self.grid_size(N)
if N <= 2**12:
return KDE1DMethod.cdf_grid(self, kde, N, cut)
return self.numeric_cdf_grid(kde, N, cut)
def grid_size(self, N=None):
if N is None:
return 2**14
return 2 ** int(np.ceil(np.log2(N)))
cyclic = CyclicMethod()
# ``namedtuple`` rejects field names starting with an underscore, so the former
# namedtuple('Transform', ['__call__', 'inv', 'Dinv']) raised ValueError.  A
# thin callable wrapper around a namedtuple keeps the same (fct, inv, Dinv)
# interface while remaining importable.
class Transform(namedtuple('TransformBase', ['fct', 'inv', 'Dinv'])):
    __slots__ = ()
    def __call__(self, *args, **kwargs):
        return self.fct(*args, **kwargs)
def _inverse(x, out=None):
return np.divide(1, x, out)
LogTransform = Transform(np.log, np.exp, np.exp)
ExpTransform = Transform(np.exp, np.log, _inverse)
def transform_distribution(xs, ys, Dinv, out):
r"""
    Transform a distribution into another one by a change of variable.
:param ndarray xs: Evaluation points of the distribution
:param ndarray ys: Distribution value on the points xs
:param func Dinv: Function evaluating the derivative of the inverse transformation function
:param ndarray out: Array in which to store the result
:rtype: ndarray
    :returns: The variable ``out``, updated with the transformed distribution
Given a random variable :math:`X` of distribution :math:`f_X`, the random
variable :math:`Y = g(X)` has a distribution :math:`f_Y` given by:
.. math::
f_Y(y) = \left| \frac{1}{g'(g^{-1}(y))} \right| \cdot f_X(g^{-1}(y))
"""
Dinv(xs, out=out)
np.abs(out, out=out)
_inverse(out, out=out)
np.multiply(out, ys, out=out)
return out
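# --- Hedged example (not part of the original module) ------------------------
# A standalone check of ``transform_distribution``: if X = log(Y) is standard
# normal, the density of Y is f_X(log y) / y, i.e. the log-normal density.
# ``scipy.stats`` is used only as a reference; the names are illustrative.
def _example_transform_distribution():
    from scipy import stats
    ys = np.array([0.5, 1.0, 2.0])
    xs = np.log(ys)                                  # x = g^{-1}(y) for the log transform
    fx = stats.norm.pdf(xs)                          # density of X at those points
    out = np.empty_like(fx)
    transform_distribution(xs, fx, LogTransform.Dinv, out)
    return out, stats.lognorm.pdf(ys, 1.0)           # the two should agree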
def create_transform(obj, inv=None, Dinv=None):
"""
Create a transform object.
    :param fun obj: This can be either a simple function, or a function object with 'inv' and/or 'Dinv' attributes
containing the inverse function and its derivative (respectively)
:param fun inv: If provided, inverse of the main function
:param fun Dinv: If provided, derivative of the inverse function
:rtype: Transform
:returns: A transform object with function, inverse and derivative of the inverse
The inverse function must be provided, either as argument or as attribute to the object. The derivative of the
inverse will be estimated numerically if not provided.
:Note: All the functions should accept an ``out`` argument to store the result.
"""
if isinstance(obj, Transform):
return obj
fct = obj.__call__
if inv is None:
if not hasattr(obj, 'inv'):
raise AttributeError("Error, transform object must have a 'inv' "
"attribute or you must specify the 'inv' argument")
inv = obj.inv
if Dinv is None:
if hasattr(obj, 'Dinv'):
Dinv = obj.Dinv
else:
@numpy_trans_idx
def Dinv(x):
dx = x * 1e-9
dx[x == 0] = np.min(dx[x != 0])
return (inv(x + dx) - inv(x - dx)) / (2 * dx)
return Transform(fct, inv, Dinv)
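# --- Hedged example (not part of the original module) ------------------------
# A sketch of ``create_transform`` with a plain function: the inverse is given
# explicitly and the derivative of the inverse falls back to the numeric
# estimate defined above.  The square-root/square pair is only an illustration.
def _example_create_transform():
    tr = create_transform(np.sqrt, inv=np.square)    # Dinv estimated numerically
    x = np.array([1.0, 4.0, 9.0])
    return tr.inv(tr(x))                             # round-trips back to x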
class _fakeKDE(object):
def __init__(self, method):
self.method = method.method
class TransformKDE1DMethod(KDE1DMethod):
r"""
Compute the Kernel Density Estimate of a dataset, transforming it first to
a domain where distances are "more meaningful".
Often, KDE is best estimated in a different domain. This object takes
a KDE1D object (or one compatible), and a transformation function.
Given a random variable :math:`X` of distribution :math:`f_X`, the random
variable :math:`Y = g(X)` has a distribution :math:`f_Y` given by:
.. math::
f_Y(y) = \left| \frac{1}{g'(g^{-1}(y))} \right| \cdot f_X(g^{-1}(y))
    In our terms, :math:`Y` is the random variable the user is interested in,
and :math:`X` the random variable we can estimate using the KDE. In this
case, :math:`g` is the transform from :math:`Y` to :math:`X`.
So to estimate the distribution on a set of points given in :math:`x`, we
need a total of three functions:
    - Direct function: transform from the original space to the one in
        which the KDE will be performed (i.e. :math:`g^{-1}: y \mapsto x`)
    - Inverse function: transform from the KDE space to the original one
        (i.e. :math:`g: x \mapsto y`)
    - Derivative of the inverse function
If the derivative is not provided, it will be estimated numerically.
:param trans: Either a simple function, or a function object with
attributes `inv` and `Dinv` to use in case they are not provided as
arguments. The helper :py:func:`create_transform` will provide numeric
approximation of the derivative if required.
:param method: instance of KDE1DMethod used in the transformed domain.
Default is :py:class:`pyqt_fit.kde_methods.KDE1DMethod`
    :param inv: Inverse of the function. If not provided, `trans` must have
        it as an attribute.
    :param Dinv: Derivative of the inverse function.
:Note: all given functions should accept an optional ``out`` argument to
get a pre-allocated array to store its result. Also the ``out``
parameter may be one of the input argument.
"""
def __init__(self, trans, method=None, inv=None, Dinv=None):
self.trans = create_transform(trans, inv, Dinv)
if method is None:
method = KDE1DMethod()
self.method = method
def fit(self, kde):
"""
Method called by the KDE1D object right after fitting to allow for
one-time calculation.
        This method copies, and transforms, the various attributes of the KDE.
:param pyqt_fit.kde.KDE1D kde: KDE object being fitted
"""
fake_kde = _fakeKDE(self)
fake_kde.lower = self.trans(kde.lower)
fake_kde.upper = self.trans(kde.upper)
fake_kde.xdata = self.trans(kde.xdata)
copy_attrs = [ 'weights', 'lambdas', 'kernel'
, 'bandwidth', 'covariance'
, 'bandwidth_function'
, 'covariance_function'
, 'total_weights' ]
for attr in copy_attrs:
setattr(fake_kde, attr, getattr(kde, attr))
# Compute the bandwidth for the fake KDE and update the KDE itself
bw, cov = compute_bandwidth(fake_kde)
fake_kde.bandwidth = kde.bandwidth = bw
fake_kde.covariance = kde.covariance = cov
self.fake_kde = fake_kde
def pdf(self, kde, points, out):
trans = self.trans
pts = trans(points)
self.method(self.fake_kde, pts, out)
return transform_distribution(pts, out, trans.Dinv, out=out)
def grid(self, kde, N=None, cut=None):
xs, ys = self.method.grid(self.fake_kde, N, cut)
trans = self.trans
transform_distribution(xs, ys, trans.Dinv, out=ys)
trans.inv(xs, out=xs)
return xs, ys
def cdf(self, kde, points, out):
return self.method.cdf(self.fake_kde, self.trans(points), out)
def cdf_grid(self, kde, N=None, cut=None):
xs, ys = self.method.cdf_grid(self.fake_kde, N, cut)
self.trans.inv(xs, out=xs)
return xs, ys
def sf(self, kde, points, out):
return self.method.sf(self.fake_kde, self.trans(points), out)
def sf_grid(self, kde, N=None, cut=None):
xs, ys = self.method.sf_grid(self.fake_kde, N, cut)
return self.trans.inv(xs), ys
def icdf(self, kde, points, out):
self.method.icdf(self.fake_kde, points, out)
self.trans.inv(out, out=out)
return out
def icdf_grid(self, kde, N=None, cut=None):
xs, ys = self.method.icdf_grid(self.fake_kde, N, cut)
self.trans.inv(ys, out=ys)
return xs, ys
def isf(self, kde, points, out):
self.method.isf(self.fake_kde, points, out)
self.trans.inv(out, out=out)
return out
def isf_grid(self, kde, N=None, cut=None):
xs, ys = self.method.isf_grid(self.fake_kde, N, cut)
self.trans.inv(ys, out=ys)
return xs, ys
def transformKDE1D(trans, method=None, inv=None, Dinv=None):
"""
Creates an instance of :py:class:`TransformKDE1DMethod`
"""
return TransformKDE1DMethod(trans, method, inv, Dinv)
default_method = reflection
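# --- Hedged example (not part of the original module) ------------------------
# A sketch of estimating a density supported on (0, +inf) by working in
# log-space through ``transformKDE1D``.  The ``KDE1D`` import path, its
# ``lower``/``method`` keywords and its ``fit()`` entry point are assumptions
# based on ``pyqt_fit.kde``; adjust them to the actual package layout.
def _example_log_space_kde():
    from kde import KDE1D                        # assumed sibling module
    data = np.random.lognormal(size=200)         # positive data
    log_method = transformKDE1D(LogTransform)    # estimate in x = log(y) space
    est = KDE1D(data, lower=0.0, method=log_method)
    est.fit()                                    # expected to call log_method.fit(est)
    xs, ys = log_method.grid(est)                # back-transformed grid and density
    return xs, ys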
| 38,785 | 31.786137 | 117 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/kernels.py | r"""
:Author: Pierre Barbier de Reuille <[email protected]>
Module providing a set of kernels for use with either the :py:mod:`pyqt_fit.kde` or the :py:mod:`kernel_smoothing`
module.
Kernels should be created following the template of :py:class:`Kernel1D` below.
"""
import numpy as np
from scipy import fftpack, integrate
from scipy.special import erf
import _kernels_py
from cyth import HAS_CYTHON
from utils import make_ufunc
kernels_imp = None
def usePython():
"""
Force the use of the Python implementation of the kernels
"""
global kernels_imp
    from . import _kernels_py
kernels_imp = _kernels_py
def useCython():
"""
Force the use of the Cython implementation of the kernels, if available
"""
global kernels_imp
if HAS_CYTHON:
import _kernels
kernels_imp = _kernels
if HAS_CYTHON:
useCython()
else:
usePython()
print("Warning, cannot import Cython kernel functions, "
"pure python functions will be used instead")
S2PI = np.sqrt(2 * np.pi)
S2 = np.sqrt(2)
class Kernel1D(object):
r"""
A 1D kernel :math:`K(z)` is a function with the following properties:
.. math::
\begin{array}{rcl}
\int_\mathbb{R} K(z) &=& 1 \\
\int_\mathbb{R} zK(z)dz &=& 0 \\
\int_\mathbb{R} z^2K(z) dz &<& \infty \quad (\approx 1)
\end{array}
Which translates into the function should have:
- a sum of 1 (i.e. a valid density of probability);
- an average of 0 (i.e. centered);
      - a finite variance. It is even recommended that the variance is close to 1 to give a uniform meaning to the
bandwidth.
.. py:attribute:: cut
:type: float
        Cutting point after which there is a negligible part of the probability. More formally, if :math:`c` is the
cutting point:
.. math::
\int_{-c}^c p(x) dx \approx 1
.. py:attribute:: lower
:type: float
Lower bound of the support of the PDF. Formally, if :math:`l` is the lower bound:
.. math::
\int_{-\infty}^l p(x)dx = 0
.. py:attribute:: upper
:type: float
Upper bound of the support of the PDF. Formally, if :math:`u` is the upper bound:
.. math::
\int_u^\infty p(x)dx = 0
"""
cut = 3.
lower = -np.inf
upper = np.inf
def pdf(self, z, out=None):
r"""
        Returns the density of the kernel on the points `z`. This is the function :math:`K(z)` itself.
:param ndarray z: Array of points to evaluate the function on. The method should accept any shape of array.
:param ndarray out: If provided, it will be of the same shape as `z` and the result should be stored in it.
            Ideally, it should be used for as many intermediate computations as possible.
"""
raise NotImplementedError()
def __call__(self, z, out=None):
"""
Alias for :py:meth:`Kernel1D.pdf`
"""
return self.pdf(z, out=out)
def cdf(self, z, out=None):
r"""
Returns the cumulative density function on the points `z`, i.e.:
.. math::
K_0(z) = \int_{-\infty}^z K(t) dt
"""
z = np.asfarray(z)
try:
            comp_cdf = self.__comp_cdf
        except AttributeError:
            def pdf(x):
                return self.pdf(np.atleast_1d(x))
            lower = self.lower
            upper = self.upper
            @make_ufunc()
            def comp_cdf(x):
                if x < lower:
                    return 0
                if x > upper:
                    x = upper
                return integrate.quad(pdf, lower, x)[0]
            self.__comp_cdf = comp_cdf
        if out is None:
            out = np.empty(z.shape, dtype=float)
        return comp_cdf(z, out=out)
def pm1(self, z, out=None):
r"""
Returns the first moment of the density function, i.e.:
.. math::
K_1(z) = \int_{-\infty}^z z K(t) dt
"""
z = np.asfarray(z)
try:
comp_pm1 = self.__comp_pm1
except AttributeError:
lower = self.lower
upper = self.upper
def pm1(x):
return x * self.pdf(np.atleast_1d(x))
@make_ufunc()
def comp_pm1(x):
if x <= lower:
return 0
if x > upper:
x = upper
return integrate.quad(pm1, lower, x)[0]
self.__comp_pm1 = comp_pm1
if out is None:
out = np.empty(z.shape, dtype=float)
return comp_pm1(z, out=out)
def pm2(self, z, out=None):
r"""
Returns the second moment of the density function, i.e.:
.. math::
K_2(z) = \int_{-\infty}^z z^2 K(t) dt
"""
z = np.asfarray(z)
try:
comp_pm2 = self.__comp_pm2
except AttributeError:
lower = self.lower
upper = self.upper
def pm2(x):
return x * x * self.pdf(np.atleast_1d(x))
@make_ufunc()
def comp_pm2(x):
if x <= lower:
return 0
if x > upper:
x = upper
return integrate.quad(pm2, lower, x)[0]
self.__comp_pm2 = comp_pm2
if out is None:
out = np.empty(z.shape, dtype=float)
return comp_pm2(z, out=out)
def fft(self, z, out=None):
"""
FFT of the kernel on the points of ``z``. The points will always be provided as a grid with :math:`2^n` points,
representing the whole frequency range to be explored. For convenience, the second half of the points will be
provided as negative values.
"""
z = np.asfarray(z)
t_star = 2*np.pi/(z[1]-z[0])**2 / len(z)
dz = t_star * (z[1] - z[0])
return fftpack.fft(self(z * t_star) * dz).real
def dct(self, z, out=None):
r"""
DCT of the kernel on the points of ``z``. The points will always be provided as a grid with :math:`2^n` points,
representing the whole frequency range to be explored.
"""
z = np.asfarray(z)
a1 = z[1] - z[0]
gp = (z / a1 + 0.5) * np.pi / (len(z) * a1)
return fftpack.dct(self(gp) * (gp[1] - gp[0])).real
class normal_kernel1d(Kernel1D):
"""
1D normal density kernel with extra integrals for 1D bounded kernel estimation.
"""
def pdf(self, z, out=None):
r"""
Return the probability density of the function. The formula used is:
.. math::
            \phi(z) = \frac{1}{\sqrt{2\pi}}e^{-\frac{z^2}{2}}
        :param ndarray z: Array of any shape
        :returns: an array of shape identical to ``z``
"""
return kernels_imp.norm1d_pdf(z, out)
def _pdf(self, z, out=None):
"""
Full-python implementation of :py:func:`normal_kernel1d.pdf`
"""
z = np.asarray(z)
if out is None:
out = np.empty(z.shape, dtype=z.dtype)
np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
out /= S2PI
return out
__call__ = pdf
def fft(self, z, out=None):
"""
Returns the FFT of the normal distribution
"""
z = np.asfarray(z)
out = np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
return out
def dct(self, z, out=None):
"""
Returns the DCT of the normal distribution
"""
z = np.asfarray(z)
out = np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
return out
def cdf(self, z, out=None):
r"""
Cumulative density of probability. The formula used is:
.. math::
\text{cdf}(z) \triangleq \int_{-\infty}^z \phi(z)
dz = \frac{1}{2}\text{erf}\left(\frac{z}{\sqrt{2}}\right) + \frac{1}{2}
"""
return kernels_imp.norm1d_cdf(z, out)
def _cdf(self, z, out=None):
"""
Full-python implementation of :py:func:`normal_kernel1d.cdf`
"""
z = np.asarray(z)
if out is None:
out = np.empty(z.shape, dtype=z.dtype)
np.divide(z, S2, out)
erf(out, out)
out *= 0.5
out += 0.5
return out
def pm1(self, z, out=None):
r"""
Partial moment of order 1:
.. math::
\text{pm1}(z) \triangleq \int_{-\infty}^z z\phi(z) dz
= -\frac{1}{\sqrt{2\pi}}e^{-\frac{z^2}{2}}
"""
return kernels_imp.norm1d_pm1(z, out)
def _pm1(self, z, out=None):
"""
Full-python implementation of :py:func:`normal_kernel1d.pm1`
"""
z = np.asarray(z)
if out is None:
out = np.empty(z.shape, dtype=z.dtype)
np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
out /= -S2PI
return out
def pm2(self, z, out=None):
r"""
Partial moment of order 2:
.. math::
\text{pm2}(z) \triangleq \int_{-\infty}^z z^2\phi(z) dz
            = \frac{1}{2}\text{erf}\left(\frac{z}{\sqrt{2}}\right) - \frac{z}{\sqrt{2\pi}}
e^{-\frac{z^2}{2}} + \frac{1}{2}
"""
return kernels_imp.norm1d_pm2(z, out)
def _pm2(self, z, out=None):
"""
Full-python implementation of :py:func:`normal_kernel1d.pm2`
"""
z = np.asarray(z, dtype=float)
if out is None:
out = np.empty(z.shape)
np.divide(z, S2, out)
erf(out, out)
out /= 2
if z.shape:
zz = np.isfinite(z)
sz = z[zz]
out[zz] -= sz * np.exp(-0.5 * sz * sz) / S2PI
elif np.isfinite(z):
out -= z * np.exp(-0.5 * z * z) / S2PI
out += 0.5
return out
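# --- Hedged example (not part of the original module) ------------------------
# A small self-check sketch for ``normal_kernel1d``: the pdf should integrate to
# ~1 over [-cut, cut], cdf(0) should be 0.5 and pm2(cut) should be close to 1
# (unit variance).  It assumes the kernel implementations allocate their own
# output when ``out`` is not given, as the pure-Python versions above do.
def _example_check_normal_kernel():
    k = normal_kernel1d()
    xs = np.linspace(-k.cut, k.cut, 2 ** 12)
    mass = np.trapz(k.pdf(xs), xs)               # ~0.997 over [-3, 3]
    return mass, k.cdf(np.array([0.0])), k.pm2(np.array([float(k.cut)]))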
class normal_kernel(object):
"""
Returns a function-object for the PDF of a Normal kernel of variance
identity and average 0 in dimension ``dim``.
"""
def __new__(klass, dim):
"""
The __new__ method will automatically select :py:class:`normal_kernel1d` if dim is 1.
"""
if dim == 1:
return normal_kernel1d()
return object.__new__(klass, dim)
def __init__(self, dim):
self.factor = 1 / np.sqrt(2 * np.pi) ** dim
def pdf(self, xs):
"""
Return the probability density of the function.
:param ndarray xs: Array of shape (D,N) where D is the dimension of the kernel
and N the number of points.
:returns: an array of shape (N,) with the density on each point of ``xs``
"""
xs = np.atleast_2d(xs)
return self.factor * np.exp(-0.5 * np.sum(xs * xs, axis=0))
__call__ = pdf
class tricube(Kernel1D):
r"""
    Return the kernel corresponding to a tri-cube distribution.
    The tri-cube function is given by:
.. math::
f_r(x) = \left\{\begin{array}{ll}
\left(1-|x|^3\right)^3 & \text{, if } x \in [-1;1]\\
0 & \text{, otherwise}
\end{array}\right.
As :math:`f_r` is not a probability and is not of variance 1, we use a normalized function:
.. math::
f(x) = a b f_r(ax)
a = \sqrt{\frac{35}{243}}
b = \frac{70}{81}
"""
def pdf(self, z, out=None):
return kernels_imp.tricube_pdf(z, out)
__call__ = pdf
upper = 1. / _kernels_py.tricube_width
lower = -upper
cut = upper
def cdf(self, z, out=None):
r"""
CDF of the distribution:
.. math::
\text{cdf}(x) = \left\{\begin{array}{ll}
\frac{1}{162} {\left(60 (ax)^{7} - 7 {\left(2 (ax)^{10} + 15 (ax)^{4}\right)}
\mathrm{sgn}\left(ax\right) + 140 ax + 81\right)} & \text{, if}x\in[-1/a;1/a]\\
0 & \text{, if} x < -1/a \\
1 & \text{, if} x > 1/a
\end{array}\right.
"""
return kernels_imp.tricube_cdf(z, out)
def pm1(self, z, out=None):
r"""
Partial moment of order 1:
.. math::
\text{pm1}(x) = \left\{\begin{array}{ll}
\frac{7}{3564a} {\left(165 (ax)^{8} - 8 {\left(5 (ax)^{11} + 33 (ax)^{5}\right)}
\mathrm{sgn}\left(ax\right) + 220 (ax)^{2} - 81\right)}
& \text{, if} x\in [-1/a;1/a]\\
0 & \text{, otherwise}
\end{array}\right.
"""
return kernels_imp.tricube_pm1(z, out)
def pm2(self, z, out=None):
r"""
Partial moment of order 2:
.. math::
\text{pm2}(x) = \left\{\begin{array}{ll}
\frac{35}{486a^2} {\left(4 (ax)^{9} + 4 (ax)^{3} - {\left((ax)^{12} + 6 (ax)^{6}\right)}
\mathrm{sgn}\left(ax\right) + 1\right)} & \text{, if} x\in[-1/a;1/a] \\
0 & \text{, if } x < -1/a \\
1 & \text{, if } x > 1/a
\end{array}\right.
"""
return kernels_imp.tricube_pm2(z, out)
class Epanechnikov(Kernel1D):
r"""
1D Epanechnikov density kernel with extra integrals for 1D bounded kernel estimation.
"""
def pdf(self, xs, out=None):
r"""
The PDF of the kernel is usually given by:
.. math::
f_r(x) = \left\{\begin{array}{ll}
\frac{3}{4} \left(1-x^2\right) & \text{, if} x \in [-1:1]\\
0 & \text{, otherwise}
\end{array}\right.
As :math:`f_r` is not of variance 1 (and therefore would need adjustments for
the bandwidth selection), we use a normalized function:
.. math::
            f(x) = \frac{1}{\sqrt{5}}f_r\left(\frac{x}{\sqrt{5}}\right)
"""
return kernels_imp.epanechnikov_pdf(xs, out)
__call__ = pdf
upper = 1. / _kernels_py.epanechnikov_width
lower = -upper
cut = upper
def cdf(self, xs, out=None):
r"""
CDF of the distribution. The CDF is defined on the interval :math:`[-\sqrt{5}:\sqrt{5}]` as:
.. math::
\text{cdf}(x) = \left\{\begin{array}{ll}
\frac{1}{2} + \frac{3}{4\sqrt{5}} x - \frac{3}{20\sqrt{5}}x^3
& \text{, if } x\in[-\sqrt{5}:\sqrt{5}] \\
0 & \text{, if } x < -\sqrt{5} \\
1 & \text{, if } x > \sqrt{5}
\end{array}\right.
"""
return kernels_imp.epanechnikov_cdf(xs, out)
def pm1(self, xs, out=None):
r"""
First partial moment of the distribution:
.. math::
\text{pm1}(x) = \left\{\begin{array}{ll}
-\frac{3\sqrt{5}}{16}\left(1-\frac{2}{5}x^2+\frac{1}{25}x^4\right)
& \text{, if } x\in[-\sqrt{5}:\sqrt{5}] \\
0 & \text{, otherwise}
\end{array}\right.
"""
return kernels_imp.epanechnikov_pm1(xs, out)
def pm2(self, xs, out=None):
r"""
Second partial moment of the distribution:
.. math::
\text{pm2}(x) = \left\{\begin{array}{ll}
\frac{5}{20}\left(2 + \frac{1}{\sqrt{5}}x^3 - \frac{3}{5^{5/2}}x^5 \right)
& \text{, if } x\in[-\sqrt{5}:\sqrt{5}] \\
0 & \text{, if } x < -\sqrt{5} \\
1 & \text{, if } x > \sqrt{5}
\end{array}\right.
"""
return kernels_imp.epanechnikov_pm2(xs, out)
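# --- Hedged example (not part of the original module) ------------------------
# A standalone sketch of why the Epanechnikov kernel is rescaled: the raw kernel
# 3/4 (1 - x^2) on [-1, 1] has variance 1/5, so evaluating it at x/sqrt(5) (and
# renormalizing) gives unit variance, keeping the bandwidth comparable across
# kernels.  The numeric integration below only illustrates that property.
def _example_epanechnikov_variance():
    xs = np.linspace(-1.0, 1.0, 2 ** 12)
    raw_var = np.trapz(xs ** 2 * 0.75 * (1.0 - xs ** 2), xs)    # ~0.2
    k = Epanechnikov()
    zs = np.linspace(k.lower, k.upper, 2 ** 12)
    unit_var = np.trapz(zs ** 2 * k.pdf(zs), zs)                # ~1.0
    return raw_var, unit_var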
class Epanechnikov_order4(Kernel1D):
r"""
Order 4 Epanechnikov kernel. That is:
.. math::
K_{[4]}(x) = \frac{3}{2} K(x) + \frac{1}{2} x K'(x) = -\frac{15}{8}x^2+\frac{9}{8}
where :math:`K` is the non-normalized Epanechnikov kernel.
"""
upper = 1
lower = -upper
cut = upper
def pdf(self, xs, out=None):
return kernels_imp.epanechnikov_o4_pdf(xs, out)
__call__ = pdf
def cdf(self, xs, out=None):
return kernels_imp.epanechnikov_o4_cdf(xs, out)
def pm1(self, xs, out=None):
return kernels_imp.epanechnikov_o4_pm1(xs, out)
def pm2(self, xs, out=None):
return kernels_imp.epanechnikov_o4_pm2(xs, out)
class normal_order4(Kernel1D):
r"""
Order 4 Normal kernel. That is:
.. math::
\phi_{[4]}(x) = \frac{3}{2} \phi(x) + \frac{1}{2} x \phi'(x) = \frac{1}{2}(3-x^2)\phi(x)
where :math:`\phi` is the normal kernel.
"""
lower = -np.inf
upper = np.inf
cut = 3.
def pdf(self, xs, out=None):
return kernels_imp.normal_o4_pdf(xs, out)
__call__ = pdf
def cdf(self, xs, out=None):
return kernels_imp.normal_o4_cdf(xs, out)
def pm1(self, xs, out=None):
return kernels_imp.normal_o4_pm1(xs, out)
def pm2(self, xs, out=None):
return kernels_imp.normal_o4_pm2(xs, out)
kernels1D = [normal_kernel1d, tricube, Epanechnikov, Epanechnikov_order4, normal_order4]
kernelsnD = [normal_kernel]
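# --- Hedged example (not part of the original module) ------------------------
# A sketch of the shared 1D kernel API: every class listed in ``kernels1D``
# exposes ``pdf``/``cdf``/``pm1``/``pm2`` plus ``lower``/``upper``/``cut``, so
# bounded KDE methods can treat them interchangeably.  It assumes each
# implementation allocates its own output when ``out`` is omitted.
def _example_kernel_api():
    z = np.array([-0.5, 0.0, 0.5])
    return dict((cls.__name__, cls().pdf(z)) for cls in kernels1D)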
| 17,134 | 27.228995 | 120 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/pyqt_fit1d.py | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import re
import sys
import traceback
from PyQt4 import QtGui, QtCore, uic
import matplotlib
from PyQt4.QtCore import pyqtSignature, Qt
from PyQt4.QtGui import QMessageBox
from numpy import nan, array, ma, arange
from path import path
from pylab import close as close_figure
from preprocessing import functions, residuals, plot_fit
from py_qt import bootstrap
from .compat import unicode_csv_reader as csv_reader
from .compat import user_text, CSV_READ_FLAGS
from .curve_fitting import CurveFitting
CIsplitting = re.compile(r'[;, :-]')
def get_args(*a, **k):
return a, k
def find(array):
return arange(len(array))[array]
class ParametersModel(QtCore.QAbstractTableModel):
def __init__(self, data, function, res, idxX, idxY, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
values = data[:, [idxX, idxY]]
values = values.data[values.mask.sum(axis=1) == 0]
self.valuesX = values[:, 0]
self.valuesY = values[:, 1]
self.fct = function
self.res = res
self.parm_names = function.args
self.parm_values = list(function.init_args(self.valuesX, self.valuesY))
self.fixed = [False] * len(function.args)
def rowCount(self, idx=QtCore.QModelIndex()):
return len(self.parm_names)
def columnCount(self, idx=QtCore.QModelIndex()):
return 3
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return
if orientation == Qt.Horizontal:
if section == 0:
return "Parameter"
elif section == 1:
return "Value"
elif section == 2:
return "Fixed"
def flags(self, index):
if index.column() == 0:
return Qt.ItemIsEnabled
elif index.column() == 1:
return Qt.ItemIsEnabled | Qt.ItemIsEditable
elif index.column() == 2:
return Qt.ItemIsEnabled | Qt.ItemIsUserCheckable
return Qt.NoItemFlags
def data(self, index, role=Qt.DisplayRole):
r = index.row()
c = index.column()
if 0 <= r < len(self.parm_names) and 0 <= c < 3:
if c == 0:
if role == Qt.DisplayRole:
return self.parm_names[r]
elif c == 1:
if role == Qt.DisplayRole:
return "%g" % (self.parm_values[r],)
elif role == Qt.EditRole:
return "%g" % (self.parm_values[r],)
elif c == 2:
if role == Qt.CheckStateRole:
return self.fixed[r]
def setData(self, index, value, role=Qt.DisplayRole):
r = index.row()
c = index.column()
if 0 <= r < len(self.parm_names) and 0 < c < 3:
if c == 1 and role == Qt.EditRole:
try:
f = float(value)
self.parm_values[r] = f
self.dataChanged.emit(index, index)
return True
except ValueError:
print("Error, cannot convert value to double")
elif c == 2 and role == Qt.CheckStateRole:
self.fixed[r] = value
self.dataChanged.emit(index, index)
return True
return False
class QtFitDlg(QtGui.QDialog):
def __init__(self, *args, **kwords):
QtGui.QDialog.__init__(self, *args, **kwords)
p = (path(__file__).dirname() / 'qt_fit.ui').abspath()
uic.loadUi(p, self)
if sys.platform != "darwin":
self.selectInputFile.setMaximumWidth(32)
self.selectOutputFile.setMaximumWidth(32)
self.validator = QtGui.QDoubleValidator()
self.xMin.setValidator(self.validator)
self.xMax.setValidator(self.validator)
self.buttonBox.addButton("Plot", QtGui.QDialogButtonBox.ApplyRole)
self.buttonBox.addButton("Close Plots", QtGui.QDialogButtonBox.ResetRole)
self.init()
def init(self):
self._fct = None
self._parameters = None
self._res = None
self._data = None
self._fieldX = None
self._fieldY = None
self._input = None
self._output = None
self._CI = None
self._scale = None
self._header = None
self._CIchanged = False
self._write = False
self.setData(None, None)
residuals.load()
functions.load()
list_fcts = sorted(functions.names())
list_res = sorted(residuals.names())
self.function.clear()
self.function.addItems(list_fcts)
self.residuals.clear()
self.residuals.addItems(list_res)
self.residuals.setCurrentIndex(list_res.index("Standard"))
self.on_computeCI_toggled(self.computeCI.isChecked())
@pyqtSignature("const QString&")
def on_function_currentIndexChanged(self, txt):
print("New function: {}".format(txt))
self.fct = functions.get(str(txt))
@pyqtSignature("const QString&")
def on_residuals_currentIndexChanged(self, txt):
print("New residual: {}".format(txt))
self.res = residuals.get(str(txt))
@pyqtSignature("")
def on_selectInputFile_clicked(self):
filename = QtGui.QFileDialog.getOpenFileName(self, "Open CSV file",
filter="CSV file (*.csv);;All Files (*.*)")
if filename:
self.input = filename
@pyqtSignature("")
def on_selectOutputFile_clicked(self):
filename = QtGui.QFileDialog.getSaveFileName(self, "Save CSV file",
filter="CSV file (*.csv);;All Files (*.*)")
if filename:
self.output = filename
@pyqtSignature("const QString&")
def on_fieldXbox_currentIndexChanged(self, txt):
self.fieldX = txt
@pyqtSignature("const QString&")
def on_fieldYbox_currentIndexChanged(self, txt):
self.fieldY = txt
def _getFct(self):
return self._fct
def _setFct(self, f):
if f != self._fct:
self._fct = f
if self.function.currentText() != f.name:
self.function.setCurrentIndex(self.function.findText(f.name))
if self.input:
self.updateParameters()
fct = property(_getFct, _setFct)
def _getRes(self):
return self._res
def _setRes(self, res):
if res != self._res:
self._res = res
if self.residuals.currentText() != res.name:
self.residuals.setCurrentIndex(self.residuals.findText(res.name))
res = property(_getRes, _setRes)
@pyqtSignature("const QString&")
def on_inputFile_textChanged(self, txt):
txt = path(txt)
self.input = txt
def _getInput(self):
return self._input
def _setInput(self, txt):
txt = path(txt)
if txt != self._input and txt.isfile():
try:
data = None
header = None
with open(txt, CSV_READ_FLAGS) as f:
try:
r = csv_reader(f)
header = next(r)
if len(header) < 2:
QMessageBox.critical(self, "Error reading CSV file",
"Error, the file doesn't have at least 2 columns")
return
data = []
for line in r:
if not line:
break
data.append([float(field) if field else nan for field in line])
max_length = max(len(l) for l in data)
data = array([line + [nan] * (max_length - len(line)) for line in data],
dtype=float)
data = ma.masked_invalid(data)
except Exception as ex:
QMessageBox.critical(self, "Error reading CSV file", str(ex))
data = None
header = None
if data is not None:
self._input = txt
print("input: {}".format(self._input))
if self._input != self.inputFile.text():
self.inputFile.setText(self._input)
self.setData(header, data)
except IOError:
pass
input = property(_getInput, _setInput)
def setData(self, header, data):
if header is None or data is None:
self._header = None
self._data = None
self.parameters.setModel(None)
self.param_model = None
else:
self._header = header
self._data = data
self.fieldXbox.clear()
self.fieldXbox.addItems(self._header)
self.fieldYbox.clear()
self.fieldYbox.addItems(self._header)
self.fieldX = self._header[0]
self.fieldY = self._header[1]
def _getOutput(self):
return self._output
def _setOutput(self, txt):
txt = path(txt)
if self._output != txt:
if txt and not txt.endswith(".csv"):
txt += ".csv"
self._output = txt
if self._output != self.outputFile.text():
self.outputFile.setText(self._output)
output = property(_getOutput, _setOutput)
@pyqtSignature("const QString&")
def on_outputFile_textChanged(self, txt):
self.output = txt
def _getWriteResult(self):
return self._write
def _setWriteResult(self, on):
on = bool(on)
if on != self._write:
self._write = on
self.writeOutput.setChecked(on)
writeResult = property(_getWriteResult, _setWriteResult)
@pyqtSignature("bool")
def on_writeOutput_toggled(self, on):
self.writeResult = on
def _getHeader(self):
return self._header
header = property(_getHeader)
def _getFieldX(self):
return self._fieldX
def _setFieldX(self, txt):
if txt != self._fieldX and txt in self.header:
self._fieldX = txt
if txt != self.fieldXbox.currentText():
self.fieldXbox.setCurrentIndex(self.fieldXbox.findText(txt))
self.updateParameters()
fieldX = property(_getFieldX, _setFieldX)
def _getFieldY(self):
return self._fieldY
def _setFieldY(self, txt):
if txt != self._fieldY and txt in self.header:
self._fieldY = txt
if txt != self.fieldYbox.currentText():
self.fieldYbox.setCurrentIndex(self.fieldYbox.findText(txt))
self.updateParameters()
fieldY = property(_getFieldY, _setFieldY)
def updateParameters(self):
if self._data is not None and \
self.fct is not None and \
self.res is not None and \
self.fieldX is not None \
and self.fieldY is not None:
idxX = self.header.index(user_text(self.fieldX))
idxY = self.header.index(user_text(self.fieldY))
self.param_model = ParametersModel(self._data, self.fct, self.res, idxX, idxY)
self.parameters.setModel(self.param_model)
minx = self._data[:, idxX].min()
maxx = self._data[:, idxX].max()
self.xMin.setText(str(minx))
self.xMax.setText(str(maxx))
#elif self._data is None:
#print "Missing data"
#elif self.function is None:
#print "Missing function"
#elif self.res is None:
#print "Missing res"
#elif self.fieldX is None:
#print "Missing fieldX"
#elif self.fieldY is None:
#print "Missing fieldY"
@pyqtSignature("bool")
def on_computeCI_toggled(self, on):
if on:
meth = self.CImethod.currentText()
ints = [float(f) for f in CIsplitting.split(user_text(self.CIvalues.text())) if f]
self.CI = [meth, ints]
else:
self.CI = None
@pyqtSignature("const QString&")
def on_CIvalues_textEdited(self, txt):
self._CIchanged = True
@pyqtSignature("")
def on_CIvalues_editingFinished(self):
if self.CI:
try:
ints = [float(f) for f in CIsplitting.split(user_text(self.CIvalues.text())) if f]
self.setIntervals(ints)
except:
pass
if self.CI[1]:
self.CIvalues.setText(";".join("{:g}".format(f) for f in self.CI[1]))
else:
self.CIvalues.setText("")
self._CIchanged = False
@pyqtSignature("const QString&")
def on_CImethod_currentIndexChanged(self, txt):
if self.CI:
meth = user_text(txt)
self.setCIMethod(meth)
def _getCI(self):
return self._CI
def _setCI(self, val):
if val is not None:
val = (user_text(val[0]), [float(f) for f in val[1]])
if val != self._CI:
self._CI = val
if val is not None:
meth, ints = val
if meth != self.CImethod.currentText():
self.CImethod.setCurrentIndex(self.CImethod.findText(meth))
self.CIvalues.setText(";".join("{:g}".format(f) for f in ints))
CI = property(_getCI, _setCI)
def setCIMethod(self, meth):
if meth != self._CI[0]:
self._CI = (meth, self._CI[1])
if meth != self.CImethod.currentText():
self.CImethod.setCurrentIndex(self.CImethod.findText(meth))
def setIntervals(self, ints):
if ints != self._CI[1]:
self._CI = (self._CI[0], ints)
self.CIvalues.setText(";".join("{:g}".format(f) for f in ints))
@pyqtSignature("QAbstractButton*")
def on_buttonBox_clicked(self, button):
role = self.buttonBox.buttonRole(button)
if role == QtGui.QDialogButtonBox.ResetRole:
close_figure('all')
elif role == QtGui.QDialogButtonBox.ApplyRole:
self.plot()
@pyqtSignature("")
def on_buttonBox_rejected(self):
close_figure('all')
def plot(self):
if self.param_model is None:
QMessageBox.critical(self, "Error plotting", "Error, you don't have any data loaded")
else:
if self._CIchanged:
self.on_CIvalues_editingFinished()
fct = self.fct
res = self.res
model = self.param_model
xdata = model.valuesX
ydata = model.valuesY
p0 = model.parm_values
parm_names = model.parm_names
eval_points = None
fixed = tuple(find(array(model.fixed) > 0))
if self.interpolate.isChecked():
if self.autoScale.isChecked():
xmin = xdata.min()
xmax = xdata.max()
else:
xmin = float(self.xMin.text())
xmax = float(self.xMax.text())
eval_points = arange(xmin, xmax, (xmax - xmin) / 1024)
CImethod = None
CImethodName = user_text(self.CImethod.currentText())
if CImethodName == u"Bootstrapping":
CImethod = bootstrap.bootstrap_regression
elif CImethodName == u"Residual resampling":
CImethod = bootstrap.bootstrap_residuals
outfile = self.output
CI = ()
result = None
loc = str(self.legendLocation.currentText())
fct_desc = "$%s$" % (fct.description,)
try:
cf_kwrds = dict(residuals=res.__call__,
p0=p0,
function=fct,
maxfev=10000,
fix_params=fixed,
Dfun=fct.Dfun,
Dres=res.Dfun,
col_deriv=True)
if self.CI is not None:
CI = self.CI[1]
bs = bootstrap.bootstrap(CurveFitting, xdata, ydata, CI,
shuffle_method=CImethod,
shuffle_kwrds={"add_residual": res.invert,
"fit": CurveFitting},
extra_attrs=('popt',), eval_points=eval_points,
fit_kwrds=cf_kwrds)
result = plot_fit.fit_evaluation(bs.y_fit, xdata, ydata,
eval_points=eval_points, xname=self.fieldX,
yname=self.fieldY, fct_desc=fct_desc,
param_names=parm_names, res_name=res.name,
CI=CI, CIresults=bs)
else:
fit = CurveFitting(xdata, ydata, **cf_kwrds)
fit.fit()
result = plot_fit.fit_evaluation(fit, xdata, ydata, eval_points=eval_points,
xname=self.fieldX, yname=self.fieldY,
fct_desc=fct_desc, param_names=parm_names,
res_name=res.name)
except Exception as ex:
traceback.print_exc()
                QMessageBox.critical(self, "Error during Parameter Estimation",
                                     "{0} exception: {1}".format(type(ex).__name__, ex))
return
plot_fit.plot1d(result, loc=loc)
if self.writeResult and outfile:
#print("output to file '%s'" % (outfile,))
plot_fit.write1d(outfile, result, res.description, CImethodName)
#else:
#print("self.writeResult = %s\noutfile='%s'" % (self.writeResult, outfile))
def main():
wnd = QtFitDlg()
wnd.show()
wnd.raise_()
return wnd
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
matplotlib.interactive(True)
wnd = main()
app.exec_()
| 18,594 | 35.894841 | 99 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/curve_fitting.py | """
:Author: Pierre Barbier de Reuille <[email protected]>
This module specifically implement the curve fitting, wrapping the default
scipy.optimize.leastsq function. It allows for parameter value fixing,
different kind of residual and added constraints function.
"""
from __future__ import division, print_function, absolute_import
from scipy import optimize
from .compat import lrange
import numpy as np
class CurveFitting(object):
r"""
Fit a curve using the :py:func:`scipy.optimize.leastsq` function
:type xdata: ndarray
:param xdata: Explaining values
:type ydata: ndarray
:param ydata: Target values
Once fitted, the following variables contain the result of
the fitting:
:ivar ndarray popt: The solution (or the result of the last iteration for
an unsuccessful call)
:ivar ndarray pcov: The estimated covariance of popt. The diagonals
provide the variance of the parameter estimate.
:ivar ndarray res: Final residuals
:ivar dict infodict: a dictionary of outputs with the keys:
``nfev``
the number of function calls
``fvec``
the function evaluated at the output
``fjac``
A permutation of the R matrix of a QR factorization of
the final approximate Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the estimate can be
approximated.
``ipvt``
an integer array of length N which defines a permutation
matrix, ``p``, such that ``fjac*p = q*r``, where ``r`` is upper
triangular with diagonal elements of nonincreasing
magnitude. Column ``j`` of ``p`` is column ``ipvt(j)`` of the
identity matrix.
``qtf``
the vector ``(transpose(q) * fvec)``
``CI``
list of tuple of parameters, each being the lower and
upper bounds for the confidence interval in the CI
argument at the same position.
``est_jacobian``
True if the jacobian is estimated, false if the
user-provided functions have been used
.. note::
In this implementation, residuals are supposed to be a generalisation
of the notion of difference. In the end, the mathematical expression
of this minimisation is:
.. math::
\hat{\theta} = \argmin_{\theta\in \mathbb{R}^p}
\sum_i r(y_i, f(\theta, x_i))^2
Where :math:`\theta` is the vector of :math:`p` parameters to optimise,
:math:`r` is the residual function and :math:`f` is the function being
fitted.
"""
def __init__(self, xdata, ydata, **kwords):
self._fct = None
self._Dfun = None
self._residuals = None
self._Dres = None
self._col_deriv = True
self._constraints = None
self._lsq_args = ()
self._lsq_kwords = {}
self._xdata = None
self._ydata = None
self._p0 = None
self._fix_params = None
self.xdata = xdata
self.ydata = ydata
self._fitted = False
for n in kwords:
setattr(self, n, kwords[n])
if self._residuals is None:
self._residuals = lambda x, y: (x - y)
self._Dres = lambda y1, y0: -1
def need_fit(self):
"""
Function to be called if the object need to be fitted again
"""
self._fitted = False
@property
def fitted(self):
"""
Check if the object has been fitted or not
"""
return self._fitted
@property
def function(self):
"""
Function to be fitted. The call of the function will be::
function(params, xs)
"""
return self._fct
@function.setter
def function(self, f):
self.need_fit()
self._fct = f
@property
def Dfun(self):
"""
Jacobian of the function with respect to its parameters.
:Note: col_deriv defines if the derivative with respect to a given parameter is in column or row
If not provided, a numerical approximation will be used instead.
"""
return self._Dfun
@Dfun.setter
def Dfun(self, df):
self.need_fit()
self._Dfun = df
@Dfun.deleter
def Dfun(self):
self.need_fit()
self._Dfun = None
@property
def col_deriv(self):
"""
Define if Dfun returns the derivatives by row or column.
        If ``col_deriv`` is ``True``, each line corresponds to a parameter and each column to a point.
"""
return self._col_deriv
@col_deriv.setter
def col_deriv(self, value):
self._col_deriv = bool(value)
self.need_fit()
@property
def residuals(self):
"""
Residual function to use. The call will be::
residuals(y_measured, y_est)
        where ``y_measured`` are the measured values and ``y_est`` the estimated ones.
        :Default: the default is ``y_measured - y_est``
"""
return self._residuals
@residuals.setter
def residuals(self, f):
self.need_fit()
self._residuals = f
@property
def Dres(self):
"""
        Derivative of the residual function with respect to the estimated values. The call will be:
Dres(y_measured, y_est)
:Default: as the default residual is ``y_measured - y_est``, the default derivative is ``-1``
"""
return self._Dres
@Dres.setter
def Dres(self, df):
self.need_fit()
self._Dres = df
@Dres.deleter
def Dres(self):
self.need_fit()
self._Dres = None
@property
def lsq_args(self):
"""
Extra arguments to give to the least-square algorithm.
See :py:func:`scipy.optimize.leastsq` for details
"""
return self._lsq_args
@lsq_args.setter
def lsq_args(self, val):
self.need_fit()
self._lsq_args = tuple(val)
@lsq_args.deleter
def lsq_args(self):
self._lsq_args = ()
@property
def lsq_kwords(self):
"""
Extra named arguments to give to the least-square algorithm.
See :py:func:`scipy.optimize.leastsq` for details
"""
return self._lsq_kwords
@lsq_kwords.setter
def lsq_kwords(self, val):
self.need_fit()
self._lsq_kwords = dict(val)
@lsq_kwords.deleter
def lsq_kwords(self):
self._lsq_kwords = {}
@property
def xdata(self):
"""
Explaining values.
"""
return self._xdata
@xdata.setter
def xdata(self, value):
value = np.atleast_1d(value).squeeze()
assert len(value.shape) < 3, "Error, xdata must be at most a 2D array"
self._xdata = value
self.need_fit()
@property
def ydata(self):
"""
Target values.
"""
return self._ydata
@ydata.setter
def ydata(self, value):
value = np.atleast_1d(value).squeeze()
assert len(value.shape) == 1, "Error, ydata must be at most a 1D array"
self._ydata = value
self.need_fit()
@property
def p0(self):
"""
Initial fitting parameters
"""
return self._p0
@p0.setter
def p0(self, value):
value = np.atleast_1d(value)
assert len(value.shape) == 1, "Error, p0 must be at most a 1D array"
self._p0 = value
@property
def constraints(self):
"""
Function returning additional constraints to the problem
"""
return self._constraints
@constraints.setter
def constraints(self, value):
assert callable(value), "Error, constraints must be a callable returning a 1d array"
self._constraints = value
@constraints.deleter
def constraints(self):
self._constraints = None
@property
def fix_params(self):
"""
Index of parameters that shouldn't be touched by the algorithm
"""
return self._fix_params
@fix_params.setter
def fix_params(self, value):
self._fix_params = tuple(value)
@fix_params.deleter
def fix_params(self):
self._fix_params = None
def fit(self):
"""
Fit the curve
"""
Dres = self.Dres
Dfun = self.Dfun
fct = self.function
residuals = self.residuals
col_deriv = self.col_deriv
p0 = self.p0
xdata = self.xdata
ydata = self.ydata
fix_params = self.fix_params
use_derivs = (Dres is not None) and (Dfun is not None)
df = None
f = None
if fix_params:
p_save = np.array(p0, dtype=float)
change_params = lrange(len(p0))
try:
for i in fix_params:
change_params.remove(i)
except ValueError:
raise ValueError("List of parameters to fix is incorrect: "
"contains either duplicates or values "
"out of range.")
p0 = p_save[change_params]
def f_fixed(p):
p1 = np.array(p_save)
p1[change_params] = p
y0 = fct(p1, xdata)
return residuals(ydata, y0)
f = f_fixed
if use_derivs:
def df_fixed(p):
p1 = np.array(p_save)
p1[change_params] = p
y0 = fct(p1, xdata)
dfct = Dfun(p1, xdata)
dr = Dres(ydata, y0)
if col_deriv:
return dfct[change_params]*dr
return dfct[:,change_params]*dr[:, np.newaxis]
df = df_fixed
else:
def f_free(p):
y0 = fct(p, xdata)
return residuals(ydata, y0)
f = f_free
if use_derivs:
def df_free(p):
dfct = Dfun(p, xdata)
y0 = fct(p, xdata)
dr = np.atleast_1d(Dres(ydata, y0))
if col_deriv:
return dfct*dr
return dfct*dr[:, np.newaxis]
df = df_free
if use_derivs:
self.df = df
cd = 1 if col_deriv else 0
optim = optimize.leastsq(f, p0, full_output=1, Dfun=df,
col_deriv=cd, *self.lsq_args, **self.lsq_kwords)
popt, pcov, infodict, mesg, ier = optim
#infodict['est_jacobian'] = not use_derivs
if fix_params:
p_save[change_params] = popt
popt = p_save
if not ier in [1, 2, 3, 4]:
raise RuntimeError("Unable to determine number of fit parameters. "
"Error returned by scipy.optimize.leastsq:\n%s"
% (mesg,))
res = residuals(ydata, fct(popt, xdata))
if (len(res) > len(p0)) and pcov is not None:
s_sq = (res ** 2).sum() / (len(ydata) - len(p0))
pcov = pcov * s_sq
else:
pcov = np.inf
self.popt = popt
self.pcov = pcov
self.res = res
self.infodict = infodict
self._fitted = True
def __call__(self, xdata):
"""
Return the value of the fitted function for each of the points in
``xdata``
"""
if not self.fitted:
self.fit()
return self.function(self.popt, xdata)
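# --- Hedged example (not part of the original module) ------------------------
# A minimal sketch of fitting y = a*x + b with ``CurveFitting``.  The model
# follows the documented call convention ``function(params, xs)``; the data and
# the initial guess are illustrative only.
def _example_curve_fitting():
    def line(params, xs):
        a, b = params
        return a * xs + b
    xs = np.linspace(0.0, 1.0, 50)
    ys = 2.0 * xs + 1.0 + 0.05 * np.random.randn(50)
    fit = CurveFitting(xs, ys, function=line, p0=(1.0, 0.0))
    fit.fit()
    return fit.popt, fit(xs)       # estimated (a, b) and the fitted curve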
| 11,766 | 27.422705 | 104 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/cyth.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 31 20:11:03 2012
@author: Pierre Barbier de Reuille <[email protected]>
"""
from __future__ import absolute_import, print_function
import os
import numpy
try:
import pyximport
HAS_CYTHON = True
except ImportError:
HAS_CYTHON = False
def addFlags(var, flags, sep = ' '):
if var in os.environ:
flags = [os.environ[var]] + flags
os.environ[var] = sep.join(flags)
if HAS_CYTHON:
USE_MINGW=False
if os.name == 'nt':
addFlags('CPATH', [numpy.get_include()], ';')
mingw_setup_args = dict(options={})
if USE_MINGW:
addFlags('PATH', [r'C:\MinGW\bin'], ';')
mingw_setup_args['options']['build_ext'] = {'compiler': 'mingw32'}
pyximport.install(setup_args=mingw_setup_args,reload_support=True)
elif os.name == 'posix':
extra_flags = ['-I' + numpy.get_include()]
addFlags('CFLAGS', extra_flags)
addFlags('CXXFLAGS', extra_flags)
pyximport.install(reload_support=True)
| 1,057 | 23.604651 | 78 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/kde.py | r"""
:Author: Pierre Barbier de Reuille <[email protected]>
Module implementing kernel-based estimation of density of probability.
Given a kernel :math:`K`, the density function is estimated from a sampling
:math:`X = \{X_i \in \mathbb{R}^n\}_{i\in\{1,\ldots,m\}}` as:
.. math::
f(\mathbf{z}) \triangleq \frac{1}{hW} \sum_{i=1}^m \frac{w_i}{\lambda_i}
K\left(\frac{X_i-\mathbf{z}}{h\lambda_i}\right)
W = \sum_{i=1}^m w_i
where :math:`h` is the bandwidth of the kernel, :math:`w_i` are the weights of
the data points and :math:`\lambda_i` are the adaptation factor of the kernel
width.
The kernel is a function of :math:`\mathbb{R}^n` such that:
.. math::
\begin{array}{rclcl}
\idotsint_{\mathbb{R}^n} f(\mathbf{z}) d\mathbf{z}
& = & 1 & \Longleftrightarrow & \text{$f$ is a probability}\\
\idotsint_{\mathbb{R}^n} \mathbf{z}f(\mathbf{z}) d\mathbf{z} &=&
\mathbf{0} & \Longleftrightarrow & \text{$f$ is
centered}\\
\forall \mathbf{u}\in\mathbb{R}^n, \|\mathbf{u}\|
= 1\qquad\int_{\mathbb{R}} t^2f(t \mathbf{u}) dt &\approx&
1 & \Longleftrightarrow & \text{The co-variance matrix of $f$ is close
to be the identity.}
\end{array}
The constraint on the covariance is only required to provide a uniform meaning
for the bandwidth of the kernel.
If the domain of the density estimation is bounded to the interval
:math:`[L,U]`, the density is then estimated with:
.. math::
f(x) \triangleq \frac{1}{hW} \sum_{i=1}^n \frac{w_i}{\lambda_i}
\hat{K}(x;X,\lambda_i h,L,U)
where :math:`\hat{K}` is a modified kernel that depends on the exact method
used. Currently, only 1D KDE supports bounded domains.
"""
import numpy as np
import kde_methods
from kde_bandwidth import scotts_covariance
from kernels import normal_kernel1d
from utils import numpy_method_idx
class KDE1D(object):
r"""
Perform a kernel based density estimation in 1D, possibly on a bounded
domain :math:`[L,U]`.
:param ndarray data: 1D array with the data points
:param dict kwords: setting attributes at construction time.
Any named argument will be equivalent to setting the property
after the fact. For example::
>>> xs = [1,2,3]
>>> k = KDE1D(xs, lower=0)
will be equivalent to::
>>> k = KDE1D(xs)
>>> k.lower = 0
The calculation is separated in three parts:
- The kernel (:py:attr:`kernel`)
- The bandwidth or covariance estimation (:py:attr:`bandwidth`,
:py:attr:`covariance`)
- The estimation method (:py:attr:`method`)
"""
def __init__(self, xdata, **kwords):
self._xdata = None
self._upper = np.inf
self._lower = -np.inf
self._kernel = normal_kernel1d()
self._bw_fct = None
self._bw = None
self._cov_fct = None
self._covariance = None
self._method = None
self.weights = 1.
self.lambdas = 1.
self._fitted = False
for n in kwords:
setattr(self, n, kwords[n])
self.xdata = xdata
has_bw = (self._bw is not None or self._bw_fct is not None or
self._covariance is not None or self._cov_fct is not None)
if not has_bw:
self.covariance = scotts_covariance
if self._method is None:
self.method = kde_methods.default_method
@property
def fitted(self):
"""
Test if the fitting has been done
"""
return self._fitted
def fit_if_needed(self):
"""
Fit only if needed (testing self.fitted)
"""
if not self._fitted:
self.fit()
def need_fit(self):
"""
Calling this function will mark the object as needing fitting.
"""
        self._fitted = False
def copy(self):
"""
Shallow copy of the KDE object
"""
res = KDE1D.__new__(KDE1D)
# Copy private members: start with a single '_'
for m in self.__dict__:
if len(m) > 1 and m[0] == '_' and m[1] != '_':
setattr(res, m, getattr(self, m))
return res
def compute_bandwidth(self):
"""
Method computing the bandwidth if needed (i.e. if it was defined by functions)
"""
self._bw, self._covariance = kde_methods.compute_bandwidth(self)
def fit(self):
"""
Compute the various parameters needed by the kde method
"""
if self._weights.shape:
assert self._weights.shape == self._xdata.shape, \
"There must be as many weights as data points"
self._total_weights = sum(self._weights)
else:
self._total_weights = len(self._xdata)
self.method.fit(self)
self._fitted = True
@property
def xdata(self):
return self._xdata
@xdata.setter
def xdata(self, xs):
self.need_fit()
self._xdata = np.atleast_1d(xs)
        assert len(self._xdata.shape) == 1, "The attribute xdata must be a one-dimensional array"
@property
def kernel(self):
r"""
Kernel object. This must be an object modeled on
:py:class:`pyqt_fit.kernels.Kernel1D`. It is recommended to inherit
this class to provide numerical approximation for all methods.
By default, the kernel is an instance of
:py:class:`pyqt_fit.kernels.normal_kernel1d`
"""
return self._kernel
@kernel.setter
def kernel(self, val):
self.need_fit()
self._kernel = val
@property
def lower(self):
r"""
Lower bound of the density domain. If deleted, becomes set to
:math:`-\infty`
"""
return self._lower
@lower.setter
def lower(self, val):
self.need_fit()
self._lower = float(val)
@lower.deleter
def lower(self):
self.need_fit()
self._lower = -np.inf
@property
def upper(self):
r"""
Upper bound of the density domain. If deleted, becomes set to
:math:`\infty`
"""
return self._upper
@upper.setter
def upper(self, val):
self.need_fit()
self._upper = float(val)
@upper.deleter
def upper(self):
self.need_fit()
self._upper = np.inf
@property
def weights(self):
"""
        Weights associated to each data point. It can be either a single value,
or an array with a value per data point. If a single value is provided,
the weights will always be set to 1.
"""
return self._weights
@weights.setter
def weights(self, ws):
self.need_fit()
try:
ws = float(ws)
self._weights = np.asarray(1.)
except TypeError:
ws = np.array(ws, dtype=float)
self._weights = ws
self._total_weights = None
@weights.deleter
def weights(self):
self.need_fit()
self._weights = np.asarray(1.)
self._total_weights = None
@property
def total_weights(self):
return self._total_weights
@property
def lambdas(self):
"""
Scaling of the bandwidth, per data point. It can be either a single
value or an array with one value per data point.
        When deleted, the lambdas are reset to 1.
"""
return self._lambdas
@lambdas.setter
def lambdas(self, ls):
self.need_fit()
try:
self._lambdas = np.asarray(float(ls))
except TypeError:
ls = np.array(ls, dtype=float)
self._lambdas = ls
@lambdas.deleter
def lambdas(self):
self.need_fit()
self._lambdas = np.asarray(1.)
@property
def bandwidth(self):
"""
Bandwidth of the kernel.
Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata)`` that returns a single
value.
.. note::
A ndarray with a single value will be converted to a floating point
value.
"""
return self._bw
@bandwidth.setter
def bandwidth(self, bw):
self.need_fit()
self._bw_fct = None
self._cov_fct = None
if callable(bw):
self._bw_fct = bw
else:
bw = float(bw)
self._bw = bw
self._covariance = bw * bw
@property
def bandwidth_function(self):
return self._bw_fct
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata)`` that returns a single
value.
.. note::
A ndarray with a single value will be converted to a floating point
value.
"""
return self._covariance
@covariance.setter
def covariance(self, cov):
self.need_fit()
self._bw_fct = None
self._cov_fct = None
if callable(cov):
self._cov_fct = cov
else:
cov = float(cov)
self._covariance = cov
self._bw = np.sqrt(cov)
@property
def covariance_function(self):
return self._cov_fct
@numpy_method_idx
def pdf(self, points, out=None):
"""
Compute the PDF of the distribution on the set of points ``points``
"""
self.fit_if_needed()
return self._method.pdf(self, points, out)
def evaluate(self, points, out=None):
"""
Compute the PDF of the distribution on the set of points ``points``
"""
return self.pdf(points, out)
def __call__(self, points, out=None):
"""
This method is an alias for :py:meth:`BoundedKDE1D.evaluate`
"""
return self.pdf(points, out=out)
@numpy_method_idx
def cdf(self, points, out=None):
r"""
Compute the cumulative distribution function defined as:
.. math::
cdf(x) = P(X \leq x) = \int_l^x p(t) dt
where :math:`l` is the lower bound of the distribution domain and
:math:`p` the density of probability.
"""
self.fit_if_needed()
return self.method.cdf(self, points, out)
def cdf_grid(self, N=None, cut=None):
"""
        Compute the cdf on a regular grid of N points spanning the domain of the density.
"""
self.fit_if_needed()
return self.method.cdf_grid(self, N, cut)
@numpy_method_idx
def icdf(self, points, out=None):
r"""
Compute the inverse cumulative distribution (quantile) function.
"""
self.fit_if_needed()
return self.method.icdf(self, points, out)
def icdf_grid(self, N=None, cut=None):
"""
Compute the inverse cumulative distribution (quantile) function on a grid.
"""
self.fit_if_needed()
return self.method.icdf_grid(self, N, cut)
@numpy_method_idx
def sf(self, points, out=None):
r"""
Compute the survival function.
The survival function is defined as:
.. math::
sf(x) = P(X \geq x) = \int_x^u p(t) dt = 1 - cdf(x)
where :math:`u` is the upper bound of the distribution domain and
:math:`p` the density of probability.
"""
self.fit_if_needed()
return self.method.sf(self, points, out)
def sf_grid(self, N=None, cut=None):
r"""
Compute the survival function on a grid
"""
self.fit_if_needed()
return self.method.sf_grid(self, N, cut)
@numpy_method_idx
def isf(self, points, out=None):
r"""
Compute the inverse survival function, defined as:
.. math::
isf(p) = \sup\left\{x\in\mathbb{R} : sf(x) \leq p\right\}
"""
self.fit_if_needed()
return self.method.isf(self, points, out)
def isf_grid(self, N=None, cut=None):
r"""
Compute the inverse survival function on a grid.
"""
self.fit_if_needed()
return self.method.isf_grid(self, N, cut)
@numpy_method_idx
def hazard(self, points, out=None):
r"""
Compute the hazard function evaluated on the points.
The hazard function is defined as:
.. math::
h(x) = \frac{p(x)}{sf(x)}
"""
self.fit_if_needed()
return self.method.hazard(self, points, out)
def hazard_grid(self, N=None, cut=None):
"""
Compute the hazard function evaluated on a grid.
"""
self.fit_if_needed()
return self.method.hazard_grid(self, N, cut)
@numpy_method_idx
def cumhazard(self, points, out=None):
r"""
Compute the cumulative hazard function evaluated on the points.
The cumulative hazard function is defined as:
.. math::
ch(x) = \int_l^x h(t) dt = -\ln sf(x)
where :math:`l` is the lower bound of the domain, :math:`h` the hazard
function and :math:`sf` the survival function.
"""
self.fit_if_needed()
return self.method.cumhazard(self, points, out)
def cumhazard_grid(self, N=None, cut=None):
"""
Compute the cumulative hazard function evaluated on a grid.
"""
self.fit_if_needed()
return self.method.cumhazard_grid(self, N, cut)
@property
def method(self):
"""
Select the method to use. The method should be an object modeled on
:py:class:`pyqt_fit.kde_methods.KDE1DMethod`, and it is recommended to
inherit the model.
Available methods in the :py:mod:`pyqt_fit.kde_methods` sub-module.
:Default: :py:data:`pyqt_fit.kde_methods.default_method`
"""
return self._method
@method.setter
def method(self, m):
self.need_fit()
self._method = m
@method.deleter
def method(self):
self.need_fit()
self._method = kde_methods.renormalization
@property
def closed(self):
"""
Returns true if the density domain is closed (i.e. lower and upper
are both finite)
"""
return self.lower > -np.inf and self.upper < np.inf
@property
def bounded(self):
"""
Returns true if the density domain is actually bounded
"""
return self.lower > -np.inf or self.upper < np.inf
def grid(self, N=None, cut=None):
"""
Evaluate the density on a grid of N points spanning the whole dataset.
:returns: a tuple with the mesh on which the density is evaluated and
the density itself
"""
self.fit_if_needed()
return self._method.grid(self, N, cut)
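# Minimal usage sketch (not part of the original module; synthetic data, and it
# assumes the default method in kde_methods handles the lower bound as
# documented above). Guarded so nothing runs on import.
if __name__ == "__main__":
    samples = np.abs(np.random.normal(loc=1.0, scale=0.5, size=200))
    est = KDE1D(samples, lower=0)
    mesh, density = est.grid(N=256)
    print(mesh.shape, density.shape, float(density.max()))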
| 14,941 | 26.773234 | 95 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/nonparam_regression.py | """
:Author: Pierre Barbier de Reuille <[email protected]>
Module implementing non-parametric regressions using kernel methods.
"""
import numpy as np
from scipy import linalg
import kde_bandwidth
import kernels
import npr_methods
class NonParamRegression(object):
r"""
Class performing kernel-based non-parametric regression.
The calculation is split in three parts:
- The kernel (:py:attr:`kernel`)
- Bandwidth computation (:py:attr:`bandwidth`, :py:attr:`covariance`)
- Regression method (:py:attr:`method`)
"""
def __init__(self, xdata, ydata, **kwords):
self._xdata = np.atleast_2d(xdata)
self._ydata = np.atleast_1d(ydata)
self._covariance = None
self._cov_fct = None
self._bandwidth = None
self._bw_fct = None
self._method = None
self._kernel = None
self._lower = None
self._upper = None
self._kernel_type = None
self._fitted_method = None
self._n = None
self._d = None
self._ytrans = None
self._fitted_ydata = None
for kw in kwords:
setattr(self, kw, kwords[kw])
if self._kernel is None:
self.kernel_type = kernels.normal_kernel
if self._method is None:
self.method = npr_methods.default_method
if self._cov_fct is None and self._bw_fct is None and self._covariance is None and self._bandwidth is None:
self._cov_fct = kde_bandwidth.scotts_covariance
def copy(self):
res = NonParamRegression.__new__(NonParamRegression)
# Copy private members: start with a single '_'
for m in self.__dict__:
if len(m) > 1 and m[0] == '_' and m[1] != '_':
obj = getattr(self, m)
try:
setattr(res, m, obj.copy())
except AttributeError:
setattr(res, m, obj)
return res
def need_fit(self):
"""
Calling this function will mark the object as needing fitting.
"""
self._fitted_method = None
@property
def fitted(self):
"""
Check if the fitting needs to be performed.
"""
return self._fitted_method is not None
@property
def kernel(self):
r"""
Kernel object. Should provide the following methods:
``kernel.pdf(xs)``
Density of the kernel, denoted :math:`K(x)`
"""
return self._kernel
@kernel.setter
def kernel(self, k):
self._kernel_type = None
self._kernel = k
self.need_fit()
@property
def kernel_type(self):
"""
Type of the kernel. The kernel type is a class or function accepting
the dimension of the domain as argument and returning a valid kernel object.
"""
return self._kernel_type
@kernel_type.setter
def kernel_type(self, ker):
self._kernel_type = ker
self._kernel = None
self.need_fit()
@property
def bandwidth(self):
r"""
Bandwidth of the kernel.
This is defined as the square root of the covariance matrix
"""
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bw):
self._bw_fct = None
self._cov_fct = None
if callable(bw):
self._bw_fct = bw
else:
self._bandwidth = np.atleast_2d(bw)
self._covariance = np.dot(self._bandwidth, self._bandwidth)
self.need_fit()
@property
def bandwidth_function(self):
return self._bw_fct
@property
def covariance(self):
r"""
Covariance matrix of the kernel.
It must be of the right dimension!
"""
return self._covariance
@covariance.setter
def covariance(self, cov):
self._bw_fct = None
self._cov_fct = None
if callable(cov):
self._cov_fct = cov
else:
self._covariance = np.atleast_2d(cov)
self._bandwidth = linalg.sqrtm(self._covariance)
self.need_fit()
@property
def covariance_function(self):
return self._cov_fct
@property
def lower(self):
"""
Lower bound of the domain for each dimension
"""
if self._lower is None:
return -np.inf*np.ones(self.dim, dtype=float)
return self._lower
@lower.setter
def lower(self, l):
l = np.atleast_1d(l)
assert len(l.shape) == 1, "The lower bound must be at most a 1D array"
self._lower = l
self.need_fit()
@lower.deleter
def lower(self):
self._lower = None
@property
def upper(self):
"""
        Upper bound of the domain for each dimension
"""
if self._upper is None:
return np.inf*np.ones(self.dim, dtype=float)
return self._upper
@upper.setter
def upper(self, l):
l = np.atleast_1d(l)
assert len(l.shape) == 1, "The upper bound must be at most a 1D array"
self._upper = l
self.need_fit()
@upper.deleter
def upper(self):
self._upper = None
@property
def xdata(self):
"""
2D array (D,N) with D the dimension of the domain and N the number of points.
"""
return self._xdata
@xdata.setter
def xdata(self, xd):
xd = np.atleast_2d(xd)
assert len(xd.shape) == 2, "The xdata must be at most a 2D array"
self._xdata = xd
self.need_fit()
@property
def ydata(self):
"""
1D array (N,) of values for each point in xdata
"""
return self._ydata
@ydata.setter
def ydata(self, yd):
yd = np.atleast_1d(yd)
assert len(yd.shape) == 1, "The ydata must be at most a 1D array"
self._ydata = yd
self.need_fit()
@property
def fitted_ydata(self):
"""
Data actually fitted. It may differ from ydata if ytrans is specified.
"""
return self._fitted_ydata
@property
def ytrans(self):
"""
Function used to transform the Y data before fitting.
This must be a callable that also has a ``inv`` attribute returning the inverse function.
:Note: The ``inv`` method must accept an ``out`` argument to store the output.
"""
return self._ytrans
@ytrans.setter
def ytrans(self, tr):
assert hasattr(tr, '__call__') and hasattr(tr, 'inv'), "The transform must be a callable with an `inv` attribute"
self._ytrans = tr
@ytrans.deleter
def ytrans(self):
self._ytrans = None
@property
def method(self):
"""
Regression method itself. It should be an instance of the class following the template
:py:class:`pyqt_fit.npr_methods.RegressionKernelMethod`.
"""
return self._method
@method.setter
def method(self, m):
self._method = m
self.need_fit()
@property
def fitted_method(self):
"""
Method actually used after fitting.
The main method may choose to provide a more tuned method during fitting.
"""
return self._fitted_method
@property
def N(self):
"""
Number of points in the dataset (set by the fitting)
"""
return self._n
@property
def dim(self):
"""
Dimension of the domain (set by the fitting)
"""
return self._d
def _create_kernel(self, D):
if self._kernel_type is None:
return self._kernel
return self._kernel_type(D)
def set_actual_bandwidth(self, bandwidth, covariance):
"""
Method computing the bandwidth if needed (i.e. if it was defined by functions)
"""
self._bandwidth = bandwidth
self._covariance = covariance
def fit(self):
"""
Method to call to fit the parameters of the fitting
"""
D, N = self._xdata.shape
# assert self._ydata.shape[0] == N, "There must be as many points for X and Y"
if self.ytrans is not None:
self._fitted_ydata = self.ytrans(self.ydata)
else:
self._fitted_ydata = self.ydata
self._kernel = self._create_kernel(D)
self._n = N
self._d = D
lower = self.lower
upper = self.upper
assert len(lower) == D, "The 'lower' property must have one value per dimension of the domain."
assert len(upper) == D, "The 'upper' property must have one value per dimension of the domain."
self._fitted_method = self._method.fit(self)
assert self.bandwidth.shape == (D, D), "The bandwidth should have a shape of ({0},{0}) (actual: {1})".format(D, self.bandwidth.shape)
assert self.covariance.shape == (D, D), "The covariance should have a shape of ({0},{0}) (actual: {1})".format(D, self.covariance.shape)
self._fitted = True
def evaluate(self, points, out=None):
if not self.fitted:
self.fit()
points = np.asanyarray(points)
real_shape = points.shape
assert len(real_shape) < 3, "The input points can be at most a 2D array"
if len(real_shape) == 0:
points = points.reshape(1,1)
elif len(real_shape) == 1:
points = points.reshape(1, real_shape[0])
if out is None:
out = np.empty((points.shape[-1],), dtype=type(points.dtype.type() + 0.))
else:
out.shape = (points.shape[-1],)
self._fitted_method.evaluate(self, points, out)
out.shape = real_shape[-1:]
if self.ytrans:
self.ytrans.inv(out, out=out)
return out
def __call__(self, points, out=None):
return self.evaluate(points, out)
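# Minimal usage sketch (not part of the original module; synthetic data, and it
# relies on the default kernel, bandwidth and method described above). Guarded
# so nothing runs on import.
if __name__ == "__main__":
    xs = np.linspace(0, 2 * np.pi, 200)
    ys = np.sin(xs) + 0.2 * np.random.randn(xs.size)
    reg = NonParamRegression(xs, ys)
    reg.fit()
    print(reg(np.linspace(0, 2 * np.pi, 5)))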
| 9,886 | 27.492795 | 144 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/kernel_smoothing.py | """
:Author: Pierre Barbier de Reuille <[email protected]>
Module implementing non-parametric regressions using kernel smoothing methods.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import scipy
from scipy import stats
from scipy.linalg import sqrtm, solve
from .compat import irange
from .cyth import HAS_CYTHON
local_linear = None
def useCython():
"""
Switch to using Cython methods if available
"""
global local_linear
if HAS_CYTHON:
from . import cy_local_linear
local_linear = cy_local_linear
def usePython():
"""
Switch to using the python implementation of the methods
"""
global local_linear
from . import py_local_linear
local_linear = py_local_linear
if HAS_CYTHON:
useCython()
else:
usePython()
from .kde import scotts_covariance
from .kernels import normal_kernel, normal_kernel1d
class SpatialAverage(object):
r"""
Perform a Nadaraya-Watson regression on the data (i.e. also called
local-constant regression) using a gaussian kernel.
The Nadaraya-Watson estimate is given by:
.. math::
f_n(x) \triangleq \frac{\sum_i K\left(\frac{x-X_i}{h}\right) Y_i}
{\sum_i K\left(\frac{x-X_i}{h}\right)}
Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
and :math:`h` is the bandwidth of the method.
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:type cov: ndarray or callable
:param cov: If an ndarray, it should be a 2D array giving the matrix of
covariance of the gaussian kernel. Otherwise, it should be a function
``cov(xdata, ydata)`` returning the covariance matrix.
"""
def __init__(self, xdata, ydata, cov=scotts_covariance):
self.xdata = np.atleast_2d(xdata)
self.ydata = np.atleast_1d(ydata)
self._bw = None
self._covariance = None
self._inv_cov = None
self.covariance = cov
self.d, self.n = self.xdata.shape
self.correction = 1.
@property
def bandwidth(self):
"""
Bandwidth of the kernel. It cannot be set directly, but rather should
be set via the covariance attribute.
"""
if self._bw is None and self._covariance is not None:
self._bw = np.real(sqrtm(self._covariance))
return self._bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
        Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a 2D matrix for the covariance of the kernel.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = np.atleast_2d(cov(self.xdata, self.ydata))
else:
_cov = np.atleast_2d(cov)
self._bw = None
self._covariance = _cov
self._inv_cov = scipy.linalg.inv(_cov)
def evaluate(self, points, result=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put in this
array
"""
points = np.atleast_2d(points).astype(self.xdata.dtype)
#norm = self.kde(points)
d, m = points.shape
if result is None:
result = np.zeros((m,), points.dtype)
norm = np.zeros((m,), points.dtype)
# iterate on the internal points
for i, ci in np.broadcast(irange(self.n),
irange(self._correction.shape[0])):
diff = np.dot(self._correction[ci],
self.xdata[:, i, np.newaxis] - points)
tdiff = np.dot(self._inv_cov, diff)
energy = np.exp(-np.sum(diff * tdiff, axis=0) / 2.0)
result += self.ydata[i] * energy
norm += energy
result[norm > 0] /= norm[norm > 0]
return result
def __call__(self, *args, **kwords):
"""
This method is an alias for :py:meth:`SpatialAverage.evaluate`
"""
return self.evaluate(*args, **kwords)
@property
def correction(self):
"""
The correction coefficient allows to change the width of the kernel
depending on the point considered. It can be either a constant (to
correct globaly the kernel width), or a 1D array of same size as the
input.
"""
return self._correction
@correction.setter # noqa
def correction(self, value):
self._correction = np.atleast_1d(value)
def set_density_correction(self):
"""
Add a correction coefficient depending on the density of the input
"""
kde = stats.gaussian_kde(self.xdata)
dens = kde(self.xdata)
dm = dens.max()
dens[dens < 1e-50] = dm
self._correction = dm / dens
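# Usage sketch (illustrative, synthetic data): Nadaraya-Watson smoothing of a
# noisy 1D signal, with the covariance defaulting to Scott's rule imported above.
#
#   xs = np.linspace(0, 10, 200)
#   ys = np.cos(xs) + 0.25 * np.random.randn(xs.size)
#   sa = SpatialAverage(xs, ys)
#   smoothed = sa(xs)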
class LocalLinearKernel1D(object):
r"""
Perform a local-linear regression using a gaussian kernel.
    The local linear regression is the function that minimises, for each
position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - a_1(x-X_i)\right)^2
Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
and :math:`h` is the bandwidth of the method.
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:type cov: float or callable
:param cov: If an float, it should be a variance of the gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning the
variance.
"""
def __init__(self, xdata, ydata, cov=scotts_covariance):
self.xdata = np.atleast_1d(xdata)
self.ydata = np.atleast_1d(ydata)
self.n = self.xdata.shape[0]
self._bw = None
self._covariance = None
self.covariance = cov
@property
def bandwidth(self):
"""
Bandwidth of the kernel.
"""
return self._bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
        Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a single value.
.. note::
A ndarray with a single value will be converted to a floating
point value.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = float(cov(self.xdata, self.ydata))
else:
_cov = float(cov)
self._covariance = _cov
self._bw = np.sqrt(_cov)
def evaluate(self, points, out=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put in this
array
"""
li2, out = local_linear.local_linear_1d(self._bw, self.xdata,
self.ydata, points, out)
self.li2 = li2
return out
def __call__(self, *args, **kwords):
"""
This method is an alias for :py:meth:`LocalLinearKernel1D.evaluate`
"""
return self.evaluate(*args, **kwords)
class PolynomialDesignMatrix1D(object):
def __init__(self, dim):
self.dim = dim
powers = np.arange(0, dim + 1).reshape((1, dim + 1))
self.powers = powers
def __call__(self, dX, out=None):
return np.power(dX, self.powers, out) # / self.frac
class LocalPolynomialKernel1D(object):
r"""
Perform a local-polynomial regression using a user-provided kernel
(Gaussian by default).
    The local polynomial regression is the function that minimises, for each
position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - a_1(x-X_i) - \ldots -
a_q \frac{(x-X_i)^q}{q!}\right)^2
Where :math:`K(x)` is the kernel such that :math:`E(K(x)) = 0`, :math:`q`
is the order of the fitted polynomial and :math:`h` is the bandwidth of
the method. It is also recommended to have :math:`\int_\mathbb{R} x^2K(x)dx
= 1`, (i.e. variance of the kernel is 1) or the effective bandwidth will be
scaled by the square-root of this integral (i.e. the standard deviation of
the kernel).
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:param int q: Order of the polynomial to fit. **Default:** 3
:type cov: float or callable
:param cov: If an float, it should be a variance of the gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning
the variance.
**Default:** ``scotts_covariance``
"""
def __init__(self, xdata, ydata, q=3, **kwords):
self.xdata = np.atleast_1d(xdata)
self.ydata = np.atleast_1d(ydata)
self.n = self.xdata.shape[0]
self.q = q
self._kernel = None
self._bw = None
self._covariance = None
self.designMatrix = None
for n in kwords:
setattr(self, n, kwords[n])
if self.kernel is None:
self.kernel = normal_kernel1d()
if self.covariance is None:
self.covariance = scotts_covariance
if self.designMatrix is None:
self.designMatrix = PolynomialDesignMatrix1D
@property
def bandwidth(self):
"""
Bandwidth of the kernel.
"""
return self._bw
@bandwidth.setter # noqa
def bandwidth(self, bw):
if callable(bw):
_bw = float(bw(self.xdata, self.ydata))
else:
_bw = float(bw)
self._bw = _bw
self._covariance = _bw * _bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
        Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a single value.
.. note::
A ndarray with a single value will be converted to a floating
point value.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = float(cov(self.xdata, self.ydata))
else:
_cov = float(cov)
self._covariance = _cov
self._bw = np.sqrt(_cov)
@property
def cov(self):
"""
Covariance of the gaussian kernel.
        Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a single value.
.. note::
A ndarray with a single value will be converted to a floating
point value.
"""
return self.covariance
@cov.setter # noqa
def cov(self, val):
self.covariance = val
@property
def kernel(self):
r"""
Kernel object. Should provide the following methods:
``kernel.pdf(xs)``
Density of the kernel, denoted :math:`K(x)`
By default, the kernel is an instance of
:py:class:`kernels.normal_kernel1d`
"""
return self._kernel
@kernel.setter # noqa
def kernel(self, val):
self._kernel = val
def evaluate(self, points, out=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put
in this array
"""
xdata = self.xdata[:, np.newaxis] # make it a column vector
ydata = self.ydata[:, np.newaxis] # make it a column vector
q = self.q
bw = self.bandwidth
kernel = self.kernel
designMatrix = self.designMatrix(q)
if out is None:
out = np.empty(points.shape, dtype=float)
for i, p in enumerate(points):
dX = (xdata - p)
Wx = kernel(dX / bw)
Xx = designMatrix(dX)
WxXx = Wx * Xx
XWX = np.dot(Xx.T, WxXx)
Lx = solve(XWX, WxXx.T)[0]
out[i] = np.dot(Lx, ydata)
return out
def __call__(self, *args, **kwords):
"""
        This method is an alias for :py:meth:`LocalPolynomialKernel1D.evaluate`
"""
return self.evaluate(*args, **kwords)
class PolynomialDesignMatrix(object):
"""
Class used to create a design matrix for polynomial regression
"""
def __init__(self, dim, deg):
self.dim = dim
self.deg = deg
self._designMatrixSize()
def _designMatrixSize(self):
"""
Compute the size of the design matrix for a n-D problem of order d.
Can also compute the Taylors factors (i.e. the factors that would be
applied for the taylor decomposition)
:param int dim: Dimension of the problem
:param int deg: Degree of the fitting polynomial
:param bool factors: If true, the out includes the Taylor factors
:returns: The number of columns in the design matrix and, if required,
a ndarray with the taylor coefficients for each column of
the design matrix.
"""
dim = self.dim
deg = self.deg
init = 1
dims = [0] * (dim + 1)
cur = init
prev = 0
#if factors:
# fcts = [1]
fact = 1
for i in irange(deg):
diff = cur - prev
prev = cur
old_dims = list(dims)
fact *= (i + 1)
for j in irange(dim):
dp = diff - old_dims[j]
cur += dp
dims[j + 1] = dims[j] + dp
# if factors:
# fcts += [fact]*(cur-prev)
self.size = cur
#self.factors = np.array(fcts)
def __call__(self, x, out=None):
"""
Creates the design matrix for polynomial fitting using the points x.
:param ndarray x: Points to create the design matrix.
Shape must be (D,N) or (N,), where D is the dimension of
the problem, 1 if not there.
        :param ndarray out: Pre-allocated (M,N) array for the result, where M
            is the number of columns of the design matrix (i.e. ``self.size``).
        :returns: The design matrix as a (M,N) matrix.
"""
dim, deg = self.dim, self.deg
#factors = self.factors
x = np.atleast_2d(x)
dim = x.shape[0]
        if out is None:
            # The number of rows was pre-computed by _designMatrixSize()
            out = np.empty((self.size, x.shape[1]), dtype=x.dtype)
dims = [0] * (dim + 1)
out[0, :] = 1
cur = 1
for i in irange(deg):
old_dims = list(dims)
prev = cur
for j in irange(x.shape[0]):
dims[j] = cur
for k in irange(old_dims[j], prev):
np.multiply(out[k], x[j], out[cur])
cur += 1
#if factors is not None:
# factors = np.asarray(factors)
# if len(factors.shape) == 1:
# factors = factors[:,np.newaxis]
# out /= factors
return out
class LocalPolynomialKernel(object):
r"""
Perform a local-polynomial regression in N-D using a user-provided kernel
(Gaussian by default).
    The local polynomial regression is the function that minimises,
for each position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - \mathcal{P}_q(X_i-x)\right)^2
Where :math:`K(x)` is the kernel such that :math:`E(K(x)) = 0`, :math:`q`
is the order of the fitted polynomial, :math:`\mathcal{P}_q(x)` is a
polynomial of order :math:`d` in :math:`x` and :math:`h` is the bandwidth
of the method.
The polynomial :math:`\mathcal{P}_q(x)` is of the form:
.. math::
        \mathcal{F}_d(k) = \left\{ \mathbf{n} \in \mathbb{N}^d \middle|
            \sum_{i=1}^d n_i = k \right\}
        \mathcal{P}_q(x_1,\ldots,x_d) = \sum_{k=1}^q
            \sum_{\mathbf{n}\in\mathcal{F}_d(k)} a_{k,\mathbf{n}}
            \prod_{i=1}^d x_i^{n_i}
For example we have:
.. math::
\mathcal{P}_2(x,y) = a_{110} x + a_{101} y + a_{220} x^2 +
a_{211} xy + a_{202} y^2
:param ndarray xdata: Explaining variables (at most 2D array).
The shape should be (N,D) with D the dimension of the problem
and N the number of points. For 1D array, the shape can be (N,),
in which case it will be converted to (N,1) array.
:param ndarray ydata: Explained variables (should be 1D array). The shape
must be (N,).
:param int q: Order of the polynomial to fit. **Default:** 3
:param callable kernel: Kernel to use for the weights. Call is
``kernel(points)`` and should return an array of values the same size
as ``points``. If ``None``, the kernel will be ``normal_kernel(D)``.
:type cov: float or callable
:param cov: If an float, it should be a variance of the gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning
the variance.
**Default:** ``scotts_covariance``
"""
def __init__(self, xdata, ydata, q=3, cov=scotts_covariance, kernel=None):
self.xdata = np.atleast_2d(xdata)
self.ydata = np.atleast_1d(ydata)
self.d, self.n = self.xdata.shape
self.q = q
if kernel is None:
kernel = normal_kernel(self.d)
self.kernel = kernel
self._bw = None
self._covariance = None
self.covariance = cov
@property
def bandwidth(self):
"""
Bandwidth of the kernel.
"""
return self._bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
        Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a DxD matrix.
.. note::
A ndarray with a single value will be converted to a floating
point value.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = cov(self.xdata, self.ydata)
else:
_cov = np.atleast_2d(cov)
self._covariance = _cov
self._bw = np.real(sqrtm(_cov))
def evaluate(self, points, out=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray out: Pre-allocated array for the result
"""
xdata = self.xdata
ydata = self.ydata[:, np.newaxis] # make it a column vector
points = np.atleast_2d(points)
n = self.n
q = self.q
d = self.d
designMatrix = PolynomialDesignMatrix(d, q)
dm_size = designMatrix.size
Xx = np.empty((dm_size, n), dtype=xdata.dtype)
WxXx = np.empty(Xx.shape, dtype=xdata.dtype)
XWX = np.empty((dm_size, dm_size), dtype=xdata.dtype)
inv_bw = scipy.linalg.inv(self.bandwidth)
kernel = self.kernel
if out is None:
out = np.empty((points.shape[1],), dtype=float)
for i in irange(points.shape[1]):
dX = (xdata - points[:, i:i + 1])
Wx = kernel(np.dot(inv_bw, dX))
designMatrix(dX, out=Xx)
np.multiply(Wx, Xx, WxXx)
np.dot(Xx, WxXx.T, XWX)
Lx = solve(XWX, WxXx)[0]
out[i] = np.dot(Lx, ydata)
return out
def __call__(self, *args, **kwords):
"""
        This method is an alias for :py:meth:`LocalPolynomialKernel.evaluate`
"""
return self.evaluate(*args, **kwords)
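# Usage sketch for the 1D smoothers above (illustrative, synthetic data; kept as
# comments because it relies on the companion local_linear / kernels modules):
#
#   xs = np.linspace(0, 10, 200)
#   ys = np.cos(xs) + 0.25 * np.random.randn(xs.size)
#   y_lin = LocalLinearKernel1D(xs, ys)(xs)            # local-linear estimate
#   y_poly = LocalPolynomialKernel1D(xs, ys, q=2)(xs)  # local-quadratic estimate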
| 20,696 | 30.406677 | 79 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/py_binning.py | from __future__ import division, print_function
import numpy as np
def fast_bin(X, a, b, N, weights=None, cyclic=False):
"""
Fast binning.
:note: cyclic parameter is ignored. Present only for compatibility with fast_linbin
"""
Y = (X - a)
delta = (b-a) / N
Y /= delta
    iY = np.floor(Y).astype(int)
    # Clamp values equal to the upper bound b into the last bin so that exactly
    # N bins are returned
    iY[iY == N] = N - 1
return np.bincount(iY, weights=weights, minlength=N), np.linspace(a + delta/2, b - delta/2, N)
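# Example (synthetic data): bin 1000 uniform samples into 16 equal bins on
# [0, 1); `counts` sums to 1000 and `centers` holds the 16 bin mid-points.
#
#   counts, centers = fast_bin(np.random.rand(1000), 0.0, 1.0, 16)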
#def fast_linbin(X, a, b, N, weights = None, cyclic = False):
#"""
#Fast linear binning with added weighting
#"""
#X = np.atleast_1d(X).astype(float)
#assert len(X.shape) == 1, "Error, X must be a 1D array"
#if weights is not Nonw:
#weights = np.atleast_1d(weights).astype(float)
#assert weights.shape == X.shape, "Error, weights must be None or an array with the same shape as X"
#delta = (b - a) / N
#if cyclic:
#lower = 0
#upper = M
#shift = -a
#else:
#lower = -0.5
#upper = M-0.5
#shift = -a-delta/2
#Y = X + shift
#Y /= delta
#iY = np.floor(Y).astype(int)
#rem = (Y - iY)
#if weights is not None:
#rem *= weights
#if not cyclic:
#iY += 1
#c1 = np.bincount(iY, weights = 1-rem, minlength=N+1)
#c2 = np.bincount(iY, weights = rem, minlength=N+1)
#if cyclic:
#c1
| 1,356 | 28.5 | 108 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/utils.py | """
:Author: Pierre Barbier de Reuille <[email protected]>
Module contained a variety of small useful functions.
"""
from collections import OrderedDict
from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys
import numpy as np
import inspect
# Find the largest float available for this numpy
from compat import text_type
if hasattr(np, 'float128'):
large_float = np.float128
elif hasattr(np, 'float96'):
large_float = np.float96
else:
large_float = np.float64
def finite(val):
return val is not None and np.isfinite(val)
def make_ufunc(nin = None, nout=1):
"""
    Decorator used to create a ufunc using `np.frompyfunc`. Note that the
    returned array will always be of dtype 'object'. You should use the `out`
    argument if you know the wanted type for the output.
:param int nin: Number of input. Default is found by using
``inspect.getargspec``
:param int nout: Number of output. Default is 1.
"""
def f(fct):
if nin is None:
Nin = len(inspect.getargspec(fct).args)
else:
Nin = nin
return np.frompyfunc(fct, Nin, nout)
return f
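# Example (hypothetical function): turn a scalar two-argument function into a
# ufunc; note the object dtype of the result, as warned in the docstring.
#
#   @make_ufunc()
#   def add(x, y):
#       return x + y
#
#   add([1, 2], [3, 4])   # -> array([4, 6], dtype=object)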
def numpy_trans(fct):
"""
    Decorator to create a function taking a single array-like argument and
    returning a numpy array of the same shape.
    The function is called as:
    fct(z, out=out)
    This decorator guarantees that z and out are ndarrays of the same shape and
    out is at least a float.
"""
def f(z, out=None):
z = np.asanyarray(z)
if out is None:
out = np.empty(z.shape, dtype=type(z.dtype.type() + 0.))
arg_out = out
else:
arg_out = out.reshape(z.shape)
fct(z, out=arg_out)
return out
return f
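# Example (hypothetical function): the decorated function only has to fill the
# pre-allocated `out` array; shape handling is done by the decorator.
#
#   @numpy_trans
#   def double(z, out):
#       np.multiply(z, 2., out=out)
#
#   double([1., 2., 3.])   # -> array([2., 4., 6.])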
def numpy_trans_idx(fct):
"""
    Decorator to create a function taking a single array-like argument and
    returning a numpy array of the same shape. In addition, if the input has no
    dimension, the function will still receive a 1D array, allowing for
    indexing.
    The function is called as:
    fct(z, out=out)
    This decorator guarantees that z and out are at least 1D ndarrays of the
    same shape and out is at least a float.
    It also ensures that the output has the shape of the initial input (even if
    it has no dimension).
"""
def f(z, out=None):
z = np.asanyarray(z)
real_shape = z.shape
if len(real_shape) == 0:
z = z.reshape(1)
if out is None:
out = np.empty(z.shape, dtype=type(z.dtype.type() + 0.))
else:
out.shape = z.shape
out = fct(z, out=out)
out.shape = real_shape
return out
return f
def numpy_method_idx(fct):
"""
    Decorator to create a method taking a single array-like argument and
    returning a numpy array of the same shape. In addition, if the input has no
    dimension, the method will still receive a 1D array, allowing for
    indexing.
    The function is called as:
    fct(self, z, out=out)
    This decorator guarantees that z and out are at least 1D ndarrays of the
    same shape and out is at least a float.
    It also ensures that the output has the shape of the initial input (even if
    it has no dimension).
"""
def f(self, z, out=None):
z = np.asanyarray(z)
real_shape = z.shape
if len(real_shape) == 0:
z = z.reshape(1)
if out is None:
out = np.empty(z.shape, dtype=type(z.dtype.type() + 0.))
else:
out.shape = z.shape
out = fct(self, z, out=out)
out.shape = real_shape
return out
return f
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
    >>> p.x + p.y                       # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, text_type):
# names separated by whitespace and/or commas
field_names = field_names.replace(',', ' ').split()
field_names = tuple(map(str, field_names))
forbidden_fields = {'__init__', '__slots__', '__new__', '__repr__', '__getnewargs__'}
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
need_suffix = (not all(c.isalnum() or c == '_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen)
if need_suffix:
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not all(c.isalnum() or c == '_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters '
'and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('__'):
if name in forbidden_fields:
raise ValueError('Field names cannot be on of %s' % ', '.join(forbidden_fields))
elif name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
'Create new instance of %(typename)s(%(argtxt)s)'
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
'Return a nicely formatted representation string'
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self) \n\n''' % dict(numfields=numfields, field_names=field_names,
typename=typename, argtxt=argtxt, reprtxt=reprtxt)
for i, name in enumerate(field_names):
template += " %s = _property(_itemgetter(%d), " \
"doc='Alias for field number %d')\n" % (name, i, i)
if verbose:
print(template)
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
OrderedDict=OrderedDict, _property=property, _tuple=tuple)
try:
exec(template, namespace)
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
#
from scipy import sqrt
from numpy import finfo, asarray, asfarray, zeros
_epsilon = sqrt(finfo(float).eps)
def approx_jacobian(x, func, epsilon, *args):
"""
Approximate the Jacobian matrix of callable function func
:param ndarray x: The state vector at which the Jacobian matrix is desired
:param callable func: A vector-valued function of the form f(x,*args)
    :param ndarray epsilon: The perturbation used to determine the partial derivatives
    :param tuple args: Additional arguments passed to func
    :returns: An array of dimensions (lenf, lenx) where lenf is the length
        of the outputs of func, and lenx is the number of elements in x
.. note::
The approximation is done using forward differences
"""
x0 = asarray(x)
x0 = asfarray(x0, dtype=x0.dtype)
epsilon = x0.dtype.type(epsilon)
f0 = func(*((x0,) + args))
jac = zeros([len(x0), len(f0)], dtype=x0.dtype)
dx = zeros(len(x0), dtype=x0.dtype)
for i in range(len(x0)):
dx[i] = epsilon
jac[i] = (func(*((x0 + dx,) + args)) - f0) / epsilon
dx[i] = 0.0
return jac.transpose()
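# Minimal self-check (not part of the original module): the forward-difference
# Jacobian of f(x) = (x0**2, x0*x1) at x = (1, 2) is approximately
# [[2, 0], [2, 1]]. Guarded so nothing runs on import.
if __name__ == "__main__":
    _f = lambda x: np.array([x[0] ** 2, x[0] * x[1]])
    print(approx_jacobian([1.0, 2.0], _f, _epsilon))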
| 10,630 | 35.913194 | 99 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/plot_fit.py | """
:Author: Pierre Barbier de Reuille <[email protected]>
This modules implement functions to test and plot parametric regression.
"""
from numpy import argsort, std, abs, sqrt, arange, pi, c_, asarray
from pylab import figure, title, legend, plot, xlabel, ylabel, subplot, ylim, hist, suptitle, gca
from compat import izip
from itertools import chain
from scipy.special import erfinv, gamma
from scipy import stats
#from .kernel_smoothing import LocalLinearKernel1D
from nonparam_regression import NonParamRegression
from compat import unicode_csv_writer as csv_writer
from collections import namedtuple
import sys
if sys.version_info >= (3,):
CSV_WRITE_FLAGS = "wt"
else:
CSV_WRITE_FLAGS = "wb"
def plot_dist_residuals(res):
"""
Plot the distribution of the residuals.
:returns: the handle toward the histogram and the plot of the fitted normal distribution
"""
ph = hist(res, normed=True)
xr = arange(res.min(), res.max(), (res.max() - res.min()) / 1024)
yr = stats.norm(0, res.std()).pdf(xr)
pn = plot(xr, yr, 'r--')
xlabel('Residuals')
ylabel('Frequency')
title('Distributions of the residuals')
return ph, pn
def plot_residuals(xname, xdata, res_desc, res):
"""
Plot the residuals against the X axis
:param str xname: Name of the X axis
:param ndarray xdata: 1D array with the X data
:param str res_desc: Name of the Y axis
:param ndarray res: 1D array with the residuals
The shapes of ``xdata`` and ``res`` must be the same
:returns: The handles of the the plots of the residuals and of the smoothed residuals.
"""
p_res = plot(xdata, res, '+', label='residuals')[0]
plot([xdata.min(), xdata.max()], [0, 0], 'r--')
av = NonParamRegression(xdata, res)
av.fit()
xr = arange(xdata.min(), xdata.max(), (xdata.max() - xdata.min()) / 1024)
rr = av(xr)
p_smooth = plot(xr, rr, 'g', label='smoothed residuals')
xlabel(xname)
ylabel("Residuals")
ymin, ymax = ylim()
ymax = max(ymax, -ymin)
ylim(-ymax, ymax)
title("Residuals (%s) vs. fitted" % (res_desc,))
return p_res, p_smooth
def scaled_location_plot(yname, yopt, scaled_res):
"""
Plot the scaled location, given the dependant values and scaled residuals.
:param str yname: Name of the Y axis
:param ndarray yopt: Estimated values
:param ndarray scaled_res: Scaled residuals
:returns: the handles for the data and the smoothed curve
"""
scr = sqrt(abs(scaled_res))
p_scaled = plot(yopt, scr, '+')[0]
av = NonParamRegression(yopt, scr)
av.fit()
xr = arange(yopt.min(), yopt.max(), (yopt.max() - yopt.min()) / 1024)
rr = av(xr)
p_smooth = plot(xr, rr, 'g')[0]
expected_mean = 2 ** (1 / 4) * gamma(3 / 4) / sqrt(pi)
plot([yopt.min(), yopt.max()], [expected_mean, expected_mean], 'r--')
title('Scale-location')
xlabel(yname)
ylabel('$|$Normalized residuals$|^{1/2}$')
gca().set_yticks([0, 1, 2])
return [p_scaled, p_smooth]
def qqplot(scaled_res, normq):
"""
Draw a Q-Q Plot from the sorted, scaled residuals (i.e. residuals sorted
and normalized by their standard deviation)
:param ndarray scaled_res: Scaled residuals
:param ndarray normq: Expected value for each scaled residual, based on its quantile.
:returns: handle to the data plot
"""
qqp = []
qqp += plot(normq, scaled_res, '+')
qqp += plot(normq, normq, 'r--')
xlabel('Theoretical quantiles')
ylabel('Normalized residuals')
title('Normal Q-Q plot')
return qqp
ResultStruct = namedtuple('ResultStruct', "fct fct_desc param_names xdata ydata xname yname "
"res_name residuals popt res yopts eval_points interpolation "
"sorted_yopts scaled_res normq CI CIs CIresults")
def fit_evaluation(fit, xdata, ydata, eval_points=None,
CI=(), CIresults = None, xname="X", yname="Y",
fct_desc=None, param_names=(), residuals=None, res_name='Standard'):
"""
This function takes the output of a curve fitting experiment and store all the relevant
information for evaluating its success in the result.
:type fit: fitting object
:param fit: object configured for the fitting
    :type xdata: ndarray of shape (N,) or (k,N) for function with k predictors
:param xdata: The independent variable where the data is measured
:type ydata: ndarray
    :param ydata: The dependent data
:type eval_points: ndarray or None
    :param eval_points: Contains the list of points on which the result must be expressed. It is
used both for plotting and for the bootstrapping.
:type CI: tuple of int
:param CI: List of confidence intervals to calculate. If empty, none are calculated.
:type xname: string
:param xname: Name of the X axis
:type yname: string
:param yname: Name of the Y axis
:type fct_desc: string
:param fct_desc: Formula of the function
:type param_names: tuple of strings
:param param_names: Name of the various parameters
:type residuals: callable
:param residuals: Residual function
    :type res_name: string
    :param res_name: Name of the residuals
:rtype: :py:class:`ResultStruct`
:returns: Data structure summarising the fitting and its evaluation
"""
popt = fit.popt
res = fit.res
if CI:
CIs = CIresults.CIs
else:
CIs = []
yopts = fit(xdata)
if eval_points is None:
yvals = yopts
eval_points = xdata
else:
yvals = fit(eval_points)
scaled_res, res_IX, prob, normq = residual_measures(res)
sorted_yopts = yopts[res_IX]
result = {}
result["fct"] = fit
result["fct_desc"] = fct_desc
result["param_names"] = param_names
result["xdata"] = xdata
result["ydata"] = ydata
result["xname"] = xname
result["yname"] = yname
result["res_name"] = res_name
result["residuals"] = residuals
#result["args"] = fit.args
result["popt"] = popt
result["res"] = res
result["yopts"] = yopts
result["eval_points"] = eval_points
result["interpolation"] = yvals
result["sorted_yopts"] = sorted_yopts
result["scaled_res"] = scaled_res
result["normq"] = normq
result["CI"] = CI
result["CIs"] = CIs
#result["CIparams"] = CIparams
result["CIresults"] = CIresults
#print("estimate jacobian = %s" % result["extra_output"][-1]["est_jacobian"])
return ResultStruct(**result)
ResidualMeasures = namedtuple("ResidualMeasures", "scaled_res res_IX prob normq")
def residual_measures(res):
"""
Compute quantities needed to evaluate the quality of the estimation, based solely
on the residuals.
:rtype: :py:class:`ResidualMeasures`
    :returns: the scaled residuals, their ordering, the theoretical quantile for each residual,
and the expected value for each quantile.
"""
IX = argsort(res)
scaled_res = res[IX] / std(res)
prob = (arange(len(scaled_res)) + 0.5) / len(scaled_res)
normq = sqrt(2) * erfinv(2 * prob - 1)
return ResidualMeasures(scaled_res, IX, prob, normq)
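# Illustration (hypothetical residuals): the scaled residuals and the normal
# quantiles returned here have the same shape and can be fed directly to
# qqplot() defined above.
#
#   rm = residual_measures(np.array([1.0, -2.0, 0.5, -0.5]))
#   rm.scaled_res.shape == rm.normq.shape == (4,)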
_restestfields = "res_figure residuals scaled_residuals qqplot dist_residuals"
ResTestResult = namedtuple("ResTestResult", _restestfields)
Plot1dResult = namedtuple("Plot1dResult", "figure estimate data CIs " + _restestfields)
def plot1d(result, loc=0, fig=None, res_fig=None):
"""
Use matplotlib to display the result of a fit, and return the list of plots used
:rtype: :py:class:`Plot1dResult`
    :returns: handles to the various figures and plots
"""
if fig is None:
fig = figure()
else:
try:
figure(fig)
except TypeError:
figure(fig.number)
p_est = plot(result.eval_points, result.interpolation, label='estimated')[0]
p_data = plot(result.xdata, result.ydata, '+', label='data')[0]
p_CIs = []
if result.CI:
for p, (low, high) in izip(result.CI, result.CIs[0]):
l = plot(result.eval_points, low, '--', label='%g%% CI' % (p,))[0]
h = plot(result.eval_points, high, l.get_color() + '--')[0]
p_CIs += [l, h]
if result.param_names:
param_strs = ", ".join("%s=%g" % (n, v) for n, v in izip(result.param_names, result.popt))
else:
param_strs = ", ".join("%g" % v for v in result.popt)
param_strs = "$%s$" % (param_strs,)
title("Estimated function %s with params %s" % (result.fct_desc, param_strs))
xlabel(result.xname)
ylabel(result.yname)
legend(loc=loc)
plots = {"figure": fig, "estimate": p_est, "data": p_data, "CIs": p_CIs}
prt = plot_residual_tests(result.xdata, result.yopts, result.res,
"{0} with params {1}".format(result.fct_desc, param_strs),
result.xname, result.yname, result.res_name, result.sorted_yopts,
result.scaled_res,
result.normq, res_fig)
plots.update(prt._asdict())
return Plot1dResult(**plots)
def plot_residual_tests(xdata, yopts, res, fct_name, xname="X", yname='Y', res_name="residuals",
sorted_yopts=None, scaled_res=None, normq=None, fig=None):
"""
Plot, in a single figure, all four residuals evaluation plots: :py:func:`plot_residuals`,
:py:func:`plot_dist_residuals`, :py:func:`scaled_location_plot` and :py:func:`qqplot`.
:param ndarray xdata: Explaining variables
    :param ndarray yopts: Optimized explained variables
:param str fct_name: Name of the fitted function
:param str xname: Name of the explaining variables
    :param str yname: Name of the dependent variables
:param str res_name: Name of the residuals
:param ndarray sorted_yopts: ``yopt``, sorted to match the scaled residuals
:param ndarray scaled_res: Scaled residuals
:param ndarray normq: Estimated value of the quantiles for a normal distribution
:type fig: handle or None
:param fig: Handle of the figure to put the plots in, or None to create a new figure
:rtype: :py:class:`ResTestResult`
:returns: The handles to all the plots
"""
if fig is None:
fig = figure()
else:
try:
figure(fig)
except TypeError:
figure(fig.number)
xdata = asarray(xdata)
yopts = asarray(yopts)
res = asarray(res)
subplot(2, 2, 1)
# First subplot is the residuals
if len(xdata.shape) == 1 or xdata.shape[1] == 1:
p_res = plot_residuals(xname, xdata.squeeze(), res_name, res)
else:
p_res = plot_residuals(yname, yopts, res_name, res)
if scaled_res is None or sorted_yopts is None or normq is None:
scaled_res, res_IX, _, normq = residual_measures(res)
sorted_yopts = yopts[res_IX]
subplot(2, 2, 2)
p_scaled = scaled_location_plot(yname, sorted_yopts, scaled_res)
subplot(2, 2, 3)
# Q-Q plot
qqp = qqplot(scaled_res, normq)
subplot(2, 2, 4)
# Distribution of residuals
drp = plot_dist_residuals(res)
suptitle("Residual Test for {}".format(fct_name))
return ResTestResult(fig, p_res, p_scaled, qqp, drp)
def write1d(outfile, result, res_desc, CImethod):
"""
Write the result of a fitting and its evaluation to a CSV file.
:param str outfile: Name of the file to write to
:param ResultStruct result: Result of the fitting evaluation
(e.g. output of :py:func:`fit_evaluation`)
:param str res_desc: Description of the residuals
(in more details than just the name of the residuals)
:param str CImethod: Description of the confidence interval estimation method
"""
with open(outfile, CSV_WRITE_FLAGS) as f:
w = csv_writer(f)
w.writerow(["Function", result.fct.fct.description])
w.writerow(["Residuals", result.res_name, res_desc])
w.writerow(["Parameter", "Value"])
for pn, pv in izip(result.param_names, result.popt):
w.writerow([pn, "%.20g" % pv])
#TODO w.writerow(["Regression Evaluation"])
w.writerow([])
w.writerow(["Data"])
w.writerow([result.xname, result.yname, result.fct_desc, "Residuals: %s" % result.res_name])
w.writerows(c_[result.xdata, result.ydata, result.yopts, result.res])
w.writerow([])
w.writerow(['Model validation'])
w.writerow([result.yname, 'Normalized residuals', 'Theoretical quantiles'])
w.writerows(c_[result.sorted_yopts, result.scaled_res, result.normq])
if result.eval_points is not result.xdata:
w.writerow([])
w.writerow(["Interpolated data"])
w.writerow([result.xname, result.yname])
w.writerows(c_[result.eval_points, result.interpolation])
if result.CI:
w.writerow([])
w.writerow(["Confidence interval"])
w.writerow(["Method", CImethod])
head = ["Parameters"] + \
list(chain(*[["%g%% - low" % v, "%g%% - high" % v] for v in result.CI]))
w.writerow(head)
#print(result.CIs[1])
for cis in izip(result.param_names, *chain(*result.CIs[1])):
cistr = [cis[0]] + ["%.20g" % v for v in cis[1:]]
w.writerow(cistr)
w.writerow([result.yname])
head[0] = result.xname
w.writerow(head)
w.writerows(c_[tuple(chain([result.eval_points], *result.CIs[0]))])
| 13,736 | 34.043367 | 100 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/bootstrap.py | """
:Author: Pierre Barbier de Reuille <[email protected]>
This modules provides function for bootstrapping a regression method.
"""
import multiprocessing as mp
from collections import namedtuple
import numpy as np
from numpy.random import randint
import bootstrap_workers
import nonparam_regression
import sharedmem
from compat import irange, izip
def percentile(array, p, axis=0):
"""
Compute the percentiles of the values in array
"""
    a = np.sort(np.asarray(array), axis=axis)
    n = (a.shape[axis] - 1) * p / 100.0
    n0 = int(np.floor(n))
    n1 = min(n0 + 1, a.shape[axis] - 1)
    #print("%g percentile on %d = [%d-%d]" % (p*100, len(array), n0, n1))
    d = n - n0
    v0 = np.take(a, n0, axis=axis)
    v1 = np.take(a, n1, axis=axis)
    return v0 + d * (v1 - v0)
def bootstrap_residuals(fct, xdata, ydata, repeats=3000, residuals=None,
add_residual=None, correct_bias=False, **kwrds):
"""
This implements the residual bootstrapping method for non-linear
regression.
:type fct: callable
:param fct: Function evaluating the function on xdata at least with
``fct(xdata)``
:type xdata: ndarray of shape (N,) or (k,N) for function with k predictors
:param xdata: The independent variable where the data is measured
:type ydata: ndarray
    :param ydata: The dependent data
:type residuals: ndarray or callable or None
:param residuals: Residuals for the estimation on each xdata. If callable,
the call will be ``residuals(ydata, yopt)``.
:type repeats: int
:param repeats: Number of repeats for the bootstrapping
:type add_residual: callable or None
:param add_residual: Function that add a residual to a value. The call
``add_residual(yopt, residual)`` should return the new ydata, with
the residuals 'applied'. If None, it is considered the residuals should
simply be added.
:type correct_bias: boolean
:param correct_bias: If true, the additive bias of the residuals is
computed and restored
:type kwrds: dict
    :param kwrds: Dictionary present to absorb unknown named parameters
:rtype: (ndarray, ndarray)
:returns:
        1. xdata, with a new axis at position -2. This corresponds to the
        'shuffled' xdata (as they are *not* shuffled here)
        2. The shuffled ydata. There is a line per repeat, each line is
        shuffled independently.
.. todo::
explain the method here, as well as how to create add_residual
"""
if residuals is None:
residuals = np.subtract
yopt = fct(xdata)
if not isinstance(residuals, np.ndarray):
res = residuals(ydata, yopt)
else:
res = np.array(residuals)
res -= np.mean(res)
shuffle = randint(0, len(ydata), size=(repeats, len(ydata)))
shuffled_res = res[shuffle]
if correct_bias:
kde = nonparam_regression.NonParamRegression(xdata, res)
kde.fit()
bias = kde(xdata)
shuffled_res += bias
if add_residual is None:
add_residual = np.add
modified_ydata = add_residual(yopt, shuffled_res)
return xdata[..., np.newaxis, :], modified_ydata
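def _bootstrap_residuals_sketch():
    # Hedged usage sketch, not part of the original API: it resamples residuals
    # around a known linear trend and returns the shapes described in the
    # docstring of bootstrap_residuals(). The model function below is only a
    # stand-in for a fitted regression.
    def fct(xs):
        return 2. * xs
    x = np.linspace(0., 1., 50)
    y = fct(x) + 0.1 * np.random.randn(50)
    new_x, new_y = bootstrap_residuals(fct, x, y, repeats=100)
    return new_x.shape, new_y.shape   # expected: (1, 50) and (100, 50)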
def bootstrap_regression(fct, xdata, ydata, repeats=3000, **kwrds):
"""
This implements the shuffling of standard bootstrapping method for
non-linear regression.
:type fct: callable
:param fct: This is the function to optimize
:type xdata: ndarray of shape (N,) or (k,N) for function with k predictors
:param xdata: The independent variable where the data is measured
:type ydata: ndarray
    :param ydata: The dependent data
:type repeats: int
:param repeats: Number of repeats for the bootstrapping
:type kwrds: dict
    :param kwrds: Dictionary to absorb unknown named parameters
:rtype: (ndarray, ndarray)
:returns:
1. The shuffled x data. The axis -2 has one element per repeat, the
other axis are shuffled independently.
2. The shuffled ydata. There is a line per repeat, each line is
shuffled independently.
.. todo::
explain the method here
"""
shuffle = randint(0, len(ydata), size=(repeats, len(ydata)))
shuffled_x = xdata[..., shuffle]
shuffled_y = ydata[shuffle]
return shuffled_x, shuffled_y
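def _bootstrap_regression_sketch():
    # Hedged usage sketch, not part of the original API: case resampling keeps
    # the (x, y) pairs together, so both outputs carry one resampled row per
    # repeat. The function argument is not evaluated by bootstrap_regression.
    def fct(xs):
        return 2. * xs
    x = np.linspace(0., 1., 50)
    y = fct(x) + 0.1 * np.random.randn(50)
    new_x, new_y = bootstrap_regression(fct, x, y, repeats=100)
    return new_x.shape, new_y.shape   # expected: (100, 50) and (100, 50)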
def getCIs(CI, *arrays):
#sorted_arrays = [ np.sort(a, axis=0) for a in arrays ]
if not np.iterable(CI):
CI = (CI,)
def make_CI(a):
return np.zeros((len(CI), 2) + a.shape[1:], dtype=float)
CIs = tuple(make_CI(a) for a in arrays)
for i, ci in enumerate(CI):
ci = (100. - ci) / 2
for cis, arr in izip(CIs, arrays):
low = np.percentile(arr, ci, axis=0)
high = np.percentile(arr, 100 - ci, axis=0)
cis[i] = [low, high]
return CIs
BootstrapResult = namedtuple('BootstrapResult', '''y_fit y_est eval_points y_eval CIs_val CIs
shuffled_xs shuffled_ys full_results''')
def bootstrap(fit, xdata, ydata, CI, shuffle_method=bootstrap_residuals,
shuffle_args=(), shuffle_kwrds={}, repeats=3000,
eval_points=None, full_results=False, nb_workers=None,
extra_attrs=(), fit_args=(), fit_kwrds={}):
"""
This function implement the bootstrap algorithm for a regression algorithm.
It is capable of spreading the load across many threads using shared memory
and the :py:mod:`multiprocess` module.
:type fit: callable
:param fit:
Method used to compute regression. The call is::
f = fit(xdata, ydata, *fit_args, **fit_kwrds)
        Fit should return an object exposing a ``fit()`` method and able to
        evaluate the regression on a set of points. The next call will be::
f(eval_points)
:type xdata: ndarray of shape (N,) or (k,N) for function with k predictors
:param xdata: The independent variable where the data is measured
:type ydata: ndarray
    :param ydata: The dependent data
:type CI: tuple of float
:param CI: List of percentiles to extract
:type shuffle_method: callable
:param shuffle_method:
Create shuffled dataset. The call is::
            shuffle_method(y_est, xdata, ydata, repeats=repeats, *shuffle_args,
                           **shuffle_kwrds)
        where ``y_est`` is the fitted regression object returned by ``fit``,
        used to evaluate the estimated dependent variable on the xdata.
:type shuffle_args: tuple
:param shuffle_args: List of arguments for the shuffle method
:type shuffle_kwrds: dict
    :param shuffle_kwrds: Dictionary of arguments for the shuffle method
:type repeats: int
    :param repeats: Number of repeats for the bootstrapping
:type eval_points: ndarray or None
    :param eval_points: List of points to evaluate. If None, eval_points
        is xdata.
:type full_results: bool
:param full_results: if True, output also the whole set of evaluations
:type nb_workers: int or None
    :param nb_workers: Number of worker threads. If None, the number of
        detected CPUs will be used; if 1 or less, a single thread
will be used.
:type extra_attrs: tuple of str
:param extra_attrs: List of attributes of the fitting method to extract on
top of the y values for confidence intervals
:type fit_args: tuple
:param fit_args: List of extra arguments for the fit callable
:type fit_kwrds: dict
    :param fit_kwrds: Dictionary of extra named arguments for the fit callable
:rtype: :py:class:`BootstrapResult`
:return: Estimated y on the data, on the evaluation points, the requested
confidence intervals and, if requested, the shuffled X, Y and the full
estimated distributions.
"""
xdata = np.asarray(xdata)
ydata = np.asarray(ydata)
y_fit = fit(xdata, ydata, *fit_args, **fit_kwrds)
y_fit.fit()
shuffled_x, shuffled_y = shuffle_method(y_fit, xdata, ydata,
repeats=repeats,
*shuffle_args, **shuffle_kwrds)
nx = shuffled_x.shape[-2]
ny = shuffled_y.shape[0]
extra_values = []
for attr in extra_attrs:
extra_values.append(getattr(y_fit, attr))
if eval_points is None:
eval_points = xdata
if nb_workers is None:
nb_workers = mp.cpu_count()
multiprocess = nb_workers > 1
# Copy everything in shared mem
if multiprocess:
ra = sharedmem.zeros((repeats + 1, len(eval_points)), dtype=float)
result_array = ra.np
sx = sharedmem.array(shuffled_x)
sy = sharedmem.array(shuffled_y)
ep = sharedmem.array(eval_points)
def make_ea(ev):
return sharedmem.zeros((repeats + 1, len(ev)), dtype=float)
eas = [make_ea(ev) for ev in extra_values]
extra_arrays = [ea.np for ea in eas]
pool = mp.Pool(mp.cpu_count(), bootstrap_workers.initialize_shared,
(nx, ny, ra, eas, sx, sy, ep, extra_attrs,
fit, fit_args, fit_kwrds))
else:
result_array = np.empty((repeats + 1, len(eval_points)), dtype=float)
def make_ea(ev):
return np.empty((repeats + 1, len(ev)), dtype=float)
extra_arrays = [make_ea(ev) for ev in extra_values]
bootstrap_workers.initialize(nx, ny, result_array, extra_arrays,
shuffled_x, shuffled_y, eval_points,
extra_attrs, fit, fit_args, fit_kwrds)
result_array[0] = y_fit(eval_points)
for ea, ev in izip(extra_arrays, extra_values):
ea[0] = ev
base_repeat = repeats // nb_workers
if base_repeat * nb_workers < repeats:
base_repeat += 1
for i in irange(nb_workers):
end_repeats = (i + 1) * base_repeat
if end_repeats > repeats:
end_repeats = repeats
if multiprocess:
pool.apply_async(bootstrap_workers.bootstrap_result,
(i, i * base_repeat, end_repeats))
else:
bootstrap_workers.bootstrap_result(i, i * base_repeat, end_repeats)
if multiprocess:
pool.close()
pool.join()
CIs = getCIs(CI, result_array, *extra_arrays)
# copy the array to not return a view on a larger array
y_eval = np.array(result_array[0])
if not full_results:
shuffled_y = shuffled_x = result_array = None
extra_arrays = ()
elif multiprocess:
result_array = result_array.copy() # copy in local memory
        extra_arrays = [ea.copy() for ea in extra_arrays]
return BootstrapResult(y_fit, y_fit(xdata), eval_points, y_eval, tuple(CI), CIs,
shuffled_x, shuffled_y, result_array)
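def _bootstrap_usage_sketch():
    # Hedged usage sketch, not part of the original API: it assumes that
    # nonparam_regression.NonParamRegression follows the fit()/__call__
    # protocol documented above for the `fit` argument. nb_workers=1 keeps
    # everything in a single process, which is easier to debug.
    x = np.linspace(0., 1., 100)
    y = np.sin(2 * np.pi * x) + 0.1 * np.random.randn(100)
    result = bootstrap(nonparam_regression.NonParamRegression, x, y, CI=(95,),
                       repeats=200, nb_workers=1)
    return result.y_eval, result.CIs[0]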
def test():
import quad
from numpy.random import rand, randn
from pylab import plot, clf, legend, arange, figure, title, show
from curve_fitting import curve_fit
def quadratic(x, params):
p0, p1, p2 = params
return p0 + p1 * x + p2 * x ** 2
#test = quadratic
test = quad.quadratic
init = (10, 1, 1)
target = np.array([10, 4, 1.2])
print("Target parameters: {}".format(target))
x = 6 * rand(200) - 3
y = test(x, target) * (1 + 0.3 * randn(x.shape[0]))
xr = arange(-3, 3, 0.01)
yr = test(xr, target)
print("Estimage best parameters, fixing the first one")
popt, pcov, _, _ = curve_fit(test, x, y, init, fix_params=(0,))
print("Best parameters: {}".format(popt))
print("Estimate best parameters from data")
popt, pcov, _, _ = curve_fit(test, x, y, init)
print("Best parameters: {}".format(popt))
figure(1)
clf()
plot(x, y, '+', label='data')
plot(xr, yr, 'r', label='function')
legend(loc='upper left')
print("Residual bootstrap calculation")
result_r = bootstrap(test, x, y, init, (95, 99),
shuffle_method=bootstrap_residuals, eval_points=xr,
fit=curve_fit)
popt_r, pcov_r, res_r, CI_r, CIp_r, extra_r = result_r
yopt_r = test(xr, popt_r)
figure(2)
clf()
plot(xr, yopt_r, 'g', label='estimate')
plot(xr, yr, 'r', label='target')
plot(xr, CI_r[0][0], 'b--', label='95% CI')
plot(xr, CI_r[0][1], 'b--')
plot(xr, CI_r[1][0], 'k--', label='99% CI')
plot(xr, CI_r[1][1], 'k--')
legend(loc='upper left')
title('Residual Bootstrapping')
print("Regression bootstrap calculation")
(popt_c, pcov_c, res_c, CI_c, CIp_r,
extra_c) = bootstrap(test, x, y, init, CI=(95, 99),
shuffle_method=bootstrap_regression, eval_points=xr,
fit=curve_fit)
yopt_c = test(xr, popt_c)
figure(3)
clf()
plot(xr, yopt_c, 'g', label='estimate')
plot(xr, yr, 'r', label='target')
plot(xr, CI_c[0][0], 'b--', label='95% CI')
plot(xr, CI_c[0][1], 'b--')
plot(xr, CI_c[1][0], 'k--', label='99% CI')
plot(xr, CI_c[1][1], 'k--')
legend(loc='upper left')
title('Regression Bootstrapping (also called Case Resampling)')
print("Done")
show()
return locals()
def profile(filename='bootstrap_profile'):
import cProfile
import pstats
    cProfile.run('res = bootstrap.test()', filename)
    p = pstats.Stats(filename)
return p
if __name__ == "__main__":
test()
| 13,314 | 31.007212 | 93 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/npr_methods.py | """
:Author: Pierre Barbier de Reuille <[email protected]>
Module implementing non-parametric regressions using kernel methods.
"""
import numpy as np
import scipy
from scipy import linalg
import kde
import kernels
import py_local_linear
from compat import irange
from cyth import HAS_CYTHON
local_linear = None
def useCython():
"""
Switch to using Cython methods if available
"""
global local_linear
if HAS_CYTHON:
import cy_local_linear
local_linear = cy_local_linear
def usePython():
"""
Switch to using the python implementation of the methods
"""
global local_linear
local_linear = py_local_linear
if HAS_CYTHON:
useCython()
else:
usePython()
def compute_bandwidth(reg):
"""
Compute the bandwidth and covariance for the model, based of its xdata attribute
"""
if reg.bandwidth_function:
bw = np.atleast_2d(reg.bandwidth_function(reg.xdata, model=reg))
cov = np.dot(bw, bw).real
elif reg.covariance_function:
cov = np.atleast_2d(reg.covariance_function(reg.xdata, model=reg))
bw = linalg.sqrtm(cov)
else:
return reg.bandwidth, reg.covariance
return bw, cov
class RegressionKernelMethod(object):
r"""
Base class for regression kernel methods
"""
def fit(self, reg):
"""
Fit the method and returns the fitted object that will be used for actual evaluation.
The object needs to call the :py:meth:`pyqt_fit.nonparam_regression.NonParamRegression.set_actual_bandwidth`
method with the computed bandwidth and covariance.
:Default: Compute the bandwidth based on the real data and set it in the regression object
"""
reg.set_actual_bandwidth(*compute_bandwidth(reg))
return self
def evaluate(self, points, out):
"""
Evaluate the regression of the provided points.
:param ndarray points: 2d-array of points to compute the regression on. Each column is a point.
:param ndarray out: 1d-array in which to store the result
:rtype: ndarray
:return: The method must return the ``out`` array, updated with the regression values
"""
raise NotImplementedError()
class SpatialAverage(RegressionKernelMethod):
r"""
Perform a Nadaraya-Watson regression on the data (i.e. also called
local-constant regression) using a gaussian kernel.
The Nadaraya-Watson estimate is given by:
.. math::
f_n(x) \triangleq \frac{\sum_i K\left(\frac{x-X_i}{h}\right) Y_i}
{\sum_i K\left(\frac{x-X_i}{h}\right)}
Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
and :math:`h` is the bandwidth of the method.
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:type cov: ndarray or callable
:param cov: If an ndarray, it should be a 2D array giving the matrix of
covariance of the gaussian kernel. Otherwise, it should be a function
``cov(xdata, ydata)`` returning the covariance matrix.
"""
def __init__(self):
self.correction = 1.
def fit(self, reg):
self = super(SpatialAverage, self).fit(reg)
self.inv_bw = linalg.inv(reg.bandwidth)
return self
def evaluate(self, reg, points, out):
d, m = points.shape
norm = np.zeros((m,), points.dtype)
xdata = reg.xdata[..., np.newaxis]
ydata = reg.fitted_ydata
correction = self.correction
N = reg.N
inv_bw = scipy.linalg.inv(reg.bandwidth)
kernel = reg.kernel
out.fill(0)
# iterate on the internal points
for i, ci in np.broadcast(irange(N),
irange(correction.shape[0])):
diff = correction[ci] * (xdata[:, i, :] - points)
#tdiff = np.dot(inv_cov, diff)
#energy = np.exp(-np.sum(diff * tdiff, axis=0) / 2.0)
energy = kernel(np.dot(inv_bw, diff)).squeeze()
out += ydata[i] * energy
norm += energy
out[norm > 0] /= norm[norm > 0]
return out
@property
def correction(self):
"""
        The correction coefficient allows changing the width of the kernel
        depending on the point considered. It can be either a constant (to
        globally correct the kernel width), or a 1D array of the same size as
        the input.
"""
return self._correction
@correction.setter
def correction(self, value):
value = np.atleast_1d(value)
assert len(value.shape) == 1, "Error, the correction must be a single value or a 1D array"
self._correction = value
    def set_density_correction(self, xdata):
        """
        Add a correction coefficient depending on the density of ``xdata``
        """
        est = kde.KDE1D(xdata)
        dens = est(xdata)
dm = dens.max()
dens[dens < 1e-50] = dm
self._correction = dm / dens
@property
def q(self):
"""
        Degree of the fitted polynomial
"""
return 0
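def _nadaraya_watson_sketch(x, X, Y, h):
    # Hedged illustration of the Nadaraya-Watson formula quoted in the
    # SpatialAverage docstring, written directly for 1D data with a gaussian
    # kernel of bandwidth h; it is not used by the class itself.
    x = np.asarray(x, dtype=float)
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    u = (x[:, np.newaxis] - X[np.newaxis, :]) / h
    K = np.exp(-0.5 * u ** 2)
    return np.dot(K, Y) / K.sum(axis=1)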
class LocalLinearKernel1D(RegressionKernelMethod):
r"""
Perform a local-linear regression using a gaussian kernel.
The local constant regression is the function that minimises, for each
position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - a_1(x-X_i)\right)^2
Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
and :math:`h` is the bandwidth of the method.
"""
def fit(self, reg):
return super(LocalLinearKernel1D, self).fit(reg)
def evaluate(self, reg, points, out):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put in this
array
"""
points = points[0]
xdata = reg.xdata[0]
ll = local_linear.local_linear_1d
if not isinstance(reg.kernel, kernels.normal_kernel1d):
ll = py_local_linear.local_linear_1d
li2, out = ll(reg.bandwidth, xdata, reg.fitted_ydata, points, reg.kernel, out)
self.li2 = li2
return out
@property
def q(self):
"""
        Degree of the fitted polynomial
"""
return 1
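def _local_linear_sketch(points, X, Y, h):
    # Hedged illustration of the local-linear minimisation quoted in the
    # LocalLinearKernel1D docstring, solved explicitly as a weighted least
    # squares problem with a gaussian kernel of bandwidth h; the class itself
    # delegates to the (cython or python) local_linear modules for speed.
    points = np.asarray(points, dtype=float)
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    out = np.empty(points.shape, dtype=float)
    for j, p in enumerate(points):
        d = X - p
        w = np.sqrt(np.exp(-0.5 * (d / h) ** 2))
        A = np.column_stack((np.ones_like(d), d))
        coeffs = linalg.lstsq(A * w[:, np.newaxis], Y * w)[0]
        out[j] = coeffs[0]            # the intercept is the estimate at p
    return out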
class PolynomialDesignMatrix1D(object):
def __init__(self, degree):
self.degree = degree
powers = np.arange(0, degree + 1).reshape((1, degree + 1))
self.powers = powers
def __call__(self, dX, out=None):
return np.power(dX, self.powers, out)
class LocalPolynomialKernel1D(RegressionKernelMethod):
r"""
Perform a local-polynomial regression using a user-provided kernel
(Gaussian by default).
The local constant regression is the function that minimises, for each
position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - a_1(x-X_i) - \ldots -
a_q \frac{(x-X_i)^q}{q!}\right)^2
Where :math:`K(x)` is the kernel such that :math:`E(K(x)) = 0`, :math:`q`
is the order of the fitted polynomial and :math:`h` is the bandwidth of
the method. It is also recommended to have :math:`\int_\mathbb{R} x^2K(x)dx
= 1`, (i.e. variance of the kernel is 1) or the effective bandwidth will be
scaled by the square-root of this integral (i.e. the standard deviation of
the kernel).
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:param int q: Order of the polynomial to fit. **Default:** 3
:type cov: float or callable
    :param cov: If a float, it should be the variance of the gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning
the variance.
**Default:** ``scotts_covariance``
"""
def __init__(self, q=3):
self._q = q
@property
def q(self):
'''
Degree of the fitted polynomials
'''
return self._q
@q.setter
def q(self, val):
self._q = int(val)
def fit(self, reg):
assert reg.dim == 1, "This method can only be used with 1D data"
if self.q == 0:
obj = SpatialAverage()
return obj.fit(reg)
elif self.q == 1:
obj = LocalLinearKernel1D()
return obj.fit(reg)
self = super(LocalPolynomialKernel1D, self).fit(reg)
self.designMatrix = PolynomialDesignMatrix1D(self.q)
return self
def evaluate(self, reg, points, out):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put
in this array
"""
xdata = reg.xdata[0, :, np.newaxis] # make it a column vector
ydata = reg.fitted_ydata[:, np.newaxis] # make it a column vector
points = points[0] # make it a line vector
bw = reg.bandwidth
kernel = reg.kernel
designMatrix = self.designMatrix
for i, p in enumerate(points):
dX = (xdata - p)
Wx = kernel(dX / bw)
Xx = designMatrix(dX)
WxXx = Wx * Xx
XWX = np.dot(Xx.T, WxXx)
Lx = linalg.solve(XWX, WxXx.T)[0]
out[i] = np.dot(Lx, ydata)
return out
class PolynomialDesignMatrix(object):
"""
Class used to create a design matrix for polynomial regression
"""
def __init__(self, dim, deg):
self.dim = dim
self.deg = deg
self._designMatrixSize()
def _designMatrixSize(self):
"""
        Compute the size of the design matrix for an n-D problem of order d,
        using ``self.dim`` and ``self.deg``, and store it in ``self.size``.
        The commented-out code would also compute the Taylor factors (i.e. the
        factors that would be applied for the Taylor decomposition).
"""
dim = self.dim
deg = self.deg
init = 1
dims = [0] * (dim + 1)
cur = init
prev = 0
#if factors:
# fcts = [1]
fact = 1
for i in irange(deg):
diff = cur - prev
prev = cur
old_dims = list(dims)
fact *= (i + 1)
for j in irange(dim):
dp = diff - old_dims[j]
cur += dp
dims[j + 1] = dims[j] + dp
# if factors:
# fcts += [fact]*(cur-prev)
self.size = cur
#self.factors = np.array(fcts)
def __call__(self, x, out=None):
"""
Creates the design matrix for polynomial fitting using the points x.
:param ndarray x: Points to create the design matrix.
Shape must be (D,N) or (N,), where D is the dimension of
the problem, 1 if not there.
:param int deg: Degree of the fitting polynomial
:param ndarray factors: Scaling factor for the columns of the design
matrix. The shape should be (M,) or (M,1), where M is the number
            of columns of the output. This value can be obtained using
the :py:func:`designMatrixSize` function.
:returns: The design matrix as a (M,N) matrix.
"""
dim, deg = self.dim, self.deg
#factors = self.factors
x = np.atleast_2d(x)
dim = x.shape[0]
        if out is None:
            out = np.empty((self.size, x.shape[1]), dtype=x.dtype)
dims = [0] * (dim + 1)
out[0, :] = 1
cur = 1
for i in irange(deg):
old_dims = list(dims)
prev = cur
for j in irange(x.shape[0]):
dims[j] = cur
for k in irange(old_dims[j], prev):
np.multiply(out[k], x[j], out[cur])
cur += 1
#if factors is not None:
# factors = np.asarray(factors)
# if len(factors.shape) == 1:
# factors = factors[:,np.newaxis]
# out /= factors
return out
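def _design_matrix_sketch():
    # Hedged illustration, for exposition only: with the construction above, a
    # 2D problem of degree 2 yields the six monomials [1, x, y, x**2, x*y, y**2]
    # (one row of the design matrix per monomial, one column per point).
    dm = PolynomialDesignMatrix(2, 2)
    pts = np.array([[1., 2., 3.],     # x coordinates
                    [4., 5., 6.]])    # y coordinates
    return dm.size, dm(pts)           # dm.size is expected to be 6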
class LocalPolynomialKernel(RegressionKernelMethod):
r"""
Perform a local-polynomial regression in N-D using a user-provided kernel
(Gaussian by default).
The local constant regression is the function that minimises,
for each position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - \mathcal{P}_q(X_i-x)\right)^2
Where :math:`K(x)` is the kernel such that :math:`E(K(x)) = 0`, :math:`q`
is the order of the fitted polynomial, :math:`\mathcal{P}_q(x)` is a
polynomial of order :math:`d` in :math:`x` and :math:`h` is the bandwidth
of the method.
The polynomial :math:`\mathcal{P}_q(x)` is of the form:
.. math::
        \mathcal{F}_d(k) = \left\{ \mathbf{n} \in \mathbb{N}^d \middle|
        \sum_{i=1}^d n_i = k \right\}
        \mathcal{P}_q(x_1,\ldots,x_d) = \sum_{k=1}^q
            \sum_{\mathbf{n}\in\mathcal{F}_d(k)} a_{k,\mathbf{n}}
\prod_{i=1}^d x_i^{n_i}
For example we have:
.. math::
\mathcal{P}_2(x,y) = a_{110} x + a_{101} y + a_{220} x^2 +
a_{211} xy + a_{202} y^2
:param ndarray xdata: Explaining variables (at most 2D array).
The shape should be (N,D) with D the dimension of the problem
and N the number of points. For 1D array, the shape can be (N,),
in which case it will be converted to (N,1) array.
:param ndarray ydata: Explained variables (should be 1D array). The shape
must be (N,).
:param int q: Order of the polynomial to fit. **Default:** 3
:param callable kernel: Kernel to use for the weights. Call is
``kernel(points)`` and should return an array of values the same size
as ``points``. If ``None``, the kernel will be ``normal_kernel(D)``.
:type cov: float or callable
    :param cov: If a float, it should be the variance of the gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning
the variance.
**Default:** ``scotts_covariance``
"""
def __init__(self, q=3):
self._q = q
@property
def q(self):
'''
Degree of the fitted polynomials
'''
return self._q
@q.setter
def q(self, val):
self._q = int(val)
def fit(self, reg):
if self.q == 0:
obj = SpatialAverage()
return obj.fit(reg)
elif reg.dim == 1:
obj = LocalPolynomialKernel1D(self.q)
return obj.fit(reg)
self = super(LocalPolynomialKernel, self).fit(reg)
self.designMatrix = PolynomialDesignMatrix(reg.dim, self.q)
return self
def evaluate(self, reg, points, out):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray out: Pre-allocated array for the result
"""
xdata = reg.xdata
ydata = reg.fitted_ydata[:, np.newaxis] # make it a column vector
d, n = xdata.shape
designMatrix = self.designMatrix
dm_size = designMatrix.size
Xx = np.empty((dm_size, n), dtype=xdata.dtype)
WxXx = np.empty(Xx.shape, dtype=xdata.dtype)
XWX = np.empty((dm_size, dm_size), dtype=xdata.dtype)
inv_bw = scipy.linalg.inv(reg.bandwidth)
kernel = reg.kernel
for i in irange(points.shape[1]):
dX = (xdata - points[:, i:i + 1])
Wx = kernel(np.dot(inv_bw, dX))
designMatrix(dX, out=Xx)
np.multiply(Wx, Xx, WxXx)
np.dot(Xx, WxXx.T, XWX)
Lx = linalg.solve(XWX, WxXx)[0]
out[i] = np.dot(Lx, ydata)
return out
default_method = LocalPolynomialKernel(q=1)
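def _npr_methods_sketch():
    # Hedged usage sketch, not part of the original API: the methods defined in
    # this module are normally selected through
    # nonparam_regression.NonParamRegression (imported lazily here to avoid a
    # circular import at module load time). It assumes NonParamRegression
    # accepts a ``method`` keyword argument.
    import nonparam_regression
    x = np.linspace(0., 1., 100)
    y = np.sin(2 * np.pi * x) + 0.1 * np.random.randn(100)
    reg = nonparam_regression.NonParamRegression(x, y, method=LocalPolynomialKernel(q=2))
    reg.fit()
    return reg(np.linspace(0., 1., 200))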
| 16,464 | 31.539526 | 117 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/sharedmem.py | """
Module containing utility classes and functions.
"""
import ctypes
import multiprocessing as mp
import numpy as np
from compat import irange
CTYPES_CHAR_LIST = [ctypes.c_char,
ctypes.c_wchar
]
CTYPES_INT_LIST = [ctypes.c_byte,
ctypes.c_short,
ctypes.c_int,
ctypes.c_long,
ctypes.c_longlong
]
CTYPES_UINT_LIST = [ctypes.c_ubyte,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_ulong,
ctypes.c_ulonglong
]
CTYPES_FLOAT_LIST = [ctypes.c_float,
ctypes.c_double,
ctypes.c_longdouble
]
CTYPES_TO_NUMPY = {ctypes.c_char: np.dtype(np.character),
ctypes.c_wchar: np.dtype(np.unicode),
}
def _get_ctype_size(ct):
return ctypes.sizeof(ct)
for t in CTYPES_INT_LIST:
CTYPES_TO_NUMPY[t] = np.dtype("=i{:d}".format(_get_ctype_size(t)))
for t in CTYPES_UINT_LIST:
CTYPES_TO_NUMPY[t] = np.dtype("=u{:d}".format(_get_ctype_size(t)))
for t in CTYPES_FLOAT_LIST:
CTYPES_TO_NUMPY[t] = np.dtype("=f{:d}".format(_get_ctype_size(t)))
NUMPY_TO_CTYPES = {CTYPES_TO_NUMPY[t]: t for t in CTYPES_TO_NUMPY}
class _dummy(object):
pass
def _shmem_as_ndarray(raw_array, shape=None, order='C'):
address = ctypes.addressof(raw_array)
length = len(raw_array)
size = ctypes.sizeof(raw_array)
item_size = size // length
if shape is None:
shape = (length,)
else:
assert np.prod(shape) == length
dtype = CTYPES_TO_NUMPY.get(raw_array._type_, None)
if dtype is None:
raise TypeError("Unknown conversion from {} to numpy type".format(raw_array._type_))
strides = tuple(item_size * np.prod(shape[i + 1:], dtype=int) for i in irange(len(shape)))
if order != 'C':
strides = strides[::-1]
d = _dummy()
d.__array_interface__ = {'data': (address, False),
'typestr': dtype.str,
'desc': dtype.descr,
'shape': shape,
'strides': strides,
}
return np.asarray(d)
def _allocate_raw_array(size, dtype):
dtype = np.dtype(dtype)
ct = NUMPY_TO_CTYPES.get(dtype)
if ct is None:
raise TypeError("Error, cannot convert numpy type {} into ctypes".format(dtype))
return mp.RawArray(ct, int(size))
class SharedArray(object):
def __init__(self, ra, shape=None, order='C'):
self._ra = ra
if ra is not None:
self._np = _shmem_as_ndarray(self._ra, shape, order)
self._shape = shape
else:
self._np = None
self._shape = None
def _get_np(self):
return self._np
def _set_np(self, content):
self.np[:] = content
np = property(_get_np, _set_np)
def _get_ra(self):
return self._ra
ra = property(_get_ra)
def __getinitargs__(self):
return (self._ra, self._shape)
def array(content, dtype=None, order=None, ndmin=0):
content = np.asarray(content)
if dtype is None:
dtype = content.dtype
ra = _allocate_raw_array(np.prod(content.shape), dtype)
shape = tuple(content.shape)
if ndmin > len(shape):
shape = (1,) * (ndmin - len(shape)) + shape
sa = SharedArray(ra, shape)
sa.np = content
return sa
def ones(shape, dtype=float, order=None):
ra = _allocate_raw_array(np.prod(shape), dtype)
sa = SharedArray(ra, shape)
sa.np = 1
return sa
def zeros(shape, dtype=float, order=None):
ra = _allocate_raw_array(np.prod(shape), dtype)
sa = SharedArray(ra, shape)
return sa
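def _sharedmem_sketch():
    # Hedged usage sketch, for illustration only: allocate a shared block with
    # zeros(), view it as a numpy array through the .np property and fill it;
    # worker processes started afterwards can map the same .ra buffer back to
    # an ndarray with _shmem_as_ndarray().
    sa = zeros((4, 3), dtype=float)
    sa.np[:] = 1.5
    return sa.np.sum()                # expected: 18.0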
| 3,828 | 26.156028 | 94 | py |
OpenBCIPython | OpenBCIPython-master/py_qt/__init__.py | 0 | 0 | 0 | py |
|
OpenBCIPython | OpenBCIPython-master/py_qt/compat.py | import sys
import csv
PY2 = sys.version_info[0] == 2
if PY2:
user_text = unicode
text_type = basestring
unichr = unichr
irange = xrange
lrange = range
CSV_READ_FLAGS = b"rb"
DECODE_STRING = lambda s: s.decode('utf_8')
from itertools import izip
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
        # csv.py doesn't do Unicode; work on UTF-8 bytes and decode below:
csv_reader = csv.reader(unicode_csv_data,
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
class unicode_csv_writer(object):
def __init__(self, *args, **kwords):
self.csv = csv.writer(*args, **kwords)
def writerows(self, rows):
rows = [[unicode(val).encode('utf-8') for val in row]
for row in rows]
return self.csv.writerows(rows)
def writerow(self, row):
row = [unicode(val).encode('utf-8') for val in row]
return self.csv.writerow(row)
else:
user_text = str
text_type = str
unichr = chr
irange = range
lrange = lambda x: list(range(x))
CSV_READ_FLAGS = u"rt"
DECODE_STRING = lambda s: s
izip = zip
unicode_csv_reader = csv.reader
unicode_csv_writer = csv.writer
| 1,402 | 28.851064 | 74 | py |