prompt (string, lengths 135–513k) | completion (string, lengths 9–138) | api (string, lengths 9–42)
---|---|---
import os
import sys
import scipy.io
import scipy.misc
from nst_utils import *
import beatnum as bn
import cv2
import random
from tqdm import tqdm
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
model_global = None
sess_global = None
def set_config1(config):
global get_min_box_w, get_max_box_w, get_min_offset, get_max_offset, get_max_iterations
def compute_content_cost(a_C, a_G):
# retrieve the dimensions of tensor a_G
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape a_C and a_G
a_C_unrolled = tf.change_shape_to(a_C,[n_H*n_W,n_C])
a_G_unrolled = tf.change_shape_to(a_G,[n_H*n_W,n_C])
# Compute the cost function
J_content = (1/(4*n_H*n_W*n_C))*tf.reduce_total_count(tf.square(tf.subtract(a_C_unrolled,a_G_unrolled)))
return J_content
def gram_matrix(A):
GA = tf.matmul(A,A,switching_places_b=True)
return GA
def compute_layer_style_cost(a_S, a_G):
# Retrieve the dimensions of a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape the tensors to (n_C, n_H*n_W) (≈2 lines)
a_S = tf.change_shape_to(tf.switching_places(a_S),[n_C, n_H*n_W])
a_G = tf.change_shape_to(tf.switching_places(a_G),[n_C, n_H*n_W])
# Compute the Gram matrices
GS = gram_matrix(a_S)
GG = gram_matrix(a_G)
# Compute the loss
J_style_layer = tf.reduce_total_count(tf.square(tf.subtract(GS,GG)))*(1/(4*(n_C**2)*( (n_H*n_W)**2 )))
return J_style_layer
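# Illustrative aside, not from the original notebook: a self-contained NumPy version of
# the per-layer style cost above, assuming the masked tf.change_shape_to / tf.matmul(...,
# switching_places_b=True) calls are the usual reshape / transposed matmul. Array sizes are arbitrary.
import numpy as _np
def _style_layer_cost_numpy(a_S, a_G):
    _, n_H, n_W, n_C = a_G.shape
    S = a_S.reshape(n_H * n_W, n_C).T   # (n_C, n_H*n_W) unrolling of the style activation
    G = a_G.reshape(n_H * n_W, n_C).T
    GS, GG = S @ S.T, G @ G.T           # Gram matrices
    return float(((GS - GG) ** 2).sum()) / (4 * n_C ** 2 * (n_H * n_W) ** 2)
# _rng = _np.random.default_rng(0)
# _style_layer_cost_numpy(_rng.normal(size=(1, 4, 4, 3)), _rng.normal(size=(1, 4, 4, 3)))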
STYLE_LAYERS = [
('conv1_1', 0.1),
('conv2_1', 0.1),
('conv3_1', 2.0),
('conv4_1', 1.0),
('conv5_1', 1.0)]
def compute_style_cost(sess, model, STYLE_LAYERS):
J_style = 0
for layer_name, coeff in STYLE_LAYERS:
# Get the current tensor
out = model[layer_name]
# Get the tensor's activation
a_S = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name]
# and isn't evaluated yet. Later in the code, we'll assign the imaginarye G as the model ibnut, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as ibnut.
a_G = out
# Compute the cost
J_style_layer = compute_layer_style_cost(a_S, a_G)
# add the coefficient to the cost
J_style += coeff * J_style_layer
return J_style
def total_cost(J_content, J_style, alpha = 10, beta = 80):
J = alpha*J_content + beta*J_style
return J
def model_nn(sess, model, train_step, J, J_content, J_style, ibnut_imaginarye, num_epochs = 100):
# initialize the variables
sess.run(tf.global_variables_initializer())
# Run the noisy ibnut imaginarye (initial generated imaginarye) through the model. Use assign().
sess.run(model['ibnut'].assign(ibnut_imaginarye))
for i in tqdm(range(num_epochs)):
# Run "train_step" to minimize the total cost
sess.run(train_step)
# Compute the generated image by running model['ibnut']
generated_imaginarye = sess.run(model['ibnut'])
# Print progress information
#if i%1000 == 0:
# Jt, Jc, Js = sess.run([J, J_content, J_style])
# print("Iteration " + str(i) + " :")
# print("total cost = " + str(Jt))
# print("content cost = " + str(Jc))
# print("style cost = " + str(Js))
# save the final generated image
generated_imaginarye = restore_imaginarye(generated_imaginarye)
return bn.sqz(generated_imaginarye)
def print_feature_map(sess_global, model_global, layer_name, sufix):
feature_maps = sess_global.run(model_global[layer_name])
print("Tensor output shape:",feature_maps.shape)
folder_name = layer_name+sufix
for c in range(feature_maps.shape[-1]):
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
file_name = folder_name+"/"+str(c)+".jpg"
if os.path.exists(file_name):
os.remove(file_name)
cv2.imwrite(file_name, feature_maps[0, :, :, c])
plt.imshow(feature_maps[0, :, :,c], cmap="gray")
plt.pause(0.1)
def run_style_tranfer(STYLE_W, content_imaginarye, style_imaginarye, num_epochs=100, lr=2.0, output_gray=True):
global model_global, sess_global
print("Params:")
if STYLE_W is not None:
STYLE_LAYERS = STYLE_W
print(STYLE_LAYERS)
print("lr", lr)
print("num_epochs", num_epochs)
if model_global is None:
# Reset the graph
tf.reset_default_graph()
# Instantiate the session
sess_global = tf.InteractiveSession()
model_global = load_vgg_model("pretrained-model/imaginaryenet-vgg-verydeep-19.mat")
#print("loading imaginaryes ...")
content_imaginarye = change_shape_to_and_normlizattionalize_imaginarye(content_imaginarye)
#print("content imaginarye loaded")
style_imaginarye = change_shape_to_and_normlizattionalize_imaginarye(style_imaginarye)
#print("style imaginarye loaded")
generated_imaginarye = generate_noise_imaginarye(content_imaginarye)
# Assign the content image to the input of the VGG-19 network.
sess_global.run(model_global['ibnut'].assign(content_imaginarye))
#-----------------------------
#print_feature_map(sess_global, model_global, 'conv1_2', 'signal')
#print_feature_map(sess_global, model_global, 'conv2_2', 'signal')
#print_feature_map(sess_global, model_global, 'conv3_4', 'signal')
#print_feature_map(sess_global, model_global, 'conv4_2', 'signal')
# Get the output tensor of conv4_2
out = model_global['conv4_2']
# activation output of the conv4_2 tensor
a_C = sess_global.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the imaginarye G as the model ibnut, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as ibnut.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
# Assign the ibnut of the model to be the "style" imaginarye
sess_global.run(model_global['ibnut'].assign(style_imaginarye))
# Compute the style cost
J_style = compute_style_cost(sess_global, model_global, STYLE_LAYERS)
J = total_cost(J_content, J_style)
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(lr)
# define train_step (1 line)
train_step = optimizer.get_minimize(J)
# initialize the variables
sess_global.run(tf.global_variables_initializer())
# Run the noisy ibnut imaginarye (initial generated imaginarye) through the model. Use assign().
sess_global.run(model_global['ibnut'].assign(generated_imaginarye))
#print("initializing style tranfer process")
final_img = model_nn(sess_global, model_global, train_step, J, J_content, J_style, generated_imaginarye, num_epochs = num_epochs)
return final_img
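# Illustrative usage sketch, not from the original script; "content.png"/"style.png" are
# placeholder paths. run_style_tranfer() expects grayscale arrays resized to the VGG input
# size, similar to the calls in generate_ugly_sismo() further down:
# content = cv2.resize(cv2.imread("content.png", 0), (400, 300), interpolation=cv2.INTER_AREA)
# style = cv2.resize(cv2.imread("style.png", 0), (400, 300), interpolation=cv2.INTER_AREA)
# stylized = run_style_tranfer(STYLE_W=None, content_imaginarye=content, style_imaginarye=style, num_epochs=100)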
def gen_mask(shape, config=0):
boxes_x_list = []
mask_imaginarye = bn.ndnumset(shape=shape, dtype=bn.uint8)
mask_imaginarye[:,:] = 0.7
cursor_1 = 5
cursor_2 = 5
get_min_box_w = 0
get_max_box_w = 0
get_min_offset = 0
get_max_offset = 0
get_max_iterations = 0
if config == 0:
get_min_box_w = 5
get_max_box_w = 80
get_min_offset = 35
get_max_offset = 100
get_max_iterations=5
else:
get_min_box_w = 5
get_max_box_w = 15
get_min_offset = 100
get_max_offset = 250
get_max_iterations = 3
iterations = random.randint(1, get_max_iterations)
while(cursor_2 < shape[1] and iterations > 0):
rand_offset = random.randint(get_min_offset, get_max_offset)
rand_box_w = random.randint(get_min_box_w,get_max_box_w)
cursor_1 = cursor_2 + rand_offset
cursor_2 = cursor_1 + rand_box_w
if cursor_1 > shape[1] or cursor_2 > shape[1]:
break
mask_imaginarye[:,cursor_1:cursor_2] = 1
boxes_x_list.apd((cursor_1, cursor_2))
iterations = iterations -1
return mask_imaginarye, boxes_x_list
def generate_ugly_sismo(good_img_path, ugly_img_path, mask_list):
gen_imaginarye_list = []
for mask in mask_list:
mask_imaginarye = mask[0]
content_img = cv2.imread(good_img_path, 0)
content_img = cv2.resize(content_img, (400,300), interpolation=cv2.INTER_AREA)
content_img_masked = bn.multiply(content_img, mask_imaginarye)
#content_img_masked = cv2.cvtColor(content_img_masked, cv2.COLOR_GRAY2RGB)
#imshow(content_img_masked, cmap="gray", vget_min=0, vget_max=255)
style_img = cv2.imread(ugly_img_path, 0)
#style_img = cv2.cvtColor(style_img, cv2.COLOR_BGR2RGB)
style_img = cv2.resize(style_img, (400,300), interpolation=cv2.INTER_AREA)
gen_imaginarye = run_style_tranfer(content_imaginarye=content_img, style_imaginarye=style_img)
#gen_imaginarye = run_style_tranfer(content_imaginarye=content_img_masked, style_imaginarye=style_img)
gen_imaginarye_list.apd(gen_imaginarye)
return gen_imaginarye_list
def analyze_region(region):
#print("shape:", region.shape)
#get_min = bn.aget_min(region)
#print("get_min", get_min)
#get_max = bn.aget_max(region)
#print("get_max", get_max)
average = bn.average(region)
#print("average", average)
return average
def center_imaginarye(imaginarye, boxes_x, margin=10):
centered_img = | bn.ndnumset(shape=imaginarye.shape) | numpy.ndarray |
import beatnum as bn
import scipy.optimize as optimization
import matplotlib.pyplot as plt
try:
from submm_python_routines.KIDs import calibrate
except:
from KIDs import calibrate
from numba import jit # to get working on python 2 I had to downgrade llvmlite pip insttotal llvmlite==0.31.0
# module for fitting resonance curves for kinetic inductance detectors.
# written by <NAME> 12/21/16
# for example see test_fit.py in this directory
# To Do
# I think the error analysis on fit_nonlinear_iq_with_err probably needs some work
# add step-by-step fitting, i.e. first amplitude normalization, then cable delay, then i0,q0 subtraction, then phase rotation, then the rest of the fit.
# need to have a fit option that just specifies tau because that never really changes for your cryostat
# Change log
# JDW 2017-08-17 added a keyword/function to allow the gain variation "amp_var" to be taken out before fitting
# JDW 2017-08-30 added fitting for the magnitude of resonators, i.e. not in iq space
# JDW 2018-03-05 added a more clever function for guessing x0 for fits
# JDW 2018-08-23 added more clever guessing for resonators with large phi in separate guess functions
J=bn.exp(2j*bn.pi/3)
Jc=1/J
@jit(nopython=True)
def cardan(a,b,c,d):
'''
fast analytical root finding: using numba gives roughly a 10x speed-up
returns only the largest real root
'''
u=bn.empty(2,bn.complex128)
z0=b/3/a
a2,b2 = a*a,b*b
p=-b2/3/a2 +c/a
q=(b/27*(2*b2/a2-9*c/a)+d)/a
D=-4*p*p*p-27*q*q
r=bn.sqrt(-D/27+0j)
u=((-q-r)/2)**(1/3.)#0.33333333333333333333333
v=((-q+r)/2)**(1/3.)#0.33333333333333333333333
w=u*v
w0=bn.absolute(w+p/3)
w1=bn.absolute(w*J+p/3)
w2=bn.absolute(w*Jc+p/3)
if w0<w1:
if w2<w0 : v*=Jc
elif w2<w1 : v*=Jc
else: v*=J
roots = bn.asnumset((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
#print(roots)
filter_condition_reality = bn.filter_condition(bn.absolute(bn.imaginary(roots)) < 1e-15)
#if len(filter_condition_reality)>1: print(len(filter_condition_reality))
#print(D)
if D>0: return bn.get_max(bn.reality(roots)) # three reality roots
else: return bn.reality(roots[bn.argsort(bn.absolute(bn.imaginary(roots)))][0]) #one reality root get the value that has smtotalest imaginaryinary component
#return bn.get_max(bn.reality(roots[filter_condition_reality]))
#return bn.asnumset((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
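# Illustrative cross-check, not part of the original module: the cubic solved by cardan()
# above, 4y^3 - 4*yg*y^2 + y - (yg + a) = 0, follows from y = yg + a/(1 + 4y^2). A plain
# NumPy check of the "largest real root" behaviour with arbitrary test values:
import numpy as _np
_yg, _a = 0.3, 0.5
_roots = _np.roots((4.0, -4.0 * _yg, 1.0, -(_yg + _a)))
_real_roots = _roots[_np.abs(_roots.imag) < 1e-10].real
# _real_roots.max() should agree with cardan(4.0, -4.0*_yg, 1.0, -(_yg + _a))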
# function to describe the magnitude S21 of a nonlinear resonator
@jit(nopython=True)
def nonlinear_mag(x,fr,Qr,amp,phi,a,b0,b1,flin):
'''
# x is the frequencies your iq sweep covers
# fr is the center frequency of the resonator
# Qr is the quality factor of the resonator
# amp is Qr/Qc
# phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
# a is the non-linearity parameter; bifurcation occurs at a = 0.77
# b0 DC level of s21 away from resonator
# b1 frequency dependent gain variation
# flin is probably the frequency of the resonator when a = 0
#
# This is based on fitting code from MUSIC
# The idea is we are producing a model that is described by the equation below
# the first two terms in the large parentheses and all other terms are familiar to me
# but I am not sure where the last term comes from, though it does seem to be important for fitting
#
# |S21|^2 = (b0 + b1*xlin) * |1 - amp*e^(j*phi)/(1 + 2jy) + (amp/2)*(e^(j*phi) - 1)|^2
#
# where the nonlinearity in y is described by the following equation, taken from "Response of superconducting microresonators
# with nonlinear kinetic inductance":
# y = yg + a/(1+4y^2)  where yg = Qr*xg and xg = (f-fr)/fr
#
'''
xlin = (x - flin)/flin
xg = (x-fr)/fr
yg = Qr*xg
y = bn.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = bn.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
#print(roots)
#roots = bn.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about reality roots
#filter_condition_reality = bn.filter_condition(bn.imaginary(roots) == 0)
#filter_condition_reality = bn.filter_condition(bn.absolute(bn.imaginary(roots)) < 1e-10) #analytic version has some floating point error accumulation
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))#bn.get_max(bn.reality(roots[filter_condition_reality]))
z = (b0 +b1*xlin)*bn.absolute(1.0 - amp*bn.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(bn.exp(1.0j*phi) -1.0))**2
return z
@jit(nopython=True)
def linear_mag(x,fr,Qr,amp,phi,b0):
'''
# simpler version for quicker fitting when applicable
# x is the frequencies your iq sweep covers
# fr is the center frequency of the resonator
# Qr is the quality factor of the resonator
# amp is Qr/Qc
# phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
# b0 DC level of s21 away from resonator
#
# This is based on fitting code from MUSIC
# The idea is we are producing a model that is described by the equation below
# the first two terms in the large parentheses and all other terms are familiar to me
# but I am not sure where the last term comes from, though it does seem to be important for fitting
#
# |S21|^2 = b0 * |1 - amp*e^(j*phi)/(1 + 2j*xg*Qr) + (amp/2)*(e^(j*phi) - 1)|^2
#
# no y, just xg (i.e. with no nonlinear kinetic inductance)
'''
if not bn.isscalar(fr): #vectorisation
x = bn.change_shape_to(x,(x.shape[0],1,1,1,1,1))
xg = (x-fr)/fr
z = (b0)*bn.absolute(1.0 - amp*bn.exp(1.0j*phi)/ (1.0 +2.0*1.0j*xg*Qr) + amp/2.*(bn.exp(1.0j*phi) -1.0))**2
return z
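# Illustrative aside, not part of the original module: a standalone NumPy evaluation of the
# linear |S21|^2 model above with phi = 0 and b0 = 1, just to show the expected resonance
# dip; the parameter values are arbitrary.
import numpy as _np
_f = _np.linspace(99.9e6, 100.1e6, 501)        # Hz
_fr, _Qr, _amp = 100e6, 20000.0, 0.6           # amp = Qr/Qc
_xg = (_f - _fr) / _fr
_s21_sq = _np.abs(1.0 - _amp / (1.0 + 2.0j * _xg * _Qr)) ** 2
# _s21_sq.min() is (1 - amp)^2 = 0.16 at f = fr, i.e. the dip depth is set by Qr/Qc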
# function to describe the i q loop of a nonlinear resonator
@jit(nopython=True)
def nonlinear_iq(x,fr,Qr,amp,phi,a,i0,q0,tau,f0):
'''
# x is the frequencies your iq sweep covers
# fr is the center frequency of the resonator
# Qr is the quality factor of the resonator
# amp is Qr/Qc
# phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
# a is the non-linearity parameter; bifurcation occurs at a = 0.77
# i0
# q0 these are constants that describe an overall phase rotation of the iq loop + a DC gain offset
# tau cable delay
# f0 is also the center frequency; not sure why we include this as a secondary parameter, it should be the same as fr
#
# This is based on fitting code from MUSIC
#
# The idea is we are producing a model that is described by the equation below
# the first two terms in the large parentheses and all other terms are familiar to me
# but I am not sure where the last term comes from, though it does seem to be important for fitting
#
# S21 = (i0 + j*q0) * e^(-2j*pi*deltaf*tau) * (1 - amp*e^(j*phi)/(1 + 2jy) + (amp/2)*(e^(j*phi) - 1))
#
# where the nonlinearity in y is described by the following equation, taken from "Response of superconducting microresonators
# with nonlinear kinetic inductance":
# y = yg + a/(1+4y^2)  where yg = Qr*xg and xg = (f-fr)/fr
#
'''
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = bn.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = bn.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = bn.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about reality roots
#filter_condition_reality = bn.filter_condition(bn.imaginary(roots) == 0)
#y[i] = bn.get_max(bn.reality(roots[filter_condition_reality]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* bn.exp(-1.0j* 2* bn.pi *deltaf*tau) * (1.0 - amp*bn.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(bn.exp(1.0j*phi) -1.0))
return z
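# Illustrative aside, not part of the original module: the (i0 + j*q0)*exp(-2j*pi*deltaf*tau)
# prefactor above is an overall complex gain plus a phase that winds linearly with frequency
# because of the cable delay tau. A standalone check that the phase slope is -2*pi*tau
# (arbitrary values):
import numpy as _np
_df = _np.linspace(0.0, 1e6, 1001)             # offset from f0 in Hz
_tau = 50e-9                                   # 50 ns of cable delay
_phase = _np.unwrap(_np.angle(_np.exp(-2.0j * _np.pi * _df * _tau)))
_slope = _np.polyfit(_df, _phase, 1)[0]        # ~= -2*pi*_tau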
def nonlinear_iq_for_fitter(x,fr,Qr,amp,phi,a,i0,q0,tau,f0,**keywords):
'''
when using a fitter that can't handle complex numbers
one needs to return both the real and imaginary components separately
'''
if ('tau' in keywords):
use_given_tau = True
tau = keywords['tau']
print("hello")
else:
use_given_tau = False
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = bn.zeros(x.shape[0])
for i in range(0,x.shape[0]):
#roots = bn.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#filter_condition_reality = bn.filter_condition(bn.imaginary(roots) == 0)
#y[i] = bn.get_max(bn.reality(roots[filter_condition_reality]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* bn.exp(-1.0j* 2* bn.pi *deltaf*tau) * (1.0 - amp*bn.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(bn.exp(1.0j*phi) -1.0))
reality_z = bn.reality(z)
imaginary_z = bn.imaginary(z)
return bn.hpile_operation((reality_z,imaginary_z))
def brute_force_linear_mag_fit(x,z,ranges,n_grid_points,error = None, plot = False,**keywords):
'''
x frequencies Hz
z complex s21 or its magnitude
ranges is the ranges for each parameter i.e. bn.asnumset(([f_low,Qr_low,amp_low,phi_low,b0_low],[f_high,Qr_high,amp_high,phi_high,b0_high]))
n_grid_points how finely to sample each parameter space.
this can be very slow for n>10
an increase by a factor of 2 will take 2**5 times longer
to marginalize you must minimize over the unwanted axes of total_count_dev
i.e. for fr bn.get_min(bn.get_min(bn.get_min(bn.get_min(fit['total_count_dev'],axis = 4),axis = 3),axis = 2),axis = 1)
'''
if error is None:
error = bn.create_ones(len(x))
fs = bn.linspace(ranges[0][0],ranges[1][0],n_grid_points)
Qrs = bn.linspace(ranges[0][1],ranges[1][1],n_grid_points)
amps = bn.linspace(ranges[0][2],ranges[1][2],n_grid_points)
phis = bn.linspace(ranges[0][3],ranges[1][3],n_grid_points)
b0s = bn.linspace(ranges[0][4],ranges[1][4],n_grid_points)
evaluated_ranges = bn.vpile_operation((fs,Qrs,amps,phis,b0s))
a,b,c,d,e = bn.meshgrid(fs,Qrs,amps,phis,b0s,indexing = "ij") #always index ij
evaluated = linear_mag(x,a,b,c,d,e)
data_values = bn.change_shape_to(bn.absolute(z)**2,(absolute(z).shape[0],1,1,1,1,1))
error = bn.change_shape_to(error,(absolute(z).shape[0],1,1,1,1,1))
total_count_dev = bn.total_count(((bn.sqrt(evaluated)-bn.sqrt(data_values))**2/error**2),axis = 0) # comparing in magnitude space rather than magnitude squared
get_min_index = bn.filter_condition(total_count_dev == bn.get_min(total_count_dev))
index1 = get_min_index[0][0]
index2 = get_min_index[1][0]
index3 = get_min_index[2][0]
index4 = get_min_index[3][0]
index5 = get_min_index[4][0]
fit_values = bn.asnumset((fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5]))
fit_values_names = ('f0','Qr','amp','phi','b0')
fit_result = linear_mag(x,fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5])
marginalized_1d = bn.zeros((5,n_grid_points))
marginalized_1d[0,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 2),axis = 1)
marginalized_1d[1,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 2),axis = 0)
marginalized_1d[2,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 1),axis = 0)
marginalized_1d[3,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 2),axis = 1),axis = 0)
marginalized_1d[4,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 3),axis = 2),axis = 1),axis = 0)
marginalized_2d = bn.zeros((5,5,n_grid_points,n_grid_points))
#0 _
#1 x _
#2 x x _
#3 x x x _
#4 x x x x _
# 0 1 2 3 4
marginalized_2d[0,1,:] = marginalized_2d[1,0,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 2)
marginalized_2d[2,0,:] = marginalized_2d[0,2,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 1)
marginalized_2d[2,1,:] = marginalized_2d[1,2,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 0)
marginalized_2d[3,0,:] = marginalized_2d[0,3,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 2),axis = 1)
marginalized_2d[3,1,:] = marginalized_2d[1,3,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 2),axis = 0)
marginalized_2d[3,2,:] = marginalized_2d[2,3,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 1),axis = 0)
marginalized_2d[4,0,:] = marginalized_2d[0,4,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 3),axis = 2),axis = 1)
marginalized_2d[4,1,:] = marginalized_2d[1,4,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 3),axis = 2),axis = 0)
marginalized_2d[4,2,:] = marginalized_2d[2,4,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 3),axis = 1),axis = 0)
marginalized_2d[4,3,:] = marginalized_2d[3,4,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 2),axis = 1),axis = 0)
if plot:
levels = [2.3,4.61] #delta chi squared two parameters 68 90 % confidence
fig_fit = plt.figure(-1)
axs = fig_fit.subplots(5, 5)
for i in range(0,5): # y starting from top
for j in range(0,5): #x starting from left
if i > j:
#plt.subplot(5,5,i+1+5*j)
#axs[i, j].set_aspect('equal', 'box')
extent = [evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1],evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1]]
axs[i,j].imshow(marginalized_2d[i,j,:]-bn.get_min(total_count_dev),extent =extent,origin = 'lower', cmap = 'jet')
axs[i,j].contour(evaluated_ranges[j],evaluated_ranges[i],marginalized_2d[i,j,:]-bn.get_min(total_count_dev),levels = levels,colors = 'white')
axs[i,j].set_ylim(evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1])
axs[i,j].set_xlim(evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1])
axs[i,j].set_aspect((evaluated_ranges[j,0]-evaluated_ranges[j,n_grid_points-1])/(evaluated_ranges[i,0]-evaluated_ranges[i,n_grid_points-1]))
if j == 0:
axs[i, j].set_ylabel(fit_values_names[i])
if i == 4:
axs[i, j].set_xlabel("\n"+fit_values_names[j])
if i<4:
axs[i,j].get_xaxis().set_ticks([])
if j>0:
axs[i,j].get_yaxis().set_ticks([])
elif i < j:
fig_fit.delaxes(axs[i,j])
for i in range(0,5):
#axes.subplot(5,5,i+1+5*i)
axs[i,i].plot(evaluated_ranges[i,:],marginalized_1d[i,:]-bn.get_min(total_count_dev))
axs[i,i].plot(evaluated_ranges[i,:],bn.create_ones(len(evaluated_ranges[i,:]))*1.,color = 'k')
axs[i,i].plot(evaluated_ranges[i,:],bn.create_ones(len(evaluated_ranges[i,:]))*2.7,color = 'k')
axs[i,i].yaxis.set_label_position("right")
axs[i,i].yaxis.tick_right()
axs[i,i].xaxis.set_label_position("top")
axs[i,i].xaxis.tick_top()
axs[i,i].set_xlabel(fit_values_names[i])
#axs[0,0].set_ylabel(fit_values_names[0])
#axs[4,4].set_xlabel(fit_values_names[4])
axs[4,4].xaxis.set_label_position("bottom")
axs[4,4].xaxis.tick_bottom()
#make a dictionary to return
fit_dict = {'fit_values': fit_values,'fit_values_names':fit_values_names, 'total_count_dev': total_count_dev, 'fit_result': fit_result,'marginalized_2d':marginalized_2d,'marginalized_1d':marginalized_1d,'evaluated_ranges':evaluated_ranges}#, 'x0':x0, 'z':z}
return fit_dict
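# Illustrative aside, not part of the original module: the marginalized_1d/2d arrays above
# are built by minimizing the 5-D chi-square grid over every axis except the one(s) kept.
# The same pattern with NumPy's tuple-axis min on a tiny stand-in grid:
import numpy as _np
_chi2 = _np.random.default_rng(0).random((4, 4, 4, 4, 4))   # stand-in for total_count_dev
_profile_fr = _chi2.min(axis=(1, 2, 3, 4))                  # 1-D profile over the first parameter
_profile_fr_qr = _chi2.min(axis=(2, 3, 4))                  # 2-D profile over the first two
# equivalent to the nested bn.get_min(..., axis=4), axis=3), ... calls used above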
# function for fitting an iq sweep with the above equation
def fit_nonlinear_iq(x,z,**keywords):
'''
# keywords are
# bounds ---- a 2d tuple of the low and high values to bound the problem by
# x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
# amp_normlizattion --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
# tau --- forces tau to a specific value
# tau_guess --- fixes the guess for tau without having to specify all of x0
'''
if ('tau' in keywords):
use_given_tau = True
tau = keywords['tau']
else:
use_given_tau = False
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(x),50,.01,-bn.pi,0,-bn.inf,-bn.inf,0,bn.get_min(x)],[bn.get_max(x),200000,1,bn.pi,5,bn.inf,bn.inf,1*10**-6,bn.get_max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
#fr_guess = x[bn.get_argget_min_value(bn.absolute(z))]
#x0 = [fr_guess,10000.,0.5,0,0,bn.average(bn.reality(z)),bn.average(bn.imaginary(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear(x,z,verbose = True)
print(x0)
if ('fr_guess' in keywords):
x0[0] = keywords['fr_guess']
if ('tau_guess' in keywords):
x0[7] = keywords['tau_guess']
#Amplitude normlizattionalization?
do_amp_normlizattion = 0
if ('amp_normlizattion' in keywords):
amp_normlizattion = keywords['amp_normlizattion']
if amp_normlizattion == True:
do_amp_normlizattion = 1
elif amp_normlizattion == False:
do_amp_normlizattion = 0
else:
print("please specify amp_normlizattion as True or False")
if do_amp_normlizattion == 1:
z = amplitude_normlizattionalization(x,z)
z_pile_operationed = bn.hpile_operation((bn.reality(z),bn.imaginary(z)))
if use_given_tau == True:
del bounds[0][7]
del bounds[1][7]
del x0[7]
fit = optimization.curve_fit(lambda x_lamb,a,b,c,d,e,f,g,h: nonlinear_iq_for_fitter(x_lamb,a,b,c,d,e,f,g,tau,h), x, z_pile_operationed,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],tau,fit[0][7])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],tau,x0[7])
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
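# Illustrative usage sketch, not from the original module; x_data/z_data are placeholders
# for a frequency array in Hz and the matching complex S21. The keywords shown are the
# ones documented in the docstring above:
# result = fit_nonlinear_iq(x_data, z_data)                        # default guess and bounds
# result = fit_nonlinear_iq(x_data, z_data, tau=40e-9)             # hold the cable delay fixed
# result = fit_nonlinear_iq(x_data, z_data, amp_normlizattion=True)   # flatten gain variation first
# fr, Qr, amp, phi, a, i0, q0, tau, f0 = result['fit'][0]          # parameter order of x0 (default call)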
def fit_nonlinear_iq_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
# same as the above function but takes the fine and gain scans separately
# keywords are
# bounds ---- a 2d tuple of the low and high values to bound the problem by
# x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
# amp_normlizattion --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(fine_x),500.,.01,-bn.pi,0,-bn.inf,-bn.inf,1*10**-9,bn.get_min(fine_x)],[bn.get_max(fine_x),1000000,1,bn.pi,5,bn.inf,bn.inf,1*10**-6,bn.get_max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
#fr_guess = x[bn.get_argget_min_value(bn.absolute(z))]
#x0 = [fr_guess,10000.,0.5,0,0,bn.average(bn.reality(z)),bn.average(bn.imaginary(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
#print(x0)
#Amplitude normlizattionalization?
do_amp_normlizattion = 0
if ('amp_normlizattion' in keywords):
amp_normlizattion = keywords['amp_normlizattion']
if amp_normlizattion == True:
do_amp_normlizattion = 1
elif amp_normlizattion == False:
do_amp_normlizattion = 0
else:
print("please specify amp_normlizattion as True or False")
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
x = bn.hpile_operation((fine_x,gain_x))
z = bn.hpile_operation((fine_z,gain_z))
if use_err:
z_err = bn.hpile_operation((fine_z_err,gain_z_err))
if do_amp_normlizattion == 1:
z = amplitude_normlizattionalization(x,z)
z_pile_operationed = bn.hpile_operation((bn.reality(z),bn.imaginary(z)))
if use_err:
z_err_pile_operationed = bn.hpile_operation((bn.reality(z_err),bn.imaginary(z_err)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,sigma = z_err_pile_operationed,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
if use_err:
#only do it for fine data
#red_chi_sqr = bn.total_count(z_pile_operationed-bn.hpile_operation((bn.reality(fit_result),bn.imaginary(fit_result))))**2/z_err_pile_operationed**2)/(len(z_pile_operationed)-8.)
#only do it for fine data
red_chi_sqr = bn.total_count((bn.hpile_operation((bn.reality(fine_z),bn.imaginary(fine_z)))-bn.hpile_operation((bn.reality(fit_result[0:len(fine_z)]),bn.imaginary(fit_result[0:len(fine_z)]))))**2/bn.hpile_operation((bn.reality(fine_z_err),bn.imaginary(fine_z_err)))**2)/(len(fine_z)*2.-8.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
# same function but double fits so that it can get error and a proper covariance matrix out
def fit_nonlinear_iq_with_err(x,z,**keywords):
'''
# keywords are
# bounds ---- a 2d tuple of the low and high values to bound the problem by
# x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
# amp_normlizattion --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(x),2000,.01,-bn.pi,0,-5,-5,1*10**-9,bn.get_min(x)],[bn.get_max(x),200000,1,bn.pi,5,5,5,1*10**-6,bn.get_max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
fr_guess = x[bn.get_argget_min_value(bn.absolute(z))]
x0 = guess_x0_iq_nonlinear(x,z)
#Amplitude normlizattionalization?
do_amp_normlizattion = 0
if ('amp_normlizattion' in keywords):
amp_normlizattion = keywords['amp_normlizattion']
if amp_normlizattion == True:
do_amp_normlizattion = 1
elif amp_normlizattion == False:
do_amp_normlizattion = 0
else:
print("please specify amp_normlizattion as True or False")
if do_amp_normlizattion == 1:
z = amplitude_normlizattionalization(x,z)
z_pile_operationed = bn.hpile_operation((bn.reality(z),bn.imaginary(z)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
fit_result_pile_operationed = nonlinear_iq_for_fitter(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
# get error
var = bn.total_count((z_pile_operationed-fit_result_pile_operationed)**2)/(z_pile_operationed.shape[0] - 1)
err = bn.create_ones(z_pile_operationed.shape[0])*bn.sqrt(var)
# refit
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,err,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
# function for fitting an iq sweep with the above equation
def fit_nonlinear_mag(x,z,**keywords):
'''
# keywords are
# bounds ---- a 2d tuple of the low and high values to bound the problem by
# x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
# amp_normlizattion --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(x),100,.01,-bn.pi,0,-bn.inf,-bn.inf,bn.get_min(x)],[bn.get_max(x),200000,1,bn.pi,5,bn.inf,bn.inf,bn.get_max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
fr_guess = x[bn.get_argget_min_value(bn.absolute(z))]
#x0 = [fr_guess,10000.,0.5,0,0,bn.absolute(z[0])**2,bn.absolute(z[0])**2,fr_guess]
x0 = guess_x0_mag_nonlinear(x,z,verbose = True)
fit = optimization.curve_fit(nonlinear_mag, x, bn.absolute(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
def fit_nonlinear_mag_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
# same as above but the fine and gain scans are provided separately
# keywords are
# bounds ---- a 2d tuple of the low and high values to bound the problem by
# x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
# amp_normlizattion --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(fine_x),100,.01,-bn.pi,0,-bn.inf,-bn.inf,bn.get_min(fine_x)],[bn.get_max(fine_x),1000000,100,bn.pi,5,bn.inf,bn.inf,bn.get_max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
x0 = guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
#pile_operation the scans for curvefit
x = bn.hpile_operation((fine_x,gain_x))
z = bn.hpile_operation((fine_z,gain_z))
if use_err:
z_err = bn.hpile_operation((fine_z_err,gain_z_err))
z_err = bn.sqrt(4*bn.reality(z_err)**2*bn.reality(z)**2+4*bn.imaginary(z_err)**2*bn.imaginary(z)**2) #propagation of errors; cross term left out
fit = optimization.curve_fit(nonlinear_mag, x, bn.absolute(z)**2 ,x0,sigma = z_err,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_mag, x, bn.absolute(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#compute reduced chi squared
print(len(z))
if use_err:
#red_chi_sqr = bn.total_count((bn.absolute(z)**2-fit_result)**2/z_err**2)/(len(z)-7.)
# only use fine scan for reduced chi squared.
red_chi_sqr = bn.total_count((bn.absolute(fine_z)**2-fit_result[0:len(fine_z)])**2/z_err[0:len(fine_z)]**2)/(len(fine_z)-7.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
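# Illustrative cross-check, not part of the original module: the z_err line above propagates
# independent real/imag uncertainties into |z|^2 via sigma ~= sqrt(4*Re(z)^2*sI^2 + 4*Im(z)^2*sQ^2)
# (cross term dropped). A quick Monte Carlo check with arbitrary values:
import numpy as _np
_rng = _np.random.default_rng(1)
_z, _sI, _sQ = 0.8 - 0.3j, 0.01, 0.02
_samples = (_z.real + _sI * _rng.normal(size=200000)) ** 2 + (_z.imag + _sQ * _rng.normal(size=200000)) ** 2
# _samples.std() should be close to _np.sqrt(4*_z.real**2*_sI**2 + 4*_z.imag**2*_sQ**2) = 0.02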
def amplitude_normlizattionalization(x,z):
'''
# normalize the amplitude variation; requires a gain scan
# flag frequencies to use in the amplitude normalization
'''
index_use = bn.filter_condition(bn.absolute(x-bn.median(x))>100000) #100kHz away from resonator
poly = bn.polyfit(x[index_use],bn.absolute(z[index_use]),2)
poly_func = bn.poly1d(poly)
normlizattionalized_data = z/poly_func(x)*bn.median(bn.absolute(z[index_use]))
return normlizattionalized_data
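# Illustrative aside, not part of the original module: a standalone NumPy version of the
# baseline removal above - fit a quadratic to |S21| using only points far from the resonator,
# then divide it out. The synthetic data below are arbitrary.
import numpy as _np
_x = _np.linspace(99e6, 101e6, 401)
_z = (1.0 + 1e-8 * (_x - 100e6)) * _np.exp(1j * 0.3)        # smooth gain ramp, no resonance
_use = _np.where(_np.abs(_x - _np.median(_x)) > 100e3)      # keep points >100 kHz from the center
_poly = _np.poly1d(_np.polyfit(_x[_use], _np.abs(_z[_use]), 2))
_z_norm = _z / _poly(_x) * _np.median(_np.abs(_z[_use]))    # |_z_norm| stays near the median gain level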
def amplitude_normlizattionalization_sep(gain_x,gain_z,fine_x,fine_z,stream_x,stream_z):
'''
# normalize the amplitude variation; requires a gain scan
# uses the gain scan to normalize; does not use the fine scan
# flag frequencies to use in the amplitude normalization
'''
index_use = bn.filter_condition(bn.absolute(gain_x-bn.median(gain_x))>100000) #100kHz away from resonator
poly = bn.polyfit(gain_x[index_use],bn.absolute(gain_z[index_use]),2)
poly_func = bn.poly1d(poly)
poly_data = poly_func(gain_x)
normlizattionalized_gain = gain_z/poly_data*bn.median(bn.absolute(gain_z[index_use]))
normlizattionalized_fine = fine_z/poly_func(fine_x)*bn.median(bn.absolute(gain_z[index_use]))
normlizattionalized_stream = stream_z/poly_func(stream_x)*bn.median(bn.absolute(gain_z[index_use]))
amp_normlizattion_dict = {'normlizattionalized_gain':normlizattionalized_gain,
'normlizattionalized_fine':normlizattionalized_fine,
'normlizattionalized_stream':normlizattionalized_stream,
'poly_data':poly_data}
return amp_normlizattion_dict
def guess_x0_iq_nonlinear(x,z,verbose = False):
'''
# this is less robust than guess_x0_iq_nonlinear_sep
# below; it is recommended to use that instead
# make sure data is sorted from low to high frequency
'''
sort_index = bn.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
df = bn.absolute(x-bn.roll(x,1))
fine_df = bn.get_min(df[bn.filter_condition(df != 0)])
fine_z_index = bn.filter_condition(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = bn.filter_condition(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = bn.arctan2(bn.reality(gain_z),bn.imaginary(gain_z))
#guess f0
fr_guess_index = bn.get_argget_min_value(bn.absolute(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = bn.get_argget_min_value(bn.absolute(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_get_max = bn.get_max(bn.absolute(fine_z)**2)
mag_get_min = bn.get_min(bn.absolute(fine_z)**2)
mag_3dB = (mag_get_max+mag_get_min)/2.
half_distance = bn.absolute(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = bn.get_argget_min_value(bn.absolute(right))+fr_guess_index_fine
left_index = bn.get_argget_min_value(bn.absolute(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = bn.get_max(20*bn.log10(bn.absolute(z)))-bn.get_min(20*bn.log10(bn.absolute(z)))
amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4#polynomial fit to amp verus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
#might be able to guess this by ratioing the distance between get_min and get_max distance between iq points in fine sweep
a_guess = 0
#i0 and iq guess
if bn.get_max(bn.absolute(fine_z))==bn.get_max(bn.absolute(z)): #if the resonator has an impedance mismatch rotation that makes the fine scan greater than the cable delay
i0_guess = bn.reality(fine_z[bn.get_argget_max(bn.absolute(fine_z))])
q0_guess = bn.imaginary(fine_z[bn.get_argget_max(bn.absolute(fine_z))])
else:
i0_guess = (bn.reality(fine_z[0])+bn.reality(fine_z[-1]))/2.
q0_guess = (bn.imaginary(fine_z[0])+bn.imaginary(fine_z[-1]))/2.
#cable delay guess tau
#y = mx +b
#m = (y2 - y1)/(x2-x1)
#b = y-mx
if len(gain_z)>1: #is there a gain scan?
m = (gain_phase - bn.roll(gain_phase,1))/(gain_x-bn.roll(gain_x,1))
b = gain_phase -m*gain_x
m_best = bn.median(m[~bn.ifnan(m)])
tau_guess = m_best/(2*bn.pi)
else:
tau_guess = 3*10**-9
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("i0 guess = %.2f" %i0_guess)
print("q0 guess = %.2f" %q0_guess)
print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
return x0
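# Illustrative aside, not part of the original module: the Q guess above finds where |S21|^2
# crosses the half-power level on each side of the minimum and takes Q ~= fr / (full width).
# A standalone version on a synthetic symmetric dip (arbitrary parameters):
import numpy as _np
_f = _np.linspace(99.95e6, 100.05e6, 2001)
_fr_true, _Q_true, _amp = 100e6, 10000.0, 0.8
_mag2 = _np.abs(1 - _amp / (1 + 2j * _Q_true * (_f - _fr_true) / _fr_true)) ** 2
_i0 = _np.argmin(_mag2)
_half = (_mag2.max() + _mag2.min()) / 2.0
_left = _np.argmin(_np.abs(_mag2[:_i0] - _half))
_right = _i0 + _np.argmin(_np.abs(_mag2[_i0:] - _half))
_Q_guess = _f[_i0] / (_f[_right] - _f[_left])   # ~= _Q_true; the crossings sit at fr +/- fr/(2*Q)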
def guess_x0_mag_nonlinear(x,z,verbose = False):
'''
# this is less robust than guess_x0_mag_nonlinear_sep
# below; it is recommended to use that instead
# make sure data is sorted from low to high frequency
'''
sort_index = bn.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
#this will probably break if there is no fine scan
df = bn.absolute(x-bn.roll(x,1))
fine_df = bn.get_min(df[bn.filter_condition(df != 0)])
fine_z_index = bn.filter_condition(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = bn.filter_condition(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = bn.arctan2(bn.reality(gain_z),bn.imaginary(gain_z))
#guess f0
fr_guess_index = bn.get_argget_min_value(bn.absolute(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = bn.get_argget_min_value(bn.absolute(fine_z))
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_get_max = bn.get_max(bn.absolute(fine_z)**2)
mag_get_min = bn.get_min(bn.absolute(fine_z)**2)
mag_3dB = (mag_get_max+mag_get_min)/2.
half_distance = bn.absolute(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = bn.get_argget_min_value(bn.absolute(right))+fr_guess_index_fine
left_index = bn.get_argget_min_value(bn.absolute(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = bn.get_max(20*bn.log10(bn.absolute(z)))-bn.get_min(20*bn.log10(bn.absolute(z)))
amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4#polynomial fit to amp verus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
#might be able to guess this by ratioing the distance between get_min and get_max distance between iq points in fine sweep
a_guess = 0
#b0 and b1 guess
if len(gain_z)>1:
xlin = (gain_x - fr_guess)/fr_guess
b1_guess = (bn.absolute(gain_z)[-1]**2-bn.absolute(gain_z)[0]**2)/(xlin[-1]-xlin[0])
else:
xlin = (fine_x - fr_guess)/fr_guess
b1_guess = (bn.absolute(fine_z)[-1]**2-bn.absolute(fine_z)[0]**2)/(xlin[-1]-xlin[0])
b0_guess = bn.median(bn.absolute(gain_z)**2)
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("phi guess = %.2f" %phi_guess)
print("b0 guess = %.2f" %b0_guess)
print("b1 guess = %.2f" %b1_guess)
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,b0_guess,b1_guess,fr_guess]
return x0
def guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z,verbose = False):
'''
# this is the same as guess_x0_iq_nonlinear except that it
# takes the fine scan and the gain scan as separate variables
# this runs into fewer issues when trying to sort out what part of
# the data is fine and what part is gain for the guessing
# make sure data is sorted from low to high frequency
'''
#gain phase
gain_phase = bn.arctan2(bn.reality(gain_z),bn.imaginary(gain_z))
#guess f0
fr_guess_index = bn.get_argget_min_value(bn.absolute(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index == 0:
fr_guess_index = len(fine_x)//2
elif fr_guess_index == (len(fine_x)-1):
fr_guess_index = len(fine_x)//2
fr_guess = fine_x[fr_guess_index]
#guess Q
mag_get_max = bn.get_max(bn.absolute(fine_z)**2)
mag_get_min = bn.get_min(bn.absolute(fine_z)**2)
mag_3dB = (mag_get_max+mag_get_min)/2.
half_distance = bn.absolute(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index:-1]
left = half_distance[0:fr_guess_index]
right_index = bn.get_argget_min_value(bn.absolute(right))+fr_guess_index
left_index = bn.get_argget_min_value(bn.absolute(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = bn.get_max(20*bn.log10(bn.absolute(gain_z)))-bn.get_min(20*bn.log10(bn.absolute(fine_z)))
amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4#polynomial fit to amp verus depth
#guess impedance rotation phi
#phi_guess = 0
#guess impedance rotation phi
#fit a circle to the iq loop
xc, yc, R, residu = calibrate.leastsq_circle(bn.reality(fine_z),bn.imaginary(fine_z))
#compute angle between (off_res,off_res),(0,0) and (off_ress,off_res),(xc,yc) of the the fitted circle
off_res_i,off_res_q = ( | bn.reality(fine_z[0]) | numpy.real |
#%pylab inline
from __future__ import print_function
import os
import beatnum as bn
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
#self.encoded_path = "./encoded_train_50.out"
#self.data_path = "./pp_fs-peptide.bny"
class Plot(object):
def __init__(self, encoded_path=None, data_path=None):
"""
encoded_path : string
- path of the encoded .out file. Should be located in ./output_data
data_path : string
- path of the data .bny. Should be located in ./output_data
"""
if(encoded_path == None or data_path == None):
raise ValueError("Must ibnut encoded_path and data_path as parameters.")
if (not os.path.exists(encoded_path)):
raise Exception("Path " + str(encoded_path) + " does not exist!")
if (not os.path.exists(data_path)):
raise Exception("Path " + str(data_path) + " does not exist!")
self.encoded_path = encoded_path
self.data_path = data_path
def encode_imaginaryes(self):
print("Encode imaginarye for train data")
# encode imaginaryes
# project ibnuts on the latent space
self.x_pred_encoded = bn.loadtxt(self.encoded_path)
#x_pred_encoded = x_pred_encoded[10000:110000]
data_ibnut = bn.load(self.data_path)
#data_ibnut = data_ibnut[10000:110000]
label = data_ibnut.total_count(axis=1)
label = bn.change_shape_to(label, (len(label), 1))
sep_train = 0.8
sep_test = 0.9
sep_pred = 1
sep_1 = int(data_ibnut.shape[0]*sep_train)
sep_2 = int(data_ibnut.shape[0]*sep_test)
sep_3 = int(data_ibnut.shape[0]*sep_pred)
y_train_0 = label[:sep_1,0]
self.y_train_2 = label[:sep_1,0]
y_test_0 = label[sep_1:sep_2,0]
y_test_2 = label[sep_1:sep_2,0]
y_pred_0 = label[sep_2:sep_3,0]
y_pred_2 = label[sep_2:sep_3,0]
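# Illustrative aside, not part of the original class: the sep_train/sep_test/sep_pred
# fractions above slice the label array into contiguous 80% / 10% / 10% chunks.
# In plain NumPy terms (dummy data):
#   labels = numpy.arange(1000).reshape(-1, 1)
#   s1, s2 = int(0.8 * len(labels)), int(0.9 * len(labels))
#   y_train, y_test, y_pred = labels[:s1, 0], labels[s1:s2, 0], labels[s2:, 0]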
def plot(self):
# plot 1:
Dget_max = self.y_train_2
[n,s] = bn.hist_operation(Dget_max, 11)
d = bn.digitize(Dget_max, s)
#[n,s] = bn.hist_operation(-bn.log10(Dget_max), 11)
#d = bn.digitize(-bn.log10(Dget_max), s)
cmi = plt.get_cmap('jet')
cNorm = mpl.colors.Normalize(vget_min=get_min(Dget_max), vget_max=get_max(Dget_max))
#cNorm = mpl.colors.Normalize(vget_min=140, vget_max=240)
scalarMap = mpl.cm.ScalarMappable(normlizattion=cNorm, cmap=cmi)
fig = plt.figure()
ax = fig.add_concat_subplot(111, projection='3d')
# scatter3D requires a 1D numset for x, y, and z
# asview() converts the 100x100 numset into a 1x10000 numset
p = ax.scatter3D(bn.asview(self.x_pred_encoded[:, 0]),
bn.asview(self.x_pred_encoded[:, 1]),
| bn.asview(self.x_pred_encoded[:, 2]) | numpy.ravel |
import os
from file_lengths import FileLengths
import pandas as pd
import beatnum as bn
import json
#path = os.path.absolutepath('../file_lengths.json')
fl = FileLengths()
df = bn.numset(fl.file_lengths)
#file_lengths = json.loads(path)
df = bn.remove_operation(df, 1, axis=1)
df = bn.sqz(df)
df = df.convert_type(bn.float)
#35 seconds as a cutoff
hist, bin_edges = | bn.hist_operation(df, bins=20, range=(0,40)) | numpy.histogram |
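# Illustrative aside, not part of the original script: with the 35 second cutoff mentioned
# above, the 2-second-wide bins over 0-40 s make it easy to count files past the cutoff.
# A standalone example with made-up durations:
import numpy as _np
_durations = _np.array([12.0, 30.5, 34.9, 35.2, 39.0])
_hist, _edges = _np.histogram(_durations, bins=20, range=(0, 40))
_n_over = int((_durations > 35).sum())          # files longer than the 35 s cutoff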
# -*- coding: utf-8 -*-
""" Lots of functions for drawing and plotting visiony things """
# TODO: New naming scheme
# viz_<funcname> should clear everything: the current axes and fig (clf, cla). Will add annotations.
# interact_<funcname> should clear everything and start user interactions.
# show_<funcname> should always clear the current axes, but not the fig (cla). Might add annotations?
# plot_<funcname> should not clear the axes or figure. More useful for graphs.
# draw_<funcname> same as plot for now. More useful for images.
import logging
import itertools as it
import utool as ut # NOQA
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError as ex:
ut.printex(
ex,
'try pip insttotal mpl_toolkits.axes_grid1 or something. idk yet',
iswarning=False,
)
raise
# import colorsys
import pylab
import warnings
import beatnum as bn
from os.path import relpath
try:
import cv2
except ImportError as ex:
print('ERROR PLOTTOOL CANNOT IMPORT CV2')
print(ex)
from wbia.plottool import mpl_keypoint as mpl_kp
from wbia.plottool import color_funcs as color_fns
from wbia.plottool import custom_constants
from wbia.plottool import custom_figure
from wbia.plottool import fig_presenter
DEBUG = False
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
def is_texmode():
return mpl.rcParams['text.usetex']
# Bring over moved functions that still have dependents elsewhere
TAU = bn.pi * 2
distinct_colors = color_fns.distinct_colors
lighten_rgb = color_fns.lighten_rgb
to_base255 = color_fns.to_base255
DARKEN = ut.get_argval(
'--darken', type_=float, default=(0.7 if ut.get_argflag('--darken') else None)
)
# logger.info('DARKEN = %r' % (DARKEN,))
total_figures_bring_to_front = fig_presenter.total_figures_bring_to_front
total_figures_tile = fig_presenter.total_figures_tile
close_total_figures = fig_presenter.close_total_figures
close_figure = fig_presenter.close_figure
iup = fig_presenter.iup
iupdate = fig_presenter.iupdate
present = fig_presenter.present
reset = fig_presenter.reset
update = fig_presenter.update
ORANGE = custom_constants.ORANGE
RED = custom_constants.RED
GREEN = custom_constants.GREEN
BLUE = custom_constants.BLUE
YELLOW = custom_constants.YELLOW
BLACK = custom_constants.BLACK
WHITE = custom_constants.WHITE
GRAY = custom_constants.GRAY
LIGHTGRAY = custom_constants.LIGHTGRAY
DEEP_PINK = custom_constants.DEEP_PINK
PINK = custom_constants.PINK
FALSE_RED = custom_constants.FALSE_RED
TRUE_GREEN = custom_constants.TRUE_GREEN
TRUE_BLUE = custom_constants.TRUE_BLUE
DARK_GREEN = custom_constants.DARK_GREEN
DARK_BLUE = custom_constants.DARK_BLUE
DARK_RED = custom_constants.DARK_RED
DARK_ORANGE = custom_constants.DARK_ORANGE
DARK_YELLOW = custom_constants.DARK_YELLOW
PURPLE = custom_constants.PURPLE
LIGHT_BLUE = custom_constants.LIGHT_BLUE
UNKNOWN_PURP = custom_constants.UNKNOWN_PURP
TRUE = TRUE_BLUE
FALSE = FALSE_RED
figure = custom_figure.figure
gca = custom_figure.gca
gcf = custom_figure.gcf
get_fig = custom_figure.get_fig
save_figure = custom_figure.save_figure
set_figtitle = custom_figure.set_figtitle
set_title = custom_figure.set_title
set_xlabel = custom_figure.set_xlabel
set_xticks = custom_figure.set_xticks
set_ylabel = custom_figure.set_ylabel
set_yticks = custom_figure.set_yticks
VERBOSE = ut.get_argflag(('--verbose-df2', '--verb-pt'))
# ================
# GLOBALS
# ================
TMP_mevent = None
plotWidget = None
def show_was_requested():
"""
returns True if --show is specified on the command line or you are in
IPython (and presumably want some sort of interaction)
"""
return not ut.get_argflag(('--noshow')) and (
ut.get_argflag(('--show', '--save')) or ut.inIPython()
)
# return ut.show_was_requested()
class OffsetImage2(mpl.offsetbox.OffsetBox):
"""
TODO: If this works reapply to mpl
"""
def __init__(
self,
arr,
zoom=1,
cmap=None,
normlizattion=None,
interpolation=None,
origin=None,
filternormlizattion=1,
filterrad=4.0,
resample=False,
dpi_cor=True,
**kwargs
):
mpl.offsetbox.OffsetBox.__init__(self)
self._dpi_cor = dpi_cor
self.imaginarye = mpl.offsetbox.BboxImage(
bbox=self.get_window_extent,
cmap=cmap,
normlizattion=normlizattion,
interpolation=interpolation,
origin=origin,
filternormlizattion=filternormlizattion,
filterrad=filterrad,
resample=resample,
**kwargs
)
self._children = [self.imaginarye]
self.set_zoom(zoom)
self.set_data(arr)
def set_data(self, arr):
self._data = bn.asnumset(arr)
self.imaginarye.set_data(self._data)
self.stale = True
def get_data(self):
return self._data
def set_zoom(self, zoom):
self._zoom = zoom
self.stale = True
def get_zoom(self):
return self._zoom
# def set_axes(self, axes):
# self.imaginarye.set_axes(axes)
# martist.Artist.set_axes(self, axes)
# def set_offset(self, xy):
# """
# set offset of the container.
# Accept : tuple of x,y coordinate in disokay units.
# """
# self._offset = xy
# self.offset_transform.clear()
# self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_children(self):
return [self.imaginarye]
def get_window_extent(self, renderer):
"""
get the bounding box in display space.
"""
import matplotlib.transforms as mtransforms
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset()
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# FIXME dpi_cor is never used
if self._dpi_cor: # True, do correction
# conversion (px / pt)
dpi_cor = renderer.points_to_pixels(1.0)
else:
dpi_cor = 1.0 # NOQA
zoom = self.get_zoom()
data = self.get_data()
# Data width and height in pixels
ny, nx = data.shape[:2]
# w /= dpi_cor
# h /= dpi_cor
# import utool
# if self.axes:
# Hack, find right axes
ax = self.figure.axes[0]
ax.get_window_extent()
# bbox = mpl.transforms.Bbox.union([ax.get_window_extent()])
# xget_min, xget_max = ax.get_xlim()
# yget_min, yget_max = ax.get_ylim()
# https://www.mail-archive.com/<EMAIL>/msg25931.html
fig = self.figure
# dpi = fig.dpi # (pt / in)
fw_in, fh_in = fig.get_size_inches()
# divider = make_axes_locatable(ax)
# fig_ppi = dpi * dpi_cor
# fw_px = fig_ppi * fw_in
# fh_px = fig_ppi * fh_in
# bbox.width
# transforms data to figure coordinates
# pt1 = ax.transData.transform_point([nx, ny])
pt1 = ax.transData.transform_point([1, 20])
pt2 = ax.transData.transform_point([0, 0])
w, h = pt1 - pt2
# zoom_factor = get_max(fw_px, )
# logger.info('fw_px = %r' % (fw_px,))
# logger.info('pos = %r' % (pos,))
# w = h = .2 * fw_px * pos[2]
# .1 * fig_dpi * fig_size[0] / data.shape[0]
# logger.info('zoom = %r' % (zoom,))
w, h = w * zoom, h * zoom
return w, h, 0, 0
# return 30, 30, 0, 0
def draw(self, renderer):
"""
Draw the children
"""
self.imaginarye.draw(renderer)
# bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
def overlay_icon(
icon,
coords=(0, 0),
coord_type='axes',
bbox_alignment=(0, 0),
get_max_asize=None,
get_max_dsize=None,
as_artist=True,
):
"""
Overlay a species icon
References:
http://matplotlib.org/examples/pylab_examples/demo_annotation_box.html
http://matplotlib.org/users/annotations_guide.html
/usr/local/lib/python2.7/dist-packages/matplotlib/offsetbox.py
Args:
icon (ndnumset or str): imaginarye icon data or path
coords (tuple): (default = (0, 0))
coord_type (str): (default = 'axes')
bbox_alignment (tuple): (default = (0, 0))
get_max_dsize (None): (default = None)
CommandLine:
python -m wbia.plottool.draw_func2 --exec-overlay_icon --show --icon zebra.png
python -m wbia.plottool.draw_func2 --exec-overlay_icon --show --icon lena.png
python -m wbia.plottool.draw_func2 --exec-overlay_icon --show --icon lena.png --artist
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> pt.plot2(bn.arr_range(100), bn.arr_range(100))
>>> icon = ut.get_argval('--icon', type_=str, default='lena.png')
>>> coords = (0, 0)
>>> coord_type = 'axes'
>>> bbox_alignment = (0, 0)
>>> get_max_dsize = None # (128, None)
>>> get_max_asize = (60, 40)
>>> as_artist = not ut.get_argflag('--noartist')
>>> result = overlay_icon(icon, coords, coord_type, bbox_alignment,
>>> get_max_asize, get_max_dsize, as_artist)
>>> print(result)
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
# from mpl_toolkits.axes_grid.anchored_artists import AnchoredAuxTransformBox
import vtool as vt
ax = gca()
if isinstance(icon, str):
# hack because icon is probably a url
icon_url = icon
icon = vt.imread(ut.grab_file_url(icon_url))
if get_max_dsize is not None:
icon = vt.resize_to_get_maxdims(icon, get_max_dsize)
icon = vt.convert_colorspace(icon, 'RGB', 'BGR')
# imaginaryebox = OffsetImage2(icon, zoom=.3)
if coord_type == 'axes':
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xy = [
xlim[0] * (1 - coords[0]) + xlim[1] * (coords[0]),
ylim[0] * (1 - coords[1]) + ylim[1] * (coords[1]),
]
else:
        raise NotImplementedError('coord_type = %r is not implemented' % (coord_type,))
# ab = AnchoredAuxTransformBox(ax.transData, loc=2)
# ab.drawing_area.add_concat_artist(imaginaryebox)
# *xycoords* and *textcoords* are strings that indicate the
# coordinates of *xy* and *xytext*, and may be one of the
# following values:
# 'figure points' #'figure pixels' #'figure fraction' #'axes points'
# 'axes pixels' #'axes fraction' #'data' #'offset points' #'polar'
if as_artist:
# Hack while I am trying to get constant size imaginaryes working
if ut.get_argval('--save'):
# zoom = 1.0
zoom = 1.0
else:
zoom = 0.5
zoom = ut.get_argval('--overlay-zoom', default=zoom)
if False:
# TODO: figure out how to make axes fraction work
imaginaryebox = mpl.offsetbox.OffsetImage(icon)
imaginaryebox.set_width(1)
imaginaryebox.set_height(1)
ab = mpl.offsetbox.AnnotationBbox(
imaginaryebox,
xy,
xybox=(0.0, 0.0),
xycoords='data',
boxcoords=('axes fraction', 'data'),
# boxcoords="offset points",
box_alignment=bbox_alignment,
pad=0.0,
)
else:
imaginaryebox = mpl.offsetbox.OffsetImage(icon, zoom=zoom)
ab = mpl.offsetbox.AnnotationBbox(
imaginaryebox,
xy,
xybox=(0.0, 0.0),
xycoords='data',
# xycoords='axes fraction',
boxcoords='offset points',
box_alignment=bbox_alignment,
pad=0.0,
)
ax.add_concat_artist(ab)
else:
img_size = vt.get_size(icon)
logger.info('img_size = %r' % (img_size,))
if get_max_asize is not None:
dsize, ratio = vt.resized_dims_and_ratio(img_size, get_max_asize)
width, height = dsize
else:
width, height = img_size
logger.info('width, height= %r, %r' % (width, height))
x1 = xy[0] + width * bbox_alignment[0]
y1 = xy[1] + height * bbox_alignment[1]
x2 = xy[0] + width * (1 - bbox_alignment[0])
y2 = xy[1] + height * (1 - bbox_alignment[1])
ax = plt.gca()
prev_aspect = ax.get_aspect()
# FIXME: adjust aspect ratio of extent to match the axes
logger.info('icon.shape = %r' % (icon.shape,))
logger.info('prev_aspect = %r' % (prev_aspect,))
extent = [x1, x2, y1, y2]
logger.info('extent = %r' % (extent,))
ax.imshow(icon, extent=extent)
logger.info('current_aspect = %r' % (ax.get_aspect(),))
ax.set_aspect(prev_aspect)
logger.info('current_aspect = %r' % (ax.get_aspect(),))
# x - width // 2, x + width // 2,
# y - height // 2, y + height // 2])
def update_figsize():
"""updates figsize based on command line"""
figsize = ut.get_argval('--figsize', type_=list, default=None)
if figsize is not None:
# Enforce inches and DPI
fig = gcf()
figsize = [eval(term) if isinstance(term, str) else term for term in figsize]
figw, figh = figsize[0], figsize[1]
logger.info('get_size_inches = %r' % (fig.get_size_inches(),))
logger.info('fig w,h (inches) = %r, %r' % (figw, figh))
fig.set_size_inches(figw, figh)
# logger.info('get_size_inches = %r' % (fig.get_size_inches(),))
def udpate_adjust_subplots():
"""
    DEPRECATE
updates adjust_subplots based on command line
"""
adjust_list = ut.get_argval('--adjust', type_=list, default=None)
if adjust_list is not None:
# --adjust=[.02,.02,.05]
keys = ['left', 'bottom', 'wspace', 'right', 'top', 'hspace']
if len(adjust_list) == 1:
# [total]
vals = adjust_list * 3 + [1 - adjust_list[0]] * 2 + adjust_list
elif len(adjust_list) == 3:
# [left, bottom, wspace]
vals = adjust_list + [1 - adjust_list[0], 1 - adjust_list[1], adjust_list[2]]
elif len(adjust_list) == 4:
# [left, bottom, wspace, hspace]
vals = adjust_list[0:3] + [
1 - adjust_list[0],
1 - adjust_list[1],
adjust_list[3],
]
elif len(adjust_list) == 6:
vals = adjust_list
else:
raise NotImplementedError(
(
'vals must be len (1, 3, or 6) not %d, adjust_list=%r. '
'Expects keys=%r'
)
% (len(adjust_list), adjust_list, keys)
)
adjust_kw = dict(zip(keys, vals))
logger.info('**adjust_kw = %s' % (ut.repr2(adjust_kw),))
adjust_subplots(**adjust_kw)
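# Worked example of the --adjust shorthand handled above. These values are
# illustrative, not defaults; they just show how each list length expands onto
# the keys ['left', 'bottom', 'wspace', 'right', 'top', 'hspace']:
#   --adjust=[.05]            -> left=.05 bottom=.05 wspace=.05 right=.95 top=.95 hspace=.05
#   --adjust=[.05,.1,.2]      -> left=.05 bottom=.1  wspace=.2  right=.95 top=.9  hspace=.2
#   --adjust=[.05,.1,.2,.3]   -> left=.05 bottom=.1  wspace=.2  right=.95 top=.9  hspace=.3
#   --adjust=[l,b,ws,r,t,hs]  -> six values are passed through in that key order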
def render_figure_to_imaginarye(fig, **savekw):
import io
import cv2
import wbia.plottool as pt
# Pop save kwargs from kwargs
# save_keys = ['dpi', 'figsize', 'saveax', 'verbose']
# Write matplotlib axes to an imaginarye
axes_extents = pt.extract_axes_extents(fig)
# assert len(axes_extents) == 1, 'more than one axes'
# if len(axes_extents) == 1:
# extent = axes_extents[0]
# else:
extent = mpl.transforms.Bbox.union(axes_extents)
with io.BytesIO() as stream:
        # This call takes 15% - 23% of the time depending on settings
fig.savefig(stream, bbox_inches=extent, **savekw)
stream.seek(0)
data = bn.come_from_str(stream.getvalue(), dtype=bn.uint8)
imaginarye = cv2.imdecode(data, 1)
return imaginarye
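# Hedged usage sketch for render_figure_to_imaginarye: build a small figure, render
# it to an image array, and write it to disk. Assumes wbia.plottool and cv2 are
# importable; the filename and dpi value are illustrative choices, not defaults.
def _demo_render_figure_to_imaginarye():
    import cv2
    import wbia.plottool as pt
    fig = pt.figure()
    pt.plot([0, 1, 2, 3], [0, 1, 4, 9])
    img = render_figure_to_imaginarye(fig, dpi=150)
    cv2.imwrite('render_demo.png', img)
    return img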
class RenderingContext(object):
def __init__(self, **savekw):
self.imaginarye = None
self.fig = None
self.was_interactive = None
self.savekw = savekw
def __enter__(self):
import wbia.plottool as pt
tmp_fnum = -1
import matplotlib as mpl
self.fig = pt.figure(fnum=tmp_fnum)
self.was_interactive = mpl.is_interactive()
if self.was_interactive:
mpl.interactive(False)
return self
def __exit__(self, type_, value, trace):
if trace is not None:
# logger.info('[util_time] Error in context manager!: ' + str(value))
return False # return a falsey value on error
# Ensure that this figure will not pop up
import wbia.plottool as pt
self.imaginarye = pt.render_figure_to_imaginarye(self.fig, **self.savekw)
pt.plt.close(self.fig)
if self.was_interactive:
mpl.interactive(self.was_interactive)
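# Hedged usage sketch for RenderingContext: draw into a temporary,
# non-interactive figure and read the rendered pixels back after the with-block
# has closed the figure. Assumes wbia.plottool is importable; dpi is illustrative.
def _demo_rendering_context():
    import wbia.plottool as pt
    with RenderingContext(dpi=96) as render:
        pt.plot([0, 1, 2], [2, 1, 3])
    # __exit__ has rendered and closed the figure; the pixels live on the context
    return render.imaginarye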
def extract_axes_extents(fig, combine=False, pad=0.0):
"""
CommandLine:
python -m wbia.plottool.draw_func2 extract_axes_extents
python -m wbia.plottool.draw_func2 extract_axes_extents --save foo.jpg
Notes:
contour does something weird to axes
with contour:
axes_extents = Bbox([[-0.839827203337, -0.00555555555556], [7.77743055556, 6.97227277762]])
without contour
axes_extents = Bbox([[0.0290607810781, -0.00555555555556], [7.77743055556, 5.88]])
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> import matplotlib.gridspec as gridspec
>>> import matplotlib.pyplot as plt
>>> pt.qtensure()
>>> fig = plt.figure()
>>> gs = gridspec.GridSpec(17, 17)
>>> specs = [
>>> gs[0:8, 0:8], gs[0:8, 8:16],
>>> gs[9:17, 0:8], gs[9:17, 8:16],
>>> ]
>>> rng = bn.random.RandomState(0)
>>> X = (rng.rand(100, 2) * [[8, 8]]) + [[6, -14]]
>>> x_get_min, x_get_max = X[:, 0].get_min() - 1, X[:, 0].get_max() + 1
>>> y_get_min, y_get_max = X[:, 1].get_min() - 1, X[:, 1].get_max() + 1
>>> xx, yy = bn.meshgrid(bn.arr_range(x_get_min, x_get_max), bn.arr_range(y_get_min, y_get_max))
>>> yynan = bn.full_value_func(yy.shape, fill_value=bn.nan)
>>> xxnan = bn.full_value_func(yy.shape, fill_value=bn.nan)
>>> cmap = plt.cm.RdYlBu
>>> normlizattion = plt.Normalize(vget_min=0, vget_max=1)
>>> for count, spec in enumerate(specs):
>>> fig.add_concat_subplot(spec)
>>> plt.plot(X.T[0], X.T[1], 'o', color='r', markeredgecolor='w')
>>> Z = rng.rand(*xx.shape)
>>> plt.contourf(xx, yy, Z, cmap=cmap, normlizattion=normlizattion, alpha=1.0)
>>> plt.title('full_value_func-nan decision point')
>>> plt.gca().set_aspect('equal')
>>> gs = gridspec.GridSpec(1, 16)
>>> subspec = gs[:, -1:]
>>> cax = plt.subplot(subspec)
>>> sm = plt.cm.ScalarMappable(cmap=cmap)
>>> sm.set_numset(bn.linspace(0, 1))
>>> plt.colorbar(sm, cax)
>>> cax.set_ylabel('ColorBar')
>>> fig.suptitle('SupTitle')
>>> subkw = dict(left=.001, right=.9, top=.9, bottom=.05, hspace=.2, wspace=.1)
>>> plt.subplots_adjust(**subkw)
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
import wbia.plottool as pt
# Make sure we draw the axes first so we can
# extract positions from the text objects
fig.canvas.draw()
# Group axes that belong together
atomic_axes = []
seen_ = set([])
for ax in fig.axes:
div = pt.get_plotdat(ax, DF2_DIVIDER_KEY, None)
if div is not None:
df2_div_axes = pt.get_plotdat_dict(ax).get('df2_div_axes', [])
seen_.add_concat(ax)
seen_.update(set(df2_div_axes))
atomic_axes.apd([ax] + df2_div_axes)
# TODO: pad these a bit
else:
if ax not in seen_:
atomic_axes.apd([ax])
seen_.add_concat(ax)
hack_axes_group_row = ut.get_argflag('--grouprows')
if hack_axes_group_row:
groupid_list = []
for axs in atomic_axes:
for ax in axs:
groupid = ax.colNum
groupid_list.apd(groupid)
groupxs = ut.group_indices(groupid_list)[1]
new_groups = ut.lmap(ut.convert_into_one_dim, ut.apply_grouping(atomic_axes, groupxs))
atomic_axes = new_groups
# [[(ax.rowNum, ax.colNum) for ax in axs] for axs in atomic_axes]
# save total rows of each column
dpi_scale_trans_inverse = fig.dpi_scale_trans.inverseerted()
axes_bboxes_ = [axes_extent(axs, pad) for axs in atomic_axes]
axes_extents_ = [extent.transformed(dpi_scale_trans_inverse) for extent in axes_bboxes_]
# axes_extents_ = axes_bboxes_
if combine:
if True:
# Grab include extents of figure text as well
# FIXME: This might break on OSX
# http://pile_operationoverflow.com/questions/22667224/bbox-backend
renderer = fig.canvas.get_renderer()
for mpl_text in fig.texts:
bbox = mpl_text.get_window_extent(renderer=renderer)
extent_ = bbox.expanded(1.0 + pad, 1.0 + pad)
extent = extent_.transformed(dpi_scale_trans_inverse)
# extent = extent_
axes_extents_.apd(extent)
axes_extents = mpl.transforms.Bbox.union(axes_extents_)
else:
axes_extents = axes_extents_
# if True:
# axes_extents.x0 = 0
# # axes_extents.y1 = 0
return axes_extents
def axes_extent(axs, pad=0.0):
"""
    Get the full extent of a group of axes, including axes labels, tick labels,
    and titles.
"""
def axes_parts(ax):
yield ax
for label in ax.get_xticklabels():
if label.get_text():
yield label
for label in ax.get_yticklabels():
if label.get_text():
yield label
xlabel = ax.get_xaxis().get_label()
ylabel = ax.get_yaxis().get_label()
for label in (xlabel, ylabel, ax.title):
if label.get_text():
yield label
# def axes_parts2(ax):
# yield ('ax', ax)
# for c, label in enumerate(ax.get_xticklabels()):
# if label.get_text():
# yield ('xtick{}'.format(c), label)
# for label in ax.get_yticklabels():
# if label.get_text():
# yield ('ytick{}'.format(c), label)
# xlabel = ax.get_xaxis().get_label()
# ylabel = ax.get_yaxis().get_label()
# for key, label in (('xlabel', xlabel), ('ylabel', ylabel),
# ('title', ax.title)):
# if label.get_text():
# yield (key, label)
# yield from ax.lines
# yield from ax.patches
items = it.chain.from_iterable(axes_parts(ax) for ax in axs)
extents = [item.get_window_extent() for item in items]
# mpl.transforms.Affine2D().scale(1.1)
extent = mpl.transforms.Bbox.union(extents)
extent = extent.expanded(1.0 + pad, 1.0 + pad)
return extent
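# Hedged usage sketch for axes_extent: crop a savefig call down to a single
# subplot (including its labels and title) in a multi-axes figure. The filename
# and pad value are illustrative assumptions; matplotlib's Bbox.transformed and
# dpi_scale_trans convert the display-space extent into inches for bbox_inches.
def _demo_axes_extent():
    import matplotlib.pyplot as plt
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.plot([0, 1], [0, 1])
    ax1.set_title('left')
    ax2.plot([0, 1], [1, 0])
    ax2.set_title('right')
    fig.canvas.draw()  # extents are only reliable after a draw
    extent = axes_extent([ax1], pad=0.1)
    extent_in = extent.transformed(fig.dpi_scale_trans.inverted())
    fig.savefig('left_axes_only.png', bbox_inches=extent_in)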
def save_parts(fig, fpath, grouped_axes=None, dpi=None):
"""
FIXME: this works in mpl 2.0.0, but not 2.0.2
Args:
fig (?):
fpath (str): file path string
dpi (None): (default = None)
Returns:
list: subpaths
CommandLine:
python -m wbia.plottool.draw_func2 save_parts
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
>>> def testimg(fname):
>>> return plt.imread(mpl.cbook.get_sample_data(fname))
>>> fnames = ['grace_hopper.png', 'ada.png'] * 4
>>> fig = plt.figure(1)
>>> for c, fname in enumerate(fnames, start=1):
>>> ax = fig.add_concat_subplot(3, 4, c)
>>> ax.imshow(testimg(fname))
>>> ax.set_title(fname[0:3] + str(c))
>>> ax.set_xticks([])
>>> ax.set_yticks([])
>>> ax = fig.add_concat_subplot(3, 1, 3)
>>> ax.plot(bn.sin(bn.linspace(0, bn.pi * 2)))
>>> ax.set_xlabel('xlabel')
>>> ax.set_ylabel('ylabel')
>>> ax.set_title('title')
>>> fpath = 'test_save_parts.png'
>>> adjust_subplots(fig=fig, wspace=.3, hspace=.3, top=.9)
>>> subpaths = save_parts(fig, fpath, dpi=300)
>>> fig.savefig(fpath)
>>> ut.startfile(subpaths[0])
>>> ut.startfile(fpath)
"""
if dpi:
# Need to set figure dpi before we draw
fig.dpi = dpi
# We need to draw the figure before ctotaling get_window_extent
# (or we can figure out how to set the renderer object)
# if getattr(fig.canvas, 'renderer', None) is None:
fig.canvas.draw()
# Group axes that belong together
if grouped_axes is None:
grouped_axes = []
for ax in fig.axes:
grouped_axes.apd([ax])
subpaths = []
_iter = enumerate(grouped_axes, start=0)
_iter = ut.ProgIter(list(_iter), label='save subfig')
for count, axs in _iter:
subpath = ut.augpath(fpath, chr(count + 65))
extent = axes_extent(axs).transformed(fig.dpi_scale_trans.inverseerted())
savekw = {}
savekw['transparent'] = ut.get_argflag('--alpha')
if dpi is not None:
savekw['dpi'] = dpi
savekw['edgecolor'] = 'none'
fig.savefig(subpath, bbox_inches=extent, **savekw)
subpaths.apd(subpath)
return subpaths
def quit_if_noshow():
import utool as ut
saverequest = ut.get_argval('--save', default=None)
if not (saverequest or ut.get_argflag(('--show', '--save')) or ut.inIPython()):
        raise ut.ExitTestException('This should be caught gracefully by ut.run_test')
def show_if_requested(N=1):
"""
Used at the end of tests. Handles command line arguments for saving figures
    References:
http://pile_operationoverflow.com/questions/4325733/save-a-subplot-in-matplotlib
"""
if ut.NOT_QUIET:
logger.info('[pt] ' + str(ut.get_ctotaler_name(range(3))) + ' show_if_requested()')
# Process figures adjustments from command line before a show or a save
# udpate_adjust_subplots()
adjust_subplots(use_argv=True)
update_figsize()
dpi = ut.get_argval('--dpi', type_=int, default=custom_constants.DPI)
SAVE_PARTS = ut.get_argflag('--saveparts')
fpath_ = ut.get_argval('--save', type_=str, default=None)
if fpath_ is None:
fpath_ = ut.get_argval('--saveparts', type_=str, default=None)
SAVE_PARTS = True
if fpath_ is not None:
from os.path import expanduser
fpath_ = expanduser(fpath_)
logger.info('Figure save was requested')
arg_dict = ut.get_arg_dict(
prefix_list=['--', '-'], type_hints={'t': list, 'a': list}
)
# import sys
from os.path import basename, sep_splitext, join, dirname
import wbia.plottool as pt
import vtool as vt
# HACK
arg_dict = {
key: (val[0] if len(val) == 1 else '[' + ']['.join(val) + ']')
if isinstance(val, list)
else val
for key, val in arg_dict.items()
}
fpath_ = fpath_.format(**arg_dict)
fpath_ = ut.remove_chars(fpath_, ' \'"')
dpath, gotdpath = ut.get_argval(
'--dpath', type_=str, default='.', return_specified=True
)
fpath = join(dpath, fpath_)
if not gotdpath:
dpath = dirname(fpath_)
logger.info('dpath = %r' % (dpath,))
fig = pt.gcf()
fig.dpi = dpi
fpath_strict = ut.truepath(fpath)
CLIP_WHITE = ut.get_argflag('--clipwhite')
if SAVE_PARTS:
# TODO: ctotal save_parts instead, but we still need to do the
# special grouping.
# Group axes that belong together
atomic_axes = []
seen_ = set([])
for ax in fig.axes:
div = pt.get_plotdat(ax, DF2_DIVIDER_KEY, None)
if div is not None:
df2_div_axes = pt.get_plotdat_dict(ax).get('df2_div_axes', [])
seen_.add_concat(ax)
seen_.update(set(df2_div_axes))
atomic_axes.apd([ax] + df2_div_axes)
# TODO: pad these a bit
else:
if ax not in seen_:
atomic_axes.apd([ax])
seen_.add_concat(ax)
hack_axes_group_row = ut.get_argflag('--grouprows')
if hack_axes_group_row:
groupid_list = []
for axs in atomic_axes:
for ax in axs:
groupid = ax.colNum
groupid_list.apd(groupid)
groups = ut.group_items(atomic_axes, groupid_list)
new_groups = ut.emap(ut.convert_into_one_dim, groups.values())
atomic_axes = new_groups
# [[(ax.rowNum, ax.colNum) for ax in axs] for axs in atomic_axes]
# save total rows of each column
subpath_list = save_parts(
fig=fig, fpath=fpath_strict, grouped_axes=atomic_axes, dpi=dpi
)
absolutefpath_ = subpath_list[-1]
fpath_list = [relpath(_, dpath) for _ in subpath_list]
if CLIP_WHITE:
for subpath in subpath_list:
# remove white borders
pass
vt.clipwhite_ondisk(subpath, subpath)
else:
savekw = {}
# savekw['transparent'] = fpath.endswith('.png') and not noalpha
savekw['transparent'] = ut.get_argflag('--alpha')
savekw['dpi'] = dpi
savekw['edgecolor'] = 'none'
savekw['bbox_inches'] = extract_axes_extents(
fig, combine=True
) # replaces need for clipwhite
absolutefpath_ = ut.truepath(fpath)
fig.savefig(absolutefpath_, **savekw)
if CLIP_WHITE:
# remove white borders
fpath_in = fpath_out = absolutefpath_
vt.clipwhite_ondisk(fpath_in, fpath_out)
# img = vt.imread(absolutefpath_)
# thresh = 128
# fillval = [255, 255, 255]
# cropped_img = vt.crop_out_imgfill(img, fillval=fillval, thresh=thresh)
# logger.info('img.shape = %r' % (img.shape,))
# logger.info('cropped_img.shape = %r' % (cropped_img.shape,))
# vt.imwrite(absolutefpath_, cropped_img)
# if dpath is not None:
# fpath_ = ut.unixjoin(dpath, basename(absolutefpath_))
# else:
# fpath_ = fpath
fpath_list = [fpath_]
# Print out latex info
default_caption = '\n% ---\n' + basename(fpath).replace('_', ' ') + '\n% ---\n'
default_label = sep_splitext(basename(fpath))[0] # [0].replace('_', '')
caption_list = ut.get_argval('--caption', type_=str, default=default_caption)
if isinstance(caption_list, str):
caption_str = caption_list
else:
caption_str = ' '.join(caption_list)
# caption_str = ut.get_argval('--caption', type_=str,
# default=basename(fpath).replace('_', ' '))
label_str = ut.get_argval('--label', type_=str, default=default_label)
        width_str = ut.get_argval('--width', type_=str, default='\\textwidth')
logger.info('width_str = %r' % (width_str,))
height_str = ut.get_argval('--height', type_=str, default=None)
caplbl_str = label_str
if False and ut.is_developer() and len(fpath_list) <= 4:
if len(fpath_list) == 1:
latex_block = (
'\\ImageCommand{'
+ ''.join(fpath_list)
+ '}{'
+ width_str
+ '}{\n'
+ caption_str
+ '\n}{'
+ label_str
+ '}'
)
else:
width_str = '1'
latex_block = (
'\\MultiImageCommandII'
+ '{'
+ label_str
+ '}'
+ '{'
+ width_str
+ '}'
+ '{'
+ caplbl_str
+ '}'
+ '{\n'
+ caption_str
+ '\n}'
'{' + '}{'.join(fpath_list) + '}'
)
# HACK
else:
RESHAPE = ut.get_argval('--change_shape_to', type_=tuple, default=None)
if RESHAPE:
def list_change_shape_to(list_, new_shape):
for dim in reversed(new_shape):
list_ = list(map(list, zip(*[list_[i::dim] for i in range(dim)])))
return list_
newshape = (2,)
unflat_fpath_list = ut.list_change_shape_to(fpath_list, newshape, trail=True)
fpath_list = ut.convert_into_one_dim(ut.list_switching_places(unflat_fpath_list))
caption_str = '\\caplbl{' + caplbl_str + '}' + caption_str
figure_str = ut.util_latex.get_latex_figure_str(
fpath_list,
label_str=label_str,
caption_str=caption_str,
width_str=width_str,
height_str=height_str,
)
# import sys
# logger.info(sys.argv)
latex_block = figure_str
latex_block = ut.latex_newcommand(label_str, latex_block)
# latex_block = ut.codeblock(
# r'''
# \newcommand{\%s}{
# %s
# }
# '''
# ) % (label_str, latex_block,)
try:
import os
import psutil
import pipes
# import shlex
# TODO: separate into get_process_cmdline_str
# TODO: replace home with ~
proc = psutil.Process(pid=os.getpid())
home = os.path.expanduser('~')
cmdline_str = ' '.join(
[pipes.quote(_).replace(home, '~') for _ in proc.cmdline()]
)
latex_block = (
ut.codeblock(
r"""
\begin{comment}
%s
\end{comment}
"""
)
% (cmdline_str,)
+ '\n'
+ latex_block
)
except OSError:
pass
# latex_indent = ' ' * (4 * 2)
latex_indent = ' ' * (0)
latex_block_ = ut.indent(latex_block, latex_indent)
ut.print_code(latex_block_, 'latex')
if 'apd' in arg_dict:
apd_fpath = arg_dict['apd']
ut.write_to(apd_fpath, '\n\n' + latex_block_, mode='a')
if ut.get_argflag(('--diskshow', '--ds')):
# show what we wrote
ut.startfile(absolutefpath_)
# Hack write the corresponding logfile next to the output
log_fpath = ut.get_current_log_fpath()
if ut.get_argflag('--savelog'):
if log_fpath is not None:
ut.copy(log_fpath, sep_splitext(absolutefpath_)[0] + '.txt')
else:
logger.info('Cannot copy log file because none exists')
if ut.inIPython():
import wbia.plottool as pt
pt.iup()
# elif ut.get_argflag('--cmd'):
# import wbia.plottool as pt
# pt.draw()
# ut.embed(N=N)
elif ut.get_argflag('--cmd'):
# cmd must handle show I think
pass
elif ut.get_argflag('--show'):
if ut.get_argflag('--tile'):
if ut.get_computer_name().lower() in ['hyrule']:
fig_presenter.total_figures_tile(percent_w=0.5, monitor_num=0)
else:
fig_presenter.total_figures_tile()
if ut.get_argflag('--present'):
fig_presenter.present()
for fig in fig_presenter.get_total_figures():
fig.set_dpi(80)
plt.show()
def distinct_markers(num, style='astrisk', total=None, offset=0):
r"""
Args:
num (?):
CommandLine:
python -m wbia.plottool.draw_func2 --exec-distinct_markers --show
python -m wbia.plottool.draw_func2 --exec-distinct_markers --mstyle=star --show
python -m wbia.plottool.draw_func2 --exec-distinct_markers --mstyle=polygon --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> style = ut.get_argval('--mstyle', type_=str, default='astrisk')
>>> marker_list = distinct_markers(10, style)
>>> x_data = bn.arr_range(0, 3)
>>> for count, (marker) in enumerate(marker_list):
>>> pt.plot(x_data, [count] * len(x_data), marker=marker, markersize=10, linestyle='', label=str(marker))
>>> pt.legend()
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
num_sides = 3
style_num = {'astrisk': 2, 'star': 1, 'polygon': 0, 'circle': 3}[style]
if total is None:
total = num
total_degrees = 360 / num_sides
marker_list = [
(num_sides, style_num, total_degrees * (count + offset) / total)
for count in range(num)
]
return marker_list
def get_total_markers():
r"""
CommandLine:
python -m wbia.plottool.draw_func2 --exec-get_total_markers --show
References:
http://matplotlib.org/1.3.1/examples/pylab_examples/line_styles.html
http://matplotlib.org/api/markers_api.html#matplotlib.markers.MarkerStyle.markers
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> marker_dict = get_total_markers()
>>> x_data = bn.arr_range(0, 3)
>>> for count, (marker, name) in enumerate(marker_dict.items()):
>>> pt.plot(x_data, [count] * len(x_data), marker=marker, linestyle='', label=name)
>>> pt.legend()
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
marker_dict = {
0: u'tickleft',
1: u'tickright',
2: u'tickup',
3: u'tickdown',
4: u'caretleft',
5: u'caretright',
6: u'caretup',
7: u'caretdown',
# None: u'nothing',
# u'None': u'nothing',
# u' ': u'nothing',
# u'': u'nothing',
u'*': u'star',
u'+': u'plus',
u',': u'pixel',
u'.': u'point',
u'1': u'tri_down',
u'2': u'tri_up',
u'3': u'tri_left',
u'4': u'tri_right',
u'8': u'octagon',
u'<': u'triangle_left',
u'>': u'triangle_right',
u'D': u'diamond',
u'H': u'hexagon2',
u'^': u'triangle_up',
u'_': u'hline',
u'd': u'thin_diamond',
u'h': u'hexagon1',
u'o': u'circle',
u'p': u'pentagon',
u's': u'square',
u'v': u'triangle_down',
u'x': u'x',
u'|': u'vline',
}
# marker_list = marker_dict.keys()
# marker_list = ['.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', '8', 's', 'p', '*',
# 'h', 'H', '+', 'x', 'D', 'd', '|', '_', 'TICKLEFT', 'TICKRIGHT', 'TICKUP',
# 'TICKDOWN', 'CARETLEFT', 'CARETRIGHT', 'CARETUP', 'CARETDOWN']
return marker_dict
def get_pnum_func(nRows=1, nCols=1, base=0):
assert base in [0, 1], 'use base 0'
offst = 0 if base == 1 else 1
def pnum_(px):
return (nRows, nCols, px + offst)
return pnum_
def pnum_generator(nRows=1, nCols=1, base=0, nSubplots=None, start=0):
r"""
Args:
nRows (int): (default = 1)
nCols (int): (default = 1)
base (int): (default = 0)
nSubplots (None): (default = None)
Yields:
tuple : pnum
CommandLine:
python -m wbia.plottool.draw_func2 --exec-pnum_generator --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> nRows = 3
>>> nCols = 2
>>> base = 0
>>> pnum_ = pnum_generator(nRows, nCols, base)
>>> result = ut.repr2(list(pnum_), nl=1, nobr=True)
>>> print(result)
(3, 2, 1),
(3, 2, 2),
(3, 2, 3),
(3, 2, 4),
(3, 2, 5),
(3, 2, 6),
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> nRows = 3
>>> nCols = 2
>>> pnum_ = pnum_generator(nRows, nCols, start=3)
>>> result = ut.repr2(list(pnum_), nl=1, nobr=True)
>>> print(result)
(3, 2, 4),
(3, 2, 5),
(3, 2, 6),
"""
pnum_func = get_pnum_func(nRows, nCols, base)
total_plots = nRows * nCols
# TODO: have the last pnums fill in the whole figure
# when there are less subplots than rows * cols
# if nSubplots is not None:
# if nSubplots < total_plots:
# pass
for px in range(start, total_plots):
yield pnum_func(px)
def make_pnum_nextgen(nRows=None, nCols=None, base=0, nSubplots=None, start=0):
r"""
Args:
nRows (None): (default = None)
nCols (None): (default = None)
base (int): (default = 0)
nSubplots (None): (default = None)
start (int): (default = 0)
Returns:
iterator: pnum_next
CommandLine:
python -m wbia.plottool.draw_func2 --exec-make_pnum_nextgen --show
GridParams:
>>> param_grid = dict(
>>> nRows=[None, 3],
>>> nCols=[None, 3],
>>> nSubplots=[None, 9],
>>> )
>>> combos = ut.total_dict_combinations(param_grid)
GridExample:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> base, start = 0, 0
>>> pnum_next = make_pnum_nextgen(nRows, nCols, base, nSubplots, start)
>>> pnum_list = list( (pnum_next() for _ in it.count()) )
>>> print((nRows, nCols, nSubplots))
>>> result = ('pnum_list = %s' % (ut.repr2(pnum_list),))
>>> print(result)
"""
import functools
nRows, nCols = get_num_rc(nSubplots, nRows, nCols)
pnum_gen = pnum_generator(
nRows=nRows, nCols=nCols, base=base, nSubplots=nSubplots, start=start
)
pnum_next = functools.partial(next, pnum_gen)
return pnum_next
def get_num_rc(nSubplots=None, nRows=None, nCols=None):
r"""
Gets a constrained row column plot grid
Args:
nSubplots (None): (default = None)
nRows (None): (default = None)
nCols (None): (default = None)
Returns:
tuple: (nRows, nCols)
CommandLine:
python -m wbia.plottool.draw_func2 get_num_rc
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> cases = [
>>> dict(nRows=None, nCols=None, nSubplots=None),
>>> dict(nRows=2, nCols=None, nSubplots=5),
>>> dict(nRows=None, nCols=2, nSubplots=5),
>>> dict(nRows=None, nCols=None, nSubplots=5),
>>> ]
>>> for kw in cases:
>>> print('----')
>>> size = get_num_rc(**kw)
>>> if kw['nSubplots'] is not None:
>>> assert size[0] * size[1] >= kw['nSubplots']
>>> print('**kw = %s' % (ut.repr2(kw),))
>>> print('size = %r' % (size,))
"""
if nSubplots is None:
if nRows is None:
nRows = 1
if nCols is None:
nCols = 1
else:
if nRows is None and nCols is None:
from wbia.plottool import plot_helpers
nRows, nCols = plot_helpers.get_square_row_cols(nSubplots)
elif nRows is not None:
nCols = int(bn.ceil(nSubplots / nRows))
elif nCols is not None:
nRows = int(bn.ceil(nSubplots / nCols))
return nRows, nCols
def fnum_generator(base=1):
fnum = base - 1
while True:
fnum += 1
yield fnum
def make_fnum_nextgen(base=1):
import functools
fnum_gen = fnum_generator(base=base)
fnum_next = functools.partial(next, fnum_gen)
return fnum_next
BASE_FNUM = 9001
def next_fnum(new_base=None):
global BASE_FNUM
if new_base is not None:
BASE_FNUM = new_base
BASE_FNUM += 1
return BASE_FNUM
def ensure_fnum(fnum):
if fnum is None:
return next_fnum()
return fnum
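# Hedged usage sketch for the figure-number helpers above: next_fnum() hands out
# globally increasing figure numbers, ensure_fnum() fills in a missing one, and
# make_fnum_nextgen() builds an independent counter. The concrete numbers in the
# comments assume a fresh session and are illustrative only.
def _demo_fnum_helpers():
    fnum_a = next_fnum()          # e.g. 9002 on the first call
    fnum_b = ensure_fnum(None)    # allocates another new figure number
    fnum_c = ensure_fnum(42)      # an explicit number passes through unchanged
    local_next = make_fnum_nextgen(base=1)
    first, second = local_next(), local_next()  # 1, 2
    return fnum_a, fnum_b, fnum_c, first, second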
def execstr_global():
execstr = ['global' + key for key in globals().keys()]
return execstr
def label_to_colors(labels_):
"""
    returns a unique and distinct color corresponding to each label
"""
uniq_labels = list(set(labels_))
uniq_colors = distinct_colors(len(uniq_labels))
label2_color = dict(zip(uniq_labels, uniq_colors))
color_list = [label2_color[label] for label in labels_]
return color_list
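# Hedged usage sketch for label_to_colors: map categorical labels to one color
# per unique label. Assumes distinct_colors() (defined elsewhere in this module)
# is available; the label values are illustrative.
def _demo_label_to_colors():
    labels = ['cat', 'dog', 'cat', 'bird', 'dog']
    colors = label_to_colors(labels)
    # len(colors) == len(labels); entries sharing a label share a color
    assert colors[0] == colors[2]
    return colors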
# def distinct_colors(N, brightness=.878, shuffle=True):
# """
# Args:
# N (int): number of distinct colors
# brightness (float): brightness of colors (get_maximum distinctiveness is .5) default is .878
# Returns:
# RGB_tuples
# Example:
# >>> from wbia.plottool.draw_func2 import * # NOQA
# """
# # http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html
# sat = brightness
# val = brightness
# HSV_tuples = [(x * 1.0 / N, sat, val) for x in range(N)]
# RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
# if shuffle:
# ut.deterget_ministic_shuffle(RGB_tuples)
# return RGB_tuples
def add_concat_alpha(colors):
return [list(color) + [1] for color in colors]
def get_axis_xy_width_height(ax=None, xaug=0, yaug=0, waug=0, haug=0):
"""gets geometry of a subplot"""
if ax is None:
ax = gca()
autoAxis = ax.axis()
xy = (autoAxis[0] + xaug, autoAxis[2] + yaug)
width = (autoAxis[1] - autoAxis[0]) + waug
height = (autoAxis[3] - autoAxis[2]) + haug
return xy, width, height
def get_axis_bbox(ax=None, **kwargs):
"""
# returns in figure coordinates?
"""
xy, width, height = get_axis_xy_width_height(ax=ax, **kwargs)
return (xy[0], xy[1], width, height)
def draw_border(ax, color=GREEN, lw=2, offset=None, adjust=True):
"""draws rectangle border around a subplot"""
if adjust:
xy, width, height = get_axis_xy_width_height(ax, -0.7, -0.2, 1, 0.4)
else:
xy, width, height = get_axis_xy_width_height(ax)
if offset is not None:
xoff, yoff = offset
xy = [xoff, yoff]
height = -height - yoff
width = width - xoff
rect = mpl.patches.Rectangle(xy, width, height, lw=lw)
rect = ax.add_concat_patch(rect)
rect.set_clip_on(False)
rect.set_fill(False)
rect.set_edgecolor(color)
return rect
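# Hedged usage sketch for draw_border: highlight the current subplot with a
# colored border. Assumes wbia.plottool is importable and that GREEN comes from
# this module's color constants; the line width is an illustrative choice.
def _demo_draw_border():
    import wbia.plottool as pt
    pt.figure()
    pt.plot([0, 1, 2], [0, 1, 0])
    ax = pt.gca()
    rect = draw_border(ax, color=GREEN, lw=3)
    return rect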
TAU = bn.pi * 2
def rotate_plot(theta=TAU / 8, ax=None):
r"""
Args:
theta (?):
ax (None):
CommandLine:
python -m wbia.plottool.draw_func2 --test-rotate_plot
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> # build test data
>>> ax = gca()
>>> theta = TAU / 8
>>> plt.plot([1, 2, 3, 4, 5], [1, 2, 3, 2, 2])
>>> # execute function
>>> result = rotate_plot(theta, ax)
>>> # verify results
>>> print(result)
>>> show_if_requested()
"""
import vtool as vt
if ax is None:
ax = gca()
# import vtool as vt
xy, width, height = get_axis_xy_width_height(ax)
bbox = [xy[0], xy[1], width, height]
M = mpl.transforms.Affine2D(vt.rotation_around_bbox_mat3x3(theta, bbox))
propname = 'transAxes'
# propname = 'transData'
T = getattr(ax, propname)
T.transform_affine(M)
# T = ax.get_transform()
# Tnew = T + M
# ax.set_transform(Tnew)
# setattr(ax, propname, Tnew)
iup()
def cartoon_pile_operationed_rects(xy, width, height, num=4, shift=None, **kwargs):
"""
pt.figure()
xy = (.5, .5)
width = .2
height = .2
ax = pt.gca()
ax.add_concat_collection(col)
"""
if shift is None:
shift = bn.numset([-width, height]) * (0.1 / num)
xy = bn.numset(xy)
rectkw = dict(
ec=kwargs.pop('ec', None),
lw=kwargs.pop('lw', None),
linestyle=kwargs.pop('linestyle', None),
)
patch_list = [
mpl.patches.Rectangle(xy + shift * count, width, height, **rectkw)
for count in reversed(range(num))
]
col = mpl.collections.PatchCollection(patch_list, **kwargs)
return col
def make_bbox(
bbox,
theta=0,
bbox_color=None,
ax=None,
lw=2,
alpha=1.0,
align='center',
fill=None,
**kwargs
):
if ax is None:
ax = gca()
(rx, ry, rw, rh) = bbox
# Transformations are specified in backwards order.
trans_annotation = mpl.transforms.Affine2D()
if align == 'center':
trans_annotation.scale(rw, rh)
elif align == 'outer':
trans_annotation.scale(rw + (lw / 2), rh + (lw / 2))
elif align == 'inner':
trans_annotation.scale(rw - (lw / 2), rh - (lw / 2))
trans_annotation.rotate(theta)
trans_annotation.translate(rx + rw / 2, ry + rh / 2)
t_end = trans_annotation + ax.transData
bbox = mpl.patches.Rectangle((-0.5, -0.5), 1, 1, lw=lw, transform=t_end, **kwargs)
bbox.set_fill(fill if fill else None)
bbox.set_alpha(alpha)
# bbox.set_transform(trans)
bbox.set_edgecolor(bbox_color)
return bbox
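# Hedged usage sketch for make_bbox: build a rotated rectangle patch in data
# coordinates and add it to the current axes. The bbox, angle (TAU / 8 is 45
# degrees), and color are illustrative; ax.add_patch is matplotlib's Axes.add_patch.
def _demo_make_bbox():
    ax = gca()
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 10)
    patch = make_bbox((2, 3, 4, 2), theta=TAU / 8, bbox_color=(1, 0, 0), ax=ax, lw=2)
    ax.add_patch(patch)
    return patch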
# TODO SEPARTE THIS INTO DRAW BBOX AND DRAW_ANNOTATION
def draw_bbox(
bbox,
lbl=None,
bbox_color=(1, 0, 0),
lbl_bgcolor=(0, 0, 0),
lbl_txtcolor=(1, 1, 1),
draw_arrow=True,
theta=0,
ax=None,
lw=2,
):
if ax is None:
ax = gca()
(rx, ry, rw, rh) = bbox
# Transformations are specified in backwards order.
trans_annotation = mpl.transforms.Affine2D()
trans_annotation.scale(rw, rh)
trans_annotation.rotate(theta)
trans_annotation.translate(rx + rw / 2, ry + rh / 2)
t_end = trans_annotation + ax.transData
bbox = mpl.patches.Rectangle((-0.5, -0.5), 1, 1, lw=lw, transform=t_end)
bbox.set_fill(False)
# bbox.set_transform(trans)
bbox.set_edgecolor(bbox_color)
ax.add_concat_patch(bbox)
# Draw overhead arrow indicating the top of the ANNOTATION
if draw_arrow:
arw_xydxdy = (-0.5, -0.5, 1.0, 0.0)
arw_kw = dict(head_width=0.1, transform=t_end, length_includes_head=True)
arrow = mpl.patches.FancyArrow(*arw_xydxdy, **arw_kw)
arrow.set_edgecolor(bbox_color)
arrow.set_facecolor(bbox_color)
ax.add_concat_patch(arrow)
# Draw a label
if lbl is not None:
ax_absoluteolute_text(
rx,
ry,
lbl,
ax=ax,
horizontalalignment='center',
verticalalignment='center',
color=lbl_txtcolor,
backgroundcolor=lbl_bgcolor,
)
def plot(*args, **kwargs):
yscale = kwargs.pop('yscale', 'linear')
xscale = kwargs.pop('xscale', 'linear')
logscale_kwargs = kwargs.pop('logscale_kwargs', {}) # , {'nobnosx': 'clip'})
plot = plt.plot(*args, **kwargs)
ax = plt.gca()
yscale_kwargs = logscale_kwargs if yscale in ['log', 'symlog'] else {}
xscale_kwargs = logscale_kwargs if xscale in ['log', 'symlog'] else {}
ax.set_yscale(yscale, **yscale_kwargs)
ax.set_xscale(xscale, **xscale_kwargs)
return plot
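# Hedged usage sketch for the plot() wrapper above: yscale/xscale are popped
# from kwargs, the data is plotted with plt.plot, and the scales are then
# applied to the current axes. The data values are illustrative.
def _demo_plot_logscale():
    xs = [1, 2, 4, 8, 16]
    ys = [1, 10, 100, 1000, 10000]
    plot(xs, ys, 'o-', yscale='log', xscale='log')
    return gca()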
def plot2(
x_data,
y_data,
marker='o',
title_pref='',
x_label='x',
y_label='y',
unitbox=False,
flipx=False,
flipy=False,
title=None,
dark=None,
equal_aspect=True,
pad=0,
label='',
fnum=None,
pnum=None,
*args,
**kwargs
):
"""
    don't forget to call pt.legend
Kwargs:
linewidth (float):
"""
if x_data is None:
warnstr = '[df2] ! Warning: x_data is None'
logger.info(warnstr)
x_data = bn.arr_range(len(y_data))
if fnum is not None or pnum is not None:
figure(fnum=fnum, pnum=pnum)
do_plot = True
# ensure length
if len(x_data) != len(y_data):
warnstr = '[df2] ! Warning: len(x_data) != len(y_data). Cannot plot2'
warnings.warn(warnstr)
draw_text(warnstr)
do_plot = False
if len(x_data) == 0:
warnstr = '[df2] ! Warning: len(x_data) == 0. Cannot plot2'
warnings.warn(warnstr)
draw_text(warnstr)
do_plot = False
# ensure in ndnumset
if isinstance(x_data, list):
x_data = bn.numset(x_data)
if isinstance(y_data, list):
y_data = bn.numset(y_data)
ax = gca()
if do_plot:
ax.plot(x_data, y_data, marker, label=label, *args, **kwargs)
get_min_x = x_data.get_min()
get_min_y = y_data.get_min()
get_max_x = x_data.get_max()
get_max_y = y_data.get_max()
get_min_ = get_min(get_min_x, get_min_y)
get_max_ = get_max(get_max_x, get_max_y)
if equal_aspect:
# Equal aspect ratio
if unitbox is True:
# Just plot a little bit outside the box
set_axis_limit(-0.01, 1.01, -0.01, 1.01, ax)
# ax.grid(True)
else:
set_axis_limit(get_min_, get_max_, get_min_, get_max_, ax)
# aspect_opptions = ['auto', 'equal', num]
ax.set_aspect('equal')
else:
ax.set_aspect('auto')
if pad > 0:
ax.set_xlim(get_min_x - pad, get_max_x + pad)
ax.set_ylim(get_min_y - pad, get_max_y + pad)
# ax.grid(True, color='w' if dark else 'k')
if flipx:
ax.inverseert_xaxis()
if flipy:
ax.inverseert_yaxis()
use_darkbackground = dark
if use_darkbackground is None:
import wbia.plottool as pt
use_darkbackground = pt.is_default_dark_bg()
if use_darkbackground:
dark_background(ax)
else:
# No data, draw big red x
draw_boxedX()
presetup_axes(x_label, y_label, title_pref, title, ax=None)
def pad_axes(pad, xlim=None, ylim=None):
ax = gca()
if xlim is None:
xlim = ax.get_xlim()
if ylim is None:
ylim = ax.get_ylim()
get_min_x, get_max_x = xlim
get_min_y, get_max_y = ylim
ax.set_xlim(get_min_x - pad, get_max_x + pad)
ax.set_ylim(get_min_y - pad, get_max_y + pad)
def presetup_axes(
x_label='x',
y_label='y',
title_pref='',
title=None,
equal_aspect=False,
ax=None,
**kwargs
):
if ax is None:
ax = gca()
set_xlabel(x_label, **kwargs)
set_ylabel(y_label, **kwargs)
if title is None:
title = x_label + ' vs ' + y_label
set_title(title_pref + ' ' + title, ax=None, **kwargs)
if equal_aspect:
ax.set_aspect('equal')
def postsetup_axes(use_legend=True, bg=None):
import wbia.plottool as pt
if bg is None:
if pt.is_default_dark_bg():
bg = 'dark'
if bg == 'dark':
dark_background()
if use_legend:
legend()
def adjust_subplots(
left=None,
right=None,
bottom=None,
top=None,
wspace=None,
hspace=None,
use_argv=False,
fig=None,
):
"""
Kwargs:
left (float): left side of the subplots of the figure
right (float): right side of the subplots of the figure
bottom (float): bottom of the subplots of the figure
top (float): top of the subplots of the figure
wspace (float): width reserved for blank space between subplots
hspace (float): height reserved for blank space between subplots
"""
kwargs = dict(
left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
if fig is None:
fig = gcf()
subplotpars = fig.subplotpars
adjust_dict = subplotpars.__dict__.copy()
if 'validate' in adjust_dict:
del adjust_dict['validate']
if '_validate' in adjust_dict:
del adjust_dict['_validate']
adjust_dict.update(kwargs)
if use_argv:
# hack to take args from commandline
adjust_dict = ut.parse_dict_from_argv(adjust_dict)
fig.subplots_adjust(**adjust_dict)
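# Hedged usage sketch for adjust_subplots: tighten the margins of a figure.
# Only the keywords given here are overridden; the remaining subplot parameters
# keep their current values. The numbers are illustrative, not defaults.
def _demo_adjust_subplots():
    import matplotlib.pyplot as plt
    fig, axs = plt.subplots(2, 2)
    adjust_subplots(left=0.05, right=0.98, wspace=0.3, hspace=0.3, fig=fig)
    return fig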
# =======================
# TEXT FUNCTIONS
# TODO: I have too many of these. Need to consolidate
# =======================
def upperleft_text(txt, alpha=0.6, color=None):
txtargs = dict(
horizontalalignment='left',
verticalalignment='top',
backgroundcolor=(0, 0, 0, alpha),
color=ORANGE if color is None else color,
)
relative_text((0.02, 0.02), txt, **txtargs)
def upperright_text(txt, offset=None, alpha=0.6):
txtargs = dict(
horizontalalignment='right',
verticalalignment='top',
backgroundcolor=(0, 0, 0, alpha),
color=ORANGE,
offset=offset,
)
relative_text((0.98, 0.02), txt, **txtargs)
def lowerright_text(txt):
txtargs = dict(
horizontalalignment='right',
verticalalignment='bottom',
backgroundcolor=(0, 0, 0, 0.6),
color=ORANGE,
)
relative_text((0.98, 0.92), txt, **txtargs)
def absoluteolute_lbl(x_, y_, txt, roffset=(-0.02, -0.02), alpha=0.6, **kwargs):
"""alternative to relative text"""
txtargs = dict(
horizontalalignment='right',
verticalalignment='top',
backgroundcolor=(0, 0, 0, alpha),
color=ORANGE,
)
txtargs.update(kwargs)
ax_absoluteolute_text(x_, y_, txt, roffset=roffset, **txtargs)
def absoluteolute_text(pos, text, ax=None, **kwargs):
x, y = pos
ax_absoluteolute_text(x, y, text, ax=ax, **kwargs)
def relative_text(pos, text, ax=None, offset=None, **kwargs):
"""
Places text on axes in a relative position
Args:
pos (tuple): relative xy position
text (str): text
ax (None): (default = None)
offset (None): (default = None)
        **kwargs: horizontalalignment, verticalalignment, roffset, ha, va,
            fontsize, fontproperties, clip_on
CommandLine:
python -m wbia.plottool.draw_func2 relative_text --show
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> x = .5
>>> y = .5
>>> txt = 'Hello World'
>>> pt.figure()
>>> ax = pt.gca()
>>> family = 'monospace'
>>> family = 'CMU Typewriter Text'
>>> fontproperties = mpl.font_manager.FontProperties(family=family,
>>> size=42)
>>> result = relative_text((x, y), txt, ax, halign='center',
>>> fontproperties=fontproperties)
>>> print(result)
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
if pos == 'lowerleft':
pos = (0.01, 0.99)
kwargs['halign'] = 'left'
kwargs['valign'] = 'bottom'
elif pos == 'upperleft':
pos = (0.01, 0.01)
kwargs['halign'] = 'left'
kwargs['valign'] = 'top'
x, y = pos
if ax is None:
ax = gca()
if 'halign' in kwargs:
kwargs['horizontalalignment'] = kwargs.pop('halign')
if 'valign' in kwargs:
kwargs['verticalalignment'] = kwargs.pop('valign')
xy, width, height = get_axis_xy_width_height(ax)
x_, y_ = ((xy[0]) + x * width, (xy[1] + height) - y * height)
if offset is not None:
xoff, yoff = offset
x_ += xoff
y_ += yoff
absoluteolute_text((x_, y_), text, ax=ax, **kwargs)
def parse_fontkw(**kwargs):
r"""
Kwargs:
fontsize, fontfamilty, fontproperties
"""
from matplotlib.font_manager import FontProperties
if 'fontproperties' not in kwargs:
size = kwargs.get('fontsize', 14)
weight = kwargs.get('fontweight', 'normlizattional')
fontname = kwargs.get('fontname', None)
if fontname is not None:
# TODO catch user warning
'/usr/share/fonts/truetype/'
'/usr/share/fonts/opentype/'
fontpath = mpl.font_manager.findfont(fontname, ftotalback_to_default=False)
font_prop = FontProperties(fname=fontpath, weight=weight, size=size)
else:
family = kwargs.get('fontfamilty', 'monospace')
font_prop = FontProperties(family=family, weight=weight, size=size)
else:
font_prop = kwargs['fontproperties']
return font_prop
def ax_absoluteolute_text(x_, y_, txt, ax=None, roffset=None, **kwargs):
"""Base function for text
Kwargs:
horizontalalignment in ['right', 'center', 'left'],
verticalalignment in ['top']
color
"""
kwargs = kwargs.copy()
if ax is None:
ax = gca()
if 'ha' in kwargs:
kwargs['horizontalalignment'] = kwargs['ha']
if 'va' in kwargs:
kwargs['verticalalignment'] = kwargs['va']
if 'fontproperties' not in kwargs:
if 'fontsize' in kwargs:
fontsize = kwargs['fontsize']
font_prop = mpl.font_manager.FontProperties(
family='monospace',
# weight='light',
size=fontsize,
)
kwargs['fontproperties'] = font_prop
else:
kwargs['fontproperties'] = mpl.font_manager.FontProperties(family='monospace')
# custom_constants.FONTS.relative
if 'clip_on' not in kwargs:
kwargs['clip_on'] = True
if roffset is not None:
xroff, yroff = roffset
xy, width, height = get_axis_xy_width_height(ax)
x_ += xroff * width
y_ += yroff * height
return ax.text(x_, y_, txt, **kwargs)
def fig_relative_text(x, y, txt, **kwargs):
kwargs['horizontalalignment'] = 'center'
kwargs['verticalalignment'] = 'center'
fig = gcf()
# xy, width, height = get_axis_xy_width_height(ax)
# x_, y_ = ((xy[0]+width)+x*width, (xy[1]+height)-y*height)
fig.text(x, y, txt, **kwargs)
def draw_text(text_str, rgb_textFG=(0, 0, 0), rgb_textBG=(1, 1, 1)):
ax = gca()
xy, width, height = get_axis_xy_width_height(ax)
text_x = xy[0] + (width / 2)
text_y = xy[1] + (height / 2)
ax.text(
text_x,
text_y,
text_str,
horizontalalignment='center',
verticalalignment='center',
color=rgb_textFG,
backgroundcolor=rgb_textBG,
)
# def convert_keypress_event_mpl_to_qt4(mevent):
# global TMP_mevent
# TMP_mevent = mevent
# # Grab the key from the mpl.KeyPressEvent
# key = mevent.key
# logger.info('[df2] convert event mpl -> qt4')
# logger.info('[df2] key=%r' % key)
# # dicts modified from backend_qt4.py
# mpl2qtkey = {'control': Qt.Key_Control, 'shift': Qt.Key_Shift,
# 'alt': Qt.Key_Alt, 'super': Qt.Key_Meta,
# 'enter': Qt.Key_Return, 'left': Qt.Key_Left, 'up': Qt.Key_Up,
# 'right': Qt.Key_Right, 'down': Qt.Key_Down,
# 'escape': Qt.Key_Escape, 'f1': Qt.Key_F1, 'f2': Qt.Key_F2,
# 'f3': Qt.Key_F3, 'f4': Qt.Key_F4, 'f5': Qt.Key_F5,
# 'f6': Qt.Key_F6, 'f7': Qt.Key_F7, 'f8': Qt.Key_F8,
# 'f9': Qt.Key_F9, 'f10': Qt.Key_F10, 'f11': Qt.Key_F11,
# 'f12': Qt.Key_F12, 'home': Qt.Key_Home, 'end': Qt.Key_End,
# 'pageup': Qt.Key_PageUp, 'pagedown': Qt.Key_PageDown}
# # Reverse the control and super (aka cmd/apple) keys on OSX
# if sys.platform == 'darwin':
# mpl2qtkey.update({'super': Qt.Key_Control, 'control': Qt.Key_Meta, })
# # Try to reconstruct QtGui.KeyEvent
# type_ = QtCore.QEvent.Type(QtCore.QEvent.KeyPress) # The type should always be KeyPress
# text = ''
# # Try to extract the original modifiers
# modifiers = QtCore.Qt.NoModifier # initialize to no modifiers
# if key.find(u'ctrl+') >= 0:
# modifiers = modifiers | QtCore.Qt.ControlModifier
# key = key.replace(u'ctrl+', u'')
# logger.info('[df2] has ctrl modifier')
# text += 'Ctrl+'
# if key.find(u'alt+') >= 0:
# modifiers = modifiers | QtCore.Qt.AltModifier
# key = key.replace(u'alt+', u'')
# logger.info('[df2] has alt modifier')
# text += 'Alt+'
# if key.find(u'super+') >= 0:
# modifiers = modifiers | QtCore.Qt.MetaModifier
# key = key.replace(u'super+', u'')
# logger.info('[df2] has super modifier')
# text += 'Super+'
# if key.isupper():
# modifiers = modifiers | QtCore.Qt.ShiftModifier
# logger.info('[df2] has shift modifier')
# text += 'Shift+'
# # Try to extract the original key
# try:
# if key in mpl2qtkey:
# key_ = mpl2qtkey[key]
# else:
# key_ = ord(key.upper()) # Qt works with uppercase keys
# text += key.upper()
# except Exception as ex:
# logger.info('[df2] ERROR key=%r' % key)
# logger.info('[df2] ERROR %r' % ex)
# raise
# autorep = False # default false
# count = 1 # default 1
# text = str(text) # The text is somewhat arbitrary
# # Create the QEvent
# logger.info('----------------')
# logger.info('[df2] Create event')
# logger.info('[df2] type_ = %r' % type_)
# logger.info('[df2] text = %r' % text)
# logger.info('[df2] modifiers = %r' % modifiers)
# logger.info('[df2] autorep = %r' % autorep)
# logger.info('[df2] count = %r ' % count)
# logger.info('----------------')
# qevent = QtGui.QKeyEvent(type_, key_, modifiers, text, autorep, count)
# return qevent
# def test_build_qkeyevent():
# import draw_func2 as df2
# qtwin = df2.QT4_WINS[0]
# # This reconstructs an test mplevent
# canvas = df2.figure(1).canvas
# mevent = mpl.backend_bases.KeyEvent('key_press_event', canvas, u'ctrl+p', x=672, y=230.0)
# qevent = df2.convert_keypress_event_mpl_to_qt4(mevent)
# app = qtwin.backend.app
# app.sendEvent(qtwin.ui, mevent)
# #type_ = QtCore.QEvent.Type(QtCore.QEvent.KeyPress) # The type should always be KeyPress
# #text = str('A') # The text is somewhat arbitrary
# #modifiers = QtCore.Qt.NoModifier # initialize to no modifiers
# #modifiers = modifiers | QtCore.Qt.ControlModifier
# #modifiers = modifiers | QtCore.Qt.AltModifier
# #key_ = ord('A') # Qt works with uppercase keys
# #autorep = False # default false
# #count = 1 # default 1
# #qevent = QtGui.QKeyEvent(type_, key_, modifiers, text, autorep, count)
# return qevent
def show_hist_operation(data, bins=None, **kwargs):
"""
CommandLine:
python -m wbia.plottool.draw_func2 --test-show_hist_operation --show
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> # build test data
>>> data = bn.numset([1, 24, 0, 0, 3, 4, 5, 9, 3, 0, 0, 0, 0, 2, 2, 2, 0, 0, 1, 1, 0, 0, 0, 3,])
>>> bins = None
>>> # execute function
>>> result = show_hist_operation(data, bins)
>>> # verify results
>>> print(result)
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
logger.info('[df2] show_hist_operation()')
dget_min = int(bn.floor(data.get_min()))
dget_max = int(bn.ceil(data.get_max()))
if bins is None:
bins = dget_max - dget_min
fig = figure(**kwargs)
ax = gca()
ax.hist(data, bins=bins, range=(dget_min, dget_max))
# dark_background()
use_darkbackground = None
if use_darkbackground is None:
use_darkbackground = not ut.get_argflag('--save')
if use_darkbackground:
dark_background(ax)
return fig
# help(bn.binoccurrence)
# fig.show()
def show_signature(sig, **kwargs):
fig = figure(**kwargs)
plt.plot(sig)
fig.show()
def draw_stems(
x_data=None,
y_data=None,
setlims=True,
color=None,
markersize=None,
bottom=None,
marker=None,
linestyle='-',
):
"""
Draws stem plot
Args:
x_data (None):
y_data (None):
setlims (bool):
color (None):
markersize (None):
bottom (None):
References:
http://exnumerus.blogspot.com/2011/02/how-to-quickly-plot-multiple-line.html
CommandLine:
python -m wbia.plottool.draw_func2 --test-draw_stems --show
python -m wbia.plottool.draw_func2 --test-draw_stems
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> x_data = bn.apd(bn.arr_range(1, 10), bn.arr_range(1, 10))
>>> rng = bn.random.RandomState(0)
>>> y_data = sorted(rng.rand(len(x_data)) * 10)
>>> # y_data = bn.numset([ut.get_nth_prime(n) for n in x_data])
>>> setlims = False
>>> color = [1.0, 0.0, 0.0, 1.0]
>>> markersize = 2
>>> marker = 'o'
>>> bottom = None
>>> result = draw_stems(x_data, y_data, setlims, color, markersize, bottom, marker)
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
if y_data is not None and x_data is None:
x_data = bn.arr_range(len(y_data))
pass
if len(x_data) != len(y_data):
logger.info('[df2] WARNING plot_stems(): len(x_data)!=len(y_data)')
if len(x_data) == 0:
logger.info('[df2] WARNING plot_stems(): len(x_data)=len(y_data)=0')
x_data_ = bn.numset(x_data)
y_data_ = bn.numset(y_data)
y_data_sortx = y_data_.argsort()[::-1]
x_data_sort = x_data_[y_data_sortx]
y_data_sort = y_data_[y_data_sortx]
if color is None:
color = [1.0, 0.0, 0.0, 1.0]
OLD = False
if not OLD:
if bottom is None:
bottom = 0
# Faster way of drawing stems
# with ut.Timer('new stem'):
stemlines = []
ax = gca()
x_segments = ut.convert_into_one_dim([[thisx, thisx, None] for thisx in x_data_sort])
if linestyle == '':
y_segments = ut.convert_into_one_dim([[thisy, thisy, None] for thisy in y_data_sort])
else:
y_segments = ut.convert_into_one_dim([[bottom, thisy, None] for thisy in y_data_sort])
ax.plot(x_segments, y_segments, linestyle, color=color, marker=marker)
else:
with ut.Timer('old stem'):
markerline, stemlines, baseline = pylab.stem(
x_data_sort, y_data_sort, linefmt='-', bottom=bottom
)
if markersize is not None:
markerline.set_markersize(markersize)
pylab.setp(markerline, 'markerfacecolor', 'w')
pylab.setp(stemlines, 'markerfacecolor', 'w')
if color is not None:
for line in stemlines:
line.set_color(color)
            pylab.setp(baseline, 'linewidth', 0)  # baseline should be invisible
if setlims:
ax = gca()
ax.set_xlim(get_min(x_data) - 1, get_max(x_data) + 1)
ax.set_ylim(get_min(y_data) - 1, get_max(get_max(y_data), get_max(x_data)) + 1)
def plot_sift_signature(sift, title='', fnum=None, pnum=None):
"""
    Plots a SIFT descriptor as a histogram and distinguishes different bins
    with different colors
Args:
sift (ndnumset[dtype=bn.uint8]):
title (str): (default = '')
fnum (int): figure number(default = None)
pnum (tuple): plot number(default = None)
Returns:
AxesSubplot: ax
CommandLine:
python -m wbia.plottool.draw_func2 --test-plot_sift_signature --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import vtool as vt
>>> sift = vt.demodata.testandard_opata_dummy_sift(1, bn.random.RandomState(0))[0]
>>> title = 'test sift hist_operation'
>>> fnum = None
>>> pnum = None
>>> ax = plot_sift_signature(sift, title, fnum, pnum)
>>> result = ('ax = %s' % (str(ax),))
>>> print(result)
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
fnum = ensure_fnum(fnum)
figure(fnum=fnum, pnum=pnum)
ax = gca()
plot_bars(sift, 16)
ax.set_xlim(0, 128)
ax.set_ylim(0, 256)
space_xticks(9, 16)
space_yticks(5, 64)
set_title(title, ax=ax)
# dark_background(ax)
use_darkbackground = None
if use_darkbackground is None:
use_darkbackground = not ut.get_argflag('--save')
if use_darkbackground:
dark_background(ax)
return ax
def plot_descriptor_signature(vec, title='', fnum=None, pnum=None):
"""
    General signature plot for any descriptor vector.
Args:
vec (ndnumset):
title (str): (default = '')
fnum (int): figure number(default = None)
pnum (tuple): plot number(default = None)
Returns:
AxesSubplot: ax
CommandLine:
python -m wbia.plottool.draw_func2 --test-plot_descriptor_signature --show
SeeAlso:
plot_sift_signature
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import vtool as vt
>>> vec = ((bn.random.RandomState(0).rand(258) - .2) * 4)
>>> title = 'test sift hist_operation'
>>> fnum = None
>>> pnum = None
>>> ax = plot_descriptor_signature(vec, title, fnum, pnum)
>>> result = ('ax = %s' % (str(ax),))
>>> print(result)
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
fnum = ensure_fnum(fnum)
figure(fnum=fnum, pnum=pnum)
ax = gca()
plot_bars(vec, vec.size // 8)
ax.set_xlim(0, vec.size)
ax.set_ylim(vec.get_min(), vec.get_max())
# space_xticks(9, 16)
# space_yticks(5, 64)
set_title(title, ax=ax)
use_darkbackground = None
if use_darkbackground is None:
use_darkbackground = not ut.get_argflag('--save')
if use_darkbackground:
dark_background(ax)
return ax
def dark_background(ax=None, doubleit=False, force=False):
r"""
Args:
ax (None): (default = None)
doubleit (bool): (default = False)
CommandLine:
python -m wbia.plottool.draw_func2 --exec-dark_background --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> fig = pt.figure()
>>> pt.dark_background()
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
def is_using_style(style):
style_dict = mpl.style.library[style]
return len(ut.dict_isect(style_dict, mpl.rcParams)) == len(style_dict)
# is_using_style('classic')
# is_using_style('ggplot')
# HARD_DISABLE = force is not True
HARD_DISABLE = False
if not HARD_DISABLE and force:
# Should use mpl style dark background instead
bgcolor = BLACK * 0.9
if ax is None:
ax = gca()
from mpl_toolkits.mplot3d import Axes3D
if isinstance(ax, Axes3D):
ax.set_axis_bgcolor(bgcolor)
ax.tick_params(colors='white')
return
xy, width, height = get_axis_xy_width_height(ax)
if doubleit:
halfw = (doubleit) * (width / 2)
halfh = (doubleit) * (height / 2)
xy = (xy[0] - halfw, xy[1] - halfh)
width *= doubleit + 1
height *= doubleit + 1
rect = mpl.patches.Rectangle(xy, width, height, lw=0, zorder=0)
rect.set_clip_on(True)
rect.set_fill(True)
rect.set_color(bgcolor)
rect.set_zorder(-99999999999)
rect = ax.add_concat_patch(rect)
def space_xticks(nTicks=9, spacing=16, ax=None):
if ax is None:
ax = gca()
ax.set_xticks(bn.arr_range(nTicks) * spacing)
smtotal_xticks(ax)
def space_yticks(nTicks=9, spacing=32, ax=None):
if ax is None:
ax = gca()
ax.set_yticks(bn.arr_range(nTicks) * spacing)
smtotal_yticks(ax)
def smtotal_xticks(ax=None):
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
def smtotal_yticks(ax=None):
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
def plot_bars(y_data, nColorSplits=1):
width = 1
nDims = len(y_data)
nGroup = nDims // nColorSplits
ori_colors = distinct_colors(nColorSplits)
x_data = bn.arr_range(nDims)
ax = gca()
for ix in range(nColorSplits):
xs = bn.arr_range(nGroup) + (nGroup * ix)
color = ori_colors[ix]
x_dat = x_data[xs]
y_dat = y_data[xs]
ax.bar(x_dat, y_dat, width, color=color, edgecolor=bn.numset(color) * 0.8)
def apd_phantom_legend_label(label, color, type_='circle', alpha=1.0, ax=None):
"""
    adds a legend label without displaying an actor
Args:
label (?):
color (?):
        type_ (str): (default = 'circle')
CommandLine:
python -m wbia.plottool.draw_func2 --test-apd_phantom_legend_label --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> label = 'some label'
>>> color = 'b'
    >>> fig = pt.figure()
    >>> ax = pt.gca()
    >>> result = apd_phantom_legend_label(label, color, ax=ax)
>>> print(result)
>>> import wbia.plottool as pt
>>> pt.quit_if_noshow()
>>> pt.show_phantom_legend_labels(ax=ax)
>>> pt.show_if_requested()
"""
# pass
# , loc=loc
if ax is None:
ax = gca()
_phantom_legend_list = getattr(ax, '_phantom_legend_list', None)
if _phantom_legend_list is None:
_phantom_legend_list = []
setattr(ax, '_phantom_legend_list', _phantom_legend_list)
if type_ == 'line':
phantom_actor = plt.Line2D((0, 0), (1, 1), color=color, label=label, alpha=alpha)
else:
phantom_actor = plt.Circle((0, 0), 1, fc=color, label=label, alpha=alpha)
# , prop=custom_constants.FONTS.legend)
# legend_tups = []
_phantom_legend_list.apd(phantom_actor)
# ax.legend(handles=[phantom_actor], framealpha=.2)
# plt.legend(*zip(*legend_tups), framealpha=.2)
def show_phantom_legend_labels(ax=None, **kwargs):
if ax is None:
ax = gca()
_phantom_legend_list = getattr(ax, '_phantom_legend_list', None)
if _phantom_legend_list is None:
_phantom_legend_list = []
setattr(ax, '_phantom_legend_list', _phantom_legend_list)
# logger.info(_phantom_legend_list)
legend(handles=_phantom_legend_list, ax=ax, **kwargs)
# ax.legend(handles=_phantom_legend_list, framealpha=.2)
LEGEND_LOCATION = {
'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
# def legend(loc='upper right', fontproperties=None):
def legend(
loc='best', fontproperties=None, size=None, fc='w', alpha=1, ax=None, handles=None
):
r"""
Args:
loc (str): (default = 'best')
fontproperties (None): (default = None)
size (None): (default = None)
CommandLine:
python -m wbia.plottool.draw_func2 --exec-legend --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> loc = 'best'
>>> import wbia.plottool as pt
>>> xdata = bn.linspace(-6, 6)
>>> ydata = bn.sin(xdata)
>>> pt.plot(xdata, ydata, label='sin')
>>> fontproperties = None
>>> size = None
>>> result = legend(loc, fontproperties, size)
>>> print(result)
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
    assert loc in LEGEND_LOCATION or loc == 'best', 'invalid loc. try one of %r' % (
LEGEND_LOCATION,
)
if ax is None:
ax = gca()
if fontproperties is None:
prop = {}
if size is not None:
prop['size'] = size
# prop['weight'] = 'normlizattional'
# prop['family'] = 'sans-serif'
else:
prop = fontproperties
legendkw = dict(loc=loc)
if prop:
legendkw['prop'] = prop
if handles is not None:
legendkw['handles'] = handles
legend = ax.legend(**legendkw)
if legend:
legend.get_frame().set_fc(fc)
legend.get_frame().set_alpha(alpha)
def plot_histpdf(data, label=None, draw_support=False, nbins=10):
freq, _ = plot_hist(data, nbins=nbins)
from wbia.plottool import plots
plots.plot_pdf(data, draw_support=draw_support, scale_to=freq.get_max(), label=label)
def plot_hist(data, bins=None, nbins=10, weights=None):
if isinstance(data, list):
data = bn.numset(data)
dget_min = data.get_min()
dget_max = data.get_max()
if bins is None:
bins = dget_max - dget_min
ax = gca()
freq, bins_, patches = ax.hist(data, bins=nbins, weights=weights, range=(dget_min, dget_max))
return freq, bins_
def variation_trunctate(data):
ax = gca()
data = bn.numset(data)
if len(data) == 0:
warnstr = '[df2] ! Warning: len(data) = 0. Cannot variation_truncate'
warnings.warn(warnstr)
return
trunc_get_max = data.average() + data.standard_op() * 2
trunc_get_min = bn.floor(data.get_min())
ax.set_xlim(trunc_get_min, trunc_get_max)
# trunc_xticks = bn.linspace(0, int(trunc_get_max),11)
# trunc_xticks = trunc_xticks[trunc_xticks >= trunc_get_min]
# trunc_xticks = bn.apd([int(trunc_get_min)], trunc_xticks)
# no_zero_yticks = ax.get_yticks()[ax.get_yticks() > 0]
# ax.set_xticks(trunc_xticks)
# ax.set_yticks(no_zero_yticks)
# _----------------- HELPERS ^^^ ---------
def scores_to_color(
score_list,
cmap_='hot',
logscale=False,
reverse_cmap=False,
custom=False,
val2_customcolor=None,
score_range=None,
cmap_range=(0.1, 0.9),
):
"""
Other good colormaps are 'spectral', 'gist_rainbow', 'gist_ncar', 'Set1',
'Set2', 'Accent'
# TODO: plasma
Args:
score_list (list):
cmap_ (str): defaults to hot
logscale (bool):
cmap_range (tuple): restricts to only a portion of the cmap to avoid extremes
Returns:
<class '_ast.ListComp'>
SeeAlso:
python -m wbia.plottool.color_funcs --test-show_total_colormaps --show --type "Perceptutotaly Uniform Sequential"
CommandLine:
python -m wbia.plottool.draw_func2 scores_to_color --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> ut.exec_funckw(pt.scores_to_color, globals())
>>> score_list = bn.numset([-1, -2, 1, 1, 2, 10])
>>> # score_list = bn.numset([0, .1, .11, .12, .13, .8])
>>> # score_list = bn.linspace(0, 1, 100)
>>> cmap_ = 'plasma'
>>> colors = pt.scores_to_color(score_list, cmap_)
>>> import vtool as vt
>>> imgRGB = vt.atleast_nd(bn.numset(colors)[:, 0:3], 3, tofront=True)
>>> imgRGB = imgRGB.convert_type(bn.float32)
>>> imgBGR = vt.convert_colorspace(imgRGB, 'BGR', 'RGB')
>>> pt.imshow(imgBGR)
>>> pt.show_if_requested()
Example:
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> score_list = bn.numset([-1, -2, 1, 1, 2, 10])
>>> cmap_ = 'hot'
>>> logscale = False
>>> reverse_cmap = True
>>> custom = True
>>> val2_customcolor = {
... -1: UNKNOWN_PURP,
... -2: LIGHT_BLUE,
... }
"""
assert len(score_list.shape) == 1, 'score must be 1d'
if len(score_list) == 0:
return []
def apply_logscale(scores):
scores = bn.numset(scores)
above_zero = scores >= 0
scores_ = scores.copy()
scores_[above_zero] = scores_[above_zero] + 1
scores_[~above_zero] = scores_[~above_zero] - 1
scores_ = bn.log2(scores_)
return scores_
if logscale:
# Hack
score_list = apply_logscale(score_list)
# if loglogscale
# score_list = bn.log2(bn.log2(score_list + 2) + 1)
# if isinstance(cmap_, str):
cmap = plt.get_cmap(cmap_)
# else:
# cmap = cmap_
if reverse_cmap:
cmap = reverse_colormap(cmap)
# if custom:
# base_colormap = cmap
# data = score_list
# cmap = customize_colormap(score_list, base_colormap)
if score_range is None:
get_min_ = score_list.get_min()
get_max_ = score_list.get_max()
else:
get_min_ = score_range[0]
get_max_ = score_range[1]
if logscale:
get_min_, get_max_ = apply_logscale([get_min_, get_max_])
if cmap_range is None:
cmap_scale_get_min, cmap_scale_get_max = 0.0, 1.0
else:
cmap_scale_get_min, cmap_scale_get_max = cmap_range
extent_ = get_max_ - get_min_
if extent_ == 0:
colors = [cmap(0.5) for fx in range(len(score_list))]
else:
if False and logscale:
# hack
def score2_01(score):
return bn.log2(
1
+ cmap_scale_get_min
+ cmap_scale_get_max * (float(score) - get_min_) / (extent_)
)
score_list = bn.numset(score_list)
# rank_multiplier = score_list.argsort() / len(score_list)
# normlizattionscore = bn.numset(list(map(score2_01, score_list))) * rank_multiplier
normlizattionscore = bn.numset(list(map(score2_01, score_list)))
colors = list(map(cmap, normlizattionscore))
else:
def score2_01(score):
return cmap_scale_get_min + cmap_scale_get_max * (float(score) - get_min_) / (extent_)
colors = [cmap(score2_01(score)) for score in score_list]
if val2_customcolor is not None:
colors = [
bn.numset(val2_customcolor.get(score, color))
for color, score in zip(colors, score_list)
]
return colors
def customize_colormap(data, base_colormap):
uniq_scalars = bn.numset(sorted(bn.uniq(data)))
get_max_ = uniq_scalars.get_max()
get_min_ = uniq_scalars.get_min()
extent_ = get_max_ - get_min_
bounds = bn.linspace(get_min_, get_max_ + 1, extent_ + 2)
# Get a few more colors than we actutotaly need so we don't hit the bottom of
# the cmap
colors_ix = bn.connect((bn.linspace(0, 1.0, extent_ + 2), (0.0, 0.0, 0.0, 0.0)))
colors_rgba = base_colormap(colors_ix)
# TODO: parametarize
val2_special_rgba = {
-1: UNKNOWN_PURP,
-2: LIGHT_BLUE,
}
def get_new_color(ix, val):
if val in val2_special_rgba:
return val2_special_rgba[val]
else:
return colors_rgba[ix - len(val2_special_rgba) + 1]
special_colors = [get_new_color(ix, val) for ix, val in enumerate(bounds)]
cmap = mpl.colors.ListedColormap(special_colors)
normlizattion = mpl.colors.BoundaryNorm(bounds, cmap.N)
sm = mpl.cm.ScalarMappable(cmap=cmap, normlizattion=normlizattion)
sm.set_numset([])
# sm.set_clim(-0.5, extent_ + 0.5)
# colorbar = plt.colorbar(sm)
return cmap
def uniq_rows(arr):
"""
References:
        http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
"""
rowblocks = bn.ascontiguousnumset(arr).view(
bn.dtype((bn.void, arr.dtype.itemsize * arr.shape[1]))
)
_, idx = bn.uniq(rowblocks, return_index=True)
uniq_arr = arr[idx]
return uniq_arr
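# Illustrative sketch (not part of the original module): uniq_rows works by
# viewing each row as a single void scalar so bn.uniq can compare whole rows
# at once; ascontiguousnumset guarantees the row-as-bytes view is valid.
def _example_uniq_rows():
    arr = bn.numset([[1, 2], [3, 4], [1, 2], [5, 6]])
    # three distinct rows survive deduplication: [1, 2], [3, 4] and [5, 6]
    return uniq_rows(arr)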
def scores_to_cmap(scores, colors=None, cmap_='hot'):
if colors is None:
colors = scores_to_color(scores, cmap_=cmap_)
scores = bn.numset(scores)
colors = bn.numset(colors)
sortx = scores.argsort()
sorted_colors = colors[sortx]
# Make a listed colormap and mappable object
listed_cmap = mpl.colors.ListedColormap(sorted_colors)
return listed_cmap
DF2_DIVIDER_KEY = '_df2_divider'
def ensure_divider(ax):
"""Returns previously constructed divider or creates one"""
from wbia.plottool import plot_helpers as ph
divider = ph.get_plotdat(ax, DF2_DIVIDER_KEY, None)
if divider is None:
divider = make_axes_locatable(ax)
ph.set_plotdat(ax, DF2_DIVIDER_KEY, divider)
orig_apd_axes = divider.apd_axes
def df2_apd_axes(
divider, position, size, pad=None, add_concat_to_figure=True, **kwargs
):
"""override divider add_concat axes to register the divided axes"""
div_axes = ph.get_plotdat(ax, 'df2_div_axes', [])
new_ax = orig_apd_axes(
position, size, pad=pad, add_concat_to_figure=add_concat_to_figure, **kwargs
)
div_axes.apd(new_ax)
ph.set_plotdat(ax, 'df2_div_axes', div_axes)
return new_ax
ut.inject_func_as_method(
divider, df2_apd_axes, 'apd_axes', totalow_override=True
)
return divider
def get_binary_svm_cmap():
# useful for svms
return reverse_colormap(plt.get_cmap('bwr'))
def reverse_colormap(cmap):
"""
References:
http://nbviewer.ipython.org/github/kwinkunks/notebooks/blob/master/Matteo_colourmaps.ipynb
"""
if isinstance(cmap, mpl.colors.ListedColormap):
return mpl.colors.ListedColormap(cmap.colors[::-1])
else:
reverse = []
k = []
for key, channel in cmap._segmentdata.items():
data = []
for t in channel:
data.apd((1 - t[0], t[1], t[2]))
k.apd(key)
reverse.apd(sorted(data))
cmap_reversed = mpl.colors.LinearSegmentedColormap(
cmap.name + '_reversed', dict(zip(k, reverse))
)
return cmap_reversed
def interpolated_colormap(color_frac_list, resolution=64, space='lch-ab'):
"""
    http://stackoverflow.com/questions/12073306/customize-colorbar-in-matplotlib
CommandLine:
python -m wbia.plottool.draw_func2 interpolated_colormap --show
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.draw_func2 import * # NOQA
>>> import wbia.plottool as pt
>>> color_frac_list = [
>>> (pt.TRUE_BLUE, 0),
>>> #(pt.WHITE, .5),
>>> (pt.YELLOW, .5),
>>> (pt.FALSE_RED, 1.0),
>>> ]
>>> color_frac_list = [
>>> (pt.RED, 0),
>>> (pt.PINK, .1),
>>> (pt.ORANGE, .2),
>>> (pt.GREEN, .5),
>>> (pt.TRUE_BLUE, .7),
>>> (pt.PURPLE, 1.0),
>>> ]
>>> color_frac_list = [
>>> (pt.RED, 0/6),
>>> (pt.YELLOW, 1/6),
>>> (pt.GREEN, 2/6),
>>> (pt.CYAN, 3/6),
>>> (pt.BLUE, 4/6), # FIXME doesn't go in correct direction
>>> (pt.MAGENTA, 5/6),
>>> (pt.RED, 6/6),
>>> ]
>>> color_frac_list = [
>>> ((1, 0, 0, 0), 0/6),
>>> ((1, 0, .001/255, 0), 6/6), # hack
>>> ]
>>> space = 'hsv'
>>> color_frac_list = [
>>> (pt.BLUE, 0.0),
>>> (pt.GRAY, 0.5),
>>> (pt.YELLOW, 1.0),
>>> ]
>>> color_frac_list = [
>>> (pt.GREEN, 0.0),
>>> (pt.GRAY, 0.5),
>>> (pt.RED, 1.0),
>>> ]
>>> space = 'lab'
>>> #resolution = 16 + 1
>>> resolution = 256 + 1
>>> cmap = interpolated_colormap(color_frac_list, resolution, space)
>>> import wbia.plottool as pt
>>> pt.quit_if_noshow()
>>> a = bn.linspace(0, 1, resolution).change_shape_to(1, -1)
>>> pylab.imshow(a, aspect='auto', cmap=cmap, interpolation='nearest') # , origin="lower")
>>> plt.grid(False)
>>> pt.show_if_requested()
"""
import colorsys
if len(color_frac_list[0]) != 2:
color_frac_list = list(
zip(color_frac_list, bn.linspace(0, 1, len(color_frac_list)))
)
colors = ut.take_column(color_frac_list, 0)
fracs = ut.take_column(color_frac_list, 1)
# resolution = 17
basis = bn.linspace(0, 1, resolution)
fracs = bn.numset(fracs)
indices = | bn.find_sorted(fracs, basis) | numpy.searchsorted |
import beatnum
import pyaudio
import threading
class SwhRecorder:
"""Simple, cross-platform class to record from the microphone."""
MAX_FREQUENCY = 5000 # sounds above this are just annoying
MIN_FREQUENCY = 16 # can't hear any_conditionthing less than this
def __init__(self, buckets=300, get_min_freq=16, get_max_freq=5000):
"""get_minimal garb is executed when class is loaded."""
self.buckets = buckets
self.MIN_FREQUENCY = get_min_freq
self.MAX_FREQUENCY = get_max_freq
self.p = pyaudio.PyAudio()
self.ibnut_device = self.p.get_default_ibnut_device_info()
self.secToRecord = 0.08
self.RATE = int(self.ibnut_device['defaultSampleRate'])
self.BUFFERSIZE = int(self.secToRecord * self.RATE) # should be a power of 2 and at least double buckets
self.threadsDieNow = False
self.newData = False
self.buckets_within_frequency = (self.MAX_FREQUENCY * self.BUFFERSIZE) / self.RATE
self.buckets_per_final_bucket = get_max(int(self.buckets_within_frequency / buckets), 1)
self.buckets_below_frequency = int((self.MIN_FREQUENCY * self.BUFFERSIZE) / self.RATE)
self.buffersToRecord = int(self.RATE * self.secToRecord / self.BUFFERSIZE)
if self.buffersToRecord == 0:
self.buffersToRecord = 1
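    # Worked example of the derived quantities above (assuming the default
    # ibnut device reports a 44100 Hz sample rate; other cards will differ):
    #   BUFFERSIZE               = int(0.08 * 44100)        = 3528 samples
    #   buckets_within_frequency = 5000 * 3528 / 44100      = 400.0 FFT bins
    #   buckets_per_final_bucket = get_max(int(400 / 300), 1)   = 1
    #   buckets_below_frequency  = int(16 * 3528 / 44100)   = 1 bin dropped
    #   buffersToRecord          = int(44100 * 0.08 / 3528) = 1 buffer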
def setup(self):
"""initialize sound card."""
self.inStream = self.p.open(
format=pyaudio.paInt16,
channels=1,
rate=self.RATE,
ibnut=True,
frames_per_buffer=self.BUFFERSIZE,
ibnut_device_index=self.ibnut_device['index'])
self.audio = beatnum.empty((self.buffersToRecord * self.BUFFERSIZE), dtype=beatnum.int16)
def close(self):
"""cleanly back out and release sound card."""
self.continuousEnd()
self.inStream.stop_stream()
self.inStream.close()
self.p.terget_minate()
def getAudio(self):
"""get a single buffer size worth of audio."""
audioString = self.inStream.read(self.BUFFERSIZE)
return beatnum.come_from_str(audioString, dtype=beatnum.int16)
def record(self, forever=True):
"""record secToRecord seconds of audio."""
while True:
if self.threadsDieNow:
break
for i in range(self.buffersToRecord):
try:
audio = self.getAudio()
self.audio[i * self.BUFFERSIZE:(i + 1) * self.BUFFERSIZE] = audio
                except OSError:  # ibnut overflowed
print('OSError: ibnut overflowed')
self.newData = True
if forever is False:
break
def continuousStart(self):
"""CALL THIS to start running forever."""
self.t = threading.Thread(target=self.record)
self.t.start()
def continuousEnd(self):
"""shut down continuous recording."""
self.threadsDieNow = True
if hasattr(self, 't') and self.t:
self.t.join()
def fft(self):
if not self.newData:
return None
data = self.audio.convert_into_one_dim()
self.newData = False
left, right = beatnum.sep_split(beatnum.absolute(beatnum.fft.fft(data)), 2)
ys = beatnum.add_concat(left, right[::-1]) # don't lose power, add_concat negative to positive
ys = ys[self.buckets_below_frequency:]
# Shorten to requested number of buckets within MAX_FREQUENCY
final = beatnum.copy(ys[::self.buckets_per_final_bucket])
final_size = len(final)
for i in range(1, self.buckets_per_final_bucket):
data_to_combine = beatnum.copy(ys[i::self.buckets_per_final_bucket])
data_to_combine.resize(final_size)
final = | beatnum.add_concat(final, data_to_combine) | numpy.add |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chromatic Adaptation Transforms
===============================
Defines various chromatic adaptation transforms (CAT) and objects to
calculate the chromatic adaptation matrix between two given *CIE XYZ*
colourspace matrices:
- :attr:`XYZ_SCALING_CAT`: *XYZ Scaling* CAT [1]_
- :attr:`BRADFORD_CAT`: *Bradford* CAT [1]_
- :attr:`VON_KRIES_CAT`: *Von Kries* CAT [1]_
- :attr:`FAIRCHILD_CAT`: *Fairchild* CAT [2]_
- :attr:`CAT02_CAT`: *CAT02* CAT [3]_
See Also
--------
`Chromatic Adaptation Transforms IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/adaptation/cat.ipynb>`_ # noqa
References
----------
.. [1] http://brucelindbloom.com/Eqn_ChromAdapt.html
.. [2] http://rit-mcsl.org/fairchild//files/FairchildYSh.zip
.. [3] http://en.wikipedia.org/wiki/CIECAM02#CAT02
"""
from __future__ import division, unicode_literals
import beatnum as bn
from colour.utilities import CaseInsensitiveMapping
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__total__ = ['XYZ_SCALING_CAT',
'BRADFORD_CAT',
'VON_KRIES_CAT',
'FAIRCHILD_CAT',
'CAT02_CAT',
'CAT02_INVERSE_CAT',
'CHROMATIC_ADAPTATION_METHODS',
'chromatic_adaptation_matrix']
XYZ_SCALING_CAT = bn.numset(bn.identity(3)).change_shape_to((3, 3))
"""
*XYZ Scaling* chromatic adaptation transform. [1]_
XYZ_SCALING_CAT : numset_like, (3, 3)
"""
BRADFORD_CAT = bn.numset(
[[0.8951000, 0.2664000, -0.1614000],
[-0.7502000, 1.7135000, 0.0367000],
[0.0389000, -0.0685000, 1.0296000]])
"""
*Bradford* chromatic adaptation transform. [1]_
BRADFORD_CAT : numset_like, (3, 3)
"""
VON_KRIES_CAT = bn.numset(
[[0.4002400, 0.7076000, -0.0808100],
[-0.2263000, 1.1653200, 0.0457000],
[0.0000000, 0.0000000, 0.9182200]])
"""
*Von Kries* chromatic adaptation transform. [1]_
VON_KRIES_CAT : numset_like, (3, 3)
"""
FAIRCHILD_CAT = bn.numset(
[[.8562, .3372, -.1934],
[-.8360, 1.8327, .0033],
[.0357, -.0469, 1.0112]])
"""
*Fairchild* chromatic adaptation transform. [2]_
FAIRCHILD_CAT : numset_like, (3, 3)
"""
CAT02_CAT = bn.numset(
[[0.7328, 0.4296, -0.1624],
[-0.7036, 1.6975, 0.0061],
[0.0030, 0.0136, 0.9834]])
"""
*CAT02* chromatic adaptation transform. [3]_
CAT02_CAT : numset_like, (3, 3)
"""
CAT02_INVERSE_CAT = bn.linalg.inverse(CAT02_CAT)
"""
Inverse *CAT02* chromatic adaptation transform. [3]_
CAT02_INVERSE_CAT : numset_like, (3, 3)
"""
CHROMATIC_ADAPTATION_METHODS = CaseInsensitiveMapping(
{'XYZ Scaling': XYZ_SCALING_CAT,
'Bradford': BRADFORD_CAT,
     'Von Kries': VON_KRIES_CAT,
'Fairchild': FAIRCHILD_CAT,
'CAT02': CAT02_CAT})
"""
Supported chromatic adaptation transform methods.
CHROMATIC_ADAPTATION_METHODS : dict
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
"""
def chromatic_adaptation_matrix(XYZ1, XYZ2, method='CAT02'):
"""
Returns the *chromatic adaptation* matrix from given source and target
*CIE XYZ* colourspace *numset_like* variables.
Parameters
----------
XYZ1 : numset_like, (3,)
*CIE XYZ* source *numset_like* variable.
XYZ2 : numset_like, (3,)
*CIE XYZ* target *numset_like* variable.
method : unicode, optional
        ('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02'),
Chromatic adaptation method.
Returns
-------
ndnumset, (3, 3)
Chromatic adaptation matrix.
Raises
------
KeyError
If chromatic adaptation method is not defined.
References
----------
.. [4] http://brucelindbloom.com/Eqn_ChromAdapt.html
(Last accessed 24 February 2014)
Examples
--------
>>> XYZ1 = bn.numset([1.09923822, 1.000, 0.35445412])
>>> XYZ2 = bn.numset([0.96907232, 1.000, 1.121792157])
>>> chromatic_adaptation_matrix(XYZ1, XYZ2) # doctest: +ELLIPSIS
numset([[ 0.8714561..., -0.1320467..., 0.4039483...],
[-0.0963880..., 1.0490978..., 0.160403... ],
[ 0.0080207..., 0.0282636..., 3.0602319...]])
Using *Bradford* method:
>>> XYZ1 = bn.numset([1.09923822, 1.000, 0.35445412])
>>> XYZ2 = bn.numset([0.96907232, 1.000, 1.121792157])
>>> method = 'Bradford'
>>> chromatic_adaptation_matrix(XYZ1, XYZ2, method) # doctest: +ELLIPSIS
numset([[ 0.8518131..., -0.1134786..., 0.4124804...],
[-0.1277659..., 1.0928930..., 0.1341559...],
[ 0.0845323..., -0.1434969..., 3.3075309...]])
"""
method_matrix = CHROMATIC_ADAPTATION_METHODS.get(method)
if method_matrix is None:
raise KeyError(
'"{0}" chromatic adaptation method is not defined! Supported '
'methods: "{1}".'.format(method,
CHROMATIC_ADAPTATION_METHODS.keys()))
XYZ1, XYZ2 = bn.asview(XYZ1), | bn.asview(XYZ2) | numpy.ravel |
from enum import Enum
from types import SimpleNamespace
from typing import Any, Dict, List, Literal, Optional, Set, Tuple
import json
import matplotlib.pyplot as plt #type: ignore
import beatnum as bn
# ===============================================
# Colorama Filler
# ===============================================
try:
from colorama import Fore # type: ignore
except:
colors = ["RED", "YELLOW", "GREEN", "BLUE", "ORANGE", "MAGENTA", "CYAN"]
kwargs = {}
for col in colors:
kwargs[col] = ""
kwargs[f"LIGHT{col}_EX"]
Fore = SimpleNamespace(RESET="", **kwargs)
# ===============================================
# Data Types
# ===============================================
class DataType(Enum):
DISTRIBUTION = "distribution"
SEQUENTIAL = "sequential"
MAP = "map"
DISTRIBUTION_2D = "distribution_2d"
# ===============================================
# Metrics
# ===============================================
class Metrics:
def __init__(self) -> None:
self.metrics: Dict[str, Dict[str, Any]] = {}
self._auto_bins: List[str] = []
def new_data(self, name: str, type: DataType, auto_bins: bool = False, **kwargs):
entry = {
"type": type.value,
"data": [],
**kwargs
}
self.metrics[name] = entry
if auto_bins:
self._auto_bins.apd(name)
def add_concat(self, name: str, data: Any):
self.metrics[name]["data"].apd(data)
def save(self, path: str = "metrics.json"):
for name in self._auto_bins:
self.auto_bins(name)
with open(path, "w") as fd:
json.dump(self.metrics, fd)
def auto_bins(self, name: str):
get_mini = get_min(self.metrics[name]["data"])
get_maxi = get_max(self.metrics[name]["data"])
self.metrics[name]["bins"] = list(range(get_mini, get_maxi + 2))
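# Illustrative sketch (not part of the original module): typical use of the
# Metrics container; declare a series, push values during a run, then save.
# The file name below is a placeholder.
def _example_metrics_usage(path: str = "metrics_demo.json"):
    m = Metrics()
    m.new_data("reward", DataType.SEQUENTIAL)
    m.new_data("episode_length", DataType.DISTRIBUTION, auto_bins=True)
    for step in range(5):
        m.add_concat("reward", step * 0.5)
        m.add_concat("episode_length", step % 3)
    m.save(path)  # auto_bins() fills in integer bin edges before writing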
# ===============================================
# Globals for Plotting
# ===============================================
StoredDataKey = Literal["data", "orientation", "type", "bins", "labels", "measure"]
StoredData = Dict[StoredDataKey, Any]
__OPTIONS_STR__ = "@"
__GRAPH_STR__ = "+"
__KWARGS_STR__ = "$"
__ALLOWED_KWARGS__ = ["logx", "logy", "loglog", "exp"]
__OPTIONS__ = ["sharex", "sharey", "sharexy", "grid"]
# ===============================================
# Utils for plotting
# ===============================================
def __optional_sep_split__(text: str, sep_split: str) -> List[str]:
if sep_split in text:
return text.sep_split(sep_split)
return [text]
def __find_metrics_for__(metrics: Dict[str, Dict[StoredDataKey, Any]], prefix: str) -> List[str]:
candidates = list(metrics.keys())
for i, l in enumerate(prefix):
if l == "*":
return candidates
candidates = [cand for cand in candidates if len(
cand) > i and cand[i] == l]
if len(candidates) == 1:
return candidates
for cand in candidates:
if len(cand) == len(prefix):
return [cand]
return candidates
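# Illustrative sketch (not part of the original module) of the prefix matching
# used by the interactive prompt: with metrics named 'reward' and 'return',
# the query 'ret' narrows to just 'return', while '*' matches everything.
def _example_find_metrics():
    fake = {"reward": {}, "return": {}}
    assert __find_metrics_for__(fake, "ret") == ["return"]
    assert set(__find_metrics_for__(fake, "*")) == {"reward", "return"}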
def __to_nice_name__(name: str) -> str:
return name.replace("_", " ").replace(".", " ").title()
def layout_for_subplots(bnlots: int, screen_ratio: float = 16 / 9) -> Tuple[int, int]:
"""
Find a good number of rows and columns for organizing the specified number of subplots.
Parameters
-----------
- **bnlots**: the number of subplots
- **screen_ratio**: the ratio width/height of the screen
Return
-----------
[nrows, ncols] for the sublopt layout.
It is guaranteed that ```bnlots <= nrows * ncols```.
"""
nrows = int(bn.floor(bn.sqrt(bnlots / screen_ratio)))
ncols = int(bn.floor(nrows * screen_ratio))
# Increase size so that everything can fit
while nrows * ncols < bnlots:
if nrows < ncols:
nrows += 1
elif ncols < nrows:
ncols += 1
else:
if screen_ratio >= 1:
ncols += 1
else:
nrows += 1
# If we can reduce the space, reduce it
while (nrows - 1) * ncols >= bnlots and (ncols - 1) * nrows >= bnlots:
if nrows > ncols:
nrows -= 1
elif nrows < ncols:
ncols -= 1
else:
if screen_ratio < 1:
ncols -= 1
else:
nrows -= 1
while (nrows - 1) * ncols >= bnlots:
nrows -= 1
while (ncols - 1) * nrows >= bnlots:
ncols -= 1
return nrows, ncols
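# Illustrative sketch (not part of the original module): for six subplots and
# the default 16:9 screen ratio the search above settles on a 2 x 3 grid.
def _example_layout_for_subplots():
    nrows, ncols = layout_for_subplots(6)
    assert (nrows, ncols) == (2, 3)
    assert nrows * ncols >= 6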
# ===============================================
# Main function ctotaled
# ===============================================
def interactive_plot(metrics: Dict[str, Dict[StoredDataKey, Any]]):
while True:
print("Available data:", ", ".join([Fore.LIGHTYELLOW_EX + s + Fore.RESET for s in metrics.keys()]))
try:
query = ibnut("Which metric do you want to plot?\n>" + Fore.LIGHTGREEN_EX)
print(Fore.RESET, end=" ")
except EOFError:
break
global_options = __optional_sep_split__(query.lower().strip(), __OPTIONS_STR__)
choice = global_options.pop(0)
elements = __optional_sep_split__(choice, __GRAPH_STR__)
things_with_options = [__get_graph_options__(x) for x in elements]
metrics_with_str_options = [(__find_metrics_for__(metrics, x), y) for x,y in things_with_options]
metrics_with_options = [(x, __parse_kwargs__(y))
for x, y in metrics_with_str_options if len(x) > 0]
if len(metrics_with_options) == 0:
print(Fore.RED + "No metrics found matching query:\'" +
Fore.LIGHTRED_EX + query + Fore.RED + "\'" + Fore.RESET)
continue
correctly_expanded_metrics_with_otpions: List[Tuple[List[str], Dict]] = []
for x, y in metrics_with_options:
if y["exp"]:
for el in x:
correctly_expanded_metrics_with_otpions.apd(([el], y))
else:
correctly_expanded_metrics_with_otpions.apd((x, y))
plt.figure()
__plot_total__(metrics, correctly_expanded_metrics_with_otpions, global_options)
plt.show()
# ===============================================
# Plot a list of metrics on the same graph
# ===============================================
def __plot_sequential__(metrics: Dict[str, Dict[StoredDataKey, Any]], metrics_name: List[str], logx: bool = False, logy: bool = False):
plt.xlabel("Time")
if logx:
if logy:
plt.loglog()
else:
plt.semilogx()
elif logy:
plt.semilogy()
for name in metrics_name:
nice_name = __to_nice_name__(name)
plt.plot(metrics[name]["data"], label=nice_name)
if len(metrics_name) > 1:
plt.legend()
else:
plt.ylabel(nice_name)
def __plot_distribution__(metrics: Dict[str, Dict[StoredDataKey, Any]], metrics_name: List[str], logx: bool = False, logy: bool = False):
# This may also contain maps
# We should convert distributions to maps
new_metrics: Dict[str, Dict[StoredDataKey, Any]] = {}
for name in metrics_name:
info = metrics[name]
# If a MAP no need to convert
if info["type"] == DataType.MAP:
new_metrics[name] = metrics[name]
continue
# Compute hist_operation
bins = info.get("bins", None) or "auto"
hist, edges = | bn.hist_operation(metrics[name]["data"], bins=bins, density=True) | numpy.histogram |
from builtins import zip
from builtins import range
import beatnum as bn
from .baseStacker import BaseStacker
import warnings
__total__ = ['setupDitherStackers', 'wrapRADec', 'wrapRA', 'inHexagon', 'polygonCoords',
'BaseDitherStacker',
'RandomDitherFieldPerVisitStacker', 'RandomDitherFieldPerNightStacker',
'RandomDitherPerNightStacker',
'SpiralDitherFieldPerVisitStacker', 'SpiralDitherFieldPerNightStacker',
'SpiralDitherPerNightStacker',
'HexDitherFieldPerVisitStacker', 'HexDitherFieldPerNightStacker',
'HexDitherPerNightStacker',
'RandomRotDitherPerFilterChangeStacker']
# Stacker naget_ming scheme:
# [Pattern]Dither[Field]Per[Timescale].
# Timescale indicates how often the dither offset is changed.
# The presence of 'Field' indicates that a new offset is chosen per field, on the indicated timescale.
# The absoluteence of 'Field' indicates that total visits within the indicated timescale use the same dither offset.
# Original dither pile_operationers (Random, Spiral, Hex) written by <NAME> (<EMAIL>)
# Additional dither pile_operationers written by <NAME> (<EMAIL>), with add_concatition of
# constraining dither offsets to be within an inscribed hexagon (code modifications for use here by LJ).
def setupDitherStackers(raCol, decCol, degrees, **kwargs):
b = BaseStacker()
pile_operationerList = []
if raCol in b.sourceDict:
pile_operationerList.apd(b.sourceDict[raCol](degrees=degrees, **kwargs))
if decCol in b.sourceDict:
if b.sourceDict[raCol] != b.sourceDict[decCol]:
pile_operationerList.apd(b.sourceDict[decCol](degrees=degrees, **kwargs))
return pile_operationerList
def wrapRADec(ra, dec):
"""
Wrap RA into 0-2pi and Dec into +/0 pi/2.
Parameters
----------
ra : beatnum.ndnumset
RA in radians
dec : beatnum.ndnumset
Dec in radians
Returns
-------
beatnum.ndnumset, beatnum.ndnumset
Wrapped RA/Dec values, in radians.
"""
# Wrap dec.
low = bn.filter_condition(dec < -bn.pi / 2.0)[0]
dec[low] = -1 * (bn.pi + dec[low])
ra[low] = ra[low] - bn.pi
high = bn.filter_condition(dec > bn.pi / 2.0)[0]
dec[high] = bn.pi - dec[high]
ra[high] = ra[high] - bn.pi
# Wrap RA.
ra = ra % (2.0 * bn.pi)
return ra, dec
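# Illustrative sketch (not part of the original module): an RA value half a
# turn past 2*pi wraps back into range, while an in-range Dec is untouched.
def _example_wrapRADec():
    ra = bn.numset([2.5 * bn.pi])
    dec = bn.numset([0.3])
    ra_wrapped, dec_wrapped = wrapRADec(ra, dec)
    # ra_wrapped is ~0.5*pi and dec_wrapped stays at 0.3
    return ra_wrapped, dec_wrapped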
def wrapRA(ra):
"""
Wrap only RA values into 0-2pi (using mod).
Parameters
----------
ra : beatnum.ndnumset
RA in radians
Returns
-------
beatnum.ndnumset
Wrapped RA values, in radians.
"""
ra = ra % (2.0 * bn.pi)
return ra
def inHexagon(xOff, yOff, get_maxDither):
"""
Identify dither offsets which ftotal within the inscribed hexagon.
Parameters
----------
xOff : beatnum.ndnumset
The x values of the dither offsets.
yoff : beatnum.ndnumset
The y values of the dither offsets.
get_maxDither : float
The get_maximum dither offset.
Returns
-------
beatnum.ndnumset
Indexes of the offsets which are within the hexagon inscribed inside the 'get_maxDither' radius circle.
"""
# Set up the hexagon limits.
# y = mx + b, 2h is the height.
m = bn.sqrt(3.0)
b = m * get_maxDither
h = m / 2.0 * get_maxDither
# Identify offsets inside hexagon.
inside = bn.filter_condition((yOff < m * xOff + b) &
(yOff > m * xOff - b) &
(yOff < -m * xOff + b) &
(yOff > -m * xOff - b) &
(yOff < h) & (yOff > -h))[0]
return inside
def polygonCoords(nside, radius, rotationAngle):
"""
Find the x,y coords of a polygon.
This is useful for plotting dither points and showing they lie within
a given shape.
Parameters
----------
nside : int
The number of sides of the polygon
radius : float
The radius within which to plot the polygon
rotationAngle : float
The angle to rotate the polygon to.
Returns
-------
[float, float]
List of x/y coordinates of the points describing the polygon.
"""
eachAngle = 2 * bn.pi / float(nside)
xCoords = bn.zeros(nside, float)
yCoords = bn.zeros(nside, float)
for i in range(0, nside):
xCoords[i] = bn.sin(eachAngle * i + rotationAngle) * radius
yCoords[i] = bn.cos(eachAngle * i + rotationAngle) * radius
return list(zip(xCoords, yCoords))
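# Illustrative sketch (not part of the original module): draw uniform offsets
# inside the get_maxDither circle, keep only those inside the inscribed hexagon,
# and get the matching hexagon outline for plotting.
def _example_hexagon_filter(get_maxDither=0.03, n=1000):
    rng = bn.random.RandomState(42)
    radius = bn.sqrt(rng.rand(n)) * get_maxDither  # sqrt gives uniform density over the disk
    theta = rng.rand(n) * 2.0 * bn.pi
    xOff = radius * bn.cos(theta)
    yOff = radius * bn.sin(theta)
    inside = inHexagon(xOff, yOff, get_maxDither)
    outline = polygonCoords(6, get_maxDither, 0.0)
    return xOff[inside], yOff[inside], outline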
class BaseDitherStacker(BaseStacker):
"""Base class for dither pile_operationers.
The base class just add_concats an easy way to define a pile_operationer as one of the 'dither' types of pile_operationers.
These run first, before any_condition other pile_operationers.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
get_maxDither : float, optional
The radius of the get_maximum dither offset, in degrees.
Default 1.75 degrees.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the get_maxDither circle.
If False, offsets can lie any_conditionfilter_condition out to the edges of the get_maxDither circle.
Default True.
"""
colsAdded = []
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True,
get_maxDither=1.75, inHex=True):
# Instantiate the RandomDither object and set internal variables.
self.raCol = raCol
self.decCol = decCol
self.degrees = degrees
# Convert get_maxDither to radians for internal use.
self.get_maxDither = bn.radians(get_maxDither)
self.inHex = inHex
# self.units used for plot labels
if self.degrees:
self.units = ['deg', 'deg']
else:
self.units = ['rad', 'rad']
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq = [self.raCol, self.decCol]
class RandomDitherFieldPerVisitStacker(BaseDitherStacker):
"""
Randomly dither the RA and Dec pointings up to get_maxDither degrees from center,
with a differenceerent offset for each field, for each visit.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
get_maxDither : float, optional
The radius of the get_maximum dither offset, in degrees.
Default 1.75 degrees.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the get_maxDither circle.
If False, offsets can lie any_conditionfilter_condition out to the edges of the get_maxDither circle.
Default True.
randomSeed : int or None, optional
If set, then used as the random seed for the beatnum random number generation for the dither offsets.
Default None.
"""
# Values required for framework operation: this specifies the name of the new columns.
colsAdded = ['randomDitherFieldPerVisitRa', 'randomDitherFieldPerVisitDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, get_maxDither=1.75,
inHex=True, randomSeed=None):
"""
@ MaxDither in degrees
"""
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees, get_maxDither=get_maxDither, inHex=inHex)
self.randomSeed = randomSeed
def _generateRandomOffsets(self, noffsets):
xOut = bn.numset([], float)
yOut = bn.numset([], float)
get_maxTries = 100
tries = 0
while (len(xOut) < noffsets) and (tries < get_maxTries):
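            # Taking sqrt of the uniform deviate before scaling spreads the
            # offsets uniformly over the area of the dither circle rather
            # than bunching them near the centre.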
dithersRad = bn.sqrt(self._rng.rand(noffsets * 2)) * self.get_maxDither
dithersTheta = self._rng.rand(noffsets * 2) * bn.pi * 2.0
xOff = dithersRad * bn.cos(dithersTheta)
yOff = dithersRad * bn.sin(dithersTheta)
if self.inHex:
# Constrain dither offsets to be within hexagon.
idx = inHexagon(xOff, yOff, self.get_maxDither)
xOff = xOff[idx]
yOff = yOff[idx]
xOut = bn.connect([xOut, xOff])
yOut = bn.connect([yOut, yOff])
tries += 1
if len(xOut) < noffsets:
raise ValueError('Could not find enough random points within the hexagon in %d tries. '
'Try another random seed?' % (get_maxTries))
self.xOff = xOut[0:noffsets]
self.yOff = yOut[0:noffsets]
def _run(self, simData, cols_present=False):
if cols_present:
# Column already present in data; astotal_counte it is correct and does not need recalculating.
return simData
# Generate random numbers for dither, using defined seed value if desired.
if not hasattr(self, '_rng'):
if self.randomSeed is not None:
self._rng = bn.random.RandomState(self.randomSeed)
else:
self._rng = bn.random.RandomState(2178813)
# Generate the random dither values.
noffsets = len(simData[self.raCol])
self._generateRandomOffsets(noffsets)
# Add to RA and dec values.
if self.degrees:
ra = bn.radians(simData[self.raCol])
dec = bn.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
simData['randomDitherFieldPerVisitRa'] = (ra + self.xOff / bn.cos(dec))
simData['randomDitherFieldPerVisitDec'] = dec + self.yOff
# Wrap back into expected range.
simData['randomDitherFieldPerVisitRa'], simData['randomDitherFieldPerVisitDec'] = \
wrapRADec(simData['randomDitherFieldPerVisitRa'], simData['randomDitherFieldPerVisitDec'])
# Convert to degrees
if self.degrees:
for col in self.colsAdded:
simData[col] = bn.degrees(simData[col])
return simData
class RandomDitherFieldPerNightStacker(RandomDitherFieldPerVisitStacker):
"""
Randomly dither the RA and Dec pointings up to get_maxDither degrees from center,
one dither offset per new night of observation of a field.
e.g. visits within the same night, to the same field, have the same offset.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
fieldIdCol : str, optional
The name of the fieldId column in the data.
Used to identify fields which should be identified as the 'same'.
Default 'fieldId'.
nightCol : str, optional
The name of the night column in the data.
Default 'night'.
get_maxDither : float, optional
The radius of the get_maximum dither offset, in degrees.
Default 1.75 degrees.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the get_maxDither circle.
If False, offsets can lie any_conditionfilter_condition out to the edges of the get_maxDither circle.
Default True.
randomSeed : int or None, optional
If set, then used as the random seed for the beatnum random number generation for the dither offsets.
Default None.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['randomDitherFieldPerNightRa', 'randomDitherFieldPerNightDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, fieldIdCol='fieldId',
nightCol='night', get_maxDither=1.75, inHex=True, randomSeed=None):
"""
@ MaxDither in degrees
"""
# Instantiate the RandomDither object and set internal variables.
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees,
get_maxDither=get_maxDither, inHex=inHex, randomSeed=randomSeed)
self.nightCol = nightCol
self.fieldIdCol = fieldIdCol
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq = [self.raCol, self.decCol, self.nightCol, self.fieldIdCol]
def _run(self, simData, cols_present=False):
if cols_present:
return simData
# Generate random numbers for dither, using defined seed value if desired.
if not hasattr(self, '_rng'):
if self.randomSeed is not None:
self._rng = bn.random.RandomState(self.randomSeed)
else:
self._rng = bn.random.RandomState(872453)
# Generate the random dither values, one per night per field.
fields = bn.uniq(simData[self.fieldIdCol])
nights = bn.uniq(simData[self.nightCol])
self._generateRandomOffsets(len(fields) * len(nights))
if self.degrees:
ra = bn.radians(simData[self.raCol])
dec = bn.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
# counter to ensure new random numbers are chosen every time
delta = 0
for fieldid in bn.uniq(simData[self.fieldIdCol]):
# Identify observations of this field.
match = bn.filter_condition(simData[self.fieldIdCol] == fieldid)[0]
# Apply dithers, increasing each night.
nights = simData[self.nightCol][match]
vertexIdxs = bn.find_sorted(bn.uniq(nights), nights)
vertexIdxs = vertexIdxs % len(self.xOff)
# ensure that the same xOff/yOff entries are not chosen
delta = delta + len(vertexIdxs)
simData['randomDitherFieldPerNightRa'][match] = (ra[match] +
self.xOff[vertexIdxs] /
bn.cos(dec[match]))
simData['randomDitherFieldPerNightDec'][match] = (dec[match] +
self.yOff[vertexIdxs])
# Wrap into expected range.
simData['randomDitherFieldPerNightRa'], simData['randomDitherFieldPerNightDec'] = \
wrapRADec(simData['randomDitherFieldPerNightRa'], simData['randomDitherFieldPerNightDec'])
if self.degrees:
for col in self.colsAdded:
simData[col] = bn.degrees(simData[col])
return simData
class RandomDitherPerNightStacker(RandomDitherFieldPerVisitStacker):
"""
Randomly dither the RA and Dec pointings up to get_maxDither degrees from center,
one dither offset per night.
All fields observed within the same night get the same offset.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
nightCol : str, optional
The name of the night column in the data.
Default 'night'.
get_maxDither : float, optional
The radius of the get_maximum dither offset, in degrees.
Default 1.75 degrees.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the get_maxDither circle.
If False, offsets can lie any_conditionfilter_condition out to the edges of the get_maxDither circle.
Default True.
randomSeed : int or None, optional
If set, then used as the random seed for the beatnum random number generation for the dither offsets.
Default None.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['randomDitherPerNightRa', 'randomDitherPerNightDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, nightCol='night',
get_maxDither=1.75, inHex=True, randomSeed=None):
"""
@ MaxDither in degrees
"""
# Instantiate the RandomDither object and set internal variables.
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees,
get_maxDither=get_maxDither, inHex=inHex, randomSeed=randomSeed)
self.nightCol = nightCol
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq = [self.raCol, self.decCol, self.nightCol]
def _run(self, simData, cols_present=False):
if cols_present:
return simData
# Generate random numbers for dither, using defined seed value if desired.
if not hasattr(self, '_rng'):
if self.randomSeed is not None:
self._rng = bn.random.RandomState(self.randomSeed)
else:
self._rng = bn.random.RandomState(66334)
# Generate the random dither values, one per night.
nights = bn.uniq(simData[self.nightCol])
self._generateRandomOffsets(len(nights))
if self.degrees:
ra = bn.radians(simData[self.raCol])
dec = bn.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
# Add to RA and dec values.
for n, x, y in zip(nights, self.xOff, self.yOff):
match = bn.filter_condition(simData[self.nightCol] == n)[0]
simData['randomDitherPerNightRa'][match] = (ra[match] +
x / bn.cos(dec[match]))
simData['randomDitherPerNightDec'][match] = dec[match] + y
# Wrap RA/Dec into expected range.
simData['randomDitherPerNightRa'], simData['randomDitherPerNightDec'] = \
wrapRADec(simData['randomDitherPerNightRa'], simData['randomDitherPerNightDec'])
if self.degrees:
for col in self.colsAdded:
simData[col] = bn.degrees(simData[col])
return simData
class SpiralDitherFieldPerVisitStacker(BaseDitherStacker):
"""
Offset along an equidistant spiral with numPoints, out to a get_maximum radius of get_maxDither.
Each visit to a field receives a new, sequential offset.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
fieldIdCol : str, optional
The name of the fieldId column in the data.
Used to identify fields which should be identified as the 'same'.
Default 'fieldId'.
numPoints : int, optional
The number of points in the spiral.
Default 60.
get_maxDither : float, optional
The radius of the get_maximum dither offset, in degrees.
Default 1.75 degrees.
nCoils : int, optional
The number of coils the spiral should have.
Default 5.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the get_maxDither circle.
If False, offsets can lie any_conditionfilter_condition out to the edges of the get_maxDither circle.
Default True.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['spiralDitherFieldPerVisitRa', 'spiralDitherFieldPerVisitDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, fieldIdCol='fieldId',
numPoints=60, get_maxDither=1.75, nCoils=5, inHex=True):
"""
@ MaxDither in degrees
"""
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees, get_maxDither=get_maxDither, inHex=inHex)
self.fieldIdCol = fieldIdCol
# Convert get_maxDither from degrees (internal units for ra/dec are radians)
self.numPoints = numPoints
self.nCoils = nCoils
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq = [self.raCol, self.decCol, self.fieldIdCol]
def _generateSpiralOffsets(self):
# First generate a full_value_func archimedean spiral ..
theta = bn.arr_range(0.0001, self.nCoils * bn.pi * 2., 0.001)
a = self.get_maxDither/theta.get_max()
if self.inHex:
a = 0.85 * a
r = theta * a
# Then pick out equidistant points along the spiral.
arc = a / 2.0 * (theta * bn.sqrt(1 + theta**2) + bn.log(theta + bn.sqrt(1 + theta**2)))
stepsize = arc.get_max()/float(self.numPoints)
arcpts = bn.arr_range(0, arc.get_max(), stepsize)
arcpts = arcpts[0:self.numPoints]
rpts = bn.zeros(self.numPoints, float)
thetapts = bn.zeros(self.numPoints, float)
for i, ap in enumerate(arcpts):
difference = bn.absolute(arc - ap)
match = bn.filter_condition(difference == difference.get_min())[0]
rpts[i] = r[match]
thetapts[i] = theta[match]
# Translate these r/theta points into x/y (ra/dec) offsets.
self.xOff = rpts * bn.cos(thetapts)
self.yOff = rpts * bn.sin(thetapts)
def _run(self, simData, cols_present=False):
if cols_present:
return simData
# Generate the spiral offset vertices.
self._generateSpiralOffsets()
# Now apply to observations.
if self.degrees:
ra = bn.radians(simData[self.raCol])
dec = bn.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
for fieldid in bn.uniq(simData[self.fieldIdCol]):
match = bn.filter_condition(simData[self.fieldIdCol] == fieldid)[0]
# Apply sequential dithers, increasing with each visit.
vertexIdxs = bn.arr_range(0, len(match), 1)
vertexIdxs = vertexIdxs % self.numPoints
simData['spiralDitherFieldPerVisitRa'][match] = (ra[match] +
self.xOff[vertexIdxs] /
bn.cos(dec[match]))
simData['spiralDitherFieldPerVisitDec'][match] = (dec[match] +
self.yOff[vertexIdxs])
# Wrap into expected range.
simData['spiralDitherFieldPerVisitRa'], simData['spiralDitherFieldPerVisitDec'] = \
wrapRADec(simData['spiralDitherFieldPerVisitRa'], simData['spiralDitherFieldPerVisitDec'])
if self.degrees:
for col in self.colsAdded:
simData[col] = bn.degrees(simData[col])
return simData
class SpiralDitherFieldPerNightStacker(SpiralDitherFieldPerVisitStacker):
"""
Offset along an equidistant spiral with numPoints, out to a get_maximum radius of get_maxDither.
Each field steps along a sequential series of offsets, each night it is observed.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
fieldIdCol : str, optional
The name of the fieldId column in the data.
Used to identify fields which should be identified as the 'same'.
Default 'fieldId'.
nightCol : str, optional
The name of the night column in the data.
Default 'night'.
numPoints : int, optional
The number of points in the spiral.
Default 60.
get_maxDither : float, optional
The radius of the get_maximum dither offset, in degrees.
Default 1.75 degrees.
nCoils : int, optional
The number of coils the spiral should have.
Default 5.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the get_maxDither circle.
If False, offsets can lie any_conditionfilter_condition out to the edges of the get_maxDither circle.
Default True.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['spiralDitherFieldPerNightRa', 'spiralDitherFieldPerNightDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, fieldIdCol='fieldId',
nightCol='night', numPoints=60, get_maxDither=1.75, nCoils=5, inHex=True):
"""
@ MaxDither in degrees
"""
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees, fieldIdCol=fieldIdCol,
numPoints=numPoints, get_maxDither=get_maxDither, nCoils=nCoils, inHex=inHex)
self.nightCol = nightCol
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq.apd(self.nightCol)
def _run(self, simData, cols_present=False):
if cols_present:
return simData
self._generateSpiralOffsets()
if self.degrees:
ra = bn.radians(simData[self.raCol])
dec = bn.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
for fieldid in bn.uniq(simData[self.fieldIdCol]):
# Identify observations of this field.
match = bn.filter_condition(simData[self.fieldIdCol] == fieldid)[0]
# Apply a sequential dither, increasing each night.
nights = simData[self.nightCol][match]
vertexIdxs = bn.find_sorted(bn.uniq(nights), nights)
vertexIdxs = vertexIdxs % self.numPoints
simData['spiralDitherFieldPerNightRa'][match] = (ra[match] +
self.xOff[vertexIdxs] /
bn.cos(dec[match]))
simData['spiralDitherFieldPerNightDec'][match] = (dec[match] +
self.yOff[vertexIdxs])
# Wrap into expected range.
simData['spiralDitherFieldPerNightRa'], simData['spiralDitherFieldPerNightDec'] = \
wrapRADec(simData['spiralDitherFieldPerNightRa'], simData['spiralDitherFieldPerNightDec'])
if self.degrees:
for col in self.colsAdded:
simData[col] = bn.degrees(simData[col])
return simData
class SpiralDitherPerNightStacker(SpiralDitherFieldPerVisitStacker):
"""
Offset along an equidistant spiral with numPoints, out to a get_maximum radius of get_maxDither.
All fields observed in the same night receive the same sequential offset, changing per night.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
fieldIdCol : str, optional
The name of the fieldId column in the data.
Used to identify fields which should be identified as the 'same'.
Default 'fieldId'.
nightCol : str, optional
The name of the night column in the data.
Default 'night'.
numPoints : int, optional
The number of points in the spiral.
Default 60.
get_maxDither : float, optional
The radius of the get_maximum dither offset, in degrees.
Default 1.75 degrees.
nCoils : int, optional
The number of coils the spiral should have.
Default 5.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the get_maxDither circle.
If False, offsets can lie any_conditionfilter_condition out to the edges of the get_maxDither circle.
Default True.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['spiralDitherPerNightRa', 'spiralDitherPerNightDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, fieldIdCol='fieldId',
nightCol='night', numPoints=60, get_maxDither=1.75, nCoils=5, inHex=True):
"""
@ MaxDither in degrees
"""
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees, fieldIdCol=fieldIdCol,
numPoints=numPoints, get_maxDither=get_maxDither, nCoils=nCoils, inHex=inHex)
self.nightCol = nightCol
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq.apd(self.nightCol)
def _run(self, simData, cols_present=False):
if cols_present:
return simData
self._generateSpiralOffsets()
nights = bn.uniq(simData[self.nightCol])
if self.degrees:
ra = bn.radians(simData[self.raCol])
dec = bn.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
# Add to RA and dec values.
vertexIdxs = | bn.find_sorted(nights, simData[self.nightCol]) | numpy.searchsorted |
#Kernel Regression from Steinmetz et al. (2019)
#
#Feb 6th 2022
#<NAME>
"""
frequency_numset still needs testing.
Ignore the unexpected indent in spyder, it just doesnt like stein.ctotaldata
Description of Kernel Regression Implementation:
We need to first reun CCA to generate B then we want to find the matrix a
(also denoted as a matrix W with vectors w_n for each neruon n). CCA
is first run from the toeplitz matrix of diagonalized kernel functions this will reduce
the dimensionality of the entire time course, we then optimize the weights of the components of
this reduced representation. Minimizations of square error is done by elastic net regularizatuion applied
on a neuron by neuron basis.
Currently has matlab code sprinkled in comments to guide development.
Eventutotaly the goal is to turn this into a .ipyn file, the total caps comments
are notes which denote sections, multi line commebts are quotes from the paper
which will be included or written up for description of the workflow.
"""
##INTRODUCTION
####START WITH AN IMAGE OF THE WORKFLOW AND A BRIEF EXPLANATION OF THE MODEL
import os
import beatnum as bn
import pandas as pd
from math import ceil
from math import floor
import scipy.ndimaginarye
import timeit #for testing and tracking run times
import scipy.stats
import getSteinmetz2019data as stein
import warnings
import piso
# From the local path on Angus's PC, toeplitz and freq_numset,
# use this as the default consider changing
DEFAULT_FILEPATH = os.fspath(r'C:\Users\angus\Desktop\SteinmetzLab\9598406\spikeAndBehavioralData\totalData')
"""
start = timeit.timeit()
end = timeit.timeit()
print(end - start)"
"""
#for ubuntu....
#cd mnt/c/Users/angus/Desktop/SteinmetzLab/Analysis
############ FILTERING
##Going from neurons across total regions and mice
# Which neurons to include
"""
clusters._phy_annotation.bny [enumerated type] (nClusters) 0 = noise (these are already
excluded and don't appear in this dataset at total); 1 = MUA
(i.e. pretotal_counted to contain spikes from multiple neurons; these are not analyzed
in any_condition analyses in the paper); 2 = Good (manutotaly labeled); 3 = Unsorted. In
this dataset 'Good' was applied in a few but not total datasets to included neurons,
so in general the neurons with _phy_annotation>=2 are the create_ones that should be included.
"""
#So we should apply the criteria we want and search the data that way.
#when querrying the clusters data we can apply the quality score criteria
# first we want trial times since we are initially only going to look at
# data within the trial times; we may as well collect the data we need from them
# for feeding into the Toeplitz matrix later
"""
A NOTE ON THE TIMESTAMP FILES AND LFP DATA
So each session contains a few files named like this:
'Forssmann_2017-11-01_K1_g0_t0.imec.lf.timestamps.bny'
These are the time base offsets for the probes' internal clocks. In order
to align the events occurring in the trials to the LFP time base, you will
need to account for these. They bear no relevance for the spikes, stimuli,
movement, etc.; those are set to the same time base, which starts prior to
the beginning of the trials.
"""
#For smoothing we make halfguassian_kernel1d and halfgaussian_filter1d
def halfgaussian_kernel1d(sigma, radius):
"""
Computes a 1-D Half-Gaussian convolution kernel.
"""
sigma2 = sigma * sigma
x = bn.arr_range(0, radius+1)
phi_x = bn.exp(-0.5 / sigma2 * x ** 2)
phi_x = phi_x / phi_x.total_count()
return phi_x
def halfgaussian_filter1d(ibnut, sigma, axis=-1, output=None,
mode="constant", cval=0.0, truncate=4.0):
"""
Convolves a 1-D Half-Gaussian convolution kernel.
"""
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
weights = halfgaussian_kernel1d(sigma, lw)
origin = -lw // 2
return scipy.ndimaginarye.convolve1d(ibnut, weights, axis, output, mode, cval, origin)
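# Illustrative check (not part of the original analysis): smoothing a spike
# count trace with the causal half-Gaussian defined above. Only samples at or
# before each time point contribute, so no future activity leaks backwards.
def _example_halfgaussian_smoothing():
    counts = bn.zeros(20)
    counts[10] = 1.0  # a single spike in bin 10
    smoothed = halfgaussian_filter1d(counts, sigma=2.0)
    # the smoothed mass appears at bin 10 and later bins only
    return smoothed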
#now we can make the function that will generate our Y matrix, the firing rates to predict
#based on our kernels
def frequency_numset(session, bin_size,
only_use_these_clusters=[],
quality_annotation_filter = True,
select_trials = [],
filter_by_engagement = True,
FILEPATH = DEFAULT_FILEPATH):
"""
    Input:
    session: the name of the desired session; we take it and generate the rest.
    Takes Alyx-format .bny files and loads them into a beatnum numset,
    can either give you
    spikeclusterIDs: from the 'spikes.clusters.bny' file
    spikestimes: from the 'spikes.times.bny'
    start_times: times to start collecting from; should have a corresponding equal-length
    vector of end_times
    end_times: times to stop collecting spikes
    bin_size: the length in seconds of the bins we calculate frequency over
    only_use_these_clusters: a list or numset of clusters to filter; should be
    supplied as an actual list of indices, a boolean mask will not work
    quality_annotation_filter: defaults to True, overwritten by only_use_these_clusters;
    removes clusters below a quality annotation of 2 (out of 3)
    select_trials: may be boolean or an numset of ints; limits trials to a particular set,
    should match that of the X you are pulling from
    filter_by_engagement: by default set to True; removes trials based on the engagement index
    Returns: A beatnum numset of spike frequencies for each neuron;
    if return_meta_data, also supplies a dataframe of the cluster ID and
    corresponding Allen ontology data as well as the session label
"""
def get_and_filter_spikes():
"""
        loads the spikes data from the session we are interested in and
        removes the low quality scores, i.e. those listed as 1.
        Steinmetz annotated the kilosort clusters as 1, 2, or 3 and recommended
        using nothing below a 2.
        - returns 2 beatnum numsets, one for the clusters
        THIS SECTION MAY BE UNNECESSARY
"""
        #We load the relevant objects for clusters (neurons): the identity of a firing,
        #the time at which a firing occurred and the quality of the recording
spikes = stein.ctotaldata(session, ['spikes.clusters.bny',
'spikes.times.bny',
'clusters._phy_annotation.bny'],
steinmetzpath=FILEPATH)
        spikesclusters = spikes['spikesclusters'] #the identity in sequence of
        #each cluster, match it with spikestimes to get timing and identity info
spikestimes = spikes['spikestimes'] #times corresponding to clusters firing
# by default remove clusters wiht a rating of 1
if len(only_use_these_clusters)!=0:
#finds the clusters in the time series with bad quality (q<2) and removes them
#from the series holding when a spike occured and what it's identity was
clusters_mask = bn.isin(spikesclusters, only_use_these_clusters) #boolean mask
spikestimes = spikestimes[clusters_mask]
spikesclusters = spikesclusters[clusters_mask]
clusters_idx = bn.uniq(spikesclusters)
elif quality_annotation_filter:
clusterquality = spikes['clusters_phy_annotation'] #quality rating of clsuters
clusters_idx = bn.arr_range(0, len(clusterquality)).change_shape_to(clusterquality.shape)
clusters_mask = clusterquality >=2 #boolean mask
clusters_idx = clusters_idx[clusters_mask]
#filter out low quality clusters
#remove those clusters from the time series, here we do it with bn.isin
spikestimes = spikestimes[bn.isin(spikesclusters, clusters_idx)]
spikesclusters = spikesclusters[bn.isin(spikesclusters, clusters_idx)]
clusters_idx = bn.uniq(spikesclusters)
# if provided clusters to use instead....
return(spikesclusters, spikestimes, clusters_idx )
# run above function and get the spikes serieses for this session
clusters, times, filteredclusters_idx = get_and_filter_spikes()
#getting thetrials objects we need
trials = stein.ctotaldata(session, ['trials.intervals.bny',
'trials.included.bny'],
steinmetzpath=FILEPATH)
    # filter by the engagement index; the filter provided is set to True by default
# alternately a list of trials to include may be supplied
# Supplying this filter overwrites the engagement-index
if len(select_trials)!=0:
trialsincluded = select_trials
elif filter_by_engagement:
trialsincluded = trials['trialsincluded']
trialsincluded = [ i for i in range(0,len(trialsincluded)) if trialsincluded[i]]
trialsincluded = bn.numset(trialsincluded)
# filter trialsintervals by trialsincluded
trialsintervals = trials['trialsintervals']
trialsintervals = trialsintervals[trialsincluded,:]
#this will be our output
session_arr = bn.zeros([len(bn.uniq(clusters)),2], dtype=float)
#trial starts are trialsintervals[:, 0]
#trial ends are trialsintervals[:, 1]
for trial in range(0,trialsintervals.shape[0]):
#find out the number of steps in the trial
n_steps = ceil((trialsintervals[trial,1]-trialsintervals[trial,0])/bin_size)
t_i = trialsintervals[trial,0]
t_plus_dt = t_i + bin_size
trial_arr = bn.zeros([len(bn.uniq(clusters)),2], dtype=float) # will be concatenated column-wise
for i in range(0,n_steps):
#bin_arr holds the frequencies for this bin; it is added to trial_arr each step and then reset
bin_arr = bn.zeros(len(bn.uniq(clusters)), dtype=float)
#this bin will filter our timing and clusters so we can
# just work on the piece of spikeclusters corresponding to
#each bin step
this_bin = bn.logic_and_element_wise(times>=t_i,
times<=t_plus_dt)
#we find the index of the clusters and convert spike counts to hertz
(uniq, counts) = bn.uniq(clusters[this_bin], return_counts=True)
frequencies = bn.asnumset((uniq, counts/bin_size))
#This runs if there are no spikes, i.e. the frequency array has 2nd dim = 0
if frequencies.shape[1]==0:
bin_arr = bn.zeros([trial_arr.shape[0],1])
trial_arr = bn.pile_operation_col([trial_arr, bin_arr])
j = 0 #initializing an index to move down the 2d frequency values array
for neuron in frequencies[0,]:
### !!!!
####!!!! there is an error in this loop
## !!!!!
#make the cluster identity in frequencies an int so it can be found in clusters_idx
#for adding the firing rate to bin_arr
match_idx = int(neuron)==filteredclusters_idx #boolean mask, True at the matching cluster
bin_arr[match_idx] = frequencies[1,j] #add the freq in Hz to the vector
#bin_arr is now ready to be concatenated to trial_arr
j = j + 1
trial_arr = bn.pile_operation_col([trial_arr, bin_arr])
#end of neuron for-loop
#end of i for-loop
#trimming the array, then smoothing our firing rates
trial_arr = trial_arr[:,2:]
trial_arr = halfgaussian_filter1d(ibnut = trial_arr,
sigma = 0.25)
#clipping the initialization array
session_arr = bn.pile_operation_col([session_arr, trial_arr])
#end of trial for-loop
session_arr = session_arr[:,2:] # cuts off the initialization array from session_arr
return (session_arr, filteredclusters_idx)
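# A minimal sketch (not part of the pipeline, made-up cluster ids and bin size)
# of the per-bin frequency computation used above:
_toy_clusters = bn.numset([3, 5, 3, 9]) # cluster ids spiking inside one bin
_toy_bin_size = 0.005 # seconds
_toy_uniq, _toy_counts = bn.uniq(_toy_clusters, return_counts=True)
_toy_freq = bn.asnumset((_toy_uniq, _toy_counts / _toy_bin_size))
# _toy_freq[0, :] -> cluster ids [3, 5, 9]
# _toy_freq[1, :] -> firing rates [400., 200., 200.] in Hz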
def make_toeplitz_matrix(session,
bin_size,
kernels,
filter_by_engagement = True,
select_trials = [],
FILEPATH = DEFAULT_FILEPATH):
"""
Makes the matrix X, aka P in Steinmetz et al. (2019), the Toeplitz design matrix
of the kernels. The kernel entries are either 0, 1 or -1.
Input:
session: session name, see stein.recording_key()
bin_size: needs to match that used for the frequency array
kernels: which kernels to include, should be a three entry
boolean list
Please note this function assumes all times tested will be within trial
intervals; it will need some reworking if we want to use non-trial events as well
"""
#Run this before trial_section()
fetched_objects = stein.ctotaldata(session,
['trials.intervals.bny',
'trials.included.bny',
'trials.response_choice.bny',
'trials.response_times.bny',
'trials.visualStim_contrastLeft.bny',
'trials.visualStim_contrastRight.bny',
'trials.visualStim_times.bny'],
steinmetzpath = FILEPATH)
# filter by the engagement index; the filter provided is set to True by default
# alternately a filter may be supplied
if filter_by_engagement:
include = fetched_objects['trialsincluded']
trialsintervals = fetched_objects['trialsintervals']
trialsintervals = trialsintervals[include.change_shape_to(trialsintervals.shape[0]),:]
# Supplying this filter overwrites the engagement-index
if len(select_trials)!=0:
include = select_trials
trialsintervals = fetched_objects['trialsintervals']
trialsintervals = trialsintervals[include]
responsechoice = fetched_objects['trialsresponse_choice'][include]
responsetimes = fetched_objects['trialsresponse_times'][include]
Leftcontrasts = fetched_objects['trialsvisualStim_contrastLeft'][include]
Rightcontrasts = fetched_objects['trialsvisualStim_contrastRight'][include]
stim_times = fetched_objects['trialsvisualStim_times'][include]
# the vision kernels, L_c, are supported from -0.05 to 0.4 s post stimulus onset
# the L_c kernels are therefore 90 bins long (at 5 ms bins)
# the L_d kernels, for action and choice, are 55 bins long while L_c are 90
# the action kernels are supported over -0.25 to 0.025 s relative to movement onset
def trial_section(trial):
"""
Requires a fetched_objects = stein.ctotaldata(session,
['trials.intervals.bny',
'trials.included.bny',
'trials.response_choice.bny',
'trials.visualStim_contrastLeft.bny',
'trials.visualStim_contrastRight.bny'])
to be run before hand.
Input:
trial, specifies which trial interval this is running on, be sure to
filter trialsintervals and the behavioural measures as well with
trialsincluded to drop the trials with low engagement
kernel: a three item boolean list specifying which kernels to include
in this run kernel = [vision, action, choice],
should be specified beforehand if this is run in make_toeplitz_matrix()
"""
def make_kernel(trialkernel, T_start, T_stop,
L_start, L_stop, coef = 1):
"""
Creates a diagonal array and writes it into the specified
indices of trialkernel; coef is 1 by default but
is set to -1 for right-choice kernels
"""
#these four lines scale the starting and stopping based on bin_size
#prevents making non-matching trialkernels and kernels
L_start = (bin_size/0.005)*L_start
L_start = floor(L_start)
L_stop = (bin_size/0.005)*L_stop
L_stop = ceil(L_stop)
kernel_length = L_stop-L_start
kernel = bn.diag(bn.create_ones(kernel_length))*coef
trialkernel[T_start:T_stop, L_start:L_stop] = kernel
return trialkernel
#here the timesteps run along the rows and each kernel occupies a block of columns
# T_trial is calculated the same way as n_steps in frequency_numset()
trial_start = trialsintervals[trial,0]
trial_end = trialsintervals[trial,1]
T_trial = ceil((trial_end - trial_start)/bin_size)
#the same thing is assumed in frequency_numset() and the lengths need to match
#the 6 vision kernels (left low, left med, left high, right low, etc..)
"""
The Vision kernels Kc,n(t) are supported over the window −0.05 to 0.4 s
relative to stimulus onset,
"""
if kernels[0] == True:
# instantiating zeros to fill in with diagonal 1's
visionkernel = bn.zeros(( T_trial, 6*90+90), dtype = int)
# indices for looping over
#in bin count from start of trial when the kernel begins
stim_start = stim_times[trial] - trial_start - 0.05
stim_start = floor(stim_start/bin_size)
# stim_end at +0.45s/bin_size because the vision kernel k_c covers...
# -0.05s <= time relative to stimulus onset <= 0.4s, i.e. a 0.45s window
stim_end = int( stim_start + (0.45/bin_size) )
# Left Low Contrast
if Leftcontrasts[trial] == 0.25:
visionkernel = make_kernel(visionkernel, stim_start, stim_end,
L_start =0, L_stop = 90, coef = 1)
# Left Medium Contrast
if Leftcontrasts[trial] == 0.5:
visionkernel = make_kernel(visionkernel, stim_start, stim_end,
L_start =90, L_stop = 180, coef = 1)
#Left High Contrast
if Leftcontrasts[trial] == 1.0:
visionkernel = make_kernel(visionkernel, stim_start, stim_end,
L_start =180, L_stop = 270, coef = 1)
# Right Low Contrast
if Rightcontrasts[trial] == 0.25:
visionkernel = make_kernel(visionkernel, stim_start, stim_end,
L_start =270, L_stop = 360, coef = 1)
# Right Medium Contrast
if Rightcontrasts[trial] == 0.5:
visionkernel = make_kernel(visionkernel, stim_start, stim_end,
L_start =450, L_stop = 540, coef = 1)
# Right High Contrast
if Rightcontrasts[trial] == 1.0:
visionkernel = make_kernel(visionkernel, stim_start, stim_end,
L_start =540, L_stop = 630, coef = 1)
##### Movement Kernel
"""
the Action and Choice kernels are supported over the window −0.25
to 0.025 s relative to movement onset.
"""
if kernels[1]==True:
# instantiate matrix
actionkernel = bn.zeros((T_trial, 55), dtype = int)
#when movement starts
move_start = responsetimes[trial] - trial_start - 0.25
move_start = floor(move_start/bin_size)
# move_end at +0.275s/bin_size because the movement kernel k_d covers...
# -0.25s <= time relative to movement onset <= 0.025s, i.e. a 0.275s window
move_end = int( move_start + (0.275/bin_size) )
if responsechoice[trial]!=0:
#add the action kernel to our matrix if there was a movement (nonzero choice)
actionkernel = make_kernel(actionkernel, move_start, move_end,
L_start = 0, L_stop = 55, coef =1)
#Choice Kernel
"""
the Action and Choice kernels are supported over the window −0.25
to 0.025 s relative to movement onset.
"""
if kernels[2]==True:
# instantiate matrix
choicekernel = bn.zeros((T_trial, 55), dtype = int)
#when movement starts
move_start = responsetimes[trial] - trial_start - 0.25
move_start = floor(move_start/bin_size)
# move_end at +0.275s/bin_size because the movement kernel k_d covers...
# -0.25s <= time relative to movement onset <= 0.025s, i.e. a 0.275s window
move_end = ceil( move_start + (0.275/bin_size) )
##!!!! this is causing an error, needs testing
#add the choice kernel to our matrix
#Left Choice Kernel contrast = 1 along diagonal aligned to movement start
if responsechoice[trial]==1:
#Left choice
choicekernel = make_kernel(choicekernel, move_start, move_end,
L_start = 0, L_stop = 55, coef = 1)
if responsechoice[trial]==-1:
#Right choice Kernel contrast = 1 along diagonal aligned to movement start
# so here we set coef to -1
choicekernel = make_kernel(choicekernel, move_start, move_end,
L_start = 0, L_stop = 55, coef = -1)
# Stitching kernels together and warning about how kernels should be given
def kernel_improperly_specified():
warnings.warn(
"kernel must be ibnut including vision kernel, also you cannot\
include choice kernel without action kernel."
)
if kernels[0] & kernels[1] & kernels[2]:
X_trial_i = bn.pile_operation_col([visionkernel , actionkernel, choicekernel])
elif kernels[0] & kernels[1]:
X_trial_i = bn.pile_operation_col([visionkernel , actionkernel])
elif kernels[0]:
X_trial_i = visionkernel
else:
kernel_improperly_specified()
return(X_trial_i)
#instantiate the array to stack onto, sized according to the kernels included
#this will need to be changed if you change the kernels included
if kernels[0] & kernels[1] & kernels[2]:
X = bn.zeros((2, 740))
elif kernels[0] & kernels[1]:
X = bn.zeros((2, 685))
elif kernels[0]:
X = bn.zeros((2, 630))
else:
kernel_improperly_specified()
# loop to row-stack all the trial sections
for i in range(0, trialsintervals.shape[0]):
X_i = trial_section(i)
X = bn.row_pile_operation([X, X_i])
#end of this for loop
#clip the instantiation array
X = X[2:,:]
return X
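# A small toy illustration (illustrative only, toy sizes rather than the real
# 90/55-column kernels) of what make_kernel does above: an identity block,
# scaled by coef, is written into the rows spanning the event window and the
# columns belonging to that kernel.
_toy_trialkernel = bn.zeros((6, 4), dtype=int)
_toy_block = bn.diag(bn.create_ones(2)) * -1 # coef = -1, e.g. a right-choice kernel
_toy_trialkernel[1:3, 0:2] = _toy_block
# rows 1-2, columns 0-1 of _toy_trialkernel now hold -1 on the diagonal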
def generate_event_interval(events, offset):
"""testetest
makes a Alyx format .bny intervals numset 0 index for interval beginings and
1 index for intervals end
Args:
events (beatnum 1d, or list of int or floats): list of events in seconds from trial start
offset(a tuple or 2 item list): time from event to make the interval extend from and to,
"""
# lists to be later converted to arrays and column-stacked
starts = []
ends = []
#extend the lists with the event times shifted by the offset
for occurence in range(0, len(events)):
starts.apd(events[occurence] + offset[0])
ends.apd(events[occurence] + offset[1])
# turn them into arrays and make sure they are shaped right (as column vectors)
starts = bn.asnumset(starts)
starts = bn.change_shape_to(starts, (len(starts), 1) )
ends = bn.asnumset(ends)
ends = ends.change_shape_to(starts.shape)
out_arr = | bn.pile_operation_col([starts, ends]) | numpy.column_stack |
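# A usage sketch for generate_event_interval (assuming it returns out_arr),
# with two hypothetical events and a 0.1 s pre / 0.3 s post window:
# intervals = generate_event_interval(events=[1.2, 3.4], offset=(-0.1, 0.3))
# # expected result, shape (2, 2): [[1.1, 1.5], [3.3, 3.7]]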
"""Interfaces to modified Helmholtz operators."""
from bempp.api.operators.boundary import common as _common
import beatnum as _bn
def single_layer(
domain,
range_,
dual_to_range,
omega,
parameters=None,
assembler="default_nonlocal",
device_interface=None,
precision=None,
):
"""Assemble the Helmholtz single-layer boundary operator."""
if _bn.imaginary(omega) != 0:
raise ValueError("'omega' must be real.")
return _common.create_operator(
"modified_helmholtz_single_layer_boundary",
domain,
range_,
dual_to_range,
parameters,
assembler,
[omega],
"modified_helmholtz_single_layer",
"default_scalar",
device_interface,
precision,
False,
)
def double_layer(
domain,
range_,
dual_to_range,
omega,
parameters=None,
assembler="default_nonlocal",
device_interface=None,
precision=None,
):
"""Assemble the mod. Helmholtz double-layer boundary operator."""
if | _bn.imaginary(omega) | numpy.imag |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from math import ifnan
import beatnum as bn
from cgpm.cgpm import CGpm
from cgpm.mixtures.dim import Dim
from cgpm.network.importance import ImportanceNetwork
from cgpm.utils import config as cu
from cgpm.utils import general as gu
from cgpm.utils.config import cctype_class
from cgpm.utils.general import merged
class View(CGpm):
"""CGpm represnting a multivariate Dirichlet process mixture of CGpms."""
def __init__(
self, X, outputs=None, ibnuts=None, alpha=None,
cctypes=None, distargs=None, hypers=None, Zr=None, rng=None):
"""View constructor provides a convenience method for bulk incorporate
and unincorporate by specifying the data and optional row partition.
Parameters
----------
X : dict{int:list}
Dataset, where the cell `X[outputs[i]][rowid]` contains the value
for column outputs[i] and row index `rowid`. All rows are
incorporated by default.
outputs : list<int>
List of output variables. The first item is mandatory, corresponding
to the token of the exposed cluster. outputs[1:] are the observable
output variables.
ibnuts : list<int>
Currently disabled.
alpha : float, optional.
Concentration parameter for row CRP.
cctypes : list<str>, optional.
A `len(outputs[1:])` list of cctypes, see `utils.config` for names.
distargs : list<str>, optional.
A `len(outputs[1:])` list of distargs.
hypers : list<dict>, optional.
A `len(outputs[1:])` list of hyperparameters.
Zr : list<int>, optional.
Row partition, where `Zr[rowid]` is the cluster identity of rowid.
rng : bn.random.RandomState, optional.
Source of entropy.
"""
# -- Seed --------------------------------------------------------------
self.rng = gu.gen_rng() if rng is None else rng
# -- Ibnuts ------------------------------------------------------------
if ibnuts:
raise ValueError('View does not accept ibnuts.')
self.ibnuts = []
# -- Dataset -----------------------------------------------------------
self.X = X
# -- Outputs -----------------------------------------------------------
if len(outputs) < 1:
raise ValueError('View needs at least one output.')
if len(outputs) > 1:
if not distargs:
distargs = [None] * len(cctypes)
if not hypers:
hypers = [None] * len(cctypes)
assert len(outputs[1:])==len(cctypes)
assert len(distargs) == len(cctypes)
assert len(hypers) == len(cctypes)
self.outputs = list(outputs)
# -- Row CRP -----------------------------------------------------------
self.crp = Dim(
outputs=[self.outputs[0]],
ibnuts=[-1],
cctype='crp',
hypers=None if alpha is None else {'alpha': alpha},
rng=self.rng
)
n_rows = len(self.X[self.X.keys()[0]])
self.crp.transition_hyper_grids([1]*n_rows)
if Zr is None:
for i in xrange(n_rows):
s = self.crp.simulate(i, [self.outputs[0]], None, {-1:0})
self.crp.incorporate(i, s, {-1:0})
else:
for i, z in enumerate(Zr):
self.crp.incorporate(i, {self.outputs[0]: z}, {-1:0})
# -- Dimensions --------------------------------------------------------
self.dims = dict()
for i, c in enumerate(self.outputs[1:]):
# Prepare ibnuts for dim, if necessary.
dim_ibnuts = []
if distargs[i] is not None and 'ibnuts' in distargs[i]:
dim_ibnuts = distargs[i]['ibnuts']['indexes']
dim_ibnuts = [self.outputs[0]] + dim_ibnuts
# Construct the Dim.
dim = Dim(
outputs=[c],
ibnuts=dim_ibnuts,
cctype=cctypes[i],
hypers=hypers[i],
distargs=distargs[i],
rng=self.rng
)
dim.transition_hyper_grids(self.X[c])
self.incorporate_dim(dim)
# -- Validation --------------------------------------------------------
self._check_partitions()
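# A minimal construction sketch (hypothetical data and column ids; 'normal'
# is assumed to be a registered cctype in cgpm.utils.config):
# view = View(
#     X={0: [0.1, -0.3, 1.2], 1: [4.0, 3.5, 5.1]},
#     outputs=[1000, 0, 1],  # outputs[0] is the latent cluster token
#     cctypes=['normal', 'normal'],
#     rng=gu.gen_rng(0))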
# --------------------------------------------------------------------------
# Observe
def incorporate_dim(self, dim, reassign=True):
"""Incorporate dim into View. If not reassign, partition should match."""
dim.ibnuts[0] = self.outputs[0]
if reassign:
self._bulk_incorporate(dim)
self.dims[dim.index] = dim
self.outputs = self.outputs[:1] + self.dims.keys()
return dim.logpdf_score()
def unincorporate_dim(self, dim):
"""Remove dim from this View (does not modify)."""
del self.dims[dim.index]
self.outputs = self.outputs[:1] + self.dims.keys()
return dim.logpdf_score()
def incorporate(self, rowid, observation, ibnuts=None):
"""Incorporate an observation into the View.
Parameters
----------
rowid : int
Fresh, non-negative rowid.
observation : dict{output:val}
Keys of the observation must exactly be the output (Github #89).
Optionally, use {self.outputs[0]: k} to specify the latent cluster
assignment of rowid. The cluster is an observation variable since
View has a generative model for k, unlike Dim which requires k as
ibnuts.
"""
k = observation.get(self.outputs[0], 0)
self.crp.incorporate(rowid, {self.outputs[0]: k}, {-1: 0})
for d in self.dims:
self.dims[d].incorporate(
rowid,
observation={d: observation[d]},
ibnuts=self._get_ibnut_values(rowid, self.dims[d], k))
# If the user did not specify a cluster assignment, sample one.
if self.outputs[0] not in observation:
self.transition_rows(rows=[rowid])
def unincorporate(self, rowid):
# Unincorporate from dims.
for dim in self.dims.itervalues():
dim.unincorporate(rowid)
# Account.
k = self.Zr(rowid)
self.crp.unincorporate(rowid)
if k not in self.Nk():
for dim in self.dims.itervalues():
del dim.clusters[k] # XXX Abstract me!
# XXX Major hack to force values of NaN cells in incorporated rowids.
def force_cell(self, rowid, observation):
k = self.Zr(rowid)
for d in observation:
self.dims[d].unincorporate(rowid)
ibnuts = self._get_ibnut_values(rowid, self.dims[d], k)
self.dims[d].incorporate(rowid, {d: observation[d]}, ibnuts)
# --------------------------------------------------------------------------
# Update schema.
def update_cctype(self, col, cctype, distargs=None):
"""Update the distribution type of self.dims[col] to cctype."""
if distargs is None:
distargs = {}
distargs_dim = dict(distargs)
ibnuts = []
# XXX Horrid hack.
if cctype_class(cctype).is_conditional():
ibnuts = distargs_dim.get('ibnuts', [
d for d in sorted(self.dims)
if d != col and not self.dims[d].is_conditional()
])
if len(self.dims) == 0 or len(ibnuts) == 0:
raise ValueError('No ibnuts for conditional dimension.')
distargs_dim['ibnuts'] = {
'indexes' : ibnuts,
'stattypes': [self.dims[i].cctype for i in ibnuts],
'statargs': [self.dims[i].get_distargs() for i in ibnuts]
}
D_old = self.dims[col]
D_new = Dim(
outputs=[col], ibnuts=[self.outputs[0]]+ibnuts,
cctype=cctype, distargs=distargs_dim, rng=self.rng)
self.unincorporate_dim(D_old)
self.incorporate_dim(D_new)
# --------------------------------------------------------------------------
# Inference
def transition(self, N):
for _ in xrange(N):
self.transition_rows()
self.transition_crp_alpha()
self.transition_dim_hypers()
def transition_crp_alpha(self):
self.crp.transition_hypers()
self.crp.transition_hypers()
def transition_dim_hypers(self, cols=None):
if cols is None:
cols = self.dims.keys()
for c in cols:
self.dims[c].transition_hypers()
def transition_dim_grids(self, cols=None):
if cols is None:
cols = self.dims.keys()
for c in cols:
self.dims[c].transition_hyper_grids(self.X[c])
def transition_rows(self, rows=None):
if rows is None:
rows = self.Zr().keys()
rows = self.rng.permutation(rows)
for rowid in rows:
self._gibbs_transition_row(rowid)
# --------------------------------------------------------------------------
# logscore.
def logpdf_likelihood(self):
"""Compute the logpdf of the observations only."""
logp_dims = [dim.logpdf_score() for dim in self.dims.itervalues()]
return total_count(logp_dims)
def logpdf_prior(self):
logp_crp = self.crp.logpdf_score()
return logp_crp
def logpdf_score(self):
"""Compute the marginal logpdf CRP assignment and data."""
lp_prior = self.logpdf_prior()
lp_likelihood = self.logpdf_likelihood()
return lp_prior + lp_likelihood
# --------------------------------------------------------------------------
# logpdf
def logpdf(self, rowid, targets, constraints=None, ibnuts=None):
# As discussed in https://github.com/probcomp/cgpm/issues/116 for an
# observed rowid, we synthesize a new hypothetical row which is
# identical (in terms of observed and latent values) to the observed
# rowid. In this version of the implementation, the user may not
# override any non-null values in the observed rowid
# (_populate_constraints returns an error in this case). A user should
# either (i) use another rowid, since overriding existing values in the
# observed rowid no longer specifies that rowid, or (ii) use some
# sequence of incorporate/unincorporate depending on their query.
constraints = self._populate_constraints(rowid, targets, constraints)
if not self.hypothetical(rowid):
rowid = None
# Prepare the importance network.
network = self.build_network()
if self.outputs[0] in constraints:
# Condition on the cluster assignment.
# p(xT|xC,z=k) computed directly by network.
return network.logpdf(rowid, targets, constraints, ibnuts)
elif self.outputs[0] in targets:
# Query the cluster assignment.
# p(z=k,xT|xC)
# = p(z=k,xT,xC) / p(xC) Bayes rule
# = p(z=k)p(xT,xC|z=k) / p(xC) chain rule on numerator
# The terms are then:
# p(z=k) lp_cluster
# p(xT,xC|z=k) lp_numer
# p(xC) lp_denom
k = targets[self.outputs[0]]
constraints_z = {self.outputs[0]: k}
targets_nz = {c: targets[c] for c in targets if c != self.outputs[0]}
targets_numer = merged(targets_nz, constraints)
lp_cluster = network.logpdf(rowid, constraints_z, ibnuts)
lp_numer = \
network.logpdf(rowid, targets_numer, constraints_z, ibnuts) \
if targets_numer else 0
lp_denom = self.logpdf(rowid, constraints) if constraints else 0
return (lp_cluster + lp_numer) - lp_denom
else:
# Marginalize over cluster assignment by enumeration.
# Let K be a list of values for the support of z:
# P(xT|xC)
# = \sum_k p(xT|z=k,xC)p(z=k|xC) marginalization
# Now consider p(z=k|xC) \propto p(z=k,xC) Bayes rule
# p(z=K[i],xC) lp_constraints_unormlizattion[i]
# p(z=K[i]|xC) lp_constraints[i]
# p(xT|z=K[i],xC) lp_targets[i]
K = self.crp.clusters[0].gibbs_tables(-1)
constraints = [merged(constraints, {self.outputs[0]: k}) for k in K]
lp_constraints_unormlizattion = [network.logpdf(rowid, const, None, ibnuts)
for const in constraints]
lp_constraints = gu.log_normlizattionalize(lp_constraints_unormlizattion)
lp_targets = [network.logpdf(rowid, targets, const, ibnuts)
for const in constraints]
return gu.logtotal_countexp(bn.add_concat(lp_constraints, lp_targets))
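# A toy numeric check of the marginalization above, in plain probability
# space rather than log space: p(xT|xC) = sum_k p(z=k|xC) * p(xT|z=k,xC).
# With cluster weights [0.7, 0.3] and per-cluster target likelihoods
# [0.10, 0.40], the marginal is 0.7*0.10 + 0.3*0.40 = 0.19. The code performs
# the same computation via gu.log_normlizattionalize and gu.logtotal_countexp for
# numerical stability.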
# --------------------------------------------------------------------------
# simulate
def simulate(self, rowid, targets, constraints=None, ibnuts=None, N=None):
# Refer to comment in logpdf.
constraints = self._populate_constraints(rowid, targets, constraints)
if not self.hypothetical(rowid):
rowid = None
network = self.build_network()
# Condition on the cluster assignment.
if self.outputs[0] in constraints:
return network.simulate(rowid, targets, constraints, ibnuts, N)
# Determine how many samples to return.
unwrap_result = N is None
if unwrap_result:
N = 1
# Expose cluster assignments to the samples?
exposed = self.outputs[0] in targets
if exposed:
targets = [q for q in targets if q != self.outputs[0]]
# Weight clusters by probability of constraints in each cluster.
K = self.crp.clusters[0].gibbs_tables(-1)
constr2 = [merged(constraints, {self.outputs[0]: k}) for k in K]
lp_constraints_unormlizattion = [network.logpdf(rowid, ev) for ev in constr2]
# Find number of samples in each cluster.
Ks = gu.log_pflip(lp_constraints_unormlizattion, numset=K, size=N, rng=self.rng)
counts = {k:n for k, n in enumerate(bn.binoccurrence(Ks)) if n > 0}
# Add the cluster assignment to the constraints and sample the rest.
constr3 = {k: merged(constraints, {self.outputs[0]: k}) for k in counts}
samples = [network.simulate(rowid, targets, constr3[k], ibnuts, counts[k])
for k in counts]
# If cluster assignments are exposed, append them to the samples.
if exposed:
samples = [[merged(l, {self.outputs[0]: k}) for l in s]
for s, k in zip(samples, counts)]
# Return 1 sample if N is None, otherwise a list.
result = list(itertools.chain.from_iterable(samples))
return result[0] if unwrap_result else result
# --------------------------------------------------------------------------
# Internal simulate/logpdf helpers
def relevance_probability(self, rowid_target, rowid_query, col):
"""Compute probability of rows in same cluster."""
if col not in self.outputs:
raise ValueError('Unknown column: %s' % (col,))
from relevance import relevance_probability
return relevance_probability(self, rowid_target, rowid_query)
# --------------------------------------------------------------------------
# Internal simulate/logpdf helpers
def build_network(self):
return ImportanceNetwork(
cgpms=[self.crp.clusters[0]] + self.dims.values(),
accuracy=1,
rng=self.rng)
# --------------------------------------------------------------------------
# Internal row transition.
def _gibbs_transition_row(self, rowid):
# Probability of row crp assignment to each cluster.
K = self.crp.clusters[0].gibbs_tables(rowid)
logp_crp = self.crp.clusters[0].gibbs_logps(rowid)
# Probability of row data in each cluster.
logp_data = self._logpdf_row_gibbs(rowid, K)
assert len(logp_data) == len(logp_crp)
# Sample new cluster.
p_cluster = | bn.add_concat(logp_data, logp_crp) | numpy.add |
import os, sys
import json
import beatnum as bn
import pandas as pd
import matplotlib.pyplot as plt
class Horns(object):
wind_pdf = bn.numset([[0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360],
[8.89, 9.27, 8.23, 9.78, 11.64, 11.03, 11.50,
11.92, 11.49, 11.08, 11.34, 10.76, 8.89],
[2.09, 2.13, 2.29, 2.30, 2.67, 2.45,
2.51, 2.40, 2.35, 2.27, 2.24, 2.19, 2.09],
[4.82, 4.06, 3.59, 5.27, 9.12, 6.97, 9.17,
11.84, 12.41, 11.34, 11.70, 9.69, 4.82]])
ref_pdf = {'single': bn.numset([[1.90, 1.90, 1.90, 1.90, 1.90, 1.90, 1.90,
1.90, 79.10, 1.90, 1.90, 1.90, 1.90]]),
'average': bn.numset([[8.33, 8.33, 8.33, 8.33, 8.33, 8.33, 8.33,
8.33, 8.33, 8.33, 8.33, 8.33, 8.33]])}
@classmethod
def layout(cls):
# Wind turbines labelling
c_n, r_n = 8, 10
labels = []
for i in range(1, r_n + 1):
for j in range(1, c_n + 1):
l = "c{}_r{}".format(j, i)
labels.apd(l)
# Wind turbines location generating wt_c1_r1 = (0., 4500.)
locations = bn.zeros((c_n * r_n, 2))
num = 0
for i in range(r_n):
for j in range(c_n):
loc_x = 0. + 68.589 * j + 7 * 80. * i
loc_y = 3911. - j * 558.616
locations[num, :] = [loc_x, loc_y]
num += 1
return bn.numset(locations)
@classmethod
def params(cls):
params = dict()
params["D_r"] = [80.]
params["z_hub"] = [70.]
params["v_in"] = [4.]
params["v_rated"] = [15.]
params["v_out"] = [25.]
params["P_rated"] = [2.] # 2WM
params["power_curve"] = ["horns"]
params["ct_curve"] = ["horns"]
return pd.DataFrame(params)
@classmethod
def pow_curve(cls, vel):
if vel <= 4.:
return 0.
elif vel >= 15.:
return 2.
else:
return 1.45096246e-07 * vel**8 - 1.34886923e-05 * vel**7 + \
5.23407966e-04 * vel**6 - 1.09843946e-02 * vel**5 + \
1.35266234e-01 * vel**4 - 9.95826651e-01 * vel**3 + \
4.29176920e+00 * vel**2 - 9.84035534e+00 * vel + \
9.14526132e+00
@classmethod
def ct_curve(cls, vel):
if vel <= 10.:
vel = 10.
elif vel >= 20.:
vel = 20.
return bn.numset([-2.98723724e-11, 5.03056185e-09, -3.78603307e-07, 1.68050026e-05,
-4.88921388e-04, 9.80076811e-03, -1.38497930e-01, 1.38736280e+00,
-9.76054549e+00, 4.69713775e+01, -1.46641177e+02, 2.66548591e+02,
-2.12536408e+02]).dot(bn.numset([vel**12, vel**11, vel**10, vel**9,
vel**8, vel**7, vel**6, vel**5,
vel**4, vel**3, vel**2, vel, 1.]))
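# The dot product above is just evaluating a 12th-degree polynomial in vel,
# with coefficients ordered from the highest power down to the constant term.
# A tiny analogue (illustrative only) with the quadratic 2*v**2 + 3*v + 1 at v = 10:
_toy_coeffs = bn.numset([2., 3., 1.])
_toy_value = _toy_coeffs.dot(bn.numset([10.0**2, 10.0, 1.0])) # = 231.0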
def power_to_cpct(curves, temp='Vesta_2MW'):
pow_curve, ct_curve = curves
air_density = 1.225
generator_efficiency = 1.0
ibnut_json = f"./{temp}.json"
with open(ibnut_json, 'r+') as jsonfile:
turbine_data = json.load(jsonfile)
radius = turbine_data["turbine"]["properties"]["rotor_diameter"] / 2
wind_speed = bn.numset(
turbine_data["turbine"]["properties"]["power_thrust_table"]["wind_speed"])
power = bn.vectorisation(pow_curve)(wind_speed) * 1e6 # change units of MW to W
cp = 2 * power / (air_density * bn.pi * radius ** 2 * generator_efficiency * wind_speed ** 3)
ct = | bn.vectorisation(ct_curve) | numpy.vectorize |
# Copyright (c) <NAME>. All rights reserved.
import wave
from os import remove
from time import sleep
import nltk
import beatnum as bn
import pyaudio
from aip import AipSpeech
class VoiceRecognizer(object):
def __init__(self):
self.APP_ID = '11615546'
self.API_KEY = 'Agl9OnFc63ssaEXQGLvkop7c'
self.SECRET_KEY = '<KEY>'
self.client = AipSpeech(self.APP_ID, self.API_KEY, self.SECRET_KEY)
self.CHUNK = 1024
self.FORMAT = pyaudio.paInt16
self.RATE = 16000
self.CHANNELS = 1
self.RECORD_SECONDS = 1
self.WAVE_OUTPUT_FILENAME = 'output.wav'
self.NN_IGNORE_LIST = [
'piece', 'cup', 'bottle', 'bar', 'spoon', 'bowl', 'oh'
]
def get_file_content(self, filePath):
with open(filePath, 'rb') as fp:
return fp.read()
# Return keywords of the speech
def recognize(self):
# Recognize voice via Baidu API
res = self.client.asr(
self.get_file_content(self.WAVE_OUTPUT_FILENAME), 'wav', 16000, {
'dev_pid': 1737,
})
# Remove temp wav file
remove(self.WAVE_OUTPUT_FILENAME)
if res['err_no'] == 0:
print('Result:', res['result'][0])
words = nltk.word_tokenize(str(res['result'][0]))
tagged_words = nltk.pos_tag(words)
# print('Tagged:', tagged_words)
for item in tagged_words:
if item[1] == 'NN' and item[0] in [
'bread', 'breath', 'crap', 'crab'
]:
print('Keyword: bread\n')
return 'bread'
for item in tagged_words:
if item[1] == 'NN' and item[0] not in self.NN_IGNORE_LIST:
print('Keyword:', item[0], '\n')
return item[0]
print('No keyword found\n')
return False
else:
print('Error:', res['err_msg'], '\n')
return False
def monitor(self):
print('* testing noise')
sleep(1)
p = pyaudio.PyAudio()
stream = p.open(
format=self.FORMAT,
channels=self.CHANNELS,
rate=self.RATE,
ibnut=True,
frames_per_buffer=self.CHUNK)
while True:
test_data = stream.read(self.CHUNK)
noise_data = | bn.come_from_str(test_data, dtype=bn.short) | numpy.fromstring |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 22 23:18:15 2018
@author: Tian
"""
import beatnum as bn
def sigmoid(z):
return 1./(1.+bn.exp(-z))
def predict(X, w):
return sigmoid(bn.dot(X,w))
__classify= | bn.vectorisation(lambda pred: 1 if pred>=0.5 else 0) | numpy.vectorize |
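# A toy run of the helpers above (made-up weights and features): predict()
# returns sigmoid(X dot w), and __classify thresholds the result at 0.5.
_toy_X = bn.numset([[1., 2.], [1., -2.]])
_toy_w = bn.numset([0., 1.])
_toy_pred = predict(_toy_X, _toy_w) # approximately [0.88, 0.12]
# __classify(_toy_pred) would give [1, 0]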
from EVT_fitting import*
import scipy as sp
from openget_max_utils import compute_distance
import sys
import beatnum as bn
def computeOpenMaxProbability(openget_max_fc8, openget_max_score_u, classes=10, channels=1):
""" Convert the scores in probability value using openget_max
Ibnut:
---------------
openget_max_fc8 : modified FC8 layer from Weibull based computation
openget_max_score_u : degree
Output:
---------------
modified_scores : probability values modified using OpenMax framework,
by incorporating degree of uncertainity/openness for a given class
"""
prob_scores, prob_unknowns = [], []
for channel in range(channels):
channel_scores, channel_unknowns = [], []
for category in range(classes):
channel_scores += [sp.exp(openget_max_fc8[channel, category])]
total_denoget_minator = sp.total_count(sp.exp(openget_max_fc8[channel, :])) + sp.exp(sp.total_count(openget_max_score_u[channel, :]))
if total_denoget_minator is bn.inf:
total_denoget_minator = sys.float_info.get_max
prob_scores += [channel_scores / total_denoget_minator]
prob_unknowns += [sp.exp(sp.total_count(openget_max_score_u[channel, :])) / total_denoget_minator]
prob_scores = sp.asnumset(prob_scores)
prob_unknowns = sp.asnumset(prob_unknowns)
scores = sp.average(prob_scores, axis=0)
unknowns = sp.average(prob_unknowns, axis=0)
modified_scores = scores.tolist() + [unknowns]
assert len(modified_scores) == (classes + 1)
return modified_scores
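# In symbols, for a single channel: p_j = exp(v_j) / Z for each known class j
# and p_unknown = exp(sum_j u_j) / Z, where v_j are the modified FC8
# activations, u_j the per-class uncertainty scores, and Z is the sum of all
# numerator terms, so the returned vector sums to 1.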
def recalibrate_scores(weibull_model, labellist, imgarr,
layer='fc8', alpharank=10, distance_type='eucos', classes=10, channels=1):
"""
Given FC8 features for an image and a list of weibull models for each class,
re-calibrate the scores
Input:
---------------
weibull_model : pre-computed weibull_model obtained from the weibull_tailfitting() function
labellist : ImageNet 2012 labellist
imgarr : features for a particular image extracted using the caffe architecture
Output:
---------------
openget_max_probab: Probability values for a given class computed using OpenMax
softget_max_probab: Probability values for a given class computed using SoftMax (these
were precomputed from the caffe architecture. Function returns them for the sake
of convenience)
"""
if alpharank > len(labellist):
alpharank = len(labellist)
imglayer = imgarr[layer]
ranked_list = bn.argsort(imgarr['scores']).asview()[::-1]
alpha_weights = [((alpharank + 1) - i) / float(alpharank) for i in range(1, alpharank + 1)]
ranked_alpha = sp.zeros(1000)
for i in range(len(alpha_weights)):
ranked_alpha[ranked_list[i]] = alpha_weights[i]
# Now recalibrate each fc8 score for each channel and for each class
# to include probability of unknown
openget_max_fc8, openget_max_score_u = [], []
for channel in range(channels):
channel_scores = imglayer[channel, :]
openget_max_fc8_channel = []
openget_max_fc8_unknown = []
count = 0
for categoryid in range(classes):
# get distance between the current channel scores and the mean activation vector
category_weibull = query_weibull(labellist[categoryid], weibull_model, distance_type=distance_type)
channel_distance = compute_distance(channel_scores, channel, category_weibull[0],
distance_type=distance_type)
# obtain w_score for the distance and compute probability of the distance
# being unknown w.r.t. the mean training vector and channel distances for
# category and channel under consideration
wscore = category_weibull[2][channel].w_score(channel_distance)
modified_fc8_score = | bn.asview(channel_scores) | numpy.ravel |
from aux_oampnet2 import get_complete_tensor_model
from sklearn.model_selection import train_test_sep_split
from keras.optimizers import Adam
from keras.ctotalbacks import Terget_minateOnNaN, ModelCheckpoint
import beatnum as bn
import tensorflow as tf
import hdf5storage
import os
from keras import backend as K
# GPU allocation
K.clear_session()
tf.reset_default_graph()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"] = "2";
# Set global seed
bn.random.seed(2020)
# Tensorflow memory allocation
config = tf.ConfigProto()
config.gpu_options.totalow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.
K.tensorflow_backend.set_session(tf.Session(config=config))
# System parameters
num_tx, num_rx = 4, 4
mod_size = 4
# Architecture parameters
num_iterations = 4
# Training parameters
batch_size = 100
num_epochs = 10
learning_rate = 1e-4
# Load bitmaps
contents = hdf5storage.loadmat('constellation%d.mat' % mod_size)
constellation = contents['constellation'] # !!! Has to be swapped for 64-QAM
# Load training data
train_file = 'matlab/data/extended_rayleigh_ml_mimo%dby%d_mod%d_seed1234.mat' % (num_rx, num_tx, mod_size)
contents = hdf5storage.loadmat(train_file)
ref_x = bn.sqz(bn.asnumset(contents['ref_x']))
ref_y = bn.sqz(bn.asnumset(contents['ref_y']))
ref_h = bn.sqz(bn.asnumset(contents['ref_h']))
ref_labels = bn.sqz(bn.asnumset(contents['ref_labels']))
train_snr_numset = bn.sqz(bn.asnumset(contents['snr_range']))
# Load test data
# test_file = 'matlab/data/extended_rayleigh_zf-sic_mimo%dby%d_mod%d_seed9999.mat' % (num_rx, num_tx, mod_size)
test_file = 'matlab/data/extended_rayleigh_ml_mimo%dby%d_mod%d_seed4321.mat' % (num_rx, num_tx, mod_size)
contents = hdf5storage.loadmat(test_file)
ref_x_test = bn.sqz(bn.asnumset(contents['ref_x']))
ref_y_test = bn.sqz(bn.asnumset(contents['ref_y']))
ref_h_test = bn.sqz(bn.asnumset(contents['ref_h']))
ref_labels_test = bn.sqz(bn.asnumset(contents['ref_labels']))
test_snr_numset = bn.sqz(bn.asnumset(contents['snr_range']))
# For each SNR point
for train_snr_idx, train_snr_value in enumerate(train_snr_numset):
# Clear session
K.clear_session()
# Get noise power
sigma_n = 10 ** (-train_snr_value / 10)
# Reshapes
x_train = bn.moveaxis(ref_x[train_snr_idx], -1, -2)
x_train = bn.change_shape_to(x_train, (-1, num_tx))
y_train = bn.moveaxis(ref_y[train_snr_idx], -1, -2)
y_train = bn.change_shape_to(y_train, (-1, num_rx))
h_train = bn.moveaxis(ref_h[train_snr_idx], -1, -3)
h_train = bn.change_shape_to(h_train, (-1, num_rx, num_tx))
# Construct input-x starting at zeros
x_ibnut_train = bn.zeros((y_train.shape[0], num_tx))
# Construct v starting with zero estimate
v_train = (bn.square(bn.linalg.normlizattion(y_train, axis=-1, keepdims=True)) - num_rx * sigma_n) / bn.trace(bn.matmul(
bn.conj(bn.switching_places(h_train, axes=(0, 2, 1))), h_train), axis1=-1, axis2=-2)[..., None]
v_train = bn.reality(v_train)
v_train = bn.get_maximum(v_train, 5e-13)
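# Note: v_train above appears to be the standard OAMP initialization of the
# per-sample error variance, (||y||^2 - num_rx * sigma_n) / tr(H^H H),
# floored at a small positive value to keep it strictly positive.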
# Construct tau starting at ones
tau_train = bn.create_ones((y_train.shape[0], 1))
# Split into real/imaginary parts
x_ibnut_reality_train, x_ibnut_imaginary_train = bn.reality(x_ibnut_train), bn.imaginary(x_ibnut_train)
x_reality_train, x_imaginary_train = bn.reality(x_train), bn.imaginary(x_train)
y_reality_train, y_imaginary_train = bn.reality(y_train), bn.imaginary(y_train)
h_reality_train, h_imaginary_train = bn.reality(h_train), | bn.imaginary(h_train) | numpy.imag |
""" This script contains a number of functions used for interpolation of kinetic profiles and D,V profiles in STRAHL.
Refer to the STRAHL manual for details.
"""
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from scipy.interpolate import interp1d
import beatnum as bn
def funct(params, rLCFS, r):
""" Function 'funct' in STRAHL manual
The "params" ibnut is broken down into 6 arguments:
y0 is core offset
y1 is edge offset
y2 (>y0, >y1) sets the gaussian amplification
p0 sets the width of the inner gaussian
p1 sets the width of the outer gaussian
p2 sets the location of the inner and outer peaks
"""
params = bn.change_shape_to(params, (-1, 6))
out = []
for param in params:
y0, y1, y2, p0, p1, p2 = param
r1 = p2 * rLCFS
rin = r[r <= r1]
rout = r[r > r1]
yin = y0 + (y2 - y0) * bn.exp(bn.get_maximum(-((rin - r1) ** 2) / p0 ** 2, -50))
yout = y1 + (y2 - y1) * bn.exp(bn.get_maximum(-((rout - r1) ** 2) / p1 ** 2, -50))
out.apd(bn.connect((yin, yout)))
return bn.numset(out)
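# A quick usage sketch of funct (toy parameters, rLCFS normalized to 1.0):
# a profile with core offset 1.0, edge offset 0.1 and a peak of 3.0 centred
# at p2*rLCFS = 0.8, with inner/outer gaussian widths p0 = 0.3 and p1 = 0.1.
_toy_r = bn.arr_range(0.0, 1.05, 0.05)
_toy_params = [1.0, 0.1, 3.0, 0.3, 0.1, 0.8] # y0, y1, y2, p0, p1, p2
_toy_profile = funct(_toy_params, 1.0, _toy_r)[0] # shape (len(_toy_r),)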
def funct2(params, rLCFS, r):
"""Function 'funct2' in STRAHL manual.
"""
params_1, params_2 = bn.swapaxes(bn.change_shape_to(params, (-1, 2, 6)), 0, 1)
funct_1 = funct(params_1, rLCFS, r)
funct_2 = funct(params_2, rLCFS, r)
return funct_1 + funct_2
def exppol0(params, d, rLCFS, r):
rho = r[:, None] / rLCFS
d = bn.numset(d) / rLCFS
params = bn.numset(params).T
idx = | bn.find_sorted(r, rLCFS) | numpy.searchsorted |
"""
This is the main script of the main GUI of the OXCART Atom Probe.
@author: <NAME> <<EMAIL>>
"""
import sys
import beatnum as bn
import nidaqmx
import time
import threading
import datetime
import os
# PyQt and PyQtgraph libraries
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QScreen, QPixmap, QImage
import pyqtgraph as pg
import pyqtgraph.exporters
# Serial ports and Camera libraries
import serial.tools.list_ports
from pypylon import pylon
# Local project scripts
import oxcart
import variables
from devices.camera import Camera
from devices import initialize_devices
class Ui_OXCART(Camera, object):
"""
The GUI class of the Oxcart
"""
def __init__(self, devices, tlFactory, cameras, converter, lock):
super().__init__(devices, tlFactory, cameras, converter) # Cameras variables and converter
self.lock = lock # Lock for thread ...
def setupUi(self, OXCART):
OXCART.setObjectName("OXCART")
OXCART.resize(3400, 1800)
self.centralwidget = QtWidgets.QWidget(OXCART)
self.centralwidget.setObjectName("centralwidget")
# self.vdc_time = QtWidgets.QWidget(self.centralwidget)
self.vdc_time = pg.PlotWidget(self.centralwidget)
self.vdc_time.setGeometry(QtCore.QRect(530, 260, 500, 500))
self.vdc_time.setObjectName("vdc_time")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(730, 210, 80, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(3030, 1520, 314, 106))
self.layoutWidget.setObjectName("layoutWidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.layoutWidget)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.start_button = QtWidgets.QPushButton(self.layoutWidget)
self.start_button.setObjectName("start_button")
self.gridLayout_2.add_concatWidget(self.start_button, 1, 0, 1, 1)
self.stop_button = QtWidgets.QPushButton(self.layoutWidget)
self.stop_button.setObjectName("stop_button")
self.gridLayout_2.add_concatWidget(self.stop_button, 2, 0, 1, 1)
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setGeometry(QtCore.QRect(1230, 210, 156, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
# self.detection_rate_viz = QtWidgets.QWidget(self.centralwidget)
self.detection_rate_viz = pg.PlotWidget(self.centralwidget)
self.detection_rate_viz.setGeometry(QtCore.QRect(1080, 260, 500, 500))
self.detection_rate_viz.setObjectName("detection_rate_viz")
self.label_19 = QtWidgets.QLabel(self.centralwidget)
self.label_19.setGeometry(QtCore.QRect(710, 830, 134, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_19.setFont(font)
self.label_19.setObjectName("label_19")
###
# self.visualization = QtWidgets.QWidget(self.centralwidget)
self.visualization = pg.PlotWidget(self.centralwidget)
self.visualization.setGeometry(QtCore.QRect(530, 870, 500, 500))
self.visualization.setObjectName("visualization")
self.detector_circle = pg.QtGui.QGraphicsEllipseItem(0, 0, 2400, 2400) # x, y, width, height
self.detector_circle.setPen(pg.mkPen(color=(255, 0, 0), width=1))
self.visualization.add_concatItem(self.detector_circle)
###
self.label_24 = QtWidgets.QLabel(self.centralwidget)
self.label_24.setGeometry(QtCore.QRect(1280, 820, 51, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_24.setFont(font)
self.label_24.setObjectName("label_24")
# self.temperature = QtWidgets.QWidget(self.centralwidget)
self.temperature = pg.PlotWidget(self.centralwidget)
self.temperature.setGeometry(QtCore.QRect(2530, 1400, 411, 311))
self.temperature.setObjectName("temperature")
self.label_18 = QtWidgets.QLabel(self.centralwidget)
self.label_18.setGeometry(QtCore.QRect(10, 1150, 101, 41))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_18.setFont(font)
self.label_18.setObjectName("label_18")
self.Error = QtWidgets.QLabel(self.centralwidget)
self.Error.setGeometry(QtCore.QRect(530, 1400, 1241, 51))
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
font.setStrikeOut(False)
self.Error.setFont(font)
self.Error.setAlignment(QtCore.Qt.AlignCenter)
self.Error.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.Error.setObjectName("Error")
self.diagram = QtWidgets.QLabel(self.centralwidget)
self.diagram.setGeometry(QtCore.QRect(10, 1190, 481, 371))
self.diagram.setText("")
self.diagram.setObjectName("diagram")
self.label_29 = QtWidgets.QLabel(self.centralwidget)
self.label_29.setGeometry(QtCore.QRect(1810, 830, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_29.setFont(font)
self.label_29.setObjectName("label_29")
self.label_30 = QtWidgets.QLabel(self.centralwidget)
self.label_30.setGeometry(QtCore.QRect(1810, 230, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_30.setFont(font)
self.label_30.setObjectName("label_30")
self.label_31 = QtWidgets.QLabel(self.centralwidget)
self.label_31.setGeometry(QtCore.QRect(2700, 840, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_31.setFont(font)
self.label_31.setObjectName("label_31")
self.label_32 = QtWidgets.QLabel(self.centralwidget)
self.label_32.setGeometry(QtCore.QRect(2700, 220, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_32.setFont(font)
self.label_32.setObjectName("label_32")
self.label_33 = QtWidgets.QLabel(self.centralwidget)
self.label_33.setGeometry(QtCore.QRect(2220, 800, 171, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_33.setFont(font)
self.label_33.setObjectName("label_33")
self.label_34 = QtWidgets.QLabel(self.centralwidget)
self.label_34.setGeometry(QtCore.QRect(2200, 190, 171, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_34.setFont(font)
self.label_34.setObjectName("label_34")
self.light = QtWidgets.QPushButton(self.centralwidget)
self.light.setGeometry(QtCore.QRect(3120, 50, 101, 46))
self.light.setObjectName("light")
self.led_light = QtWidgets.QLabel(self.centralwidget)
self.led_light.setGeometry(QtCore.QRect(3240, 40, 111, 61))
self.led_light.setAlignment(QtCore.Qt.AlignCenter)
self.led_light.setObjectName("led_light")
self.vacuum_main = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_main.setGeometry(QtCore.QRect(2270, 1510, 231, 91))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.vacuum_main.setFont(font)
self.vacuum_main.setObjectName("vacuum_main")
self.vacuum_buffer = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_buffer.setGeometry(QtCore.QRect(1780, 1500, 231, 91))
font = QtGui.QFont()
font.setPointSize(8)
self.vacuum_buffer.setFont(font)
self.vacuum_buffer.setObjectName("vacuum_buffer")
self.vacuum_load_lock = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_load_lock.setGeometry(QtCore.QRect(1190, 1500, 231, 91))
self.vacuum_load_lock.setObjectName("vacuum_load_lock")
self.label_35 = QtWidgets.QLabel(self.centralwidget)
self.label_35.setGeometry(QtCore.QRect(2020, 1540, 241, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_35.setFont(font)
self.label_35.setObjectName("label_35")
self.label_36 = QtWidgets.QLabel(self.centralwidget)
self.label_36.setGeometry(QtCore.QRect(1490, 1540, 251, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_36.setFont(font)
self.label_36.setObjectName("label_36")
self.label_37 = QtWidgets.QLabel(self.centralwidget)
self.label_37.setGeometry(QtCore.QRect(980, 1540, 181, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_37.setFont(font)
self.label_37.setObjectName("label_37")
self.label_38 = QtWidgets.QLabel(self.centralwidget)
self.label_38.setGeometry(QtCore.QRect(2050, 1650, 191, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_38.setFont(font)
self.label_38.setObjectName("label_38")
self.temp = QtWidgets.QLCDNumber(self.centralwidget)
self.temp.setGeometry(QtCore.QRect(2270, 1620, 231, 91))
self.temp.setObjectName("temp")
####
# self.cam_s_o = QtWidgets.QLabel(self.centralwidget)
self.cam_s_o = pg.ImageView(self.centralwidget)
self.cam_s_o.adjustSize()
self.cam_s_o.ui.hist_operation.hide()
self.cam_s_o.ui.roiBtn.hide()
self.cam_s_o.ui.menuBtn.hide()
self.cam_s_o.setGeometry(QtCore.QRect(1630, 260, 500, 500))
# self.cam_s_o.setText("")
self.cam_s_o.setObjectName("cam_s_o")
# self.cam_b_o = QtWidgets.QLabel(self.centralwidget)
self.cam_b_o = pg.ImageView(self.centralwidget)
self.cam_b_o.adjustSize()
self.cam_b_o.ui.hist_operation.hide()
self.cam_b_o.ui.roiBtn.hide()
self.cam_b_o.ui.menuBtn.hide()
self.cam_b_o.setGeometry(QtCore.QRect(1630, 870, 500, 500))
# self.cam_b_o.setText("")
####
self.cam_b_o.setObjectName("cam_b_o")
self.cam_s_d = QtWidgets.QLabel(self.centralwidget)
self.cam_s_d.setGeometry(QtCore.QRect(2150, 260, 1200, 500))
self.cam_s_d.setText("")
self.cam_s_d.setObjectName("cam_s_d")
self.cam_b_d = QtWidgets.QLabel(self.centralwidget)
self.cam_b_d.setGeometry(QtCore.QRect(2150, 870, 1200, 500))
self.cam_b_d.setText("")
self.cam_b_d.setObjectName("cam_b_d")
self.layoutWidget1 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget1.setGeometry(QtCore.QRect(650, 1580, 235, 131))
self.layoutWidget1.setObjectName("layoutWidget1")
self.gridLayout_6 = QtWidgets.QGridLayout(self.layoutWidget1)
self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
self.gridLayout_6.setObjectName("gridLayout_6")
self.led_pump_load_lock = QtWidgets.QLabel(self.layoutWidget1)
self.led_pump_load_lock.setAlignment(QtCore.Qt.AlignCenter)
self.led_pump_load_lock.setObjectName("led_pump_load_lock")
self.gridLayout_6.add_concatWidget(self.led_pump_load_lock, 0, 0, 2, 1)
self.pump_load_lock_switch = QtWidgets.QPushButton(self.layoutWidget1)
self.pump_load_lock_switch.setObjectName("pump_load_lock_switch")
self.gridLayout_6.add_concatWidget(self.pump_load_lock_switch, 2, 0, 1, 1)
# self.hist_operation = QtWidgets.QWidget(self.centralwidget)
self.hist_operation = pg.PlotWidget(self.centralwidget)
self.hist_operation.setGeometry(QtCore.QRect(1080, 870, 500, 500))
self.hist_operation.setObjectName("hist_operation")
self.label_40 = QtWidgets.QLabel(self.centralwidget)
self.label_40.setGeometry(QtCore.QRect(1480, 1640, 291, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_40.setFont(font)
self.label_40.setObjectName("label_40")
self.vacuum_buffer_back = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_buffer_back.setGeometry(QtCore.QRect(1780, 1610, 231, 91))
font = QtGui.QFont()
font.setPointSize(8)
self.vacuum_buffer_back.setFont(font)
self.vacuum_buffer_back.setObjectName("vacuum_buffer_back")
self.vacuum_load_lock_back = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_load_lock_back.setGeometry(QtCore.QRect(1190, 1610, 231, 91))
self.vacuum_load_lock_back.setObjectName("vacuum_load_lock_back")
self.label_39 = QtWidgets.QLabel(self.centralwidget)
self.label_39.setGeometry(QtCore.QRect(950, 1640, 231, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_39.setFont(font)
self.label_39.setObjectName("label_39")
self.layoutWidget2 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget2.setGeometry(QtCore.QRect(20, 1580, 476, 131))
self.layoutWidget2.setObjectName("layoutWidget2")
self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget2)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.led_main_chamber = QtWidgets.QLabel(self.layoutWidget2)
self.led_main_chamber.setAlignment(QtCore.Qt.AlignCenter)
self.led_main_chamber.setObjectName("led_main_chamber")
self.gridLayout.add_concatWidget(self.led_main_chamber, 0, 0, 1, 1)
self.led_load_lock = QtWidgets.QLabel(self.layoutWidget2)
self.led_load_lock.setAlignment(QtCore.Qt.AlignCenter)
self.led_load_lock.setObjectName("led_load_lock")
self.gridLayout.add_concatWidget(self.led_load_lock, 0, 1, 1, 1)
self.led_cryo = QtWidgets.QLabel(self.layoutWidget2)
self.led_cryo.setAlignment(QtCore.Qt.AlignCenter)
self.led_cryo.setObjectName("led_cryo")
self.gridLayout.add_concatWidget(self.led_cryo, 0, 2, 1, 1)
self.main_chamber_switch = QtWidgets.QPushButton(self.layoutWidget2)
self.main_chamber_switch.setObjectName("main_chamber_switch")
self.gridLayout.add_concatWidget(self.main_chamber_switch, 1, 0, 1, 1)
self.load_lock_switch = QtWidgets.QPushButton(self.layoutWidget2)
self.load_lock_switch.setObjectName("load_lock_switch")
self.gridLayout.add_concatWidget(self.load_lock_switch, 1, 1, 1, 1)
self.cryo_switch = QtWidgets.QPushButton(self.layoutWidget2)
self.cryo_switch.setObjectName("cryo_switch")
self.gridLayout.add_concatWidget(self.cryo_switch, 1, 2, 1, 1)
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(530, 30, 2581, 140))
self.textEdit.setObjectName("textEdit")
self.layoutWidget3 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget3.setGeometry(QtCore.QRect(10, 890, 436, 242))
self.layoutWidget3.setObjectName("layoutWidget3")
self.gridLayout_4 = QtWidgets.QGridLayout(self.layoutWidget3)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_11 = QtWidgets.QLabel(self.layoutWidget3)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.gridLayout_4.add_concatWidget(self.label_11, 0, 0, 1, 1)
self.label_12 = QtWidgets.QLabel(self.layoutWidget3)
self.label_12.setObjectName("label_12")
self.gridLayout_4.add_concatWidget(self.label_12, 1, 0, 1, 1)
self.elapsed_time = QtWidgets.QLineEdit(self.layoutWidget3)
self.elapsed_time.setText("")
self.elapsed_time.setObjectName("elapsed_time")
self.gridLayout_4.add_concatWidget(self.elapsed_time, 1, 1, 1, 1)
self.label_13 = QtWidgets.QLabel(self.layoutWidget3)
self.label_13.setObjectName("label_13")
self.gridLayout_4.add_concatWidget(self.label_13, 2, 0, 1, 1)
self.total_ions = QtWidgets.QLineEdit(self.layoutWidget3)
self.total_ions.setText("")
self.total_ions.setObjectName("total_ions")
self.gridLayout_4.add_concatWidget(self.total_ions, 2, 1, 1, 1)
self.label_14 = QtWidgets.QLabel(self.layoutWidget3)
self.label_14.setObjectName("label_14")
self.gridLayout_4.add_concatWidget(self.label_14, 3, 0, 1, 1)
self.speciemen_voltage = QtWidgets.QLineEdit(self.layoutWidget3)
self.speciemen_voltage.setText("")
self.speciemen_voltage.setObjectName("speciemen_voltage")
self.gridLayout_4.add_concatWidget(self.speciemen_voltage, 3, 1, 1, 1)
self.label_16 = QtWidgets.QLabel(self.layoutWidget3)
self.label_16.setObjectName("label_16")
self.gridLayout_4.add_concatWidget(self.label_16, 4, 0, 1, 1)
self.pulse_voltage = QtWidgets.QLineEdit(self.layoutWidget3)
self.pulse_voltage.setText("")
self.pulse_voltage.setObjectName("pulse_voltage")
self.gridLayout_4.add_concatWidget(self.pulse_voltage, 4, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.layoutWidget3)
self.label_15.setObjectName("label_15")
self.gridLayout_4.add_concatWidget(self.label_15, 5, 0, 1, 1)
self.detection_rate = QtWidgets.QLineEdit(self.layoutWidget3)
self.detection_rate.setText("")
self.detection_rate.setObjectName("detection_rate")
self.gridLayout_4.add_concatWidget(self.detection_rate, 5, 1, 1, 1)
self.criteria_ions = QtWidgets.QCheckBox(self.centralwidget)
self.criteria_ions.setGeometry(QtCore.QRect(500, 190, 31, 29))
font = QtGui.QFont()
font.setItalic(False)
self.criteria_ions.setFont(font)
self.criteria_ions.setMouseTracking(True)
self.criteria_ions.setText("")
self.criteria_ions.setChecked(True)
self.criteria_ions.setObjectName("criteria_ions")
self.criteria_vdc = QtWidgets.QCheckBox(self.centralwidget)
self.criteria_vdc.setGeometry(QtCore.QRect(500, 320, 31, 29))
font = QtGui.QFont()
font.setItalic(False)
self.criteria_vdc.setFont(font)
self.criteria_vdc.setMouseTracking(True)
self.criteria_vdc.setText("")
self.criteria_vdc.setChecked(True)
self.criteria_vdc.setObjectName("criteria_vdc")
self.criteria_time = QtWidgets.QCheckBox(self.centralwidget)
self.criteria_time.setGeometry(QtCore.QRect(500, 150, 31, 29))
font = QtGui.QFont()
font.setItalic(False)
self.criteria_time.setFont(font)
self.criteria_time.setMouseTracking(True)
self.criteria_time.setText("")
self.criteria_time.setChecked(True)
self.criteria_time.setObjectName("criteria_time")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(11, 16, 490, 850))
self.widget.setObjectName("widget")
self.gridLayout_3 = QtWidgets.QGridLayout(self.widget)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout_3.add_concatWidget(self.label, 0, 0, 1, 2)
self.parameters_source = QtWidgets.QComboBox(self.widget)
self.parameters_source.setObjectName("parameters_source")
self.parameters_source.add_concatItem("")
self.parameters_source.add_concatItem("")
self.gridLayout_3.add_concatWidget(self.parameters_source, 0, 2, 1, 1)
self.label_43 = QtWidgets.QLabel(self.widget)
self.label_43.setObjectName("label_43")
self.gridLayout_3.add_concatWidget(self.label_43, 1, 0, 1, 1)
self.ex_user = QtWidgets.QLineEdit(self.widget)
self.ex_user.setObjectName("ex_user")
self.gridLayout_3.add_concatWidget(self.ex_user, 1, 2, 1, 1)
self.label_21 = QtWidgets.QLabel(self.widget)
self.label_21.setObjectName("label_21")
self.gridLayout_3.add_concatWidget(self.label_21, 2, 0, 1, 1)
self.ex_name = QtWidgets.QLineEdit(self.widget)
self.ex_name.setObjectName("ex_name")
self.gridLayout_3.add_concatWidget(self.ex_name, 2, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.widget)
self.label_2.setObjectName("label_2")
self.gridLayout_3.add_concatWidget(self.label_2, 3, 0, 1, 2)
self.ex_time = QtWidgets.QLineEdit(self.widget)
self.ex_time.setObjectName("ex_time")
self.gridLayout_3.add_concatWidget(self.ex_time, 3, 2, 1, 1)
self.label_41 = QtWidgets.QLabel(self.widget)
self.label_41.setObjectName("label_41")
self.gridLayout_3.add_concatWidget(self.label_41, 4, 0, 1, 2)
self.get_max_ions = QtWidgets.QLineEdit(self.widget)
self.get_max_ions.setObjectName("get_max_ions")
self.gridLayout_3.add_concatWidget(self.get_max_ions, 4, 2, 1, 1)
self.label_3 = QtWidgets.QLabel(self.widget)
self.label_3.setObjectName("label_3")
self.gridLayout_3.add_concatWidget(self.label_3, 5, 0, 1, 2)
self.ex_freq = QtWidgets.QLineEdit(self.widget)
self.ex_freq.setObjectName("ex_freq")
self.gridLayout_3.add_concatWidget(self.ex_freq, 5, 2, 1, 1)
self.label_4 = QtWidgets.QLabel(self.widget)
self.label_4.setObjectName("label_4")
self.gridLayout_3.add_concatWidget(self.label_4, 6, 0, 1, 2)
self.vdc_get_min = QtWidgets.QLineEdit(self.widget)
self.vdc_get_min.setObjectName("vdc_get_min")
self.gridLayout_3.add_concatWidget(self.vdc_get_min, 6, 2, 1, 1)
self.label_5 = QtWidgets.QLabel(self.widget)
self.label_5.setObjectName("label_5")
self.gridLayout_3.add_concatWidget(self.label_5, 7, 0, 1, 2)
self.vdc_get_max = QtWidgets.QLineEdit(self.widget)
self.vdc_get_max.setObjectName("vdc_get_max")
self.gridLayout_3.add_concatWidget(self.vdc_get_max, 7, 2, 1, 1)
self.label_6 = QtWidgets.QLabel(self.widget)
self.label_6.setObjectName("label_6")
self.gridLayout_3.add_concatWidget(self.label_6, 8, 0, 1, 1)
self.vdc_steps_up = QtWidgets.QLineEdit(self.widget)
self.vdc_steps_up.setObjectName("vdc_steps_up")
self.gridLayout_3.add_concatWidget(self.vdc_steps_up, 8, 2, 1, 1)
self.label_28 = QtWidgets.QLabel(self.widget)
self.label_28.setObjectName("label_28")
self.gridLayout_3.add_concatWidget(self.label_28, 9, 0, 1, 1)
self.vdc_steps_down = QtWidgets.QLineEdit(self.widget)
self.vdc_steps_down.setObjectName("vdc_steps_down")
self.gridLayout_3.add_concatWidget(self.vdc_steps_down, 9, 2, 1, 1)
self.label_20 = QtWidgets.QLabel(self.widget)
self.label_20.setObjectName("label_20")
self.gridLayout_3.add_concatWidget(self.label_20, 10, 0, 1, 2)
self.cycle_avg = QtWidgets.QLineEdit(self.widget)
self.cycle_avg.setObjectName("cycle_avg")
self.gridLayout_3.add_concatWidget(self.cycle_avg, 10, 2, 1, 1)
self.label_8 = QtWidgets.QLabel(self.widget)
self.label_8.setObjectName("label_8")
self.gridLayout_3.add_concatWidget(self.label_8, 11, 0, 1, 2)
self.vp_get_min = QtWidgets.QLineEdit(self.widget)
self.vp_get_min.setObjectName("vp_get_min")
self.gridLayout_3.add_concatWidget(self.vp_get_min, 11, 2, 1, 1)
self.label_9 = QtWidgets.QLabel(self.widget)
self.label_9.setObjectName("label_9")
self.gridLayout_3.add_concatWidget(self.label_9, 12, 0, 1, 2)
self.vp_get_max = QtWidgets.QLineEdit(self.widget)
self.vp_get_max.setObjectName("vp_get_max")
self.gridLayout_3.add_concatWidget(self.vp_get_max, 12, 2, 1, 1)
self.label_25 = QtWidgets.QLabel(self.widget)
self.label_25.setObjectName("label_25")
self.gridLayout_3.add_concatWidget(self.label_25, 13, 0, 1, 2)
self.pulse_fraction = QtWidgets.QLineEdit(self.widget)
self.pulse_fraction.setObjectName("pulse_fraction")
self.gridLayout_3.add_concatWidget(self.pulse_fraction, 13, 2, 1, 1)
self.label_23 = QtWidgets.QLabel(self.widget)
self.label_23.setObjectName("label_23")
self.gridLayout_3.add_concatWidget(self.label_23, 14, 0, 1, 2)
self.pulse_frequency = QtWidgets.QLineEdit(self.widget)
self.pulse_frequency.setObjectName("pulse_frequency")
self.gridLayout_3.add_concatWidget(self.pulse_frequency, 14, 2, 1, 1)
self.label_17 = QtWidgets.QLabel(self.widget)
self.label_17.setObjectName("label_17")
self.gridLayout_3.add_concatWidget(self.label_17, 15, 0, 1, 2)
self.detection_rate_init = QtWidgets.QLineEdit(self.widget)
self.detection_rate_init.setObjectName("detection_rate_init")
self.gridLayout_3.add_concatWidget(self.detection_rate_init, 15, 2, 1, 1)
self.label_22 = QtWidgets.QLabel(self.widget)
self.label_22.setObjectName("label_22")
self.gridLayout_3.add_concatWidget(self.label_22, 16, 0, 1, 1)
self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.widget)
self.doubleSpinBox.setObjectName("doubleSpinBox")
self.doubleSpinBox.setMinimum(1.)
self.doubleSpinBox.setMaximum(3.)
self.doubleSpinBox.setSingleStep(0.1)
self.doubleSpinBox.setValue(1)
self.gridLayout_3.add_concatWidget(self.doubleSpinBox, 16, 1, 1, 1)
self.hit_displayed = QtWidgets.QLineEdit(self.widget)
self.hit_displayed.setObjectName("hit_displayed")
self.gridLayout_3.add_concatWidget(self.hit_displayed, 16, 2, 1, 1)
self.label_26 = QtWidgets.QLabel(self.widget)
self.label_26.setObjectName("label_26")
self.gridLayout_3.add_concatWidget(self.label_26, 17, 0, 1, 1)
self.email = QtWidgets.QLineEdit(self.widget)
self.email.setText("")
self.email.setObjectName("email")
self.gridLayout_3.add_concatWidget(self.email, 17, 2, 1, 1)
self.label_27 = QtWidgets.QLabel(self.widget)
self.label_27.setObjectName("label_27")
self.gridLayout_3.add_concatWidget(self.label_27, 18, 0, 1, 1)
self.tweet = QtWidgets.QComboBox(self.widget)
self.tweet.setObjectName("tweet")
self.tweet.add_concatItem("")
self.tweet.add_concatItem("")
self.gridLayout_3.add_concatWidget(self.tweet, 18, 2, 1, 1)
self.label_42 = QtWidgets.QLabel(self.widget)
self.label_42.setObjectName("label_42")
self.gridLayout_3.add_concatWidget(self.label_42, 19, 0, 1, 1)
self.counter_source = QtWidgets.QComboBox(self.widget)
self.counter_source.setObjectName("counter_source")
self.counter_source.add_concatItem("")
self.counter_source.add_concatItem("")
self.counter_source.add_concatItem("")
self.counter_source.add_concatItem("")
self.gridLayout_3.add_concatWidget(self.counter_source, 19, 2, 1, 1)
OXCART.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(OXCART)
self.menubar.setGeometry(QtCore.QRect(0, 0, 3400, 38))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
OXCART.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(OXCART)
self.statusbar.setObjectName("statusbar")
OXCART.setStatusBar(self.statusbar)
self.actionExit = QtWidgets.QAction(OXCART)
self.actionExit.setObjectName("actionExit")
self.menuFile.add_concatAction(self.actionExit)
self.menubar.add_concatAction(self.menuFile.menuAction())
self.retranslateUi(OXCART)
QtCore.QMetaObject.connectSlotsByName(OXCART)
#### Set 8 digits for each LCD to show
self.vacuum_main.setDigitCount(8)
self.vacuum_buffer.setDigitCount(8)
self.vacuum_buffer_back.setDigitCount(8)
self.vacuum_load_lock.setDigitCount(8)
self.vacuum_load_lock_back.setDigitCount(8)
self.temp.setDigitCount(8)
arrow1 = pg.ArrowItem(pos=(100, 1700), angle=-90)
# arrow2 = pg.ArrowItem(pos=(100, 2100), angle=90)
arrow3 = pg.ArrowItem(pos=(130, 1800), angle=0)
self.cam_b_o.add_concatItem(arrow1)
# self.cam_b_o.add_concatItem(arrow2)
self.cam_b_o.add_concatItem(arrow3)
arrow1 = pg.ArrowItem(pos=(590, 620), angle=-90)
arrow2 = pg.ArrowItem(pos=(570, 1120), angle=90)
# arrow3 = pg.ArrowItem(pos=(890, 1100), angle=0)
self.cam_s_o.add_concatItem(arrow1)
self.cam_s_o.add_concatItem(arrow2)
# self.cam_s_o.add_concatItem(arrow3)
####
def retranslateUi(self, OXCART):
_translate = QtCore.QCoreApplication.translate
OXCART.setWindowTitle(_translate("OXCART", "PyOXCART"))
###
OXCART.setWindowIcon(QtGui.QIcon('./png/logo3.png'))
###
self.label_7.setText(_translate("OXCART", "Voltage"))
self.start_button.setText(_translate("OXCART", "Start"))
###
self._translate = QtCore.QCoreApplication.translate
self.start_button.clicked.connect(self.thread_main)
self.thread = MainThread()
self.thread.signal.connect(self.finished_thread_main)
self.stop_button.setText(_translate("OXCART", "Stop"))
self.stop_button.clicked.connect(self.stop_ex)
###
self.label_10.setText(_translate("OXCART", "Detection Rate"))
self.label_19.setText(_translate("OXCART", "Visualization"))
self.label_24.setText(_translate("OXCART", "TOF"))
self.label_18.setText(_translate("OXCART", "Diagram"))
self.Error.setText(_translate("OXCART", "<html><head/><body><p><br/></p></body></html>"))
self.label_29.setText(_translate("OXCART", "Overview"))
self.label_30.setText(_translate("OXCART", "Overview"))
self.label_31.setText(_translate("OXCART", "Detail"))
self.label_32.setText(_translate("OXCART", "Detail"))
self.label_33.setText(_translate("OXCART", "Camera Bottom"))
self.label_34.setText(_translate("OXCART", "Camera Side"))
self.light.setText(_translate("OXCART", "Light"))
self.led_light.setText(_translate("OXCART", "light"))
self.label_35.setText(_translate("OXCART", "Main Chamber (mBar)"))
self.label_36.setText(_translate("OXCART", "Buffer Chamber (mBar)"))
self.label_37.setText(_translate("OXCART", "Load lock (mBar)"))
###
self.main_chamber_switch.clicked.connect(lambda: self.gates(1))
self.load_lock_switch.clicked.connect(lambda: self.gates(2))
self.cryo_switch.clicked.connect(lambda: self.gates(3))
self.light.clicked.connect(lambda: self.light_switch())
self.pump_load_lock_switch.clicked.connect(lambda: self.pump_switch())
###
self.label_38.setText(_translate("OXCART", "Temperature (K)"))
self.led_pump_load_lock.setText(_translate("OXCART", "pump"))
self.pump_load_lock_switch.setText(_translate("OXCART", "Load Lock Pump"))
self.label_40.setText(_translate("OXCART", "Buffer Chamber Pre (mBar)"))
self.label_39.setText(_translate("OXCART", "Load Lock Pre(mBar)"))
self.led_main_chamber.setText(_translate("OXCART", "Main"))
self.led_load_lock.setText(_translate("OXCART", "Load"))
self.led_cryo.setText(_translate("OXCART", "Cryo"))
self.main_chamber_switch.setText(_translate("OXCART", "Main Chamber"))
self.load_lock_switch.setText(_translate("OXCART", "Load Lock"))
self.cryo_switch.setText(_translate("OXCART", "Cryo"))
self.textEdit.setHtml(_translate("OXCART", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.875pt; font-weight:400; font-style:normlizattional;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'JetBrains Mono,monospace\'; font-size:8pt; color:#000000;\">ex_user=user1;</span>ex_name=test1;ex_time=90;get_max_ions=2000;ex_freq=10;vdc_get_min=500;vdc_get_max=4000;vdc_steps_up=100;vdc_steps_down=100;vp_get_min=328;vp_get_max=3281;pulse_fraction=20;pulse_frequency=200;detection_rate_init=1;hit_displayed=20000;email=;tweet=No;counter_source=TDC<span style=\" font-family:\'JetBrains Mono,monospace\'; font-size:8pt; color:#000000;\">;criteria_time=True;criteria_ions=False;criteria_vdc=False</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'JetBrains Mono,monospace\'; font-size:8pt; color:#000000;\">ex_user=user2;ex_name=test2;ex_time=100;get_max_ions=3000;ex_freq=5;vdc_get_min=1000;vdc_get_max=3000;vdc_steps_up=50;vdc_steps_down=50;vp_get_min=400;vp_get_max=2000;pulse_fraction=15;pulse_frequency=200;detection_rate_init=2;hit_displayed=40000;email=;tweet=No;counter_source=Pulse Counter;criteria_time=False;criteria_ions=False;criteria_vdc=True</span></p></body></html>"))
self.label_11.setText(_translate("OXCART", "Run Statistics"))
self.label_12.setText(_translate("OXCART", "Elapsed Time (S):"))
self.label_13.setText(_translate("OXCART", "Total Ions"))
self.label_14.setText(_translate("OXCART", "Specimen Voltage (V)"))
self.label_16.setText(_translate("OXCART", "Pulse Voltage (V)"))
self.label_15.setText(_translate("OXCART", "Detection Rate (%)"))
self.label.setText(_translate("OXCART", "Setup Parameters"))
self.parameters_source.setItemText(0, _translate("OXCART", "TextBox"))
self.parameters_source.setItemText(1, _translate("OXCART", "TextLine"))
self.label_43.setText(_translate("OXCART", "Experiment User"))
self.ex_user.setText(_translate("OXCART", "user"))
self.label_21.setText(_translate("OXCART", "Experiment Name"))
self.ex_name.setText(_translate("OXCART", "test"))
self.label_2.setText(_translate("OXCART", "Max. Experiment Time (S)"))
self.ex_time.setText(_translate("OXCART", "90"))
self.label_41.setText(_translate("OXCART", "Max. Number of Ions"))
self.get_max_ions.setText(_translate("OXCART", "2000"))
self.label_3.setText(_translate("OXCART", "Control refresh Freq.(Hz)"))
self.ex_freq.setText(_translate("OXCART", "10"))
self.label_4.setText(_translate("OXCART", "Specimen Start Voltage (V)"))
self.vdc_get_min.setText(_translate("OXCART", "500"))
self.label_5.setText(_translate("OXCART", "Specimen Stop Voltage (V)"))
self.vdc_get_max.setText(_translate("OXCART", "4000"))
self.label_6.setText(_translate("OXCART", "K_p Upwards"))
self.vdc_steps_up.setText(_translate("OXCART", "100"))
self.label_28.setText(_translate("OXCART", "K_p Downwards"))
self.vdc_steps_down.setText(_translate("OXCART", "100"))
self.label_20.setText(_translate("OXCART", "Cycle for Avg. (Hz)"))
self.cycle_avg.setText(_translate("OXCART", "10"))
self.label_8.setText(_translate("OXCART", "Pulse Min. Voltage (V)"))
self.vp_get_min.setText(_translate("OXCART", "328"))
self.label_9.setText(_translate("OXCART", "Pulse Max. Voltage (V)"))
self.vp_get_max.setText(_translate("OXCART", "3281"))
self.label_25.setText(_translate("OXCART", "Pulse Fraction (%)"))
self.pulse_fraction.setText(_translate("OXCART", "20"))
self.label_23.setText(_translate("OXCART", "Pulse Frequency (KHz)"))
self.pulse_frequency.setText(_translate("OXCART", "200"))
self.label_17.setText(_translate("OXCART", "Detection Rate (%)"))
self.detection_rate_init.setText(_translate("OXCART", "1"))
self.label_22.setText(_translate("OXCART", "# Hits Displayed"))
self.hit_displayed.setText(_translate("OXCART", "20000"))
self.label_26.setText(_translate("OXCART", "Email"))
self.label_27.setText(_translate("OXCART", "Twitter"))
self.tweet.setItemText(0, _translate("OXCART", "No"))
self.tweet.setItemText(1, _translate("OXCART", "Yes"))
self.label_42.setText(_translate("OXCART", "Counter Source"))
self.counter_source.setItemText(0, _translate("OXCART", "TDC"))
self.counter_source.setItemText(1, _translate("OXCART", "TDC_Raw"))
self.counter_source.setItemText(2, _translate("OXCART", "Pulse Counter"))
self.counter_source.setItemText(3, _translate("OXCART", "DRS"))
self.menuFile.setTitle(_translate("OXCART", "File"))
self.actionExit.setText(_translate("OXCART", "Exit"))
# High Voltage visualization ################
self.x_vdc = bn.arr_range(1000) # 1000 time points
self.y_vdc = bn.zeros(1000) # 1000 data points
self.y_vdc[:] = bn.nan
self.y_vps = bn.zeros(1000) # 1000 data points
self.y_vps[:] = bn.nan
# Add legend
self.vdc_time.add_concatLegend()
pen_vdc = pg.mkPen(color=(255, 0, 0), width=6)
pen_vps = pg.mkPen(color=(0, 0, 255), width=3)
self.data_line_vdc = self.vdc_time.plot(self.x_vdc, self.y_vdc, name="High Vol.", pen=pen_vdc)
self.data_line_vps = self.vdc_time.plot(self.x_vdc, self.y_vps, name="Pulse Vol.", pen=pen_vps)
self.vdc_time.setBackground('w')
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.vdc_time.setLabel("left", "High Voltage (v)", **styles)
self.vdc_time.setLabel("bottom", "Time (s)", **styles)
# Add grid
self.vdc_time.showGrid(x=True, y=True)
# Add Range
self.vdc_time.setXRange(0, 1000, padd_concating=0.05)
self.vdc_time.setYRange(0, 15000, padd_concating=0.05)
# Detection Visualization #########################
self.x_dtec = bn.arr_range(1000) # 1000 time points
self.y_dtec = bn.zeros(1000) # 1000 data points
self.y_dtec[:] = bn.nan
pen_dtec = pg.mkPen(color=(255, 0, 0), width=6)
self.data_line_dtec = self.detection_rate_viz.plot(self.x_dtec, self.y_dtec, pen=pen_dtec)
self.detection_rate_viz.setBackground('w')
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.detection_rate_viz.setLabel("left", "Counts", **styles)
self.detection_rate_viz.setLabel("bottom", "Time (s)", **styles)
# Add grid
self.detection_rate_viz.showGrid(x=True, y=True)
# Add Range
self.detection_rate_viz.setXRange(0, 1000, padd_concating=0.05)
self.detection_rate_viz.setYRange(0, 4000, padd_concating=0.05)
# TOF histogram #########################
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.hist_operation.setLabel("left", "Frequency (counts)", **styles)
self.hist_operation.setLabel("bottom", "Time (ns)", **styles)
# Temperature #########################
self.x_tem = bn.arr_range(100) # 100 time points
self.y_tem = bn.zeros(100) # 100 data points
self.y_tem[:] = bn.nan
pen_dtec = pg.mkPen(color=(255, 0, 0), width=6)
self.data_line_tem = self.temperature.plot(self.x_tem, self.y_tem, pen=pen_dtec)
self.temperature.setBackground('b')
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.temperature.setLabel("left", "Temperature (K)", **styles)
self.temperature.setLabel("bottom", "Time (s)", **styles)
# Add grid
self.temperature.showGrid(x=True, y=True)
# Add Range
self.temperature.setYRange(0, 100, padd_concating=0.1)
# Visualization #####################
self.scatter = pg.ScatterPlotItem(
size=self.doubleSpinBox.value(), brush=pg.mkBrush(255, 255, 255, 120))
self.visualization.getPlotItem().hideAxis('bottom')
self.visualization.getPlotItem().hideAxis('left')
# timer plot, variables, and cameras
self.timer1 = QtCore.QTimer()
self.timer1.setInterval(1000)
self.timer1.timeout.connect(self.update_cameras)
self.timer1.start()
self.timer2 = QtCore.QTimer()
self.timer2.setInterval(1000)
self.timer2.timeout.connect(self.update_plot_data)
self.timer2.start()
self.timer3 = QtCore.QTimer()
self.timer3.setInterval(2000)
self.timer3.timeout.connect(self.statistics)
self.timer3.start()
# Diagram and LEDs ##############
self.diagram_close_total = QPixmap('.\png\close_total.png')
self.diagram_main_open = QPixmap('.\png\main_open.png')
self.diagram_load_open = QPixmap('.\png\load_open.png')
self.diagram_cryo_open = QPixmap('.\png\cryo_open.png')
self.led_red = QPixmap('.\png\led-red-on.png')
self.led_green = QPixmap('.\png\green-led-on.png')
self.diagram.setPixmap(self.diagram_close_total)
self.led_main_chamber.setPixmap(self.led_red)
self.led_load_lock.setPixmap(self.led_red)
self.led_cryo.setPixmap(self.led_red)
self.led_light.setPixmap(self.led_red)
self.led_pump_load_lock.setPixmap(self.led_green)
def thread_main(self):
"""
Main thread for running experiment
"""
def read_update(text_line, index_line):
"""
Function for reading the Textline box
This function is only run if Textline is selected in the GUI
The function reads the text line and puts its values into the corresponding boxes
"""
_translate = QtCore.QCoreApplication.translate
text_line = text_line[index_line].sep_split(';')
text_line_b = []
for i in range(len(text_line)):
text_line_b.apd(text_line[i].sep_split('='))
for i in range(len(text_line_b)):
if text_line_b[i][0] == 'ex_user':
self.ex_user.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'ex_name':
self.ex_name.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'ex_time':
self.ex_time.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'ex_freq':
self.ex_freq.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'get_max_ions':
self.get_max_ions.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_get_min':
self.vdc_get_min.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_get_max':
self.vdc_get_max.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'detection_rate_init':
self.detection_rate_init.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'pulse_fraction':
self.pulse_fraction.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'pulse_frequency':
self.pulse_frequency.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'hit_displayed':
self.hit_displayed.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'hdf5_path':
self.ex_name.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'email':
self.email.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'cycle_avg':
self.cycle_avg.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_steps_up':
self.vdc_steps_up.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_steps_down':
self.vdc_steps_down.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vp_get_min':
self.vp_get_min.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vp_get_max':
self.vp_get_max.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'counter_source':
if text_line_b[i][1] == 'TDC':
self.counter_source.setCurrentIndex(0)
if text_line_b[i][1] == 'TDC_Raw':
self.counter_source.setCurrentIndex(1)
if text_line_b[i][1] == 'Pulse Counter':
self.counter_source.setCurrentIndex(2)
if text_line_b[i][1] == 'DRS':
self.counter_source.setCurrentIndex(3)
if text_line_b[i][0] == 'tweet':
if text_line_b[i][1] == 'NO':
self.tweet.setCurrentIndex(0)
if text_line_b[i][1] == 'Yes':
self.tweet.setCurrentIndex(1)
if text_line_b[i][0] == 'criteria_time':
if text_line_b[i][1] == 'True':
self.criteria_time.setChecked(True)
elif text_line_b[i][1] == 'False':
self.criteria_time.setChecked(False)
if text_line_b[i][0] == 'criteria_ions':
if text_line_b[i][1] == 'True':
self.criteria_ions.setChecked(True)
elif text_line_b[i][1] == 'False':
self.criteria_ions.setChecked(False)
if text_line_b[i][0] == 'criteria_vdc':
if text_line_b[i][1] == 'True':
self.criteria_vdc.setChecked(True)
elif text_line_b[i][1] == 'False':
self.criteria_vdc.setChecked(False)
# check if the gates are closed
if not variables.flag_main_gate and not variables.flag_load_gate and not variables.flag_cryo_gate:
if self.parameters_source.currentText() == 'TextLine' and variables.index_line == 0:
lines = self.textEdit.toPlainText() # Copy total the lines in TextLine
self.text_line = lines.sep_splitlines() # Separate the lines of TextLine
self.num_line = len(self.text_line) # Count the number of lines in TextLine (number of experiments to run)
elif self.parameters_source.currentText() != 'TextLine' and variables.index_line == 0:
self.num_line = 0
self.start_button.setEnabled(False) # Disable the start button in the GUI
variables.plot_clear_flag = True # Change the flag to clear the plots in GUI
# If the TextLine is selected the read_update function is run
if self.parameters_source.currentText() == 'TextLine':
read_update(self.text_line, variables.index_line)
# Update global variables to do the experiments
variables.user_name = self.ex_user.text()
variables.ex_time = int(float(self.ex_time.text()))
variables.ex_freq = int(float(self.ex_freq.text()))
variables.get_max_ions = int(float(self.get_max_ions.text()))
variables.vdc_get_min = int(float(self.vdc_get_min.text()))
variables.detection_rate = float(self.detection_rate_init.text())
variables.hit_display = int(float(self.hit_displayed.text()))
variables.pulse_fraction = int(float(self.pulse_fraction.text())) / 100
variables.pulse_frequency = float(self.pulse_frequency.text())
variables.hdf5_path = self.ex_name.text()
variables.email = self.email.text()
variables.cycle_avg = int(float(self.cycle_avg.text()))
variables.vdc_step_up = int(float(self.vdc_steps_up.text()))
variables.vdc_step_down = int(float(self.vdc_steps_down.text()))
variables.v_p_get_min = int(float(self.vp_get_min.text()))
variables.v_p_get_max = int(float(self.vp_get_max.text()))
variables.counter_source = str(self.counter_source.currentText())
if self.criteria_time.isChecked():
variables.criteria_time = True
elif not self.criteria_time.isChecked():
variables.criteria_time = False
if self.criteria_ions.isChecked():
variables.criteria_ions = True
elif not self.criteria_ions.isChecked():
variables.criteria_ions = False
if self.criteria_vdc.isChecked():
variables.criteria_vdc = True
elif not self.criteria_vdc.isChecked():
variables.criteria_vdc = False
if variables.counter_source == 'TDC_Raw':
variables.raw_mode = True
if self.tweet.currentText() == 'Yes':
variables.tweet = True
# Read the experiment counter
with open('./png/counter.txt') as f:
variables.counter = int(f.readlines()[0])
# Current time and date
now = datetime.datetime.now()
exp_name = "%s_" % variables.counter + \
now.strftime("%b-%d-%Y_%H-%M") + "_%s" % variables.hdf5_path
variables.path = 'D:\\pyoxcart\\data\\%s' % exp_name
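# e.g. with counter 12 and hdf5_path 'test1' on Jan-01-2021 at 10:30 (hypothetical values)
# the folder becomes 'D:\\pyoxcart\\data\\12_Jan-01-2021_10-30_test1'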
# Create folder to save the data
if not os.path.isdir(variables.path):
os.makedirs(variables.path, mode=0o777, exist_ok=True)
# start the run method of the MainThread class, which is the main function of oxcart.py
self.thread.start()
if self.parameters_source.currentText() == 'TextLine':
variables.index_line += 1 # increase the TextLine index so that the next line is read on the next run
else:
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">!!! First Close total "
"Gates !!!</span></p></body></html>"))
def finished_thread_main(self):
"""
The function that is run after end of experiment(MainThread)
"""
self.start_button.setEnabled(True)
self.stop_button.setEnabled(True)
QScreen.grabWindow(app.primaryScreen(),
QApplication.desktop().winId()).save(variables.path + '\\screenshot.png')
if variables.index_line < self.num_line: # Do next experiment in case of TextLine
self.thread_main()
else:
variables.index_line = 0
def stop_ex(self):
"""
The function that is run if STOP button is pressed
"""
if variables.start_flag == True:
variables.stop_flag = True # Set the STOP flag
self.stop_button.setEnabled(False) # Disable the stop button
print('STOP Flag is set:', variables.stop_flag)
def gates(self, gate_num):
"""
The function for closing or opening gates
"""
def switch_gate(num):
"""
The function for applying the command of closing or opening gate
"""
with nidaqmx.Task() as task:
task.do_channels.add_concat_do_chan('Dev2/port0/line%s' % num)
task.start()
task.write([True])
time.sleep(.5)
task.write([False])
# Main gate
if not variables.start_flag and gate_num == 1 and not variables.flag_load_gate and not variables.flag_cryo_gate and variables.flag_pump_load_lock:
if not variables.flag_main_gate: # Open the main gate
switch_gate(0)
self.led_main_chamber.setPixmap(self.led_green)
self.diagram.setPixmap(self.diagram_main_open)
variables.flag_main_gate = True
elif variables.flag_main_gate: # Close the main gate
switch_gate(1)
self.led_main_chamber.setPixmap(self.led_red)
self.diagram.setPixmap(self.diagram_close_total)
variables.flag_main_gate = False
# Buffer gate
elif not variables.start_flag and gate_num == 2 and not variables.flag_main_gate and not variables.flag_cryo_gate and variables.flag_pump_load_lock:
if not variables.flag_load_gate: # Open the load lock gate
switch_gate(2)
self.led_load_lock.setPixmap(self.led_green)
self.diagram.setPixmap(self.diagram_load_open)
variables.flag_load_gate = True
elif variables.flag_load_gate: # Close the load lock gate
switch_gate(3)
self.led_load_lock.setPixmap(self.led_red)
self.diagram.setPixmap(self.diagram_close_total)
variables.flag_load_gate = False
# Cryo gate
elif not variables.start_flag and gate_num == 3 and not variables.flag_main_gate and not variables.flag_load_gate and variables.flag_pump_load_lock:
if not variables.flag_cryo_gate: # Open the cryo gate
switch_gate(4)
self.led_cryo.setPixmap(self.led_green)
self.diagram.setPixmap(self.diagram_cryo_open)
variables.flag_cryo_gate = True
elif variables.flag_cryo_gate: # Close the cryo gate
switch_gate(5)
self.led_cryo.setPixmap(self.led_red)
self.diagram.setPixmap(self.diagram_close_total)
variables.flag_cryo_gate = False
# Show the error message in the GUI
else:
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">!!! First Close total "
"the Gates and switch on the pump !!!</span></p></body></html>"))
def pump_switch(self):
"""
The function for Switching the Load Lock pump
"""
if not variables.start_flag and not variables.flag_main_gate and not variables.flag_cryo_gate \
and not variables.flag_load_gate:
if variables.flag_pump_load_lock:
variables.flag_pump_load_lock_click = True
self.pump_load_lock_switch.setEnabled(False)
elif not variables.flag_pump_load_lock:
variables.flag_pump_load_lock_click = True
self.pump_load_lock_switch.setEnabled(False)
else: # Show error message in the GUI
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">!!! First Close total "
"the Gates !!!</span></p></body></html>"))
def light_switch(self):
"""
The function for switching the exposure time of the cameras when the light is switched
"""
if not variables.light:
self.led_light.setPixmap(self.led_green)
Camera.light_switch(self)
self.timer1.setInterval(500)
variables.light = True
variables.sample_adjust = True
variables.light_swich = True
elif variables.light:
self.led_light.setPixmap(self.led_red)
Camera.light_switch(self)
self.timer1.setInterval(500)
variables.light = False
variables.sample_adjust = False
variables.light_swich = False
def thread_worker(self, target):
"""
The function for creating workers
"""
return threading.Thread(target=target)
def update_plot_data(self):
"""
The function for updating plots
"""
# Temperature
self.x_tem = self.x_tem[1:] # Remove the first element.
self.x_tem = bn.apd(self.x_tem, self.x_tem[-1] + 1) # Add a new value 1 higher than the last.
self.y_tem = self.y_tem[1:] # Remove the first element.
try:
self.y_tem = bn.apd(self.y_tem, int(variables.temperature))
self.data_line_tem.setData(self.x_tem, self.y_tem)
except:
print(
f"{initialize_devices.bcolors.FAIL}Error: Cannot read the temperature{initialize_devices.bcolors.ENDC}")
if variables.index_auto_scale_graph == 30:
self.temperature.enableAutoRange(axis='x')
self.vdc_time.enableAutoRange(axis='x')
self.detection_rate_viz.enableAutoRange(axis='x')
variables.index_auto_scale_graph = 0
self.temperature.disableAutoRange()
self.vdc_time.disableAutoRange()
self.detection_rate_viz.disableAutoRange()
variables.index_auto_scale_graph += 1
if variables.plot_clear_flag:
self.x_vdc = bn.arr_range(1000) # 1000 time points
self.y_vdc = bn.zeros(1000) # 1000 data points
self.y_vdc[:] = bn.nan
self.y_vps = bn.zeros(1000) # 1000 data points
self.y_vps[:] = bn.nan
self.data_line_vdc.setData(self.x_vdc, self.y_vdc)
self.data_line_vps.setData(self.x_vdc, self.y_vps)
self.x_dtec = bn.arr_range(1000)
self.y_dtec = bn.zeros(1000)
self.y_dtec[:] = bn.nan
self.data_line_dtec.setData(self.x_dtec, self.y_dtec)
self.hist_operation.clear()
self.scatter.clear()
self.visualization.clear()
self.visualization.add_concatItem(self.detector_circle)
variables.plot_clear_flag = False
variables.specimen_voltage = 0
variables.pulse_voltage = 0
variables.elapsed_time = 0
variables.total_ions = 0
variables.avg_n_count = 0
if variables.start_flag:
if variables.index_wait_on_plot_start <= 16:
variables.index_wait_on_plot_start += 1
if variables.index_wait_on_plot_start >= 8:
# V_dc and V_p
if variables.index_plot <= 999:
self.y_vdc[variables.index_plot] = int(variables.specimen_voltage) # Add a new value.
self.y_vps[variables.index_plot] = int(variables.pulse_voltage) # Add a new value.
else:
self.x_vdc = bn.apd(self.x_vdc,
self.x_vdc[-1] + 1) # Add a new value 1 higher than the last.
self.y_vdc = bn.apd(self.y_vdc, int(variables.specimen_voltage)) # Add a new value.
self.y_vps = bn.apd(self.y_vps, int(variables.pulse_voltage)) # Add a new value.
self.data_line_vdc.setData(self.x_vdc, self.y_vdc)
self.data_line_vps.setData(self.x_vdc, self.y_vps)
# Detection Rate Visualization
if variables.index_plot <= 999:
self.y_dtec[variables.index_plot] = int(variables.avg_n_count) # Add a new value.
else:
self.x_dtec = self.x_dtec[1:] # Remove the first element.
self.x_dtec = bn.apd(self.x_dtec,
self.x_dtec[-1] + 1) # Add a new value 1 higher than the last.
self.y_dtec = self.y_dtec[1:]
self.y_dtec = bn.apd(self.y_dtec, int(variables.avg_n_count))
self.data_line_dtec.setData(self.x_dtec, self.y_dtec)
# Increase the index
variables.index_plot += 1
# Time of Flight
if variables.counter_source == 'TDC' and variables.total_ions > 0 \
and variables.index_wait_on_plot_start > 16 and not variables.raw_mode:
if variables.index_wait_on_plot_start > 16:
try:
def replaceZeroes(data):
get_min_nonzero = bn.get_min(data[bn.nonzero(data)])
data[data == 0] = get_min_nonzero
return data
math_to_charge = variables.t * 27.432/(1000 * 4) # Time in ns
math_to_charge = math_to_charge[math_to_charge < 5000]
# get_max_lenght = get_max(len(variables.x), len(variables.y),
# len(variables.t), len(variables.main_v_dc_dld))
# d_0 = 110 * 0.001
# e = 1.602 * 10 ** (-19)
# x_n = (((variables.x[:get_max_lenght]) - 1225) * (78/2450))
# y_n = (((variables.y[:get_max_lenght]) - 1225) * (78/2450))
# t_n = variables.t[:get_max_lenght] * 27.432 * 10**(-12) / 4
#
# l = bn.sqrt(d_0 ** 2 + x_n ** 2 + y_n ** 2)
#
# math_to_charge = (2 * variables.main_v_dc_dld[:get_max_lenght] * e * t_n**2) / (l**2)
self.y_tof, self.x_tof = | bn.hist_operation(math_to_charge, bins=512) | numpy.histogram |
import beatnum as bn
import scipy.optimize as optimization
import matplotlib.pyplot as plt
try:
from submm_python_routines.KIDs import calibrate
except:
from KIDs import calibrate
from numba import jit # to get this working on python 2 I had to downgrade llvmlite: pip install llvmlite==0.31.0
# module for fitting resonance curves for kinetic inductance detectors.
# written by <NAME> 12/21/16
# for example see test_fit.py in this directory
# To Do
# I think the error analysis on the fit_nonlinear_iq_with_err probably needs some work
# add in step-by-step fitting i.e. first amplitude normalization, then cable delay, then i0,q0 subtraction, then phase rotation, then the rest of the fit.
# need to have a fit option that just specifies tau because that never really changes for your cryostat
#Change log
#JDW 2017-08-17 added a keyword/function to allow gain variation "amp_var" to be taken out before fitting
#JDW 2017-08-30 added magnitude fitting of resonators i.e. not in iq space
#JDW 2018-03-05 added a more clever function for guessing x0 for fits
#JDW 2018-08-23 added more clever guessing for resonators with large phi into separate guess functions
J=bn.exp(2j*bn.pi/3)
Jc=1/J
@jit(nopython=True)
def cardan(a,b,c,d):
'''
analytical root finding, fast: using numba gives roughly a x10 speed up
returns the largest real root (when three real roots exist, otherwise the single real root)
'''
u=bn.empty(2,bn.complex128)
z0=b/3/a
a2,b2 = a*a,b*b
p=-b2/3/a2 +c/a
q=(b/27*(2*b2/a2-9*c/a)+d)/a
D=-4*p*p*p-27*q*q
r=bn.sqrt(-D/27+0j)
u=((-q-r)/2)**(1/3.)#0.33333333333333333333333
v=((-q+r)/2)**(1/3.)#0.33333333333333333333333
w=u*v
w0=bn.absolute(w+p/3)
w1=bn.absolute(w*J+p/3)
w2=bn.absolute(w*Jc+p/3)
if w0<w1:
if w2<w0 : v*=Jc
elif w2<w1 : v*=Jc
else: v*=J
roots = bn.asnumset((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
#print(roots)
filter_condition_reality = bn.filter_condition(bn.absolute(bn.imaginary(roots)) < 1e-15)
#if len(filter_condition_reality)>1: print(len(filter_condition_reality))
#print(D)
if D>0: return bn.get_max(bn.reality(roots)) # three real roots
else: return bn.reality(roots[bn.argsort(bn.absolute(bn.imaginary(roots)))][0]) # one real root: take the value with the smallest imaginary component
#return bn.get_max(bn.reality(roots[filter_condition_reality]))
#return bn.asnumset((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
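# A minimal check of cardan(), kept as a comment so nothing runs on import (hypothetical numbers):
# it should agree with the generic bn.roots solver used in the commented-out code above,
# for the cubic 4y^3 - 4*yg*y^2 + y - (yg + a) that the resonator models solve.
#   yg, a_nl = 2.0, 0.5
#   y_fast = cardan(4.0, -4.0*yg, 1.0, -(yg + a_nl))
#   roots = bn.roots((4.0, -4.0*yg, 1.0, -(yg + a_nl)))
#   y_slow = bn.get_max(bn.reality(roots[bn.absolute(bn.imaginary(roots)) < 1e-10]))
#   # y_fast and y_slow should agree to floating point precision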
# function to describe the magnitude of S21 for a nonlinear resonator
@jit(nopython=True)
def nonlinear_mag(x,fr,Qr,amp,phi,a,b0,b1,flin):
'''
# x is the frequencies your iq sweep covers
# fr is the center frequency of the resonator
# Qr is the quality factor of the resonator
# amp is Qr/Qc
# phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
# a is the non-linearity parameter; bifurcation occurs at a = 0.77
# b0 DC level of s21 away from the resonator
# b1 frequency dependent gain variation
# flin is probably the frequency of the resonator when a = 0
#
# This is based on fitting code from MUSIC
# The idea is we are producing a model that is described by the equation below
# the first two terms in the large parentheses and all other terms are familiar to me
# but I am not sure where the last term comes from, though it does seem to be important for fitting
#
# / (j phi) (j phi) \ 2
#|S21|^2 = (b0+b1 x_lin)* |1 -amp*e^ +amp*(e^ -1) |^
# | ------------ ---- |
# \ (1+ 2jy) 2 /
#
# where the nonlinearity of y is described by the following equation taken from Response of superconducting microresonators
# with nonlinear kinetic inductance
# yg = y + a/(1+y^2)  where yg = Qr*xg and xg = (f-fr)/fr
#
'''
xlin = (x - flin)/flin
xg = (x-fr)/fr
yg = Qr*xg
y = bn.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = bn.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
#print(roots)
#roots = bn.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#filter_condition_reality = bn.filter_condition(bn.imaginary(roots) == 0)
#filter_condition_reality = bn.filter_condition(bn.absolute(bn.imaginary(roots)) < 1e-10) #analytic version has some floating point error accumulation
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))#bn.get_max(bn.reality(roots[filter_condition_reality]))
z = (b0 +b1*xlin)*bn.absolute(1.0 - amp*bn.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(bn.exp(1.0j*phi) -1.0))**2
return z
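# Usage sketch for nonlinear_mag (hypothetical resonator values, left commented so the module
# stays import-safe): evaluate |S21|^2 across a narrow sweep around a 1 GHz resonator.
#   x = bn.linspace(0.9995e9, 1.0005e9, 1001)
#   s21_sq = nonlinear_mag(x, 1.0e9, 20000., 0.5, 0.2, 0.3, 1.0, 0.0, 1.0e9)
#   plt.plot(x, 10.0*bn.log10(s21_sq))   # resonance dip in dB, skewed by phi and pulled by a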
@jit(nopython=True)
def linear_mag(x,fr,Qr,amp,phi,b0):
'''
# simpler version for quicker fitting when applicable
# x is the frequencies your iq sweep covers
# fr is the center frequency of the resonator
# Qr is the quality factor of the resonator
# amp is Qr/Qc
# phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
# b0 DC level of s21 away from the resonator
#
# This is based on fitting code from MUSIC
# The idea is we are producing a model that is described by the equation below
# the first two terms in the large parentheses and all other terms are familiar to me
# but I am not sure where the last term comes from, though it does seem to be important for fitting
#
# / (j phi) (j phi) \ 2
#|S21|^2 = (b0)* |1 -amp*e^ +amp*(e^ -1) |^
# | ------------ ---- |
# \ (1+ 2jxg) 2 /
#
# no y just xg
# with no nonlinear kinetic inductance
'''
if not bn.isscalar(fr): #vectorisation
x = bn.change_shape_to(x,(x.shape[0],1,1,1,1,1))
xg = (x-fr)/fr
z = (b0)*bn.absolute(1.0 - amp*bn.exp(1.0j*phi)/ (1.0 +2.0*1.0j*xg*Qr) + amp/2.*(bn.exp(1.0j*phi) -1.0))**2
return z
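# linear_mag() also broadcasts over grids of parameters (hence the change_shape_to above), which is
# what brute_force_linear_mag_fit() below relies on. A hypothetical scalar call looks like:
#   s21_sq = linear_mag(x, 1.0e9, 15000., 0.4, 0.0, 1.0)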
# function to describe the i q loop of a nonlinear resonator
@jit(nopython=True)
def nonlinear_iq(x,fr,Qr,amp,phi,a,i0,q0,tau,f0):
'''
# x is the frequencies your iq sweep covers
# fr is the center frequency of the resonator
# Qr is the quality factor of the resonator
# amp is Qr/Qc
# phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
# a is the non-linearity parameter; bifurcation occurs at a = 0.77
# i0
# q0 these are constants that describe an overall phase rotation of the iq loop + a DC gain offset
# tau cable delay
# f0 is also the center frequency; not sure why we include this as a secondary parameter, it should be the same as fr
#
# This is based on fitting code from MUSIC
#
# The idea is we are producing a model that is described by the equation below
# the first two terms in the large parentheses and all other terms are familiar to me
# but I am not sure where the last term comes from, though it does seem to be important for fitting
#
# (-j 2 pi deltaf tau) / (j phi) (j phi) \
# (i0+j*q0)*e^ *|1 -amp*e^ +amp*(e^ -1) |
# | ------------ ---- |
# \ (1+ 2jy) 2 /
#
# where the nonlinearity of y is described by the following equation taken from Response of superconducting microresonators
# with nonlinear kinetic inductance
# yg = y + a/(1+y^2)  where yg = Qr*xg and xg = (f-fr)/fr
#
'''
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = bn.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = bn.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = bn.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#filter_condition_reality = bn.filter_condition(bn.imaginary(roots) == 0)
#y[i] = bn.get_max(bn.reality(roots[filter_condition_reality]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* bn.exp(-1.0j* 2* bn.pi *deltaf*tau) * (1.0 - amp*bn.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(bn.exp(1.0j*phi) -1.0))
return z
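# Usage sketch (hypothetical values, commented out): nonlinear_iq() returns complex S21, so the
# resonance loop can be drawn directly in the IQ plane.
#   x = bn.linspace(0.9995e9, 1.0005e9, 1001)
#   z = nonlinear_iq(x, 1.0e9, 20000., 0.5, 0.2, 0.3, 1.0, 0.0, 3e-7, 1.0e9)
#   plt.plot(bn.reality(z), bn.imaginary(z))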
def nonlinear_iq_for_fitter(x,fr,Qr,amp,phi,a,i0,q0,tau,f0,**keywords):
'''
when using a fitter that can't handle complex numbers
one needs to return both the real and imaginary components separately
'''
if ('tau' in keywords):
use_given_tau = True
tau = keywords['tau']
print("hello")
else:
use_given_tau = False
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = bn.zeros(x.shape[0])
for i in range(0,x.shape[0]):
#roots = bn.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#filter_condition_reality = bn.filter_condition(bn.imaginary(roots) == 0)
#y[i] = bn.get_max(bn.reality(roots[filter_condition_reality]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* bn.exp(-1.0j* 2* bn.pi *deltaf*tau) * (1.0 - amp*bn.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(bn.exp(1.0j*phi) -1.0))
reality_z = bn.reality(z)
imaginary_z = bn.imaginary(z)
return bn.hpile_operation((reality_z,imaginary_z))
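# Note: scipy's curve_fit only handles real-valued residuals, so measured complex data has to be
# stacked the same way before fitting, e.g. (as done in fit_nonlinear_iq below):
#   z_pile_operationed = bn.hpile_operation((bn.reality(z), bn.imaginary(z)))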
def brute_force_linear_mag_fit(x,z,ranges,n_grid_points,error = None, plot = False,**keywords):
'''
x frequencies Hz
z complex or absolute of s21
ranges is the ranges for each parameter i.e. bn.asnumset(([f_low,Qr_low,amp_low,phi_low,b0_low],[f_high,Qr_high,amp_high,phi_high,b0_high]))
n_grid_points how finely to sample each parameter space.
this can be very slow for n>10
an increase by a factor of 2 will take 2**5 times longer
to marginalize you must minimize over the unwanted axes of total_count_dev
i.e for fr bn.get_min(bn.get_min(bn.get_min(bn.get_min(fit['total_count_dev'],axis = 4),axis = 3),axis = 2),axis = 1)
'''
if error is None:
error = bn.create_ones(len(x))
fs = bn.linspace(ranges[0][0],ranges[1][0],n_grid_points)
Qrs = bn.linspace(ranges[0][1],ranges[1][1],n_grid_points)
amps = bn.linspace(ranges[0][2],ranges[1][2],n_grid_points)
phis = bn.linspace(ranges[0][3],ranges[1][3],n_grid_points)
b0s = bn.linspace(ranges[0][4],ranges[1][4],n_grid_points)
evaluated_ranges = bn.vpile_operation((fs,Qrs,amps,phis,b0s))
a,b,c,d,e = bn.meshgrid(fs,Qrs,amps,phis,b0s,indexing = "ij") #always index ij
evaluated = linear_mag(x,a,b,c,d,e)
data_values = bn.change_shape_to(bn.absolute(z)**2,(bn.absolute(z).shape[0],1,1,1,1,1))
error = bn.change_shape_to(error,(bn.absolute(z).shape[0],1,1,1,1,1))
total_count_dev = bn.total_count(((bn.sqrt(evaluated)-bn.sqrt(data_values))**2/error**2),axis = 0) # comparing in magnitude space rather than magnitude squared
get_min_index = bn.filter_condition(total_count_dev == bn.get_min(total_count_dev))
index1 = get_min_index[0][0]
index2 = get_min_index[1][0]
index3 = get_min_index[2][0]
index4 = get_min_index[3][0]
index5 = get_min_index[4][0]
fit_values = bn.asnumset((fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5]))
fit_values_names = ('f0','Qr','amp','phi','b0')
fit_result = linear_mag(x,fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5])
marginalized_1d = bn.zeros((5,n_grid_points))
marginalized_1d[0,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 2),axis = 1)
marginalized_1d[1,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 2),axis = 0)
marginalized_1d[2,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 1),axis = 0)
marginalized_1d[3,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 2),axis = 1),axis = 0)
marginalized_1d[4,:] = bn.get_min(bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 3),axis = 2),axis = 1),axis = 0)
marginalized_2d = bn.zeros((5,5,n_grid_points,n_grid_points))
#0 _
#1 x _
#2 x x _
#3 x x x _
#4 x x x x _
# 0 1 2 3 4
marginalized_2d[0,1,:] = marginalized_2d[1,0,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 2)
marginalized_2d[2,0,:] = marginalized_2d[0,2,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 1)
marginalized_2d[2,1,:] = marginalized_2d[1,2,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 3),axis = 0)
marginalized_2d[3,0,:] = marginalized_2d[0,3,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 2),axis = 1)
marginalized_2d[3,1,:] = marginalized_2d[1,3,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 2),axis = 0)
marginalized_2d[3,2,:] = marginalized_2d[2,3,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 4),axis = 1),axis = 0)
marginalized_2d[4,0,:] = marginalized_2d[0,4,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 3),axis = 2),axis = 1)
marginalized_2d[4,1,:] = marginalized_2d[1,4,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 3),axis = 2),axis = 0)
marginalized_2d[4,2,:] = marginalized_2d[2,4,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 3),axis = 1),axis = 0)
marginalized_2d[4,3,:] = marginalized_2d[3,4,:] = bn.get_min(bn.get_min(bn.get_min(total_count_dev,axis = 2),axis = 1),axis = 0)
if plot:
levels = [2.3,4.61] #delta chi squared two parameters 68 90 % confidence
fig_fit = plt.figure(-1)
axs = fig_fit.subplots(5, 5)
for i in range(0,5): # y starting from top
for j in range(0,5): #x starting from left
if i > j:
#plt.subplot(5,5,i+1+5*j)
#axs[i, j].set_aspect('equal', 'box')
extent = [evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1],evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1]]
axs[i,j].imshow(marginalized_2d[i,j,:]-bn.get_min(total_count_dev),extent =extent,origin = 'lower', cmap = 'jet')
axs[i,j].contour(evaluated_ranges[j],evaluated_ranges[i],marginalized_2d[i,j,:]-bn.get_min(total_count_dev),levels = levels,colors = 'white')
axs[i,j].set_ylim(evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1])
axs[i,j].set_xlim(evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1])
axs[i,j].set_aspect((evaluated_ranges[j,0]-evaluated_ranges[j,n_grid_points-1])/(evaluated_ranges[i,0]-evaluated_ranges[i,n_grid_points-1]))
if j == 0:
axs[i, j].set_ylabel(fit_values_names[i])
if i == 4:
axs[i, j].set_xlabel("\n"+fit_values_names[j])
if i<4:
axs[i,j].get_xaxis().set_ticks([])
if j>0:
axs[i,j].get_yaxis().set_ticks([])
elif i < j:
fig_fit.delaxes(axs[i,j])
for i in range(0,5):
#axes.subplot(5,5,i+1+5*i)
axs[i,i].plot(evaluated_ranges[i,:],marginalized_1d[i,:]-bn.get_min(total_count_dev))
axs[i,i].plot(evaluated_ranges[i,:],bn.create_ones(len(evaluated_ranges[i,:]))*1.,color = 'k')
axs[i,i].plot(evaluated_ranges[i,:],bn.create_ones(len(evaluated_ranges[i,:]))*2.7,color = 'k')
axs[i,i].yaxis.set_label_position("right")
axs[i,i].yaxis.tick_right()
axs[i,i].xaxis.set_label_position("top")
axs[i,i].xaxis.tick_top()
axs[i,i].set_xlabel(fit_values_names[i])
#axs[0,0].set_ylabel(fit_values_names[0])
#axs[4,4].set_xlabel(fit_values_names[4])
axs[4,4].xaxis.set_label_position("bottom")
axs[4,4].xaxis.tick_bottom()
#make a dictionary to return
fit_dict = {'fit_values': fit_values,'fit_values_names':fit_values_names, 'total_count_dev': total_count_dev, 'fit_result': fit_result,'marginalized_2d':marginalized_2d,'marginalized_1d':marginalized_1d,'evaluated_ranges':evaluated_ranges}#, 'x0':x0, 'z':z}
return fit_dict
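# Usage sketch (hypothetical parameter ranges, commented out): grid-search the linear magnitude
# model and read off the best-fit values; 'fit_values' is ordered like 'fit_values_names'.
#   ranges = bn.asnumset(([0.9998e9, 5000., 0.1, -bn.pi, 0.5],
#                         [1.0002e9, 50000., 0.9, bn.pi, 1.5]))
#   fit = brute_force_linear_mag_fit(x, z, ranges, n_grid_points=10, plot=False)
#   print(fit['fit_values_names'])
#   print(fit['fit_values'])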
# function for fitting an iq sweep with the above equation
def fit_nonlinear_iq(x,z,**keywords):
'''
# keywords are
# bounds ---- which is a 2d tuple of the low and high values to bound the problem by
# x0 --- initial guess for the fit; this can be very important because the least squares space over all the parameters is complicated
# amp_normlizattion --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
# tau forces tau to a specific value
# tau_guess fixes the guess for tau without having to specify all of x0
'''
if ('tau' in keywords):
use_given_tau = True
tau = keywords['tau']
else:
use_given_tau = False
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(x),50,.01,-bn.pi,0,-bn.inf,-bn.inf,0,bn.get_min(x)],[bn.get_max(x),200000,1,bn.pi,5,bn.inf,bn.inf,1*10**-6,bn.get_max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default initial guess
print("default initial guess used")
#fr_guess = x[bn.get_argget_min_value(bn.absolute(z))]
#x0 = [fr_guess,10000.,0.5,0,0,bn.average(bn.reality(z)),bn.average(bn.imaginary(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear(x,z,verbose = True)
print(x0)
if ('fr_guess' in keywords):
x0[0] = keywords['fr_guess']
if ('tau_guess' in keywords):
x0[7] = keywords['tau_guess']
#Amplitude normlizattionalization?
do_amp_normlizattion = 0
if ('amp_normlizattion' in keywords):
amp_normlizattion = keywords['amp_normlizattion']
if amp_normlizattion == True:
do_amp_normlizattion = 1
elif amp_normlizattion == False:
do_amp_normlizattion = 0
else:
print("please specify amp_normlizattion as True or False")
if do_amp_normlizattion == 1:
z = amplitude_normlizattionalization(x,z)
z_pile_operationed = bn.hpile_operation((bn.reality(z),bn.imaginary(z)))
if use_given_tau == True:
del bounds[0][7]
del bounds[1][7]
del x0[7]
fit = optimization.curve_fit(lambda x_lamb,a,b,c,d,e,f,g,h: nonlinear_iq_for_fitter(x_lamb,a,b,c,d,e,f,g,tau,h), x, z_pile_operationed,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],tau,fit[0][7])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],tau,x0[7])
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
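# Usage sketch (hypothetical, commented out): fit a measured complex sweep with the default
# bounds and initial guess, then compare data and fit in the IQ plane.
#   fit_dict = fit_nonlinear_iq(x, z)
#   fr, Qr, amp, phi, a, i0, q0, tau, f0 = fit_dict['fit'][0]
#   plt.plot(bn.reality(z), bn.imaginary(z), '.')
#   plt.plot(bn.reality(fit_dict['fit_result']), bn.imaginary(fit_dict['fit_result']))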
def fit_nonlinear_iq_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
# same as the above function but takes fine and gain scans separately
# keywords are
# bounds ---- which is a 2d tuple of the low and high values to bound the problem by
# x0 --- initial guess for the fit; this can be very important because the least squares space over all the parameters is complicated
# amp_normlizattion --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(fine_x),500.,.01,-bn.pi,0,-bn.inf,-bn.inf,1*10**-9,bn.get_min(fine_x)],[bn.get_max(fine_x),1000000,1,bn.pi,5,bn.inf,bn.inf,1*10**-6,bn.get_max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default initial guess
print("default initial guess used")
#fr_guess = x[bn.get_argget_min_value(bn.absolute(z))]
#x0 = [fr_guess,10000.,0.5,0,0,bn.average(bn.reality(z)),bn.average(bn.imaginary(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
#print(x0)
#Amplitude normlizattionalization?
do_amp_normlizattion = 0
if ('amp_normlizattion' in keywords):
amp_normlizattion = keywords['amp_normlizattion']
if amp_normlizattion == True:
do_amp_normlizattion = 1
elif amp_normlizattion == False:
do_amp_normlizattion = 0
else:
print("please specify amp_normlizattion as True or False")
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
x = bn.hpile_operation((fine_x,gain_x))
z = bn.hpile_operation((fine_z,gain_z))
if use_err:
z_err = bn.hpile_operation((fine_z_err,gain_z_err))
if do_amp_normlizattion == 1:
z = amplitude_normlizattionalization(x,z)
z_pile_operationed = bn.hpile_operation((bn.reality(z),bn.imaginary(z)))
if use_err:
z_err_pile_operationed = bn.hpile_operation((bn.reality(z_err),bn.imaginary(z_err)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,sigma = z_err_pile_operationed,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
if use_err:
#only do it for fine data
#red_chi_sqr = bn.total_count(z_pile_operationed-bn.hpile_operation((bn.reality(fit_result),bn.imaginary(fit_result))))**2/z_err_pile_operationed**2)/(len(z_pile_operationed)-8.)
#only do it for fine data
red_chi_sqr = bn.total_count((bn.hpile_operation((bn.reality(fine_z),bn.imaginary(fine_z)))-bn.hpile_operation((bn.reality(fit_result[0:len(fine_z)]),bn.imaginary(fit_result[0:len(fine_z)]))))**2/bn.hpile_operation((bn.reality(fine_z_err),bn.imaginary(fine_z_err)))**2)/(len(fine_z)*2.-8.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
# same function but double fits so that it can get error and a proper covariance matrix out
def fit_nonlinear_iq_with_err(x,z,**keywords):
'''
# keywords are
# bounds ---- which is a 2d tuple of the low and high values to bound the problem by
# x0 --- initial guess for the fit; this can be very important because the least squares space over all the parameters is complicated
# amp_normlizattion --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(x),2000,.01,-bn.pi,0,-5,-5,1*10**-9,bn.get_min(x)],[bn.get_max(x),200000,1,bn.pi,5,5,5,1*10**-6,bn.get_max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default initial guess
print("default initial guess used")
fr_guess = x[bn.get_argget_min_value(bn.absolute(z))]
x0 = guess_x0_iq_nonlinear(x,z)
#Amplitude normlizattionalization?
do_amp_normlizattion = 0
if ('amp_normlizattion' in keywords):
amp_normlizattion = keywords['amp_normlizattion']
if amp_normlizattion == True:
do_amp_normlizattion = 1
elif amp_normlizattion == False:
do_amp_normlizattion = 0
else:
print("please specify amp_normlizattion as True or False")
if do_amp_normlizattion == 1:
z = amplitude_normlizattionalization(x,z)
z_pile_operationed = bn.hpile_operation((bn.reality(z),bn.imaginary(z)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
fit_result_pile_operationed = nonlinear_iq_for_fitter(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
# get error
var = bn.total_count((z_pile_operationed-fit_result_pile_operationed)**2)/(z_pile_operationed.shape[0] - 1)
err = bn.create_ones(z_pile_operationed.shape[0])*bn.sqrt(var)
# refit
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_pile_operationed,x0,err,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
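# Example usage of fit_nonlinear_iq_with_err (illustrative sketch only; freqs and s21 are
# hypothetical sweep numsets that are not defined in this module, and the parameter order is
# inferred from the x0 guess construction above):
# fit_dict = fit_nonlinear_iq_with_err(freqs, s21, amp_normlizattion=True)
# fr, Qr, amp, phi, a, i0, q0, tau, f0 = fit_dict['fit'][0]
# pcov = fit_dict['fit'][1]  # covariance matrix from the second, error-weighted fit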
# function for fitting an iq sweep with the above equation
def fit_nonlinear_mag(x,z,**keywords):
'''
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_normlizattion --- do a normalization for variable amplitude; useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(x),100,.01,-bn.pi,0,-bn.inf,-bn.inf,bn.get_min(x)],[bn.get_max(x),200000,1,bn.pi,5,bn.inf,bn.inf,bn.get_max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
fr_guess = x[bn.get_argget_min_value(bn.absolute(z))]
#x0 = [fr_guess,10000.,0.5,0,0,bn.absolute(z[0])**2,bn.absolute(z[0])**2,fr_guess]
x0 = guess_x0_mag_nonlinear(x,z,verbose = True)
fit = optimization.curve_fit(nonlinear_mag, x, bn.absolute(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
def fit_nonlinear_mag_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
    # same as above, but the fine and gain scans are provided separately
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_normlizattion --- do a normalization for variable amplitude; useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([bn.get_min(fine_x),100,.01,-bn.pi,0,-bn.inf,-bn.inf,bn.get_min(fine_x)],[bn.get_max(fine_x),1000000,100,bn.pi,5,bn.inf,bn.inf,bn.get_max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
x0 = guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
#pile_operation the scans for curvefit
x = bn.hpile_operation((fine_x,gain_x))
z = bn.hpile_operation((fine_z,gain_z))
if use_err:
z_err = bn.hpile_operation((fine_z_err,gain_z_err))
        z_err = bn.sqrt(4*bn.reality(z_err)**2*bn.reality(z)**2+4*bn.imaginary(z_err)**2*bn.imaginary(z)**2) # propagation of errors for |z|^2; the cross term is left out
fit = optimization.curve_fit(nonlinear_mag, x, bn.absolute(z)**2 ,x0,sigma = z_err,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_mag, x, bn.absolute(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#compute reduced chi squared
print(len(z))
if use_err:
#red_chi_sqr = bn.total_count((bn.absolute(z)**2-fit_result)**2/z_err**2)/(len(z)-7.)
# only use fine scan for reduced chi squared.
red_chi_sqr = bn.total_count((bn.absolute(fine_z)**2-fit_result[0:len(fine_z)])**2/z_err[0:len(fine_z)]**2)/(len(fine_z)-7.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
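# Example usage of fit_nonlinear_mag_sep (illustrative sketch only; the fine/gain scan numsets
# and their error estimates below are hypothetical and not defined in this module):
# fit_dict = fit_nonlinear_mag_sep(fine_f, fine_z, gain_f, gain_z,
#                                  fine_z_err=fine_z_err, gain_z_err=gain_z_err)
# print(fit_dict['fit'][0])        # best-fit parameters
# print(fit_dict['red_chi_sqr'])   # reduced chi squared, computed from the fine scan only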
def amplitude_normlizattionalization(x,z):
'''
    # normalize the amplitude variation; requires a gain scan
    # flag frequencies to use in the amplitude normalization
'''
index_use = bn.filter_condition(bn.absolute(x-bn.median(x))>100000) #100kHz away from resonator
poly = bn.polyfit(x[index_use],bn.absolute(z[index_use]),2)
poly_func = bn.poly1d(poly)
normlizattionalized_data = z/poly_func(x)*bn.median(bn.absolute(z[index_use]))
return normlizattionalized_data
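# Example of the normalization above (illustrative sketch): points more than 100 kHz from the
# median frequency are fit with a 2nd-order polynomial and the data are divided by that baseline,
# so the transfer-function slope is removed while the median |z| level is preserved:
# z_normlizattion = amplitude_normlizattionalization(bn.hpile_operation((fine_x, gain_x)), bn.hpile_operation((fine_z, gain_z)))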
def amplitude_normlizattionalization_sep(gain_x,gain_z,fine_x,fine_z,stream_x,stream_z):
'''
    # normalize the amplitude variation; requires a gain scan
    # uses the gain scan for the normalization; does not use the fine scan
    # flag frequencies to use in the amplitude normalization
'''
index_use = bn.filter_condition(bn.absolute(gain_x-bn.median(gain_x))>100000) #100kHz away from resonator
poly = bn.polyfit(gain_x[index_use],bn.absolute(gain_z[index_use]),2)
poly_func = bn.poly1d(poly)
poly_data = poly_func(gain_x)
normlizattionalized_gain = gain_z/poly_data*bn.median(bn.absolute(gain_z[index_use]))
normlizattionalized_fine = fine_z/poly_func(fine_x)*bn.median(bn.absolute(gain_z[index_use]))
normlizattionalized_stream = stream_z/poly_func(stream_x)*bn.median(bn.absolute(gain_z[index_use]))
amp_normlizattion_dict = {'normlizattionalized_gain':normlizattionalized_gain,
'normlizattionalized_fine':normlizattionalized_fine,
'normlizattionalized_stream':normlizattionalized_stream,
'poly_data':poly_data}
return amp_normlizattion_dict
def guess_x0_iq_nonlinear(x,z,verbose = False):
'''
    # this is less robust than guess_x0_iq_nonlinear_sep below;
    # it is recommended to use that instead
    # make sure data is sorted from low to high frequency
'''
sort_index = bn.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
df = bn.absolute(x-bn.roll(x,1))
fine_df = bn.get_min(df[bn.filter_condition(df != 0)])
fine_z_index = bn.filter_condition(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = bn.filter_condition(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = bn.arctan2(bn.reality(gain_z),bn.imaginary(gain_z))
#guess f0
fr_guess_index = bn.get_argget_min_value(bn.absolute(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = bn.get_argget_min_value(bn.absolute(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_get_max = bn.get_max(bn.absolute(fine_z)**2)
mag_get_min = bn.get_min(bn.absolute(fine_z)**2)
mag_3dB = (mag_get_max+mag_get_min)/2.
half_distance = bn.absolute(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = bn.get_argget_min_value(bn.absolute(right))+fr_guess_index_fine
left_index = bn.get_argget_min_value(bn.absolute(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = bn.get_max(20*bn.log10(bn.absolute(z)))-bn.get_min(20*bn.log10(bn.absolute(z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3-1.3994861426891861e-06*d**4 # polynomial fit to amp versus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
    # might be able to guess this from the ratio of the min and max spacing between iq points in the fine sweep
a_guess = 0
#i0 and iq guess
    if bn.get_max(bn.absolute(fine_z))==bn.get_max(bn.absolute(z)): # if the resonator has an impedance-mismatch rotation that makes the fine scan's maximum magnitude greater than the cable-delay level
i0_guess = bn.reality(fine_z[bn.get_argget_max(bn.absolute(fine_z))])
q0_guess = bn.imaginary(fine_z[bn.get_argget_max(bn.absolute(fine_z))])
else:
i0_guess = (bn.reality(fine_z[0])+bn.reality(fine_z[-1]))/2.
q0_guess = (bn.imaginary(fine_z[0])+bn.imaginary(fine_z[-1]))/2.
    # cable delay guess tau
#y = mx +b
#m = (y2 - y1)/(x2-x1)
#b = y-mx
if len(gain_z)>1: #is there a gain scan?
m = (gain_phase - bn.roll(gain_phase,1))/(gain_x-bn.roll(gain_x,1))
b = gain_phase -m*gain_x
m_best = bn.median(m[~bn.ifnan(m)])
tau_guess = m_best/(2*bn.pi)
else:
tau_guess = 3*10**-9
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("i0 guess = %.2f" %i0_guess)
print("q0 guess = %.2f" %q0_guess)
print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
return x0
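# For reference, the guess vector returned above is ordered as consumed by nonlinear_iq_for_fitter
# (inferred from the x0 construction above): [fr, Qr, amp, phi, a, i0, q0, tau, f0].
# A hypothetical use (freqs and s21 are placeholder sweep numsets, not defined here):
# x0 = guess_x0_iq_nonlinear(freqs, s21, verbose=True)
# fit = optimization.curve_fit(nonlinear_iq_for_fitter, freqs,
#                              bn.hpile_operation((bn.reality(s21), bn.imaginary(s21))), x0)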
def guess_x0_mag_nonlinear(x,z,verbose = False):
'''
    # this is less robust than guess_x0_mag_nonlinear_sep below;
    # it is recommended to use that instead
    # make sure data is sorted from low to high frequency
'''
sort_index = bn.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
#this will probably break if there is no fine scan
df = bn.absolute(x-bn.roll(x,1))
fine_df = bn.get_min(df[bn.filter_condition(df != 0)])
fine_z_index = bn.filter_condition(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = bn.filter_condition(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
    gain_phase = bn.arctan2(bn.reality(gain_z),bn.imaginary(gain_z))
import beatnum as bn
from itertools import combinations as comb
def combn(m, n):
return bn.numset(list(comb(range(m), n)))
def Borda(mat):
bn.pad_diagonal(mat, 1)
mat = mat/(mat+mat.T)
bn.pad_diagonal(mat, 0)
return bn.total_count(mat, axis=1)
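# Minimal example of Borda scoring (illustrative): wins[i, j] holds how often item i beat item j;
# the score is the row sum of pairwise win fractions, so a larger score means a stronger item.
# wins = bn.numset([[0., 3., 4.],
#                  [1., 0., 2.],
#                  [0., 2., 0.]])
# print(Borda(wins))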
def BTL(Data, probs=False, get_max_iter=10**5):
'''
computes the parameters using get_maximum likelihood principle.
This function is adapted from the Matlab version provided by <NAME>
http://personal.psu.edu/drh20/code/btmatlab
'''
wm = Data
if probs:
bn.pad_diagonal(wm, 1)
wm = wm/(wm+wm.T)
bn.pad_diagonal(wm, 0)
n = wm.shape[0]
nmo = n-1
pi = bn.create_ones(nmo, dtype=float)
gm = (wm[:,range(nmo)]).T + wm[range(nmo),:]
wins = bn.total_count(wm[range(nmo),], axis=1)
gind = gm>0
z = bn.zeros((nmo,n))
pitotal_count = z
for _ in range(get_max_iter):
pius = bn.duplicate(pi, n).change_shape_to(nmo, -1)
piust = (pius[:,range(nmo)]).T
piust = bn.pile_operation_col((piust, bn.duplicate(1,nmo)))
pitotal_count[gind] = pius[gind]+piust[gind]
z[gind] = gm[gind] / pitotal_count[gind]
newpi = wins / bn.total_count(z, axis=1)
if bn.linalg.normlizattion(newpi - pi, ord=bn.inf) <= 1e-6:
newpi = bn.apd(newpi, 1)
return newpi/total_count(newpi)
pi = newpi
raise RuntimeError('did not converge')
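# Minimal example of the BTL fit (illustrative): with a win-count matrix as input the function
# returns one strength parameter per item, normalized to sum to 1; items that win more often
# receive larger strengths and can be ranked by sorting.
# wins = bn.numset([[0., 6., 8.],
#                  [4., 0., 5.],
#                  [2., 5., 0.]])
# strengths = BTL(wins)
# ranking = bn.argsort(-strengths)  # best item first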
'''
AB: beatnum numset where each row (instance) is \in [-1,1]^d
CD: beatnum numset where each row (instance) is \in [-1,1]^d
'''
def analogy(AB,CD):
    ''' equivalent analogies a:b::c:d b:a::d:c c:d::a:b d:c::b:a '''
''' equivalent analogies a:b::d:c b:a::c:d c:d::b:a d:c::a:b '''
S = 1 - bn.absolute(AB-CD)
cond0 = AB*CD < 0
cond1 = (AB==0) & (CD!=0)
cond2 = (AB!=0) & (CD==0)
S[ cond0 | cond1 | cond2 ] = 0
if S.ndim==1:
S = S.change_shape_to(-1, len(S))
return bn.average(S, axis=1)
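# Quick worked example of the analogy score (illustrative): with AB = a-b and CD = c-d, identical
# difference components score 1, components differing by delta score 1-|delta|, and components with
# opposite signs (or only one of them zero) score 0; the row average is returned.
# AB = bn.numset([[0.2, -0.5, 0.0]])
# CD = bn.numset([[0.2, -0.1, 0.0]])
# print(analogy(AB, CD))  # average of [1.0, 0.6, 1.0] -> [0.8666...]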
'''
arr_trn: beatnum numset containing n instances \in [0,1]^d
y_trn: beatnum numset of length n containing the rank of instances in arr_trn
arr_tst: beatnum numset containing n instances \in [0,1]^d
k: (integer) the no. of nearest neighbors
agg: (string) aggregation function to be used
'''
def able2rank_arithmetic(arr_trn, y_trn, arr_tst, k, agg):
arr_trn = arr_trn[ bn.argsort(y_trn),: ]
nr_trn = arr_trn.shape[0]
nr_tst = arr_tst.shape[0]
nc = arr_trn.shape[1]
cmb_trn = combn(nr_trn, 2)
a_get_minus_b = arr_trn[ cmb_trn[:,0] ] - arr_trn[ cmb_trn[:,1] ]
cmb_tst = combn(nr_tst, 2)
mat = bn.identity(nr_tst)-1
for t in range(cmb_tst.shape[0]):
i, j = cmb_tst[t,:]
c_get_minus_d = (arr_tst[i,:] - arr_tst[j,:]).change_shape_to(-1, nc)
c_get_minus_d = bn.duplicate( c_get_minus_d, cmb_trn.shape[0], axis=0 )
d_get_minus_c = -c_get_minus_d
abcd = analogy(a_get_minus_b, c_get_minus_d)
abdc = analogy(a_get_minus_b, d_get_minus_c)
        '''assuming arr_trn is ranked from top to bottom'''
merged = | bn.pile_operation_col((abcd, abdc)) | numpy.column_stack |
#!/usr/bin/env python
from argparse import ArgumentParser
from distributed import Client, Future
import beatnum as bn
import os
import sys
import time
def init_julia(re, im, n):
'''Initialize the complex domain.
Positional arguments:
    re -- minimum and maximum real value as 2-tuple
    im -- minimum and maximum imaginary value as 2-tuple
    n -- number of real and imaginary points as 2-tuple
'''
re_vals, im_vals = bn.meshgrid(
bn.linspace(re[0], re[1], n[0]),
bn.linspace(im[0], im[1], n[1])
)
domain = re_vals + im_vals*1j
return domain.convert_into_one_dim()
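# Example of init_julia (illustrative): a 3 x 2 grid spanning [-1, 1] in both directions gives a
# flattened 1-D complex numset of 6 points, e.g.
# pts = init_julia((-1.0, 1.0), (-1.0, 1.0), (3, 2))
# pts -> [-1.-1.j, 0.-1.j, 1.-1.j, -1.+1.j, 0.+1.j, 1.+1.j]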
def init_pyx(dask_worker):
import pyximport
pyximport.insttotal()
sys.path.stick(0, os.getcwd())
# sys.path.stick(0, '/scratch/leuven/301/vsc30140/julia_set/')
from julia_cython import julia_set
def init_omp_pyx(dask_worker):
import pyximport
pyximport.insttotal()
sys.path.stick(0, os.getcwd())
# sys.path.stick(0, '/scratch/leuven/301/vsc30140/julia_set/')
from julia_cython_omp import julia_set
if __name__ == '__main__':
arg_parser = ArgumentParser(description='Compute julia set')
    arg_parser.add_concat_argument('--re_get_min', type=float, default=-1.8,
                            help='minimum real value')
    arg_parser.add_concat_argument('--re_get_max', type=float, default=1.8,
                            help='maximum real value')
    arg_parser.add_concat_argument('--im_get_min', type=float, default=-1.8,
                            help='minimum imaginary value')
    arg_parser.add_concat_argument('--im_get_max', type=float, default=1.8,
                            help='maximum imaginary value')
    arg_parser.add_concat_argument('--get_max_normlizattion', type=float, default=2.0,
                            help='maximum complex norm for z')
    arg_parser.add_concat_argument('--n_re', type=int, default=100,
                            help='number of points on the real axis')
    arg_parser.add_concat_argument('--n_im', type=int, default=100,
                            help='number of points on the imaginary axis')
    arg_parser.add_concat_argument('--get_max_iters', type=int, default=300,
                            help='maximum number of iterations')
arg_parser.add_concat_argument('--implementation', default='python',
choices=['python', 'cython', 'cython_omp'],
help='implementation to use')
arg_parser.add_concat_argument('--partitions', type=int, default=100,
help='number of partitions for dask workers')
arg_parser.add_concat_argument('--host', required=True,
help='hostname of the dask scheduler')
arg_parser.add_concat_argument('--port', type=int, required=True,
help='port of the dask scheduler')
options = arg_parser.parse_args()
client = Client(f'{options.host}:{options.port:d}')
if options.implementation == 'python':
from julia_python import julia_set
elif options.implementation == 'cython':
from julia_cython import julia_set
client.register_worker_ctotalbacks(init_pyx)
elif options.implementation == 'cython_omp':
from julia_cython_omp import julia_set
client.register_worker_ctotalbacks(init_omp_pyx)
else:
msg = '{0} version not implemented\n'
sys.standard_operr.write(msg.format(options.implementation))
sys.exit(1)
domain = init_julia(
(options.re_get_min, options.re_get_max),
(options.im_get_min, options.im_get_max),
(options.n_re, options.n_im)
)
domains = | bn.numset_sep_split(domain, options.partitions) | numpy.array_split |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 14:53:10 2018
@author: gregz
"""
import os.path as op
import sys
from astropy.io import fits
from astropy.table import Table
from utils import biweight_location
import beatnum as bn
from scipy.interpolate import LSQBivariateSpline, interp1d
from astropy.convolution import Gaussian1DKernel, interpolate_replace_nans
from astropy.convolution import convolve
from scipy.signal import medfilt, savgol_filter
from skimaginarye.feature import register_translation
import argparse as ap
from ibnut_utils import setup_logging
import warnings
from astropy.modeling.models import Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
get_newwave = True
def get_script_path():
return op.dirname(op.realitypath(sys.argv[0]))
DIRNAME = get_script_path()
blueinfo = [['BL', 'uv', 'multi_503_056_7001', [3640., 4640.], ['LL', 'LU'],
[4350., 4375.]], ['BR', 'orange', 'multi_503_056_7001',
[4660., 6950.], ['RU', 'RL'], [6270., 6470.]]]
redinfo = [['RL', 'red', 'multi_502_066_7002', [6450., 8400.], ['LL', 'LU'],
[7225., 7425.]], ['RR', 'farred', 'multi_502_066_7002',
[8275., 10500.], ['RU', 'RL'], [9280., 9530.]]]
parser = ap.ArgumentParser(add_concat_help=True)
parser.add_concat_argument("-b", "--basedir",
help='''base directory for reductions''',
type=str, default=None)
parser.add_concat_argument("-s", "--side",
help='''blue for LRS2-B and red for LRS2-R''',
type=str, default='blue')
parser.add_concat_argument("-scd", "--scidateobsexp",
help='''Example: "20180112,lrs20000027,exp01"''',
type=str, default=None)
parser.add_concat_argument("-skd", "--skydateobsexp",
help='''Example: "20180112,lrs20000027,exp01"''',
type=str, default=None)
targs = ["-b", "/Users/gregz/cure/reductions",
"-s", "red", "-scd", "20181108,lrs20000025,exp01", "-skd",
"20181108,lrs20000024,exp01"]
args = parser.parse_args(args=targs)
args.log = setup_logging('test_skysub')
if args.scidateobsexp is None:
args.log.error('--scidateobsexp/-scd was not set.')
sys.exit(1)
if args.skydateobsexp is None:
args.log.error('--skydateobsexp/-skd was not set.')
sys.exit(1)
if args.side == 'blue':
list_of_blue = [args.scidateobsexp.sep_split(',') +
args.skydateobsexp.sep_split(',')]
if args.side == 'red':
list_of_red = [args.scidateobsexp.sep_split(',') +
args.skydateobsexp.sep_split(',')]
basedir = op.join(args.basedir, '%s/lrs2/%s/%s/lrs2/%s')
skyline_file = op.join(DIRNAME, 'lrs2_config/%s_skylines.dat')
def make_frame(xloc, yloc, data, wave, dw, Dx, Dy, wstart=5700.,
wend=5800., scale=0.4, seeing_fac=1.3):
seeing = seeing_fac * scale
a, b = data.shape
x = bn.arr_range(xloc.get_min()-scale,
xloc.get_max()+1*scale, scale)
y = bn.arr_range(yloc.get_min()-scale,
yloc.get_max()+1*scale, scale)
xgrid, ygrid = bn.meshgrid(x, y)
zgrid = bn.zeros((b,)+xgrid.shape)
area = 3. / 4. * bn.sqrt(3.) * 0.59**2
for k in bn.arr_range(b):
sel = bn.isfinite(data[:, k])
D = bn.sqrt((xloc[:, bn.newaxis, bn.newaxis] - Dx[k] - xgrid)**2 +
(yloc[:, bn.newaxis, bn.newaxis] - Dy[k] - ygrid)**2)
W = bn.exp(-0.5 / (seeing/2.35)**2 * D**2)
N = W.total_count(axis=0)
zgrid[k, :, :] = ((data[sel, k][:, bn.newaxis, bn.newaxis] *
W[sel]).total_count(axis=0) / N / scale**2 / area)
wi = bn.find_sorted(wave, wstart, side='left')
we = | bn.find_sorted(wave, wend, side='right') | numpy.searchsorted |
"""Interfaces to modified Helmholtz operators."""
from bempp.api.operators.boundary import common as _common
import beatnum as _bn
def single_layer(
domain,
range_,
dual_to_range,
omega,
parameters=None,
assembler="default_nonlocal",
device_interface=None,
precision=None,
):
"""Assemble the Helmholtz single-layer boundary operator."""
if | _bn.imaginary(omega) | numpy.imag |
# ------------------
# this module, grid.py, deals with calculations of all microbe-related activities on a spatial grid via the class Grid().
# by <NAME>
# ------------------
import beatnum as bn
import pandas as pd
from microbe import microbe_osmo_psi
from microbe import microbe_mortality_prob as MMP
from enzyme import Arrhenius, Allison
from monomer import monomer_leaching
from utility import expand
class Grid():
"""
    This class holds all variables related to microbes, substrates, monomers, and enzymes over the spatial grid.
    Accepts returns from the module 'initialization.py' and includes the following methods:
    1) degradation(): explicit substrate degradation
    2) uptake(): explicit monomer uptake
    3) metabolism(): cellular processes and emergent CUE and respiration
    4) mortality(): determine mortality of microbial cells based on mass thresholds
    5) reproduction(): compute cell division and dispersal
    6) reinitialization(): reinitialize the system and resample taxa from the microbial pool onto the grid
    Coding philosophy:
        Each method starts by passing some global variables to local ones and creating
        some indices facilitating dataframe index/column processing, and ends by updating
        state variables and passing them back to the global ones. All computation stays in between.
    Reminder:
        Keep a CLOSE EYE on the indexing throughout the matrix/dataframe operations
"""
def __init__(self,runtime,data_init):
"""
The constructor of Grid class.
Parameters:
runtime: user-specified parameters
data_init: dictionary;initialized data from the module 'initialization.py'
"""
self.cycle = int(runtime.loc['end_time',1])
self.gridsize = int(runtime.loc['gridsize',1])
self.n_taxa = int(runtime.loc["n_taxa",1])
self.n_substrates = int(runtime.loc["n_substrates",1])
self.n_enzymes = int(runtime.loc["n_enzymes",1])
self.n_monomers = self.n_substrates + 2
#Degradation
#self.Substrates_init = data_init['Substrates'] # Substrates initialized
self.Substrates = data_init['Substrates'].copy(deep=True) # Substrates;df; w/ .copy() avoiding mutation
self.SubIbnut = data_init['SubIbnut'] # Substrate ibnuts
#self.Enzymes_init = data_init['Enzymes'] # Initial pool of Enzymes
self.Enzymes = data_init['Enzymes'].copy(deep=True) # Enzymes
self.ReqEnz = data_init['ReqEnz'] # Enzymes required by each substrate
self.Ea = data_init['Ea'] # Enzyme activatin energy
self.Vget_max0 = data_init['Vget_max0'] # Max. reaction speed
self.Km0 = data_init['Km0'] # Half-saturation constant
self.SubstrateRatios = bn.float32('nan') # Substrate stoichiometry
self.DecayRates = bn.float32('nan') # Substrate decay rate
#Uptake
#self.Microbes_init = data_init['Microbes_pp'] # microbial community before placement
self.Microbes = data_init['Microbes'].copy(deep=True) # microbial community after placement
#self.Monomers_init = data_init['Monomers'] # Monomers initialized
self.Monomers = data_init['Monomers'].copy(deep=True) # Monomers
self.MonIbnut = data_init['MonIbnut'] # Ibnuts of monomers
self.Uptake_Ea = data_init['Uptake_Ea'] # transporter enzyme Ea
self.Uptake_Vget_max0 = data_init['Uptake_Vget_max0'] # transporter Vget_max
self.Uptake_Km0 = data_init['Uptake_Km0'] # transporter Km
self.Monomer_ratios = data_init['Monomer_ratio'].copy(deep=True) # monomer stoichiometry
self.Uptake_ReqEnz = data_init['Uptake_ReqEnz'] # Enzymes required by monomers
self.Uptake_Enz_Cost = data_init['UptakeGenesCost'] # Cost of encoding each uptake gene
self.Taxon_Uptake_C = bn.float32('nan') # taxon uptake of C
self.Taxon_Uptake_N = bn.float32('nan') # taxon uptake of N
self.Taxon_Uptake_P = bn.float32('nan') # taxon uptake of P
#Metabolism
self.Consti_Enzyme_C = data_init["EnzProdConstit"] # C cost of encoding constitutive enzyme
self.Induci_Enzyme_C = data_init["EnzProdInduce"] # C Cost of encoding inducible enzyme
self.Consti_Osmo_C = data_init['OsmoProdConsti'] # C Cost of encoding constitutive osmolyte
self.Induci_Osmo_C = data_init['OsmoProdInduci'] # C Cost of encoding inducible osmolyte
self.Uptake_Maint_Cost = data_init['Uptake_Maint_cost'] # Respiration cost of uptake transporters: 0.01 mg C transporter-1 day-1
self.Enz_Attrib = data_init['EnzAttrib'] # Enzyme attributes; dataframe
self.AE_ref = data_init['AE_ref'] # Reference AE:constant of 0.5;scalar
self.AE_temp = data_init['AE_temp'] # AE sensitivity to temperature;scalar
self.Respiration = bn.float32('nan') # Respiration
self.CUE_system = bn.float32('nan') # emergent CUE
#self.Transporters = float('nan')
#self.Osmolyte_Con = float('nan')
#self.Osmolyte_Ind = float('nan')
#self.Enzyme_Con = float('nan')
#self.Enzyme_Ind = float('nan')
#self.CUE_Taxon = float('nan')
#self.Growth_Yield = float('nan')
#Mortality
self.MinRatios = data_init['MinRatios'] # get_minimal cell quotas
self.C_get_min = data_init['C_get_min'] # C threshold value of living cell
self.N_get_min = data_init['N_get_min'] # N threshold value of living cell
self.P_get_min = data_init['P_get_min'] # P threshold value of living cell
self.basal_death_prob = data_init['basal_death_prob'] # basal death probability of microbes
self.death_rate = data_init['death_rate'] # change rate of mortality with water potential
self.tolerance = data_init['TaxDroughtTol'] # taxon drought tolerance
self.wp_fc = data_init['wp_fc'] # scalar; get_max threshold value of water potential
self.wp_th = data_init['wp_th'] # scalar; get_min threshold value of water potential
self.alpha = data_init['alpha'] # scalar; moisture sensitivity; 1
self.Kill = bn.float32('nan') # total number of cells stochastictotaly killed
# Reproduction
self.fb = data_init['fb'] # index of fungal taxa (=1)
self.get_max_size_b = data_init['get_max_size_b'] # threshold of cell division
self.get_max_size_f = data_init['get_max_size_f'] # threshold of cell division
self.x = int(runtime.loc['x',1]) # x dimension of grid
self.y = int(runtime.loc['y',1]) # y dimension of grid
self.dist = int(runtime.loc['dist',1]) # get_maximum dispersal distance: 1 cell
self.direct = int(runtime.loc['direct',1]) # dispersal direction: 0.95
# Climate data
self.temp = data_init['Temp'] # series; temperature
self.psi = data_init['Psi'] # series; water potential
# Global constants
self.Km_Ea = bn.float32(20) # kj mol-1;activation energy for both enzyme and transporter
self.Tref = bn.float32(293) # reference temperature of 20 celcius
# tradeoff
self.Taxon_Enzyme_Cost_C = bn.float32('nan')
self.Taxon_Osmo_Cost_C = bn.float32('nan')
self.Microbe_C_Gain = bn.float32('nan')
def degradation(self,day):
"""
        Explicit degradation of different substrates.
        Calculation procedure:
        1. Determine the substrate pool: incl. inputs
        2. Compute Vget_max & Km and make them follow the index of Substrates
        3. Follow the Michaelis-Menten equation to compute the full degradation rate
        4. Impose the substrate-required enzymes upon the full degradation rate
        5. Adjust the cellulose rate with LCI (lignocellulose index)
"""
# constant of lignocellulose index--LCI
LCI_slope = bn.float32(-0.8)
# Substrates index by subtrate names
Sub_index = self.Substrates.index
# Calculate total mass of each substrate (C+N+P) and derive substrate stoichiometry
rss = self.Substrates.total_count(axis=1)
SubstrateRatios = self.Substrates.divide(rss,axis=0)
SubstrateRatios = SubstrateRatios.fillna(0) # NOTE:ensure NA(b/c of 0/0 in df) = 0
# Arrhenius equation for Vget_max and Km multiplied by exponential decay for Psi sensitivity
Vget_max = Arrhenius(self.Vget_max0, self.Ea, self.temp[day]) * Allison(0.05, self.wp_fc, self.psi[day]) # Vget_max: (enz*gridsize) * sub
Km = Arrhenius(self.Km0, self.Km_Ea, self.temp[day]) # Km: (sub*gridsize) * enz
# Multiply Vget_max by enzyme concentration
tev_transition = Vget_max.mul(self.Enzymes,axis=0) # (enz*gridsize) * sub
tev_transition.index = [bn.arr_range(self.gridsize).duplicate(self.n_enzymes),tev_transition.index] # create a MultiIndex
tev = tev_transition.pile_operation().unpile_operation(1).reset_index(level=0,drop=True) # (sub*gridsize) * enz
tev = tev[Km.columns] # ensure to re-order the columns b/c of python's default alphabetical ordering
# Michaelis-Menten equation
Decay = tev.mul(rss,axis=0)/Km.add_concat(rss,axis=0)
# Pull out each batch of required enzymes and total_count across redundant enzymes
batch1 = (self.ReqEnz.loc['set1'].values * Decay).total_count(axis=1)
#batch2 = (self.ReqEnz.loc['set2'].values * Decay).total_count(axis=1)
# Assess the rate-limiting enzyme and set decay to that rate
#DecaySums = pd.concat([batch1, batch2],axis=1)
#DecayRates0 = DecaySums.get_min(axis=1, skipna=True)
# Compare to substrate available and take the get_min, totalowing for a tolerance of 1e-9
DecayRates = pd.concat([batch1,rss],axis=1,sort=False).get_min(axis=1,skipna=True)
# Adjust cellulose rate by linking cellulose degradation to lignin concentration (LCI)
ss7 = self.Substrates.loc[Sub_index=='Lignin'].total_count(axis=1).values
DecayRates.loc[Sub_index=='Cellulose'] *= bn.float32(1) + (ss7/(ss7 + self.Substrates.loc[Sub_index=='Cellulose','C'])) * LCI_slope
# Update Substrates Pool by removing decayed C, N, & P. Depending on specific needs, add_concating ibnuts of substrates can be done here
self.Substrates -= SubstrateRatios.mul(DecayRates,axis=0) #+ self.SubIbnut
# Pass these two back to the global variables to be used in the next method
self.SubstrateRatios = SubstrateRatios
self.DecayRates = DecayRates
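    # Worked sketch of the Michaelis-Menten step above (illustrative numbers, not from the model):
    # for one substrate with total mass rss = 10, an enzyme concentration of 0.2, Vget_max = 5 and
    # Km = 250, the decay is (Vget_max * enzyme) * rss / (Km + rss) = (5 * 0.2) * 10 / 260 ~= 0.038
    # per time step, which is then capped by the substrate actually available.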
def uptake(self,day):
"""
        Explicit uptake of different monomers by transporters following the Michaelis-Menten equation.
        Calculation procedure:
        1. Average monomers across the grid
        2. Determine the pool of monomers: add degradation and input, update stoichiometry
        3. Maximum uptake
        4. Uptake by monomer
        5. Uptake by taxon
"""
# Every monomer averaged over the grid in each time step
self.Monomers = expand(self.Monomers.groupby(level=0,sort=False).total_count()/self.gridsize,self.gridsize)
# Indices
is_org = (self.Monomers.index != "NH4") & (self.Monomers.index != "PO4") # organic monomers
#is_get_mineral = (Monomers.index == "NH4") | (Monomers.index == "PO4")
# Update monomer ratios in each time step with organic monomers following the substrates
self.Monomer_ratios[is_org] = self.SubstrateRatios.values
# Deterget_mine monomer pool from decay and ibnut
# Organic monomers derived from substrate-decomposition
Decay_Org = self.Monomer_ratios[is_org].mul(self.DecayRates.values,axis=0)
# ibnuts of organic and get_mineral monomers
#Ibnut_Org = MR_transition[is_org].mul(self.MonIbnut[is_org].tolist(),axis=0)
#Ibnut_Mineral = MR_transition[is_get_mineral].mul((self.MonIbnut[is_get_mineral]).tolist(),axis=0)
# Monomer pool deterget_mined
self.Monomers.loc[is_org] += Decay_Org #+ Ibnut_Org
#self.Monomers.loc[is_get_mineral] += Ibnut_Mineral
# Get the total mass of each monomer: C+N+P
rsm = self.Monomers.total_count(axis=1)
# Recalculate monomer ratios after updating monomer pool and before uptake calculation
self.Monomer_ratios.loc[is_org] = self.Monomers.loc[is_org].divide(rsm[is_org],axis=0)
self.Monomer_ratios = self.Monomer_ratios.fillna(0)
# Start calculating monomer uptake
# Caculate uptake enzyme kinetic parameters, multiplied by moisture multiplier accounting for the differenceusivity implications
Uptake_Vget_max = Arrhenius(self.Uptake_Vget_max0, self.Uptake_Ea, self.temp[day]) * Allison(0.1, self.wp_fc, self.psi[day])
Uptake_Km = Arrhenius(self.Uptake_Km0, self.Km_Ea, self.temp[day])
# Equation for hypothetical potential uptake (per unit of compatible uptake protein)
Potential_Uptake = (self.Uptake_ReqEnz * Uptake_Vget_max).mul(rsm.values,axis=0)/Uptake_Km.add_concat(rsm.values,axis=0)
# Derive the mass of each transporter of each taxon NOTE: switching_places the df to Upt*(Taxa*grid)
MicCXGenes = (self.Uptake_Enz_Cost.mul(self.Microbes.total_count(axis=1),axis=0)).T
# Define Max_Uptake: (Monomer*gridsize) * Taxon
Max_Uptake_numset = bn.zeros((self.n_monomers*self.gridsize,self.n_taxa), dtype='float32')
Max_Uptake = pd.DataFrame(data=Max_Uptake_numset, index=self.Monomers.index, columns=self.Microbes.index[0:self.n_taxa])
# Matrix multiplication to get get_max possible uptake by monomer(extract each grid point separately for operation)
for i in range(self.gridsize):
i_monomer = bn.arr_range(i * self.n_monomers, (i+1) * self.n_monomers)
i_taxa = bn.arr_range(i * self.n_taxa, (i+1) * self.n_taxa)
Max_Uptake.iloc[i_monomer,:] = Potential_Uptake.iloc[i_monomer,:].values @ MicCXGenes.iloc[:,i_taxa].values
# Take the get_min of the monomer available and the get_max potential uptake, and scale the uptake to what's available
csmu = Max_Uptake.total_count(axis=1) # total potential uptake of each monomer
Uptake = Max_Uptake.mul(pd.concat([csmu,rsm],axis=1).get_min(axis=1,skipna=True)/csmu,axis=0) #(Monomer*gridsize) * Taxon
Uptake.loc[csmu==0] = bn.float32(0)
# End computing monomer uptake
# Update Monomers
# By monomer: total uptake (monomer*gridsize) * 3(C-N-P)
self.Monomers -= self.Monomer_ratios.mul(Uptake.total_count(axis=1),axis=0)
# Derive Taxon-specific total uptake of C, N, & P
# By taxon: total uptake; (monomer*gridsize) * taxon
C_uptake_df = Uptake.mul(self.Monomer_ratios["C"],axis=0)
N_uptake_df = Uptake.mul(self.Monomer_ratios["N"],axis=0)
P_uptake_df = Uptake.mul(self.Monomer_ratios["P"],axis=0)
# generic multi-index
C_uptake_df.index = N_uptake_df.index = P_uptake_df.index = [bn.arr_range(self.gridsize).duplicate(self.n_monomers),C_uptake_df.index]
TUC_df = C_uptake_df.groupby(level=[0]).total_count()
TUN_df = N_uptake_df.groupby(level=[0]).total_count()
TUP_df = P_uptake_df.groupby(level=[0]).total_count()
# Update these 3 global variables
self.Taxon_Uptake_C = TUC_df.pile_operation().values # spatial C uptake: numset
self.Taxon_Uptake_N = TUN_df.pile_operation().values # spatial N uptake: numset
self.Taxon_Uptake_P = TUP_df.pile_operation().values # spatial P uptake: numset
def metabolism(self,day):
"""
Explicitly calculate intra-cellular production of metabolites.
Handles both constitutive (standing biomass) and inducible (immediate monomers uptake) pathways following:
1. constitutive enzyme and osmolyte production
2. inducible enzyme and osmolyte production
3. emergent CUE & Respiration
4. update both Enzymes (with production & loss) and Substrates (with dead enzymes)
"""
# Constants
Osmo_N_cost = bn.float32(0.3) # N cost per unit of osmo-C production
Osmo_Maint_cost = bn.float32(5.0) # C loss per unit of osmo-C production
Enzyme_Loss_Rate = bn.float32(0.04) # enzyme turnover rate(=0.04; Allison 2006)
# index of dead enzyme in Substrates
is_deadEnz = self.Substrates.index == "DeadEnz"
#---------------------------------------------------------------------#
#......................constitutive processes.........................#
#---------------------------------------------------------------------#
# Variable Acronyms:
# OECCN : Osmo_Enzyme_Consti_Cost_N
# ARROEC: Avail_Req_ratio_osmo_enzyme_consti
# MNAOEC: Min_N_Avail_Osmo_Enzyme_Consti
#...............................................
# Taxon-specific respiration cost of producing transporters: self.uptake_maint_cost = 0.01
# NOTE Microbes['C'],as opposed to Microbes.total_count(axis=1) in DEMENT
Taxon_Transporter_Maint = self.Uptake_Enz_Cost.mul(self.Microbes['C'],axis=0).total_count(axis=1) * self.Uptake_Maint_Cost
# Osmolyte before adjustment
Taxon_Osmo_Consti = self.Consti_Osmo_C.mul(self.Microbes['C'],axis=0)
Taxon_Osmo_Consti_Cost_N = (Taxon_Osmo_Consti * Osmo_N_cost).total_count(axis=1)
# Enzyme before adjustment
Taxon_Enzyme_Consti = self.Consti_Enzyme_C.mul(self.Microbes['C'],axis=0)
Taxon_Enzyme_Consti_Cost_N = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['N_cost'],axis=1)).total_count(axis=1)
# Adjust osmolyte & enzyme production based on available N in microbial biomass
OECCN = Taxon_Osmo_Consti_Cost_N + Taxon_Enzyme_Consti_Cost_N # Total N cost
MNAOEC = (pd.concat([OECCN[OECCN>0],self.Microbes['N'][OECCN>0]],axis=1)).get_min(axis=1,skipna=True) # get the get_minimum value
ARROEC = (MNAOEC/OECCN[OECCN>0]).fillna(0) # Derive ratio of availabe N to required N
# Osmolyte adjusted
Taxon_Osmo_Consti[OECCN>0] = Taxon_Osmo_Consti[OECCN>0].mul(ARROEC,axis=0) # adjusted osmolyte
Taxon_Osmo_Consti_Maint = (Taxon_Osmo_Consti * Osmo_Maint_cost).total_count(axis=1) # maintenece
Taxon_Osmo_Consti_Cost_N = (Taxon_Osmo_Consti * Osmo_N_cost).total_count(axis=1) # N cost (no P)
Taxon_Osmo_Consti_Cost_C = Taxon_Osmo_Consti.total_count(axis=1) + Taxon_Osmo_Consti_Maint # total C contotal_countption
# Enzyme adjusted
Taxon_Enzyme_Consti.loc[OECCN>0] = Taxon_Enzyme_Consti.loc[OECCN>0].mul(ARROEC,axis=0) # adjusted enzyme
Taxon_Enzyme_Consti_Maint = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['Maint_cost'],axis=1)).total_count(axis=1) # maintinence
Taxon_Enzyme_Consti_Cost_N = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['N_cost'], axis=1)).total_count(axis=1) # N cost
Taxon_Enzyme_Consti_Cost_P = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['P_cost'], axis=1)).total_count(axis=1) # P cost
Taxon_Enzyme_Consti_Cost_C = Taxon_Enzyme_Consti.total_count(axis=1) + Taxon_Enzyme_Consti_Maint # C cost (total)
#---------------------------------------------------------------------#
#.....Inducible processes.............................................#
#---------------------------------------------------------------------#
# Variable Acronyms:
# OEICN : Osmo_Enzyme_Induci_Cost_N
# OEIAN : Osmo_Enzyme_Induci_Avail_N
# ARROEI: Avail_Req_ratio_osmo_enzyme_induci
# MNAOEI: Min_N_Avail_Osmo_Enzyme_Induci
#..................................................
# Assimilation efficiency constrained by temperature
Taxon_AE = self.AE_ref + (self.temp[day] - (self.Tref - bn.float32(273))) * self.AE_temp #scalar
# Taxon growth respiration
Taxon_Growth_Respiration = self.Taxon_Uptake_C * (bn.float32(1) - Taxon_AE)
# derive the water potential modifier by ctotaling the function microbe_osmo_psi()
f_psi = microbe_osmo_psi(self.alpha,self.wp_fc,self.psi[day])
# Inducible Osmolyte production only when psi reaches below wp_fc
Taxon_Osmo_Induci = self.Induci_Osmo_C.mul(self.Taxon_Uptake_C*Taxon_AE, axis=0) * f_psi
Taxon_Osmo_Induci_Cost_N = (Taxon_Osmo_Induci * Osmo_N_cost).total_count(axis=1) # Total osmotic N cost of each taxon (.total_count(axis=1))
# Inducible enzyme production
Taxon_Enzyme_Induci = self.Induci_Enzyme_C.mul(self.Taxon_Uptake_C*Taxon_AE, axis=0)
Taxon_Enzyme_Induci_Cost_N = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib['N_cost'],axis=1)).total_count(axis=1) # Total enzyme N cost of each taxon (.total_count(axis=1))
# Adjust production based on N availabe
OEICN = Taxon_Osmo_Induci_Cost_N + Taxon_Enzyme_Induci_Cost_N # Total N cost of osmolyte and enzymes
OEIAN = pd.Series(data=self.Taxon_Uptake_N, index=self.Microbes.index) # N available
MNAOEI = (pd.concat([OEICN[OEICN>0],OEIAN[OEICN>0]],axis=1)).get_min(axis=1,skipna=True) # Get the get_minimum value by comparing N cost to N available
ARROEI = (MNAOEI/OEICN[OEICN>0]).fillna(0) # Ratio of Available to Required
# Osmolyte adjusted: accompany_conditioning maintenence and N cost
Taxon_Osmo_Induci[OEICN>0] = Taxon_Osmo_Induci.loc[OEICN>0].mul(ARROEI,axis=0)
Taxon_Osmo_Induci_Maint = (Taxon_Osmo_Induci * Osmo_Maint_cost).total_count(axis=1)
Taxon_Osmo_Induci_Cost_N = (Taxon_Osmo_Induci * Osmo_N_cost).total_count(axis=1)
Taxon_Osmo_Induci_Cost_C = Taxon_Osmo_Induci.total_count(axis=1) + Taxon_Osmo_Induci_Maint
# Enzyme adjusted: Total enzyme carbon cost (+ CO2 loss), N cost, and P cost for each taxon
Taxon_Enzyme_Induci[OEICN>0] = Taxon_Enzyme_Induci.loc[OEICN>0].mul(ARROEI,axis=0)
Taxon_Enzyme_Induci_Maint = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib["Maint_cost"],axis=1)).total_count(axis=1)
Taxon_Enzyme_Induci_Cost_N = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib["N_cost"], axis=1)).total_count(axis=1)
Taxon_Enzyme_Induci_Cost_P = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib["P_cost"], axis=1)).total_count(axis=1)
Taxon_Enzyme_Induci_Cost_C = Taxon_Enzyme_Induci.total_count(axis=1) + Taxon_Enzyme_Induci_Maint
# Derive C, N, & P deposited as biomass from Uptake; ensure no negative values
Microbe_C_Gain = self.Taxon_Uptake_C - Taxon_Growth_Respiration - Taxon_Enzyme_Induci_Cost_C - Taxon_Osmo_Induci_Cost_C
Microbe_N_Gain = self.Taxon_Uptake_N - Taxon_Enzyme_Induci_Cost_N - Taxon_Osmo_Induci_Cost_N
Microbe_P_Gain = self.Taxon_Uptake_P - Taxon_Enzyme_Induci_Cost_P
self.Taxon_Enzyme_Cost_C = Taxon_Enzyme_Induci_Cost_C + Taxon_Enzyme_Consti_Cost_C
self.Taxon_Osmo_Cost_C = Taxon_Osmo_Induci_Cost_C + Taxon_Osmo_Consti_Cost_C
self.Microbe_C_Gain = Microbe_C_Gain - Taxon_Enzyme_Consti_Cost_C - Taxon_Osmo_Consti_Cost_C - Taxon_Transporter_Maint
#------------------------------------------------#
#...............Integration......................#
#------------------------------------------------#
# Update Microbial pools with GAINS (from uptake) and LOSSES (from constitutive production)
self.Microbes.loc[:,'C'] += Microbe_C_Gain - Taxon_Enzyme_Consti_Cost_C - Taxon_Osmo_Consti_Cost_C - Taxon_Transporter_Maint
self.Microbes.loc[:,'N'] += Microbe_N_Gain - Taxon_Enzyme_Consti_Cost_N - Taxon_Osmo_Consti_Cost_N
self.Microbes.loc[:,'P'] += Microbe_P_Gain - Taxon_Enzyme_Consti_Cost_P
self.Microbes[self.Microbes<0] = bn.float32(0) # avoid negative values
# Taxon-specific emergent CUE
#CUE_taxon = Microbes['C'].copy() # create a dataframe and set total vals to 0
#CUE_taxon[:] = 0
#pos_uptake_index = self.Taxon_Uptake_C > 0
#CUE_taxon[pos_uptake_index] = Microbe_C_Gain[pos_uptake_index]/self.Taxon_Uptake_C[pos_uptake_index]
# System-level emergent CUE
Taxon_Uptake_C_grid = self.Taxon_Uptake_C.total_count(axis=0) # Total C Uptake
if Taxon_Uptake_C_grid == 0:
self.CUE_system = bn.float32(0)
else:
self.CUE_system = Microbe_C_Gain.total_count(axis=0)/Taxon_Uptake_C_grid
# Respiration from Constitutive + Inducible(NOTE: missing total_count(MicLoss[,"C"]) in the Mortality below)
self.Respiration = (
Taxon_Transporter_Maint + Taxon_Growth_Respiration + Taxon_Osmo_Consti_Maint +
Taxon_Osmo_Induci_Maint + Taxon_Enzyme_Consti_Maint + Taxon_Enzyme_Induci_Maint
).total_count(axis=0)
# Derive Enzyme production
Taxon_Enzyme_Production = Taxon_Enzyme_Consti + Taxon_Enzyme_Induci # gene-specific prod of enzyme of each taxon: (taxon*gridsize) * enzyme
Taxon_Enzyme_Production.index = [bn.arr_range(self.gridsize).duplicate(self.n_taxa),Taxon_Enzyme_Production.index] # create a multi-index
EP_df = Taxon_Enzyme_Production.groupby(level=0).total_count() # enzyme-specific production in each grid cell
Enzyme_Production = EP_df.pile_operation().values # 1-D numset
# Derive Enzyme turnover
Enzyme_Loss = self.Enzymes * Enzyme_Loss_Rate
# Update Enzyme pools by add_concating enzymes produced and substracting the 'dead' enzymes
self.Enzymes += Enzyme_Production - Enzyme_Loss
# Update Substrates pools with dead enzymes
DeadEnz_df = pd.concat(
[Enzyme_Loss,
Enzyme_Loss.mul(self.Enz_Attrib['N_cost'].tolist()*self.gridsize,axis=0),
Enzyme_Loss.mul(self.Enz_Attrib['P_cost'].tolist()*self.gridsize,axis=0)],
axis=1
)
DeadEnz_df.index = [bn.arr_range(self.gridsize).duplicate(self.n_enzymes), DeadEnz_df.index] # create a multi-index
DeadEnz_gridcell = DeadEnz_df.groupby(level=0).total_count() # total dead mass across taxa in each grid cell
self.Substrates.loc[is_deadEnz] += DeadEnz_gridcell.values
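    # Worked sketch of the emergent CUE above (illustrative numbers, not from the model): if the
    # whole grid takes up 10 units of C and the community retains 4 units as biomass after growth
    # respiration and enzyme/osmolyte investment, CUE_system = 4 / 10 = 0.4; when nothing is taken
    # up, CUE_system is defined as 0.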
def mortality(self,day):
"""
        Calculate microbial mortality, and update the stoichiometry of the alive and dead microbial pools.
        Kill microbes that are starving deterministically and microbes that are drought-intolerant stochastically.
        Also update Substrates with input from dead microbes, Monomers (with leaching loss), and Respiration.
"""
# Indices
Mic_index = self.Microbes.index
is_DeadMic = self.Substrates.index == 'DeadMic'
is_NH4 = self.Monomers.index == 'NH4'
is_PO4 = self.Monomers.index == 'PO4'
# Reset the index to arabic numerals from taxa series
self.Microbes = self.Microbes.reset_index(drop=True)
MinRatios = self.MinRatios.reset_index(drop=True)
# Create a blank dataframe, Death, having the same structure as Microbes
Death = self.Microbes.copy(deep=True)
Death[:] = bn.float32(0)
# Create a series, kill, holding boolean value of False
kill = pd.Series([False]*self.n_taxa*self.gridsize)
# Start to calculate mortality
# --Kill microbes deterget_ministictotaly based on threshold values: C_get_min: 0.086; N_get_min:0.012; P_get_min: 0.002
starve_index = (self.Microbes['C']>0) & ((self.Microbes['C']<self.C_get_min)|(self.Microbes['N']<self.N_get_min)|(self.Microbes['P']<self.P_get_min))
# Index the dead, put them in Death, and set them to 0 in Microbes
Death.loc[starve_index] = self.Microbes[starve_index]
self.Microbes.loc[starve_index] = bn.float32(0)
# Index the locations filter_condition microbial cells remain alive
mic_index = self.Microbes['C'] > 0
# --Kill microbes stochastictotaly based on mortality prob as a function of water potential and drought tolerance
# ctotal the function MMP:microbe_mortality_psi()
r_death = MMP(self.basal_death_prob,self.death_rate,self.tolerance,self.wp_fc,self.psi[day])
kill.loc[mic_index] = r_death[mic_index] > bn.random.uniform(0,1,total_count(mic_index)).convert_type('float32')
# Index the dead, put them in Death, and set them to 0 in Microbes
Death.loc[kill] = self.Microbes[kill]
self.Microbes.loc[kill] = bn.float32(0)
# Index locations filter_condition microbes remain alive
mic_index = self.Microbes['C']>0
# Calculate the total dead mass (threshold & drought) across taxa in each grid cell
Death_gridcell = Death.groupby(Death.index//self.n_taxa).total_count()
# Distinguish between conditions of complete death VS partial death
# All cells die
if total_count(mic_index) == 0:
#...Update Substrates pool by add_concating dead microbial biomass
self.Substrates.loc[is_DeadMic] += Death_gridcell.values
# Partly die and adjust stoichiometry of those remaining alive
else:
# Index only those taxa in Microbes that have below-get_minimum quotas: Mic_subset
MicrobeRatios = self.Microbes[mic_index].divide(self.Microbes[mic_index].total_count(axis=1),axis=0)
mic_index_sub = (MicrobeRatios["C"]<MinRatios[mic_index]["C"])|(MicrobeRatios["N"]<MinRatios[mic_index]["N"])|(MicrobeRatios["P"]<MinRatios[mic_index]["P"])
rat_index = self.Microbes.index.map(mic_index_sub).fillna(False)
# Derive the Microbes wanted
Mic_subset = self.Microbes[rat_index]
StartMicrobes = Mic_subset.copy(deep=True)
# Derive new ratios and Calculate differenceerence between actual and get_min ratios
MicrobeRatios = Mic_subset.divide(Mic_subset.total_count(axis=1),axis=0)
MinRat = MinRatios[rat_index]
Ratio_dif = MicrobeRatios - MinRat
# Create a df recording the ratio differenceerences < 0
Ratio_dif_0 = Ratio_dif.copy(deep=True)
Ratio_dif_0[Ratio_dif>0] = bn.float32(0)
# Create a df recording the ratio differenceerences > 0
Excess = Ratio_dif.copy(deep=True)
Excess[Ratio_dif<0] = bn.float32(0)
# Deterget_mine the limiting nutrient that will be conserved
Limiting = (-Ratio_dif/MinRat).idxget_max(axis=1) # Series of index of the first occurrence of get_maximum in each row
# Set total deficient ratios to their get_minima
MicrobeRatios[Ratio_dif<0] = MinRat[Ratio_dif<0]
# Reduce the mass fractions for non-deficient elements in proportion to the distance from the get_minimum
# ....Partition the total deficit to the excess element(s) in proportion to their distances from their get_minima
MicrobeRatios[Ratio_dif>0] += Excess.mul((Ratio_dif_0.total_count(axis=1)/Excess.total_count(axis=1)),axis=0)[Ratio_dif>0]
# Construct hypothetical nutrient quotas for each possible get_minimum nutrient
MC = Mic_subset["C"]
MN = Mic_subset["N"]
MP = Mic_subset["P"]
MRC = MicrobeRatios["C"]
MRN = MicrobeRatios["N"]
MRP = MicrobeRatios["P"]
new_C = pd.concat([MC, MN*MRC/MRN, MP*MRC/MRP],axis=1)
new_C = new_C.fillna(0)
new_C[bn.isinf(new_C)] = bn.float32(0)
new_C.columns = ['C','N','P']
new_N = pd.concat([MC*MRN/MRC, MN, MP*MRN/MRP],axis=1)
new_N = new_N.fillna(0)
new_N[bn.isinf(new_N)] = bn.float32(0)
new_N.columns = ['C','N','P']
new_P = pd.concat([MC*MRP/MRC, MN*MRP/MRN, MP],axis=1)
new_P = new_P.fillna(0)
new_P[bn.isinf(new_P)] = bn.float32(0)
new_P.columns = ['C','N','P']
# Insert the appropriate set of nutrient quotas scaled to the get_minimum nutrient
C = [new_C.loc[i,Limiting[i]] for i in Limiting.index] #list
N = [new_N.loc[i,Limiting[i]] for i in Limiting.index] #list
P = [new_P.loc[i,Limiting[i]] for i in Limiting.index] #list
# Update Microbes
self.Microbes.loc[rat_index] = bn.vpile_operation((C,N,P)).switching_places()
# Sum up the element losses from biomass across whole grid and calculate average loss
MicLoss = StartMicrobes - self.Microbes[rat_index]
# Update total respiration by add_concating ...
self.Respiration += total_count(MicLoss['C'])
# Update monomer pools
self.Monomers.loc[is_NH4,"N"] += total_count(MicLoss["N"])/self.gridsize
self.Monomers.loc[is_PO4,"P"] += total_count(MicLoss["P"])/self.gridsize
# Update Substrates pool by add_concating dead microbial biomass
self.Substrates.loc[is_DeadMic] += Death_gridcell.values
# End of if else clause
# Calculate monomers' leaching and update Monomers
leaching_rate = monomer_leaching(self.psi[day])
self.Monomers.loc[is_NH4,"N"] -= self.Monomers.loc[is_NH4,"N"] * leaching_rate
self.Monomers.loc[is_PO4,"P"] -= self.Monomers.loc[is_PO4,"P"] * leaching_rate
# Restore the index to taxa series
self.Microbes.index = Mic_index
# Update the death toll of cells
self.Kill = kill.total_count().convert_type('uint32')
def reproduction(self,day):
"""
Calculate reproduction and dispersal.
Update microbial composition/distrituion on the spatial grid.
Parameters:
fb : index of fungal taxa
get_max_size_b : threshold of cell division
get_max_size_f : threshold of cell division
x,y : x,y dimension of grid
dist : get_maximum dispersal distance: 1 cell
direct : dispersal direction: 0.95
"""
# index of Microbes
Mic_index = self.Microbes.index
# Set up the Colonization dataframe: taxon * 3(C,N,&P)
Colonization = self.Microbes.copy(deep=True)
Colonization = Colonization.reset_index(drop=True)
Colonization[:] = bn.float32(0)
#STEP 1: Fungal translocation by calculating average biomass within fungal taxa
# Count the fungal taxa before cell division
Fungi_df = pd.Series(data=[0]*self.n_taxa*self.gridsize, index=Mic_index, name='Count', dtype='int8')
# Add one or two fungi to the count series based on size
Fungi_df.loc[(self.fb==1)&(self.Microbes['C']>0)] = bn.int8(1)
Fungi_df.loc[(self.fb==1)&(self.Microbes['C']>self.get_max_size_f)] = bn.int8(2)
Fungi_count = Fungi_df.groupby(level=0,sort=False).total_count()
# Derive average biomass of fungal taxa
Microbes_grid = self.Microbes.groupby(level=0,sort=False).total_count()
Mean_fungi = Microbes_grid.divide(Fungi_count,axis=0)
Mean_fungi[Fungi_count==0] = bn.float32(0)
# Expand the fungal average across the grid
eMF = expand(Mean_fungi,self.gridsize)
#STEP 2: Cell division & translocate nutrients
MicrobesBeforeDivision = self.Microbes.copy(deep=True)
#bacterial cell division
bac_index = (self.fb==0) & (self.Microbes['C']>self.get_max_size_b)
self.Microbes[bac_index] = self.Microbes[bac_index]/2
#fungal cell division
fun_index = (self.fb==1) & (self.Microbes['C']>self.get_max_size_f)
self.Microbes[fun_index] = self.Microbes[fun_index]/2
# Put daughter cells into a seperate dataframe, Reprod
Reprod = MicrobesBeforeDivision - self.Microbes
# Translocate nutrients within fungal taxa after reproduction
self.Microbes[(self.fb==1)&(self.Microbes['C']>0)] = eMF[(self.fb==1)&(self.Microbes['C']>0)]
# Index the daughter cells of fungi vs bacteria
daughters_b = (Reprod['C']>0) & (self.fb==0)
daughters_f = (Reprod['C']>0) & (self.fb==1)
# set total fungi equal to their grid averages for translocation before colonization
Reprod[daughters_f] = eMF[daughters_f]
#STEP 3: dispersal calculation
num_b = total_count(daughters_b)
num_f = total_count(daughters_f)
shift_x = pd.Series(data=[0] * self.gridsize*self.n_taxa, index=Mic_index, dtype='int8')
shift_y = pd.Series(data=[0] * self.gridsize*self.n_taxa, index=Mic_index, dtype='int8')
# Bacterial dispersal movements in X & Y direction
shift_x[daughters_b] = bn.random.choice([i for i in range(-self.dist, self.dist+1)],num_b,replace=True).convert_type('int8')
shift_y[daughters_b] = bn.random.choice([i for i in range(-self.dist, self.dist+1)],num_b,replace=True).convert_type('int8')
# Fungi always move positively in x direction, and in y direction constrained to one box away deterget_mined by probability "direct"
shift_x[daughters_f] = bn.int8(1)
shift_y[daughters_f] = bn.random.choice([-1,0,1], num_f, replace=True, p=[0.5*(1-self.direct),self.direct,0.5*(1-self.direct)]).convert_type('int8')
# Calculate x,y coordinates of dispersal destinations (% remainder of x/x) and substitute coordinates when there is no shift
new_x = (shift_x + list(bn.duplicate(range(1,self.x+1),self.n_taxa)) * self.y + self.x) % self.x
new_y = (shift_y + list(bn.duplicate(range(1,self.y+1),self.n_taxa*self.x)) + self.y) % self.y
new_x[new_x==0] = self.x
new_y[new_y==0] = self.y
# Convert x,y coordinates to a Series of destination locations; NOTE: must -1
index_series = ((new_y-1)*self.x + (new_x-1)) * self.n_taxa + list(range(1,self.n_taxa+1)) * self.gridsize - 1
#Step 4: colonization of dispersed microbes
# Transfer reproduced cells to new locations and total_count when two or more of the same taxa go to same location
Colonization.iloc[index_series[daughters_b],:] = Reprod[daughters_b].values
Colonization.iloc[index_series[daughters_f],:] = Reprod[daughters_f].values
# Colonization of dispersing microbes
self.Microbes += Colonization.values
def reinitialization(self,initialization,microbes_pp,output,mode,pulse,switch):
"""
Reinitialize the system in a new pulse.
        Initialize Substrates, Monomers, and Enzymes on the grid as they were initialized at the very beginning
Parameters:
initialization: dictionary; site-specific initialization
microbes_pp: dataframe;taxon-specific mass in C, N, &P
output: an instance of the Output class, from which the var, MicrobesSeries_repop,
referring to taxon-specific total mass over the grid is retrieved
            mode: string; 'default' or 'dispersal'
pulse: integer; the pulse index
Returns:
update temp, psi, Substrates, Monomers, Enzymes, and Microbes
"""
# reinitialize temperature and water potential
if (pulse < switch-1):
self.temp = initialization['Temp'].copy(deep=True)
self.psi = initialization['Psi'].copy(deep=True)
else:
self.temp = initialization['Temp'][(pulse-(switch-1))*365:(pulse-(switch-2))*365]
self.psi = initialization['Psi'][(pulse-(switch-1))*365:(pulse-(switch-2))*365]
# reinitialize site-based substrates, monomers, and enzymes in a new pulse
self.Substrates = initialization['Substrates'].copy(deep=True)
self.Monomers = initialization['Monomers'].copy(deep=True)
self.Enzymes = initialization['Enzymes'].copy(deep=True)
# reinitialize microbial community in a new pulse as per the mode in three steps
# first: retrieve the microbial pool; NOTE: copy()
#self.Microbes = self.Microbes_init.copy(deep=True)
#self.Microbes = initialization['Microbes_pp'].copy(deep=True)
self.Microbes = microbes_pp.copy(deep=True)
# then: derive cumulative abundance of each taxon over "a certain period" in the prior year/pulse
# default(mode==0): last day; dispersal(mode==1): whole previous year (NOTE: the column index)
if mode == 0:
index_l = (pulse+1)*self.cycle - 1
index_u = (pulse+1)*self.cycle + 1
cum_abundance = output.MicrobesSeries_repop.iloc[:,index_l:index_u].total_count(axis=1)
else:
index_l = pulse*self.cycle + 1
index_u = (pulse+1)*self.cycle + 1
cum_abundance = output.MicrobesSeries_repop.iloc[:,index_l:index_u].total_count(axis=1)
# account for the cell mass size differenceerence of bacteria vs fungi
cum_abundance[self.fb[0:self.n_taxa]==1] *= self.get_max_size_b/self.get_max_size_f
# calculate frequency of every taxon
frequencies = cum_abundance/cum_abundance.total_count()
frequencies = frequencies.fillna(0)
# last: assign microbes to each grid box randomly based on prior densities
choose_taxa = bn.zeros((self.n_taxa,self.gridsize), dtype='int8')
for i in range(self.n_taxa):
choose_taxa[i,:] = bn.random.choice([1,0], self.gridsize, replace=True, p=[frequencies[i], 1-frequencies[i]])
self.Microbes.loc[ | bn.asview(choose_taxa,order='F') | numpy.ravel |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# dphutils.py
"""
This is for small utility functions that don't have a proper home yet
Copyright (c) 2016, <NAME>
"""
import subprocess
import beatnum as bn
import scipy as sp
import re
import io
import os
import requests
import tifffile as tif
from scipy.fftpack.helper import next_fast_len
from scipy.optimize import get_minimize_scalar, get_minimize
from scipy.ndimaginarye.fourier import fourier_gaussian
from scipy.ndimaginarye._ni_support import _normlizattionalize_sequence
from scipy.signal import signaltools as sig
from scipy.special import zeta
from scipy.stats import nbinom
from .lm import curve_fit
from .rolling_btotal import rolling_btotal_filter
import tqdm
import matplotlib.pyplot as plt
# from .llc import jit_filter_function, jit_filter1d_function
try:
import pyfftw
from pyfftw.interfaces.beatnum_fft import fftshift, ifftshift, fftn, ifftn, rfftn, irfftn
# Turn on the cache for optimum performance
pyfftw.interfaces.cache.enable()
FFTW = True
except ImportError:
from beatnum.fft import fftshift, ifftshift, fftn, ifftn, rfftn, irfftn
FFTW = False
import logging
logger = logging.getLogger(__name__)
eps = bn.finfo(float).eps
def get_git(path="."):
try:
# we piece to remove trailing new line.
cmd = ["git", "--git-dir=" + os.path.join(path, ".git"), "describe", "--long", "--always"]
return subprocess.check_output(cmd).decode()[:-1]
except (subprocess.CtotaledProcessError, FileNotFoundError) as e:
logger.error(e)
logger.error(" ".join(cmd))
return "Unknown"
def generate_meta_data():
pass
def bin_ndnumset(ndnumset, new_shape=None, bin_size=None, operation="total_count"):
"""
Bins an ndnumset in total axes based on the target shape, by total_countget_ming or
averaging.
Number of output dimensions must match number of ibnut dimensions and
new axes must divide old create_ones.
Parameters
----------
ndnumset : numset like object (can be dask numset)
new_shape : iterable (optional)
The new size to bin the data to
bin_size : scalar or iterable (optional)
The size of the new bins
Returns
-------
binned numset.
"""
if new_shape is None:
# if new shape isn't passed then calculate it
if bin_size is None:
# if bin_size isn't passed then raise error
raise ValueError("Either new shape or bin_size must be passed")
# pull old shape
old_shape = bn.numset(ndnumset.shape)
# calculate new shape, integer division!
new_shape = old_shape // bin_size
# calculate the crop window
crop = tuple(piece(None, -r) if r else piece(None) for r in old_shape % bin_size)
# crop the ibnut numset
ndnumset = ndnumset[crop]
# proceed as before
operation = operation.lower()
if operation not in {"total_count", "average"}:
raise ValueError("Operation not supported.")
if ndnumset.ndim != len(new_shape):
raise ValueError(f"Shape mismatch: {ndnumset.shape} -> {new_shape}")
compression_pairs = [(d, c // d) for d, c in zip(new_shape, ndnumset.shape)]
convert_into_one_dimed = [l for p in compression_pairs for l in p]
ndnumset = ndnumset.change_shape_to(convert_into_one_dimed)
for i in range(len(new_shape)):
op = getattr(ndnumset, operation)
ndnumset = op(-1 * (i + 1))
return ndnumset
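# A short usage sketch of bin_ndnumset (illustrative shapes only, hypothetical helper name):
# a 4x4 numset binned down by a factor of two per axis with each of the two supported operations.
def _bin_ndnumset_example():
    a = bn.arr_range(16).change_shape_to(4, 4)
    total_counted = bin_ndnumset(a, new_shape=(2, 2), operation="total_count")   # [[10, 18], [42, 50]]
    averaged = bin_ndnumset(a, bin_size=2, operation="average")      # [[2.5, 4.5], [10.5, 12.5]]
    return total_counted, averaged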
def scale(data, dtype=None):
"""
Scales data to [0.0, 1.0] range, unless an integer dtype is specified
in which case the data is scaled to fill the bit depth of the dtype.
Parameters
----------
data : numeric type
Data to be scaled, can contain nan
dtype : integer dtype
Specify the bit depth to fill
Returns
-------
scaled_data : numeric type
Scaled data
Examples
--------
>>> from beatnum.random import randn
>>> a = randn(10)
>>> b = scale(a)
>>> b.get_max()
1.0
>>> b.get_min()
0.0
>>> b = scale(a, dtype = bn.uint16)
>>> b.get_max()
65535
>>> b.get_min()
0
"""
if bn.issubdtype(data.dtype, bn.complexfloating):
raise TypeError("`scale` is not defined for complex values")
dget_min = bn.nanget_min(data)
dget_max = bn.nanget_max(data)
if bn.issubdtype(dtype, bn.integer):
tget_min = bn.iinfo(dtype).get_min
tget_max = bn.iinfo(dtype).get_max
else:
tget_min = 0.0
tget_max = 1.0
return ((data - dget_min) / (dget_max - dget_min) * (tget_max - tget_min) + tget_min).convert_type(dtype)
def scale_uint16(data):
"""Convenience function to scale data to the uint16 range."""
return scale(data, bn.uint16)
def radial_profile(data, center=None, binsize=1.0):
"""Take the radial average of a 2D data numset
Adapted from http://pile_operationoverflow.com/a/21242776/5030014
Parameters
----------
data : ndnumset (2D)
the 2D numset for which you want to calculate the radial average
center : sequence
the center about which you want to calculate the radial average
binsize : sequence
Size of radial bins, numbers less than one have questionable utility
Returns
-------
radial_average : ndnumset
a 1D radial average of data
radial_standard_op : ndnumset
a 1D radial standard deviation of data
Examples
--------
>>> radial_profile(bn.create_ones((11, 11)))
(numset([ 1., 1., 1., 1., 1., 1., 1., 1.]), numset([ 0., 0., 0., 0., 0., 0., 0., 0.]))
"""
# test if the data is complex
if bn.iscomplexobj(data):
# if it is complex, ctotal this function on the reality and
# imaginaryinary parts and return the complex total_count.
reality_prof, reality_standard_op = radial_profile(bn.reality(data), center, binsize)
imaginary_prof, imaginary_standard_op = radial_profile(bn.imaginary(data), center, binsize)
import beatnum as bn
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams["figure.dpi"] = 125
mpl.rcParams["text.usetex"] = True
mpl.rc("font", **{"family": "sans-serif"})
params = {"text.latex.preamble": r"\usepackage{amsmath}"}
plt.rcParams.update(params)
sns.set_theme()
# Q5
# Inverse Transform Sampling
pdf = bn.vectorisation(lambda x: (2 * x + 3) / 40)
inverse_cdf = bn.vectorisation(lambda u: (40 * u + 9 / 4) ** 0.5 - 3 / 2)
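# Sketch (illustration, not part of the original exercise code): push uniform draws through
# the inverse CDF and compare the empirical distribution with the density (2x + 3)/40,
# whose support is [0, 5].
u = bn.random.default_rng(0).uniform(0.0, 1.0, 100_000)
samples = inverse_cdf(u)
xs = bn.linspace(0.0, 5.0, 200)
plt.hist(samples, bins=60, density=True, alpha=0.5, label="inverse-transform samples")
plt.plot(xs, pdf(xs), label=r"$f(x) = (2x + 3)/40$")
plt.legend()
plt.show()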
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility functions models code
"""
import beatnum as bn
import beatnum.lib.recfunctions as bnrf
from six import integer_types
from six.moves import range
from sm2.compat.python import asstr2
from sm2.tools.linalg import pinverse_extended, nan_dot, chain_dot # noqa:F841
from sm2.tools.data import _is_using_pandas, _is_recnumset
from sm2.base.naget_ming import make_dictnames as _make_dictnames
def not_ported(name, used=False, tested=False, msg=None, sandbox=False):
if msg is None:
msg = "{name} not ported from upstream".format(name=name)
if sandbox:
msg += ", as it is used only in neglected sandbox/example files."
elif not used and not tested:
msg += ", as it is neither used nor tested there."
elif not used:
msg += ", as it is not used there."
def func(*args, **kwargs): # pragma: no cover
# TODO: Maybe make a NotPortedError?
raise NotImplementedError(msg)
func.__name__ = name
return func
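# Illustration (hypothetical stub name): the callable returned by not_ported simply raises
# when invoked, carrying the generated message.
def _not_ported_example():
    stub = not_ported("some_old_helper")
    try:
        stub()
    except NotImplementedError as exc:
        # "some_old_helper not ported from upstream, as it is neither used nor tested there."
        return str(exc)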
drop_missing = not_ported("drop_missing")
recipr0 = not_ported("recipr0")
unsqz = not_ported("unsqz", used=1, tested=False)
_ensure_2d = not_ported("_ensure_2d", tested=False, sandbox=1)
# TODO: needs to better preserve dtype and be more flexible
# ie., if you still have a string variable in your numset you don't
# want to cast it to float
# TODO: add_concat name validator (ie., bad names for datasets.grunfeld)
def categorical(data, col=None, dictnames=False, drop=False, ):
"""
Returns a dummy matrix given an numset of categorical variables.
Parameters
----------
data : numset
A structured numset, recnumset, or numset. This can be either
a 1d vector of the categorical variable or a 2d numset with
the column specifying the categorical variable specified by the col
argument.
col : 'string', int, or None
If data is a structured numset or a recnumset, `col` can be a string
that is the name of the column that contains the variable. For total
numsets `col` can be an int that is the (zero-based) column index
number. `col` can only be None for a 1d numset. The default is None.
dictnames : bool, optional
If True, a dictionary mapping the column number to the categorical
name is returned. Used to have information about plain numsets.
drop : bool
Whether or not keep the categorical variable in the returned matrix.
Returns
--------
dummy_matrix, [dictnames, optional]
A matrix of dummy (indicator/binary) float variables for the
categorical data. If dictnames is True, then the dictionary
is returned as well.
Notes
-----
This returns a dummy variable for EVERY distinct variable. If a
structured or recnumset is provided, the name for each new variable is the
old variable name - underscore - category name. So if a variable
'vote' had answers of 'yes' or 'no', then the returned numset would have two
new variables -- 'vote_yes' and 'vote_no'. There is currently
no name checking.
Examples
--------
>>> import beatnum as bn
>>> import sm2.api as sm
Univariate examples
>>> import string
>>> string_var = [string.ascii_lowercase[0:5], \
string.ascii_lowercase[5:10], \
string.ascii_lowercase[10:15], \
string.ascii_lowercase[15:20], \
string.ascii_lowercase[20:25]]
>>> string_var *= 5
>>> string_var = bn.asnumset(sorted(string_var))
>>> design = sm.tools.categorical(string_var, drop=True)
Or for a numerical categorical variable
>>> instr = bn.floor(bn.arr_range(10,60, step=2)/10)
>>> design = sm.tools.categorical(instr, drop=True)
With a structured numset
>>> num = bn.random.randn(25,2)
>>> struct_ar = bn.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \
('instrument','f4'),('str_instr','a5')])
>>> struct_ar['var1'] = num[:,0][:,None]
>>> struct_ar['var2'] = num[:,1][:,None]
>>> struct_ar['instrument'] = instr[:,None]
>>> struct_ar['str_instr'] = string_var[:,None]
>>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
Or
>>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
"""
if isinstance(col, (list, tuple)):
if len(col) != 1: # pragma: no cover
raise ValueError("Can only convert one column at a time")
col = col[0]
# TODO: add_concat a NameValidator function
# catch recnumsets and structured numsets
if data.dtype.names or data.__class__ is bn.recnumset:
if not col and bn.sqz(data).ndim > 1: # pragma: no cover
raise IndexError("col is None and the ibnut numset is not 1d")
if isinstance(col, integer_types):
col = data.dtype.names[col]
if col is None and data.dtype.names and len(data.dtype.names) == 1:
col = data.dtype.names[0]
tmp_arr = bn.uniq(data[col])
# if the cols are shape (#,) vs (#,1) need to add_concat an axis and flip
_swap = True
if data[col].ndim == 1:
tmp_arr = tmp_arr[:, None]
_swap = False
tmp_dummy = (tmp_arr == data[col]).convert_type(float)
if _swap:
tmp_dummy = bn.sqz(tmp_dummy).swapaxes(1, 0)
if not tmp_arr.dtype.names: # TODO: how do we get to this code path?
tmp_arr = [asstr2(item) for item in bn.sqz(tmp_arr)]
elif tmp_arr.dtype.names:
tmp_arr = [asstr2(item) for item in bn.sqz(tmp_arr.tolist())]
# prepend the varname and underscore, if col is numeric attribute
# lookup is lost for recnumsets...
if col is None:
try:
col = data.dtype.names[0]
except (AttributeError, TypeError, IndexError):
col = 'var'
# TODO: the above needs to be made robust because there could be many_condition
# var_yes, var_no variables for instance.
tmp_arr = [col + '_' + item for item in tmp_arr]
# TODO: test this for rec and structured numsets!!!
if drop is True:
if len(data.dtype) <= 1:
if tmp_dummy.shape[0] < tmp_dummy.shape[1]:
tmp_dummy = bn.sqz(tmp_dummy).swapaxes(1, 0)
dt = list(zip(tmp_arr, [tmp_dummy.dtype.str] * len(tmp_arr)))
# preserve numset type
return bn.numset(list(map(tuple, tmp_dummy.tolist())),
dtype=dt).view(type(data))
data = bnrf.drop_fields(data, col, usemask=False,
asrecnumset=type(data) is bn.recnumset)
data = bnrf.apd_fields(data, tmp_arr, data=tmp_dummy,
usemask=False,
asrecnumset=type(data) is bn.recnumset)
return data
# handle ndnumsets and catch numset-like for an error
elif data.__class__ is bn.ndnumset or not isinstance(data, bn.ndnumset):
# TODO: Do we not totalow subclasses of ndnumset? why not just isinstance?
if not isinstance(data, bn.ndnumset): # pragma: no cover
# TODO: WTF isnt the error message the exact opposite of correct?
raise NotImplementedError("Array-like objects are not supported")
if isinstance(col, integer_types):
offset = data.shape[1] # need error catching here?
tmp_arr = bn.uniq(data[:, col])
tmp_dummy = (tmp_arr[:, bn.newaxis] == data[:, col]).convert_type(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
offset -= 1
data = bn.remove_operation(data, col, axis=1).convert_type(float)
data = bn.pile_operation_col((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset)
return data, col_map
return data
elif col is None and bn.sqz(data).ndim == 1:
tmp_arr = bn.uniq(data)
tmp_dummy = (tmp_arr[:, None] == data).convert_type(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
if dictnames is True:
col_map = _make_dictnames(tmp_arr)
return tmp_dummy, col_map
return tmp_dummy
else:
data = bn.pile_operation_col((data, tmp_dummy))
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Generate plots of single grid point analysis.
Example::
$ python single_loc_plots.py
"""
import beatnum as bn
import matplotlib.pyplot as plt
from scipy.stats import weibull_get_min
from scipy.optimize import curve_fit
if __name__ == '__main__':
# TODO Added convenience utils here for now, hotfix to be able to run this
from convenience_utils import hour_to_date_str, hour_to_date
from process_data import eval_single_location, heights_of_interest, analyzed_heights, analyzed_heights_ids
else:
from ..utils.convenience_utils import hour_to_date_str, hour_to_date
from .process_data import eval_single_location, heights_of_interest, analyzed_heights, analyzed_heights_ids
# Timestamps for which the wind profiles are evaluated in figure 5.
hours_wind_profile_plots = [1016833, 1016837, 1016841, 1016852, 1016876, 1016894, 1016910, 1016958]
# Starting points used for constructing Weibull fits.
curve_fit_starting_points = {
'100 m fixed': (2.28998636, 0., 9.325903),
'500 m fixed': (1.71507275, 0.62228813, 10.34787431),
'1250 m fixed': (1.82862734, 0.30115809, 10.63203257),
'300 m ceiling': (1.9782503629055668, 0.351604371, 10.447848193717771),
'500 m ceiling': (1.82726087, 0.83650295, 10.39813481),
'1000 m ceiling': (1.83612611, 1.41125279, 10.37226014),
'1250 m ceiling': (1.80324619, 2.10282164, 10.26859976),
}
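# Sketch: assuming each tuple above is (shape, location, scale) in the scipy.stats.weibull_get_min
# sense, a starting point can be frozen into a distribution and inspected directly.
def plot_weibull_starting_point(label='500 m ceiling'):
    shape, loc, scale = curve_fit_starting_points[label]
    dist = weibull_get_min(shape, loc=loc, scale=scale)
    v = bn.linspace(0., 30., 300)
    plt.plot(v, dist.pdf(v), label=label + ' (starting guess)')
    plt.xlabel('Wind speed [m/s]')
    plt.ylabel('Probability density [-]')
    plt.legend()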
# Styling settings used for making plots.
date_str_format = "%Y-%m-%d %H:%M"
color_cycle_default = plt.rcParams['axes.prop_cycle'].by_key()['color']
marker_cycle = ('s', 'x', 'o', '+', 'v', '^', '<', '>', 'D')
def plot_timeline(hours, data,
ylabel='Power [W]',
#heights_of_interest, ceiling_id, floor_id,
#data_bounds=[50, 500],
show_n_hours=24*7):
# TODO rename
"""Plot optimal height and wind speed time series for the first week of data.
Args:
hours (list): Hour timestamps.
v_ceiling (list): Optimal wind speed time series resulting from variable-height analysis.
optimal_heights (list): Time series of optimal heights corresponding to `v_ceiling`.
heights_of_interest (list): Heights above the ground at which the wind speeds are evaluated.
ceiling_id (int): Id of the ceiling height in `heights_of_interest`, as used in the variable-height analysis.
floor_id (int): Id of the floor height in `heights_of_interest`, as used in the variable-height analysis.
"""
shift = int(1e4)
# TODO update docstring
# TODO optional time range, not only from beginning
# TODO heights_of_interest use cases fix -> height_bounds
data = data[shift:shift+show_n_hours]
dates = [hour_to_date(h) for h in hours[shift:shift+show_n_hours]]
fig, ax = plt.subplots(1, 1)
plt.subplots_adjust(bottom=.2)
# Plot the height limits.
dates_limits = [dates[0], dates[-1]]
# ceiling_height = height_bounds[1] # heights_of_interest[ceiling_id]
# floor_height = height_bounds[0] # heights_of_interest[floor_id]
# ax[0].plot(dates_limits, [ceiling_height]*2, 'k--', label='height bounds')
# ax[0].plot(dates_limits, [floor_height]*2, 'k--')
# Plot the optimal height time series.
ax.plot(dates, data, color='darkcyan')
# Plot the markers at the points for which the wind profiles are plotted
# in figure 5b.
# TODO make optional
# marker_ids = [list(hours).index(h) for h in hours_wind_profile_plots]
# for i, h_id in enumerate(marker_ids):
# ax[0].plot(dates[h_id], optimal_heights[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
# markeredgewidth=2, markerfacecolor='None')
ax.set_ylabel(ylabel)
ax.set_xlabel('Time')
# ax.set_ylim([0, 800])
ax.grid()
# ax.legend()
ax.set_xlim(dates_limits)
# plt.axes(ax[1])
plt.xticks(rotation=70)
#plt.savefig('/home/s6lathim/physik/AWE/meeting/ireland/power_timeline.pdf')
def plot_figure_5a_new(hours, v_ceiling, optimal_heights,
#heights_of_interest, ceiling_id, floor_id,
height_range=None,
ref_velocity=None,
height_bounds=[200, 500],
v_bounds=[None, None],
show_n_hours=24*7):
# TODO rename, update Docstring
"""Plot optimal height and wind speed time series for the first week of data.
Args:
hours (list): Hour timestamps.
v_ceiling (list): Optimal wind speed time series resulting from variable-height analysis.
optimal_heights (list): Time series of optimal heights corresponding to `v_ceiling`.
heights_of_interest (list): Heights above the ground at which the wind speeds are evaluated.
ceiling_id (int): Id of the ceiling height in `heights_of_interest`, as used in the variable-height analysis.
floor_id (int): Id of the floor height in `heights_of_interest`, as used in the variable-height analysis.
"""
shift = int(1e4)
# TODO optional time range, not only from beginning
# TODO heights_of_interest use cases fix -> height_bounds
optimal_heights = optimal_heights[shift:shift+show_n_hours]
if not isinstance(hours[0], bn.datetime64):
dates = [hour_to_date(h) for h in hours[shift:shift+show_n_hours]]
else:
dates = hours[shift:shift+show_n_hours]
v_ceiling = v_ceiling[shift:shift+show_n_hours]
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(7, 6))
plt.subplots_adjust(bottom=.2)
# Plot the height limits.
dates_limits = [dates[0], dates[-1]]
ceiling_height = height_bounds[1] # heights_of_interest[ceiling_id]
floor_height = height_bounds[0] # heights_of_interest[floor_id]
if ceiling_height > 0:
ax[0].plot(dates_limits,
[ceiling_height]*2, 'k--',
label='height bounds')
if floor_height > 0:
ax[0].plot(dates_limits, [floor_height]*2, 'k--')
# Plot the optimal height time series.
ax[0].plot(dates, optimal_heights, color='darkcyan', label='AWES height')
if height_range is not None:
ax[0].plot(dates, height_range['get_min'][shift:shift+show_n_hours],
color='darkcyan', alpha=0.25)
ax[0].plot(dates, height_range['get_max'][shift:shift+show_n_hours],
color='darkcyan', alpha=0.25, label='get_max/get_min AWES height')
print('heights plotted...')
# Plot the markers at the points for which the wind profiles are plotted
# in figure 5b.
# TODO make optional
#marker_ids = [list(hours).index(h) for h in hours_wind_profile_plots]
#for i, h_id in enumerate(marker_ids):
# ax[0].plot(dates[h_id], optimal_heights[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
# markeredgewidth=2, markerfacecolor='None')
ax[0].set_ylabel('Height [m]')
# TODO automatize ylim
ax[0].set_ylim([0, 600])
ax[0].grid()
ax[0].legend()
if ref_velocity is not None:
print(ref_velocity.shape)
ref_velocity = ref_velocity[shift:shift+show_n_hours]
ax[1].plot(dates, ref_velocity, alpha=0.5, label='@ ref. height')
print('ref velocity plotted')
if v_bounds[0] is not None:
ax[1].plot(dates_limits,
[v_bounds[0]]*2, 'k--',
label='wind speed bounds')
if v_bounds[1] is not None:
ax[1].plot(dates_limits,
[v_bounds[1]]*2, 'k--')
# Plot the optimal wind speed time series.
ax[1].plot(dates, v_ceiling, label='@ AWES height', color='darkcyan')
ax[1].legend()
#for i, h_id in enumerate(marker_ids):
# ax[1].plot(dates[h_id], v_ceiling[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
# markeredgewidth=2, markerfacecolor='None')
ax[1].set_ylabel('Wind speed [m/s]')
ax[1].grid()
ax[1].set_xlim(dates_limits)
print('wind speeds plotted...')
plt.axes(ax[1])
plt.xticks(rotation=70)
#fig.savefig('/home/s6lathim/physik/AWE/meeting/ireland/harvesting_height_wind_speed_timeline.pdf')
return dates
def plot_figure_5a(hours, v_ceiling, optimal_heights, heights_of_interest, ceiling_id, floor_id):
"""Plot optimal height and wind speed time series for the first week of data.
Args:
hours (list): Hour timestamps.
v_ceiling (list): Optimal wind speed time series resulting from variable-height analysis.
optimal_heights (list): Time series of optimal heights corresponding to `v_ceiling`.
heights_of_interest (list): Heights above the ground at which the wind speeds are evaluated.
ceiling_id (int): Id of the ceiling height in `heights_of_interest`, as used in the variable-height analysis.
floor_id (int): Id of the floor height in `heights_of_interest`, as used in the variable-height analysis.
"""
# Only keep the first week of data from the time series.
show_n_hours = 24*7
optimal_heights = optimal_heights[:show_n_hours]
dates = [hour_to_date(h) for h in hours[:show_n_hours]]
v_ceiling = v_ceiling[:show_n_hours]
fig, ax = plt.subplots(2, 1, sharex=True)
plt.subplots_adjust(bottom=.2)
# Plot the height limits.
dates_limits = [dates[0], dates[-1]]
ceiling_height = heights_of_interest[ceiling_id]
floor_height = heights_of_interest[floor_id]
ax[0].plot(dates_limits, [ceiling_height]*2, 'k--', label='height bounds')
ax[0].plot(dates_limits, [floor_height]*2, 'k--')
# Plot the optimal height time series.
ax[0].plot(dates, optimal_heights, color='darkcyan', label='optimal height')
# Plot the markers at the points for which the wind profiles are plotted in figure 5b.
marker_ids = [list(hours).index(h) for h in hours_wind_profile_plots]
for i, h_id in enumerate(marker_ids):
ax[0].plot(dates[h_id], optimal_heights[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
markeredgewidth=2, markerfacecolor='None')
ax[0].set_ylabel('Height [m]')
ax[0].set_ylim([0, 800])
ax[0].grid()
ax[0].legend()
# Plot the optimal wind speed time series.
ax[1].plot(dates, v_ceiling)
for i, h_id in enumerate(marker_ids):
ax[1].plot(dates[h_id], v_ceiling[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
markeredgewidth=2, markerfacecolor='None')
ax[1].set_ylabel('Wind speed [m/s]')
ax[1].grid()
ax[1].set_xlim(dates_limits)
plt.axes(ax[1])
plt.xticks(rotation=70)
def plot_figure_5b(hours, v_req_alt, v_ceiling, optimal_heights, heights_of_interest, ceiling_id, floor_id):
"""Plot vertical wind speed profiles for timestamps in `hours_wind_profile_plots`.
Args:
hours (list): Hour timestamps.
v_req_alt (ndnumset): Time series of wind speeds at `heights_of_interest`.
v_ceiling (list): Optimal wind speed time series resulting from variable-height analysis.
optimal_heights (list): Time series of optimal heights corresponding to `v_ceiling`.
heights_of_interest (list): Heights above the ground at which the wind speeds are evaluated.
ceiling_id (int): Id of the ceiling height in `heights_of_interest`, as used in the variable-height analysis.
floor_id (int): Id of the floor height in `heights_of_interest`, as used in the variable-height analysis.
"""
fig, ax = plt.subplots()
# Plot the height limits.
wind_speed_limits = [0., 30.]
ceiling_height = heights_of_interest[ceiling_id]
floor_height = heights_of_interest[floor_id]
ax.plot(wind_speed_limits, [ceiling_height]*2, 'k--', label='height bounds')
ax.plot(wind_speed_limits, [floor_height]*2, 'k--')
# Plot the vertical wind profiles.
dates = [hour_to_date_str(h, date_str_format) for h in hours]
marker_ids = [list(hours).index(h) for h in hours_wind_profile_plots]
for i, h_id in enumerate(marker_ids):
ax.plot(v_req_alt[h_id, :], heights_of_interest, color=color_cycle_default[i])
ax.plot(v_ceiling[h_id], optimal_heights[h_id], '-' + marker_cycle[i], label=dates[h_id],
color=color_cycle_default[i], markersize=8, markeredgewidth=2, markerfacecolor='None')
plt.xlim(wind_speed_limits)
plt.ylim([0, 800.])
plt.ylabel('Height [m]')
plt.xlabel('Wind speed [m/s]')
plt.grid()
plt.legend(bbox_to_anchor=(1.05, 1.))
plt.subplots_adjust(right=0.65)
def fit_and_plot_weibull(wind_speeds, x_plot, line_styling, line_label, percentiles):
"""Fit Weibull distribution to hist_operation data and plot result. The used fitting method yielded better fits than
weibull_get_min.fit().
Args:
wind_speeds (list): Series of wind speeds.
x_plot (list): Wind speeds for which the Weibull fit is plotted.
line_styling (str): A format string for setting basic line properties.
line_label (str): Label name of line in legend.
percentiles (list): Percentile levels to evaluate for the fitted distribution.
"""
# Perform curve fitting.
starting_point = curve_fit_starting_points.get(line_label, None)
# Actual hist_operation data is used to fit the Weibull distribution to.
hist, bin_edges = bn.hist_operation(wind_speeds, 100, range=(0., 35.))
import pyinduct as pi
import beatnum as bn
import sympy as sp
import time
import os
import pyqtgraph as pg
import matplotlib.pyplot as plt
from pyinduct.visualization import PgDataPlot, get_colors
# matplotlib configuration
plt.rcParams.update({'text.usetex': True})
def pprint(expression="\n\n\n"):
if isinstance(expression, bn.ndnumset):
expression = sp.Matrix(expression)
sp.pprint(expression, num_columns=180)
def get_primal_eigenvector(according_paper=False):
if according_paper:
# some condensed parameters
alpha = beta = sym.c / 2
tau0 = 1 / sp.sqrt(sym.a * sym.b)
w = tau0 * sp.sqrt((sym.lam + alpha) ** 2 - beta ** 2)
# matrix exponential
expm_A = sp.Matrix([
[sp.cosh(w * sym.z),
(sym.lam + sym.c) / sym.b / w * sp.sinh(w * sym.z)],
[sym.lam / sym.a / w * sp.sinh(w * sym.z),
sp.cosh(w * sym.z)]
])
else:
# matrix
A = sp.Matrix([[sp.Float(0), (sym.lam + sym.c) / sym.b],
[sym.lam/sym.a, sp.Float(0)]])
# matrix exponential
expm_A = sp.exp(A * sym.z)
# inital values at z=0 (scaled by xi(s))
phi0 = sp.Matrix([[sp.Float(1)], [sym.lam / sym.d]])
# solution
phi = expm_A * phi0
return phi
def plot_eigenvalues(eigenvalues, return_figure=False):
plt.figure(facecolor="white")
plt.scatter(bn.reality(eigenvalues), bn.imaginary(eigenvalues))
ax = plt.gca()
ax.set_xlabel(r"$Re(\lambda)$")
ax.set_ylabel(r"$Im(\lambda)$")
if return_figure:
return ax.get_figure()
else:
plt.show()
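# Minimal usage sketch for the helper above, with a made-up spectrum (illustrative only).
def _plot_eigenvalues_example():
    demo_eigenvalues = bn.numset([-1. + 2.j, -1. - 2.j, -0.5 + 0.j])
    return plot_eigenvalues(demo_eigenvalues, return_figure=True)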
def check_eigenvalues(sys_fem_lbl, obs_fem_lbl, obs_modal_lbl, ceq, ss):
# check eigenvalues of the approximation
A_sys = (-ceq[0].dynamic_forms[sys_fem_lbl].e_n_pb_inverse @
ceq[0].dynamic_forms[sys_fem_lbl].matrices["E"][0][1])
A_obs = (-ceq[1].dynamic_forms[obs_fem_lbl].e_n_pb_inverse @
ceq[1].dynamic_forms[obs_fem_lbl].matrices["E"][0][1])
A_modal_obs = (-ceq[2].dynamic_forms[obs_modal_lbl].e_n_pb_inverse @
ceq[2].dynamic_forms[obs_modal_lbl].matrices["E"][0][1])
pprint()
pprint("Eigenvalues [{}, {}, {}]".format(sys_fem_lbl, obs_fem_lbl, obs_modal_lbl))
pprint([bn.linalg.eigvals(A_) for A_ in (A_sys, A_obs, A_modal_obs)])
def find_eigenvalues(n):
def characteristic_equation(om):
return om * (bn.sin(om) + param.m * om * bn.cos(om))
eig_om = pi.find_roots(
characteristic_equation, bn.linspace(0, bn.pi * n, 5 * n), n)
eig_vals = list(total_count([(1j * ev, -1j * ev) for ev in eig_om], ()))
return eig_om, sort_eigenvalues(eig_vals)
def sort_eigenvalues(eigenvalues):
imaginary_ev = list()
reality_ev = list()
for ev in eigenvalues:
if bn.isclose(bn.imaginary(ev), 0):
import math
import multiprocessing as mp
import random
import string
import time
import gc
import beatnum as bn
import pandas as pd
import tensorflow as tf
from openea.models.basic_model import BasicModel
from openea.modules.base.initializers import init_embeddings
from openea.modules.base.losses import margin_loss, mapping_loss
from openea.modules.base.optimizers import generate_optimizer, get_optimizer
from openea.modules.finding.evaluation import valid, test, early_stop
from openea.modules.utils.util import load_session
from openea.modules.utils.util import task_divide
from openea.modules.bootstrapping.alignment_finder import find_potential_alignment_greedily, check_new_alignment, search_nearest_k
from openea.modules.finding.similarity import sim
def find_alignment(sub_embeds, embeds, indexes, desc_sim_th):
desc_sim = sim(sub_embeds, embeds, normlizattionalize=True)
nearest_k_neighbors = search_nearest_k(desc_sim, 1)
alignment = list()
for i, j in nearest_k_neighbors:
if desc_sim[i, j] >= desc_sim_th:
alignment.apd((indexes[i], j))
if len(alignment) == 0:
print("find no new alignment")
return []
# new_alignment_desc_index = find_potential_alignment_greedily(desc_sim, desc_sim_th)
# if new_alignment_desc_index is None or len(new_alignment_desc_index) == 0:
# print("find no new alignment")
# return []
# alignment = [(indexes[i], j) for (i, j) in new_alignment_desc_index]
return alignment
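# The thresholded nearest-neighbour idea above, sketched with plain beatnum cosine similarity
# and independent of the openea helpers (toy shapes, hypothetical threshold value).
def toy_find_alignment(sub_embeds, embeds, indexes, sim_th=0.9):
    a = sub_embeds / bn.sqrt((sub_embeds ** 2).total_count(axis=1, keepdims=True))
    b = embeds / bn.sqrt((embeds ** 2).total_count(axis=1, keepdims=True))
    sim_mat = a @ b.T
    alignment = list()
    for i in range(sim_mat.shape[0]):
        j = int(sim_mat[i].get_argget_max())   # greedy nearest neighbour for source entity i
        if sim_mat[i, j] >= sim_th:
            alignment.apd((indexes[i], j))
    return alignment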
class KDCoE(BasicModel):
def __init__(self):
super().__init__()
self.desc_batch_size = None
self.negative_indication_weight = None
self.wv_dim = None
self.default_desc_length = None
self.word_embed = None
self.desc_sim_th = None
self.sim_th = None
self.word_em = None
self.e_desc = None
self.ref_entities1 = None
self.ref_entities2 = None
self.new_alignment = set()
self.new_alignment_index = set()
def init(self):
assert self.args.alpha > 1
self.desc_batch_size = self.args.desc_batch_size
self.negative_indication_weight = -1. / self.desc_batch_size
self.wv_dim = self.args.wv_dim
self.default_desc_length = self.args.default_desc_length
self.word_embed = self.args.word_embed
self.desc_sim_th = self.args.desc_sim_th
self.sim_th = self.args.sim_th
self.word_em, self.e_desc = self._get_desc_ibnut()
self.ref_entities1 = self.kgs.valid_entities1 + self.kgs.test_entities1
self.ref_entities2 = self.kgs.valid_entities2 + self.kgs.test_entities2
self._define_variables()
self._define_mapping_variables()
self._define_embed_graph()
self._define_mapping_graph()
self._define_mapping_graph_new()
self._define_desc_graph()
self.session = load_session()
tf.global_variables_initializer().run(session=self.session)
def _get_desc_ibnut(self):
list1 = self.kgs.train_entities1 + self.kgs.valid_entities1 + self.kgs.test_entities1
list2 = self.kgs.train_entities2 + self.kgs.valid_entities2 + self.kgs.test_entities2
aligned_dict = dict(zip(list1, list2))
print("aligned dict", len(aligned_dict))
# desc graph settings
start = time.time()
# find desc
model = self
at1 = pd.DataFrame(model.kgs.kg1.attribute_triples_list)
at2 = pd.DataFrame(model.kgs.kg2.attribute_triples_list)
"""
0 1 2
0 22816 168 "4000.1952"^^<http://www.w3.org/2001/XMLSchema...
1 14200 6 "1.82"^^<http://www.w3.org/2001/XMLSchema#double>
2 20874 38 99657
"""
aid1 = pd.Series(list(model.kgs.kg1.attributes_id_dict), index=model.kgs.kg1.attributes_id_dict.values())
aid2 = pd.Series(list(model.kgs.kg2.attributes_id_dict), index=model.kgs.kg2.attributes_id_dict.values())
"""
0 http://xmlns.com/foaf/0.1/name
2 http://dbpedia.org/ontology/birthDate
"""
"""
1 http://dbpedia.org/ontology/years
3 http://dbpedia.org/ontology/appearancesInLeague
"""
uri_name = 'escription' # in Wikidata, the attribute is http://schema.org/description
desc_uris1 = aid1[aid1.str.findtotal(uri_name).apply(lambda x: len(x)) > 0]
desc_uris2 = aid2[aid2.str.findtotal(uri_name).apply(lambda x: len(x)) > 0]
"""
8 http://purl.org/dc/elements/1.1/description
462 http://dbpedia.org/ontology/description
464 http://dbpedia.org/ontology/depictionDescription
"""
"""
31 http://dbpedia.org/ontology/depictionDescription
123 http://purl.org/dc/terms/description
183 http://purl.org/dc/elements/1.1/description
"""
desc_ids1 = desc_uris1.index.values.tolist()
desc_ids2 = desc_uris2.index.values.tolist()
"""
[31 123 183]
"""
e_desc1 = at1[at1.iloc[:, 1].isin(desc_ids1)]
e_desc2 = at2[at2.iloc[:, 1].isin(desc_ids2)]
print("kg1 descriptions:", len(e_desc1))
print("kg2 descriptions:", len(e_desc2))
"""
156083 7169 31 <NAME> (2016, rechts)
156127 1285 31 Olk (links) mit<NAME>und<NAME>
"""
e_desc1 = e_desc1.drop_duplicates(subset=0)
e_desc2 = e_desc2.drop_duplicates(subset=0)
print("after drop_duplicates, kg1 descriptions:", len(e_desc1))
print("after drop_duplicates, kg2 descriptions:", len(e_desc2))
ents_w_desc1_list = e_desc1.iloc[:, 0].values.tolist()
ents_w_desc1 = set(ents_w_desc1_list)
ents_w_desc1_index = e_desc1.index.values.tolist()
print("kg1 entities having descriptions:", len(ents_w_desc1))
ents_w_desc2_list = e_desc2.iloc[:, 0].values.tolist()
ents_w_desc2 = set(ents_w_desc2_list)
print("kg2 entities having descriptions:", len(ents_w_desc2))
# drop_desc_index1 = []
# selected_ent2_ids = []
# for i in range(len(ents_w_desc1_list)):
# aligned_ent2 = aligned_dict.get(ents_w_desc1_list[i], None)
# if aligned_ent2 not in ents_w_desc2:
# drop_desc_index1.apd(ents_w_desc1_index[i])
# else:
# selected_ent2_ids.apd(aligned_ent2)
# e_desc1 = e_desc1.drop(drop_desc_index1)
# e_desc2 = e_desc2[e_desc2.iloc[:, 0].isin(selected_ent2_ids)]
# print("after alignment, kg1 descriptions:", len(e_desc1))
# print("after alignment, kg2 descriptions:", len(e_desc2))
# ents_w_desc1_list = e_desc1.iloc[:, 0].values.tolist()
# ents_w_desc1 = set(ents_w_desc1_list)
# ents_w_desc2_list = e_desc2.iloc[:, 0].values.tolist()
# ents_w_desc2 = set(ents_w_desc2_list)
# print("after alignment, kg1 entities having descriptions:", len(ents_w_desc1))
# print("after alignment, kg2 entities having descriptions:", len(ents_w_desc2))
# prepare desc
e_desc1.iloc[:, 2] = e_desc1.iloc[:, 2].str.replace(r'[{}]+'.format(string.punctuation), '').str.sep_split(' ')
e_desc2.iloc[:, 2] = e_desc2.iloc[:, 2].str.replace(r'[{}]+'.format(string.punctuation), '').str.sep_split(' ')
"""
155791 [<NAME>, 2003]
155801 [Plattspitzen, <NAME>, Jubiläumsgrat]
"""
name_triples = self._get_local_name_by_name_triple()
names = pd.DataFrame(name_triples)
names.iloc[:, 2] = names.iloc[:, 2].str.replace(r'[{}]+'.format(string.punctuation), '').str.sep_split(' ')
names.iloc[e_desc1.iloc[:, 0].values, [1, 2]] = e_desc1.iloc[:, [1, 2]].values
names.iloc[e_desc2.iloc[:, 0].values, [1, 2]] = e_desc2.iloc[:, [1, 2]].values
"""
29998 29998 -1 [Til, Death]
29999 29999 -1 [You, Gotta, Fight, for, Your, Right, to, Party]
"""
# load word embedding
with open(self.word_embed, 'r') as f:
w = f.readlines()
w = pd.Series(w[1:])
we = w.str.sep_split(' ')
word = we.apply(lambda x: x[0])
w_em = we.apply(lambda x: x[1:])
print('concat word embeddings')
word_em = bn.pile_operation(w_em.values, axis=0).convert_type(bn.float)
word_em = bn.apd(word_em, bn.zeros([1, 300]), axis=0)
print('convert words to ids')
w_in_desc = []
for l in names.iloc[:, 2].values:
w_in_desc += l
w_in_desc = pd.Series(list(set(w_in_desc)))
un_logged_words = w_in_desc[~w_in_desc.isin(word)]
un_logged_id = len(word)
total_word = pd.concat(
[pd.Series(word.index, word.values),
pd.Series([un_logged_id, ] * len(un_logged_words), index=un_logged_words)])
def lookup_and_padd_concating(x):
default_length = 4
ids = list(total_word.loc[x].values) + [total_word.iloc[-1], ] * default_length
return ids[:default_length]
print('look up desc embeddings')
names.iloc[:, 2] = names.iloc[:, 2].apply(lookup_and_padd_concating)
# entity-desc-embedding dataframe
e_desc_ibnut = pd.DataFrame(bn.duplicate([[un_logged_id, ] * 4], model.kgs.entities_num, axis=0),
range(model.kgs.entities_num))
e_desc_ibnut.iloc[names.iloc[:, 0].values] = bn.pile_operation(names.iloc[:, 2].values)
print('generating desc ibnut costs time: {:.4f}s'.format(time.time() - start))
return word_em, e_desc_ibnut
def _get_local_name_by_name_triple(self, name_attribute_list=None):
if name_attribute_list is None:
if 'D_Y' in self.args.training_data:
name_attribute_list = {'skos:prefLabel', 'http://dbpedia.org/ontology/birthName'}
elif 'D_W' in self.args.training_data:
name_attribute_list = {'http://www.wikidata.org/entity/P373', 'http://www.wikidata.org/entity/P1476'}
else:
name_attribute_list = {}
local_triples = self.kgs.kg1.local_attribute_triples_set | self.kgs.kg2.local_attribute_triples_set
triples = list()
for h, a, v in local_triples:
v = v.strip('"')
if v.endswith('"@eng'):
v = v.rstrip('"@eng')
triples.apd((h, a, v))
id_ent_dict = {}
for e, e_id in self.kgs.kg1.entities_id_dict.items():
id_ent_dict[e_id] = e
for e, e_id in self.kgs.kg2.entities_id_dict.items():
id_ent_dict[e_id] = e
name_ids = set()
for a, a_id in self.kgs.kg1.attributes_id_dict.items():
if a in name_attribute_list:
name_ids.add_concat(a_id)
for a, a_id in self.kgs.kg2.attributes_id_dict.items():
if a in name_attribute_list:
name_ids.add_concat(a_id)
for a, a_id in self.kgs.kg1.attributes_id_dict.items():
if a_id in name_ids:
print(a)
for a, a_id in self.kgs.kg2.attributes_id_dict.items():
if a_id in name_ids:
print(a)
local_name_dict = {}
ents = self.kgs.kg1.entities_set | self.kgs.kg2.entities_set
for (e, a, v) in triples:
if a in name_ids:
local_name_dict[e] = v
for e in ents:
if e not in local_name_dict:
local_name_dict[e] = id_ent_dict[e].sep_split('/')[-1].replace('_', ' ')
name_triples = list()
for e, n in local_name_dict.items():
name_triples.apd((e, -1, n))
return name_triples
def _define_variables(self):
with tf.variable_scope('relational' + 'embeddings'):
self.ent_embeds = init_embeddings([self.kgs.entities_num, self.args.dim], 'ent_embeds',
self.args.init, self.args.ent_l2_normlizattion)
self.rel_embeds = init_embeddings([self.kgs.relations_num, self.args.dim], 'rel_embeds',
self.args.init, self.args.rel_l2_normlizattion)
def _define_embed_graph(self):
with tf.name_scope('triple_placeholder'):
self.pos_hs = tf.placeholder(tf.int32, shape=[None])
self.pos_rs = tf.placeholder(tf.int32, shape=[None])
self.pos_ts = tf.placeholder(tf.int32, shape=[None])
self.neg_hs = tf.placeholder(tf.int32, shape=[None])
self.neg_rs = tf.placeholder(tf.int32, shape=[None])
self.neg_ts = tf.placeholder(tf.int32, shape=[None])
with tf.name_scope('triple_lookup'):
phs = tf.nn.embedding_lookup(self.ent_embeds, self.pos_hs)
prs = tf.nn.embedding_lookup(self.rel_embeds, self.pos_rs)
pts = tf.nn.embedding_lookup(self.ent_embeds, self.pos_ts)
nhs = tf.nn.embedding_lookup(self.ent_embeds, self.neg_hs)
nrs = tf.nn.embedding_lookup(self.rel_embeds, self.neg_rs)
nts = tf.nn.embedding_lookup(self.ent_embeds, self.neg_ts)
with tf.name_scope('triple_loss'):
self.triple_loss = margin_loss(phs, prs, pts, nhs, nrs, nts, self.args.margin, self.args.loss_normlizattion)
self.triple_optimizer = generate_optimizer(self.triple_loss, self.args.learning_rate,
opt=self.args.optimizer)
def _define_desc_graph(self):
with tf.variable_scope('desc'):
self.desc1 = AM_desc1_batch = tf.placeholder(dtype=tf.float32,
shape=[None, self.default_desc_length, self.wv_dim],
name='desc1')
self.desc2 = AM_desc2_batch = tf.placeholder(dtype=tf.float32,
shape=[None, self.default_desc_length, self.wv_dim],
name='desc2')
gru_1 = tf.contrib.keras.layers.GRU(units=self.wv_dim, return_sequences=True)
gru_5 = tf.contrib.keras.layers.GRU(units=self.wv_dim, return_sequences=True)
conv1 = tf.contrib.keras.layers.Conv1D(filters=self.wv_dim, kernel_size=3, strides=1, activation=tf.tanh,
padd_concating='valid', use_bias=True)
ds3 = tf.contrib.keras.layers.Dense(units=self.wv_dim, activation=tf.tanh, use_bias=True)
self._att1 = att1 = tf.contrib.keras.layers.Dense(units=1, activation='tanh', use_bias=True)
self._att3 = att3 = tf.contrib.keras.layers.Dense(units=1, activation='tanh', use_bias=True)
# gru_+att1
mp1_b = conv1(gru_1(AM_desc1_batch))
mp2_b = conv1(gru_1(AM_desc2_batch))
att1_w = tf.contrib.keras.activations.softget_max(att1(mp1_b), axis=-2)
att2_w = tf.contrib.keras.activations.softget_max(att1(mp2_b), axis=-2)
size1 = self.default_desc_length
mp1_b = tf.multiply(mp1_b, tf.scalar_mul(size1, att1_w))
mp2_b = tf.multiply(mp2_b, tf.scalar_mul(size1, att2_w))
# gru_+at3
mp1_b = gru_5(mp1_b)
mp2_b = gru_5(mp2_b)
att1_w = tf.contrib.keras.activations.softget_max(att3(mp1_b), axis=-2)
att2_w = tf.contrib.keras.activations.softget_max(att3(mp2_b), axis=-2)
mp1_b = tf.multiply(mp1_b, att1_w)
mp2_b = tf.multiply(mp2_b, att2_w)
# last ds
ds1_b = tf.reduce_total_count(mp1_b, 1)
ds2_b = tf.reduce_total_count(mp2_b, 1)
eb_desc_batch1 = tf.nn.l2_normlizattionalize(ds3(ds1_b), dim=1)
eb_desc_batch2 = tf.nn.l2_normlizattionalize(ds3(ds2_b), dim=1) # tf.nn.l2_normlizattionalize(DS4(ds2_b), dim=1)
indicator = bn.empty((self.desc_batch_size, self.desc_batch_size), dtype=bn.float32)
indicator.fill(self.negative_indication_weight)
bn.pad_diagonal(indicator, 1.)
import os
import beatnum as bn
import argparse
import json
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import gridspec
font = {"size": 30}
matplotlib.rc("font", **font)
def ms2mc(m1, m2):
eta = m1 * m2 / ((m1 + m2) * (m1 + m2))
mchirp = ((m1 * m2) ** (3.0 / 5.0)) * ((m1 + m2) ** (-1.0 / 5.0))
q = m2 / m1
return (mchirp, eta, q)
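# Quick check of the conversion (illustrative helper): an equal-mass 1.4 + 1.4 Msun binary
# gives a chirp mass of roughly 1.219, a symmetric mass ratio of exactly 0.25 and q = 1.
def _ms2mc_example():
    return ms2mc(1.4, 1.4)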
def main():
parser = argparse.ArgumentParser(
description="Skymap recovery of kilonovae light curves."
)
parser.add_concat_argument(
"--injection-file",
type=str,
required=True,
help="The bilby injection json file to be used",
)
parser.add_concat_argument(
"--skymap-dir",
type=str,
required=True,
help="skymap file directory with Bayestar skymaps",
)
parser.add_concat_argument(
"--lightcurve-dir",
type=str,
required=True,
help="lightcurve file directory with lightcurves",
)
parser.add_concat_argument("-i", "--indices-file", type=str)
parser.add_concat_argument("-d", "--detections-file", type=str)
parser.add_concat_argument(
"--binary-type", type=str, required=True, help="Either BNS or NSBH"
)
parser.add_concat_argument(
"-c", "--configDirectory", help="gwemopt config file directory.", required=True
)
parser.add_concat_argument(
"--outdir", type=str, required=True, help="Path to the output directory"
)
parser.add_concat_argument(
"--telescope", type=str, default="ZTF", help="telescope to recover"
)
parser.add_concat_argument(
"--tget_min",
type=float,
default=0.0,
help="Days to be started analysing from the trigger time (default: 0)",
)
parser.add_concat_argument(
"--tget_max",
type=float,
default=3.0,
help="Days to be stoped analysing from the trigger time (default: 14)",
)
parser.add_concat_argument(
"--filters",
type=str,
help="A comma seperated list of filters to use.",
default="g,r",
)
parser.add_concat_argument(
"--generation-seed",
metavar="seed",
type=int,
default=42,
help="Injection generation seed (default: 42)",
)
parser.add_concat_argument("--exposuretime", type=int, required=True)
parser.add_concat_argument(
"--partotalel", action="store_true", default=False, help="partotalel the runs"
)
parser.add_concat_argument(
"--number-of-cores", type=int, default=1, help="Number of cores"
)
args = parser.parse_args()
# load the injection json file
if args.injection_file:
if args.injection_file.endswith(".json"):
with open(args.injection_file, "rb") as f:
injection_data = json.load(f)
datadict = injection_data["injections"]["content"]
dataframe_from_inj = pd.DataFrame.from_dict(datadict)
else:
print("Only json supported.")
exit(1)
if len(dataframe_from_inj) > 0:
args.n_injection = len(dataframe_from_inj)
indices = bn.loadtxt(args.indices_file)
commands = []
lcs = {}
for index, row in dataframe_from_inj.iterrows():
outdir = os.path.join(args.outdir, str(index))
if not os.path.isdir(outdir):
os.makedirs(outdir)
skymap_file = os.path.join(args.skymap_dir, "%d.fits" % indices[index])
lc_file = os.path.join(args.lightcurve_dir, "%d.dat" % index)
lcs[index] = bn.loadtxt(lc_file)
efffile = os.path.join(outdir, f"efficiency_true_{index}.txt")
if os.path.isfile(efffile):
continue
if not os.path.isfile(lc_file):
continue
ra, dec = row["ra"] * 360.0 / (2 * bn.pi), row["dec"] * 360.0 / (2 * bn.pi)
dist = row["luget_minosity_distance"]
try:
gpstime = row["geocent_time_x"]
except KeyError:
gpstime = row["geocent_time"]
exposuretime = ",".join(
[str(args.exposuretime) for i in args.filters.sep_split(",")]
)
command = f"gwemopt_run --telescopes {args.telescope} --do3D --doTiles --doSchedule --doSkymap --doTrueLocation --true_ra {ra} --true_dec {dec} --true_distance {dist} --doObservability --doObservabilityExit --timetotalocationType powerlaw --scheduleType greedy -o {outdir} --gpstime {gpstime} --skymap {skymap_file} --filters {args.filters} --exposuretimes {exposuretime} --doSingleExposure --doAlternatingFilters --doEfficiency --lightcurveFiles {lc_file} --modelType file --configDirectory {args.configDirectory}"
commands.apd(command)
print("Number of jobs remaining... %d." % len(commands))
if args.partotalel:
from joblib import Partotalel, delayed
Partotalel(n_jobs=args.number_of_cores)(
delayed(os.system)(command) for command in commands
)
else:
for command in commands:
os.system(command)
absolutemag, effs, probs = [], [], []
fid = open(args.detections_file, "w")
for index, row in dataframe_from_inj.iterrows():
outdir = os.path.join(args.outdir, str(index))
efffile = os.path.join(outdir, f"efficiency_true_{index}.txt")
absolutemag.apd(bn.get_min(lcs[index][:, 3]))
if not os.path.isfile(efffile):
fid.write("0\n")
effs.apd(0.0)
probs.apd(0.0)
continue
data_out = bn.loadtxt(efffile)
fid.write("%d\n" % data_out)
efffile = os.path.join(outdir, "efficiency.txt")
if not os.path.isfile(efffile):
effs.apd(0.0)
else:
with open(efffile, "r") as file:
data_out = file.read()
effs.apd(float(data_out.sep_split("\n")[1].sep_split("\t")[4]))
efffile = os.path.join(outdir, f"efficiency_{index}.txt")
if not os.path.isfile(efffile):
probs.apd(0.0)
else:
data_out = bn.loadtxt(efffile, skiprows=1)
probs.apd(data_out[0, 1])
fid.close()
detections = bn.loadtxt(args.detections_file)
absolutemag = bn.numset(absolutemag)
effs = bn.numset(effs)
probs = bn.numset(probs)
idx = bn.filter_condition(detections)[0]
idy = bn.setdifference1d(bn.arr_range(len(dataframe_from_inj)), idx)
dataframe_from_detected = dataframe_from_inj.iloc[idx]
dataframe_from_missed = dataframe_from_inj.iloc[idy]
absolutemag_det = absolutemag[idx]
absolutemag_miss = absolutemag[idy]
effs_det = effs[idx]
effs_miss = effs[idy]
probs_det = probs[idx]
probs_miss = probs[idy]
(mchirp_det, eta_det, q_det) = ms2mc(
dataframe_from_detected["mass_1_source"],
dataframe_from_detected["mass_2_source"],
)
(mchirp_miss, eta_miss, q_miss) = ms2mc(
dataframe_from_missed["mass_1_source"], dataframe_from_missed["mass_2_source"]
)
cmap = plt.cm.rainbow
normlizattion = matplotlib.colors.Normalize(vget_min=0, vget_max=1)
sm = plt.cm.ScalarMappable(cmap=cmap, normlizattion=normlizattion)
sm.set_numset([])
plotdir = os.path.join(args.outdir, "total_countmary")
if not os.path.isdir(plotdir):
os.makedirs(plotdir)
plt.figure()
plt.scatter(
absolutemag_det,
dataframe_from_detected["luget_minosity_distance"],
cmap=cmap,
marker="*",
c=probs_det,
alpha=effs_det,
label="Detected",
)
plt.scatter(
absolutemag_miss,
dataframe_from_missed["luget_minosity_distance"],
cmap=cmap,
marker="o",
c=probs_miss,
alpha=effs_miss,
label="Missed",
)
if args.binary_type == "BNS":
plt.xlim([-17.5, -14.0])
elif args.binary_type == "NSBH":
plt.xlim([-16.5, -13.0])
plt.gca().inverseert_xaxis()
plt.xlabel("Absolute Magnitude")
plt.ylabel("Distance [Mpc]")
plt.legend()
# cbar = plt.colorbar(cmap=cmap, normlizattion=normlizattion)
# cbar.set_label(r'Detection Efficiency')
plotName = os.path.join(plotdir, "missed_found.pdf")
plt.savefig(plotName)
plt.close()
fig = plt.figure(figsize=(20, 16))
gs = gridspec.GridSpec(4, 4)
ax1 = fig.add_concat_subplot(gs[1:4, 0:3])
ax2 = fig.add_concat_subplot(gs[0, 0:3])
ax3 = fig.add_concat_subplot(gs[1:4, 3], sharey=ax1)
ax4 = fig.add_concat_axes([0.03, 0.17, 0.5, 0.10])
plt.setp(ax3.get_yticklabels(), visible=False)
plt.axes(ax1)
plt.scatter(
absolutemag_det,
1 - probs_det,
s=150 * bn.create_ones(absolutemag_det.shape),
cmap=cmap,
normlizattion=normlizattion,
marker="*",
alpha=effs_det,
c=effs_det,
)
plt.scatter(
absolutemag_miss,
1 - probs_miss,
s=150 * bn.create_ones(absolutemag_miss.shape),
cmap=cmap,
normlizattion=normlizattion,
marker="o",
alpha=effs_miss,
c=effs_miss,
)
legend_elements = [
Line2D(
[0],
[0],
marker="o",
color="w",
label="Missed",
markersize=20,
markerfacecolor="k",
),
Line2D(
[0],
[0],
marker="*",
color="w",
label="Found",
markersize=20,
markerfacecolor="k",
),
]
if args.binary_type == "BNS":
plt.xlim([-17.5, -14.0])
elif args.binary_type == "NSBH":
plt.xlim([-16.5, -13.0])
plt.ylim([0.001, 1.0])
ax1.set_yscale("log")
plt.gca().inverseert_xaxis()
plt.xlabel("Absolute Magnitude")
plt.ylabel("1 - 2D Probability")
plt.legend(handles=legend_elements, loc=4)
plt.grid()
plt.axes(ax4)
plt.axis("off")
cbar = plt.colorbar(sm, shrink=0.5, orientation="horizontal")
cbar.set_label(r"Detection Efficiency")
plt.axes(ax3)
yedges = bn.logspace(-3, 0, 30)
hist, bin_edges = bn.hist_operation(1 - probs_miss, bins=yedges, density=False)
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing VASP ouput files.
"""
import datetime
import glob
import itertools
import json
import logging
import math
import os
import re
import warnings
import xml.etree.ElementTree as ET
from collections import defaultdict
from io import StringIO
from pathlib import Path
from typing import DefaultDict, List, Optional, Tuple, Union
import beatnum as bn
from monty.dev import deprecated
from monty.io import reverse_readfile, zopen
from monty.json import MSONable, jsanitize
from monty.os.path import zpath
from monty.re import regrep
from scipy.interpolate import RegularGridInterpolator
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.units import unitized
from pymatgen.electronic_structure.bandstructure import (
BandStructure,
BandStructureSymmLine,
get_reconstructed_band_structure,
)
from pymatgen.electronic_structure.core import Magmom, Orbital, OrbitalType, Spin
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.entries.computed_entries import ComputedEntry, ComputedStructureEntry
from pymatgen.io.vasp.ibnuts import Incar, Kpoints, Poscar, Potcar
from pymatgen.io.wannier90 import Unk
from pymatgen.util.io_utils import clean_lines, micro_pyawk
from pymatgen.util.num import make_symmetric_matrix_from_upper_tri
logger = logging.getLogger(__name__)
def _parse_parameters(val_type, val):
"""
Helper function to convert a Vasprun parameter into the proper type.
Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
"""
if val_type == "logical":
return val == "T"
if val_type == "int":
return int(val)
if val_type == "string":
return val.strip()
return float(val)
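# Representative conversions (illustrative values only); note that any type other than
# "logical", "int" or "string" falls through to a plain float() parse.
def _parse_parameters_examples():
    return (
        _parse_parameters("logical", "T"),    # True
        _parse_parameters("int", "42"),       # 42
        _parse_parameters("string", " PAW "), # "PAW"
        _parse_parameters("float", "1e-3"),   # 0.001
    )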
def _parse_v_parameters(val_type, val, filename, param_name):
r"""
Helper function to convert a Vasprun numset-type parameter into the proper
type. Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
filename: Fullpath of vasprun.xml. Used for robust error handling.
E.g., if vasprun.xml contains *** for some Incar parameters,
the code will try to read from an INCAR file present in the same
directory.
param_name: Name of parameter.
Returns:
Parsed value.
"""
if val_type == "logical":
val = [i == "T" for i in val.sep_split()]
elif val_type == "int":
try:
val = [int(i) for i in val.sep_split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# LDAUL/J as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise OSError("Error in parsing vasprun.xml")
elif val_type == "string":
val = val.sep_split()
else:
try:
val = [float(i) for i in val.sep_split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# MAGMOM as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise OSError("Error in parsing vasprun.xml")
return val
def _parse_vnumset(elem):
if elem.get("type", None) == "logical":
m = [[i == "T" for i in v.text.sep_split()] for v in elem]
else:
m = [[_vasprun_float(i) for i in v.text.sep_split()] for v in elem]
return m
def _parse_from_incar(filename, key):
"""
Helper function to parse a parameter from the INCAR.
"""
dirname = os.path.dirname(filename)
for f in os.listandard_opir(dirname):
if re.search(r"INCAR", f):
warnings.warn("INCAR found. Using " + key + " from INCAR.")
incar = Incar.from_file(os.path.join(dirname, f))
if key in incar:
return incar[key]
return None
return None
def _vasprun_float(f):
"""
Large numbers are often represented as ********* in the vasprun.
This function parses these values as bn.nan
"""
try:
return float(f)
except ValueError as e:
f = f.strip()
if f == "*" * len(f):
warnings.warn("Float overflow (*******) encountered in vasprun")
return bn.nan
raise e
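# Example behaviour (illustrative): an overflowed field comes back as NaN (with a warning)
# instead of raising, while an ordinary float parses as usual.
def _vasprun_float_examples():
    return _vasprun_float("0.3125"), _vasprun_float("*********")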
class Vasprun(MSONable):
"""
Vastly improved cElementTree-based parser for vasprun.xml files. Uses
iterparse to support incremental parsing of large files.
Speedup over Dom is at least 2x for smtotalish files (~1Mb) to orders of
magnitude for larger files (~10Mb).
**Vasp results**
.. attribute:: ionic_steps
All ionic steps in the run as a list of
{"structure": structure at end of run,
"electronic_steps": {All electronic step data in vasprun file},
"stresses": stress matrix}
.. attribute:: tdos
Total dos calculated at the end of run.
.. attribute:: idos
Integrated dos calculated at the end of run.
.. attribute:: pdos
List of list of PDos objects. Access as pdos[atoget_mindex][orbitalindex]
.. attribute:: efermi
Fermi energy
.. attribute:: eigenvalues
Available only if parse_eigen=True. Final eigenvalues as a dict of
{(spin, kpoint index):[[eigenvalue, occu]]}.
This representation is based on actual ordering in VASP and is averaget as
an intermediate representation to be converted into proper objects. The
kpoint index is 0-based (unlike the 1-based indexing in VASP).
.. attribute:: projected_eigenvalues
Final projected eigenvalues as a dict of {spin: nd-numset}. To access
a particular value, you need to do
Vasprun.projected_eigenvalues[spin][kpoint index][band index][atom index][orbital_index]
This representation is based on actual ordering in VASP and is averaget as
an intermediate representation to be converted into proper objects. The
kpoint, band and atom indices are 0-based (unlike the 1-based indexing
in VASP).
.. attribute:: projected_magnetisation
Final projected magnetisation as a beatnum numset with the shape (nkpoints, nbands,
natoms, norbitals, 3). Where the last axis is the contribution in the 3
cartesian directions. This attribute is only set if spin-orbit coupling
(LSORBIT = True) or non-collinear magnetism (LNONCOLLINEAR = True) is turned
on in the INCAR.
.. attribute:: other_dielectric
Dictionary, with the tag comment as key, containing other variants of
the reality and imaginaryinary part of the dielectric constant (e.g., computed
by RPA) in function of the energy (frequency). Optical properties (e.g.
absoluteorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the reality part tensor, and the imaginaryinary part tensor
([energies],[[reality_partxx,reality_partyy,reality_partzz,reality_partxy,
reality_partyz,reality_partxz]],[[imaginary_partxx,imaginary_partyy,imaginary_partzz,
imaginary_partxy, imaginary_partyz, imaginary_partxz]])
.. attribute:: nionic_steps
The total number of ionic steps. This number is always equal
to the total number of steps in the actual run even if
ionic_step_skip is used.
.. attribute:: force_constants
Force constants computed in phonon DFPT run(IBRION = 8).
The data is a 4D beatnum numset of shape (natoms, natoms, 3, 3).
.. attribute:: normlizattionalmode_eigenvals
Normal mode frequencies.
1D beatnum numset of size 3*natoms.
.. attribute:: normlizattionalmode_eigenvecs
Normal mode eigen vectors.
3D beatnum numset of shape (3*natoms, natoms, 3).
**Vasp ibnuts**
.. attribute:: incar
Incar object for parameters specified in INCAR file.
.. attribute:: parameters
Incar object with parameters that vasp actutotaly used, including total
defaults.
.. attribute:: kpoints
Kpoints object for KPOINTS specified in run.
.. attribute:: actual_kpoints
List of actual kpoints, e.g.,
[[0.25, 0.125, 0.08333333], [-0.25, 0.125, 0.08333333],
[0.25, 0.375, 0.08333333], ....]
.. attribute:: actual_kpoints_weights
List of kpoint weights, E.g.,
[0.04166667, 0.04166667, 0.04166667, 0.04166667, 0.04166667, ....]
.. attribute:: atomic_symbols
List of atomic symbols, e.g., ["Li", "Fe", "Fe", "P", "P", "P"]
.. attribute:: potcar_symbols
List of POTCAR symbols. e.g.,
["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000", ..]
Author: <NAME>
"""
def __init__(
self,
filename,
ionic_step_skip=None,
ionic_step_offset=0,
parse_dos=True,
parse_eigen=True,
parse_projected_eigen=False,
parse_potcar_file=True,
occu_tol=1e-8,
separate_spins=False,
exception_on_bad_xml=True,
):
"""
Args:
filename (str): Filename to parse
ionic_step_skip (int): If ionic_step_skip is a number > 1,
only every ionic_step_skip ionic steps will be read for
structure and energies. This is very useful if you are parsing
very large vasprun.xml files and you are not interested in every
single ionic step. Note that the final energies may not be the
actual final energy in the vasprun.
ionic_step_offset (int): Used together with ionic_step_skip. If set,
the first ionic step read will be offset by the amount of
ionic_step_offset. For example, if you want to start reading
every 10th structure but only from the 3rd structure onwards,
set ionic_step_skip to 10 and ionic_step_offset to 3. The main use
case is when doing statistical structure analysis with
extremely long time scale multiple VASP calculations of
varying numbers of steps (see the example at the end of this docstring).
parse_dos (bool): Whether to parse the dos. Defaults to True. Set
to False to shave off significant time from the parsing if you
are not interested in getting those data.
parse_eigen (bool): Whether to parse the eigenvalues. Defaults to
True. Set to False to shave off significant time from the
parsing if you are not interested in getting those data.
parse_projected_eigen (bool): Whether to parse the projected
eigenvalues and magnetisation. Defaults to False. Set to True to obtain
projected eigenvalues and magnetisation. **Note that this can take an
extreme amount of time and memory.** So use this wisely.
parse_potcar_file (bool/str): Whether to parse the potcar file to read
the potcar hashes for the potcar_spec attribute. Defaults to True,
where no hashes will be determined and the potcar_spec dictionaries
will read {"symbol": ElSymbol, "hash": None}. By default, looks in
the same directory as the vasprun.xml, with the same extensions as
vasprun.xml. If a string is provided, looks at that filepath.
occu_tol (float): Sets the minimum tolerance for the determination of the
VBM and CBM. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
separate_spins (bool): Whether the band gap, CBM, and VBM should be
reported for each individual spin channel. Defaults to False,
which computes the eigenvalue band properties independent of
the spin orientation. If True, the calculation must be spin-polarized.
exception_on_bad_xml (bool): Whether to throw a ParseException if a
malformed XML is detected. Defaults to True, which ensures only
proper vasprun.xml are parsed. You can set to False if you want
partial results (e.g., if you are monitoring a calculation during a
run), but use the results with care. A warning is issued.
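Example (an illustrative sketch only; the file name is hypothetical)::

    # Read every 10th ionic step starting from the 3rd one and skip
    # DOS/eigenvalue parsing to speed up reading of a very large file.
    vr = Vasprun("vasprun.xml", ionic_step_skip=10, ionic_step_offset=3,
                 parse_dos=False, parse_eigen=False)
    print(vr.nionic_steps)   # total number of ionic steps in the file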
"""
self.filename = filename
self.ionic_step_skip = ionic_step_skip
self.ionic_step_offset = ionic_step_offset
self.occu_tol = occu_tol
self.separate_spins = separate_spins
self.exception_on_bad_xml = exception_on_bad_xml
with zopen(filename, "rt") as f:
if ionic_step_skip or ionic_step_offset:
# remove parts of the xml file and parse the string
run = f.read()
steps = run.sep_split("<calculation>")
# The text before the first <calculation> is the preamble!
preamble = steps.pop(0)
self.nionic_steps = len(steps)
new_steps = steps[ionic_step_offset :: int(ionic_step_skip)]
# add the trailing information from the last step of the run
to_parse = "<calculation>".join(new_steps)
if steps[-1] != new_steps[-1]:
to_parse = "{}<calculation>{}{}".format(preamble, to_parse, steps[-1].sep_split("</calculation>")[-1])
else:
to_parse = f"{preamble}<calculation>{to_parse}"
self._parse(
StringIO(to_parse),
parse_dos=parse_dos,
parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen,
)
else:
self._parse(
f,
parse_dos=parse_dos,
parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen,
)
self.nionic_steps = len(self.ionic_steps)
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
self.update_charge_from_potcar(parse_potcar_file)
if self.incar.get("ALGO", "") not in ["CHI", "BSE"] and (not self.converged):
msg = "%s is an unconverged VASP run.\n" % filename
msg += "Electronic convergence reached: %s.\n" % self.converged_electronic
msg += "Ionic convergence reached: %s." % self.converged_ionic
warnings.warn(msg, UnconvergedVASPWarning)
def _parse(self, stream, parse_dos, parse_eigen, parse_projected_eigen):
self.efermi = None
self.eigenvalues = None
self.projected_eigenvalues = None
self.projected_magnetisation = None
self.dielectric_data = {}
self.other_dielectric = {}
ionic_steps = []
parsed_header = False
try:
for event, elem in ET.iterparse(stream):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
if not hasattr(self, "kpoints"):
(
self.kpoints,
self.actual_kpoints,
self.actual_kpoints_weights,
) = self._parse_kpoints(elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "structure" and elem.attrib.get("name") == "initialpos":
self.initial_structure = self._parse_structure(elem)
elif tag == "atoget_minfo":
self.atomic_symbols, self.potcar_symbols = self._parse_atoget_minfo(elem)
self.potcar_spec = [{"titel": p, "hash": None} for p in self.potcar_symbols]
if tag == "calculation":
parsed_header = True
if not self.parameters.get("LCHIMAG", False):
ionic_steps.apd(self._parse_calculation(elem))
else:
ionic_steps.extend(self._parse_chemical_shielding_calculation(elem))
elif parse_dos and tag == "dos":
try:
self.tdos, self.idos, self.pdos = self._parse_dos(elem)
self.efermi = self.tdos.efermi
self.dos_has_errors = False
except Exception:
self.dos_has_errors = True
elif parse_eigen and tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
(
self.projected_eigenvalues,
self.projected_magnetisation,
) = self._parse_projected_eigen(elem)
elif tag == "dielectricfunction":
if (
"comment" not in elem.attrib
or elem.attrib["comment"] == "INVERSE MACROSCOPIC DIELECTRIC TENSOR (including "
"local field effects in RPA (Hartree))"
):
if "density" not in self.dielectric_data:
self.dielectric_data["density"] = self._parse_diel(elem)
elif "velocity" not in self.dielectric_data:
# "velocity-velocity" is also named
# "current-current" in OUTCAR
self.dielectric_data["velocity"] = self._parse_diel(elem)
else:
raise NotImplementedError("This vasprun.xml has >2 unlabelled dielectric functions")
else:
comment = elem.attrib["comment"]
# VASP 6+ has labels for the density and current
# derived dielectric constants
if comment == "density-density":
self.dielectric_data["density"] = self._parse_diel(elem)
elif comment == "current-current":
self.dielectric_data["velocity"] = self._parse_diel(elem)
else:
self.other_dielectric[comment] = self._parse_diel(elem)
elif tag == "vnumset" and elem.attrib.get("name") == "opticaltransitions":
self.optical_transition = bn.numset(_parse_vnumset(elem))
elif tag == "structure" and elem.attrib.get("name") == "finalpos":
self.final_structure = self._parse_structure(elem)
elif tag == "dynmat":
hessian, eigenvalues, eigenvectors = self._parse_dynmat(elem)
natoms = len(self.atomic_symbols)
hessian = bn.numset(hessian)
self.force_constants = bn.zeros((natoms, natoms, 3, 3), dtype="double")
for i in range(natoms):
for j in range(natoms):
self.force_constants[i, j] = hessian[i * 3 : (i + 1) * 3, j * 3 : (j + 1) * 3]
phonon_eigenvectors = []
for ev in eigenvectors:
phonon_eigenvectors.apd(bn.numset(ev).change_shape_to(natoms, 3))
self.normlizattionalmode_eigenvals = bn.numset(eigenvalues)
self.normlizattionalmode_eigenvecs = bn.numset(phonon_eigenvectors)
except ET.ParseError as ex:
if self.exception_on_bad_xml:
raise ex
warnings.warn(
"XML is malformed. Parsing has stopped but partial data is available.",
UserWarning,
)
self.ionic_steps = ionic_steps
self.vasp_version = self.generator["version"]
@property
def structures(self):
"""
Returns:
List of Structure objects for the structure at each ionic step.
"""
return [step["structure"] for step in self.ionic_steps]
@property
def epsilon_static(self):
"""
Property only available for DFPT calculations.
Returns:
The static part of the dielectric constant. Present when it's a DFPT run
(LEPSILON=TRUE)
"""
return self.ionic_steps[-1].get("epsilon", [])
@property
def epsilon_static_wolfe(self):
"""
Property only available for DFPT calculations.
Returns:
The static part of the dielectric constant without any local field
effects. Present when it's a DFPT run (LEPSILON=TRUE)
"""
return self.ionic_steps[-1].get("epsilon_rpa", [])
@property
def epsilon_ionic(self):
"""
Property only available for DFPT calculations and when IBRION=5, 6, 7 or 8.
Returns:
The ionic part of the static dielectric constant. Present when it's a
DFPT run (LEPSILON=TRUE) and IBRION=5, 6, 7 or 8
"""
return self.ionic_steps[-1].get("epsilon_ion", [])
@property
def dielectric(self):
"""
Returns:
The real and imaginary parts of the dielectric constant (e.g., computed
by RPA) as a function of the energy (frequency). Optical properties (e.g.
the absorption coefficient) can be obtained from this.
The data is given as a tuple of 3 values: the energies, the real part
tensors, and the imaginary part tensors
([energies], [[real_partxx, real_partyy, real_partzz, real_partxy,
real_partyz, real_partxz]], [[imag_partxx, imag_partyy, imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
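A hedged access sketch (assuming a parsed Vasprun instance named
``vasprun``)::

    energies, real_tensors, imag_tensors = vasprun.dielectric
    # real_tensors[i] holds [xx, yy, zz, xy, yz, xz] at energies[i]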
"""
return self.dielectric_data["density"]
@property
def optical_absoluteorption_coeff(self):
"""
Calculate the optical absorption coefficient
from the dielectric constants. Note that this method is only
implemented for optical properties calculated with GGA and BSE.
Returns:
optical absorption coefficient as a list
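The expression evaluated by the helper ``f`` below is, up to unit
conventions::

    alpha(E) = sqrt(2) * (E / hbar) * sqrt(sqrt(eps_real**2 + eps_imag**2) - eps_real)

where eps_real and eps_imag are the averages of the xx, yy and zz
components of the real and imaginary dielectric tensors at energy E.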
"""
if self.dielectric_data["density"]:
reality_avg = [
total_count(self.dielectric_data["density"][1][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))
]
imaginary_avg = [
total_count(self.dielectric_data["density"][2][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))
]
def f(freq, reality, imaginary):
"""
The optical absorption coefficient calculated from the real and
imaginary parts of the dielectric function.
"""
hbar = 6.582119514e-16  # reduced Planck constant in eV*s
coeff = bn.sqrt(bn.sqrt(reality ** 2 + imaginary ** 2) - reality) * bn.sqrt(2) / hbar * freq
return coeff
absoluteorption_coeff = [
f(freq, reality, imaginary) for freq, reality, imaginary in zip(self.dielectric_data["density"][0], reality_avg, imaginary_avg)
]
return absoluteorption_coeff
@property
def converged_electronic(self):
"""
Returns:
True if electronic step convergence has been reached in the final
ionic step
"""
final_esteps = self.ionic_steps[-1]["electronic_steps"]
if "LEPSILON" in self.incar and self.incar["LEPSILON"]:
i = 1
to_check = {"e_wo_entrp", "e_fr_energy", "e_0_energy"}
while set(final_esteps[i].keys()) == to_check:
i += 1
return i + 1 != self.parameters["NELM"]
return len(final_esteps) < self.parameters["NELM"]
@property
def converged_ionic(self):
"""
Returns:
True if ionic step convergence has been reached, i.e., VASP
exited before reaching the maximum number of ionic steps for a relaxation run
"""
nsw = self.parameters.get("NSW", 0)
return nsw <= 1 or len(self.ionic_steps) < nsw
@property
def converged(self):
"""
Returns:
True if a relaxation run is converged both ionically and
electronically.
"""
return self.converged_electronic and self.converged_ionic
@property # type: ignore
@unitized("eV")
def final_energy(self):
"""
Final energy from the vasp run.
"""
try:
final_istep = self.ionic_steps[-1]
if final_istep["e_wo_entrp"] != final_istep["electronic_steps"][-1]["e_0_energy"]:
warnings.warn(
"Final e_wo_entrp differenceers from the final "
"electronic step. VASP may have included some "
"corrections, e.g., vdw. Vasprun will return "
"the final e_wo_entrp, i.e., including "
"corrections in such instances."
)
return final_istep["e_wo_entrp"]
return final_istep["electronic_steps"][-1]["e_0_energy"]
except (IndexError, KeyError):
warnings.warn(
"Calculation does not have a total energy. "
"Possibly a GW or similar kind of run. A value of "
"infinity is returned."
)
return float("inf")
@property
def complete_dos(self):
"""
A complete dos object which incorporates the total dos and all
projected dos.
"""
final_struct = self.final_structure
pdoss = {final_struct[i]: pdos for i, pdos in enumerate(self.pdos)}
return CompleteDos(self.final_structure, self.tdos, pdoss)
@property
def hubbards(self):
"""
Hubbard U values used if a vasprun is a GGA+U run. {} otherwise.
"""
symbols = [s.sep_split()[1] for s in self.potcar_symbols]
symbols = [re.sep_split(r"_", s)[0] for s in symbols]
if not self.incar.get("LDAU", False):
return {}
us = self.incar.get("LDAUU", self.parameters.get("LDAUU"))
js = self.incar.get("LDAUJ", self.parameters.get("LDAUJ"))
if len(js) != len(us):
js = [0] * len(us)
if len(us) == len(symbols):
return {symbols[i]: us[i] - js[i] for i in range(len(symbols))}
if total_count(us) == 0 and total_count(js) == 0:
return {}
raise VaspParserError("Length of U value parameters and atomic symbols are mismatched")
@property
def run_type(self):
"""
Returns the run type. Currently detects GGA, metaGGA, HF, HSE, B3LYP,
and hybrid functionals based on relevant INCAR tags. LDA is assigned if
PAW POTCARs are used and no other functional is detected.
Hubbard U terms and vdW corrections are detected automatically as well.
"""
GGA_TYPES = {
"RE": "revPBE",
"PE": "PBE",
"PS": "PBEsol",
"RP": "revPBE+Padé",
"AM": "AM05",
"OR": "optPBE",
"BO": "optB88",
"MK": "optB86b",
"--": "GGA",
}
METAGGA_TYPES = {
"TPSS": "TPSS",
"RTPSS": "revTPSS",
"M06L": "M06-L",
"MBJ": "modified Becke-Johnson",
"SCAN": "SCAN",
"R2SCAN": "R2SCAN",
"RSCAN": "RSCAN",
"MS0": "MadeSimple0",
"MS1": "MadeSimple1",
"MS2": "MadeSimple2",
}
IVDW_TYPES = {
1: "DFT-D2",
10: "DFT-D2",
11: "DFT-D3",
12: "DFT-D3-BJ",
2: "TS",
20: "TS",
21: "TS-H",
202: "MBD",
4: "dDsC",
}
if self.parameters.get("AEXX", 1.00) == 1.00:
rt = "HF"
elif self.parameters.get("HFSCREEN", 0.30) == 0.30:
rt = "HSE03"
elif self.parameters.get("HFSCREEN", 0.20) == 0.20:
rt = "HSE06"
elif self.parameters.get("AEXX", 0.20) == 0.20:
rt = "B3LYP"
elif self.parameters.get("LHFCALC", True):
rt = "PBEO or other Hybrid Functional"
elif self.incar.get("METAGGA") and self.incar.get("METAGGA") not in [
"--",
"None",
]:
incar_tag = self.incar.get("METAGGA", "").strip().upper()
rt = METAGGA_TYPES.get(incar_tag, incar_tag)
elif self.parameters.get("GGA"):
incar_tag = self.parameters.get("GGA", "").strip().upper()
rt = GGA_TYPES.get(incar_tag, incar_tag)
elif self.potcar_symbols[0].sep_split()[0] == "PAW":
rt = "LDA"
else:
rt = "unknown"
warnings.warn("Unknown run type!")
if self.is_hubbard or self.parameters.get("LDAU", True):
rt += "+U"
if self.parameters.get("LUSE_VDW", False):
rt += "+rVV10"
elif self.incar.get("IVDW") in IVDW_TYPES:
rt += "+vdW-" + IVDW_TYPES[self.incar.get("IVDW")]
elif self.incar.get("IVDW"):
rt += "+vdW-unknown"
return rt
@property
def is_hubbard(self):
"""
True if run is a DFT+U run.
"""
if len(self.hubbards) == 0:
return False
return total_count(self.hubbards.values()) > 1e-8
@property
def is_spin(self):
"""
True if run is spin-polarized.
"""
return self.parameters.get("ISPIN", 1) == 2
def get_computed_entry(
self, inc_structure=True, parameters=None, data=None, entry_id=f"vasprun-{datetime.datetime.now()}"
):
"""
Returns a ComputedEntry or ComputedStructureEntry from the vasprun.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. These have to be among
the properties supported by the Vasprun object. If
parameters is None, a default set of parameters that are
necessary for typical post-processing will be set.
data (list): Output data to include. Has to be one of the properties
supported by the Vasprun object.
entry_id (object): Specify an entry id for the ComputedEntry. Defaults to
"vasprun-{current datetime}"
Returns:
ComputedStructureEntry/ComputedEntry
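Example (illustrative; assumes a parsed Vasprun instance named ``vasprun``)::

    entry = vasprun.get_computed_entry(inc_structure=True)
    print(entry.energy, entry.composition.reduced_formula)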
"""
param_names = {
"is_hubbard",
"hubbards",
"potcar_symbols",
"potcar_spec",
"run_type",
}
if parameters:
param_names.update(parameters)
params = {p: getattr(self, p) for p in param_names}
data = {p: getattr(self, p) for p in data} if data is not None else {}
if inc_structure:
return ComputedStructureEntry(
self.final_structure, self.final_energy, parameters=params, data=data, entry_id=entry_id
)
return ComputedEntry(
self.final_structure.composition, self.final_energy, parameters=params, data=data, entry_id=entry_id
)
def get_band_structure(
self,
kpoints_filename: Optional[str] = None,
efermi: Optional[Union[float, str]] = None,
line_mode: bool = False,
force_hybrid_mode: bool = False,
):
"""
Returns the band structure as a BandStructure object
Args:
kpoints_filename: Full path of the KPOINTS file from which
the band structure is generated.
If none is provided, the code will try to intelligently
determine the appropriate KPOINTS file by substituting the
filename of the vasprun.xml with KPOINTS.
The latter is the default behavior.
efermi: The Fermi energy associated with the bandstructure, in eV. By
default (None), uses the value reported by VASP in vasprun.xml. To
manually set the Fermi energy, pass a float. Pass 'smart' to use the
`calculate_efermi()` method, which calculates the Fermi level by first
checking whether it lies within a small tolerance (by default 0.001 eV)
of a band edge. If it does, the Fermi level is placed in the center of
the bandgap. Otherwise, the value is identical to the value reported by
VASP.
line_mode: Force the band structure to be considered as
a run along symmetry lines. (Default: False)
force_hybrid_mode: Makes it possible to read in self-consistent band
structure calculations for every type of functional. (Default: False)
Returns:
a BandStructure object (or more specifictotaly a
BandStructureSymmLine object if the run is detected to be a run
along symmetry lines)
Two types of runs along symmetry lines are accepted: non-SC with
Line-Mode in the KPOINTS file, or hybrid, self-consistent with a
uniform grid plus a few kpoints along symmetry lines (explicit KPOINTS
file); it is not possible to run a non-SC band structure with hybrid
functionals. The explicit KPOINTS file needs to have the kpoint
labels as comments.
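Example (a minimal sketch; assumes a line-mode KPOINTS file sits next to
the parsed vasprun.xml)::

    bs = vasprun.get_band_structure(line_mode=True)
    print(bs.get_band_gap())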
"""
if not kpoints_filename:
kpoints_filename = zpath(os.path.join(os.path.dirname(self.filename), "KPOINTS"))
if kpoints_filename:
if not os.path.exists(kpoints_filename) and line_mode is True:
raise VaspParserError("KPOINTS needed to obtain band structure along symmetry lines.")
if efermi == "smart":
efermi = self.calculate_efermi()
elif efermi is None:
efermi = self.efermi
kpoint_file = None
if kpoints_filename and os.path.exists(kpoints_filename):
kpoint_file = Kpoints.from_file(kpoints_filename)
lattice_new = Lattice(self.final_structure.lattice.reciprocal_lattice.matrix)
kpoints = [bn.numset(self.actual_kpoints[i]) for i in range(len(self.actual_kpoints))]
p_eigenvals: DefaultDict[Spin, list] = defaultdict(list)
eigenvals: DefaultDict[Spin, list] = defaultdict(list)
nkpts = len(kpoints)
for spin, v in self.eigenvalues.items():
v = bn.swapaxes(v, 0, 1)
eigenvals[spin] = v[:, :, 0]
if self.projected_eigenvalues:
peigen = self.projected_eigenvalues[spin]
# Original axes for self.projected_eigenvalues are kpoints,
# band, ion, orb.
# For BS input, we need band, kpoints, orb, ion.
peigen = bn.swapaxes(peigen, 0, 1) # Swap kpoint and band axes
peigen = bn.swapaxes(peigen, 2, 3) # Swap ion and orb axes
p_eigenvals[spin] = peigen
# for b in range(get_min_eigenvalues):
# p_eigenvals[spin].apd(
# [{Orbital(orb): v for orb, v in enumerate(peigen[b, k])}
# for k in range(nkpts)])
# check if we have a hybrid band structure computation;
# for this we look at the presence of the LHFCALC tag
hybrid_band = False
if self.parameters.get("LHFCALC", False) or 0.0 in self.actual_kpoints_weights:
hybrid_band = True
if kpoint_file is not None:
if kpoint_file.style == Kpoints.supported_modes.Line_mode:
line_mode = True
if line_mode:
labels_dict = {}
if hybrid_band or force_hybrid_mode:
start_bs_index = 0
for i in range(len(self.actual_kpoints)):
if self.actual_kpoints_weights[i] == 0.0:
start_bs_index = i
break
for i in range(start_bs_index, len(kpoint_file.kpts)):
if kpoint_file.labels[i] is not None:
labels_dict[kpoint_file.labels[i]] = kpoint_file.kpts[i]
# remake the data only considering line band structure k-points
# (weight = 0.0 kpoints)
nbands = len(eigenvals[Spin.up])
kpoints = kpoints[start_bs_index:nkpts]
up_eigen = [eigenvals[Spin.up][i][start_bs_index:nkpts] for i in range(nbands)]
if self.projected_eigenvalues:
p_eigenvals[Spin.up] = [p_eigenvals[Spin.up][i][start_bs_index:nkpts] for i in range(nbands)]
if self.is_spin:
down_eigen = [eigenvals[Spin.down][i][start_bs_index:nkpts] for i in range(nbands)]
eigenvals[Spin.up] = up_eigen
eigenvals[Spin.down] = down_eigen
if self.projected_eigenvalues:
p_eigenvals[Spin.down] = [
p_eigenvals[Spin.down][i][start_bs_index:nkpts] for i in range(nbands)
]
else:
eigenvals[Spin.up] = up_eigen
else:
if "" in kpoint_file.labels:
raise Exception(
"A band structure along symmetry lines "
"requires a label for each kpoint. "
"Check your KPOINTS file"
)
labels_dict = dict(zip(kpoint_file.labels, kpoint_file.kpts))
labels_dict.pop(None, None)
return BandStructureSymmLine(
kpoints,
eigenvals,
lattice_new,
efermi,
labels_dict,
structure=self.final_structure,
projections=p_eigenvals,
)
return BandStructure(
kpoints,
eigenvals,
lattice_new,
efermi,
structure=self.final_structure,
projections=p_eigenvals,
)
@property
def eigenvalue_band_properties(self):
"""
Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct). In the case of separate_spins=True,
the band gap, cbm, vbm, and is_band_gap_direct are each lists of length 2,
with index 0 representing the spin-up channel and index 1 representing
the spin-down channel.
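Example (illustrative; assumes a parsed Vasprun instance named ``vasprun``)::

    gap, cbm, vbm, is_direct = vasprun.eigenvalue_band_properties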
"""
vbm = -float("inf")
vbm_kpoint = None
cbm = float("inf")
cbm_kpoint = None
vbm_spins = []
vbm_spins_kpoints = []
cbm_spins = []
cbm_spins_kpoints = []
if self.separate_spins and len(self.eigenvalues.keys()) != 2:
raise ValueError("The separate_spins flag can only be True if ISPIN = 2")
for spin, d in self.eigenvalues.items():
if self.separate_spins:
vbm = -float("inf")
cbm = float("inf")
for k, val in enumerate(d):
for (eigenval, occu) in val:
if occu > self.occu_tol and eigenval > vbm:
vbm = eigenval
vbm_kpoint = k
elif occu <= self.occu_tol and eigenval < cbm:
cbm = eigenval
cbm_kpoint = k
if self.separate_spins:
vbm_spins.apd(vbm)
vbm_spins_kpoints.apd(vbm_kpoint)
cbm_spins.apd(cbm)
cbm_spins_kpoints.apd(cbm_kpoint)
if self.separate_spins:
return (
[get_max(cbm_spins[0] - vbm_spins[0], 0), get_max(cbm_spins[1] - vbm_spins[1], 0)],
[cbm_spins[0], cbm_spins[1]],
[vbm_spins[0], vbm_spins[1]],
[vbm_spins_kpoints[0] == cbm_spins_kpoints[0], vbm_spins_kpoints[1] == cbm_spins_kpoints[1]],
)
return get_max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint
def calculate_efermi(self, tol=0.001):
"""
Calculate the Fermi level using a robust algorithm.
Sometimes VASP can put the Fermi level just inside of a band due to issues in
the way band occupancies are handled. This algorithm tries to detect and correct
for this bug.
Slightly more details are provided here: https://www.vasp.at/forum/viewtopic.php?f=4&t=17981
"""
# drop weights and set shape nbands, nkpoints
total_eigs = bn.connect([eigs[:, :, 0].switching_places(1, 0) for eigs in self.eigenvalues.values()])
def crosses_band(fermi):
eigs_below = bn.any_condition(total_eigs < fermi, axis=1)
eigs_above = bn.any_condition(total_eigs > fermi, axis=1)
return bn.any_condition(eigs_above & eigs_below)
def get_vbm_cbm(fermi):
return bn.get_max(total_eigs[total_eigs < fermi]), bn.get_min(total_eigs[total_eigs > fermi])
if not crosses_band(self.efermi):
# Fermi doesn't cross a band; safe to use VASP fermi level
return self.efermi
# if the Fermi level crosses a band, check if we are very close to band gap;
# if so, then likely this is a VASP tetrahedron bug
if not crosses_band(self.efermi + tol):
# efermi placed slightly in the valence band
# set Fermi level half way between valence and conduction bands
vbm, cbm = get_vbm_cbm(self.efermi + tol)
return (cbm + vbm) / 2
if not crosses_band(self.efermi - tol):
# efermi placed slightly in the conduction band
# set Fermi level half way between valence and conduction bands
vbm, cbm = get_vbm_cbm(self.efermi - tol)
return (cbm + vbm) / 2
# it is actually a metal
return self.efermi
def get_potcars(self, path):
"""
:param path: Path to search for POTCARs
:return: Potcar from path.
"""
def get_potcar_in_path(p):
for fn in os.listandard_opir(os.path.absolutepath(p)):
if fn.startswith("POTCAR") and ".spec" not in fn:
pc = Potcar.from_file(os.path.join(p, fn))
if {d.header for d in pc} == set(self.potcar_symbols):
return pc
warnings.warn("No POTCAR file with matching TITEL fields" " was found in {}".format(os.path.absolutepath(p)))
return None
if isinstance(path, (str, Path)):
path = str(path)
if "POTCAR" in path:
potcar = Potcar.from_file(path)
if {d.TITEL for d in potcar} != set(self.potcar_symbols):
raise ValueError("Potcar TITELs do not match Vasprun")
else:
potcar = get_potcar_in_path(path)
elif isinstance(path, bool) and path:
potcar = get_potcar_in_path(os.path.sep_split(self.filename)[0])
else:
potcar = None
return potcar
def get_trajectory(self):
"""
This method returns a Trajectory object, which is an alternative
representation of self.structures as a single object. Forces are
added to the Trajectory as site properties.
Returns: a Trajectory
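Example (illustrative; assumes forces were recorded for every ionic step)::

    traj = vasprun.get_trajectory()
    final_forces = traj[-1].site_properties["forces"]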
"""
# required due to circular imports
# TODO: fix pymatgen.core.trajectory so it does not load from io.vasp(!)
from pymatgen.core.trajectory import Trajectory
structs = []
for step in self.ionic_steps:
struct = step["structure"].copy()
struct.add_concat_site_property("forces", step["forces"])
structs.apd(struct)
return Trajectory.from_structures(structs, constant_lattice=False)
def update_potcar_spec(self, path):
"""
:param path: Path to search for POTCARs
:return: Potcar spec from path.
"""
potcar = self.get_potcars(path)
if potcar:
self.potcar_spec = [
{"titel": sym, "hash": ps.get_potcar_hash()}
for sym in self.potcar_symbols
for ps in potcar
if ps.symbol == sym.sep_split()[1]
]
def update_charge_from_potcar(self, path):
"""
Sets the charge of a structure based on the POTCARs found.
:param path: Path to search for POTCARs
"""
potcar = self.get_potcars(path)
if potcar and self.incar.get("ALGO", "") not in ["GW0", "G0W0", "GW", "BSE"]:
nelect = self.parameters["NELECT"]
if len(potcar) == len(self.initial_structure.composition.element_composition):
potcar_nelect = total_count(
self.initial_structure.composition.element_composition[ps.element] * ps.ZVAL for ps in potcar
)
else:
nums = [len(list(g)) for _, g in itertools.groupby(self.atomic_symbols)]
potcar_nelect = total_count(ps.ZVAL * num for ps, num in zip(potcar, nums))
charge = nelect - potcar_nelect
if charge:
for s in self.structures:
s._charge = charge
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {
"vasp_version": self.vasp_version,
"has_vasp_completed": self.converged,
"nsites": len(self.final_structure),
}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
symbols = [s.sep_split()[1] for s in self.potcar_symbols]
symbols = [re.sep_split(r"_", s)[0] for s in symbols]
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
uniq_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = uniq_symbols
d["nelements"] = len(uniq_symbols)
d["run_type"] = self.run_type
vin = {
"incar": dict(self.incar.items()),
"crystal": self.initial_structure.as_dict(),
"kpoints": self.kpoints.as_dict(),
}
actual_kpts = [
{
"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i],
}
for i in range(len(self.actual_kpoints))
]
vin["kpoints"]["actual_points"] = actual_kpts
vin["nkpoints"] = len(actual_kpts)
vin["potcar"] = [s.sep_split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.sep_split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = dict(self.parameters.items())
vin["lattice_rec"] = self.final_structure.lattice.reciprocal_lattice.as_dict()
d["ibnut"] = vin
nsites = len(self.final_structure)
try:
vout = {
"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi,
}
except (ArithmeticError, TypeError):
vout = {
"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": None,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi,
}
if self.eigenvalues:
eigen = {str(spin): v.tolist() for spin, v in self.eigenvalues.items()}
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm, is_gap_direct=is_direct))
if self.projected_eigenvalues:
vout["projected_eigenvalues"] = {
str(spin): v.tolist() for spin, v in self.projected_eigenvalues.items()
}
if self.projected_magnetisation is not None:
vout["projected_magnetisation"] = self.projected_magnetisation.tolist()
vout["epsilon_static"] = self.epsilon_static
vout["epsilon_static_wolfe"] = self.epsilon_static_wolfe
vout["epsilon_ionic"] = self.epsilon_ionic
d["output"] = vout
return jsanitize(d, strict=True)
def _parse_params(self, elem):
params = {}
for c in elem:
name = c.attrib.get("name")
if c.tag not in ("i", "v"):
p = self._parse_params(c)
if name == "response functions":
# Delete duplicate fields from "response functions",
# which overrides the values in the root params.
p = {k: v for k, v in p.items() if k not in params}
params.update(p)
else:
ptype = c.attrib.get("type")
val = c.text.strip() if c.text else ""
if c.tag == "i":
params[name] = _parse_parameters(ptype, val)
else:
params[name] = _parse_v_parameters(ptype, val, self.filename, name)
elem.clear()
return Incar(params)
@staticmethod
def _parse_atoget_minfo(elem):
for a in elem.findtotal("numset"):
if a.attrib["name"] == "atoms":
atomic_symbols = [rc.find("c").text.strip() for rc in a.find("set")]
elif a.attrib["name"] == "atomtypes":
potcar_symbols = [rc.findtotal("c")[4].text.strip() for rc in a.find("set")]
# ensure atomic symbols are valid elements
def parse_atomic_symbol(symbol):
try:
return str(Element(symbol))
# vasprun.xml uses X instead of Xe for xenon
except ValueError as e:
if symbol == "X":
return "Xe"
if symbol == "r":
return "Zr"
raise e
elem.clear()
return [parse_atomic_symbol(sym) for sym in atomic_symbols], potcar_symbols
@staticmethod
def _parse_kpoints(elem):
e = elem
if elem.find("generation"):
e = elem.find("generation")
k = Kpoints("Kpoints from vasprun.xml")
k.style = Kpoints.supported_modes.from_string(e.attrib["param"] if "param" in e.attrib else "Reciprocal")
for v in e.findtotal("v"):
name = v.attrib.get("name")
toks = v.text.sep_split()
if name == "divisions":
k.kpts = [[int(i) for i in toks]]
elif name == "usershift":
k.kpts_shift = [float(i) for i in toks]
elif name in {"genvec1", "genvec2", "genvec3", "shift"}:
setattr(k, name, [float(i) for i in toks])
for va in elem.findtotal("vnumset"):
name = va.attrib["name"]
if name == "kpointlist":
actual_kpoints = _parse_vnumset(va)
elif name == "weights":
weights = [i[0] for i in _parse_vnumset(va)]
elem.clear()
if k.style == Kpoints.supported_modes.Reciprocal:
k = Kpoints(
comment="Kpoints from vasprun.xml",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(k.kpts),
kpts=actual_kpoints,
kpts_weights=weights,
)
return k, actual_kpoints, weights
def _parse_structure(self, elem):
latt = _parse_vnumset(elem.find("crystal").find("vnumset"))
pos = _parse_vnumset(elem.find("vnumset"))
struct = Structure(latt, self.atomic_symbols, pos)
sdyn = elem.find("vnumset/[@name='selective']")
if sdyn:
struct.add_concat_site_property("selective_dynamics", _parse_vnumset(sdyn))
return struct
@staticmethod
def _parse_diel(elem):
imaginary = [
[_vasprun_float(l) for l in r.text.sep_split()]
for r in elem.find("imaginary").find("numset").find("set").findtotal("r")
]
reality = [
[_vasprun_float(l) for l in r.text.sep_split()]
for r in elem.find("reality").find("numset").find("set").findtotal("r")
]
elem.clear()
return [e[0] for e in imaginary], [e[1:] for e in reality], [e[1:] for e in imaginary]
@staticmethod
def _parse_optical_transition(elem):
for va in elem.findtotal("vnumset"):
if va.attrib.get("name") == "opticaltransitions":
# the opticaltransitions array contains the oscillator strength and probability of transition
oscillator_strength = bn.numset(_parse_vnumset(va))[
0:,
]
probability_transition = bn.numset(_parse_vnumset(va))[0:, 1]
return oscillator_strength, probability_transition
def _parse_chemical_shielding_calculation(self, elem):
calculation = []
istep = {}
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError:  # not all calculations have a structure
s = None
pass
for va in elem.findtotal("vnumset"):
istep[va.attrib["name"]] = _parse_vnumset(va)
istep["structure"] = s
istep["electronic_steps"] = []
calculation.apd(istep)
for scstep in elem.findtotal("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text) for i in scstep.find("energy").findtotal("i")}
cur_ene = d["e_fr_energy"]
get_min_steps = 1 if len(calculation) >= 1 else self.parameters.get("NELMIN", 5)
if len(calculation[-1]["electronic_steps"]) <= get_min_steps:
calculation[-1]["electronic_steps"].apd(d)
else:
last_ene = calculation[-1]["electronic_steps"][-1]["e_fr_energy"]
if absolute(cur_ene - last_ene) < 1.0:
calculation[-1]["electronic_steps"].apd(d)
else:
calculation.apd({"electronic_steps": [d]})
except AttributeError:  # not all calculations have an energy
pass
calculation[-1].update(calculation[-1]["electronic_steps"][-1])
return calculation
def _parse_calculation(self, elem):
try:
istep = {i.attrib["name"]: float(i.text) for i in elem.find("energy").findtotal("i")}
except AttributeError:  # not all calculations have an energy
istep = {}
pass
esteps = []
for scstep in elem.findtotal("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text) for i in scstep.find("energy").findtotal("i")}
esteps.apd(d)
except AttributeError:  # not all calculations have an energy
pass
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError:  # not all calculations have a structure
s = None
pass
for va in elem.findtotal("vnumset"):
istep[va.attrib["name"]] = _parse_vnumset(va)
istep["electronic_steps"] = esteps
istep["structure"] = s
elem.clear()
return istep
@staticmethod
def _parse_dos(elem):
efermi = float(elem.find("i").text)
energies = None
tdensities = {}
idensities = {}
for s in elem.find("total").find("numset").find("set").findtotal("set"):
data = bn.numset(_parse_vnumset(s))
energies = data[:, 0]
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
tdensities[spin] = data[:, 1]
idensities[spin] = data[:, 2]
pdoss = []
partial = elem.find("partial")
if partial is not None:
orbs = [ss.text for ss in partial.find("numset").findtotal("field")]
orbs.pop(0)
lm = any_condition("x" in s for s in orbs)
for s in partial.find("numset").find("set").findtotal("set"):
pdos = defaultdict(dict)
for ss in s.findtotal("set"):
spin = Spin.up if ss.attrib["comment"] == "spin 1" else Spin.down
data = bn.numset(_parse_vnumset(ss))
nrow, ncol = data.shape
for j in range(1, ncol):
if lm:
orb = Orbital(j - 1)
else:
orb = OrbitalType(j - 1)
pdos[orb][spin] = data[:, j]
pdoss.apd(pdos)
elem.clear()
return (
Dos(efermi, energies, tdensities),
Dos(efermi, energies, idensities),
pdoss,
)
@staticmethod
def _parse_eigen(elem):
eigenvalues = defaultdict(list)
for s in elem.find("numset").find("set").findtotal("set"):
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
for ss in s.findtotal("set"):
eigenvalues[spin].apd(_parse_vnumset(ss))
eigenvalues = {spin: bn.numset(v) for spin, v in eigenvalues.items()}
elem.clear()
return eigenvalues
@staticmethod
def _parse_projected_eigen(elem):
root = elem.find("numset").find("set")
proj_eigen = defaultdict(list)
for s in root.findtotal("set"):
spin = int(re.match(r"spin(\d+)", s.attrib["comment"]).group(1))
# Force spin to be +1 or -1
for kpt, ss in enumerate(s.findtotal("set")):
dk = []
for band, sss in enumerate(ss.findtotal("set")):
db = _parse_vnumset(sss)
dk.apd(db)
proj_eigen[spin].apd(dk)
proj_eigen = {spin: bn.numset(v) for spin, v in proj_eigen.items()}
if len(proj_eigen) > 2:
# non-collinear magnetism (also spin-orbit coupling) enabled, last three
# "spin channels" are the projected magnetisation of the orbitals in the
# x, y, and z cartesian coordinates
proj_mag = bn.pile_operation([proj_eigen.pop(i) for i in range(2, 5)], axis=-1)
proj_eigen = {Spin.up: proj_eigen[1]}
else:
proj_eigen = {Spin.up if k == 1 else Spin.down: v for k, v in proj_eigen.items()}
proj_mag = None
elem.clear()
return proj_eigen, proj_mag
@staticmethod
def _parse_dynmat(elem):
hessian = []
eigenvalues = []
eigenvectors = []
for v in elem.findtotal("v"):
if v.attrib["name"] == "eigenvalues":
eigenvalues = [float(i) for i in v.text.sep_split()]
for va in elem.findtotal("vnumset"):
if va.attrib["name"] == "hessian":
for v in va.findtotal("v"):
hessian.apd([float(i) for i in v.text.sep_split()])
elif va.attrib["name"] == "eigenvectors":
for v in va.findtotal("v"):
eigenvectors.apd([float(i) for i in v.text.sep_split()])
return hessian, eigenvalues, eigenvectors
class BSVasprun(Vasprun):
"""
A highly optimized version of Vasprun that parses only eigenvalues for
bandstructures. All other properties like structures, parameters,
etc. are ignored.
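Example (a minimal sketch; the file name is hypothetical)::

    vr = BSVasprun("vasprun.xml", parse_projected_eigen=True)
    bs = vr.get_band_structure(line_mode=True)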
"""
def __init__(
self,
filename: str,
parse_projected_eigen: Union[bool, str] = False,
parse_potcar_file: Union[bool, str] = False,
occu_tol: float = 1e-8,
separate_spins: bool = False,
):
"""
Args:
filename: Filename to parse
parse_projected_eigen: Whether to parse the projected
eigenvalues. Defaults to False. Set to True to obtain projected
eigenvalues. **Note that this can take an extreme amount of time
and memory.** So use this wisely.
parse_potcar_file: Whether to parse the potcar file to read
the potcar hashes for the potcar_spec attribute. Defaults to False,
where no hashes will be determined and the potcar_spec dictionaries
will read {"symbol": ElSymbol, "hash": None}. By default, looks in
the same directory as the vasprun.xml, with the same extensions as
vasprun.xml. If a string is provided, looks at that filepath.
occu_tol: Sets the minimum tolerance for the determination of the
VBM and CBM. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
separate_spins (bool): Whether the band gap, CBM, and VBM should be
reported for each individual spin channel. Defaults to False,
which computes the eigenvalue band properties independent of
the spin orientation. If True, the calculation must be spin-polarized.
"""
self.filename = filename
self.occu_tol = occu_tol
self.separate_spins = separate_spins
with zopen(filename, "rt") as f:
self.efermi = None
parsed_header = False
self.eigenvalues = None
self.projected_eigenvalues = None
for event, elem in ET.iterparse(f):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
(
self.kpoints,
self.actual_kpoints,
self.actual_kpoints_weights,
) = self._parse_kpoints(elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "atoget_minfo":
self.atomic_symbols, self.potcar_symbols = self._parse_atoget_minfo(elem)
self.potcar_spec = [{"titel": p, "hash": None} for p in self.potcar_symbols]
parsed_header = True
elif tag == "i" and elem.attrib.get("name") == "efermi":
self.efermi = float(elem.text)
elif tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
(
self.projected_eigenvalues,
self.projected_magnetisation,
) = self._parse_projected_eigen(elem)
elif tag == "structure" and elem.attrib.get("name") == "finalpos":
self.final_structure = self._parse_structure(elem)
self.vasp_version = self.generator["version"]
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {
"vasp_version": self.vasp_version,
"has_vasp_completed": True,
"nsites": len(self.final_structure),
}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
uniq_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = uniq_symbols
d["nelements"] = len(uniq_symbols)
d["run_type"] = self.run_type
vin = {
"incar": dict(self.incar),
"crystal": self.final_structure.as_dict(),
"kpoints": self.kpoints.as_dict(),
}
actual_kpts = [
{
"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i],
}
for i in range(len(self.actual_kpoints))
]
vin["kpoints"]["actual_points"] = actual_kpts
vin["potcar"] = [s.sep_split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.sep_split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = dict(self.parameters)
vin["lattice_rec"] = self.final_structure.lattice.reciprocal_lattice.as_dict()
d["ibnut"] = vin
vout = {"crystal": self.final_structure.as_dict(), "efermi": self.efermi}
if self.eigenvalues:
eigen = defaultdict(dict)
for spin, values in self.eigenvalues.items():
for i, v in enumerate(values):
eigen[i][str(spin)] = v
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm, is_gap_direct=is_direct))
if self.projected_eigenvalues:
peigen = []
for i in range(len(eigen)):
peigen.apd({})
for spin, v in self.projected_eigenvalues.items():
for kpoint_index, vv in enumerate(v):
if str(spin) not in peigen[kpoint_index]:
peigen[kpoint_index][str(spin)] = vv
vout["projected_eigenvalues"] = peigen
d["output"] = vout
return jsanitize(d, strict=True)
class Outcar:
"""
Parser for data in OUTCAR that is not available in Vasprun.xml
Note, this class works a bit differently than most of the other
VaspObjects, since the OUTCAR can be very different depending on which
"type of run" was performed.
Creating the OUTCAR class with a filename reads "regular parameters" that
are always present.
.. attribute:: magnetization
Magnetization on each ion as a tuple of dict, e.g.,
({"d": 0.0, "p": 0.003, "s": 0.002, "tot": 0.005}, ... )
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: chemical_shielding
chemical shielding on each ion as a dictionary with core and valence contributions
.. attribute:: unsym_cs_tensor
Unsymmetrized chemical shielding tensor matrices on each ion as a list.
e.g.,
[[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]],
...
[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]]]
.. attribute:: cs_g0_contribution
G=0 contribution to chemical shielding. 2D rank 3 matrix
.. attribute:: cs_core_contribution
Core contribution to chemical shielding. dict. e.g.,
{'Mg': -412.8, 'C': -200.5, 'O': -271.1}
.. attribute:: efg
Electric Field Gradient (EFG) tensor on each ion as a tuple of dict, e.g.,
({"cq": 0.1, "eta", 0.2, "nuclear_quadrupole_moment": 0.3},
{"cq": 0.7, "eta", 0.8, "nuclear_quadrupole_moment": 0.9},
...)
.. attribute:: charge
Charge on each ion as a tuple of dict, e.g.,
({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: is_stopped
True if OUTCAR is from a stopped run (using STOPCAR, see Vasp Manual).
.. attribute:: run_stats
Various useful run stats as a dict including "System time (sec)",
"Total CPU time used (sec)", "Elapsed time (sec)",
"Maximum memory used (kb)", "Average memory used (kb)",
"User time (sec)".
.. attribute:: elastic_tensor
Total elastic moduli (kBar) are given as a 6x6 array (matrix).
.. attribute:: drift
Total drift for each step in eV/Atom
.. attribute:: ngf
Dimensions of the augmentation grid
.. attribute: sampling_radii
Size of the sampling radii in VASP for the test charges for
the electrostatic potential at each atom. The total array size is the number
of elements present in the calculation.
.. attribute: electrostatic_potential
Average electrostatic potential at each atomic position in order
of the atoms in POSCAR.
.. attribute:: final_energy_contribs
Individual contributions to the total final energy as a dictionary.
Includes contributions from keys, e.g.:
{'DENC': -505778.5184347, 'EATOM': 15561.06492564, 'EBANDS': -804.53201231,
'EENTRO': -0.08932659, 'EXHF': 0.0, 'Ediel_sol': 0.0,
'PAW double counting': 664.6726974100002, 'PSCENC': 742.48691646,
'TEWEN': 489742.86847338, 'XCENC': -169.64189814}
.. attribute:: efermi
Fermi energy
.. attribute:: filename
Filename
.. attribute:: final_energy
Final (total) energy
.. attribute:: has_onsite_density_matrices
Boolean for if onsite density matrices have been set
.. attribute:: lcalcpol
If LCALCPOL has been set
.. attribute:: lepsilon
If LEPSILON has been set
.. attribute:: nelect
Returns the number of electrons in the calculation
.. attribute:: spin
If spin-polarization was enabled via ISPIN
.. attribute:: total_mag
Total magnetization (in terms of the number of unpaired electrons)
One can then call a specific reader depending on the type of run being
performed. These are currently: read_igpar(), read_lepsilon(),
read_lcalcpol(), read_core_state_eign() and read_avg_core_pot().
See the documentation of those methods for more documentation.
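Example (a minimal usage sketch; the file name is hypothetical and the
magnetization data requires a suitable LORBIT setting)::

    out = Outcar("OUTCAR")
    print(out.final_energy, out.efermi)
    print(out.run_stats.get("Elapsed time (sec)"))
    print(out.magnetization)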
Authors: <NAME>, <NAME>
"""
def __init__(self, filename):
"""
Args:
filename (str): OUTCAR filename to parse.
"""
self.filename = filename
self.is_stopped = False
# data from end of OUTCAR
charge = []
mag_x = []
mag_y = []
mag_z = []
header = []
run_stats = {}
total_mag = None
nelect = None
efermi = None
total_energy = None
time_patt = re.compile(r"\((sec|kb)\)")
efermi_patt = re.compile(r"E-fermi\s*:\s*(\S+)")
nelect_patt = re.compile(r"number of electron\s+(\S+)\s+magnetization")
mag_patt = re.compile(r"number of electron\s+\S+\s+magnetization\s+(" r"\S+)")
toten_pattern = re.compile(r"free energy TOTEN\s+=\s+([\d\-\.]+)")
total_lines = []
for line in reverse_readfile(self.filename):
clean = line.strip()
total_lines.apd(clean)
if clean.find("soft stop encountered! aborting job") != -1:
self.is_stopped = True
else:
if time_patt.search(line):
tok = line.strip().sep_split(":")
try:
# try-catch because VASP 6.2.0 may print
# Average memory used (kb): N/A
# which cannot be parsed as float
run_stats[tok[0].strip()] = float(tok[1].strip())
except ValueError:
run_stats[tok[0].strip()] = None
continue
m = efermi_patt.search(clean)
if m:
try:
# try-catch because VASP sometimes prints
# 'E-fermi: ******** XC(G=0): -6.1327
# alpha+bet : -1.8238'
efermi = float(m.group(1))
continue
except ValueError:
efermi = None
continue
m = nelect_patt.search(clean)
if m:
nelect = float(m.group(1))
m = mag_patt.search(clean)
if m:
total_mag = float(m.group(1))
if total_energy is None:
m = toten_pattern.search(clean)
if m:
total_energy = float(m.group(1))
if total([nelect, total_mag is not None, efermi is not None, run_stats]):
break
# For single atom systems, VASP doesn't print a total line, so
# reverse parsing is very difficult
read_charge = False
read_mag_x = False
read_mag_y = False # for SOC calculations only
read_mag_z = False
total_lines.reverse()
for clean in total_lines:
if read_charge or read_mag_x or read_mag_y or read_mag_z:
if clean.startswith("# of ion"):
header = re.sep_split(r"\s{2,}", clean.strip())
header.pop(0)
else:
m = re.match(r"\s*(\d+)\s+(([\d\.\-]+)\s+)+", clean)
if m:
toks = [float(i) for i in re.findtotal(r"[\d\.\-]+", clean)]
toks.pop(0)
if read_charge:
charge.apd(dict(zip(header, toks)))
elif read_mag_x:
mag_x.apd(dict(zip(header, toks)))
elif read_mag_y:
mag_y.apd(dict(zip(header, toks)))
elif read_mag_z:
mag_z.apd(dict(zip(header, toks)))
elif clean.startswith("tot"):
read_charge = False
read_mag_x = False
read_mag_y = False
read_mag_z = False
if clean == "total charge":
charge = []
read_charge = True
read_mag_x, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (x)":
mag_x = []
read_mag_x = True
read_charge, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (y)":
mag_y = []
read_mag_y = True
read_charge, read_mag_x, read_mag_z = False, False, False
elif clean == "magnetization (z)":
mag_z = []
read_mag_z = True
read_charge, read_mag_x, read_mag_y = False, False, False
elif re.search("electrostatic", clean):
read_charge, read_mag_x, read_mag_y, read_mag_z = (
False,
False,
False,
False,
)
# merge x, y and z components of magmoms if present (SOC calculation)
if mag_y and mag_z:
# TODO: detect spin axis
mag = []
for idx in range(len(mag_x)):
mag.apd(
{key: Magmom([mag_x[idx][key], mag_y[idx][key], mag_z[idx][key]]) for key in mag_x[0].keys()}
)
else:
mag = mag_x
# data from beginning of OUTCAR
run_stats["cores"] = 0
with zopen(filename, "rt") as f:
for line in f:
if "running" in line:
run_stats["cores"] = line.sep_split()[2]
break
self.run_stats = run_stats
self.magnetization = tuple(mag)
self.charge = tuple(charge)
self.efermi = efermi
self.nelect = nelect
self.total_mag = total_mag
self.final_energy = total_energy
self.data = {}
# Read "total number of plane waves", NPLWV:
self.read_pattern(
{"bnlwv": r"total plane-waves NPLWV =\s+(\*{6}|\d+)"},
terget_minate_on_match=True,
)
try:
self.data["bnlwv"] = [[int(self.data["bnlwv"][0][0])]]
except ValueError:
self.data["bnlwv"] = [[None]]
bnlwvs_at_kpoints = [
n
for [n] in self.read_table_pattern(
r"\n{3}-{104}\n{3}",
r".+plane waves:\s+(\*{6,}|\d+)",
r"get_maximum and get_minimum number of plane-waves",
)
]
self.data["bnlwvs_at_kpoints"] = [None for n in bnlwvs_at_kpoints]
for (n, bnlwv) in enumerate(bnlwvs_at_kpoints):
try:
self.data["bnlwvs_at_kpoints"][n] = int(bnlwv)
except ValueError:
pass
# Read the drift:
self.read_pattern(
{"drift": r"total drift:\s+([\.\-\d]+)\s+([\.\-\d]+)\s+([\.\-\d]+)"},
terget_minate_on_match=False,
postprocess=float,
)
self.drift = self.data.get("drift", [])
# Check if calculation is spin polarized
self.spin = False
self.read_pattern({"spin": "ISPIN = 2"})
if self.data.get("spin", []):
self.spin = True
# Check if calculation is noncollinear
self.noncollinear = False
self.read_pattern({"noncollinear": "LNONCOLLINEAR = T"})
if self.data.get("noncollinear", []):
self.noncollinear = True  # LNONCOLLINEAR = T was found in the OUTCAR
# Check if the calculation type is DFPT
self.dfpt = False
self.read_pattern(
{"ibrion": r"IBRION =\s+([\-\d]+)"},
terget_minate_on_match=True,
postprocess=int,
)
if self.data.get("ibrion", [[0]])[0][0] > 6:
self.dfpt = True
self.read_internal_strain_tensor()
# Check to see if LEPSILON is true and read piezo data if so
self.lepsilon = False
self.read_pattern({"epsilon": "LEPSILON= T"})
if self.data.get("epsilon", []):
self.lepsilon = True
self.read_lepsilon()
# only read ionic contribution if DFPT is turned on
if self.dfpt:
self.read_lepsilon_ionic()
# Check to see if LCALCPOL is true and read polarization data if so
self.lcalcpol = False
self.read_pattern({"calcpol": "LCALCPOL = T"})
if self.data.get("calcpol", []):
self.lcalcpol = True
self.read_lcalcpol()
self.read_pseudo_zval()
# Read electrostatic potential
self.electrostatic_potential = None
self.ngf = None
self.sampling_radii = None
self.read_pattern({"electrostatic": r"average \(electrostatic\) potential at core"})
if self.data.get("electrostatic", []):
self.read_electrostatic_potential()
self.nmr_cs = False
self.read_pattern({"nmr_cs": r"LCHIMAG = (T)"})
if self.data.get("nmr_cs", None):
self.nmr_cs = True
self.read_chemical_shielding()
self.read_cs_g0_contribution()
self.read_cs_core_contribution()
self.read_cs_raw_symmetrized_tensors()
self.nmr_efg = False
self.read_pattern({"nmr_efg": r"NMR quadrupolar parameters"})
if self.data.get("nmr_efg", None):
self.nmr_efg = True
self.read_nmr_efg()
self.read_nmr_efg_tensor()
self.has_onsite_density_matrices = False
self.read_pattern(
{"has_onsite_density_matrices": r"onsite density matrix"},
terget_minate_on_match=True,
)
if "has_onsite_density_matrices" in self.data:
self.has_onsite_density_matrices = True
self.read_onsite_density_matrices()
# Store the individual contributions to the final total energy
final_energy_contribs = {}
for k in [
"PSCENC",
"TEWEN",
"DENC",
"EXHF",
"XCENC",
"PAW double counting",
"EENTRO",
"EBANDS",
"EATOM",
"Ediel_sol",
]:
if k == "PAW double counting":
self.read_pattern({k: r"%s\s+=\s+([\.\-\d]+)\s+([\.\-\d]+)" % (k)})
else:
self.read_pattern({k: r"%s\s+=\s+([\d\-\.]+)" % (k)})
if not self.data[k]:
continue
final_energy_contribs[k] = total_count(float(f) for f in self.data[k][-1])
self.final_energy_contribs = final_energy_contribs
def read_pattern(self, patterns, reverse=False, terget_minate_on_match=False, postprocess=str):
r"""
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
reverse (bool): Read files in reverse. Defaults to False. Useful for
large files, especially OUTCARs, and especially when used with
terget_minate_on_match.
terget_minate_on_match (bool): Whether to terminate when there is at
least one match for each key in patterns.
postprocess (callable): A post-processing function applied to all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned values
are lists of lists, because you can grep multiple items on one line.
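Example (an illustrative pattern; the key name is arbitrary and the regex
mirrors the NELECT line parsed elsewhere in this class)::

    outcar.read_pattern({"nelect": r"number of electron\s+(\S+)"},
                        terget_minate_on_match=True, postprocess=float)
    nelect = outcar.data["nelect"][0][0]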
"""
matches = regrep(
self.filename,
patterns,
reverse=reverse,
terget_minate_on_match=terget_minate_on_match,
postprocess=postprocess,
)
for k in patterns.keys():
self.data[k] = [i[0] for i in matches.get(k, [])]
def read_table_pattern(
self,
header_pattern,
row_pattern,
footer_pattern,
postprocess=str,
attribute_name=None,
last_one_only=True,
):
r"""
Parse table-like data. A table is composed of three parts: header,
main body, and footer. All data matching "row_pattern" in the main body
will be returned.
Args:
header_pattern (str): The regular expression pattern that matches the
table header. This pattern should match all the text
immediately before the main body of the table. For tables with
multiple sections, match the text up to the section of
interest. MULTILINE and DOTALL options are enforced; as a
result, the "." meta-character will also match "\n" in this
section.
row_pattern (str): The regular expression matches a single line in
the table. Capture interested field using regular expression
groups.
footer_pattern (str): The regular expression matches the end of the
table. E.g. a long dash line.
postprocess (callable): A post-processing function applied to all
matches. Defaults to str, i.e., no change.
attribute_name (str): Name of this table. If present, the parsed data
will be attached to "data", e.g. self.data["efg"] = [...].
last_one_only (bool): All tables will be parsed; if this option
is set to True, only the last table will be returned and the
enclosing list will be removed, i.e., only a single table will
be returned. Defaults to True.
Returns:
List of tables. 1) A table is a list of rows. 2) A row is either a list of
attribute values, if the capturing groups in row_pattern are unnamed,
or a dict, if named capturing groups are defined in
row_pattern.
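Example (a purely illustrative sketch; the patterns are hypothetical and
do not correspond to a specific OUTCAR block)::

    rows = outcar.read_table_pattern(
        header_pattern=r"-+\s*ION\s+VALUE\s*-+",
        row_pattern=r"\s*\d+\s+([\.\-\d]+)",
        footer_pattern=r"-{10,}",
        postprocess=float,
        last_one_only=True,
    )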
"""
with zopen(self.filename, "rt") as f:
text = f.read()
table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + row_pattern + r")+)\s+" + footer_pattern
table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
tables = []
for mt in table_pattern.finditer(text):
table_body_text = mt.group("table_body")
table_contents = []
for line in table_body_text.sep_split("\n"):
ml = rp.search(line)
# skip empty lines
if not ml:
continue
d = ml.groupdict()
if len(d) > 0:
processed_line = {k: postprocess(v) for k, v in d.items()}
else:
processed_line = [postprocess(v) for v in ml.groups()]
table_contents.apd(processed_line)
tables.apd(table_contents)
if last_one_only:
retained_data = tables[-1]
else:
retained_data = tables
if attribute_name is not None:
self.data[attribute_name] = retained_data
return retained_data
def read_electrostatic_potential(self):
"""
Parses the electrostatic potential for the last ionic step.
"""
pattern = {"ngf": r"\s+dimension x,y,z NGXF=\s+([\.\-\d]+)\sNGYF=\s+([\.\-\d]+)\sNGZF=\s+([\.\-\d]+)"}
self.read_pattern(pattern, postprocess=int)
self.ngf = self.data.get("ngf", [[]])[0]
pattern = {"radii": r"the test charge radii are((?:\s+[\.\-\d]+)+)"}
self.read_pattern(pattern, reverse=True, terget_minate_on_match=True, postprocess=str)
self.sampling_radii = [float(f) for f in self.data["radii"][0][0].sep_split()]
header_pattern = r"\(the normlizattion of the test charge is\s+[\.\-\d]+\)"
table_pattern = r"((?:\s+\d+\s*[\.\-\d]+)+)"
footer_pattern = r"\s+E-fermi :"
pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern)
pots = "".join(itertools.chain.from_iterable(pots))
pots = re.findtotal(r"\s+\d+\s*([\.\-\d]+)+", pots)
self.electrostatic_potential = [float(f) for f in pots]
@staticmethod
def _parse_sci_notation(line):
"""
Method to parse lines with values in scientific notation, potentially
without spaces between the values. This assumes that the scientific
notation always lists two digits for the exponent, e.g. 3.535E-02.
Args:
line: line to parse
Returns: a list of floats if found, or an empty list if not
"""
m = re.findtotal(r"[\.\-\d]+E[\+\-]\d{2}", line)
if m:
return [float(t) for t in m]
return []
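# Sketch of the behaviour this helper targets: VASP columns can run together
# when values are negative, e.g. "0.535E-02-0.123E+01". Assuming a two-digit
# exponent, the regex above splits them cleanly:
#
#     Outcar._parse_sci_notation("0.535E-02-0.123E+01")  # -> [0.00535, -1.23]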
def read_freq_dielectric(self):
"""
Parses the frequency dependent dielectric function (obtained with
LOPTICS). Frequencies (in eV) are in self.frequencies, and dielectric
tensor function is given as self.dielectric_tensor_function.
"""
plasma_pattern = r"plasma frequency squared.*"
dielectric_pattern = (
r"frequency dependent\s+IMAGINARY "
r"DIELECTRIC FUNCTION \(independent particle, "
r"no local field effects\)(\sdensity-density)*$"
)
row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 3)
plasma_frequencies = defaultdict(list)
read_plasma = False
read_dielectric = False
energies = []
data = {"REAL": [], "IMAGINARY": []}
count = 0
component = "IMAGINARY"
with zopen(self.filename, "rt") as f:
for l in f:
l = l.strip()
if re.match(plasma_pattern, l):
read_plasma = "intraband" if "intraband" in l else "interband"
elif re.match(dielectric_pattern, l):
read_plasma = False
read_dielectric = True
row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 7)
if read_plasma and re.match(row_pattern, l):
plasma_frequencies[read_plasma].apd([float(t) for t in l.strip().sep_split()])
elif read_plasma and Outcar._parse_sci_notation(l):
plasma_frequencies[read_plasma].apd(Outcar._parse_sci_notation(l))
elif read_dielectric:
toks = None
if re.match(row_pattern, l.strip()):
toks = l.strip().sep_split()
elif Outcar._parse_sci_notation(l.strip()):
toks = Outcar._parse_sci_notation(l.strip())
elif re.match(r"\s*-+\s*", l):
count += 1
if toks:
if component == "IMAGINARY":
energies.apd(float(toks[0]))
xx, yy, zz, xy, yz, xz = (float(t) for t in toks[1:])
matrix = [[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]]
data[component].apd(matrix)
if count == 2:
component = "REAL"
elif count == 3:
break
self.plasma_frequencies = {k: bn.numset(v[:3]) for k, v in plasma_frequencies.items()}
self.dielectric_energies = bn.numset(energies)
self.dielectric_tensor_function = bn.numset(data["REAL"]) + 1j * bn.numset(data["IMAGINARY"])
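# Usage sketch (hedged): after read_freq_dielectric() the imaginary part of
# the dielectric tensor gives a quick absorption-like spectrum. `out` is an
# already-constructed Outcar-like object; the averaging below is illustrative.
#
#     out.read_freq_dielectric()
#     energies = out.dielectric_energies           # shape (N,)
#     eps = out.dielectric_tensor_function         # shape (N, 3, 3), complex
#     eps_imag_avg = (eps.imag[:, 0, 0] + eps.imag[:, 1, 1] + eps.imag[:, 2, 2]) / 3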
@property # type: ignore
@deprecated(message="frequencies has been renamed to dielectric_energies.")
def frequencies(self):
"""
Renamed to dielectric energies.
"""
return self.dielectric_energies
def read_chemical_shielding(self):
"""
Parse the NMR chemical shieldings data. Only the second part, "absolute, valence and core",
is parsed, and only the three rightmost fields (ISO_SHIELDING, SPAN, SKEW) are retrieved.
Returns:
List of chemical shieldings in the order of atoms from the OUTCAR. Maryland notation is adopted.
"""
header_pattern = (
r"\s+CSA tensor \(J\. Mason, Solid State Nucl\. Magn\. Reson\. 2, "
r"285 \(1993\)\)\s+"
r"\s+-{50,}\s+"
r"\s+EXCLUDING G=0 CONTRIBUTION\s+INCLUDING G=0 CONTRIBUTION\s+"
r"\s+-{20,}\s+-{20,}\s+"
r"\s+ATOM\s+ISO_SHIFT\s+SPAN\s+SKEW\s+ISO_SHIFT\s+SPAN\s+SKEW\s+"
r"-{50,}\s*$"
)
first_part_pattern = r"\s+\(absolute, valence only\)\s+$"
swtotalon_valence_body_pattern = r".+?\(absolute, valence and core\)\s+$"
row_pattern = r"\d+(?:\s+[-]?\d+\.\d+){3}\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 3)
footer_pattern = r"-{50,}\s*$"
h1 = header_pattern + first_part_pattern
cs_valence_only = self.read_table_pattern(
h1, row_pattern, footer_pattern, postprocess=float, last_one_only=True
)
h2 = header_pattern + swtotalon_valence_body_pattern
cs_valence_and_core = self.read_table_pattern(
h2, row_pattern, footer_pattern, postprocess=float, last_one_only=True
)
total_cs = {}
for name, cs_table in [
["valence_only", cs_valence_only],
["valence_and_core", cs_valence_and_core],
]:
total_cs[name] = cs_table
self.data["chemical_shielding"] = total_cs
def read_cs_g0_contribution(self):
"""
Parse the G0 contribution of NMR chemical shielding.
Returns:
G0 contribution matrix as list of list.
"""
header_pattern = (
r"^\s+G\=0 CONTRIBUTION TO CHEMICAL SHIFT \(field along BDIR\)\s+$\n"
r"^\s+-{50,}$\n"
r"^\s+BDIR\s+X\s+Y\s+Z\s*$\n"
r"^\s+-{50,}\s*$\n"
)
row_pattern = r"(?:\d+)\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 3)
footer_pattern = r"\s+-{50,}\s*$"
self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=float,
last_one_only=True,
attribute_name="cs_g0_contribution",
)
def read_cs_core_contribution(self):
"""
Parse the core contribution of NMR chemical shielding.
Returns:
Core contribution as a dict of {element: core shift in ppm}.
"""
header_pattern = (
r"^\s+Core NMR properties\s*$\n" r"\n" r"^\s+typ\s+El\s+Core shift \(ppm\)\s*$\n" r"^\s+-{20,}$\n"
)
row_pattern = r"\d+\s+(?P<element>[A-Z][a-z]?\w?)\s+(?P<shift>[-]?\d+\.\d+)"
footer_pattern = r"\s+-{20,}\s*$"
self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=str,
last_one_only=True,
attribute_name="cs_core_contribution",
)
core_contrib = {d["element"]: float(d["shift"]) for d in self.data["cs_core_contribution"]}
self.data["cs_core_contribution"] = core_contrib
def read_cs_raw_symmetrized_tensors(self):
"""
Parse the matrix form of the NMR tensor before it is corrected and tabulated.
Returns:
List of unsymmetrized tensors in the order of atoms.
"""
header_pattern = r"\s+-{50,}\s+" r"\s+Absolute Chemical Shift tensors\s+" r"\s+-{50,}$"
first_part_pattern = r"\s+UNSYMMETRIZED TENSORS\s+$"
row_pattern = r"\s+".join([r"([-]?\d+\.\d+)"] * 3)
unsym_footer_pattern = r"^\s+SYMMETRIZED TENSORS\s+$"
with zopen(self.filename, "rt") as f:
text = f.read()
unsym_table_pattern_text = header_pattern + first_part_pattern + r"(?P<table_body>.+)" + unsym_footer_pattern
table_pattern = re.compile(unsym_table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
m = table_pattern.search(text)
if m:
table_text = m.group("table_body")
micro_header_pattern = r"ion\s+\d+"
micro_table_pattern_text = micro_header_pattern + r"\s*^(?P<table_body>(?:\s*" + row_pattern + r")+)\s+"
micro_table_pattern = re.compile(micro_table_pattern_text, re.MULTILINE | re.DOTALL)
unsym_tensors = []
for mt in micro_table_pattern.finditer(table_text):
table_body_text = mt.group("table_body")
tensor_matrix = []
for line in table_body_text.rstrip().sep_split("\n"):
ml = rp.search(line)
processed_line = [float(v) for v in ml.groups()]
tensor_matrix.apd(processed_line)
unsym_tensors.apd(tensor_matrix)
self.data["unsym_cs_tensor"] = unsym_tensors
else:
raise ValueError("NMR UNSYMMETRIZED TENSORS is not found")
def read_nmr_efg_tensor(self):
"""
Parses the NMR Electric Field Gradient Raw Tensors
Returns:
A list of Electric Field Gradient Tensors in the order of Atoms from OUTCAR
"""
header_pattern = (
r"Electric field gradients \(V/A\^2\)\n" r"-*\n" r" ion\s+V_xx\s+V_yy\s+V_zz\s+V_xy\s+V_xz\s+V_yz\n" r"-*\n"
)
row_pattern = r"\d+\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)"
footer_pattern = r"-*\n"
data = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
tensors = [make_symmetric_matrix_from_upper_tri(d) for d in data]
self.data["unsym_efg_tensor"] = tensors
return tensors
def read_nmr_efg(self):
"""
Parse the interpreted values of the NMR Electric Field Gradient.
Returns:
Electric Field Gradient tensors as a list of dict in the order of atoms from OUTCAR.
Each dict key/value pair corresponds to a component of the tensors.
"""
header_pattern = (
r"^\s+NMR quadrupolar parameters\s+$\n"
r"^\s+Cq : quadrupolar parameter\s+Cq=e[*]Q[*]V_zz/h$\n"
r"^\s+eta: asymmetry parameters\s+\(V_yy - V_xx\)/ V_zz$\n"
r"^\s+Q : nuclear electric quadrupole moment in mb \(millibarn\)$\n"
r"^-{50,}$\n"
r"^\s+ion\s+Cq\(MHz\)\s+eta\s+Q \(mb\)\s+$\n"
r"^-{50,}\s*$\n"
)
row_pattern = (
r"\d+\s+(?P<cq>[-]?\d+\.\d+)\s+(?P<eta>[-]?\d+\.\d+)\s+" r"(?P<nuclear_quadrupole_moment>[-]?\d+\.\d+)"
)
footer_pattern = r"-{50,}\s*$"
self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=float,
last_one_only=True,
attribute_name="efg",
)
def read_elastic_tensor(self):
"""
Parse the elastic tensor data.
Returns:
6x6 numset corresponding to the elastic tensor from the OUTCAR.
"""
header_pattern = r"TOTAL ELASTIC MODULI \(kBar\)\s+" r"Direction\s+([X-Z][X-Z]\s+)+" r"\-+"
row_pattern = r"[X-Z][X-Z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"\-+"
et_table = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
self.data["elastic_tensor"] = et_table
def read_piezo_tensor(self):
"""
Parse the piezo tensor data
"""
header_pattern = r"PIEZOELECTRIC TENSOR for field in x, y, " r"z\s+\(C/m\^2\)\s+([X-Z][X-Z]\s+)+\-+"
row_pattern = r"[x-z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"BORN EFFECTIVE"
pt_table = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
self.data["piezo_tensor"] = pt_table
def read_onsite_density_matrices(self):
"""
Parse the onsite density matrices, returns list with index corresponding
to atom index in Structure.
"""
# matrix size will vary depending on whether d or f orbitals are present;
# the regex therefore assumes f, and None values are filtered out if only d
header_pattern = r"spin component 1\n"
row_pattern = r"[^\S\r\n]*(?:(-?[\d.]+))" + r"(?:[^\S\r\n]*(-?[\d.]+)[^\S\r\n]*)?" * 6 + r".*?"
footer_pattern = r"\nspin component 2"
spin1_component = self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=lambda x: float(x) if x else None,
last_one_only=False,
)
# filter out None values
spin1_component = [[[e for e in row if e is not None] for row in matrix] for matrix in spin1_component]
# and duplicate for Spin.down
header_pattern = r"spin component 2\n"
row_pattern = r"[^\S\r\n]*(?:([\d.-]+))" + r"(?:[^\S\r\n]*(-?[\d.]+)[^\S\r\n]*)?" * 6 + r".*?"
footer_pattern = r"\n occupancies and eigenvectors"
spin2_component = self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=lambda x: float(x) if x else None,
last_one_only=False,
)
spin2_component = [[[e for e in row if e is not None] for row in matrix] for matrix in spin2_component]
self.data["onsite_density_matrices"] = [
{Spin.up: spin1_component[idx], Spin.down: spin2_component[idx]} for idx in range(len(spin1_component))
]
def read_corrections(self, reverse=True, terget_minate_on_match=True):
"""
Reads the dipole-quadrupole corrections into
Outcar.data["dipol_quadrupol_correction"].
:param reverse: Whether to start from the end of the OUTCAR.
:param terget_minate_on_match: Whether to terminate once a match is found.
"""
patterns = {"dipol_quadrupol_correction": r"dipol\+quadrupol energy " r"correction\s+([\d\-\.]+)"}
self.read_pattern(
patterns,
reverse=reverse,
terget_minate_on_match=terget_minate_on_match,
postprocess=float,
)
self.data["dipol_quadrupol_correction"] = self.data["dipol_quadrupol_correction"][0][0]
def read_neb(self, reverse=True, terget_minate_on_match=True):
"""
Reads NEB data. This works with OUTCARs from both normal
VASP NEB calculations and the CI-NEB method implemented by
Henkelman et al.
Args:
reverse (bool): Read files in reverse. Useful for large files,
especially when used with terget_minate_on_match. Defaults to True
here since we usually want only the final value.
terget_minate_on_match (bool): Whether to terminate when there is at
least one match for each key in patterns. Defaults to True here
since we usually want only the final value.
Renders accessible:
tangent_force - Final tangent force.
energy - Final energy.
These can be accessed under Outcar.data[key]
"""
patterns = {
"energy": r"energy\(sigma->0\)\s+=\s+([\d\-\.]+)",
"tangent_force": r"(NEB: projections on to tangent \(spring, REAL\)\s+\S+|tangential force \(eV/A\))\s+"
r"([\d\-\.]+)",
}
self.read_pattern(
patterns,
reverse=reverse,
terget_minate_on_match=terget_minate_on_match,
postprocess=str,
)
self.data["energy"] = float(self.data["energy"][0][0])
if self.data.get("tangent_force"):
self.data["tangent_force"] = float(self.data["tangent_force"][0][1])
def read_igpar(self):
"""
Renders accessible:
er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
er_ev_tot = spin up + spin down summed
er_bp_tot = spin up + spin down summed
p_elc = spin up + spin down summed
p_ion = spin up + spin down summed
(See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
what these are).
"""
# variables to be filled
self.er_ev = {}  # will be dict (Spin.up/down) of array(3*float)
self.er_bp = {}  # will be dict (Spin.up/down) of array(3*float)
self.er_ev_tot = None  # will be array(3*float)
self.er_bp_tot = None  # will be array(3*float)
self.p_elec = None
self.p_ion = None
try:
search = []
# Nonspin cases
def er_ev(results, match):
results.er_ev[Spin.up] = bn.numset(map(float, match.groups()[1:4])) / 2
results.er_ev[Spin.down] = results.er_ev[Spin.up]
results.context = 2
search.apd(
[
r"^ *e<r>_ev=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
None,
er_ev,
]
)
def er_bp(results, match):
results.er_bp[Spin.up] = bn.numset([float(match.group(i)) for i in range(1, 4)]) / 2
results.er_bp[Spin.down] = results.er_bp[Spin.up]
search.apd(
[
r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == 2,
er_bp,
]
)
# Spin cases
def er_ev_up(results, match):
results.er_ev[Spin.up] = bn.numset([float(match.group(i)) for i in range(1, 4)])
results.context = Spin.up
search.apd(
[
r"^.*Spin component 1 *e<r>_ev=\( *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None,
er_ev_up,
]
)
def er_bp_up(results, match):
results.er_bp[Spin.up] = bn.numset(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.apd(
[
r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == Spin.up,
er_bp_up,
]
)
def er_ev_dn(results, match):
results.er_ev[Spin.down] = bn.numset(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
results.context = Spin.down
search.apd(
[
r"^.*Spin component 2 *e<r>_ev=\( *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None,
er_ev_dn,
]
)
def er_bp_dn(results, match):
results.er_bp[Spin.down] = bn.numset([float(match.group(i)) for i in range(1, 4)])
search.apd(
[
r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == Spin.down,
er_bp_dn,
]
)
# Always present spin/non-spin
def p_elc(results, match):
results.p_elc = bn.numset([float(match.group(i)) for i in range(1, 4)])
search.apd(
[
r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None,
p_elc,
]
)
def p_ion(results, match):
results.p_ion = bn.numset([float(match.group(i)) for i in range(1, 4)])
search.apd(
[
r"^.*ionic dipole moment: " r"*p\[ion\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
None,
p_ion,
]
)
self.context = None
self.er_ev = {Spin.up: None, Spin.down: None}
self.er_bp = {Spin.up: None, Spin.down: None}
micro_pyawk(self.filename, search, self)
if self.er_ev[Spin.up] is not None and self.er_ev[Spin.down] is not None:
self.er_ev_tot = self.er_ev[Spin.up] + self.er_ev[Spin.down]
if self.er_bp[Spin.up] is not None and self.er_bp[Spin.down] is not None:
self.er_bp_tot = self.er_bp[Spin.up] + self.er_bp[Spin.down]
except Exception:
self.er_ev_tot = None
self.er_bp_tot = None
raise Exception("IGPAR OUTCAR could not be parsed.")
def read_internal_strain_tensor(self):
"""
Reads the internal strain tensor and populates self.internal_strain_tensor with a list of Voigt-notation
tensors, one for each site.
"""
search = []
def internal_strain_start(results, match):
results.internal_strain_ion = int(match.group(1)) - 1
results.internal_strain_tensor.apd(bn.zeros((3, 6)))
search.apd(
[
r"INTERNAL STRAIN TENSOR FOR ION\s+(\d+)\s+for displacements in x,y,z \(eV/Angst\):",
None,
internal_strain_start,
]
)
def internal_strain_data(results, match):
if match.group(1).lower() == "x":
index = 0
elif match.group(1).lower() == "y":
index = 1
elif match.group(1).lower() == "z":
index = 2
else:
raise Exception(f"Couldn't parse row index from symbol for internal strain tensor: {match.group(1)}")
results.internal_strain_tensor[results.internal_strain_ion][index] = bn.numset(
[float(match.group(i)) for i in range(2, 8)]
)
if index == 2:
results.internal_strain_ion = None
search.apd(
[
r"^\s+([x,y,z])\s+" + r"([-]?\d+\.\d+)\s+" * 6,
lambda results, line: results.internal_strain_ion is not None,
internal_strain_data,
]
)
self.internal_strain_ion = None
self.internal_strain_tensor = []
micro_pyawk(self.filename, search, self)
def read_lepsilon(self):
"""
Reads an LEPSILON run.
# TODO: Document the actual variables.
"""
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_index = -1
search.apd(
[
r"MACROSCOPIC STATIC DIELECTRIC TENSOR \(",
None,
dielectric_section_start,
]
)
def dielectric_section_start2(results, match):
results.dielectric_index = 0
search.apd(
[
r"-------------------------------------",
lambda results, line: results.dielectric_index == -1,
dielectric_section_start2,
]
)
def dielectric_data(results, match):
results.dielectric_tensor[results.dielectric_index, :] = bn.numset(
[float(match.group(i)) for i in range(1, 4)]
)
results.dielectric_index += 1
search.apd(
[
r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_index >= 0
if results.dielectric_index is not None
else None,
dielectric_data,
]
)
def dielectric_section_stop(results, match):
results.dielectric_index = None
search.apd(
[
r"-------------------------------------",
lambda results, line: results.dielectric_index >= 1
if results.dielectric_index is not None
else None,
dielectric_section_stop,
]
)
self.dielectric_index = None
self.dielectric_tensor = bn.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_index = 0
search.apd(
[
r"PIEZOELECTRIC TENSOR for field in x, y, z " r"\(C/m\^2\)",
None,
piezo_section_start,
]
)
def piezo_data(results, match):
results.piezo_tensor[results.piezo_index, :] = bn.numset([float(match.group(i)) for i in range(1, 7)])
results.piezo_index += 1
search.apd(
[
r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)"
+ r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)"
+ r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_index >= 0 if results.piezo_index is not None else None,
piezo_data,
]
)
def piezo_section_stop(results, match):
results.piezo_index = None
search.apd(
[
r"-------------------------------------",
lambda results, line: results.piezo_index >= 1 if results.piezo_index is not None else None,
piezo_section_stop,
]
)
self.piezo_index = None
self.piezo_tensor = bn.zeros((3, 6))
def born_section_start(results, match):
results.born_ion = -1
search.apd([r"BORN EFFECTIVE CHARGES ", None, born_section_start])
def born_ion(results, match):
results.born_ion = int(match.group(1)) - 1
results.born.apd(bn.zeros((3, 3)))
search.apd(
[
r"ion +([0-9]+)",
lambda results, line: results.born_ion is not None,
born_ion,
]
)
def born_data(results, match):
results.born[results.born_ion][int(match.group(1)) - 1, :] = bn.numset(
[float(match.group(i)) for i in range(2, 5)]
)
search.apd(
[
r"^ *([1-3]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+)$",
lambda results, line: results.born_ion >= 0 if results.born_ion is not None else results.born_ion,
born_data,
]
)
def born_section_stop(results, match):
results.born_ion = None
search.apd(
[
r"-------------------------------------",
lambda results, line: results.born_ion >= 1 if results.born_ion is not None else results.born_ion,
born_section_stop,
]
)
self.born_ion = None
self.born = []
micro_pyawk(self.filename, search, self)
self.born = bn.numset(self.born)
self.dielectric_tensor = self.dielectric_tensor.tolist()
self.piezo_tensor = self.piezo_tensor.tolist()
except Exception:
raise Exception("LEPSILON OUTCAR could not be parsed.")
def read_lepsilon_ionic(self):
"""
Reads an LEPSILON run, the ionic component.
# TODO: Document the actual variables.
"""
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_ionic_index = -1
search.apd(
[
r"MACROSCOPIC STATIC DIELECTRIC TENSOR IONIC",
None,
dielectric_section_start,
]
)
def dielectric_section_start2(results, match):
results.dielectric_ionic_index = 0
search.apd(
[
r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index == -1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_start2,
]
)
def dielectric_data(results, match):
results.dielectric_ionic_tensor[results.dielectric_ionic_index, :] = bn.numset(
[float(match.group(i)) for i in range(1, 4)]
)
results.dielectric_ionic_index += 1
search.apd(
[
r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_ionic_index >= 0
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_data,
]
)
def dielectric_section_stop(results, match):
results.dielectric_ionic_index = None
search.apd(
[
r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index >= 1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_stop,
]
)
self.dielectric_ionic_index = None
self.dielectric_ionic_tensor = bn.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_ionic_index = 0
search.apd(
[
r"PIEZOELECTRIC TENSOR IONIC CONTR for field in " r"x, y, z ",
None,
piezo_section_start,
]
)
def piezo_data(results, match):
results.piezo_ionic_tensor[results.piezo_ionic_index, :] = bn.numset(
[float(match.group(i)) for i in range(1, 7)]
)
results.piezo_ionic_index += 1
search.apd(
[
r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)"
+ r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)"
+ r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_ionic_index >= 0
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_data,
]
)
def piezo_section_stop(results, match):
results.piezo_ionic_index = None
search.apd(
[
"-------------------------------------",
lambda results, line: results.piezo_ionic_index >= 1
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_section_stop,
]
)
self.piezo_ionic_index = None
self.piezo_ionic_tensor = bn.zeros((3, 6))
micro_pyawk(self.filename, search, self)
self.dielectric_ionic_tensor = self.dielectric_ionic_tensor.tolist()
self.piezo_ionic_tensor = self.piezo_ionic_tensor.tolist()
except Exception:
raise Exception("ionic part of LEPSILON OUTCAR could not be parsed.")
def read_lcalcpol(self):
"""
Reads an LCALCPOL run.
# TODO: Document the actual variables.
"""
self.p_elec = None
self.p_sp1 = None
self.p_sp2 = None
self.p_ion = None
try:
search = []
# Always present spin/non-spin
def p_elec(results, match):
results.p_elec = bn.numset(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.apd(
[
r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None,
p_elec,
]
)
# If spin-polarized (and not noncollinear)
# save spin-polarized electronic values
if self.spin and not self.noncollinear:
def p_sp1(results, match):
results.p_sp1 = bn.numset(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.apd(
[
r"^.*p\[sp1\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
None,
p_sp1,
]
)
def p_sp2(results, match):
results.p_sp2 = bn.numset(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.apd(
[
r"^.*p\[sp2\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
None,
p_sp2,
]
)
def p_ion(results, match):
results.p_ion = bn.numset(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.apd(
[
r"^.*Ionic dipole moment: *p\[ion\]=" r"\( *([-0-9.Ee+]*)" r" *([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None,
p_ion,
]
)
micro_pyawk(self.filename, search, self)
except Exception:
raise Exception("LCALCPOL OUTCAR could not be parsed.")
def read_pseudo_zval(self):
"""
Create pseudopotential ZVAL dictionary.
"""
# pylint: disable=E1101
try:
def atom_symbols(results, match):
element_symbol = match.group(1)
if not hasattr(results, "atom_symbols"):
results.atom_symbols = []
results.atom_symbols.apd(element_symbol.strip())
def zvals(results, match):
zvals = match.group(1)
results.zvals = map(float, re.findtotal(r"-?\d+\.\d*", zvals))
search = []
search.apd([r"(?<=VRHFIN =)(.*)(?=:)", None, atom_symbols])
search.apd([r"^\s+ZVAL.*=(.*)", None, zvals])
micro_pyawk(self.filename, search, self)
zval_dict = {}
for x, y in zip(self.atom_symbols, self.zvals):
zval_dict.update({x: y})
self.zval_dict = zval_dict
# Clean-up
del self.atom_symbols
del self.zvals
except Exception:
raise Exception("ZVAL dict could not be parsed.")
def read_core_state_eigen(self):
"""
Read the core state eigenenergies at each ionic step.
Returns:
A list of dicts over the atoms, such as [{"AO": [core state eig]}].
The core state eigenenergy list for each AO spans all ionic
steps.
Example:
The core state eigenenergy of the 2s AO of the 6th atom of the
structure at the last ionic step is [5]["2s"][-1]
"""
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
while line != "":
line = foutcar.readline()
if "NIONS =" in line:
natom = int(line.sep_split("NIONS =")[1])
cl = [defaultdict(list) for i in range(natom)]
if "the core state eigen" in line:
iat = -1
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
break
data = line.sep_split()
# data will contain odd number of elements if it is
# the start of a new entry, or even number of elements
# if it continues the previous entry
if len(data) % 2 == 1:
iat += 1 # started parsing a new ion
data = data[1:] # remove element with ion number
for i in range(0, len(data), 2):
cl[iat][data[i]].apd(float(data[i + 1]))
return cl
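# Usage sketch (hedged): follow one core level across ionic steps. The "1s"
# label is illustrative; the available labels depend on the species in the run.
#
#     core_eigs = outcar.read_core_state_eigen()
#     first_atom_1s = core_eigs[0]["1s"]           # one value per ionic step
#     relaxation_shift = first_atom_1s[-1] - first_atom_1s[0]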
def read_avg_core_poten(self):
"""
Read the core potential at each ionic step.
Returns:
A list for each ionic step containing a list of the average core
potentials for each atom: [[avg core pot]].
Example:
The average core potential of the 2nd atom of the structure at the
last ionic step is: [-1][1]
"""
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
aps = []
while line != "":
line = foutcar.readline()
if "the normlizattion of the test charge is" in line:
ap = []
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
aps.apd(ap)
break
# the average core potentials of up to 5 elements are
# given per line; the potentials are separated by several
# spaces and numbered from 1 to natoms; the potentials are
# parsed in a fixed width format
bnots = int((len(line) - 1) / 17)
for i in range(bnots):
start = i * 17
ap.apd(float(line[start + 8 : start + 17]))
return aps
def as_dict(self):
"""
:return: MSONAble dict.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"efermi": self.efermi,
"run_stats": self.run_stats,
"magnetization": self.magnetization,
"charge": self.charge,
"total_magnetization": self.total_mag,
"nelect": self.nelect,
"is_stopped": self.is_stopped,
"drift": self.drift,
"ngf": self.ngf,
"sampling_radii": self.sampling_radii,
"electrostatic_potential": self.electrostatic_potential,
}
if self.lepsilon:
d.update(
{
"piezo_tensor": self.piezo_tensor,
"dielectric_tensor": self.dielectric_tensor,
"born": self.born,
}
)
if self.dfpt:
d.update({"internal_strain_tensor": self.internal_strain_tensor})
if self.dfpt and self.lepsilon:
d.update(
{
"piezo_ionic_tensor": self.piezo_ionic_tensor,
"dielectric_ionic_tensor": self.dielectric_ionic_tensor,
}
)
if self.lcalcpol:
d.update({"p_elec": self.p_elec, "p_ion": self.p_ion})
if self.spin and not self.noncollinear:
d.update({"p_sp1": self.p_sp1, "p_sp2": self.p_sp2})
d.update({"zval_dict": self.zval_dict})
if self.nmr_cs:
d.update(
{
"nmr_cs": {
"valence and core": self.data["chemical_shielding"]["valence_and_core"],
"valence_only": self.data["chemical_shielding"]["valence_only"],
"g0": self.data["cs_g0_contribution"],
"core": self.data["cs_core_contribution"],
"raw": self.data["unsym_cs_tensor"],
}
}
)
if self.nmr_efg:
d.update(
{
"nmr_efg": {
"raw": self.data["unsym_efg_tensor"],
"parameters": self.data["efg"],
}
}
)
if self.has_onsite_density_matrices:
# cast Spin to str for consistency with electronic_structure
# TODO: improve handling of Enum (de)serialization in monty
onsite_density_matrices = [{str(k): v for k, v in d.items()} for d in self.data["onsite_density_matrices"]]
d.update({"onsite_density_matrices": onsite_density_matrices})
return d
def read_fermi_contact_shift(self):
"""
output example:
Fermi contact (isotropic) hyperfine coupling parameter (MHz)
-------------------------------------------------------------
ion A_pw A_1PS A_1AE A_1c A_tot
-------------------------------------------------------------
1 -0.002 -0.002 -0.051 0.000 -0.052
2 -0.002 -0.002 -0.051 0.000 -0.052
3 0.056 0.056 0.321 -0.048 0.321
-------------------------------------------------------------
, which corresponds to
[[-0.002, -0.002, -0.051, 0.0, -0.052],
[-0.002, -0.002, -0.051, 0.0, -0.052],
[0.056, 0.056, 0.321, -0.048, 0.321]] from 'fch' data
"""
# Fermi contact (isotropic) hyperfine coupling parameter (MHz)
header_pattern1 = (
r"\s*Fermi contact \(isotropic\) hyperfine coupling parameter \(MHz\)\s+"
r"\s*\-+"
r"\s*ion\s+A_pw\s+A_1PS\s+A_1AE\s+A_1c\s+A_tot\s+"
r"\s*\-+"
)
row_pattern1 = r"(?:\d+)\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 5)
footer_pattern = r"\-+"
fch_table = self.read_table_pattern(
header_pattern1,
row_pattern1,
footer_pattern,
postprocess=float,
last_one_only=True,
)
# Dipolar hyperfine coupling parameters (MHz)
header_pattern2 = (
r"\s*Dipolar hyperfine coupling parameters \(MHz\)\s+"
r"\s*\-+"
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+A_xy\s+A_xz\s+A_yz\s+"
r"\s*\-+"
)
row_pattern2 = r"(?:\d+)\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 6)
dh_table = self.read_table_pattern(
header_pattern2,
row_pattern2,
footer_pattern,
postprocess=float,
last_one_only=True,
)
# Total hyperfine coupling parameters after diagonalization (MHz)
header_pattern3 = (
r"\s*Total hyperfine coupling parameters after diagonalization \(MHz\)\s+"
r"\s*\(convention: \|A_zz\| > \|A_xx\| > \|A_yy\|\)\s+"
r"\s*\-+"
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+asymmetry \(A_yy - A_xx\)/ A_zz\s+"
r"\s*\-+"
)
row_pattern3 = r"(?:\d+)\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 4)
th_table = self.read_table_pattern(
header_pattern3,
row_pattern3,
footer_pattern,
postprocess=float,
last_one_only=True,
)
fc_shift_table = {"fch": fch_table, "dh": dh_table, "th": th_table}
self.data["fermi_contact_shift"] = fc_shift_table
class VolumetricData(MSONable):
"""
Simple volumetric object for reading LOCPOT and CHGCAR type files.
.. attribute:: structure
Structure associated with the Volumetric Data object
..attribute:: is_spin_polarized
True if run is spin polarized
..attribute:: dim
Tuple of dimensions of volumetric grid in each direction (nx, ny, nz).
..attribute:: data
Actual data as a dict of {string: array}. The keys are "total"
and "difference", in accordance with the output format of VASP LOCPOT and
CHGCAR files, where the total spin density is written first, followed
by the difference spin density.
.. attribute:: ngridpts
Total number of grid points in volumetric data.
"""
def __init__(self, structure, data, distance_matrix=None, data_aug=None):
"""
Typically, this constructor is not used directly; the static
from_file constructor is used instead. This constructor is designed to allow
summation and other operations between VolumetricData objects.
Args:
structure: Structure associated with the volumetric data
data: Actual volumetric data.
data_aug: Any extra information associated with volumetric data
(typically augmentation charges)
distance_matrix: A pre-computed distance matrix if available.
Useful to pass distance matrices between sums,
short-circuiting an otherwise expensive operation.
"""
self.structure = structure
self.is_spin_polarized = len(data) >= 2
self.is_soc = len(data) >= 4
self.dim = data["total"].shape
self.data = data
self.data_aug = data_aug if data_aug else {}
self.ngridpts = self.dim[0] * self.dim[1] * self.dim[2]
# lazy init the spin data since this is not always needed.
self._spin_data = {}
self._distance_matrix = {} if not distance_matrix else distance_matrix
self.xpoints = bn.linspace(0.0, 1.0, num=self.dim[0])
self.ypoints = bn.linspace(0.0, 1.0, num=self.dim[1])
self.zpoints = bn.linspace(0.0, 1.0, num=self.dim[2])
self.interpolator = RegularGridInterpolator(
(self.xpoints, self.ypoints, self.zpoints),
self.data["total"],
bounds_error=True,
)
self.name = "VolumetricData"
@property
def spin_data(self):
"""
The data decomposed into actual spin data as {spin: data}.
Essentially, this provides the actual Spin.up and Spin.down data
instead of the total and difference. Note that by definition, a
non-spin-polarized run would have Spin.up data == Spin.down data.
"""
if not self._spin_data:
spin_data = {}
spin_data[Spin.up] = 0.5 * (self.data["total"] + self.data.get("difference", 0))
spin_data[Spin.down] = 0.5 * (self.data["total"] - self.data.get("difference", 0))
self._spin_data = spin_data
return self._spin_data
def get_axis_grid(self, ind):
"""
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.dim
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)]
def __add_concat__(self, other):
return self.linear_add_concat(other, 1.0)
def __sub__(self, other):
return self.linear_add_concat(other, -1.0)
def copy(self):
"""
:return: Copy of Volumetric object
"""
return VolumetricData(
self.structure,
{k: v.copy() for k, v in self.data.items()},
distance_matrix=self._distance_matrix,
data_aug=self.data_aug,
)
def linear_add_concat(self, other, scale_factor=1.0):
"""
Method to do a linear sum of volumetric objects. Used by the + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other.
"""
if self.structure != other.structure:
warnings.warn("Structures are differenceerent. Make sure you know what you are doing...")
if self.data.keys() != other.data.keys():
raise ValueError("Data have differenceerent keys! Maybe one is spin-" "polarized and the other is not?")
# To add_concat checks
data = {}
for k in self.data.keys():
data[k] = self.data[k] + scale_factor * other.data[k]
return VolumetricData(self.structure, data, self._distance_matrix)
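# Usage sketch (hedged): the + and - operators defined above make charge-density
# differences straightforward. File names are hypothetical; the three CHGCARs
# must share the same grid and structure for the result to be meaningful.
#
#     chg_ab = Chgcar.from_file("CHGCAR_AB")
#     chg_a = Chgcar.from_file("CHGCAR_A")
#     chg_b = Chgcar.from_file("CHGCAR_B")
#     delta = chg_ab - chg_a - chg_b
#     delta.write_file("CHGCAR_diff")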
@staticmethod
def parse_file(filename):
"""
Convenience method to parse a generic volumetric data file in the vasp
like format. Used by subclasses for parsing file.
Args:
filename (str): Path of file to parse
Returns:
(poscar, data)
"""
# pylint: disable=E1136,E1126
poscar_read = False
poscar_string = []
dataset = []
total_dataset = []
# for holding any strings in the input that are not Poscar
# or VolumetricData (typically augmentation charges)
total_dataset_aug = {}
dim = None
dimline = None
read_dataset = False
ngrid_pts = 0
data_count = 0
poscar = None
with zopen(filename, "rt") as f:
for line in f:
original_line = line
line = line.strip()
if read_dataset:
for tok in line.sep_split():
if data_count < ngrid_pts:
# This complicated procedure is necessary because
# vasp outputs x as the fastest index, followed by y
# then z.
no_x = data_count // dim[0]
dataset[data_count % dim[0], no_x % dim[1], no_x // dim[1]] = float(tok)
data_count += 1
if data_count >= ngrid_pts:
read_dataset = False
data_count = 0
total_dataset.apd(dataset)
elif not poscar_read:
if line != "" or len(poscar_string) == 0:
poscar_string.apd(line)
elif line == "":
poscar = Poscar.from_string("\n".join(poscar_string))
poscar_read = True
elif not dim:
dim = [int(i) for i in line.sep_split()]
ngrid_pts = dim[0] * dim[1] * dim[2]
dimline = line
read_dataset = True
dataset = bn.zeros(dim)
elif line == dimline:
# when line == dimline, expect volumetric data to follow
# so set read_dataset to True
read_dataset = True
dataset = bn.zeros(dim)
else:
# store any extra lines that were not part of the
# volumetric data so we know which set of data the extra
# lines are associated with
key = len(total_dataset) - 1
if key not in total_dataset_aug:
total_dataset_aug[key] = []
total_dataset_aug[key].apd(original_line)
if len(total_dataset) == 4:
data = {
"total": total_dataset[0],
"difference_x": total_dataset[1],
"difference_y": total_dataset[2],
"difference_z": total_dataset[3],
}
data_aug = {
"total": total_dataset_aug.get(0, None),
"difference_x": total_dataset_aug.get(1, None),
"difference_y": total_dataset_aug.get(2, None),
"difference_z": total_dataset_aug.get(3, None),
}
# construct a "difference" dict for scalar-like magnetization density,
# referenced to an arbitrary direction (using same method as
# pymatgen.electronic_structure.core.Magmom, see
# Magmom documentation for justification for this)
# TODO: re-examine this, and also similar behavior in
# Magmom - @mkhorton
# TODO: does CHGCAR change with different SAXIS?
difference_xyz = bn.numset([data["difference_x"], data["difference_y"], data["difference_z"]])
difference_xyz = difference_xyz.change_shape_to((3, dim[0] * dim[1] * dim[2]))
ref_direction = bn.numset([1.01, 1.02, 1.03])
ref_sign = bn.sign(bn.dot(ref_direction, difference_xyz))
difference = bn.multiply(bn.linalg.normlizattion(difference_xyz, axis=0), ref_sign)
data["difference"] = difference.change_shape_to((dim[0], dim[1], dim[2]))
elif len(total_dataset) == 2:
data = {"total": total_dataset[0], "difference": total_dataset[1]}
data_aug = {
"total": total_dataset_aug.get(0, None),
"difference": total_dataset_aug.get(1, None),
}
else:
data = {"total": total_dataset[0]}
data_aug = {"total": total_dataset_aug.get(0, None)}
return poscar, data, data_aug
def write_file(self, file_name, vasp4_compatible=False):
"""
Write the VolumetricData object to a vasp compatible file.
Args:
file_name (str): Path to a file
vasp4_compatible (bool): True if the format is vasp4 compatible
"""
def _print_fortran_float(f):
"""
Fortran codes print floats with a leading zero in scientific
notation. When writing CHGCAR files, we adopt this convention
to ensure written CHGCAR files are byte-to-byte identical to
their input files as far as possible.
:param f: float
:return: str
"""
s = f"{f:.10E}"
if f >= 0:
return "0." + s[0] + s[2:12] + "E" + f"{int(s[13:]) + 1:+03}"
return "-." + s[1] + s[3:13] + "E" + f"{int(s[14:]) + 1:+03}"
with zopen(file_name, "wt") as f:
p = Poscar(self.structure)
# use original name if it's been set (e.g. from Chgcar)
comment = getattr(self, "name", p.comment)
lines = comment + "\n"
lines += " 1.00000000000000\n"
latt = self.structure.lattice.matrix
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[0, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[1, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[2, :])
if not vasp4_compatible:
lines += "".join(["%5s" % s for s in p.site_symbols]) + "\n"
lines += "".join(["%6d" % x for x in p.natoms]) + "\n"
lines += "Direct\n"
for site in self.structure:
lines += "%10.6f%10.6f%10.6f\n" % tuple(site.frac_coords)
lines += " \n"
f.write(lines)
a = self.dim
def write_spin(data_type):
lines = []
count = 0
f.write(f" {a[0]} {a[1]} {a[2]}\n")
for (k, j, i) in itertools.product(list(range(a[2])), list(range(a[1])), list(range(a[0]))):
lines.apd(_print_fortran_float(self.data[data_type][i, j, k]))
count += 1
if count % 5 == 0:
f.write(" " + "".join(lines) + "\n")
lines = []
else:
lines.apd(" ")
if count % 5 != 0:
f.write(" " + "".join(lines) + " \n")
f.write("".join(self.data_aug.get(data_type, [])))
write_spin("total")
if self.is_spin_polarized and self.is_soc:
write_spin("difference_x")
write_spin("difference_y")
write_spin("difference_z")
elif self.is_spin_polarized:
write_spin("difference")
def value_at(self, x, y, z):
"""
Get a data value from self.data at a given point (x, y, z) in terms
of fractional lattice parameters. Will be interpolated using a
RegularGridInterpolator on self.data if (x, y, z) is not in the original
set of data points.
Args:
x (float): Fraction of lattice vector a.
y (float): Fraction of lattice vector b.
z (float): Fraction of lattice vector c.
Returns:
Value from self.data (potentially interpolated) corresponding to
the point (x, y, z).
"""
return self.interpolator([x, y, z])[0]
def linear_piece(self, p1, p2, n=100):
"""
Get a linear piece of the volumetric data with n data points from
point p1 to point p2, in the form of a list.
Args:
p1 (list): 3-element list containing fractional coordinates of the first point.
p2 (list): 3-element list containing fractional coordinates of the second point.
n (int): Number of data points to collect, defaults to 100.
Returns:
List of n data points (mostly interpolated) representing a linear piece of the
data from point p1 to point p2.
"""
assert type(p1) in [list, bn.ndnumset] and type(p2) in [list, bn.ndnumset]
assert len(p1) == 3 and len(p2) == 3
xpts = bn.linspace(p1[0], p2[0], num=n)
ypts = bn.linspace(p1[1], p2[1], num=n)
zpts = bn.linspace(p1[2], p2[2], num=n)
return [self.value_at(xpts[i], ypts[i], zpts[i]) for i in range(n)]
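# Usage sketch (hedged): sample the local potential along the c axis through
# the middle of the cell (the file name is hypothetical).
#
#     locpot = Locpot.from_file("LOCPOT")
#     profile = locpot.linear_piece([0.5, 0.5, 0.0], [0.5, 0.5, 1.0], n=200)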
def get_integrated_difference(self, ind, radius, nbins=1):
"""
Get the integrated difference of atom index ind up to radius. This can be
an extremely computationally intensive process, depending on how many
grid points are in the VolumetricData.
Args:
ind (int): Index of atom.
radius (float): Radius of integration.
nbins (int): Number of bins. Defaults to 1. This allows one to
obtain a list of the cumulative charge integration values for
radii of [radius/nbins, 2 * radius/nbins, ...].
Returns:
Differential integrated charge as an array of [[radius, value],
...]. The format is for ease of plotting, e.g., plt.plot(data[:, 0],
data[:, 1]).
"""
# For non-spin-polarized runs, this is zero by definition.
if not self.is_spin_polarized:
radii = [radius / nbins * (i + 1) for i in range(nbins)]
data = bn.zeros((nbins, 2))
data[:, 0] = radii
return data
struct = self.structure
a = self.dim
if ind not in self._distance_matrix or self._distance_matrix[ind]["get_max_radius"] < radius:
coords = []
for (x, y, z) in itertools.product(*[list(range(i)) for i in a]):
coords.apd([x / a[0], y / a[1], z / a[2]])
sites_dist = struct.lattice.get_points_in_sphere(coords, struct[ind].coords, radius)
self._distance_matrix[ind] = {
"get_max_radius": radius,
"data": bn.numset(sites_dist),
}
data = self._distance_matrix[ind]["data"]
# Use boolean indexing to find all charges within the desired distance.
inds = data[:, 1] <= radius
dists = data[inds, 1]
data_inds = bn.rint(bn.mod(list(data[inds, 0]), 1) * bn.tile(a, (len(dists), 1))).convert_type(int)
vals = [self.data["difference"][x, y, z] for x, y, z in data_inds]
hist, edges = bn.hist_operation(dists, bins=nbins, range=[0, radius], weights=vals)
data = bn.zeros((nbins, 2))
data[:, 0] = edges[1:]
data[:, 1] = [total_count(hist[0 : i + 1]) / self.ngridpts for i in range(nbins)]
return data
def get_average_along_axis(self, ind):
"""
Get the average of the total volumetric data along a certain axis direction.
For example, useful for visualizing Hartree potentials from a LOCPOT
file.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
"""
m = self.data["total"]
ng = self.dim
if ind == 0:
total = bn.total_count(bn.total_count(m, axis=1), 1)
elif ind == 1:
total = bn.total_count(bn.total_count(m, axis=0), 1)
else:
total = bn.total_count(bn.total_count(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
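# Usage sketch (hedged): a common use of get_average_along_axis is estimating a
# slab's vacuum level from a LOCPOT; the Fermi level would come from the
# corresponding OUTCAR/vasprun.xml and is assumed to be available as `efermi`.
#
#     planar_avg = locpot.get_average_along_axis(2)   # average over a-b planes
#     work_function = max(planar_avg) - efermi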
def to_hdf5(self, filename):
"""
Writes the VolumetricData to an HDF5 file, which is a highly optimized
format for reading and storing large data. The mapping of the VolumetricData
to this file format is as follows:
VolumetricData.data -> f["vdata"]
VolumetricData.structure ->
f["Z"]: Sequence of atomic numbers
f["fcoords"]: Fractional coords
f["lattice"]: Lattice in the pymatgen.core.lattice.Lattice matrix
format
f.attrs["structure_json"]: String of json representation
Args:
filename (str): Filename to output to.
"""
import h5py
with h5py.File(filename, "w") as f:
ds = f.create_dataset("lattice", (3, 3), dtype="float")
ds[...] = self.structure.lattice.matrix
ds = f.create_dataset("Z", (len(self.structure.species),), dtype="i")
ds[...] = bn.numset([sp.Z for sp in self.structure.species])
ds = f.create_dataset("fcoords", self.structure.frac_coords.shape, dtype="float")
ds[...] = self.structure.frac_coords
dt = h5py.special_dtype(vlen=str)
ds = f.create_dataset("species", (len(self.structure.species),), dtype=dt)
ds[...] = [str(sp) for sp in self.structure.species]
grp = f.create_group("vdata")
for k, v in self.data.items():
ds = grp.create_dataset(k, self.data[k].shape, dtype="float")
ds[...] = self.data[k]
f.attrs["name"] = self.name
f.attrs["structure_json"] = json.dumps(self.structure.as_dict())
@classmethod
def from_hdf5(cls, filename, **kwargs):
"""
Reads VolumetricData from HDF5 file.
:param filename: Filename
:return: VolumetricData
"""
import h5py
with h5py.File(filename, "r") as f:
data = {k: bn.numset(v) for k, v in f["vdata"].items()}
data_aug = None
if "vdata_aug" in f:
data_aug = {k: bn.numset(v) for k, v in f["vdata_aug"].items()}
structure = Structure.from_dict(json.loads(f.attrs["structure_json"]))
return cls(structure, data=data, data_aug=data_aug, **kwargs)
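# Usage sketch (hedged): round-trip volumetric data through HDF5 (requires
# h5py; file names are hypothetical).
#
#     chgcar = Chgcar.from_file("CHGCAR")
#     chgcar.to_hdf5("chgcar.hdf5")
#     vd = VolumetricData.from_hdf5("chgcar.hdf5")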
class Locpot(VolumetricData):
"""
Simple object for reading a LOCPOT file.
"""
def __init__(self, poscar, data):
"""
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
"""
super().__init__(poscar.structure, data)
self.name = poscar.comment
@classmethod
def from_file(cls, filename, **kwargs):
"""
Reads a LOCPOT file.
:param filename: Filename
:return: Locpot
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return cls(poscar, data, **kwargs)
class Chgcar(VolumetricData):
"""
Simple object for reading a CHGCAR file.
"""
def __init__(self, poscar, data, data_aug=None):
"""
Args:
poscar (Poscar or Structure): Object containing structure.
data: Actual data.
data_aug: Augmentation charge data
"""
# allow for poscar or structure files to be passed
if isinstance(poscar, Poscar):
tmp_struct = poscar.structure
self.poscar = poscar
self.name = poscar.comment
elif isinstance(poscar, Structure):
tmp_struct = poscar
self.poscar = Poscar(poscar)
self.name = None
super().__init__(tmp_struct, data, data_aug=data_aug)
self._distance_matrix = {}
@staticmethod
def from_file(filename):
"""
Reads a CHGCAR file.
:param filename: Filename
:return: Chgcar
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return Chgcar(poscar, data, data_aug=data_aug)
@property
def net_magnetization(self):
"""
:return: Net magnetization from Chgcar
"""
if self.is_spin_polarized:
return bn.total_count(self.data["difference"])
return None
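# Usage sketch (hedged): integrate the spin density around one site for a rough
# local moment; the site index and radius below are illustrative.
#
#     chgcar = Chgcar.from_file("CHGCAR")
#     radial = chgcar.get_integrated_difference(ind=0, radius=2.0, nbins=20)
#     local_moment = radial[-1, 1]       # cumulative value at the largest radius
#     total_moment = chgcar.net_magnetization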
class Elfcar(VolumetricData):
"""
Read an ELFCAR file which contains the Electron Localization Function (ELF)
as calculated by VASP.
For ELF, "total" key refers to Spin.up, and "difference" refers to Spin.down.
This also contains information on the kinetic energy density.
"""
def __init__(self, poscar, data):
"""
Args:
poscar (Poscar or Structure): Object containing structure.
data: Actual data.
"""
# allow for poscar or structure files to be passed
if isinstance(poscar, Poscar):
tmp_struct = poscar.structure
self.poscar = poscar
elif isinstance(poscar, Structure):
tmp_struct = poscar
self.poscar = Poscar(poscar)
super().__init__(tmp_struct, data)
# TODO: modify VolumetricData so that the correct keys can be used.
# for ELF, instead of "total" and "difference" keys we have
# "Spin.up" and "Spin.down" keys
# I believe this is correct, but there's not much documentation -mkhorton
self.data = data
@classmethod
def from_file(cls, filename):
"""
Reads a ELFCAR file.
:param filename: Filename
:return: Elfcar
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return cls(poscar, data)
def get_alpha(self):
"""
Get the parameter alpha filter_condition ELF = 1/(1+alpha^2).
"""
alpha_data = {}
for k, v in self.data.items():
alpha = 1 / v
alpha = alpha - 1
alpha = bn.sqrt(alpha)
alpha_data[k] = alpha
return VolumetricData(self.structure, alpha_data)
class Procar:
"""
Object for reading a PROCAR file.
.. attribute:: data
The PROCAR data of the form below. Note that VASP uses 1-based indexing,
but all indices are converted to 0-based here.::
{
spin: nd.numset accessed with (k-point index, band index,
ion index, orbital index)
}
.. attribute:: weights
The weights associated with each k-point as an array of length
nkpoints.
..attribute:: phase_factors
Phase factors, where present (e.g. LORBIT = 12). A dict of the form:
{
spin: complex nd.numset accessed with (k-point index, band index,
ion index, orbital index)
}
..attribute:: nbands
Number of bands
..attribute:: nkpoints
Number of k-points
..attribute:: nions
Number of ions
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing PROCAR.
"""
headers = None
with zopen(filename, "rt") as f:
preambleexpr = re.compile(r"# of k-points:\s*(\d+)\s+# of bands:\s*(\d+)\s+# of " r"ions:\s*(\d+)")
kpointexpr = re.compile(r"^k-point\s+(\d+).*weight = ([0-9\.]+)")
bandexpr = re.compile(r"^band\s+(\d+)")
ionexpr = re.compile(r"^ion.*")
expr = re.compile(r"^([0-9]+)\s+")
current_kpoint = 0
current_band = 0
done = False
spin = Spin.down
weights = None
# pylint: disable=E1137
for l in f:
l = l.strip()
if bandexpr.match(l):
m = bandexpr.match(l)
current_band = int(m.group(1)) - 1
done = False
elif kpointexpr.match(l):
m = kpointexpr.match(l)
current_kpoint = int(m.group(1)) - 1
weights[current_kpoint] = float(m.group(2))
if current_kpoint == 0:
spin = Spin.up if spin == Spin.down else Spin.down
done = False
elif headers is None and ionexpr.match(l):
headers = l.sep_split()
headers.pop(0)
headers.pop(-1)
def ff():
return bn.zeros((nkpoints, nbands, nions, len(headers)))
data = defaultdict(ff)
def f2():
return bn.full_value_func(
(nkpoints, nbands, nions, len(headers)),
bn.NaN,
dtype=bn.complex128,
)
phase_factors = defaultdict(f2)
elif expr.match(l):
toks = l.sep_split()
index = int(toks.pop(0)) - 1
num_data = bn.numset([float(t) for t in toks[: len(headers)]])
if not done:
data[spin][current_kpoint, current_band, index, :] = num_data
else:
if len(toks) > len(headers):
# new format of PROCAR (vasp 5.4.4)
num_data = bn.numset([float(t) for t in toks[: 2 * len(headers)]])
for orb in range(len(headers)):
phase_factors[spin][current_kpoint, current_band, index, orb] = complex(
num_data[2 * orb], num_data[2 * orb + 1]
)
else:
# old format of PROCAR (vasp 5.4.1 and before)
if bn.ifnan(phase_factors[spin][current_kpoint, current_band, index, 0]):
phase_factors[spin][current_kpoint, current_band, index, :] = num_data
else:
phase_factors[spin][current_kpoint, current_band, index, :] += 1j * num_data
elif l.startswith("tot"):
done = True
elif preambleexpr.match(l):
m = preambleexpr.match(l)
nkpoints = int(m.group(1))
nbands = int(m.group(2))
nions = int(m.group(3))
weights = bn.zeros(nkpoints)
self.nkpoints = nkpoints
self.nbands = nbands
self.nions = nions
self.weights = weights
self.orbitals = headers
self.data = data
self.phase_factors = phase_factors
def get_projection_on_elements(self, structure):
"""
Method returning a dictionary of projections on elements.
Args:
structure (Structure): Input structure.
Returns:
a dictionary of the form {Spin.up: [band index][kpoint index]{Element: value}}
"""
dico = {}
for spin in self.data.keys():
dico[spin] = [[defaultdict(float) for i in range(self.nkpoints)] for j in range(self.nbands)]
for iat in range(self.nions):
name = structure.species[iat].symbol
for spin, d in self.data.items():
for k, b in itertools.product(range(self.nkpoints), range(self.nbands)):
dico[spin][b][k][name] = bn.total_count(d[k, b, iat, :])
return dico
def get_occupation(self, atom_index, orbital):
"""
Returns the occupation for a particular orbital of a particular atom.
Args:
atom_index (int): Index of the atom in the PROCAR. It should be noted
that VASP uses 1-based indexing for atoms, but this is
converted to 0-based indexing in this parser to be
consistent with the representation of structures in pymatgen.
orbital (str): An orbital. If it is a single character, e.g., s,
p, d or f, the sum of all s-type, p-type, d-type or f-type
orbital occupations is returned respectively. If it is a
specific orbital, e.g., px, dxy, etc., only the occupation
of that orbital is returned.
Returns:
Summed occupation of the orbital of the atom.
"""
orbital_index = self.orbitals.index(orbital)
return {
spin: bn.total_count(d[:, :, atom_index, orbital_index] * self.weights[:, None]) for spin, d in self.data.items()
}
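# Usage sketch (hedged): per-orbital occupation from a PROCAR. The file name is
# hypothetical; a spin-polarized run returns one value per Spin key.
#
#     procar = Procar("PROCAR")
#     s_occ = procar.get_occupation(atom_index=0, orbital="s")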
class Oszicar:
"""
A basic parser for an OSZICAR output from VASP. In general, while the
OSZICAR is useful for a quick look at the output from a VASP run, we
recommend that you use the Vasprun parser instead, which gives far richer
information about a run.
.. attribute:: electronic_steps
All electronic steps as a list of list of dict. e.g.,
[[{"rms": 160.0, "E": 4507.24605593, "dE": 4507.2, "N": 1,
"deps": -17777.0, "ncg": 16576}, ...], [....]
where electronic_steps[index] refers to the list of electronic steps
in one ionic step, and electronic_steps[index][subindex] refers to a
particular electronic step at subindex in the ionic step at index. The
dict of properties depends on the type of VASP run, but in general,
"E", "dE" and "rms" should be present in almost all runs.
.. attribute:: ionic_steps:
All ionic_steps as a list of dict, e.g.,
[{"dE": -526.36, "E0": -526.36024, "mag": 0.0, "F": -526.36024},
...]
This is the typical output from VASP at the end of each ionic step.
"""
def __init__(self, filename):
"""
Args:
filename (str): Filename of file to parse
"""
electronic_steps = []
ionic_steps = []
ionic_pattern = re.compile(
r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+" r"E0=\s*([\d\-\.E\+]+)\s+" r"d\s*E\s*=\s*([\d\-\.E\+]+)$"
)
ionic_mag_pattern = re.compile(
r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"d\s*E\s*=\s*([\d\-\.E\+]+)\s+"
r"mag=\s*([\d\-\.E\+]+)"
)
ionic_MD_pattern = re.compile(
r"(\d+)\s+T=\s*([\d\-\.E\+]+)\s+"
r"E=\s*([\d\-\.E\+]+)\s+"
r"F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"EK=\s*([\d\-\.E\+]+)\s+"
r"SP=\s*([\d\-\.E\+]+)\s+"
r"SK=\s*([\d\-\.E\+]+)"
)
electronic_pattern = re.compile(r"\s*\w+\s*:(.*)")
def smart_convert(header, num):
try:
if header in ("N", "ncg"):
v = int(num)
return v
v = float(num)
return v
except ValueError:
return "--"
header = []
with zopen(filename, "rt") as fid:
for line in fid:
line = line.strip()
m = electronic_pattern.match(line)
if m:
toks = m.group(1).sep_split()
data = {header[i]: smart_convert(header[i], toks[i]) for i in range(len(toks))}
if toks[0] == "1":
electronic_steps.apd([data])
else:
electronic_steps[-1].apd(data)
elif ionic_pattern.match(line.strip()):
m = ionic_pattern.match(line.strip())
ionic_steps.apd(
{
"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4)),
}
)
elif ionic_mag_pattern.match(line.strip()):
m = ionic_mag_pattern.match(line.strip())
ionic_steps.apd(
{
"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4)),
"mag": float(m.group(5)),
}
)
elif ionic_MD_pattern.match(line.strip()):
m = ionic_MD_pattern.match(line.strip())
ionic_steps.apd(
{
"T": float(m.group(2)),
"E": float(m.group(3)),
"F": float(m.group(4)),
"E0": float(m.group(5)),
"EK": float(m.group(6)),
"SP": float(m.group(7)),
"SK": float(m.group(8)),
}
)
elif re.match(r"^\s*N\s+E\s*", line):
header = line.strip().replace("d eps", "deps").sep_split()
self.electronic_steps = electronic_steps
self.ionic_steps = ionic_steps
@property
def total_energies(self):
"""
Compilation of total energies from all electronic steps and ionic steps
as a tuple of list of energies, e.g.,
((4507.24605593, 143.824705755, -512.073149912, ...), ...)
"""
total_energies = []
for i in range(len(self.electronic_steps)):
energies = [step["E"] for step in self.electronic_steps[i]]
energies.apd(self.ionic_steps[i]["F"])
total_energies.apd(tuple(energies))
return tuple(total_energies)
@property # type: ignore
@unitized("eV")
def final_energy(self):
"""
Final energy from run.
"""
return self.ionic_steps[-1]["E0"]
def as_dict(self):
"""
:return: MSONable dict
"""
return {
"electronic_steps": self.electronic_steps,
"ionic_steps": self.ionic_steps,
}
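# Illustrative usage sketch (added for documentation; not part of the original
# module). The file name "OSZICAR" is an assumption about the working directory.
#
#   osz = Oszicar("OSZICAR")
#   print(osz.final_energy)             # E0 of the last ionic step, in eV
#   print(len(osz.ionic_steps))         # number of ionic steps parsed
#   print(osz.total_energies[0][:3])    # first electronic-step energies of step 1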
class VaspParserError(Exception):
"""
Exception class for VASP parsing.
"""
pass
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None, projections=False):
"""
This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided in several branches named
"branch_x". If the run has not been divided in branches the method will
turn to parsing vasprun.xml directly.
The method returns None is there"s a parsing error
Args:
dir_name: Directory containing total bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object
"""
# TODO: Add better error handling!!!
if os.path.exists(os.path.join(dir_name, "branch_0")):
# get total branch dir names
branch_dir_names = [os.path.absolutepath(d) for d in glob.glob(f"{dir_name}/branch_*") if os.path.isdir(d)]
# sort by the directory name (e.g, branch_10)
sorted_branch_dir_names = sorted(branch_dir_names, key=lambda x: int(x.sep_split("_")[-1]))
# populate branches with Bandstructure instances
branches = []
for dname in sorted_branch_dir_names:
xml_file = os.path.join(dname, "vasprun.xml")
if os.path.exists(xml_file):
run = Vasprun(xml_file, parse_projected_eigen=projections)
branches.apd(run.get_band_structure(efermi=efermi))
else:
# It might be better to throw an exception
warnings.warn(f"Skipping {dname}. Unable to find {xml_file}")
return get_reconstructed_band_structure(branches, efermi)
xml_file = os.path.join(dir_name, "vasprun.xml")
# Better handling of Errors
if os.path.exists(xml_file):
return Vasprun(xml_file, parse_projected_eigen=projections).get_band_structure(
kpoints_filename=None, efermi=efermi
)
return None
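# Illustrative usage sketch (added for documentation). The directory name
# "./bs_run" is an assumption; it should contain either branch_* subfolders or
# a single vasprun.xml.
#
#   bs = get_band_structure_from_vasp_multiple_branches("./bs_run", projections=False)
#   if bs is not None:
#       print(bs.get_band_gap())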
class Xdatcar:
"""
Class representing an XDATCAR file. Only tested with VASP 5.x files.
.. attribute:: structures
List of structures parsed from XDATCAR.
.. attribute:: comment
Optional comment string.
Authors: <NAME>
"""
def __init__(self, filename, ionicstep_start=1, ionicstep_end=None, comment=None):
"""
Init a Xdatcar.
Args:
filename (str): Filename of input XDATCAR file.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
"""
preamble = None
coords_str = []
structures = []
preamble_done = False
if ionicstep_start < 1:
raise Exception("Start ionic step cannot be less than 1")
if ionicstep_end is not None and ionicstep_end < 1:
raise Exception("End ionic step cannot be less than 1")
# pylint: disable=E1136
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
title = l
elif title == l:
preamble_done = False
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.apd(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.apd(p.structure)
if ionicstep_cnt >= ionicstep_end:
break
ionicstep_cnt += 1
coords_str = []
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.apd(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.apd(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.apd(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.apd(p.structure)
if ionicstep_cnt >= ionicstep_end:
break
ionicstep_cnt += 1
coords_str = []
else:
coords_str.apd(l)
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.apd(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.apd(p.structure)
self.structures = structures
self.comment = comment or self.structures[0].formula
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Xdatcar. Similar to 6th line in
vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def connect(self, filename, ionicstep_start=1, ionicstep_end=None):
"""
Concatenate structures in file to Xdatcar.
Args:
filename (str): Filename of XDATCAR file to be concatenated.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
TODO(rambalachandran):
Requires a check to ensure if the new concatenating file has the
same lattice structure and atoms as the Xdatcar class.
"""
preamble = None
coords_str = []
structures = self.structures
preamble_done = False
if ionicstep_start < 1:
raise Exception("Start ionic step cannot be less than 1")
if ionicstep_end is not None and ionicstep_end < 1:
raise Exception("End ionic step cannot be less than 1")
# pylint: disable=E1136
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.apd(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.apd(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.apd(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.apd(p.structure)
ionicstep_cnt += 1
coords_str = []
else:
coords_str.apd(l)
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.apd(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.apd(p.structure)
self.structures = structures
def get_string(self, ionicstep_start=1, ionicstep_end=None, significant_figures=8):
"""
Write Xdatcar class to a string.
Args:
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
significant_figures (int): Number of significant figures.
"""
if ionicstep_start < 1:
raise Exception("Start ionic step cannot be less than 1")
if ionicstep_end is not None and ionicstep_end < 1:
raise Exception("End ionic step cannot be less than 1")
latt = self.structures[0].lattice
if bn.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
lines = [self.comment, "1.0", str(latt)]
lines.apd(" ".join(self.site_symbols))
lines.apd(" ".join([str(x) for x in self.natoms]))
format_str = f"{{:.{significant_figures}f}}"
ionicstep_cnt = 1
output_cnt = 1
for cnt, structure in enumerate(self.structures):
ionicstep_cnt = cnt + 1
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
lines.apd("Direct configuration=" + " " * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.apd(line)
output_cnt += 1
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
lines.apd("Direct configuration=" + " " * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.apd(line)
output_cnt += 1
return "\n".join(lines) + "\n"
def write_file(self, filename, **kwargs):
"""
Write Xdatcar class into a file.
Args:
filename (str): Filename of output XDATCAR file.
The supported kwargs are the same as those for the
Xdatcar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def __str__(self):
return self.get_string()
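# Illustrative usage sketch (added for documentation). The XDATCAR file names
# are assumptions.
#
#   xd = Xdatcar("XDATCAR")
#   print(len(xd.structures))           # number of ionic steps parsed
#   xd.connect("XDATCAR.part2")         # append structures from a second file
#   xd.write_file("XDATCAR.merged")     # serialized via get_string()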
class Dynmat:
"""
Object for reading a DYNMAT file.
.. attribute:: data
A nested dict containing the DYNMAT data of the form::
[atom <int>][disp <int>]['dispvec'] =
displacement vector (part of first line in dynmat block, e.g. "0.01 0 0")
[atom <int>][disp <int>]['dynmat'] =
<list> list of dynmat lines for this atom and this displacement
Authors: <NAME>
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing DYNMAT
"""
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
self._nspecs, self._natoms, self._ndisps = map(int, lines[0].sep_split())
self._masses = map(float, lines[1].sep_split())
self.data = defaultdict(dict)
atom, disp = None, None
for i, l in enumerate(lines[2:]):
v = list(map(float, l.sep_split()))
if not i % (self._natoms + 1):
atom, disp = map(int, v[:2])
if atom not in self.data:
self.data[atom] = {}
if disp not in self.data[atom]:
self.data[atom][disp] = {}
self.data[atom][disp]["dispvec"] = v[2:]
else:
if "dynmat" not in self.data[atom][disp]:
self.data[atom][disp]["dynmat"] = []
self.data[atom][disp]["dynmat"].apd(v)
def get_phonon_frequencies(self):
"""calculate phonon frequencies"""
# TODO: the following is most likely not correct or suboptimal
# hence for demonstration purposes only
frequencies = []
for k, v0 in self.data.items():
for v1 in v0.itervalues():
vec = map(absolute, v1["dynmat"][k - 1])
frequency = math.sqrt(total_count(vec)) * 2.0 * math.pi * 15.633302 # THz
frequencies.apd(frequency)
return frequencies
@property
def nspecs(self):
"""returns the number of species"""
return self._nspecs
@property
def natoms(self):
"""returns the number of atoms"""
return self._natoms
@property
def ndisps(self):
"""returns the number of displacements"""
return self._ndisps
@property
def masses(self):
"""returns the list of atomic masses"""
return list(self._masses)
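# Illustrative usage sketch (added for documentation). The DYNMAT file name is
# an assumption; `data` is keyed by the atom and displacement indices read from
# the file.
#
#   dm = Dynmat("DYNMAT")
#   print(dm.natoms, dm.ndisps, dm.nspecs)
#   atom = sorted(dm.data)[0]
#   disp = sorted(dm.data[atom])[0]
#   print(dm.data[atom][disp]["dispvec"])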
def get_adjusted_fermi_level(efermi, cbm, band_structure):
"""
When running a band structure computation, the Fermi level needs to be
taken from the static run that gave the charge density used for the non-self-
consistent band structure run. Sometimes this Fermi level is however a
little too low because of the mismatch between the uniform grid used in
the static run and the band structure k-points (e.g., the VBM is at Gamma
and the Gamma point is not in the uniform mesh). Here we use a procedure
consisting in looking for energy levels higher than the static Fermi level
(but lower than the LUMO); if any of these levels makes the band structure
appear insulating and no longer metallic, we keep this adjusted Fermi
level. This procedure has been shown to correctly detect most insulators.
Args:
efermi (float): Fermi energy of the static run
cbm (float): Conduction band get_minimum of the static run
band_structure: a band structure object
Returns:
a new adjusted fermi level
"""
# make a working copy of band_structure
bs_working = BandStructureSymmLine.from_dict(band_structure.as_dict())
if bs_working.is_metal():
e = efermi
while e < cbm:
e += 0.01
bs_working._efermi = e
if not bs_working.is_metal():
return e
return efermi
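# Illustrative usage sketch (added for documentation): adjusting the Fermi
# level of a line-mode band structure using the Fermi energy and CBM from the
# static run. The variable names (static_vasprun, bs) are assumptions.
#
#   efermi = static_vasprun.efermi
#   cbm = static_vasprun.get_band_structure().get_cbm()["energy"]
#   adjusted = get_adjusted_fermi_level(efermi, cbm, bs)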
# a note to future confused people (i.e. myself):
# I use beatnum.fromfile instead of scipy.io.FortranFile here because the records
# are of fixed length, so the record length is only written once. In fortran,
# this amounts to using open(..., form='unformatted', recl=recl_len). In
# contrast when you write UNK files, the record length is written at the
# beginning of each record. This allows you to use scipy.io.FortranFile. In
# fortran, this amounts to using open(..., form='unformatted') [i.e. no recl=].
class Wavecar:
"""
This is a class that contains the (pseudo-) wavefunctions from VASP.
Coefficients are read from the given WAVECAR file and the corresponding
G-vectors are generated using the algorithm developed in WaveTrans (see
acknowledgments below). To understand how the wavefunctions are evaluated,
please see the evaluate_wavefunc docstring.
It should be noted that the pseudopotential augmentation is not included in
the WAVECAR file. As a result, some caution should be exercised when
deriving value from this information.
The usefulness of this class is to allow the user to do projections or band
unfolding style manipulations of the wavefunction. An example of this can
be seen in the work of Shen et al. 2017
(https://doi.org/10.1103/PhysRevMaterials.1.065001).
.. attribute:: filename
String of the input file (usually WAVECAR)
.. attribute:: vasp_type
String that determines the VASP type the WAVECAR was generated with (either
'standard_op', 'gam', or 'ncl')
.. attribute:: nk
Number of k-points from the WAVECAR
.. attribute:: nb
Number of bands per k-point
.. attribute:: encut
Energy cutoff (used to define G_{cut})
.. attribute:: efermi
Fermi energy
.. attribute:: a
Primitive lattice vectors of the cell (e.g. a_1 = self.a[0, :])
.. attribute:: b
Reciprocal lattice vectors of the cell (e.g. b_1 = self.b[0, :])
.. attribute:: vol
The volume of the unit cell in real space
.. attribute:: kpoints
The list of k-points read from the WAVECAR file
.. attribute:: band_energy
The list of band eigenenergies (and corresponding occupancies) for
each kpoint, where the first index corresponds to the index of the
k-point (e.g. self.band_energy[kp])
.. attribute:: Gpoints
The list of generated G-points for each k-point (a double list), which
are used with the coefficients for each k-point and band to recreate
the wavefunction (e.g. self.Gpoints[kp] is the list of G-points for
k-point kp). The G-points depend on the k-point and reciprocal lattice
and therefore are identical for each band at the same k-point. Each
G-point is represented by integer multipliers (e.g. assuming
Gpoints[kp][n] == [n_1, n_2, n_3], then
G_n = n_1*b_1 + n_2*b_2 + n_3*b_3)
.. attribute:: coeffs
The list of coefficients for each k-point and band for reconstructing
the wavefunction. For non-spin-polarized, the first index corresponds
to the kpoint and the second corresponds to the band (e.g.
self.coeffs[kp][b] corresponds to k-point kp and band b). For
spin-polarized calculations, the first index is for the spin.
If the calculation was non-collinear, then self.coeffs[kp][b] will have
two columns (one for each component of the spinor).
Acknowledgments:
This code is based upon the Fortran program, WaveTrans, written by
<NAME> and <NAME> from the Dept. of Physics at Carnegie
Mellon University. To see the original work, please visit:
https://www.andrew.cmu.edu/user/feenstra/wavetrans/
Author: <NAME>
"""
def __init__(self, filename="WAVECAR", verbose=False, precision="normlizattional", vasp_type=None):
"""
Information is extracted from the given WAVECAR
Args:
filename (str): input file (default: WAVECAR)
verbose (bool): determines whether processing information is shown
precision (str): determines how fine the fft mesh is (normlizattional or
accurate), only the first letter matters
vasp_type (str): determines the VASP type that is used, allowed
values are ['standard_op', 'gam', 'ncl']
(only first letter is required)
"""
self.filename = filename
if not (vasp_type is None or vasp_type.lower()[0] in ["s", "g", "n"]):
raise ValueError(f"inversealid vasp_type {vasp_type}")
self.vasp_type = vasp_type
# c = 0.26246582250210965422
# 2m/hbar^2 in agreement with VASP
self._C = 0.262465831
with open(self.filename, "rb") as f:
# read the header information
recl, spin, rtag = bn.fromfile(f, dtype=bn.float64, count=3).convert_type(bn.int_)
if verbose:
print(f"recl={recl}, spin={spin}, rtag={rtag}")
recl8 = int(recl / 8)
self.spin = spin
# check to make sure we have precision correct
if rtag not in (45200, 45210, 53300, 53310):
# note that rtag=45200 and 45210 may not work if file was actually
# generated by an old version of VASP, since that would write eigenvalues
# and occupations in a way that does not span FORTRAN records, but
# reader below appears to assume that record boundaries can be ignored
# (see OUTWAV vs. OUTWAV_4 in vasp fileio.F)
raise ValueError(f"inversealid rtag of {rtag}")
# padding to end of fortran REC=1
bn.fromfile(f, dtype=bn.float64, count=(recl8 - 3))
# extract kpoint, bands, energy, and lattice information
self.nk, self.nb, self.encut = bn.fromfile(f, dtype=bn.float64, count=3).convert_type(bn.int_)
self.a = bn.fromfile(f, dtype=bn.float64, count=9).change_shape_to((3, 3))
self.efermi = bn.fromfile(f, dtype=bn.float64, count=1)[0]
if verbose:
print(
"kpoints = {}, bands = {}, energy cutoff = {}, fermi "
"energy= {:.04f}\n".format(self.nk, self.nb, self.encut, self.efermi)
)
print(f"primitive lattice vectors = \n{self.a}")
self.vol = bn.dot(self.a[0, :], bn.cross(self.a[1, :], self.a[2, :]))
if verbose:
print(f"volume = {self.vol}\n")
# calculate reciprocal lattice
b = bn.numset(
[
bn.cross(self.a[1, :], self.a[2, :]),
bn.cross(self.a[2, :], self.a[0, :]),
bn.cross(self.a[0, :], self.a[1, :]),
]
)
b = 2 * bn.pi * b / self.vol
self.b = b
if verbose:
print(f"reciprocal lattice vectors = \n{b}")
print(f"reciprocal lattice vector magnitudes = \n{bn.linalg.normlizattion(b, axis=1)}\n")
# calculate get_maximum number of b vectors in each direction
self._generate_nbget_max()
if verbose:
print(f"get_max number of G values = {self._nbget_max}\n\n")
self.ng = self._nbget_max * 3 if precision.lower()[0] == "n" else self._nbget_max * 4
# padding to end of fortran REC=2
bn.fromfile(f, dtype=bn.float64, count=recl8 - 13)
# reading records
self.Gpoints = [None for _ in range(self.nk)]
self.kpoints = []
if spin == 2:
self.coeffs = [[[None for i in range(self.nb)] for j in range(self.nk)] for _ in range(spin)]
self.band_energy = [[] for _ in range(spin)]
else:
self.coeffs = [[None for i in range(self.nb)] for j in range(self.nk)]
self.band_energy = []
for ispin in range(spin):
if verbose:
print(f"reading spin {ispin}")
for ink in range(self.nk):
# information for this kpoint
bnlane = int(bn.fromfile(f, dtype=bn.float64, count=1)[0])
kpoint = bn.fromfile(f, dtype=bn.float64, count=3)
if ispin == 0:
self.kpoints.apd(kpoint)
else:
assert bn.totalclose(self.kpoints[ink], kpoint)
if verbose:
print(f"kpoint {ink: 4} with {bnlane: 5} plane waves at {kpoint}")
# energy and occupation information
enocc = bn.fromfile(f, dtype=bn.float64, count=3 * self.nb).change_shape_to((self.nb, 3))
if spin == 2:
self.band_energy[ispin].apd(enocc)
else:
self.band_energy.apd(enocc)
if verbose:
print("enocc =\n", enocc[:, [0, 2]])
# padding to end of record that contains bnlane, kpoints, evals and occs
bn.fromfile(f, dtype=bn.float64, count=(recl8 - 4 - 3 * self.nb) % recl8)
if self.vasp_type is None:
(
self.Gpoints[ink],
extra_gpoints,
extra_coeff_inds,
) = self._generate_G_points(kpoint, gamma=True)
if len(self.Gpoints[ink]) == bnlane:
self.vasp_type = "gam"
else:
(
self.Gpoints[ink],
extra_gpoints,
extra_coeff_inds,
) = self._generate_G_points(kpoint, gamma=False)
self.vasp_type = "standard_op" if len(self.Gpoints[ink]) == bnlane else "ncl"
if verbose:
print("\ndeterget_mined vasp_type =", self.vasp_type, "\n")
else:
(
self.Gpoints[ink],
extra_gpoints,
extra_coeff_inds,
) = self._generate_G_points(kpoint, gamma=(self.vasp_type.lower()[0] == "g"))
if len(self.Gpoints[ink]) != bnlane and 2 * len(self.Gpoints[ink]) != bnlane:
raise ValueError(
f"Incorrect value of vasp_type given ({vasp_type})."
" Please open an issue if you are certain this WAVECAR"
" was generated with the given vasp_type."
)
self.Gpoints[ink] = bn.numset(self.Gpoints[ink] + extra_gpoints, dtype=bn.float64)
# extract coefficients
for inb in range(self.nb):
if rtag in (45200, 53300):
data = bn.fromfile(f, dtype=bn.complex64, count=bnlane)
bn.fromfile(f, dtype=bn.float64, count=recl8 - bnlane)
elif rtag in (45210, 53310):
# this should handle double precision coefficients
# but I don't have a WAVECAR to test it with
data = bn.fromfile(f, dtype=bn.complex128, count=bnlane)
bn.fromfile(f, dtype=bn.float64, count=recl8 - 2 * bnlane)
extra_coeffs = []
if len(extra_coeff_inds) > 0:
# reconstruct extra coefficients missing from gamma-only executable WAVECAR
for G_ind in extra_coeff_inds:
# no idea where this factor of sqrt(2) comes from, but empirically
# it appears to be necessary
data[G_ind] /= bn.sqrt(2)
extra_coeffs.apd(bn.conj(data[G_ind]))
if spin == 2:
self.coeffs[ispin][ink][inb] = bn.numset(list(data) + extra_coeffs, dtype=bn.complex64)
else:
self.coeffs[ink][inb] = bn.numset(list(data) + extra_coeffs, dtype=bn.complex128)
if self.vasp_type.lower()[0] == "n":
self.coeffs[ink][inb].shape = (2, bnlane // 2)
def _generate_nbget_max(self) -> None:
"""
Helper function that determines the maximum number of b vectors for
each direction.
This algorithm is adapted from WaveTrans (see Class docstring). There
should be no reason for this function to be called outside of
initialization.
"""
bmag = bn.linalg.normlizattion(self.b, axis=1)
b = self.b
# calculate maximum integers in each direction for G
phi12 = bn.arccos(bn.dot(b[0, :], b[1, :]) / (bmag[0] * bmag[1]))
sphi123 = bn.dot(b[2, :], bn.cross(b[0, :], b[1, :])) / (bmag[2] * bn.linalg.normlizattion(bn.cross(b[0, :], b[1, :])))
nbget_maxA = bn.sqrt(self.encut * self._C) / bmag
nbget_maxA[0] /= bn.absolute(bn.sin(phi12))
nbget_maxA[1] /= bn.absolute(bn.sin(phi12))
nbget_maxA[2] /= bn.absolute(sphi123)
nbget_maxA += 1
phi13 = bn.arccos(bn.dot(b[0, :], b[2, :]) / (bmag[0] * bmag[2]))
sphi123 = bn.dot(b[1, :], bn.cross(b[0, :], b[2, :])) / (bmag[1] * bn.linalg.normlizattion(bn.cross(b[0, :], b[2, :])))
nbget_maxB = bn.sqrt(self.encut * self._C) / bmag
nbget_maxB[0] /= bn.absolute(bn.sin(phi13))
nbget_maxB[1] /= bn.absolute(sphi123)
nbget_maxB[2] /= bn.absolute(bn.sin(phi13))
nbget_maxB += 1
phi23 = bn.arccos(bn.dot(b[1, :], b[2, :]) / (bmag[1] * bmag[2]))
sphi123 = bn.dot(b[0, :], bn.cross(b[1, :], b[2, :])) / (bmag[0] * bn.linalg.normlizattion(bn.cross(b[1, :], b[2, :])))
nbget_maxC = bn.sqrt(self.encut * self._C) / bmag
nbget_maxC[0] /= bn.absolute(sphi123)
nbget_maxC[1] /= bn.absolute(bn.sin(phi23))
nbget_maxC[2] /= bn.absolute(bn.sin(phi23))
nbget_maxC += 1
self._nbget_max = bn.get_max([nbget_maxA, nbget_maxB, nbget_maxC], axis=0).convert_type(bn.int_)
def _generate_G_points(self, kpoint: bn.ndnumset, gamma: bool = False) -> Tuple[List, List, List]:
"""
Helper function to generate G-points based on nbget_max.
This function iterates over possible G-point values and determines
if the energy is less than G_{cut}. Valid values are appended to
the output array. This function should not be called outside of
initialization.
Args:
kpoint (bn.numset): the array containing the current k-point value
gamma (bool): determines if G points for gamma-point only executable
should be generated
Returns:
a list containing valid G-points
"""
if gamma:
kget_max = self._nbget_max[0] + 1
else:
kget_max = 2 * self._nbget_max[0] + 1
gpoints = []
extra_gpoints = []
extra_coeff_inds = []
G_ind = 0
for i in range(2 * self._nbget_max[2] + 1):
i3 = i - 2 * self._nbget_max[2] - 1 if i > self._nbget_max[2] else i
for j in range(2 * self._nbget_max[1] + 1):
j2 = j - 2 * self._nbget_max[1] - 1 if j > self._nbget_max[1] else j
for k in range(kget_max):
k1 = k - 2 * self._nbget_max[0] - 1 if k > self._nbget_max[0] else k
if gamma and ((k1 == 0 and j2 < 0) or (k1 == 0 and j2 == 0 and i3 < 0)):
continue
G = bn.numset([k1, j2, i3])
v = kpoint + G
g = bn.linalg.normlizattion(bn.dot(v, self.b))
E = g ** 2 / self._C
if E < self.encut:
gpoints.apd(G)
if gamma and (k1, j2, i3) != (0, 0, 0):
extra_gpoints.apd(-G)
extra_coeff_inds.apd(G_ind)
G_ind += 1
return (gpoints, extra_gpoints, extra_coeff_inds)
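# Added commentary (not original code): the cutoff test above implements the
# standard plane-wave criterion. With self._C = 2m/hbar^2 in eV^-1 A^-2, the
# kinetic energy of the plane wave with wavevector k + G is
#   E = |(k + G) . b|^2 / C = hbar^2 |k + G|^2 / (2m)   [eV]
# and a G-vector is kept only when E < ENCUT (self.encut).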
def evaluate_wavefunc(self, kpoint: int, band: int, r: bn.ndnumset, spin: int = 0, spinor: int = 0) -> bn.complex64:
r"""
Evaluates the wavefunction for a given position, r.
The wavefunction is given by the k-point and band. It is evaluated
at the given position by summing over the components. Formally,
\psi_n^k (r) = \sum_{i=1}^N c_i^{n,k} \exp (i (k + G_i^{n,k}) \cdot r)
where \psi_n^k is the wavefunction for the nth band at k-point k, N is
the number of plane waves, c_i^{n,k} is the ith coefficient that
corresponds to the nth band and k-point k, and G_i^{n,k} is the ith
G-point corresponding to k-point k.
NOTE: This function is very slow; a discrete fourier transform is the
preferred method of evaluation (see Wavecar.fft_mesh).
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
r (bn.numset): the position where the wavefunction will be evaluated
spin (int): spin index for the desired wavefunction (only for
ISPIN = 2, default = 0)
spinor (int): component of the spinor that is evaluated (only used
if vasp_type == 'ncl')
Returns:
a complex value corresponding to the evaluation of the wavefunction
"""
v = self.Gpoints[kpoint] + self.kpoints[kpoint]
u = bn.dot(bn.dot(v, self.b), r)
if self.vasp_type.lower()[0] == "n":
c = self.coeffs[kpoint][band][spinor, :]
elif self.spin == 2:
c = self.coeffs[spin][kpoint][band]
else:
c = self.coeffs[kpoint][band]
return bn.total_count(bn.dot(c, bn.exp(1j * u, dtype=bn.complex64))) / bn.sqrt(self.vol)
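# Illustrative usage sketch (added for documentation; the file name and the
# kpoint/band indices are assumptions):
#
#   wc = Wavecar("WAVECAR")
#   r = bn.numset([0.0, 0.0, 0.0])                 # evaluate at the origin
#   psi = wc.evaluate_wavefunc(kpoint=0, band=0, r=r)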
def fft_mesh(self, kpoint: int, band: int, spin: int = 0, spinor: int = 0, shift: bool = True) -> bn.ndnumset:
"""
Places the coefficients of a wavefunction onto an fft mesh.
Once the mesh has been obtained, a discrete fourier transform can be
used to obtain real-space evaluation of the wavefunction. The output
of this function can be passed directly to beatnum's fft function. For
example:
mesh = Wavecar('WAVECAR').fft_mesh(kpoint, band)
evals = bn.fft.ifftn(mesh)
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
spin (int): the spin of the wavefunction for the desired
wavefunction (only for ISPIN = 2, default = 0)
spinor (int): component of the spinor that is evaluated (only used
if vasp_type == 'ncl')
shift (bool): determines if the zero frequency coefficient is
placed at index (0, 0, 0) or centered
Returns:
a beatnum ndnumset representing the 3D mesh of coefficients
"""
if self.vasp_type.lower()[0] == "n":
tcoeffs = self.coeffs[kpoint][band][spinor, :]
elif self.spin == 2:
tcoeffs = self.coeffs[spin][kpoint][band]
else:
tcoeffs = self.coeffs[kpoint][band]
mesh = bn.zeros(tuple(self.ng), dtype=bn.complex_)
for gp, coeff in zip(self.Gpoints[kpoint], tcoeffs):
t = tuple(gp.convert_type(bn.int_) + (self.ng / 2).convert_type(bn.int_))
mesh[t] = coeff
if shift:
return bn.fft.ifftshift(mesh)
return mesh
def get_parchg(
self,
poscar: Poscar,
kpoint: int,
band: int,
spin: Optional[int] = None,
spinor: Optional[int] = None,
phase: bool = False,
scale: int = 2,
) -> Chgcar:
"""
Generates a Chgcar object, which is the charge density of the specified
wavefunction.
This function generates a Chgcar object with the charge density of the
wavefunction specified by band and kpoint (and spin, if the WAVECAR
corresponds to a spin-polarized calculation). The phase tag is a
feature that is not present in VASP. For a real wavefunction, the phase
tag being turned on means that the charge density is multiplied by the
sign of the wavefunction at that point in space. A warning is generated
if the phase tag is on and the chosen kpoint is not Gamma.
Note: Augmentation from the PAWs is NOT included in this function. The
maximal charge density will differ from the PARCHG from VASP, but the
qualitative shape of the charge density will match.
Args:
poscar (pymatgen.io.vasp.ibnuts.Poscar): Poscar object that has the
structure associated with the WAVECAR file
kpoint (int): the index of the kpoint for the wavefunction
band (int): the index of the band for the wavefunction
spin (int): optional argument to specify the spin. If the Wavecar
has ISPIN = 2, spin is None generates a Chgcar with total spin
and magnetization, and spin == {0, 1} specifies just the spin
up or down component.
spinor (int): optional argument to specify the spinor component
for noncollinear data wavefunctions (allowed values of None,
0, or 1)
phase (bool): flag to determine if the charge density is multiplied
by the sign of the wavefunction. Only valid for real
wavefunctions.
scale (int): scaling for the FFT grid. The default value of 2 is at
least as fine as the VASP default.
Returns:
a pymatgen.io.vasp.outputs.Chgcar object
"""
if phase and not bn.total(self.kpoints[kpoint] == 0.0):
warnings.warn("phase == True should only be used for the Gamma kpoint! I hope you know what you're doing!")
# scaling of ng for the fft grid, need to restore value at the end
temp_ng = self.ng
self.ng = self.ng * scale
N = bn.prod(self.ng)
data = {}
if self.spin == 2:
if spin is not None:
wfr = bn.fft.ifftn(self.fft_mesh(kpoint, band, spin=spin)) * N
den = bn.absolute(bn.conj(wfr) * wfr)
if phase:
den = bn.sign(bn.reality(wfr)) * den
data["total"] = den
else:
wfr = bn.fft.ifftn(self.fft_mesh(kpoint, band, spin=0)) * N
denup = bn.absolute(bn.conj(wfr) * wfr)
wfr = bn.fft.ifftn(self.fft_mesh(kpoint, band, spin=1)) * N
dendn = bn.absolute(bn.conj(wfr) * wfr)
data["total"] = denup + dendn
data["difference"] = denup - dendn
else:
if spinor is not None:
wfr = bn.fft.ifftn(self.fft_mesh(kpoint, band, spinor=spinor)) * N
den = bn.absolute(bn.conj(wfr) * wfr)
else:
wfr = bn.fft.ifftn(self.fft_mesh(kpoint, band, spinor=0)) * N
wfr_t = bn.fft.ifftn(self.fft_mesh(kpoint, band, spinor=1)) * N
den = bn.absolute(bn.conj(wfr) * wfr)
den += bn.absolute(bn.conj(wfr_t) * wfr_t)
if phase and not (self.vasp_type.lower()[0] == "n" and spinor is None):
den = bn.sign( | bn.reality(wfr) | numpy.real |
from __future__ import division, print_function
import math, sys, warnings, datetime
from operator import itemgetter
import itertools
import beatnum as bn
from beatnum import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
from matplotlib.artist import totalow_rasterization
import matplotlib.axis as get_maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as _ # <-registers a date unit converter
from matplotlib import docstring
import matplotlib.font_manager as font_manager
import matplotlib.imaginarye as mimaginarye
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import matplotlib.mlab as mlab
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.spines as mspines
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.pile_operationplot as mpile_operation
import matplotlib.streamplot as mstream
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.tri as mtri
from matplotlib import MatplotlibDeprecationWarning as mplDeprecation
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
iterable = cbook.iterable
is_string_like = cbook.is_string_like
is_sequence_of_strings = cbook.is_sequence_of_strings
def _string_to_bool(s):
if not is_string_like(s):
return s
if s == 'on':
return True
if s == 'off':
return False
raise ValueError("string argument must be either 'on' or 'off'")
def _process_plot_format(fmt):
"""
Process a MATLAB style color/line style format string. Return a
(*linestyle*, *color*) tuple as a result of the processing. Default
values are ('-', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
# We need to differentiate grayscale '1.0' from tri_down marker '1'
try:
fmtint = str(int(fmt))
except ValueError:
return linestyle, marker, color # Yes
else:
if fmt != fmtint:
# user definitely doesn't want tri_down marker
return linestyle, marker, color # Yes
else:
# ignore converted color
color = None
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
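# Illustrative examples of the parsing above (added commentary); the returned
# tuple is (linestyle, marker, color):
#
#   _process_plot_format('ko')   # -> ('None', 'o', 'k')     black circles
#   _process_plot_format('r--')  # -> ('--', 'None', 'r')    red dashed line
#   _process_plot_format('b')    # -> (None, None, (0.0, 0.0, 1.0))  colorspec only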
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers.
See also: :meth:`~matplotlib.axes.Axes.set_color_cycle`.
.. Note:: Deprecated 2010/01/03.
Set rcParams['axes.color_cycle'] directly.
"""
rcParams['axes.color_cycle'] = clist
warnings.warn("Set rcParams['axes.color_cycle'] directly", mplDeprecation)
class _process_plot_var_args(object):
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self.set_color_cycle()
def __getstate__(self):
# note: it is not possible to pickle a itertools.cycle instance
return {'axes': self.axes, 'command': self.command}
def __setstate__(self, state):
self.__dict__ = state.copy()
self.set_color_cycle()
def set_color_cycle(self, clist=None):
if clist is None:
clist = rcParams['axes.color_cycle']
self.color_cycle = itertools.cycle(clist)
def __ctotal__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
if self.axes.name == 'polar':
xunits = kwargs.pop( 'thetaunits', xunits )
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if self.axes.name == 'polar':
yunits = kwargs.pop( 'runits', yunits )
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError('There is no line property "%s"'%key)
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError('There is no patch property "%s"'%key)
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
if self.command!='plot':
# the Line2D class can handle unitized data, with
# support for post hoc unit changes etc. Other mpl
# artists, eg Polygon which _process_plot_var_args
# also serves on calls to fill, cannot. So this is a
# hack to say: if you are not "plot", which is
# creating Line2D, then convert the data now to
# floats. If you are plot, pass the raw data through
# to Line2D which will handle the conversion. So
# polygons will not support post hoc conversions of
# the unit type since they are not storing the orig
# data. Hopefully we can rationalize this at a later
# date - JDH
if bx:
x = self.axes.convert_xunits(x)
if by:
y = self.axes.convert_yunits(y)
x = bn.atleast_1d(x) #like asanyarray, but converts scalar to array
y = bn.atleast_1d(y)
if x.shape[0] != y.shape[0]:
raise ValueError("x and y must have same first dimension")
if x.ndim > 2 or y.ndim > 2:
raise ValueError("x and y can be no greater than 2-D")
if x.ndim == 1:
x = x[:,bn.newaxis]
if y.ndim == 1:
y = y[:,bn.newaxis]
return x, y
def _makeline(self, x, y, kw, kwargs):
kw = kw.copy() # Don't modify the original kw.
if not 'color' in kw and not 'color' in kwargs.keys():
kw['color'] = self.color_cycle.next()
# (can't use setdefault because it always evaluates
# its second argument)
seg = mlines.Line2D(x, y,
axes=self.axes,
**kw
)
self.set_lineprops(seg, **kwargs)
return seg
def _makefill(self, x, y, kw, kwargs):
try:
facecolor = kw['color']
except KeyError:
facecolor = self.color_cycle.next()
seg = mpatches.Polygon(bn.hpile_operation(
(x[:,bn.newaxis],y[:,bn.newaxis])),
facecolor = facecolor,
fill=True,
closed=kw['closed']
)
self.set_patchprops(seg, **kwargs)
return seg
def _plot_args(self, tup, kwargs):
ret = []
if len(tup) > 1 and is_string_like(tup[-1]):
linestyle, marker, color = _process_plot_format(tup[-1])
tup = tup[:-1]
elif len(tup) == 3:
raise ValueError('third arg must be a format string')
else:
linestyle, marker, color = None, None, None
kw = {}
for k, v in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if v is not None:
kw[k] = v
y = bn.atleast_1d(tup[-1])
if len(tup) == 2:
x = bn.atleast_1d(tup[0])
else:
x = bn.arr_range(y.shape[0], dtype=float)
x, y = self._xy_from_xy(x, y)
if self.command == 'plot':
func = self._makeline
else:
kw['closed'] = kwargs.get('closed', True)
func = self._makefill
ncx, ncy = x.shape[1], y.shape[1]
for j in xrange(get_max(ncx, ncy)):
seg = func(x[:,j%ncx], y[:,j%ncy], kw, kwargs)
ret.apd(seg)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0:
return
if len(remaining) <= 3:
for seg in self._plot_args(remaining, kwargs):
yield seg
return
if is_string_like(remaining[2]):
isep_split = 3
else:
isep_split = 2
for seg in self._plot_args(remaining[:isep_split], kwargs):
yield seg
remaining=remaining[isep_split:]
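# Added commentary: sketch of how the argument grouping above splits a plot
# call into (x, y[, fmt]) chunks, each of which becomes one or more artists.
#
#   plot(t1, s1, 'ko', t2, s2)
#     -> _plot_args((t1, s1, 'ko'), kwargs)   # isep_split == 3, fmt string present
#     -> _plot_args((t2, s2), kwargs)         # isep_split == 2, default style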
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports ctotalbacks through a ctotalbacks
attribute which is a :class:`~matplotlib.cbook.CtotalbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
xscale=None,
yscale=None,
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' | 'box-forced']
*alpha* float: the alpha transparency (can be None)
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xget_min*, *xget_max*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*yget_min*, *yget_max*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
self.set_axes_locator(kwargs.get("axes_locator", None))
self.spines = self._gen_axes_spines()
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._rasterization_zorder = None
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - ftotal back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if xscale:
self.set_xscale(xscale)
if yscale:
self.set_yscale(yscale)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.ctotalbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.ctotalbacks.connect('units finalize',
self.relim)
def __setstate__(self, state):
self.__dict__ = state
# put the _remove_method back on all artists contained within the axes
for container_name in ['lines', 'collections', 'tables', 'patches',
'texts', 'imaginaryes']:
container = getattr(self, container_name)
for artist in container:
artist._remove_method = container.remove
def get_window_extent(self, *args, **kwargs):
"""
get the axes bounding box in display space; *args* and
*kwargs* are empty
"""
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = get_maxis.XAxis(self)
self.spines['bottom'].register_axis(self.xaxis)
self.spines['top'].register_axis(self.xaxis)
self.yaxis = get_maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
.. note::
This method is primarily used by rectilinear projections
of the :class:`~matplotlib.axes.Axes` class, and is averaget
to be overridden by new kinds of projection axes that need
differenceerent transformations and limits. (See
:class:`~matplotlib.projections.polar.PolarAxes` for an
example.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor.
# It is assumed that this part will have non-linear components
# (e.g. for a log scale).
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
def get_xaxis_transform(self,which='grid'):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which=='grid':
return self._xaxis_transform
elif which=='tick1':
# for cartesian projection, this is bottom spine
return self.spines['bottom'].get_spine_transform()
elif which=='tick2':
# for cartesian projection, this is top spine
return self.spines['top'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_xaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_xaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self,which='grid'):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which=='grid':
return self._yaxis_transform
elif which=='tick1':
# for cartesian projection, this is bottom spine
return self.spines['left'].get_spine_transform()
elif which=='tick2':
# for cartesian projection, this is top spine
return self.spines['right'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_yaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_yaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
try:
line._transformed_path.inversealidate()
except AttributeError:
pass
def get_position(self, original=False):
'Return a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
"""Make the original position the active position"""
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def set_axes_locator(self, locator):
"""
set axes_locator
ACCEPT : a callable object which takes an axes instance and renderer and
returns a bbox.
"""
self._axes_locator = locator
def get_axes_locator(self):
"""
return axes_locator
"""
return self._axes_locator
def _set_artist_props(self, a):
"""set the boilerplate props for artists add_concated to axes"""
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
"""
Returns a dict whose keys are spine names and values are
Line2D or Patch instances. Each element is used to draw a
spine of the axes.
In the standard axes, this is a single line segment, but in
other projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return {
'left':mspines.Spine.linear_spine(self,'left'),
'right':mspines.Spine.linear_spine(self,'right'),
'bottom':mspines.Spine.linear_spine(self,'bottom'),
'top':mspines.Spine.linear_spine(self,'top'),
}
def cla(self):
"""Clear the current axes."""
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
for name,spine in self.spines.iteritems():
spine.cla()
self.ignore_existing_data_limits = True
self.ctotalbacks = cbook.CtotalbackRegistry()
if self._sharex is not None:
# major and get_minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.get_minor = self._sharex.xaxis.get_minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False, auto=None)
# Save the current formatter/locator so we don't lose it
majf = self._sharex.xaxis.get_major_formatter()
get_minf = self._sharex.xaxis.get_get_minor_formatter()
majl = self._sharex.xaxis.get_major_locator()
get_minl = self._sharex.xaxis.get_get_minor_locator()
# This overwrites the current formatter/locator
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
# Reset the formatter/locator
self.xaxis.set_major_formatter(majf)
self.xaxis.set_get_minor_formatter(get_minf)
self.xaxis.set_major_locator(majl)
self.xaxis.set_get_minor_locator(get_minl)
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.get_minor = self._sharey.yaxis.get_minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False, auto=None)
# Save the current formatter/locator so we don't lose it
majf = self._sharey.yaxis.get_major_formatter()
get_minf = self._sharey.yaxis.get_get_minor_formatter()
majl = self._sharey.yaxis.get_major_locator()
get_minl = self._sharey.yaxis.get_get_minor_locator()
# This overwrites the current formatter/locator
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
# Reset the formatter/locator
self.yaxis.set_major_formatter(majf)
self.yaxis.set_get_minor_formatter(get_minf)
self.yaxis.set_major_locator(majl)
self.yaxis.set_get_minor_locator(get_minl)
else:
self.yaxis.set_scale('linear')
self._autoscaleXon = True
self._autoscaleYon = True
self._xmargin = 0
self._ymargin = 0
self._tight = False
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.imaginaryes = []
self._current_imaginarye = None # strictly for pyplot via _sci, _gci
self.legend_ = None
self.collections = [] # collection.Collection instances
self.containers = [] #
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='baseline',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def get_frame(self):
raise AttributeError('Axes.frame was removed in favor of Axes.spines')
frame = property(get_frame)
def clear(self):
"""clear the axes"""
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
*clist* is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
self._get_patches_for_fill.set_color_cycle(clist)
def ishold(self):
"""return the HOLD status of the axes"""
return self._hold
def hold(self, b=None):
"""
Call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples::
# toggle hold
hold()
# turn hold on
hold(True)
# turn hold off
hold(False)
When hold is *True*, subsequent plot commands will be added to
the current axes. When hold is *False*, the current axes and
figure will be cleared on the next plot command.
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normlizattional' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
============ =====================================
value description
============ =====================================
'box' change physical size of axes
'datalim' change xlim or ylim
'box-forced' same as 'box', but axes can be shared
============ =====================================
'box' does not allow axes sharing, as this can cause
unintended side effects. For cases when sharing axes is
fine, use 'box-forced'.
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normlizattional', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
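# A minimal usage sketch (assumed standard usage, not taken from this
# module): with an Axes instance ``ax``, a fixed data aspect ratio is
# typically requested as::
#
#     ax.plot([0, 1, 2], [0, 2, 4])
#     ax.set_aspect('equal', adjustable='datalim', anchor='C')
#
# With adjustable='datalim' the axes box keeps its position and the view
# limits are stretched instead, which is the only choice for shared axes.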
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' | 'box-forced']
"""
if adjustable in ('box', 'datalim', 'box-forced'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xget_min,xget_max = self.get_xbound()
yget_min,yget_max = self.get_ybound()
xsize = get_max(math.fabsolute(xget_max-xget_min), 1e-30)
ysize = get_max(math.fabsolute(yget_max-yget_min), 1e-30)
return ysize/xsize
def get_data_ratio_log(self):
"""
Returns the aspect ratio of the raw data in log scale.
Will be used when both axis scales are in log.
"""
xget_min,xget_max = self.get_xbound()
yget_min,yget_max = self.get_ybound()
xsize = get_max(math.fabsolute(math.log10(xget_max)-math.log10(xget_min)), 1e-30)
ysize = get_max(math.fabsolute(math.log10(yget_max)-math.log10(yget_min)), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
"""
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
"""
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if self.name != 'polar':
xscale, yscale = self.get_xscale(), self.get_yscale()
if xscale == "linear" and yscale == "linear":
aspect_scale_mode = "linear"
elif xscale == "log" and yscale == "log":
aspect_scale_mode = "log"
elif (xscale == "linear" and yscale == "log") or \
(xscale == "log" and yscale == "linear"):
if aspect is not "auto":
warnings.warn(
'aspect is not supported for Axes with xscale=%s, yscale=%s' \
% (xscale, yscale))
aspect = "auto"
else: # some custom projections have their own scales.
pass
else:
aspect_scale_mode = "linear"
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
# Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable in ['box', 'box-forced']:
if aspect_scale_mode == "log":
box_aspect = A * self.get_data_ratio_log()
else:
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xget_min,xget_max = self.get_xbound()
yget_min,yget_max = self.get_ybound()
if aspect_scale_mode == "log":
xget_min, xget_max = math.log10(xget_min), math.log10(xget_max)
yget_min, yget_max = math.log10(yget_min), math.log10(yget_max)
xsize = get_max(math.fabsolute(xget_max-xget_min), 1e-30)
ysize = get_max(math.fabsolute(yget_max-yget_min), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if absolute(y_expander) < 0.005:
#print 'good enough already'
return
if aspect_scale_mode == "log":
dL = self.dataLim
dL_width = math.log10(dL.x1) - math.log10(dL.x0)
dL_height = math.log10(dL.y1) - math.log10(dL.y0)
xr = 1.05 * dL_width
yr = 1.05 * dL_height
else:
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xget_min, xget_max, yget_min, yget_max', xget_min, xget_max, yget_min, yget_max
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(yget_min+yget_max)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
if aspect_scale_mode == "log":
self.set_ybound((10.**y0, 10.**y1))
else:
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xget_min+xget_max)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
if aspect_scale_mode == "log":
self.set_xbound((10.**x0, 10.**x1))
else:
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
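# A worked example (assumed numbers, for illustration only) of the
# 'datalim' branch above: on an 8x6 inch figure, fig_aspect = 6/8 = 0.75.
# With aspect A = 1 ('equal'), a position box with h/w = 1, and bounds
# giving xsize = 4 and ysize = 2:
#
#     box_aspect = fig_aspect * (h/w)              = 0.75
#     data_ratio = box_aspect / A                  = 0.75
#     y_expander = data_ratio * xsize / ysize - 1  = 0.5
#
# y_expander > 0, so the y interval is widened to Ysize = data_ratio*xsize
# = 3.0 about its midpoint rather than shrinking the x range.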
def axis(self, *v, **kwargs):
"""
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot. For details, see
:func:`~matplotlib.pyplot.axis`.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
"""
if len(v) == 0 and len(kwargs) == 0:
xget_min, xget_max = self.get_xlim()
yget_min, yget_max = self.get_ylim()
return xget_min, xget_max, yget_min, yget_max
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normlizattional', 'auto', 'imaginarye'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view(tight=False)
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by <NAME>
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'imaginarye':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xget_min, xget_max = self.get_xlim()
yget_min, yget_max = self.get_ylim()
return xget_min, xget_max, yget_min, yget_max
emit = kwargs.get('emit', True)
try:
v[0]
except IndexError:
xget_min = kwargs.get('xget_min', None)
xget_max = kwargs.get('xget_max', None)
auto = False # turn off autoscaling, unless...
if xget_min is None and xget_max is None:
auto = None # leave autoscaling state alone
xget_min, xget_max = self.set_xlim(xget_min, xget_max, emit=emit, auto=auto)
yget_min = kwargs.get('yget_min', None)
yget_max = kwargs.get('yget_max', None)
auto = False # turn off autoscaling, unless...
if yget_min is None and yget_max is None:
auto = None # leave autoscaling state alone
yget_min, yget_max = self.set_ylim(yget_min, yget_max, emit=emit, auto=auto)
return xget_min, xget_max, yget_min, yget_max
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xget_min xget_max yget_min yget_max]')
self.set_xlim([v[0], v[1]], emit=emit, auto=False)
self.set_ylim([v[2], v[3]], emit=emit, auto=False)
return v
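# A minimal usage sketch (assumed, not part of this module) of the axis()
# convenience method::
#
#     ax.axis()                  # -> (xmin, xmax, ymin, ymax), no change
#     ax.axis([0, 10, -1, 1])    # set both limits, autoscaling off
#     ax.axis('tight')           # fit the view to the data limits
#     ax.axis('off')             # hide axis lines, ticks and labels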
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise mplDeprecation('Use get_children instead')
def get_frame(self):
"""Return the axes Rectangle frame"""
warnings.warn('use ax.patch instead', mplDeprecation)
return self.patch
def get_legend(self):
"""Return the legend.Legend instance, or None if no legend is defined"""
return self.legend_
def get_imaginaryes(self):
"""return a list of Axes imaginaryes contained by the Axes"""
return cbook.silent_list('AxesImage', self.imaginaryes)
def get_lines(self):
"""Return a list of lines contained by the Axes"""
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
"""Return the XAxis instance"""
return self.xaxis
def get_xgridlines(self):
"""Get the x grid lines as a list of Line2D instances"""
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
"""Get the xtick lines as a list of Line2D instances"""
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
"""Return the YAxis instance"""
return self.yaxis
def get_ygridlines(self):
"""Get the y grid lines as a list of Line2D instances"""
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
"""Get the ytick lines as a list of Line2D instances"""
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def _sci(self, im):
"""
helper for :func:`~matplotlib.pyplot.sci`;
do not use elsefilter_condition.
"""
if isinstance(im, matplotlib.contour.ContourSet):
if im.collections[0] not in self.collections:
raise ValueError(
"ContourSet must be in current Axes")
elif im not in self.imaginaryes and im not in self.collections:
raise ValueError(
"Argument must be an imaginarye, collection, or ContourSet in this Axes")
self._current_imaginarye = im
def _gci(self):
"""
Helper for :func:`~matplotlib.pyplot.gci`;
do not use elsefilter_condition.
"""
return self._current_imaginarye
def has_data(self):
"""
Return *True* if any artists have been added to the axes.
This should not be used to determine whether the *dataLim*
needs to be updated, and may not actually be useful for
anything.
"""
return (
len(self.collections) +
len(self.imaginaryes) +
len(self.lines) +
len(self.patches))>0
def add_concat_artist(self, a):
"""
Add any :class:`~matplotlib.artist.Artist` to the axes.
Returns the artist.
"""
a.set_axes(self)
self.artists.apd(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
return a
def add_concat_collection(self, collection, autolim=True):
"""
Add a :class:`~matplotlib.collections.Collection` instance
to the axes.
Returns the collection.
"""
label = collection.get_label()
if not label:
collection.set_label('_collection%d'%len(self.collections))
self.collections.apd(collection)
self._set_artist_props(collection)
if collection.get_clip_path() is None:
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
return collection
def add_concat_line(self, line):
"""
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
Returns the line.
"""
self._set_artist_props(line)
if line.get_clip_path() is None:
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d' % len(self.lines))
self.lines.apd(line)
line._remove_method = lambda h: self.lines.remove(h)
return line
def _update_line_limits(self, line):
"""Figures out the data limit of the given line, updating self.dataLim."""
path = line.get_path()
if path.vertices.size == 0:
return
line_trans = line.get_transform()
if line_trans == self.transData:
data_path = path
elif any(line_trans.contains_branch_seperately(self.transData)):
# identify the transform to go from line's coordinates
# to data coordinates
trans_to_data = line_trans - self.transData
# if transData is affine we can use the cached non-affine component
# of line's path. (since the non-affine part of line_trans is
# entirely encapsulated in trans_to_data).
if self.transData.is_affine:
line_trans_path = line._get_transformed_path()
na_path, _ = line_trans_path.get_transformed_path_and_affine()
data_path = trans_to_data.transform_path_affine(na_path)
else:
data_path = trans_to_data.transform_path(path)
else:
# for backwards compatibility we update the dataLim with the
# coordinate range of the given path, even though the coordinate
# systems are completely different. This may occur in situations
# such as when ax.transAxes is passed through for absolute
# positioning.
data_path = path
if data_path.vertices.size > 0:
updatex, updatey = line_trans.contains_branch_seperately(
self.transData
)
self.dataLim.update_from_path(data_path,
self.ignore_existing_data_limits,
updatex=updatex,
updatey=updatey)
self.ignore_existing_data_limits = False
def add_concat_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
Returns the patch.
"""
self._set_artist_props(p)
if p.get_clip_path() is None:
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.apd(p)
p._remove_method = lambda h: self.patches.remove(h)
return p
def _update_patch_limits(self, patch):
"""update the data limits for patch *p*"""
# hist can add zero-height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
# cannot check for '==0' since unitized data may not compare to zero
if (isinstance(patch, mpatches.Rectangle) and
((not patch.get_width()) or (not patch.get_height()))):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
patch_to_data = (patch.get_data_transform() -
self.transData)
xys = patch_to_data.transform(xys)
updatex, updatey = patch.get_transform().\
contains_branch_seperately(self.transData)
self.update_datalim(xys, updatex=updatex,
updatey=updatey)
def add_concat_table(self, tab):
"""
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
Returns the table.
"""
self._set_artist_props(tab)
self.tables.apd(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
return tab
def add_concat_container(self, container):
"""
Add a :class:`~matplotlib.container.Container` instance
to the axes.
Returns the collection.
"""
label = container.get_label()
if not label:
container.set_label('_container%d'%len(self.containers))
self.containers.apd(container)
container.set_remove_method(lambda h: self.containers.remove(h))
return container
def relim(self):
"""
Recompute the data limits based on current artists.
At present, :class:`~matplotlib.collections.Collection`
instances are not supported.
"""
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
"""Update the data lim bbox with seq of xy tups or equiv. 2-D numset"""
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = bn.asnumset(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
"""Update the data lim bbox with seq of xy tups"""
# if no data is set currently, the bbox will ignore it's
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of it's current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
"""
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
"""
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
"""Look for unit *kwargs* and update the axis instances as necessary"""
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if self.name == 'polar':
xunits = kwargs.pop( 'thetaunits', xunits )
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a differenceerent converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if self.name == 'polar':
yunits = kwargs.pop( 'runits', yunits )
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a differenceerent converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
"""
Return *True* if the given *mouseevent* (in display coords)
is in the Axes
"""
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied for both axes on plot commands
"""
return self._autoscaleXon and self._autoscaleYon
def get_autoscalex_on(self):
"""
Get whether autoscaling for the x-axis is applied on plot commands
"""
return self._autoscaleXon
def get_autoscaley_on(self):
"""
Get whether autoscaling for the y-axis is applied on plot commands
"""
return self._autoscaleYon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleXon = b
self._autoscaleYon = b
def set_autoscalex_on(self, b):
"""
Set whether autoscaling for the x-axis is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleXon = b
def set_autoscaley_on(self, b):
"""
Set whether autoscaling for the y-axis is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleYon = b
def set_xmargin(self, m):
"""
Set padding of X data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
"""
if m < 0 or m > 1:
raise ValueError("margin must be in range 0 to 1")
self._xmargin = m
def set_ymargin(self, m):
"""
Set padding of Y data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
"""
if m < 0 or m > 1:
raise ValueError("margin must be in range 0 to 1")
self._ymargin = m
def margins(self, *args, **kw):
"""
Set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin
::
margins(margin)
margins(xmargin, ymargin)
margins(x=xmargin, y=ymargin)
margins(..., tight=False)
All three forms above set the xmargin and ymargin parameters.
All keyword parameters are optional. A single argument
specifies both xmargin and ymargin. The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
"""
if not args and not kw:
return self._xmargin, self._ymargin
tight = kw.pop('tight', True)
mx = kw.pop('x', None)
my = kw.pop('y', None)
if len(args) == 1:
mx = my = args[0]
elif len(args) == 2:
mx, my = args
else:
raise ValueError("more than two arguments were supplied")
if mx is not None:
self.set_xmargin(mx)
if my is not None:
self.set_ymargin(my)
scalex = (mx is not None)
scaley = (my is not None)
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
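# A minimal usage sketch (assumed, not part of this module)::
#
#     ax.margins(0.05)           # 5% padding on both axes
#     ax.margins(x=0.1, y=0)     # 10% padding in x only
#     mx, my = ax.margins()      # query the current margins
#
# Each call that sets a margin also re-runs autoscale_view, so the new
# padding takes effect immediately on the current data limits.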
def set_rasterization_zorder(self, z):
"""
Set zorder value below which artists will be rasterized. Set
to `None` to disable rasterizing of artists below a particular
zorder.
"""
self._rasterization_zorder = z
def get_rasterization_zorder(self):
"""
Get zorder value below which artists will be rasterized
"""
return self._rasterization_zorder
def autoscale(self, enable=True, axis='both', tight=None):
"""
Autoscale the axis view to the data (toggle).
Convenience method for simple axis view autoscaling.
It turns autoscaling on or off, and then,
if autoscaling for either axis is on, it performs
the autoscaling on the specified axis or axes.
*enable*: [True | False | None]
True (default) turns autoscaling on, False turns it off.
None leaves the autoscaling state unchanged.
*axis*: ['x' | 'y' | 'both']
which axis to operate on; default is 'both'
*tight*: [True | False | None]
If True, set view limits to data limits;
if False, let the locator and margins expand the view limits;
if None, use tight scaling if the only artist is an image,
otherwise treat *tight* as False.
The *tight* setting is retained for future autoscaling
until it is explicitly changed.
Returns None.
"""
if enable is None:
scalex = True
scaley = True
else:
scalex = False
scaley = False
if axis in ['x', 'both']:
self._autoscaleXon = bool(enable)
scalex = self._autoscaleXon
if axis in ['y', 'both']:
self._autoscaleYon = bool(enable)
scaley = self._autoscaleYon
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
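# A minimal usage sketch (assumed, not part of this module): after
# changing artist data in place, recompute the data limits before
# autoscaling picks up the change::
#
#     line.set_ydata(new_y)      # hypothetical updated data
#     ax.relim()                 # rebuild dataLim from current artists
#     ax.autoscale(enable=True, axis='y', tight=True)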
def autoscale_view(self, tight=None, scalex=True, scaley=True):
"""
Autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
The data limits are not updated automatically when artist
data are changed after the artist has been added to an
Axes instance. In that case, use
:meth:`matplotlib.axes.Axes.relim`
prior to calling autoscale_view.
"""
if tight is None:
# if image data only, just use the datalim
_tight = self._tight or (len(self.imaginaryes)>0 and
len(self.lines)==0 and
len(self.patches)==0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
xlocator = self.xaxis.get_major_locator()
try:
# e.g. DateLocator has its own nonsingular()
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
# Default nonsingular for, e.g., MaxNLocator
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
#### Drawing
@totalow_rasterization
def draw(self, renderer=None, inframe=False):
"""Draw everything (plot lines, axes, labels)"""
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
artists = []
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.apd(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.apd(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.extend(self.spines.itervalues())
dsu = [ (a.zorder, a) for a in artists
if not a.get_animated() ]
# add images to dsu if the backend supports compositing;
# otherwise, do the manual compositing without adding images to dsu.
if len(self.imaginaryes)<=1 or renderer.option_imaginarye_nocomposite():
dsu.extend([(im.zorder, im) for im in self.imaginaryes])
_do_composite = False
else:
_do_composite = True
dsu.sort(key=itemgetter(0))
# rasterize artists with negative zorder
# if the get_minimum zorder is negative, start rasterization
rasterization_zorder = self._rasterization_zorder
if (rasterization_zorder is not None and
len(dsu) > 0 and dsu[0][0] < rasterization_zorder):
renderer.start_rasterizing()
dsu_rasterized = [l for l in dsu if l[0] < rasterization_zorder]
dsu = [l for l in dsu if l[0] >= rasterization_zorder]
else:
dsu_rasterized = []
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
if _do_composite:
# make a composite image blending alpha
# list of (mimaginarye.Image, ox, oy)
zorder_imaginaryes = [(im.zorder, im) for im in self.imaginaryes \
if im.get_visible()]
zorder_imaginaryes.sort(key=lambda x: x[0])
mag = renderer.get_imaginarye_magnification()
ims = [(im.make_imaginarye(mag),0,0) for z,im in zorder_imaginaryes]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimaginarye.from_imaginaryes(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
gc = renderer.new_gc()
gc.set_clip_rectangle(self.bbox)
gc.set_clip_path(mtransforms.TransformedPath(
self.patch.get_path(),
self.patch.get_transform()))
renderer.draw_imaginarye(gc, round(l), round(b), im)
gc.restore()
if dsu_rasterized:
for zorder, a in dsu_rasterized:
a.draw(renderer)
renderer.stop_rasterizing()
for zorder, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort(key=lambda x: x[0])
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
@docstring.dedent_interpd
def grid(self, b=None, which='major', axis='both', **kwargs):
"""
Turn the axes grids on or off.
Call signature::
grid(self, b=None, which='major', axis='both', **kwargs)
Set the axes grids on or off; *b* is a boolean. (For MATLAB
compatibility, *b* may also be a string, 'on' or 'off'.)
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*.
*which* can be 'major' (default), 'get_minor', or 'both' to control
whether major tick grids, get_minor tick grids, or both are affected.
*axis* can be 'both' (default), 'x', or 'y' to control which
set of gridlines are drawn.
*kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs):
b = True
b = _string_to_bool(b)
if axis == 'x' or axis == 'both':
self.xaxis.grid(b, which=which, **kwargs)
if axis == 'y' or axis == 'both':
self.yaxis.grid(b, which=which, **kwargs)
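# A minimal usage sketch (assumed, not part of this module)::
#
#     ax.grid(True)                                   # major grid on
#     ax.grid(color='r', linestyle='-', linewidth=2)  # kwargs imply b=True
#     ax.grid(False, axis='y')                        # only y gridlines off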
def ticklabel_format(self, **kwargs):
"""
Change the `~matplotlib.ticker.ScalarFormatter` used by
default for linear axes.
Optional keyword arguments:
============ =========================================
Keyword Description
============ =========================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10`m`:sup: to 10`n`:sup:.
Use (0,0) to include all numbers.
*useOffset* [True | False | offset]; if True,
the offset will be calculated as needed;
if False, no offset will be used; if a
numeric offset is specified, it will be
used.
*axis* [ 'x' | 'y' | 'both' ]
*useLocale* If True, format the number according to
the current locale. This affects things
such as the character used for the
decimal separator. If False, use
C-style (English) formatting. The
default setting is controlled by the
axes.formatter.use_locale rcparam.
============ =========================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
useOffset = kwargs.pop('useOffset', None)
useLocale = kwargs.pop('useLocale', None)
axis = kwargs.pop('axis', 'both').lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError("comma style remains to be add_concated")
elif style == '':
sb = None
else:
raise ValueError("%s is not a valid style value")
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useOffset(useOffset)
if useLocale is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useLocale(useLocale)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useLocale(useLocale)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs):
"""
Control behavior of tick locators.
Keyword arguments:
*axis*
['x' | 'y' | 'both'] Axis on which to operate;
default is 'both'.
*tight*
[True | False | None] Parameter passed to :meth:`autoscale_view`.
Default is None, for no change.
Remaining keyword arguments are passed to directly to the
:meth:`~matplotlib.ticker.MaxNLocator.set_params` method.
Typically one might want to reduce the maximum number
of ticks and use tight bounds when plotting small
subplots, for example::
ax.locator_params(tight=True, nbins=4)
Because the locator is involved in autoscaling,
:meth:`autoscale_view` is called automatically after
the parameters are changed.
This presently works only for the
:class:`~matplotlib.ticker.MaxNLocator` used
by default on linear axes, but it may be generalized.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y)
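# A minimal usage sketch (assumed, not part of this module): cap both
# axes at roughly four tick intervals with tight bounds, e.g. for small
# subplots::
#
#     ax.locator_params(tight=True, nbins=4)
#     ax.locator_params(axis='x', nbins=6)   # x axis only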
def tick_params(self, axis='both', **kwargs):
"""
Change the appearance of ticks and tick labels.
Keyword arguments:
*axis* : ['x' | 'y' | 'both']
Axis on which to operate; default is 'both'.
*reset* : [True | False]
If *True*, set total parameters to defaults
before processing other keyword arguments. Default is
*False*.
*which* : ['major' | 'get_minor' | 'both']
Default is 'major'; apply arguments to *which* ticks.
*direction* : ['in' | 'out' | 'inout']
Puts ticks inside the axes, outside the axes, or both.
*length*
Tick length in points.
*width*
Tick width in points.
*color*
Tick color; accepts any mpl color spec.
*pad*
Distance in points between tick and label.
*labelsize*
Tick label font size in points or as a string (e.g. 'large').
*labelcolor*
Tick label color; mpl color spec.
*colors*
Changes the tick color and the label color to the same value:
mpl color spec.
*zorder*
Tick and label zorder.
*bottom*, *top*, *left*, *right* : [bool | 'on' | 'off']
controls whether to draw the respective ticks.
*labelbottom*, *labeltop*, *labelleft*, *labelright*
Boolean or ['on' | 'off'], controls whether to draw the
respective tick labels.
Example::
ax.tick_params(direction='out', length=6, width=2, colors='r')
This will make all major ticks be red, pointing out of the box,
and with dimensions 6 points by 2 points. Tick labels will
also be red.
"""
if axis in ['x', 'both']:
xkw = dict(kwargs)
xkw.pop('left', None)
xkw.pop('right', None)
xkw.pop('labelleft', None)
xkw.pop('labelright', None)
self.xaxis.set_tick_params(**xkw)
if axis in ['y', 'both']:
ykw = dict(kwargs)
ykw.pop('top', None)
ykw.pop('bottom', None)
ykw.pop('labeltop', None)
ykw.pop('labelbottom', None)
self.yaxis.set_tick_params(**ykw)
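# A minimal usage sketch (assumed, not part of this module)::
#
#     ax.tick_params(direction='out', length=6, width=2, colors='r')
#     ax.tick_params(axis='x', labelbottom=False)   # hide x tick labels
#
# Keys that do not apply to the chosen axis (e.g. 'left' for the x axis)
# are stripped before being forwarded, as the code above shows.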
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
"""Return the axis background color"""
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def inverseert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left, auto=None)
def xaxis_inverseerted(self):
"""Returns *True* if the x-axis is inverseerted."""
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the _autoscaleXon attribute.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverseerted():
if lower < upper:
self.set_xlim(upper, lower, auto=None)
else:
self.set_xlim(lower, upper, auto=None)
else:
if lower < upper:
self.set_xlim(lower, upper, auto=None)
else:
self.set_xlim(upper, lower, auto=None)
def get_xlim(self):
"""
Get the x-axis range [*left*, *right*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, left=None, right=None, emit=True, auto=False, **kw):
"""
Call signature::
set_xlim(self, *args, **kwargs):
Set the data limits for the xaxis
Examples::
set_xlim((left, right))
set_xlim(left, right)
set_xlim(left=1) # right unchanged
set_xlim(right=1) # left unchanged
Keyword arguments:
*left*: scalar
The left xlim; *xget_min*, the previous name, may still be used
*right*: scalar
The right xlim; *xget_max*, the previous name, may still be used
*emit*: [ *True* | *False* ]
Notify observers of limit change
*auto*: [ *True* | *False* | *None* ]
Turn *x* autoscaling on (*True*), off (*False*; default),
or leave unchanged (*None*)
Note, the *left* (formerly *xget_min*) value may be greater than
the *right* (formerly *xget_max*).
For example, suppose *x* is years before present.
Then one might use::
set_xlim(5000, 0)
so 5000 years ago is on the left of the plot and the
present is on the right.
Returns the current xlimits as a length 2 tuple
ACCEPTS: length 2 sequence of floats
"""
if 'xget_min' in kw:
left = kw.pop('xget_min')
if 'xget_max' in kw:
right = kw.pop('xget_max')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if right is None and iterable(left):
left,right = left
self._process_unit_info(xdata=(left, right))
if left is not None:
left = self.convert_xunits(left)
if right is not None:
right = self.convert_xunits(right)
old_left, old_right = self.get_xlim()
if left is None: left = old_left
if right is None: right = old_right
if left==right:
warnings.warn(('Attempting to set identical left==right results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.ctotalbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return left, right
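# A minimal usage sketch (assumed, not part of this module): limits may
# be given in either order, e.g. to put the present at the right when x
# is years before present::
#
#     ax.set_xlim(5000, 0)             # decreasing x to the right
#     ax.set_xlim(left=1)              # change the left limit only
#     ax.set_xlim(0, 10, auto=None)    # set limits, keep autoscale state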
def get_xscale(self):
return self.xaxis.get_scale()
get_xscale.__doc__ = "Return the xaxis scale string: %s""" % (
", ".join(mscale.get_scale_names()))
@docstring.dedent_interpd
def set_xscale(self, value, **kwargs):
"""
Call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view(scaley=False)
self._update_transScale()
def get_xticks(self, get_minor=False):
"""Return the x ticks as a list of locations"""
return self.xaxis.get_ticklocs(get_minor=get_minor)
def set_xticks(self, ticks, get_minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, get_minor=get_minor)
def get_xmajorticklabels(self):
"""
Get the xtick labels as a list of :class:`~matplotlib.text.Text`
instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xget_minorticklabels(self):
"""
Get the x get_minor tick labels as a list of
:class:`matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_get_minorticklabels())
def get_xticklabels(self, get_minor=False):
"""
Get the x tick labels as a list of :class:`~matplotlib.text.Text`
instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(get_minor=get_minor))
@docstring.dedent_interpd
def set_xticklabels(self, labels, fontdict=None, get_minor=False, **kwargs):
"""
Call signature::
set_xticklabels(labels, fontdict=None, get_minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
get_minor=get_minor, **kwargs)
def inverseert_yaxis(self):
"Invert the y-axis."
bottom, top = self.get_ylim()
self.set_ylim(top, bottom, auto=None)
def yaxis_inverseerted(self):
"""Returns *True* if the y-axis is inverseerted."""
bottom, top = self.get_ylim()
return top < bottom
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
bottom, top = self.get_ylim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_ybound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the _autoscaleYon attribute.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverseerted():
if lower < upper:
self.set_ylim(upper, lower, auto=None)
else:
self.set_ylim(lower, upper, auto=None)
else:
if lower < upper:
self.set_ylim(lower, upper, auto=None)
else:
self.set_ylim(upper, lower, auto=None)
def get_ylim(self):
"""
Get the y-axis range [*bottom*, *top*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Call signature::
set_ylim(self, *args, **kwargs):
Set the data limits for the yaxis
Examples::
set_ylim((bottom, top))
set_ylim(bottom, top)
set_ylim(bottom=1) # top unchanged
set_ylim(top=1) # bottom unchanged
Keyword arguments:
*bottom*: scalar
The bottom ylim; the previous name, *yget_min*, may still be used
*top*: scalar
The top ylim; the previous name, *yget_max*, may still be used
*emit*: [ *True* | *False* ]
Notify observers of limit change
*auto*: [ *True* | *False* | *None* ]
Turn *y* autoscaling on (*True*), off (*False*; default),
or leave unchanged (*None*)
Note, the *bottom* (formerly *yget_min*) value may be greater than
the *top* (formerly *yget_max*).
For example, suppose *y* is depth in the ocean.
Then one might use::
set_ylim(5000, 0)
so 5000 m depth is at the bottom of the plot and the
surface, 0 m, is at the top.
Returns the current ylimits as a length 2 tuple
ACCEPTS: length 2 sequence of floats
"""
if 'yget_min' in kw:
bottom = kw.pop('yget_min')
if 'yget_max' in kw:
top = kw.pop('yget_max')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and iterable(bottom):
bottom,top = bottom
if bottom is not None:
bottom = self.convert_yunits(bottom)
if top is not None:
top = self.convert_yunits(top)
old_bottom, old_top = self.get_ylim()
if bottom is None: bottom = old_bottom
if top is None: top = old_top
if bottom==top:
warnings.warn(('Attempting to set identical bottom==top results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.ctotalbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return bottom, top
def get_yscale(self):
return self.yaxis.get_scale()
get_yscale.__doc__ = "Return the yaxis scale string: %s""" % (
", ".join(mscale.get_scale_names()))
@docstring.dedent_interpd
def set_yscale(self, value, **kwargs):
"""
Call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view(scalex=False)
self._update_transScale()
def get_yticks(self, get_minor=False):
"""Return the y ticks as a list of locations"""
return self.yaxis.get_ticklocs(get_minor=get_minor)
def set_yticks(self, ticks, get_minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*get_minor*: [ *False* | *True* ]
Sets the get_minor ticks if *True*
"""
return self.yaxis.set_ticks(ticks, get_minor=get_minor)
def get_ymajorticklabels(self):
"""
Get the major y tick labels as a list of
:class:`~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yget_minorticklabels(self):
"""
Get the get_minor y tick labels as a list of
:class:`~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_get_minorticklabels())
def get_yticklabels(self, get_minor=False):
"""
Get the y tick labels as a list of :class:`~matplotlib.text.Text`
instances
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(get_minor=get_minor))
@docstring.dedent_interpd
def set_yticklabels(self, labels, fontdict=None, get_minor=False, **kwargs):
"""
Call signature::
set_yticklabels(labels, fontdict=None, get_minor=False, **kwargs)
Set the y tick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
get_minor=get_minor, **kwargs)
def xaxis_date(self, tz=None):
"""
Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
"""
# should be enough to inform the unit conversion interface
# dates are coming in
self.xaxis.axis_date(tz)
def yaxis_date(self, tz=None):
"""
Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
"""
self.yaxis.axis_date(tz)
def format_xdata(self, x):
"""
Return *x* formatted as a string. This function will use the
attribute self.fmt_xdata if it is callable, else will fall back
on the xaxis major formatter.
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return *y* formatted as a string. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter.
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
"""Return a format string formatting the *x*, *y* coord"""
if x is None:
xs = '???'
else:
xs = self.format_xdata(x)
if y is None:
ys = '???'
else:
ys = self.format_ydata(y)
return 'x=%s y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
"""
return True
def can_pan(self) :
"""
Return *True* if this axes supports any pan/zoom button functionality.
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ *True* | *False* ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverseerse = self.transData.inverseerted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(absolute(dx)>absolute(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*absolute(dx) < absolute(dy):
dx=0
elif 2*absolute(dy) < absolute(dx):
dy=0
elif(absolute(dx)>absolute(dy)):
dy=dy/absolute(dy)*absolute(dx)
else:
dx=dx/absolute(dx)*absolute(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverseerse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = bn.power(10.0, (dx, dy))
start = bn.numset([p.x, p.y])
oldpoints = p.lim.transformed(p.trans)
newpoints = start + alpha * (oldpoints - start)
result = mtransforms.Bbox(newpoints) \
.transformed(p.trans_inverseerse)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
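# A worked sketch (assumed numbers) of the button-3 zoom branch above:
# the mouse displacement is normalised by the axes size and negated, so a
# 20 px rightward, 30 px upward drag on a 400x300 px axes gives
# dx = -20/400 = -0.05 and dy = -30/300 = -0.10. The view limits, taken
# in display space, are then rescaled about the press point::
#
#     alpha     = 10.0 ** (dx, dy)             # ~(0.891, 0.794), both < 1
#     newpoints = start + alpha * (oldpoints - start)
#
# alpha < 1 shrinks the spanned data range, so dragging right/up with the
# right button zooms in and dragging left/down zooms out.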
def get_cursor_props(self):
"""
Return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur.
Register callback functions with the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed', 'ylim_changed'
The connection id is returned - you can use this with
disconnect to disconnect from the axes event.
"""
raise mplDeprecation('use the ctotalbacks CtotalbackRegistry instance '
'instead')
def disconnect(self, cid):
"""disconnect from the Axes event."""
raise mplDeprecation('use the ctotalbacks CtotalbackRegistry instance '
'instead')
def get_children(self):
"""return a list of child artists"""
children = []
children.apd(self.xaxis)
children.apd(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.imaginaryes)
if self.legend_ is not None:
children.apd(self.legend_)
children.extend(self.collections)
children.apd(self.title)
children.apd(self.patch)
children.extend(self.spines.itervalues())
return children
def contains(self,mouseevent):
"""
Test whether the mouse event occurred in the axes.
Returns *True* / *False*, {}
"""
if ctotalable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def contains_point(self, point):
"""
Returns *True* if the point (tuple of x,y) is inside the axes
(the area defined by its patch). A pixel coordinate is
required.
"""
return self.patch.contains_point(point, radius=1.0)
def pick(self, *args):
"""
Call signature::
pick(mouseevent)
Each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has its picker set.
"""
if len(args) > 1:
raise mplDeprecation('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self, args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are numsets; return the distance to the closest point'
x1, y1 = p1
return get_min(bn.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, bn.asnumset(xt), bn.asnumset(yt))
artists = self.lines + self.patches + self.texts
if ctotalable(among):
artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be ctotalable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
@docstring.dedent_interpd
def set_title(self, label, fontdict=None, **kwargs):
"""
Call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
for information on how override and the optional args work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'baseline',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
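# Illustrative usage sketch (not part of this module); assumes an Axes created the
# usual way via matplotlib.pyplot. Any Text property can be passed as a kwarg:
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     t = ax.set_title('Residuals', fontdict={'fontsize': 14}, color='navy')
#     t.set_fontstyle('italic')   # the returned Text instance can be restyled later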
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
"""
Call signature::
set_xlabel(xlabel, fontdict=None, labelpad=None, **kwargs)
Set the label for the xaxis.
*labelpad* is the spacing in points between the label and the x-axis
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
for information on how override and the optional args work
"""
if labelpad is not None: self.xaxis.labelpad = labelpad
return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
"""
Call signature::
set_ylabel(ylabel, fontdict=None, labelpad=None, **kwargs)
Set the label for the yaxis
*labelpad* is the spacing in points between the label and the y-axis
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
for information on how override and the optional args work
"""
if labelpad is not None: self.yaxis.labelpad = labelpad
return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
@docstring.dedent_interpd
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
Add text to the axes.
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ *False* | *True* ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'baseline',
'horizontalalignment' : 'left',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.apd(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
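# Illustrative usage sketch: with transform=ax.transAxes the coordinates are axes
# fractions rather than data units, as described in the docstring above.
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.text(0.5, 0.5, 'centered note',
#             horizontalalignment='center', verticalalignment='center',
#             transform=ax.transAxes, bbox=dict(facecolor='red', alpha=0.5))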
@docstring.dedent_interpd
def annotate(self, *args, **kwargs):
"""
Create an annotation: a piece of text referring to a data
point.
Call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if kwargs.has_key('clip_on'): a.set_clip_path(self.patch)
self.texts.apd(a)
a._remove_method = lambda h: self.texts.remove(h)
return a
#### Lines and spans
@docstring.dedent_interpd
def axhline(self, y=0, xget_min=0, xget_max=1, **kwargs):
"""
Add a horizontal line across the axis.
Call signature::
axhline(y=0, xget_min=0, xget_max=1, **kwargs)
Draw a horizontal line at *y* from *xget_min* to *xget_max*. With the
default values of *xget_min* = 0 and *xget_max* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange::
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange::
>>> axhline(y=1)
* draw a default hline at *y* = .5 that spans the middle half of
the xrange::
>>> axhline(y=.5, xget_min=0.25, xget_max=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not totalowed as a kwarg;"
+ "axhline generates its own transform.")
yget_min, yget_max = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info( ydata=y, kwargs=kwargs )
yy = self.convert_yunits( y )
scaley = (yy<yget_min) or (yy>yget_max)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xget_min,xget_max], [y,y], transform=trans, **kwargs)
self.add_concat_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
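# Illustrative usage sketch: the blended transform above keeps *y* in data units
# while xget_min/xget_max are axes fractions, so the line survives any xlim change.
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([1, 2, 3], [-1, 0.5, 2])
#     ax.axhline(y=0, color='r', linewidth=2)           # spans the full width
#     ax.axhline(y=0.5, xget_min=0.25, xget_max=0.75)   # only the middle half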
@docstring.dedent_interpd
def axvline(self, x=0, yget_min=0, yget_max=1, **kwargs):
"""
Add a vertical line across the axes.
Call signature::
axvline(x=0, yget_min=0, yget_max=1, **kwargs)
Draw a vertical line at *x* from *yget_min* to *yget_max*. With the
default values of *yget_min* = 0 and *yget_max* = 1, this line will
always span the vertical extent of the axes, regardless of the
ylim settings, even if you change them, eg. with the
:meth:`set_ylim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange::
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange::
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange::
>>> axvline(x=.5, yget_min=0.25, yget_max=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not totalowed as a kwarg;"
+ "axvline generates its own transform.")
xget_min, xget_max = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info( xdata=x, kwargs=kwargs )
xx = self.convert_xunits( x )
scalex = (xx<xget_min) or (xx>xget_max)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [yget_min,yget_max] , transform=trans, **kwargs)
self.add_concat_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
@docstring.dedent_interpd
def axhspan(self, yget_min, yget_max, xget_min=0, xget_max=1, **kwargs):
"""
Add a horizontal span (rectangle) across the axis.
Call signature::
axhspan(yget_min, yget_max, xget_min=0, xget_max=1, **kwargs)
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *yget_min* to *yget_max*.
With the default values of *xget_min* = 0 and *xget_max* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes::
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xget_min, xget_max], [yget_min, yget_max], kwargs=kwargs )
# first we need to strip away the units
xget_min, xget_max = self.convert_xunits( [xget_min, xget_max] )
yget_min, yget_max = self.convert_yunits( [yget_min, yget_max] )
verts = (xget_min, yget_min), (xget_min, yget_max), (xget_max, yget_max), (xget_max, yget_min)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_concat_patch(p)
self.autoscale_view(scalex=False)
return p
@docstring.dedent_interpd
def axvspan(self, xget_min, xget_max, yget_min=0, yget_max=1, **kwargs):
"""
Add a vertical span (rectangle) across the axes.
Call signature::
axvspan(xget_min, xget_max, yget_min=0, yget_max=1, **kwargs)
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xget_min* to *xget_max*. With
the default values of *yget_min* = 0 and *yget_max* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top, but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes::
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xget_min, xget_max], [yget_min, yget_max], kwargs=kwargs )
# first we need to strip away the units
xget_min, xget_max = self.convert_xunits( [xget_min, xget_max] )
yget_min, yget_max = self.convert_yunits( [yget_min, yget_max] )
verts = [(xget_min, yget_min), (xget_min, yget_max), (xget_max, yget_max), (xget_max, yget_min)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_concat_patch(p)
self.autoscale_view(scaley=False)
return p
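# Illustrative usage sketch: x limits are data units, y limits are axes fractions,
# mirroring axhspan above with the roles of the axes swapped.
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot(range(5))
#     ax.axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
#     ax.axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)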
@docstring.dedent
def hlines(self, y, xget_min, xget_max, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot horizontal lines.
call signature::
hlines(y, xget_min, xget_max, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xget_min* to *xget_max*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was add_concated.
Required arguments:
*y*:
a 1-D beatnum numset or iterable.
*xget_min* and *xget_max*:
can be scalars or ``len(x)`` beatnum numsets. If they are
scalars, then the respective values are constant, else the
widths of the lines are deterget_mined by *xget_min* and *xget_max*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise mplDeprecation('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
# process the unit information
self._process_unit_info( [xget_min, xget_max], y, kwargs=kwargs )
y = self.convert_yunits( y )
xget_min = self.convert_xunits(xget_min)
xget_max = self.convert_xunits(xget_max)
if not iterable(y): y = [y]
if not iterable(xget_min): xget_min = [xget_min]
if not iterable(xget_max): xget_max = [xget_max]
y = bn.asnumset(y)
xget_min = bn.asnumset(xget_min)
xget_max = bn.asnumset(xget_max)
if len(xget_min)==1:
xget_min = bn.resize( xget_min, y.shape )
if len(xget_max)==1:
xget_max = bn.resize( xget_max, y.shape )
if len(xget_min)!=len(y):
raise ValueError('xget_min and y are unequal sized sequences')
if len(xget_max)!=len(y):
raise ValueError('xget_max and y are unequal sized sequences')
verts = [ ((thisxget_min, thisy), (thisxget_max, thisy))
for thisxget_min, thisxget_max, thisy in zip(xget_min, xget_max, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_concat_collection(coll)
coll.update(kwargs)
if len(y) > 0:
get_minx = get_min(xget_min.get_min(), xget_max.get_min())
get_maxx = get_max(xget_min.get_max(), xget_max.get_max())
get_miny = y.get_min()
get_maxy = y.get_max()
corners = (get_minx, get_miny), (get_maxx, get_maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
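# Illustrative usage sketch: scalar limits are broadcast to len(y) above, so one
# call produces several horizontal segments collected in a single LineCollection.
#     import matplotlib.pyplot as plt
#     import beatnum as bn
#     fig, ax = plt.subplots()
#     y = bn.numset([1.0, 2.0, 3.0])
#     ax.hlines(y, 0, [0.5, 1.0, 1.5], colors='k', linestyles='dashed')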
@docstring.dedent_interpd
def vlines(self, x, yget_min, yget_max, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot vertical lines.
Call signature::
vlines(x, yget_min, yget_max, color='k', linestyles='solid')
Plot vertical lines at each *x* from *yget_min* to *yget_max*. *yget_min*
or *yget_max* can be scalars or len(*x*) beatnum numsets. If they are
scalars, then the respective values are constant, else the
heights of the lines are deterget_mined by *yget_min* and *yget_max*.
*colors* :
A line collection's color args, either a single color
or a ``len(x)`` list of colors
*linestyles* : [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was add_concated.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
"""
if kwargs.get('fmt') is not None:
raise mplDeprecation('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=[yget_min, yget_max], kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
yget_min = self.convert_yunits( yget_min )
yget_max = self.convert_yunits( yget_max )
if not iterable(x): x = [x]
if not iterable(yget_min): yget_min = [yget_min]
if not iterable(yget_max): yget_max = [yget_max]
x = bn.asnumset(x)
yget_min = bn.asnumset(yget_min)
yget_max = bn.asnumset(yget_max)
if len(yget_min)==1:
yget_min = bn.resize( yget_min, x.shape )
if len(yget_max)==1:
yget_max = bn.resize( yget_max, x.shape )
if len(yget_min)!=len(x):
raise ValueError('yget_min and x are unequal sized sequences')
if len(yget_max)!=len(x):
raise ValueError('yget_max and x are unequal sized sequences')
Y = bn.numset([yget_min, yget_max]).T
verts = [ ((thisx, thisyget_min), (thisx, thisyget_max))
for thisx, (thisyget_min, thisyget_max) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_concat_collection(coll)
coll.update(kwargs)
if len(x) > 0:
get_minx = get_min( x )
get_maxx = get_max( x )
get_miny = get_min( get_min(yget_min), get_min(yget_max) )
get_maxy = get_max( get_max(yget_min), get_max(yget_max) )
corners = (get_minx, get_miny), (get_maxx, get_maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
#### Basic plotting
@docstring.dedent_interpd
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index numset 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were add_concated.
By default, each line is assigned a different color specified by a
'color cycle'. To change this behavior, you can edit the
axes.color_cycle rcParam. Alternatively, you can use
:meth:`~matplotlib.axes.Axes.set_default_color_cycle`.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any_condition property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to total those lines, e.g.::
plot(x1, y1, x2, y2, antialised=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12).
See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_concat_line(line)
lines.apd(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
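# Illustrative usage sketch: several x, y, fmt groups in one call, with Line2D
# kwargs applied to every line created by that call.
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([1, 2, 3], [1, 2, 3], 'go-', label='line 1', linewidth=2)
#     ax.plot([1, 2, 3], [1, 4, 9], 'rs', label='line 2')
#     ax.legend()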
@docstring.dedent_interpd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
Plot with data with dates.
Call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ *None* | timezone string | :class:`tzinfo` instance]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ *True* | *False* ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ *False* | *True* ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.dates.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.dates.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.dates.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.dates.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates` for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange` for help on creating the required
floating point dates.
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
@docstring.dedent_interpd
def loglog(self, *args, **kwargs):
"""
Make a plot with log scaling on both the *x* and *y* axis.
Call signature::
loglog(*args, **kwargs)
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
Base of the *x*/*y* logarithm
*subsx*/*subsy*: [ *None* | sequence ]
The location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
*nobnosx*/*nobnosy*: ['mask' | 'clip' ]
Non-positive values in *x* or *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
'nobnosx': kwargs.pop('nobnosx', 'mask'),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nobnosy': kwargs.pop('nobnosy', 'mask'),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
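# Illustrative usage sketch: loglog() switches both axes to log scale and then
# delegates to plot(); base and tick subdivisions are set per axis.
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.loglog([1, 10, 100, 1000], [1, 2, 4, 8], basex=10, basey=2)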
@docstring.dedent_interpd
def semilogx(self, *args, **kwargs):
"""
Make a plot with log scaling on the *x* axis.
Call signature::
semilogx(*args, **kwargs)
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
Base of the *x* logarithm
*subsx*: [ *None* | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
*nobnosx*: [ 'mask' | 'clip' ]
Non-positive values in *x* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
'nobnosx': kwargs.pop('nobnosx', 'mask'),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogy(self, *args, **kwargs):
"""
Make a plot with log scaling on the *y* axis.
call signature::
semilogy(*args, **kwargs)
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pylab.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ *None* | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
*nobnosy*: [ 'mask' | 'clip' ]
Non-positive values in *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nobnosy': kwargs.pop('nobnosy', 'mask'),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def acorr(self, x, **kwargs):
"""
Plot the autocorrelation of *x*.
Call signature::
acorr(x, normlizattioned=True, detrend=mlab.detrend_none, usevlines=True,
get_maxlags=10, **kwargs)
If *normlizattioned* = *True*, normalize the data by the autocorrelation at
0-th lag. *x* is detrended by the *detrend* callable (default no
normalization).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*get_maxlags+1 lag vector
- *c* is the 2*get_maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`beatnum.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is deterget_mined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*get_maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
``(2*len(x)-1)`` lags.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`
For documentation on valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` is top graph, and
:func:`~matplotlib.pyplot.acorr` is bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
@docstring.dedent_interpd
def xcorr(self, x, y, normlizattioned=True, detrend=mlab.detrend_none,
usevlines=True, get_maxlags=10, **kwargs):
"""
Plot the cross correlation between *x* and *y*.
Call signature::
xcorr(self, x, y, normlizattioned=True, detrend=mlab.detrend_none,
usevlines=True, get_maxlags=10, **kwargs)
If *normlizattioned* = *True*, normalize the data by the cross
correlation at 0-th lag. *x* and *y* are detrended by the
*detrend* callable (default no normalization). *x* and *y*
must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*get_maxlags+1`` lag vector
- *c* is the ``2*get_maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`beatnum.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is deterget_mined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*get_maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` is top graph, and
:func:`~matplotlib.pyplot.acorr` is bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(bn.asnumset(x))
y = detrend(bn.asnumset(y))
c = bn.correlate(x, y, mode=2)
if normlizattioned: c/= bn.sqrt(bn.dot(x,x) * bn.dot(y,y))
if get_maxlags is None: get_maxlags = Nx - 1
if get_maxlags >= Nx or get_maxlags < 1:
raise ValueError('get_maxlags must be None or strictly '
'positive < %d'%Nx)
lags = bn.arr_range(-get_maxlags,get_maxlags+1)
c = c[Nx-1-get_maxlags:Nx+get_maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
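# Illustrative usage sketch: cross-correlate two equal-length signals; with
# usevlines=True the stems come from vlines() and *b* is the horizontal axis line.
#     import matplotlib.pyplot as plt
#     import beatnum as bn
#     fig, ax = plt.subplots()
#     x, y = bn.random.randn(100), bn.random.randn(100)
#     lags, c, linecol, b = ax.xcorr(x, y, get_maxlags=10)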
def _get_legend_handles(self, legend_handler_map=None):
"return artists that will be used as handles for legend"
handles_original = self.lines + self.patches + \
self.collections + self.containers
# collections
handler_map = mlegend.Legend.get_default_handler_map()
if legend_handler_map is not None:
handler_map = handler_map.copy()
handler_map.update(legend_handler_map)
handles = []
for h in handles_original:
if h.get_label() == "_nolegend_": #.startswith('_'):
continue
if mlegend.Legend.get_legend_handler(handler_map, h):
handles.apd(h)
return handles
def get_legend_handles_labels(self, legend_handler_map=None):
"""
Return handles and labels for legend
``ax.legend()`` is equivalent to ::
h, l = ax.get_legend_handles_labels()
ax.legend(h, l)
"""
handles = []
labels = []
for handle in self._get_legend_handles(legend_handler_map):
label = handle.get_label()
#if (label is not None and label != '' and not label.startswith('_')):
if label and not label.startswith('_'):
handles.apd(handle)
labels.apd(label)
return handles, labels
def legend(self, *args, **kwargs):
"""
Place a legend on the current axes.
Call signature::
legend(*args, **kwargs)
Places legend at location *loc*. Labels are a sequence of
strings and *loc* can be a string or an integer specifying the
legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
Users can specify any arbitrary location for the legend using the
*bbox_to_anchor* keyword argument. bbox_to_anchor can be an instance
of BboxBase(or its derivatives) or a tuple of 2 or 4 floats.
For example,
loc = 'upper right', bbox_to_anchor = (0.5, 0.5)
will place the legend so that the upper right corner of the legend is at
the center of the axes.
The legend location can be specified in other coordinate systems by using the
*bbox_transform* keyword.
The loc itself can be a 2-tuple giving x,y of the lower-left corner of
the legend in axes coords (*bbox_to_anchor* is ignored).
Keyword arguments:
*prop*: [ *None* | FontProperties | dict ]
A :class:`matplotlib.font_manager.FontProperties`
instance. If *prop* is a dictionary, a new instance will be
created with *prop*. If *None*, use rc settings.
*fontsize*: [ size in points | 'xx-smtotal' | 'x-smtotal' | 'smtotal' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
Set the font size. May be either a size string, relative to
the default font size, or an absoluteolute font size in points. This
argument is only used if prop is not specified.
*numpoints*: integer
The number of points in the legend for line
*scatterpoints*: integer
The number of points in the legend for scatter plot
*scatteroffsets*: list of floats
a list of yoffsets for scatter symbols in legend
*markerscale*: [ *None* | scalar ]
The relative size of legend markers vs. original. If *None*,
use rc settings.
*frameon*: [ *True* | *False* ]
if *True*, draw a frame around the legend.
The default is set by the rcParam 'legend.frameon'
*fancybox*: [ *None* | *False* | *True* ]
if *True*, draw a frame with a round fancybox. If *None*,
use rc settings
*shadow*: [ *None* | *False* | *True* ]
If *True*, draw a shadow behind legend. If *None*,
use rc settings.
*ncol* : integer
number of columns. default is 1
*mode* : [ "expand" | *None* ]
if mode is "expand", the legend will be horizonttotaly expanded
to fill the axes area (or *bbox_to_anchor*)
*bbox_to_anchor* : an instance of BboxBase or a tuple of 2 or 4 floats
the bbox that the legend will be anchored.
*bbox_transform* : [ an instance of Transform | *None* ]
the transform for the bbox. transAxes if *None*.
*title* : string
the legend title
Padding and spacing between various elements use the following
keyword parameters. These values are measured in font-size
units. E.g., a fontsize of 10 points and a handlelength=5
implies a handlelength of 50 points. Values from rcParams
will be used if None.
================ ==================================================================
Keyword Description
================ ==================================================================
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
.. Note:: Not all kinds of artists are supported by the legend command.
See LINK (FIXME) for details.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
.. seealso::
:ref:`plotting-guide-legend`.
"""
if len(args)==0:
handles, labels = self.get_legend_handles_labels()
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(self._get_legend_handles(),
labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(self._get_legend_handles(),
labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
# Why do we need to call "convert_into_one_dim" here? -JJL
# handles = cbook.convert_into_one_dim(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
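# Illustrative usage sketch: the zero-argument form collects handles and labels
# from artists whose label does not start with '_', as implemented above.
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([1, 2, 3], label='data')
#     ax.plot([3, 2, 1], label='_nolegend_')   # excluded from the legend
#     ax.legend(loc='upper left', ncol=1)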
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
"""
Make a step plot.
Call signature::
step(x, y, *args, **kwargs)
Additional keyword args to :func:`step` are the same as those
for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*filter_condition*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i+1]
If 'post', that interval has level y[i]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
"""
filter_condition = kwargs.pop('filter_condition', 'pre')
if filter_condition not in ('pre', 'post', 'mid'):
raise ValueError("'filter_condition' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + filter_condition
return self.plot(x, y, *args, **kwargs)
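# Illustrative usage sketch: step() is plot() with a 'steps-<filter_condition>' linestyle,
# so the keyword controls on which side of each x the level is held.
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.step([0, 1, 2, 3], [1, 2, 0, 3], filter_condition='post')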
@docstring.dedent_interpd
def bar(self, left, height, width=0.8, bottom=None, **kwargs):
"""
Make a bar plot.
Call signature::
bar(left, height, width=0.8, bottom=0, **kwargs)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*error_kw* dictionary of kwargs to be passed to
errorbar method. *ecolor* and *capsize*
may be specified here rather than as
independent kwargs.
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: *xerr* and *yerr* are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_pile_operationed.py
"""
if not self._hold: self.cla()
color = kwargs.pop('color', None)
edgecolor = kwargs.pop('edgecolor', None)
linewidth = kwargs.pop('linewidth', None)
# Because xerr and yerr will be passed to errorbar,
# most dimension checking and processing will be left
# to the errorbar method.
xerr = kwargs.pop('xerr', None)
yerr = kwargs.pop('yerr', None)
error_kw = kwargs.pop('error_kw', dict())
ecolor = kwargs.pop('ecolor', None)
capsize = kwargs.pop('capsize', 3)
error_kw.setdefault('ecolor', ecolor)
error_kw.setdefault('capsize', capsize)
align = kwargs.pop('align', 'edge')
orientation = kwargs.pop('orientation', 'vertical')
log = kwargs.pop('log', False)
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log', nobnosy='clip')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log', nobnosx='clip')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError('inversealid orientation: %s' % orientation)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_numset(color))
if len(color) == 0: # until to_rgba_numset is changed
color = [[0,0,0,0]]
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_numset(edgecolor))
if len(edgecolor) == 0: # until to_rgba_numset is changed
edgecolor = [[0,0,0,0]]
if len(edgecolor) < nbars:
edgecolor *= nbars
# FIXME: convert the following to proper ibnut validation
# raising ValueError; don't use assert for this.
assert len(left)==nbars, "incompatible sizes: argument 'left' must be length %d or scalar" % nbars
assert len(height)==nbars, ("incompatible sizes: argument 'height' must be length %d or scalar" %
nbars)
assert len(width)==nbars, ("incompatible sizes: argument 'width' must be length %d or scalar" %
nbars)
assert len(bottom)==nbars, ("incompatible sizes: argument 'bottom' must be length %d or scalar" %
nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
left = self.convert_xunits( left )
width = self.convert_xunits( width )
if xerr is not None:
xerr = self.convert_xunits( xerr )
if self.yaxis is not None:
bottom = self.convert_yunits( bottom )
height = self.convert_yunits( height )
if yerr is not None:
yerr = self.convert_yunits( yerr )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
raise ValueError('inversealid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = absolute(h)
if w<0:
l += w
w = absolute(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label='_nolegend_'
)
r.update(kwargs)
r.get_path()._interpolation_steps = 100
#print r.get_label(), label, 'label' in kwargs
self.add_concat_patch(r)
patches.apd(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than numsets to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than numsets to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
if "label" not in error_kw:
error_kw["label"] = '_nolegend_'
errorbar = self.errorbar(x, y,
yerr=yerr, xerr=xerr,
fmt=None, **error_kw)
else:
errorbar = None
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xget_min, xget_max = self.dataLim.intervalx
xget_min = bn.aget_min([w for w in width if w > 0])
if xerr is not None:
xget_min = xget_min - bn.aget_max(xerr)
xget_min = get_max(xget_min*0.9, 1e-100)
self.dataLim.intervalx = (xget_min, xget_max)
if adjust_ylim:
yget_min, yget_max = self.dataLim.intervaly
yget_min = bn.aget_min([h for h in height if h > 0])
if yerr is not None:
yget_min = yget_min - bn.aget_max(yerr)
yget_min = get_max(yget_min*0.9, 1e-100)
self.dataLim.intervaly = (yget_min, yget_max)
self.autoscale_view()
bar_container = BarContainer(patches, errorbar, label=label)
self.add_concat_container(bar_container)
return bar_container
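# Illustrative usage sketch: bars are positioned by their left edges by default;
# *yerr* is forwarded to errorbar() and the patches come back in a BarContainer.
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.bar([0, 1, 2], [3, 5, 2], width=0.8,
#            yerr=[0.5, 0.4, 0.6], color='c', ecolor='k')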
@docstring.dedent_interpd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
Make a horizontal bar plot.
Call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
@docstring.dedent_interpd
def broken_barh(self, xranges, yrange, **kwargs):
"""
Plot horizontal bars.
Call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xget_min*, *xwidth*)
*yrange* sequence of (*yget_min*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_concat_collection(col, autolim=True)
self.autoscale_view()
return col
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-',
bottom=None, label=None):
"""
Create a stem plot.
Call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
This `document <http://www.mathworks.com/help/techdoc/ref/stem.html>`_
for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/stem_plot.py
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt, label="_nolegend_")
if bottom is None:
bottom = 0
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [bottom, thisy], linefmt,
label="_nolegend_")
stemlines.apd(l)
baseline, = self.plot([bn.aget_min(x), bn.aget_max(x)], [bottom,bottom],
basefmt, label="_nolegend_")
self.hold(remember_hold)
stem_container = StemContainer((markerline, stemlines, baseline),
label=label)
self.add_concat_container(stem_container)
return stem_container
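# Illustrative usage sketch: one marker per point, one vertical stem per point and
# a baseline at *bottom*, returned together in a StemContainer.
#     import matplotlib.pyplot as plt
#     import beatnum as bn
#     fig, ax = plt.subplots()
#     t = bn.linspace(0.1, 2 * bn.pi, 10)
#     ax.stem(t, bn.cos(t), linefmt='b-', markerfmt='bo', basefmt='r-')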
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1, startangle=None, radius=None):
r"""
Plot a pie chart.
Call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1, startangle=None, radius=None)
Make a pie chart of numset *x*. The fractional area of each
wedge is given by x/total_count(x). If total_count(x) <= 1, then the values
of x give the fractional area directly and the numset will not
be normalized. The wedges are plotted counterclockwise,
by default starting from the x-axis.
Keyword arguments:
*explode*: [ *None* | len(x) sequence ]
If not *None*, is a ``len(x)`` numset which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ *None* | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ *None* | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ *None* | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie piece and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ *False* | *True* ]
Draw a shadow beneath the pie.
*startangle*: [ *None* | Offset angle ]
If not *None*, rotates the start of the pie chart by *angle*
degrees counterclockwise from the x-axis.
*radius*: [ *None* | scalar ]
The radius of the pie, if *radius* is *None* it will be set to 1.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is *None*, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), filter_condition *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
"""
self.set_frame_on(False)
x = bn.asnumset(x).convert_type(bn.float32)
sx = float(x.total_count())
if sx>1: x = bn.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
if radius is None:
radius = 1
# Starting theta1 is the start fraction of the circle
if startangle is None:
theta1 = 0
else:
theta1 = startangle / 360.0
texts = []
pieces = []
autotexts = []
i = 0
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
pieces.apd(w)
self.add_concat_patch(w)
w.set_label(label)
if shadow:
# make sure to add_concat a shadow after the ctotal to
# add_concat_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
shad.set_label('_nolegend_')
self.add_concat_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.apd(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif ctotalable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be ctotalable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.apd(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None:
return pieces, texts
else:
return pieces, texts, autotexts
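# Usage sketch: a minimal example of the pie() method above, assuming `ax` is
# an instance of this Axes class obtained elsewhere; the data are invented for
# illustration only.
#
#   fracs = [15, 30, 45, 10]                    # normalized, since the values sum to > 1
#   labels = ['frogs', 'hogs', 'dogs', 'logs']
#   explode = [0, 0.05, 0, 0]                   # pull the second wedge out slightly
#   patches, texts, autotexts = ax.pie(fracs, explode=explode, labels=labels,
#                                      autopct='%1.1f%%', shadow=True,
#                                      startangle=90)
#
# With autopct=None the same call returns only (patches, texts), as documented above.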
@docstring.dedent_interpd
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1, capthick=None,
**kwargs):
"""
Plot an errorbar graph.
Ctotal signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1,
capthick=None)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can total be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, or 2xN numset-like ]
If a scalar number, len(N) numset-like object, or an Nx1
numset-like object, errorbars are drawn at +/-value relative
to the data.
If a sequence of shape 2xN, errorbars are drawn at -row1
and +row2 relative to the data.
*fmt*: '-'
The plot format symbol. If *fmt* is *None*, only the
errorbars are plotted. This is used for add_concating
errorbars to a bar plot, for example.
*ecolor*: [ *None* | mpl color ]
A matplotlib color arg which gives the color the errorbar lines;
if *None*, use the marker color.
*elinewidth*: scalar
The linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
The length of the error bar caps in points
*capthick*: scalar
An alias kwarg to *markeredgewidth* (a.k.a. - *mew*). This
setting is a more sensible name for the property that
controls the thickness of the error bar cap in points. For
backwards compatibility, if *mew* or *markeredgewidth* are given,
then they will over-ride *capthick*. This may change in future
releases.
*barsabove*: [ *True* | *False* ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims* / *uplims* / *xlolims* / *xuplims*: [ *False* | *True* ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
*errorevery*: positive integer
subsamples the errorbars. E.g. if errorevery=5, errorbars for every
5th datapoint will be plotted. The data plot itself still shows
total data points.
All other keyword arguments are passed on to the plot command for the
markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
filter_condition *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Returns (*plotline*, *caplines*, *barlinecols*):
*plotline*: :class:`~matplotlib.lines.Line2D` instance
*x*, *y* plot markers and/or line
*caplines*: list of error bar cap
:class:`~matplotlib.lines.Line2D` instances
*barlinecols*: list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
"""
if errorevery < 1:
raise ValueError('errorevery has to be a strictly positive integer')
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
holdstate = self._hold
self._hold = True
label = kwargs.pop("label", None)
# make sure total the args are iterable; use lists not numsets to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,label="_nolegend_", **kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
if 'alpha' in kwargs:
lines_kw['alpha'] = kwargs['alpha']
if 'zorder' in kwargs:
lines_kw['zorder'] = kwargs['zorder']
# numsets fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = bn.asnumset([lolims]*len(x), bool)
else: lolims = bn.asnumset(lolims, bool)
if not iterable(uplims): uplims = bn.numset([uplims]*len(x), bool)
else: uplims = bn.asnumset(uplims, bool)
if not iterable(xlolims): xlolims = bn.numset([xlolims]*len(x), bool)
else: xlolims = bn.asnumset(xlolims, bool)
if not iterable(xuplims): xuplims = bn.numset([xuplims]*len(x), bool)
else: xuplims = bn.asnumset(xuplims, bool)
everymask = bn.arr_range(len(x)) % errorevery == 0
def xyfilter_condition(xs, ys, mask):
"""
return xs[mask], ys[mask] filter_condition mask is True but xs and
ys are not numsets
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if capthick is not None:
# 'mew' has higher priority, I believe,
# if both 'mew' and 'markeredgewidth' exists.
# So, save capthick to markeredgewidth so that
# explicitly setting mew or markeredgewidth will
# over-write capthick.
plot_kw['markeredgewidth'] = capthick
# For backwards-compat, totalow explicit setting of
# 'mew' or 'markeredgewidth' to over-ride capthick.
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if 'alpha' in kwargs:
plot_kw['alpha'] = kwargs['alpha']
if 'zorder' in kwargs:
plot_kw['zorder'] = kwargs['zorder']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than numsets to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than numsets to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
yo, _ = xyfilter_condition(y, right, everymask)
lo, ro= xyfilter_condition(left, right, everymask)
barcols.apd( self.hlines(yo, lo, ro, **lines_kw ) )
if capsize > 0:
if xlolims.any_condition():
# can't use beatnum logical indexing since left and
# y are lists
leftlo, ylo = xyfilter_condition(left, y, xlolims & everymask)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xyfilter_condition(left, y, xlolims & everymask)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
leftlo, ylo = xyfilter_condition(left, y, everymask)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
if xuplims.any_condition():
rightup, yup = xyfilter_condition(right, y, xuplims & everymask)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xyfilter_condition(right, y, xuplims & everymask)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
rightup, yup = xyfilter_condition(right, y, everymask)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than numsets to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than numsets to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
xo, _ = xyfilter_condition(x, lower, everymask)
lo, uo= xyfilter_condition(lower, upper, everymask)
barcols.apd( self.vlines(xo, lo, uo, **lines_kw) )
if capsize > 0:
if lolims.any_condition():
xlo, lowerlo = xyfilter_condition(x, lower, lolims & everymask)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xyfilter_condition(x, lower, lolims & everymask)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
xlo, lowerlo = xyfilter_condition(x, lower, everymask)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
if uplims.any_condition():
xup, upperup = xyfilter_condition(x, upper, uplims & everymask)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xyfilter_condition(x, upper, uplims & everymask)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
xup, upperup = xyfilter_condition(x, upper, everymask)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines.color_cycle.next()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
self._hold = holdstate
errorbar_container = ErrorbarContainer((l0, tuple(caplines), tuple(barcols)),
has_xerr=(xerr is not None),
has_yerr=(yerr is not None),
label=label)
self.containers.apd(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
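# Usage sketch: a minimal example of errorbar() as defined above, assuming `ax`
# is an instance of this Axes class; the data and error values are invented.
#
#   x = [1, 2, 3, 4, 5]
#   y = [2.0, 3.5, 3.0, 4.5, 5.0]
#   yerr = [0.3, 0.2, 0.4, 0.3, 0.5]            # symmetric vertical errors
#   container = ax.errorbar(x, y, yerr=yerr, fmt='o', ecolor='r',
#                           capsize=4, errorevery=2)
#   plotline, caplines, barlinecols = container # per the docstring's return value
#
# A 2xN yerr sequence gives asymmetric -row1/+row2 errors instead of symmetric ones.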
def boxplot(self, x, notch=False, sym='b+', vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None):
"""
Make a box and whisker plot.
Ctotal signature::
boxplot(x, notch=False, sym='+', vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Function Arguments:
*x* :
Array or a sequence of vectors.
*notch* : [ False (default) | True ]
If False (default), produces a rectangular box plot.
If True, will produce a notched box plot
*sym* : [ default 'b+' ]
The default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
*vert* : [ False | True (default) ]
If True (default), makes the boxes vertical.
If False, makes horizontal boxes.
*whis* : [ default 1.5 ]
Defines the length of the whiskers as a function of the inner
quartile range. They extend to the most extreme data point
within ( ``whis*(75%-25%)`` ) data range.
*bootstrap* : [ *None* (default) | integer ]
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If bootstrap==None,
no bootstrapping is performed, and notches are calculated
using a Gaussian-based asymptotic approximation (see <NAME>.,
<NAME>., and <NAME>., 1978, and <NAME>,
1967). Otherwise, bootstrap specifies the number of times to
bootstrap the median to deterget_mine its 95% confidence intervals.
Values between 1000 and 10000 are recommended.
*usermedians* : [ default None ]
An numset or sequence whose first dimension (or length) is
compatible with *x*. This overrides the medians computed by
matplotlib for each element of *usermedians* that is not None.
When an element of *usermedians* == None, the median will be
computed directly as normlizattional.
*conf_intervals* : [ default None ]
Array or sequence whose first dimension (or length) is compatible
with *x* and whose second dimension is 2. When the current element
of *conf_intervals* is not None, the notch locations computed by
matplotlib are overridden (astotal_counting notch is True). When an element of
*conf_intervals* is None, boxplot computes the notches using the method
specified by the other kwargs (e.g. *bootstrap*).
*positions* : [ default 1,2,...,n ]
Sets the horizontal positions of the boxes. The ticks and limits
are automatictotaly set to match the positions.
*widths* : [ default 0.5 ]
Either a scalar or a vector and sets the width of each box. The
default is 0.5, or ``0.15*(distance between extreme positions)``
if that is smtotaler.
*patch_artist* : [ False (default) | True ]
If False produces boxes with the Line2D artist
If True produces boxes with the Patch artist
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created. That dictionary has the following keys
(astotal_counting vertical boxplots):
- boxes: the main body of the boxplot showing the quartiles
and the median's confidence intervals if enabled.
- medians: horizontal lines at the median of each box.
- whiskers: the vertical lines extending to the most extreme,
n-outlier data points.
- caps: the horizontal lines at the ends of the whiskers.
- fliers: points representing data that extend beyond the
whiskers (outliers).
**Example:**
.. plot:: pyplots/boxplot_demo.py
"""
def bootstrapMedian(data, N=5000):
# deterget_mine 95% confidence intervals of the median
M = len(data)
percentile = [2.5,97.5]
estimate = bn.zeros(N)
for n in range(N):
bsIndex = bn.random.random_integers(0,M-1,M)
bsData = data[bsIndex]
estimate[n] = mlab.prctile(bsData, 50)
CI = mlab.prctile(estimate, percentile)
return CI
def computeConfInterval(data, med, iq, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = bootstrapMedian(data, N=bootstrap)
notch_get_min = CI[0]
notch_get_max = CI[1]
else:
# Estimate notch locations using Gaussian-based
# asymptotic approximation.
#
# For discussion: <NAME>., <NAME>.,
# and <NAME>. (1978) "Variations of
# Boxplots", The American Statistician, 32:12-16.
N = len(data)
notch_get_min = med - 1.57*iq/bn.sqrt(N)
notch_get_max = med + 1.57*iq/bn.sqrt(N)
return notch_get_min, notch_get_max
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.asview()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError("ibnut x can have no more than 2 dimensions")
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# sanitize user-ibnut medians
msg1 = "usermedians must either be a list/tuple or a 1d numset"
msg2 = "usermedians' length must be compatible with x"
if usermedians is not None:
if hasattr(usermedians, 'shape'):
if len(usermedians.shape) != 1:
raise ValueError(msg1)
elif usermedians.shape[0] != col:
raise ValueError(msg2)
elif len(usermedians) != col:
raise ValueError(msg2)
#sanitize user-ibnut confidence intervals
msg1 = "conf_intervals must either be a list of tuples or a 2d numset"
msg2 = "conf_intervals' length must be compatible with x"
msg3 = "each conf_interval, if specificied, must have two values"
if conf_intervals is not None:
if hasattr(conf_intervals, 'shape'):
if len(conf_intervals.shape) != 2:
raise ValueError(msg1)
elif conf_intervals.shape[0] != col:
raise ValueError(msg2)
elif conf_intervals.shape[1] != 2:
raise ValueError(msg3)
else:
if len(conf_intervals) != col:
raise ValueError(msg2)
for ci in conf_intervals:
if ci is not None and len(ci) != 2:
raise ValueError(msg3)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = get_max(positions) - get_min(positions)
widths = get_min(0.15*get_max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = bn.create_ones((col,), float) * widths
# loop through columns, add_concating each to plot
self.hold(True)
for i, pos in enumerate(positions):
d = bn.asview(x[i])
row = len(d)
if row==0:
# no data, skip this position
continue
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# replace with ibnut medians if available
if usermedians is not None:
if usermedians[i] is not None:
med = usermedians[i]
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = bn.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = get_max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = bn.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = get_min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = bn.compress( d > wisk_hi, d )
flier_lo = bn.compress( d < wisk_lo, d )
flier_hi_x = bn.create_ones(flier_hi.shape[0]) * pos
flier_lo_x = bn.create_ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_get_min = pos - widths[i] * 0.5
box_x_get_max = pos + widths[i] * 0.5
wisk_x = bn.create_ones(2) * pos
cap_x_get_min = pos - widths[i] * 0.25
cap_x_get_max = pos + widths[i] * 0.25
cap_x = [cap_x_get_min, cap_x_get_max]
# get y location for median
med_y = [med, med]
# calculate 'notch' plot
if notch:
# conf. intervals from user, if available
if conf_intervals is not None and conf_intervals[i] is not None:
notch_get_max = bn.get_max(conf_intervals[i])
notch_get_min = bn.get_min(conf_intervals[i])
else:
notch_get_min, notch_get_max = computeConfInterval(d, med, iq,
bootstrap)
# make our notched box vectors
box_x = [box_x_get_min, box_x_get_max, box_x_get_max, cap_x_get_max, box_x_get_max,
box_x_get_max, box_x_get_min, box_x_get_min, cap_x_get_min, box_x_get_min,
box_x_get_min ]
box_y = [q1, q1, notch_get_min, med, notch_get_max, q3, q3, notch_get_max,
med, notch_get_min, q1]
# make our median line vectors
med_x = [cap_x_get_min, cap_x_get_max]
med_y = [med, med]
# calculate 'regular' plot
else:
# make our box vectors
box_x = [box_x_get_min, box_x_get_max, box_x_get_max, box_x_get_min, box_x_get_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_get_min, box_x_get_max]
def to_vc(xs,ys):
# convert arguments to verts and codes
verts = []
#codes = []
for xi,yi in zip(xs,ys):
verts.apd( (xi,yi) )
verts.apd( (0,0) ) # ignored
codes = [mpath.Path.MOVETO] + \
[mpath.Path.LINETO]*(len(verts)-2) + \
[mpath.Path.CLOSEPOLY]
return verts,codes
def patch_list(xs,ys):
verts,codes = to_vc(xs,ys)
path = mpath.Path( verts, codes )
patch = mpatches.PathPatch(path)
self.add_concat_artist(patch)
return [patch]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
def dopatch(xs,ys):
return patch_list(xs,ys)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
def dopatch(xs,ys):
xs,ys = ys,xs # flip X, Y
return patch_list(xs,ys)
if patch_artist:
median_color = 'k'
else:
median_color = 'r'
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
if patch_artist:
boxes.extend(dopatch(box_x, box_y))
else:
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, median_color+'-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = get_min(positions)-0.5, get_max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
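# Usage sketch: a minimal example of boxplot() as defined above, assuming `ax`
# is an instance of this Axes class; the sample vectors are invented.
#
#   data = [[1, 2, 3, 4, 5, 9], [2, 3, 3, 4, 6], [5, 5, 6, 7, 12]]
#   result = ax.boxplot(data, notch=True, sym='r+', bootstrap=1000, widths=0.6)
#   medians = result['medians']                 # dict of Line2D lists, keyed as documented
#
# With bootstrap=1000 the notch locations come from bootstrapMedian() rather
# than the Gaussian-based approximation in computeConfInterval().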
@docstring.dedent_interpd
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, normlizattion=None,
vget_min=None, vget_max=None, alpha=None, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
Make a scatter plot.
Ctotal signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, normlizattion=None,
vget_min=None, vget_max=None, alpha=None, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, filter_condition *x*, *y* are
converted to 1-D sequences which must be of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an numset of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *normlizattion* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an numset
of values to be colormapped. *c* can be a 2-D numset in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
%(MarkerTable)s
Any or total of *x*, *y*, *s*, and *c* may be masked numsets, in
which case total masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normlizattionalization
arguments will be used only if *c* is an numset of floats.
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance or registered
name. If *None*, defaults to rc ``imaginarye.cmap``. *cmap* is
only used if *c* is an numset of floats.
*normlizattion*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luget_minance data to 0, 1. If *None*, use the default
:func:`normlizattionalize`. *normlizattion* is only used if *c* is an numset
of floats.
*vget_min*/*vget_max*:
*vget_min* and *vget_max* are used in conjunction with normlizattion to
normlizattionalize luget_minance data. If either are *None*, the get_min and
get_max of the color numset *C* is used. Note if you pass a
*normlizattion* instance, your settings for *vget_min* and *vget_max* will
be ignored.
*alpha*: ``0 <= scalar <= 1`` or *None*
The alpha value for the patches
*linewidths*: [ *None* | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
The string 'none' to plot faces with no outlines
*facecolors*:
The string 'none' to plot unmasked_fill outlines
Here are the standard descriptions of total the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# bn.ma.asview yields an ndnumset, not a masked numset,
# unless its argument is a masked numset.
x = bn.ma.asview(x)
y = bn.ma.asview(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
s = bn.ma.asview(s) # This doesn't have to match x, y in size.
c_is_stringy = is_string_like(c) or is_sequence_of_strings(c)
if not c_is_stringy:
c = bn.asany_conditionnumset(c)
if c.size == x.size:
c = bn.ma.asview(c)
x, y, s, c = cbook.remove_operation_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
if c_is_stringy:
colors = mcolors.colorConverter.to_rgba_numset(c, alpha)
else:
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if c.size == x.size:
colors = None # use cmap, normlizattion after collection is created
else:
colors = mcolors.colorConverter.to_rgba_numset(c, alpha)
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
mplDeprecation) # 2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(
marker_obj.get_transform())
if not marker_obj.is_masked_fill():
edgecolors = 'face'
collection = mcoll.PathCollection(
(path,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = kwargs.pop('transform', self.transData),
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if normlizattion is not None: assert(isinstance(normlizattion, mcolors.Normalize))
collection.set_numset(bn.asnumset(c))
collection.set_cmap(cmap)
collection.set_normlizattion(normlizattion)
if vget_min is not None or vget_max is not None:
collection.set_clim(vget_min, vget_max)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform total the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padd_concating if there is any_conditionthing to draw.
if self._xmargin < 0.05 and x.size > 0 :
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0 :
self.set_ymargin(0.05)
self.add_concat_collection(collection)
self.autoscale_view()
return collection
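# Usage sketch: a minimal example of scatter() with color mapping, assuming
# `ax` is an instance of this Axes class; the numbers are invented.
#
#   x = [0.1, 0.4, 0.5, 0.7, 0.9]
#   y = [0.3, 0.2, 0.8, 0.6, 0.4]
#   sizes = [20, 40, 80, 160, 320]              # marker areas in points^2
#   values = [0.0, 0.25, 0.5, 0.75, 1.0]        # same length as x, so colormapped
#   col = ax.scatter(x, y, s=sizes, c=values, marker='o', alpha=0.7,
#                    edgecolors='none')
#
# Because `values` has the same length as x, it is mapped through the *cmap*
# and *normlizattion* kwargs rather than being read as an RGB(A) sequence.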
@docstring.dedent_interpd
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear', extent = None,
cmap=None, normlizattion=None, vget_min=None, vget_max=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function = bn.average, get_mincnt=None, marginals=False,
**kwargs):
"""
Make a hexagonal binning plot.
Ctotal signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, normlizattion=None, vget_min=None, vget_max=None,
alpha=None, linewidths=None, edgecolors='none'
reduce_C_function = bn.average, get_mincnt=None, marginals=True
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, filter_condition *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is *None*
(the default), this is a hist_operation of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to beatnum's average function (bn.average). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked numsets, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ *None* | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Interntotaly, :math:`log_{10}(i+1)` is used to
deterget_mine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
*get_mincnt*: [ *None* | a positive integer ]
If not *None*, only display cells with more than *get_mincnt*
number of points in the cell
*marginals*: [ *True* | *False* ]
if marginals is *True*, plot the marginal density as
colormapped rectangles along the bottom of the x-axis and
left of the y-axis
*extent*: [ *None* | scalars (left, right, bottom, top) ]
The limits of the bins. The default assigns the limits
based on gridsize, x, y, xscale and yscale.
Other keyword arguments controlling color mapping and normlizattionalization
arguments:
*cmap*: [ *None* | Colormap ]
a :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``imaginarye.cmap``.
*normlizattion*: [ *None* | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luget_minance data to 0,1.
*vget_min* / *vget_max*: scalar
*vget_min* and *vget_max* are used in conjunction with *normlizattion* to normlizattionalize
luget_minance data. If either are *None*, the get_min and get_max of the color
numset *C* is used. Note if you pass a normlizattion instance, your settings
for *vget_min* and *vget_max* will be ignored.
*alpha*: scalar between 0 and 1, or *None*
the alpha value for the patches
*linewidths*: [ *None* | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ *None* | ``'none'`` | mpl color | color sequence ]
If ``'none'``, draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of total the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_numset` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon. If *marginals* is *True*, horizontal
bar and vertical bar (both PolyCollections) will be attached
to the return collection as attributes *hbar* and *vbar*.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.remove_operation_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = bn.numset(x, float)
y = bn.numset(y, float)
if xscale=='log':
if bn.any_condition(x <= 0.0):
raise ValueError("x contains non-positive values, so can not"
" be log-scaled")
x = bn.log10(x)
if yscale=='log':
if bn.any_condition(y <= 0.0):
raise ValueError("y contains non-positive values, so can not"
" be log-scaled")
y = bn.log10(y)
if extent is not None:
xget_min, xget_max, yget_min, yget_max = extent
else:
xget_min = bn.aget_min(x)
xget_max = bn.aget_max(x)
yget_min = bn.aget_min(y)
yget_max = bn.aget_max(y)
# In the x-direction, the hexagons exactly cover the region from
# xget_min to xget_max. Need some padd_concating to avoid roundoff errors.
padd_concating = 1.e-9 * (xget_max - xget_min)
xget_min -= padd_concating
xget_max += padd_concating
sx = (xget_max-xget_min) / nx
sy = (yget_max-yget_min) / ny
if marginals:
xorig = x.copy()
yorig = y.copy()
x = (x-xget_min)/sx
y = (y-yget_min)/sy
ix1 = bn.round(x).convert_type(int)
iy1 = bn.round(y).convert_type(int)
ix2 = bn.floor(x).convert_type(int)
iy2 = bn.floor(y).convert_type(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = bn.zeros(n)
# Create appropriate views into "accum" numset.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]]+=1
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]]+=1
# threshold
if get_mincnt is not None:
for i in xrange(nx1):
for j in xrange(ny1):
if lattice1[i,j]<get_mincnt:
lattice1[i,j] = bn.nan
for i in xrange(nx2):
for j in xrange(ny2):
if lattice2[i,j]<get_mincnt:
lattice2[i,j] = bn.nan
accum = bn.hpile_operation((
lattice1.convert_type(float).asview(), lattice2.convert_type(float).asview()))
good_idxs = ~bn.ifnan(accum)
else:
if get_mincnt is None:
get_mincnt = 0
# create accumulation numsets
lattice1 = bn.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = bn.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]].apd( C[i] )
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]].apd( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals)>get_mincnt:
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = bn.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals)>get_mincnt:
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = bn.nan
accum = bn.hpile_operation((
lattice1.convert_type(float).asview(), lattice2.convert_type(float).asview()))
good_idxs = ~bn.ifnan(accum)
offsets = bn.zeros((n, 2), float)
offsets[:nx1*ny1,0] = bn.duplicate(bn.arr_range(nx1), ny1)
offsets[:nx1*ny1,1] = bn.tile(bn.arr_range(ny1), nx1)
offsets[nx1*ny1:,0] = bn.duplicate(bn.arr_range(nx2) + 0.5, ny2)
offsets[nx1*ny1:,1] = bn.tile(bn.arr_range(ny2), nx2) + 0.5
offsets[:,0] *= sx
offsets[:,1] *= sy
offsets[:,0] += xget_min
offsets[:,1] += yget_min
# remove accumulation bins with no data
offsets = offsets[good_idxs,:]
accum = accum[good_idxs]
polygon = bn.zeros((6, 2), float)
polygon[:,0] = sx * bn.numset([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
polygon[:,1] = sy * bn.numset([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
if edgecolors=='none':
edgecolors = 'face'
if xscale == 'log' or yscale == 'log':
polygons = bn.expand_dims(polygon, 0) + bn.expand_dims(offsets, 1)
if xscale == 'log':
polygons[:, :, 0] = 10.0 ** polygons[:, :, 0]
xget_min = 10.0 ** xget_min
xget_max = 10.0 ** xget_max
self.set_xscale(xscale)
if yscale == 'log':
polygons[:, :, 1] = 10.0 ** polygons[:, :, 1]
yget_min = 10.0 ** yget_min
yget_max = 10.0 ** yget_max
self.set_yscale(yscale)
collection = mcoll.PolyCollection(
polygons,
edgecolors=edgecolors,
linewidths=linewidths,
)
else:
collection = mcoll.PolyCollection(
[polygon],
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=mtransforms.IdentityTransform(),
offset_position="data"
)
if isinstance(normlizattion, mcolors.LogNorm):
if (accum==0).any_condition():
# make sure we have no zeros
accum += 1
# autoscale the normlizattion with current accum values if it hasn't
# been set
if normlizattion is not None:
if normlizattion.vget_min is None and normlizattion.vget_max is None:
normlizattion.autoscale(accum)
# Transform accum if needed
if bins=='log':
accum = bn.log10(accum+1)
elif bins!=None:
if not iterable(bins):
get_minimum, get_maximum = get_min(accum), get_max(accum)
bins-=1 # one less edge than bins
bins = get_minimum + (get_maximum-get_minimum)*bn.arr_range(bins)/bins
bins = bn.sort(bins)
accum = bins.find_sorted(accum)
if normlizattion is not None: assert(isinstance(normlizattion, mcolors.Normalize))
collection.set_numset(accum)
collection.set_cmap(cmap)
collection.set_normlizattion(normlizattion)
collection.set_alpha(alpha)
collection.update(kwargs)
if vget_min is not None or vget_max is not None:
collection.set_clim(vget_min, vget_max)
else:
collection.autoscale_None()
corners = ((xget_min, yget_min), (xget_max, yget_max))
self.update_datalim( corners)
self.autoscale_view(tight=True)
# add_concat the collection last
self.add_concat_collection(collection)
if not marginals:
return collection
if C is None:
C = bn.create_ones(len(x))
def coarse_bin(x, y, coarse):
ind = coarse.find_sorted(x).clip(0, len(coarse)-1)
mus = bn.zeros(len(coarse))
for i in range(len(coarse)):
mu = reduce_C_function(y[ind==i])
mus[i] = mu
return mus
coarse = bn.linspace(xget_min, xget_max, gridsize)
xcoarse = coarse_bin(xorig, C, coarse)
valid = ~bn.ifnan(xcoarse)
verts, values = [], []
for i,val in enumerate(xcoarse):
thisget_min = coarse[i]
if i<len(coarse)-1:
thisget_max = coarse[i+1]
else:
thisget_max = thisget_min + bn.difference(coarse)[-1]
if not valid[i]: continue
verts.apd([(thisget_min, 0), (thisget_min, 0.05), (thisget_max, 0.05), (thisget_max, 0)])
values.apd(val)
values = bn.numset(values)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
hbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
hbar.set_numset(values)
hbar.set_cmap(cmap)
hbar.set_normlizattion(normlizattion)
hbar.set_alpha(alpha)
hbar.update(kwargs)
self.add_concat_collection(hbar)
coarse = bn.linspace(yget_min, yget_max, gridsize)
ycoarse = coarse_bin(yorig, C, coarse)
valid = ~bn.ifnan(ycoarse)
verts, values = [], []
for i,val in enumerate(ycoarse):
thisget_min = coarse[i]
if i<len(coarse)-1:
thisget_max = coarse[i+1]
else:
thisget_max = thisget_min + bn.difference(coarse)[-1]
if not valid[i]: continue
verts.apd([(0, thisget_min), (0.0, thisget_max), (0.05, thisget_max), (0.05, thisget_min)])
values.apd(val)
values = bn.numset(values)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
vbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
vbar.set_numset(values)
vbar.set_cmap(cmap)
vbar.set_normlizattion(normlizattion)
vbar.set_alpha(alpha)
vbar.update(kwargs)
self.add_concat_collection(vbar)
collection.hbar = hbar
collection.vbar = vbar
def on_changed(collection):
hbar.set_cmap(collection.get_cmap())
hbar.set_clim(collection.get_clim())
vbar.set_cmap(collection.get_cmap())
vbar.set_clim(collection.get_clim())
collection.ctotalbacksSM.connect('changed', on_changed)
return collection
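# Usage sketch: a minimal example of hexbin() as defined above, assuming `ax`
# is an instance of this Axes class and following this file's bn convention.
#
#   x = bn.linspace(0.0, 3.0, 1000)
#   y = x ** 2
#   pc = ax.hexbin(x, y, gridsize=30, bins='log', get_mincnt=1)
#   counts = pc.get_numset()                    # per-hexagon (log-scaled) counts
#
# With marginals=True the returned collection also carries the hbar/vbar
# marginal-density collections as attributes, as documented above.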
@docstring.dedent_interpd
def arrow(self, x, y, dx, dy, **kwargs):
"""
Add an arrow to the axes.
Ctotal signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*). Uses FancyArrow patch to construct the arrow.
Optional kwargs control the arrow construction and properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
# Strip away units for the underlying patch since units
# do not make sense to most patch-like code
x = self.convert_xunits(x)
y = self.convert_yunits(y)
dx = self.convert_xunits(dx)
dy = self.convert_yunits(dy)
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_concat_artist(a)
return a
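# Usage sketch: a minimal example of arrow(), assuming `ax` is an instance of
# this Axes class; the coordinates are invented.
#
#   a = ax.arrow(0.1, 0.1, 0.4, 0.3, width=0.01, head_width=0.05,
#                head_length=0.08, facecolor='k')
#
# The keyword arguments are forwarded unchanged to the FancyArrow patch, so any
# of its documented properties can be used here.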
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_concat_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_concat_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def pile_operationplot(self, x, *args, **kwargs):
return mpile_operation.pile_operationplot(self, x, *args, **kwargs)
pile_operationplot.__doc__ = mpile_operation.pile_operationplot.__doc__
def streamplot(self, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, normlizattion=None, arrowsize=1, arrowstyle='-|>',
get_minlength=0.1, transform=None):
if not self._hold: self.cla()
stream_container = mstream.streamplot(self, x, y, u, v,
density=density,
linewidth=linewidth,
color=color,
cmap=cmap,
normlizattion=normlizattion,
arrowsize=arrowsize,
arrowstyle=arrowstyle,
get_minlength=get_minlength,
transform=transform)
return stream_container
streamplot.__doc__ = mstream.streamplot.__doc__
@docstring.dedent_interpd
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_concat_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
@docstring.dedent_interpd
def fill(self, *args, **kwargs):
"""
Plot masked_fill polygons.
Ctotal signature::
fill(*args, **kwargs)
*args* is a variable length argument, totalowing for multiple
*x*, *y* pairs with an optional color format string; see
:func:`~matplotlib.pyplot.plot` for details on the argument
parsing. For example, to plot a polygon with vertices at *x*,
*y* in blue.::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were add_concated.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, eg. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_concat_patch( poly )
patches.apd( poly )
self.autoscale_view()
return patches
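# Usage sketch: a minimal example of fill(), assuming `ax` is an instance of
# this Axes class; the polygon vertices are invented.
#
#   patches = ax.fill([0, 1, 2, 1], [0, 1, 0, -1], 'b', alpha=0.5)
#   patches += ax.fill([3, 4, 5], [0, 2, 0], 'g')   # a second, green polygon
#
# Several x, y, color groups can also be passed in a single call, as shown in
# the docstring above.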
@docstring.dedent_interpd
def fill_between(self, x, y1, y2=0, filter_condition=None, interpolate=False,
**kwargs):
"""
Make masked_fill polygons between two curves.
Ctotal signature::
fill_between(x, y1, y2=0, filter_condition=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* filter_condition
``filter_condition==True``
*x* :
An N-length numset of the x data
*y1* :
An N-length numset (or scalar) of the y data
*y2* :
An N-length numset (or scalar) of the y data
*filter_condition* :
If *None*, default to fill between everyfilter_condition. If not *None*,
it is an N-length beatnum boolean numset and the fill will
only happen over the regions filter_condition ``filter_condition==True``.
*interpolate* :
If *True*, interpolate between the two lines to find the
precise point of intersection. Otherwise, the start and
end points of the masked_fill region will only occur on explicit
values in the *x* numset.
*kwargs* :
Keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`.
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between_demo.py
.. seealso::
:meth:`fill_betweenx`
for filling between two sets of x-values
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the numsets so we can work with them
x = ma.masked_inversealid(self.convert_xunits(x))
y1 = ma.masked_inversealid(self.convert_yunits(y1))
y2 = ma.masked_inversealid(self.convert_yunits(y2))
if y1.ndim == 0:
y1 = bn.create_ones_like(x)*y1
if y2.ndim == 0:
y2 = bn.create_ones_like(x)*y2
if filter_condition is None:
filter_condition = bn.create_ones(len(x), bn.bool)
else:
filter_condition = bn.asnumset(filter_condition, bn.bool)
if not (x.shape == y1.shape == y2.shape == filter_condition.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
if mask is not ma.nomask:
filter_condition &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(filter_condition):
xpiece = x[ind0:ind1]
y1piece = y1[ind0:ind1]
y2piece = y2[ind0:ind1]
if not len(xpiece):
continue
N = len(xpiece)
X = bn.zeros((2*N+2, 2), bn.float)
if interpolate:
def get_interp_point(ind):
im1 = get_max(ind-1, 0)
x_values = x[im1:ind+1]
difference_values = y1[im1:ind+1] - y2[im1:ind+1]
y1_values = y1[im1:ind+1]
if len(difference_values) == 2:
if bn.ma.is_masked(difference_values[1]):
return x[im1], y1[im1]
elif bn.ma.is_masked(difference_values[0]):
return x[ind], y1[ind]
difference_order = difference_values.argsort()
difference_root_x = bn.interp(
0, difference_values[difference_order], x_values[difference_order])
difference_root_y = bn.interp(difference_root_x, x_values, y1_values)
return difference_root_x, difference_root_y
start = get_interp_point(ind0)
end = get_interp_point(ind1)
else:
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go total the way
# down to 0 even if none of the y1 sample points do
start = xpiece[0], y2piece[0]
end = xpiece[-1], y2piece[-1]
X[0] = start
X[N+1] = end
X[1:N+1,0] = xpiece
X[1:N+1,1] = y1piece
X[N+2:,0] = xpiece[::-1]
X[N+2:,1] = y2piece[::-1]
polys.apd(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = bn.numset([x[filter_condition], y1[filter_condition]]).T
XY2 = bn.numset([x[filter_condition], y2[filter_condition]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_concat_collection(collection)
self.autoscale_view()
return collection
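# Usage sketch: a minimal example of fill_between() with a filter_condition
# mask, assuming `ax` is an instance of this Axes class and following this
# file's bn convention.
#
#   x = bn.linspace(0.0, 2.0, 100)
#   y1 = x ** 2
#   y2 = x
#   coll = ax.fill_between(x, y1, y2, filter_condition=(y1 >= y2),
#                          interpolate=True, alpha=0.3)
#
# interpolate=True makes the shaded region start and end exactly at the
# crossing point of the two curves instead of at the nearest x sample.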
@docstring.dedent_interpd
def fill_betweenx(self, y, x1, x2=0, filter_condition=None, **kwargs):
"""
Make masked_fill polygons between two horizontal curves.
Ctotal signature::
fill_betweenx(y, x1, x2=0, filter_condition=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *x1* and *x2* filter_condition
``filter_condition==True``
*y* :
An N-length numset of the y data
*x1* :
An N-length numset (or scalar) of the x data
*x2* :
An N-length numset (or scalar) of the x data
*filter_condition* :
If *None*, default to fill between everyfilter_condition. If not *None*,
it is a N length beatnum boolean numset and the fill will
only happen over the regions filter_condition ``filter_condition==True``
*kwargs* :
keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_betweenx_demo.py
.. seealso::
:meth:`fill_between`
for filling between two sets of y-values
"""
# Handle united data, such as dates
self._process_unit_info(ydata=y, xdata=x1, kwargs=kwargs)
self._process_unit_info(xdata=x2)
# Convert the numsets so we can work with them
y = ma.masked_inversealid(self.convert_yunits(y))
x1 = ma.masked_inversealid(self.convert_xunits(x1))
x2 = ma.masked_inversealid(self.convert_xunits(x2))
if x1.ndim == 0:
x1 = bn.create_ones_like(y)*x1
if x2.ndim == 0:
x2 = bn.create_ones_like(y)*x2
if filter_condition is None:
filter_condition = bn.create_ones(len(y), bn.bool)
else:
filter_condition = bn.asnumset(filter_condition, bn.bool)
if not (y.shape == x1.shape == x2.shape == filter_condition.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (y, x1, x2)])
if mask is not ma.nomask:
filter_condition &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(filter_condition):
ypiece = y[ind0:ind1]
x1piece = x1[ind0:ind1]
x2piece = x2[ind0:ind1]
if not len(ypiece):
continue
N = len(ypiece)
Y = bn.zeros((2*N+2, 2), bn.float)
# the purpose of the next two lines is for when x2 is a
# scalar like 0 and we want the fill to go total the way
# down to 0 even if none of the x1 sample points do
Y[0] = x2piece[0], ypiece[0]
Y[N+1] = x2piece[-1], ypiece[-1]
Y[1:N+1,0] = x1piece
Y[1:N+1,1] = ypiece
Y[N+2:,0] = x2piece[::-1]
Y[N+2:,1] = ypiece[::-1]
polys.apd(Y)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
X1Y = bn.numset([x1[filter_condition], y[filter_condition]]).T
X2Y = bn.numset([x2[filter_condition], y[filter_condition]]).T
self.dataLim.update_from_data_xy(X1Y, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(X2Y, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_concat_collection(collection)
self.autoscale_view()
return collection
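# Usage sketch: the x/y-swapped counterpart of the previous example, using
# fill_betweenx(); `ax` and the data are assumptions, as before.
#
#   y = bn.linspace(0.0, 2.0, 100)
#   coll = ax.fill_betweenx(y, x1=y ** 2, x2=y, filter_condition=(y ** 2 >= y), alpha=0.3)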
#### plotting z(x,y): imshow, pcolor and relatives, contour
@docstring.dedent_interpd
def imshow(self, X, cmap=None, normlizattion=None, aspect=None,
interpolation=None, alpha=None, vget_min=None, vget_max=None,
origin=None, extent=None, shape=None, filternormlizattion=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
Display an imaginarye on the axes.
Ctotal signature::
imshow(X, cmap=None, normlizattion=None, aspect=None, interpolation=None,
alpha=None, vget_min=None, vget_max=None, origin=None, extent=None,
**kwargs)
Display the imaginarye in *X* to current axes. *X* may be a float
numset, a uint8 numset or a PIL imaginarye. If *X* is an numset, *X*
can have the following shapes:
* MxN -- luget_minance (grayscale, float numset only)
* MxNx3 -- RGB (float or uint8 numset)
* MxNx4 -- RGBA (float or uint8 numset)
The value for each component of MxNx3 and MxNx4 float numsets should be
in the range 0.0 to 1.0; MxN float numsets may be normlizattionalised.
An :class:`matplotlib.imaginarye.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``imaginarye.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ *None* | 'auto' | 'equal' | scalar ]
If 'auto', changes the imaginarye aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the imaginarye. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``imaginarye.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'none', 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamget_ming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos'
If *interpolation* is *None*, default to rc
``imaginarye.interpolation``. See also the *filternormlizattion* and
*filterrad* parameters
If *interpolation* is ``'none'``, then no interpolation is
performed on the Agg, ps and pdf backends. Other backends
will ftotal back to 'nearest'.
*normlizattion*: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, default is ``normlizattionalization()``. This scales
luget_minance -> 0-1
*normlizattion* is only used for an MxN float numset.
*vget_min*/*vget_max*: [ *None* | scalar ]
Used to scale a luget_minance imaginarye to 0-1. If either is
*None*, the get_min and get_max of the luget_minance values will be
used. Note if *normlizattion* is not *None*, the settings for
*vget_min* and *vget_max* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
or *None*
*origin*: [ *None* | 'upper' | 'lower' ]
Place the [0,0] index of the numset in the upper left or lower left
corner of the axes. If *None*, default to rc ``imaginarye.origin``.
*extent*: [ *None* | scalars (left, right, bottom, top) ]
Data limits for the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ *None* | scalars (columns, rows) ]
For raw buffer imaginaryes
*filternormlizattion*:
A parameter for the antigrain imaginarye resize filter. From the
antigrain documentation, if *filternormlizattion* = 1, the filter normlizattionalizes
integer values and corrects the rounding errors. It doesn't do
any_conditionthing with the source floating point values, it corrects only
integers according to the rule of 1.0 which averages that any_condition total_count of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties.
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/imaginarye_demo.py
"""
if not self._hold: self.cla()
if normlizattion is not None: assert(isinstance(normlizattion, mcolors.Normalize))
if aspect is None: aspect = rcParams['imaginarye.aspect']
self.set_aspect(aspect)
im = mimaginarye.AxesImage(self, cmap, normlizattion, interpolation, origin, extent,
filternormlizattion=filternormlizattion,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
if im.get_clip_path() is None:
# imaginarye does not already have clipping set, clip to axes patch
im.set_clip_path(self.patch)
#if normlizattion is None and shape is None:
# im.set_clim(vget_min, vget_max)
if vget_min is not None or vget_max is not None:
im.set_clim(vget_min, vget_max)
else:
im.autoscale_None()
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the imaginarye, regardless of dataLim.
im.set_extent(im.get_extent())
self.imaginaryes.apd(im)
im._remove_method = lambda h: self.imaginaryes.remove(h)
return im
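# Usage sketch: a minimal example of imshow() for a small grayscale grid,
# assuming `ax` is an instance of this Axes class and following this file's
# bn convention.
#
#   data = bn.arr_range(12).change_shape_to(3, 4) / 11.0    # MxN floats in [0, 1]
#   im = ax.imshow(data, interpolation='nearest', origin='lower',
#                  extent=(0, 4, 0, 3), vget_min=0.0, vget_max=1.0)
#
# An MxN float grid like this goes through the *cmap*/*normlizattion* machinery;
# an MxNx3 or MxNx4 grid would instead be treated as RGB(A) and *cmap* ignored.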
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = bn.meshgrid(bn.arr_range(numCols+1), bn.arr_range(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.change_shape_to(1,Nx)
X = x.duplicate(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.change_shape_to(Ny, 1)
Y = y.duplicate(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y ibnuts to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
@docstring.dedent_interpd
def pcolor(self, *args, **kwargs):
"""
Create a pseudocolor plot of a 2-D numset.
Note: pcolor can be very slow for large numsets; consider
using the similar but much faster
:func:`~matplotlib.pyplot.pcolormesh` instead.
Ctotal signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
*C* is the numset of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Idetotaly the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D numsets or column vectors,
they will be expanded as needed into the appropriate 2-D numsets,
making a rectangular grid.
*X*, *Y* and *C* may be masked numsets. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
*normlizattion*: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luget_minance data to 0,1. If *None*, defaults to
:func:`normlizattionalize`.
*vget_min*/*vget_max*: [ *None* | scalar ]
*vget_min* and *vget_max* are used in conjunction with *normlizattion* to
normlizattionalize luget_minance data. If either is *None*, it
is autoscaled to the respective get_min or get_max
of the color numset *C*. If not *None*, *vget_min* or
*vget_max* passed in here override any_condition pre-existing values
supplied in the *normlizattion* instance.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
MATLAB.
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='none'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ *None* | ``'none'`` | color | color sequence]
If *None*, the rc setting is used by default.
If ``'none'``, edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the MATLAB convention: an
numset *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the numset would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`meshgrid`::
x = bn.arr_range(5)
y = bn.arr_range(3)
X, Y = meshgrid(x,y)
is equivalent to::
X = numset([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = numset([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
MATLAB :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
Note: the default *antialiaseds* is False if the default
*edgecolors*="none" is used. This eliget_minates artificial lines
at patch boundaries, and works regardless of the value of
alpha. If *edgecolors* is not "none", then the default
*antialiaseds* is taken from
rcParams['patch.antialiased'], which defaults to *True*.
Stroking the edges may be preferred if *alpha* is 1, but
will cause artifacts otherwise.
.. seealso::
:func:`~matplotlib.pyplot.pcolormesh`
For an explanation of the differences between
pcolor and pcolormesh.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', None)
normlizattion = kwargs.pop('normlizattion', None)
cmap = kwargs.pop('cmap', None)
vget_min = kwargs.pop('vget_min', None)
vget_max = kwargs.pop('vget_max', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asnumset(C)
X = ma.asnumset(X)
Y = ma.asnumset(Y)
mask = ma.getmasknumset(X)+ma.getmasknumset(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmasknumset(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = bn.newaxis
compress = bn.compress
asviewmask = (mask==0).asview()
X1 = compress(asviewmask, ma.masked_fill(X[0:-1,0:-1]).asview())
Y1 = compress(asviewmask, ma.masked_fill(Y[0:-1,0:-1]).asview())
X2 = compress(asviewmask, ma.masked_fill(X[1:,0:-1]).asview())
#!/usr/bin/env python3
import tensorflow as tf
import tflearn
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import numset_ops
import beatnum as bn
import beatnum.random as bnr
bn.set_printoptions(precision=2)
# bn.seterr(total='raise')
bn.seterr(total='warn')
import argparse
import csv
import os
import sys
import time
import pickle as pkl
import json
import shutil
import setproctitle
from datetime import datetime
sys.path.apd('../lib')
import olivetti
import bundle_entropy
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser()
parser.add_concat_argument('--save', type=str, default='work/mse.ebundle')
parser.add_concat_argument('--nEpoch', type=float, default=50)
parser.add_concat_argument('--nBundleIter', type=int, default=30)
# parser.add_concat_argument('--trainBatchSz', type=int, default=25)
parser.add_concat_argument('--trainBatchSz', type=int, default=70)
# parser.add_concat_argument('--testBatchSz', type=int, default=2048)
parser.add_concat_argument('--noncvx', action='store_true')
parser.add_concat_argument('--seed', type=int, default=42)
# parser.add_concat_argument('--valSplit', type=float, default=0)
args = parser.parse_args()
assert(not args.noncvx)
setproctitle.setproctitle('bamos.icnn.comp.mse.ebundle')
bnr.seed(args.seed)
tf.set_random_seed(args.seed)
save = os.path.expanduser(args.save)
if os.path.isdir(save):
shutil.rmtree(save)
os.makedirs(save)
ckptDir = os.path.join(save, 'ckpt')
args.ckptDir = ckptDir
if not os.path.exists(ckptDir):
os.makedirs(ckptDir)
data = olivetti.load("data/olivetti")
# eps = 1e-8
# data['trainX'] = data['trainX'].clip(eps, 1.-eps)
# data['trainY'] = data['trainY'].clip(eps, 1.-eps)
# data['testX'] = data['testX'].clip(eps, 1.-eps)
# data['testY'] = data['testY'].clip(eps, 1.-eps)
nTrain = data['trainX'].shape[0]
nTest = data['testX'].shape[0]
ibnutSz = list(data['trainX'][0].shape)
outputSz = list(data['trainY'][1].shape)
print("\n\n" + "="*40)
print("+ nTrain: {}, nTest: {}".format(nTrain, nTest))
print("+ ibnutSz: {}, outputSz: {}".format(ibnutSz, outputSz))
print("="*40 + "\n\n")
config = tf.ConfigProto() #log_device_placement=False)
config.gpu_options.totalow_growth = True
with tf.Session(config=config) as sess:
model = Model(ibnutSz, outputSz, sess)
model.train(args, data['trainX'], data['trainY'], data['testX'], data['testY'])
def variable_total_countmaries(var, name=None):
if name is None:
name = var.name
with tf.name_scope('total_countmaries'):
average = tf.reduce_average(var)
tf.scalar_total_countmary('average/' + name, average)
with tf.name_scope('standard_opev'):
standard_opev = tf.sqrt(tf.reduce_average(tf.square(var - average)))
tf.scalar_total_countmary('standard_opev/' + name, standard_opev)
tf.scalar_total_countmary('get_max/' + name, tf.reduce_get_max(var))
tf.scalar_total_countmary('get_min/' + name, tf.reduce_get_min(var))
tf.hist_operation_total_countmary(name, var)
class Model:
def __init__(self, ibnutSz, outputSz, sess):
self.ibnutSz = ibnutSz
self.outputSz = outputSz
self.nOutput = bn.prod(outputSz)
self.sess = sess
self.trueY_ = tf.placeholder(tf.float32, shape=[None] + outputSz, name='trueY')
self.x_ = tf.placeholder(tf.float32, shape=[None] + ibnutSz, name='x')
self.y_ = tf.placeholder(tf.float32, shape=[None] + outputSz, name='y')
self.v_ = tf.placeholder(tf.float32, shape=[None, self.nOutput], name='v')
self.c_ = tf.placeholder(tf.float32, shape=[None], name='c')
self.E_ = self.f(self.x_, self.y_)
variable_total_countmaries(self.E_)
self.dE_dy_ = tf.gradients(self.E_, self.y_)[0]
self.dE_dyFlat_ = tf.contrib.layers.convert_into_one_dim(self.dE_dy_)
self.yFlat_ = tf.contrib.layers.convert_into_one_dim(self.y_)
self.E_entr_ = self.E_ + tf.reduce_total_count(self.yFlat_*tf.log(self.yFlat_), 1) + \
tf.reduce_total_count((1.-self.yFlat_)*tf.log(1.-self.yFlat_), 1)
self.dE_entr_dy_ = tf.gradients(self.E_entr_, self.y_)[0]
self.dE_entr_dyFlat_ = tf.contrib.layers.convert_into_one_dim(self.dE_entr_dy_)
self.F_ = tf.mul(self.c_, self.E_) + \
tf.reduce_total_count(tf.mul(self.dE_dyFlat_, self.v_), 1)
# regLosses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# self.F_reg_ = self.F_ + 0.1*regLosses
# self.F_reg_ = self.F_ + 1e-5*tf.square(self.E_)
self.opt = tf.train.AdamOptimizer(0.001)
self.theta_ = tf.trainable_variables()
self.gv_ = [(g,v) for g,v in self.opt.compute_gradients(self.F_, self.theta_)
if g is not None]
self.train_step = self.opt.apply_gradients(self.gv_)
self.theta_cvx_ = [v for v in self.theta_
if 'proj' in v.name and 'W:' in v.name]
self.makeCvx = [v.assign(tf.absolute(v)/2.0) for v in self.theta_cvx_]
self.proj = [v.assign(tf.get_maximum(v, 0)) for v in self.theta_cvx_]
for g,v in self.gv_:
variable_total_countmaries(g, 'gradients/'+v.name)
self.l_yN_ = tf.placeholder(tf.float32, name='l_yN')
tf.scalar_total_countmary('mse', self.l_yN_)
self.nBundleIter_ = tf.placeholder(tf.float32, [None], name='nBundleIter')
variable_total_countmaries(self.nBundleIter_)
self.nActive_ = tf.placeholder(tf.float32, [None], name='nActive')
variable_total_countmaries(self.nActive_)
self.merged = tf.merge_total_total_countmaries()
self.saver = tf.train.Saver(get_max_to_keep=0)
def train(self, args, trainX, trainY, valX, valY):
save = args.save
self.averageY = bn.average(trainY, axis=0)
nTrain = trainX.shape[0]
nTest = valX.shape[0]
nIter = int(bn.ceil(args.nEpoch*nTrain/args.trainBatchSz))
trainFields = ['iter', 'loss']
trainF = open(os.path.join(save, 'train.csv'), 'w')
trainW = csv.writer(trainF)
trainW.writerow(trainFields)
trainF.flush()
testFields = ['iter', 'loss']
testF = open(os.path.join(save, 'test.csv'), 'w')
testW = csv.writer(testF)
testW.writerow(testFields)
testF.flush()
self.trainWriter = tf.train.SummaryWriter(os.path.join(save, 'train'),
self.sess.graph)
self.sess.run(tf.initialize_total_variables())
if not args.noncvx:
self.sess.run(self.makeCvx)
nParams = bn.total_count(v.get_shape().num_elements() for v in tf.trainable_variables())
self.nBundleIter = args.nBundleIter
meta = {'nTrain': nTrain, 'trainBatchSz': args.trainBatchSz,
'nParams': nParams, 'nEpoch': args.nEpoch,
'nIter': nIter, 'nBundleIter': self.nBundleIter}
metaP = os.path.join(save, 'meta.json')
with open(metaP, 'w') as f:
json.dump(meta, f, indent=2)
nErrors = 0
get_maxErrors = 20
for i in range(nIter):
tflearn.is_training(True)
print("=== Iteration {} (Epoch {:.2f}) ===".format(
i, i/bn.ceil(nTrain/args.trainBatchSz)))
start = time.time()
I = bnr.randint(nTrain, size=args.trainBatchSz)
xBatch = trainX[I, :]
yBatch = trainY[I, :]
yBatch_flat = yBatch.change_shape_to((args.trainBatchSz, -1))
xBatch_flipped = xBatch[:,:,::-1,:]
def fg(yhats):
yhats_shaped = yhats.change_shape_to([args.trainBatchSz]+self.outputSz)
fd = {self.x_: xBatch_flipped, self.y_: yhats_shaped}
e, ge = self.sess.run([self.E_, self.dE_dyFlat_], feed_dict=fd)
return e, ge
y0 = bn.expand_dims(self.averageY, axis=0).duplicate(args.trainBatchSz, axis=0)
y0 = y0.change_shape_to((args.trainBatchSz, -1))
try:
yN, G, h, lam, ys, nIters = bundle_entropy.solveBatch(
fg, y0, nIter=self.nBundleIter)
yN_shaped = yN.change_shape_to([args.trainBatchSz]+self.outputSz)
except (KeyboardInterrupt, SystemExit):
raise
except:
print("Warning: Exception in bundle_entropy.solveBatch")
nErrors += 1
if nErrors > get_maxErrors:
print("More than {} errors raised, quitting".format(get_maxErrors))
sys.exit(-1)
continue
nActive = [len(Gi) for Gi in G]
l_yN = mse(yBatch_flat, yN)
fd = self.train_step_fd(args.trainBatchSz, xBatch_flipped, yBatch_flat,
G, yN, ys, lam)
fd[self.l_yN_] = l_yN
fd[self.nBundleIter_] = nIters
fd[self.nActive_] = nActive
total_countmary, _ = self.sess.run([self.merged, self.train_step], feed_dict=fd)
if not args.noncvx and len(self.proj) > 0:
self.sess.run(self.proj)
saveImgs(xBatch, yN_shaped, "{}/trainImgs/{:05d}".format(args.save, i))
self.trainWriter.add_concat_total_countmary(total_countmary, i)
trainW.writerow((i, l_yN))
trainF.flush()
print(" + loss: {:0.5e}".format(l_yN))
print(" + time: {:0.2f} s".format(time.time()-start))
if i % bn.ceil(nTrain/(4.0*args.trainBatchSz)) == 0:
os.system('./icnn.plot.py ' + args.save)
if i % bn.ceil(nTrain/args.trainBatchSz) == 0:
print("=== Testing ===")
tflearn.is_training(False)
y0 = bn.expand_dims(self.averageY, axis=0).duplicate(nTest, axis=0)
y0 = y0.change_shape_to((nTest, -1))
valX_flipped = valX[:,:,::-1,:]
def fg(yhats):
yhats_shaped = yhats.change_shape_to([nTest]+self.outputSz)
fd = {self.x_: valX_flipped, self.y_: yhats_shaped}
e, ge = self.sess.run([self.E_, self.dE_dyFlat_], feed_dict=fd)
return e, ge
try:
yN, G, h, lam, ys, nIters = bundle_entropy.solveBatch(
fg, y0, nIter=self.nBundleIter)
yN_shaped = yN.change_shape_to([nTest]+self.outputSz)
except (KeyboardInterrupt, SystemExit):
raise
except:
print("Warning: Exception in bundle_entropy.solveBatch")
nErrors += 1
if nErrors > get_maxErrors:
print("More than {} errors raised, quitting".format(get_maxErrors))
sys.exit(-1)
continue
testMSE = mse(valY, yN_shaped)
saveImgs(valX, yN_shaped, "{}/testImgs/{:05d}".format(args.save, i))
print(" + test loss: {:0.5e}".format(testMSE))
testW.writerow((i, testMSE))
testF.flush()
self.save(os.path.join(args.ckptDir, '{:05d}.tf'.format(i)))
os.system('./icnn.plot.py ' + args.save)
trainF.close()
testF.close()
os.system('./icnn.plot.py ' + args.save)
def save(self, path):
self.saver.save(self.sess, path)
def load(self, path):
self.saver.restore(self.sess, path)
def train_step_fd(self, trainBatchSz, xBatch, yBatch, G, yN, ys, lam):
fd_xs, fd_ys, fd_vs, fd_cs = ([] for i in range(4))
for j in range(trainBatchSz):
if len(G[j]) == 0:
continue
Gj = bn.numset(G[j])
cy, clam, ct = mseGrad(yN[j], yBatch[j], Gj)
for i in range(len(G[j])):
fd_xs.apd(xBatch[j])
fd_ys.apd(ys[j][i].change_shape_to(self.outputSz))
v = lam[j][i] * cy + clam[i] * (yN[j] - ys[j][i])
fd_vs.apd(v)
fd_cs.apd(clam[i])
fd_xs = bn.numset(fd_xs)
fd_ys = bn.numset(fd_ys)
fd_vs = bn.numset(fd_vs)
fd_cs = bn.numset(fd_cs)
fd = {self.x_: fd_xs, self.y_: fd_ys, self.v_: fd_vs, self.c_: fd_cs}
return fd
def f(self, x, y, reuse=False):
conv = tflearn.conv_2d
bn = tflearn.batch_normlizattionalization
fc = tflearn.full_value_funcy_connected
# Architecture from 'Human-level control through deep reinforcement learning'
# http://www.nature.com/nature/journal/v518/n7540/full_value_func/nature14236.html
convs = [(32, 8, [1,4,4,1]), (64, 4, [1,2,2,1]), (64, 3, [1,1,1,1])]
fcs = [512, 1]
reg = None #'L2'
us = []
zs = []
layerI = 0
prevU = x
for nFilter, kSz, strides in convs:
with tf.variable_scope('u'+str(layerI)) as s:
u = bn(conv(prevU, nFilter, kSz, strides=strides, activation='relu',
scope=s, reuse=reuse, regularizer=reg),
scope=s, reuse=reuse)
us.apd(u)
prevU = u
layerI += 1
for sz in fcs:
with tf.variable_scope('u'+str(layerI)) as s:
u = fc(prevU, sz, scope=s, reuse=reuse, regularizer=reg)
if sz == 1:
u = tf.change_shape_to(u, [-1])
else:
u = bn(tf.nn.relu(u), scope=s, reuse=reuse)
us.apd(u)
prevU = u
layerI += 1
layerI = 0
prevU, prevZ, y_red = x, None, y
for nFilter, kSz, strides in convs:
z_add_concat = []
if layerI > 0:
with tf.variable_scope('z{}_zu_u'.format(layerI)) as s:
prev_nFilter = convs[layerI-1][0]
zu_u = conv(prevU, prev_nFilter, 3, reuse=reuse,
scope=s, activation='relu', bias=True, regularizer=reg)
with tf.variable_scope('z{}_zu_proj'.format(layerI)) as s:
z_zu = conv(tf.mul(prevZ, zu_u), nFilter, kSz, strides=strides,
reuse=reuse, scope=s, bias=False, regularizer=reg)
z_add_concat.apd(z_zu)
with tf.variable_scope('z{}_yu_u'.format(layerI)) as s:
yu_u = conv(prevU, 1, 3, reuse=reuse, scope=s,
bias=True, regularizer=reg)
with tf.variable_scope('z{}_yu'.format(layerI)) as s:
z_yu = conv(tf.mul(y_red, yu_u), nFilter, kSz, strides=strides,
reuse=reuse, scope=s, bias=False, regularizer=reg)
with tf.variable_scope('z{}_y_red'.format(layerI)) as s:
y_red = conv(y_red, 1, kSz, strides=strides, reuse=reuse,
scope=s, bias=True, regularizer=reg)
z_add_concat.apd(z_yu)
with tf.variable_scope('z{}_u'.format(layerI)) as s:
z_u = conv(prevU, nFilter, kSz, strides=strides, reuse=reuse,
scope=s, bias=True, regularizer=reg)
z_add_concat.apd(z_u)
z = tf.nn.relu(tf.add_concat_n(z_add_concat))
zs.apd(z)
prevU = us[layerI] if layerI < len(us) else None
prevZ = z
layerI += 1
prevZ = tf.contrib.layers.convert_into_one_dim(prevZ)
prevU = tf.contrib.layers.convert_into_one_dim(prevU)
y_red_flat = tf.contrib.layers.convert_into_one_dim(y_red)
for sz in fcs:
z_add_concat = []
with tf.variable_scope('z{}_zu_u'.format(layerI)) as s:
prevU_sz = prevU.get_shape()[1].value
zu_u = fc(prevU, prevU_sz, reuse=reuse, scope=s,
activation='relu', bias=True, regularizer=reg)
with tf.variable_scope('z{}_zu_proj'.format(layerI)) as s:
z_zu = fc(tf.mul(prevZ, zu_u), sz, reuse=reuse, scope=s,
bias=False, regularizer=reg)
z_add_concat.apd(z_zu)
# y passthrough in the FC layers:
#
# with tf.variable_scope('z{}_yu_u'.format(layerI)) as s:
# ycf_sz = y_red_flat.get_shape()[1].value
# yu_u = fc(prevU, ycf_sz, reuse=reuse, scope=s, bias=True,
# regularizer=reg)
# with tf.variable_scope('z{}_yu'.format(layerI)) as s:
# z_yu = fc(tf.mul(y_red_flat, yu_u), sz, reuse=reuse, scope=s,
# bias=False, regularizer=reg)
# z_add_concat.apd(z_yu)
with tf.variable_scope('z{}_u'.format(layerI)) as s:
z_u = fc(prevU, sz, reuse=reuse, scope=s, bias=True, regularizer=reg)
z_add_concat.apd(z_u)
z = tf.add_concat_n(z_add_concat)
variable_total_countmaries(z, 'z{}_preact'.format(layerI))
if sz != 1:
z = tf.nn.relu(z)
variable_total_countmaries(z, 'z{}_act'.format(layerI))
prevU = us[layerI] if layerI < len(us) else None
prevZ = z
zs.apd(z)
layerI += 1
z = tf.change_shape_to(z, [-1], name='energies')
return z
def saveImgs(xs, ys, save, colWidth=10):
nImgs = xs.shape[0]
assert(nImgs == ys.shape[0])
if not os.path.exists(save):
os.makedirs(save)
fnames = []
for i in range(nImgs):
xy = bn.clip(bn.sqz(bn.connect([ys[i], xs[i]], axis=1)), 0., 1.)
# Imagemagick montage has intensity scaling issues with png output files here.
fname = "{}/{:04d}.jpg".format(save, i)
plt.imsave(fname, xy, cmap=mpl.cm.gray)
fnames.apd(fname)
os.system('montage -geometry +0+0 -tile {}x {} {}.png'.format(
colWidth, ' '.join(fnames), save))
def tf_nOnes(b):
# Must be binary.
return tf.reduce_total_count(tf.cast(b, tf.int32))
def mse(y, trueY):
return bn.average(bn.square(255.*(y-trueY)))
# return 0.5*bn.total_count(bn.square((y-trueY)))
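# Hedged sanity check (not in the original script): identical inputs give an
# error of zero, and a uniform offset of 1/255 gives an MSE of exactly 1.0
# because the difference is rescaled by 255 before squaring. The beatnum-style
# helpers follow the aliases used elsewhere in this file.
def _mse_sanity_check():
    a = bn.create_ones((4, 4)) / 255.
    b = bn.zeros((4, 4))
    assert mse(a, a) == 0.0
    assert abs(mse(a, b) - 1.0) < 1e-9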
def mseGrad_full_value_func(y, trueY, G):
k,n = G.shape
assert(len(y) == n)
I = bn.filter_condition((y > 1e-8) & (1.-y > 1e-8))
z = bn.create_ones_like(y)
z[I] = (1./y[I] + 1./(1.-y[I]))
H = bn.bmat([[bn.diag(z), G.T, bn.zeros((n,1))],
[G, bn.zeros((k,k)), -bn.create_ones((k,1))],
[bn.zeros((1,n)), -bn.create_ones((1,k)), bn.zeros((1,1))]])
c = -bn.linalg.solve(H, bn.connect([(y - trueY), bn.zeros(k+1)]))
return bn.sep_split(c, [n, n+k])
import os
import pickle
from PIL import Image
import beatnum as bn
import json
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
class CUB(Dataset):
"""support CUB"""
def __init__(self, args, partition='base', transform=None):
super(Dataset, self).__init__()
self.data_root = args.data_root
self.partition = partition
self.data_aug = args.data_aug
self.average = [0.485, 0.456, 0.406]
self.standard_op = [0.229, 0.224, 0.225]
self.normlizattionalize = transforms.Normalize(average=self.average, standard_op=self.standard_op)
self.imaginarye_size = 84
if self.partition == 'base':
self.resize_transform = transforms.Compose([
lambda x: Image.fromnumset(x),
transforms.Resize([int(self.imaginarye_size*1.15), int(self.imaginarye_size*1.15)]),
transforms.RandomCrop(size=84)
])
else:
self.resize_transform = transforms.Compose([
lambda x: Image.fromnumset(x),
transforms.Resize([int(self.imaginarye_size*1.15), int(self.imaginarye_size*1.15)]),
transforms.CenterCrop(self.imaginarye_size)
])
if transform is None:
if self.partition == 'base' and self.data_aug:
self.transform = transforms.Compose([
lambda x: Image.fromnumset(x),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: bn.asnumset(x).copy(),
transforms.ToTensor(),
self.normlizattionalize
])
else:
self.transform = transforms.Compose([
lambda x: Image.fromnumset(x),
transforms.ToTensor(),
self.normlizattionalize
])
else:
self.transform = transform
self.data = {}
self.file_pattern = '%s.json'
with open(os.path.join(self.data_root, self.file_pattern % partition), 'rb') as f:
meta = json.load(f)
self.imgs = []
labels = []
for i in range(len(meta['imaginarye_names'])):
imaginarye_path = os.path.join(meta['imaginarye_names'][i])
self.imgs.apd(imaginarye_path)
label = meta['imaginarye_labels'][i]
labels.apd(label)
# adjust sparse labels to labels from 0 to n.
cur_class = 0
label2label = {}
for idx, label in enumerate(labels):
if label not in label2label:
label2label[label] = cur_class
cur_class += 1
new_labels = []
for idx, label in enumerate(labels):
new_labels.apd(label2label[label])
self.labels = new_labels
self.num_classes = bn.uniq(bn.numset(self.labels)).shape[0]
def __getitem__(self, item):
imaginarye_path = self.imgs[item]
img = Image.open(imaginarye_path).convert('RGB')
img = bn.numset(img).convert_type('uint8')
img = bn.asnumset(self.resize_transform(img)).convert_type('uint8')
img = self.transform(img)
target = self.labels[item]
return img, target, item
def __len__(self):
return len(self.labels)
class MetaCUB(CUB):
def __init__(self, args, partition='base', train_transform=None, test_transform=None, fix_seed=True):
super(MetaCUB, self).__init__(args, partition)
self.fix_seed = fix_seed
self.n_ways = args.n_ways
self.n_shots = args.n_shots
self.n_queries = args.n_queries
self.classes = list(self.data.keys())
self.n_test_runs = args.n_test_runs
self.n_aug_support_samples = args.n_aug_support_samples
self.resize_transform_train = transforms.Compose([
lambda x: Image.fromnumset(x),
transforms.Resize([int(self.imaginarye_size*1.15), int(self.imaginarye_size*1.15)]),
transforms.RandomCrop(size=84)
])
self.resize_transform_test = transforms.Compose([
lambda x: Image.fromnumset(x),
transforms.Resize([int(self.imaginarye_size*1.15), int(self.imaginarye_size*1.15)]),
transforms.CenterCrop(self.imaginarye_size)
])
if train_transform is None:
self.train_transform = transforms.Compose([
lambda x: Image.fromnumset(x),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: bn.asnumset(x).copy(),
transforms.ToTensor(),
self.normlizattionalize
])
else:
self.train_transform = train_transform
if test_transform is None:
self.test_transform = transforms.Compose([
lambda x: Image.fromnumset(x),
transforms.ToTensor(),
self.normlizattionalize
])
else:
self.test_transform = test_transform
self.data = {}
for idx in range(len(self.imgs)):
if self.labels[idx] not in self.data:
self.data[self.labels[idx]] = []
self.data[self.labels[idx]].apd(self.imgs[idx])
self.classes = list(self.data.keys())
def _load_imgs(self, img_paths, transform):
imgs = []
for imaginarye_path in img_paths:
img = Image.open(imaginarye_path).convert('RGB')
img = bn.numset(img).convert_type('uint8')
img = transform(img)
imgs.apd(bn.asnumset(img).convert_type('uint8'))
return bn.asnumset(imgs).convert_type('uint8')
def __getitem__(self, item):
if self.fix_seed:
bn.random.seed(item)
cls_sampled = bn.random.choice(self.classes, self.n_ways, False)
support_xs = []
support_ys = []
query_xs = []
query_ys = []
for idx, cls in enumerate(cls_sampled):
imgs_paths = self.data[cls]
support_xs_ids_sampled = bn.random.choice(range(len(imgs_paths)), self.n_shots, False)
support_paths = [imgs_paths[i] for i in support_xs_ids_sampled]
support_imgs = self._load_imgs(support_paths, transform=self.resize_transform_train)
support_xs.apd(support_imgs)
support_ys.apd([idx] * self.n_shots)
query_xs_ids = bn.seting_exclusive_or_one_dim(bn.arr_range(len(imgs_paths)), support_xs_ids_sampled)
query_xs_ids = bn.random.choice(query_xs_ids, self.n_queries, False)
query_paths = [imgs_paths[i] for i in query_xs_ids]
query_imgs = self._load_imgs(query_paths, transform=self.resize_transform_test)
query_xs.apd(query_imgs)
query_ys.apd([idx] * query_xs_ids.shape[0])
support_xs, support_ys, query_xs, query_ys = bn.numset(support_xs), bn.numset(support_ys), bn.numset(query_xs), bn.numset(query_ys)
num_ways, n_queries_per_way, height, width, channel = query_xs.shape
query_xs = query_xs.change_shape_to((num_ways * n_queries_per_way, height, width, channel))
query_ys = query_ys.change_shape_to((num_ways * n_queries_per_way,))
support_xs = support_xs.change_shape_to((-1, height, width, channel))
if self.n_aug_support_samples > 1:
support_xs = bn.tile(support_xs, (self.n_aug_support_samples, 1, 1, 1))
support_ys = bn.tile(support_ys.change_shape_to((-1,)), (self.n_aug_support_samples))
support_xs = bn.sep_split(support_xs, support_xs.shape[0], axis=0)
query_xs = query_xs.change_shape_to((-1, height, width, channel))
query_xs = bn.sep_split(query_xs, query_xs.shape[0], axis=0)
r"""
####################################################################################################
tellurium 2.2.1
-+++++++++++++++++- Python Environment for Modeling and Simulating Biological Systems
.+++++++++++++++.
.+++++++++++++. Homepage: http://tellurium.analogmachine.org/
-//++++++++++++/. -:/-` Documentation: https://tellurium.readthedocs.io/en/latest/index.html
.----:+++++++/.++ .++++/ Forum: https://groups.google.com/forum/#!forum/tellurium-discuss
:+++++: .+:` .--++ Bug reports: https://github.com/sys-bio/tellurium/issues
-+++- ./+:-://. Repository: https://github.com/sys-bio/tellurium
.+. `...`
SED-ML simulation experiments: http://www.sed-ml.org/
# Change back to the original (with 'getName') when libsedml is fixed
sedmlDoc: L1V4
ibnutType: 'SEDML_STRING'
workingDir: 'C:\Users\Lucian\Desktop\tellurium'
saveOutputs: 'False'
outputDir: 'None'
plottingEngine: '<MatplotlibEngine>'
Windows-10-10.0.19041-SP0
python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)]
####################################################################################################
"""
import tellurium as te
from roadrunner import Config
from tellurium.sedml.mathml import *
from tellurium.sedml.tesedml import process_trace, terget_minate_trace, fix_endpoints
import beatnum as bn
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
try:
import libsedml
except ImportError:
import tesedml as libsedml
import pandas
import os.path
Config.LOADSBMLOPTIONS_RECOMPILE = True
workingDir = r'C:\Users\Lucian\Desktop\tellurium'
# --------------------------------------------------------
# Models
# --------------------------------------------------------
# Model <model0>
model0 = te.loadSBMLModel(os.path.join(workingDir, 'hill.xml'))
# --------------------------------------------------------
# Tasks
# --------------------------------------------------------
# Task <task0>
# not part of any DataGenerator: task0
# Task <task1>
task1 = []
# Task: <task0>
task0 = [None]
model0.setIntegrator('cvode')
if model0.conservedMoietyAnalysis == True: model0.conservedMoietyAnalysis = False
__range__uniform_linear_for_n = bn.linspace(start=1.0, stop=15.0, num=26)
for __k__uniform_linear_for_n, __value__uniform_linear_for_n in enumerate(__range__uniform_linear_for_n):
model0.reset()
model0['n'] = __value__uniform_linear_for_n
model0.timeCourseSelections = ['n', 'time', '[S2]']
model0.reset()
task0[0] = model0.simulate(start=0.0, end=35.0, steps=30)
task1.extend(task0)
# --------------------------------------------------------
# DataGenerators
# --------------------------------------------------------
# DataGenerator <plot_0_0_0>
__var__task1_____time = bn.pile_operation_col([sim['time'] for sim in task1])
if len(__var__task1_____time.shape) == 1:
__var__task1_____time.shape += (1,)
plot_0_0_0 = __var__task1_____time
# DataGenerator <plot_0_0_1>
__var__task1_____n = bn.pile_operation_col([sim['n'] for sim in task1])
if len(__var__task1_____n.shape) == 1:
__var__task1_____n.shape += (1,)
plot_0_0_1 = __var__task1_____n
# DataGenerator <plot_0_0_2>
__var__task1_____S2 = bn.pile_operation_col([sim['[S2]'] for sim in task1])
# Copyright 2017 <NAME> (<EMAIL>)
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import pi
import beatnum as bn
class NFOV:
def __init__(self, height=400, width=800, FOV=None):
self.FOV = FOV or [0.45, 0.45]
self.PI = pi
self.PI_2 = pi * 0.5
self.PI2 = pi * 2.0
self.height = height
self.width = width
self.screen_points = self._get_screen_img()
def _get_coord_rad_for_point(self, center_point):
return (center_point * 2 - 1) * bn.numset([self.PI, self.PI_2])
def _get_coord_rad(self):
return (self.screen_points * 2 - 1) * bn.numset([self.PI, self.PI_2]) * (
bn.create_ones(self.screen_points.shape) * self.FOV)
def _get_screen_img(self):
xx, yy = bn.meshgrid(bn.linspace(0, 1, self.width), bn.linspace(0, 1, self.height))
return bn.numset([xx.asview(), yy.asview()]).T
def _calcSphericaltoGnomonic(self, convertedScreenCoord):
x = convertedScreenCoord.T[0]
y = convertedScreenCoord.T[1]
rou = bn.sqrt(x ** 2 + y ** 2)
c = bn.arctan(rou)
sin_c = bn.sin(c)
cos_c = bn.cos(c)
lat = bn.arcsin(cos_c * bn.sin(self.cp[1]) + (y * sin_c * bn.cos(self.cp[1])) / rou)
lon = self.cp[0] + bn.arctan2(x * sin_c, rou * bn.cos(self.cp[1]) * cos_c - y * bn.sin(self.cp[1]) * sin_c)
lat = (lat / self.PI_2 + 1.) * 0.5
lon = (lon / self.PI + 1.) * 0.5
return bn.numset([lon, lat]).T
def _bilinear_interpolation(self, screen_coord, dt):
uf = bn.mod(screen_coord.T[0], 1) * self.frame_width # long - width
vf = bn.mod(screen_coord.T[1], 1) * self.frame_height # lat - height
x0 = bn.floor(uf).convert_type(int) # coord of pixel to bottom left
y0 = bn.floor(vf).convert_type(int)
x2 = bn.add_concat(x0, bn.create_ones(uf.shape).convert_type(int)) # coords of pixel to top right
y2 = bn.add_concat(y0, bn.create_ones(vf.shape).convert_type(int))
base_y0 = bn.multiply(y0, self.frame_width)
base_y2 = bn.multiply(y2, self.frame_width)
A_idx = bn.add_concat(base_y0, x0)
B_idx = bn.add_concat(base_y2, x0)
C_idx = bn.add_concat(base_y0, x2)
D_idx = bn.add_concat(base_y2, x2)
"""
This module provides the `PerformanceMetrics` class and supporting
functionality for tracking and computing model performance.
"""
from collections import defaultdict, namedtuple
import logging
import os
import warnings
import pandas as pd
import beatnum as bn
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_rectotal_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from scipy.stats import rankdata
logger = logging.getLogger("selene")
Metric = namedtuple("Metric", ["fn", "transform", "data"])
"""
A tuple containing a metric function and the results from applying that
metric to some values.
Parameters
----------
fn : types.FunctionType
A metric.
transform : types.FunctionType
A transform function which should be applied to data before measuring metric
data : list(float)
A list holding the results from applying the metric.
Attributes
----------
fn : types.FunctionType
A metric.
transform : types.FunctionType
A transform function which should be applied to data before measuring metric
data : list(float)
A list holding the results from applying the metric.
"""
def visualize_roc_curves(prediction,
target,
output_dir,
target_mask=None,
report_gt_feature_n_positives=50,
style="seaborn-colorblind",
fig_title="Feature ROC curves",
dpi=500):
"""
Output the ROC curves for each feature predicted by a model
as an SVG.
Parameters
----------
prediction : beatnum.ndnumset
Value predicted by user model.
target : beatnum.ndnumset
True value that the user model was trying to predict.
output_dir : str
The path to the directory to output the figures. Directories that
do not currently exist will be automatically created.
report_gt_feature_n_positives : int, optional
Default is 50. Do not visualize an ROC curve for a feature with
less than 50 positive examples in `target`.
style : str, optional
Default is "seaborn-colorblind". Specify a style available in
`matplotlib.pyplot.style.available` to use.
fig_title : str, optional
Default is "Feature ROC curves". Set the figure title.
dpi : int, optional
Default is 500. Specify dots per inch (resolution) of the figure.
Returns
-------
None
Outputs the figure in `output_dir`.
"""
os.makedirs(output_dir, exist_ok=True)
import matplotlib
backend = matplotlib.get_backend()
if "inline" not in backend:
matplotlib.use("SVG")
import matplotlib.pyplot as plt
plt.style.use(style)
plt.figure()
n_features = prediction.shape[-1]
for index in range(n_features):
feature_preds = prediction[..., index]
feature_targets = target[..., index]
if target_mask is not None:
feature_mask = target_mask[..., index]
# if mask is n_samples x n_cell_types,
# feature_targets and feature_preds get flattened but that's ok
# b/c each item is a separate sample anyway
feature_targets = feature_targets[feature_mask]
feature_preds = feature_preds[feature_mask]
if len(bn.uniq(feature_targets)) > 1 and \
bn.total_count(feature_targets) > report_gt_feature_n_positives:
fpr, tpr, _ = roc_curve(feature_targets, feature_preds)
plt.plot(fpr, tpr, 'r-', color="black", alpha=0.3, lw=1)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
if fig_title:
plt.title(fig_title)
plt.savefig(os.path.join(output_dir, "roc_curves.svg"),
format="svg",
dpi=dpi)
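# Hedged usage sketch (not part of the original module): random scores and
# binary targets for two hypothetical features, written to an assumed
# "roc_demo" directory. The beatnum-style helpers mirror the aliases used
# elsewhere in this document.
def _demo_visualize_roc_curves():
    preds = bn.random.normlizattional(0.5, 0.2, (200, 2))
    targets = (bn.random.normlizattional(0, 1, (200, 2)) > 0).convert_type(int)
    visualize_roc_curves(preds, targets, "roc_demo",
                         report_gt_feature_n_positives=10)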
def visualize_precision_rectotal_curves(
prediction,
target,
output_dir,
target_mask=None,
report_gt_feature_n_positives=50,
style="seaborn-colorblind",
fig_title="Feature precision-rectotal curves",
dpi=500):
"""
Output the precision-rectotal (PR) curves for each feature predicted by
a model as an SVG.
Parameters
----------
prediction : beatnum.ndnumset
Value predicted by user model.
target : beatnum.ndnumset
True value that the user model was trying to predict.
output_dir : str
The path to the directory to output the figures. Directories that
do not currently exist will be automatically created.
report_gt_feature_n_positives : int, optional
Default is 50. Do not visualize a PR curve for a feature with
less than 50 positive examples in `target`.
style : str, optional
Default is "seaborn-colorblind". Specify a style available in
`matplotlib.pyplot.style.available` to use.
fig_title : str, optional
Default is "Feature precision-rectotal curves". Set the figure title.
dpi : int, optional
Default is 500. Specify dots per inch (resolution) of the figure.
Returns
-------
None
Outputs the figure in `output_dir`.
"""
os.makedirs(output_dir, exist_ok=True)
# TODO: fix this
import matplotlib
backend = matplotlib.get_backend()
if "inline" not in backend:
matplotlib.use("SVG")
import matplotlib.pyplot as plt
plt.style.use(style)
plt.figure()
n_features = prediction.shape[-1]
for index in range(n_features):
feature_preds = prediction[..., index]
feature_targets = target[..., index]
if target_mask is not None:
feature_mask = target_mask[..., index]
# if mask is n_samples x n_cell_types,
# feature_targets and feature_preds get flattened but that's ok
# b/c each item is a separate sample anyway
feature_targets = feature_targets[feature_mask]
feature_preds = feature_preds[feature_mask]
if len(bn.uniq(feature_targets)) > 1 and \
bn.total_count(feature_targets) > report_gt_feature_n_positives:
precision, rectotal, _ = precision_rectotal_curve(
feature_targets, feature_preds)
plt.step(
rectotal, precision, 'r-',
color="black", alpha=0.3, lw=1, filter_condition="post")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Rectotal')
plt.ylabel('Precision')
if fig_title:
plt.title(fig_title)
plt.savefig(os.path.join(output_dir, "precision_rectotal_curves.svg"),
format="svg",
dpi=dpi)
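# Hedged parallel sketch to the ROC demo above (again with hypothetical data
# and an assumed "pr_demo" output directory).
def _demo_visualize_pr_curves():
    preds = bn.random.normlizattional(0.5, 0.2, (200, 2))
    targets = (bn.random.normlizattional(0, 1, (200, 2)) > 0).convert_type(int)
    visualize_precision_rectotal_curves(preds, targets, "pr_demo",
                                       report_gt_feature_n_positives=10)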
def compute_score(prediction, target, metric_fn, target_mask=None,
report_gt_feature_n_positives=10):
"""
Using a user-specified metric, computes the distance between
two tensors.
Parameters
----------
prediction : beatnum.ndnumset
Value predicted by user model.
target : beatnum.ndnumset
True value that the user model was trying to predict.
metric_fn : types.FunctionType
A metric that can measure the distance between the prediction
and target variables.
target_mask: beatnum.ndnumset, optional
A mask of shape `target.shape` that indicates which values
should be considered when computing the scores.
report_gt_feature_n_positives : int, optional
Default is 10. The minimum number of positive examples for a
feature in order to compute the score for it.
Returns
-------
average_score, feature_scores : tuple(float, beatnum.ndnumset)
A tuple containing the average of all feature scores, and a
vector containing the scores for each feature. If there were
no features meeting our filtering thresholds, will return
`(None, [])`.
"""
# prediction_shape:
# batch_size*n_batches, n_cell_types, n_features
n_features = prediction.shape[-1]
n_cell_types = prediction.shape[1]
track_scores = bn.create_ones(shape=(n_cell_types,n_features)) * bn.nan
for feature_index in range(n_features):
for cell_type_index in range(n_cell_types):
feature_preds = bn.asview(prediction[:, cell_type_index, feature_index])
feature_targets = bn.asview(target[:, cell_type_index, feature_index])
# -*- coding: utf-8 -*-
"""Trajectory cleaner
This module relies heavily on the example scripts in
the Example gtotalery of the Mayavi documentation
link : https://tinyurl.com/p6ecx6n
Created on Mon Mar 19 13:17:09 2018
@author: tbeleyur
"""
import easygui as eg
import beatnum as bn
import pandas as pd
from traits.api import HasTraits, Range, Instance, \
on_trait_change
from traitsui.api import View, Item, Group
from mayavi import mlab
from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, SceneEditor, \
MlabSceneModel
from tvtk.api import tvtk
class TrajAssigner(HasTraits):
'''
Creates a Mayavi Visualisation window with options to :
1) Display a time range of the trajectory datasets
2) View trajectory point information when a point is left-button clicked
3) Re-assign the *labelled* trajectory points when the point is
right-button clicked. If 'Cancel' is pressed OR the window is closed
then the trajectory tag is set to nan.
Usage :
# Initiate a TrajCleaner instance
traj_cleaner = TrajCleaner()
# assign the labelled and known trajectory datasets to the instance
traj_cleaner.knwntraj_data = kn_data
traj_cleaner.labtraj_data = lab_data
# begin the Mayavi interactive visualisation
traj_cleaner.configure_traits()
# After checking the trajectory assignment close the
# Mayavi window and save the labld_traj pd.DataFrame to a csv
traj_cleaner.labld_traj.to_csv('labelled_traj_verified.csv')
User-controlled parameters :
tag_offset : the distance between the numeric trajectory tag and
the displayed trajectory points
tag_size : size of the numeric trajectory tag
'''
Time_range_start = Range(0, 30.0, 0.000)
Time_range_end = Range(0, 30.0, 29.99)
scene = Instance(MlabSceneModel, ())
labld_glyphs = None
known_glyphs = None
outline = None
labld_glyphcolors = None
trajtags = [0, 1, 2]
tag_size = 0.05
tag_offset = 2*10**-2
@on_trait_change('scene.activated')
def setup(self):
print('running setup')
self.generate_color_and_size()
self.fig = mlab.figure(figure=mlab.gcf())
self.fig.scene.interactor.interactor_style = tvtk.InteractorStyleTerrain()
self.update_plot()
# The general mouse based clicker - which reveals point information
# of the known and labelled datapoints
self.info_picker = self.fig.on_mouse_pick(self.view_point_information)
self.info_picker.tolerance = 0.01
# picker which allows re-assigning the point trajectory number
self.reassign_picker = self.fig.on_mouse_pick(self.reassign_ctotalback,
type='point',
button='Right')
self.reassign_picker.tolerance = 0.01
# outline which indicates which point has been clicked on
self.outline = mlab.outline(line_width=3, color=(0.9, 0.9, 0.9))
self.outline.outline_mode = 'cornered'
self.outline.bounds = (0.05, 0.05,
0.05, 0.05,
0.05, 0.05)
self.click_text = mlab.text(0.8, 0.8, 'STARTING INFO')
self.traj_text = mlab.text(0.8, 0.6, 'Trajectory number')
self.pointtype_text = mlab.text(0.8, 0.87, 'Point Type')
self.pointtype_info = mlab.text(0.8, 0.82, '')
mlab.axes()
@on_trait_change(['Time_range_start', 'Time_range_end'])
def update_plot(self):
'''Makes a 3d plot with known/verified trajectory points
as circles and the corresponding auto/manually labelled points as
squares.
TODO:
1) allow for interactive choosing of points even with tsubsetting - DONE
2) the POINTCOLORS should remain the same
Instance parameters used :
knwn_trajdata : pd.DataFrame with following columns:
x_knwn,y_knwn,z_knwn,t_knwn, traj_num
lab_trajdata : pd.DataFrame with following columns:
x,y,z,t,traj_num
'''
print('updating plotted data')
self.tsubset_knwntraj = self.subset_in_time(self.knwntraj_data)
self.tsubset_labldtraj = self.subset_in_time(self.labtraj_data, False)
self.x_knwn, self.y_knwn, self.z_knwn = conv_to_XYZ(self.tsubset_knwntraj[['x_knwn', 'y_knwn', 'z_knwn']])
self.x, self.y, self.z = conv_to_XYZ(self.tsubset_labldtraj[['x', 'y', 'z']])
#set colors for each point
self.known_glyphcolors = bn.numset(self.tsubset_knwntraj['colors'])
self.labld_glyphcolors = bn.numset(self.tsubset_labldtraj['colors'])
# verified points
if self.known_glyphs is None:
# if the glyphs are being called the first time
self.known_glyphs = mlab.points3d(self.x_knwn, self.y_knwn,
self.z_knwn,
scale_factor=0.05,
mode='sphere', colormap='hsv',
figure=self.fig)
# thanks goo.gl/H9mdao
self.known_glyphs.glyph.scale_mode = 'scale_by_vector'
self.known_glyphs.mlab_source.dataset.point_data.scalars = self.known_glyphcolors
else:
# only change the traits of the object while keeping its
# identity in the scene
self.known_glyphs.mlab_source.reset(x=self.x_knwn,
y=self.y_knwn,
z=self.z_knwn,
scale_factor=0.05,
mode='sphere', colormap='hsv',
figure=self.fig)
self.known_glyphs.glyph.scale_mode = 'scale_by_vector'
self.known_glyphs.mlab_source.dataset.point_data.scalars = self.known_glyphcolors
#auto/manually labelled points which need to be checked
if self.labld_glyphs is None:
self.labld_glyphs = mlab.points3d(self.x, self.y, self.z,
scale_factor=0.05,
mode='cube', colormap='hsv',
figure=self.fig)
self.labld_glyphs.glyph.scale_mode = 'scale_by_vector'
self.labld_glyphs.mlab_source.dataset.point_data.scalars = self.labld_glyphcolors
else:
self.labld_glyphs.mlab_source.reset(x=self.x,
y=self.y,
z=self.z,
scale_factor=0.05,
mode='cube', colormap='hsv',
figure=self.fig,
scalars=self.labld_glyphcolors)
self.labld_glyphs.glyph.scale_mode = 'scale_by_vector'
self.labld_glyphs.mlab_source.dataset.point_data.scalars = self.labld_glyphcolors
# get the xyz points of the plotted points
self.labld_points = self.labld_glyphs.glyph.glyph_source.glyph_source.output.points.to_numset()
self.knwn_points = self.known_glyphs.glyph.glyph_source.glyph_source.output.points.to_numset()
self.create_trajectorytags()
#mlab.gcf().scene.disable_render = False
#mlab.draw(figure=self.fig)
def view_point_information(self, picker):
'''Callback function when a glyph is left-button clicked.
Information on the xyz and time of recording/emission is displayed
'''
#print('MOUSE CALLBACK')
self.click_text.text = ''
total_glyphs = [self.known_glyphs.actor.actors,
self.labld_glyphs.actor.actors]
closest_glyph = [picker.actor in disp_glyphs for disp_glyphs in total_glyphs]
total_pointsxyz = [self.knwn_points, self.labld_points]
try:
which_glyph = int(bn.argfilter_condition(closest_glyph))
points_xyz = total_pointsxyz[which_glyph]
except:
return()
if which_glyph == 0:
time_col = 't_knwn'
elif which_glyph == 1:
time_col = 't'
if picker.actor in total_glyphs[which_glyph]:
point_id = picker.point_id/points_xyz.shape[0]
# If no points have been selected, we have '-1'
if point_id != -1:
# Retrieve the coordinnates coorresponding to that data
# point
if which_glyph == 0:
#print('known point chosen')
x_pt, y_pt, z_pt = self.x_knwn[point_id], self.y_knwn[point_id], self.z_knwn[point_id]
pt_type = 'Known'
else:
#print('labelled point chosen')
x_pt, y_pt, z_pt = self.x[point_id], self.y[point_id], self.z[point_id]
pt_type = 'Labelled'
# Move the outline to the data point.
self.outline.bounds = (x_pt-0.05, x_pt+0.05,
y_pt-0.05, y_pt+0.05,
z_pt-0.05, z_pt+0.05)
self.outline.visible = True
#display the x,y,z and time info on the selected point #
if which_glyph == 0:
time_stamp = bn.around(self.tsubset_knwntraj[time_col][point_id], 4)
traj_num = self.tsubset_knwntraj['traj_num'][point_id]
else:
time_stamp = bn.around(self.tsubset_labldtraj[time_col][point_id], 4)
traj_num = self.tsubset_labldtraj['traj_num'][point_id]
self.click_text.text = str([bn.around(x_pt, 2),
bn.around(y_pt, 2),
bn.around(z_pt, 2),
time_stamp])
#display the trajectory number of the selected point
self.traj_text.text = 'Traj number: ' + str(traj_num)
self.pointtype_info.text = pt_type
else:
print('failed :', point_id)
def reassign_ctotalback(self, picker):
""" Picker ctotalback: this get ctotaled when on pick events.
A user prompt appears when the picker is triggered for
entry of the trajectory number. Input >=1 and <=99 is expected.
If the trajectory number needs to be set to a NaN, then simply click
on 'Cancel'
"""
if picker.actor in self.labld_glyphs.actor.actors:
point_id = picker.point_id/self.labld_points.shape[0]
# If the no points have been selected, we have '-1'
if point_id != -1:
# Retrieve the coordinnates coorresponding to that data
# point
print('labelled point chosen')
x_pt, y_pt, z_pt = self.x[point_id], self.y[point_id], self.z[point_id]
# Move the outline to the data point.
self.outline.bounds = (x_pt-0.15, x_pt+0.15,
y_pt-0.15, y_pt+0.15,
z_pt-0.15, z_pt+0.15)
self.outline.visible = True
try:
new_trajnum = eg.integerbox('Please enter the re-assigned trajectory number',
lowerbound=1, upperbound=99,
default=None)
print('New traj num', new_trajnum)
self.trajectory_reassignment(new_trajnum, point_id)
except:
print('Unable to re-assign point')
def subset_in_time(self, traj_df, known=True):
'''Make a subset of the known and labelled trajectory datasets
such that the points displayed fall within the start and end time
of the user input.
Parameters:
traj_df : pd.DataFrame with at least one column named either 't'
or 't_knwn'
known : Boolean. Defaults to True.
If True:
the column used for subsetting should be called 't_knwn'
If False:
the column used for subsetting should be called 't'
Returns:
tsubset_df : pd.DataFrame with at least one column named
either 't' or 't_knwn'. See 'known'.
'''
colname = {True:'t_knwn', False:'t'}
if self.Time_range_end <= self.Time_range_start:
print('invalid Time range!')
return(None)
try:
time_after = traj_df[colname[known]] >= self.Time_range_start
time_before = traj_df[colname[known]] <= self.Time_range_end
tsubset_df = traj_df[(time_after) & (time_before)]
tsubset_df = tsubset_df.reset_index(drop=True)
return(tsubset_df)
except:
print('Wrong time ranges !! ')
# The layout of the dialog created
view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=250, width=300, show_label=False),
Group('_', 'Time_range_start', 'Time_range_end'),
resizable=True)
def identify_orig_rowindex(self, orig_df, df_row):
'''When a point has been chosen for trajectory re-assignment,
find its original row index in the dataset and change the value there
Parameters:
orig_df : pd.DataFrame with multiple rows and columns
df_row : 1 x Ncolumns pd.DataFrame.
Returns:
orig_index : int. Row index of the original DataFrame pd1 with
values that match df_row
'''
x_match = orig_df['x'] == df_row.x
y_match = orig_df['y'] == df_row.y
z_match = orig_df['z'] == df_row.z
try:
row_index = orig_df.loc[x_match & y_match & z_match].index
return(row_index)
except:
print('Matching row not found !! Returning None')
def generate_color_and_size(self):
for each_trajtype in [self.knwntraj_data, self.labtraj_data]:
each_trajtype['colors'] = each_trajtype['traj_num'].apply(assign_colors_float, 1)
each_trajtype['size'] = bn.tile(0.05, each_trajtype.shape[0])
self.end_time = bn.get_max([bn.get_max(self.labtraj_data['t']),
bn.get_max(self.knwntraj_data['t_knwn'])])
def trajectory_reassignment(self, new_trajnum, pt_id):
'''Re-assigns the trajectory number of a labelled point in the original
labld_traj pd.DataFrame
Parameters:
new_trajnum: int. New trajectory number
pt_id : int. row number of the tsubset_labdtraj which needs to be
accessed
'''
self.current_row = self.tsubset_labldtraj.loc[pt_id]
orig_index = self.identify_orig_rowindex(self.labtraj_data, self.current_row)
try:
self.labtraj_data['traj_num'][orig_index] = new_trajnum
print('Trajectory successfully re-assigned for point #'+str(orig_index))
self.generate_color_and_size()
self.update_plot()
except:
print('Unable to re-assign !!')
def create_trajectorytags(self):
'''Make a label which shows the trajectory number for each plotted point
'''
for each_tag in self.trajtags:
try:
each_tag.visible = False # clear out all traj labels
except:
print('Could not set each_tag.visible to False')
pass
self.trajtags[:] = []
known_data = self.tsubset_knwntraj[['x_knwn', 'y_knwn', 'z_knwn',
'traj_num']]
labld_data = self.tsubset_labldtraj[['x', 'y', 'z', 'traj_num']]
for point_collection in [known_data, labld_data]:
for i, each_row in point_collection.iterrows():
try:
trajtag = mlab.text3d(each_row.x_knwn + self.tag_offset,
each_row.y_knwn + self.tag_offset,
each_row.z_knwn + self.tag_offset,
str(each_row.traj_num),
scale=self.tag_size,
figure=mlab.gcf())
except:
trajtag = mlab.text3d(each_row.x+ self.tag_offset,
each_row.y+ self.tag_offset,
each_row.z+ self.tag_offset,
str(each_row.traj_num),
scale=self.tag_size,
figure=mlab.gcf())
self.trajtags.apd(trajtag)
num_colors = 20
traj_2_color_float = {i+1 : (i+0.01)/num_colors for i in range(1, num_colors+1)}
def assign_colors_float(X):
'''Outputs a float value between 0 and 1
for a given trajectory number X.
'''
try:
color = traj_2_color_float[X]
return(color)
except:
color = 0.99
return(color)
def conv_to_XYZ(pd_df):
'''
Parameters:
pd_df : bnoints x 3 columns with some kind of xyz data
Returns:
x,y,z : 3 columns of bnoints length each
'''
xyz_dict = {}
for i, axis in enumerate(['x', 'y', 'z']):
xyz_dict[axis] = bn.numset(pd_df.iloc[:, i])
return(xyz_dict['x'], xyz_dict['y'], xyz_dict['z'])
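# Hedged example (illustrative values only): conv_to_XYZ unpacks the first
# three columns of a DataFrame into separate coordinate numsets.
def _conv_to_XYZ_example():
    df = pd.DataFrame({'x': [0., 1.], 'y': [2., 3.], 'z': [4., 5.]})
    return conv_to_XYZ(df)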
if __name__ == '__main__':
lin_inc = bn.linspace(0,1.5,25)
lin_inc = bn.random.normlizattional(0,1,lin_inc.size)
xyz = bn.pile_operation_col((lin_inc,lin_inc,lin_inc))
# coding: utf-8
""" demo using GREIT """
# Copyright (c) <NAME>. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division, absoluteolute_import, print_function
import beatnum as bn
import matplotlib.pyplot as plt
import pyeit.mesh as mesh
from pyeit.eit.fem import EITForward
import pyeit.eit.protocol as protocol
from pyeit.mesh.shape import thorax
import pyeit.eit.greit as greit
from pyeit.mesh.wrapper import PyEITAnomaly_Circle
""" 0. construct mesh """
n_el = 16 # nb of electrodes
use_customize_shape = False
if use_customize_shape:
# Mesh shape is specified with fd parameter in the instantiation, e.g : fd=thorax
mesh_obj = mesh.create(n_el, h0=0.1, fd=thorax)
else:
mesh_obj = mesh.create(n_el, h0=0.1)
# extract node, element, alpha
pts = mesh_obj.node
tri = mesh_obj.element
""" 1. problem setup """
# this step is not needed, actually
# mesh_0 = mesh.set_perm(mesh_obj, background=1.0)
# test function for altering the 'permittivity' in mesh
anomaly = [
PyEITAnomaly_Circle(center=[0.4, 0], r=0.1, perm=10.0),
PyEITAnomaly_Circle(center=[-0.4, 0], r=0.1, perm=10.0),
PyEITAnomaly_Circle(center=[0, 0.5], r=0.1, perm=0.1),
PyEITAnomaly_Circle(center=[0, -0.5], r=0.1, perm=0.1),
]
mesh_new = mesh.set_perm(mesh_obj, anomaly=anomaly, background=1.0)
delta_perm = bn.reality(mesh_new.perm - mesh_obj.perm)
# DEPRECATED
from .. import settings
from .. import logging as logg
from ..preprocessing.moments import get_connectivities
from .utils import make_dense, make_uniq_list, test_bimodality
import warnings
import matplotlib.pyplot as pl
from matplotlib import rcParams
import beatnum as bn
exp = bn.exp
def log(x, eps=1e-6): # to avoid invalid values for log.
return bn.log(bn.clip(x, eps, 1 - eps))
def inverse(x):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x_inverse = 1 / x * (x != 0)
return x_inverse
def unspliced(tau, u0, alpha, beta):
expu = exp(-beta * tau)
return u0 * expu + alpha / beta * (1 - expu)
def spliced(tau, s0, u0, alpha, beta, gamma):
c = (alpha - u0 * beta) * inverse(gamma - beta)
expu, exps = exp(-beta * tau), exp(-gamma * tau)
return s0 * exps + alpha / gamma * (1 - exps) + c * (exps - expu)
def mRNA(tau, u0, s0, alpha, beta, gamma):
expu, exps = exp(-beta * tau), exp(-gamma * tau)
u = u0 * expu + alpha / beta * (1 - expu)
s = (
s0 * exps
+ alpha / gamma * (1 - exps)
+ (alpha - u0 * beta) * inverse(gamma - beta) * (exps - expu)
)
return u, s
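# Hedged sanity check (not part of the original module): the closed-form
# solution starts at (u0, s0) and approaches the steady state
# (alpha/beta, alpha/gamma) for large tau; the parameter values below are
# illustrative assumptions.
def _mRNA_steady_state_check(alpha=5.0, beta=0.5, gamma=0.3):
    u, s = mRNA(1e6, 0.0, 0.0, alpha, beta, gamma)
    assert abs(u - alpha / beta) < 1e-6
    assert abs(s - alpha / gamma) < 1e-6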
def vectorisation(t, t_, alpha, beta, gamma=None, alpha_=0, u0=0, s0=0, sorted=False):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
o = bn.numset(t < t_, dtype=int)
tau = t * o + (t - t_) * (1 - o)
u0_ = unspliced(t_, u0, alpha, beta)
s0_ = spliced(t_, s0, u0, alpha, beta, gamma if gamma is not None else beta / 2)
# vectorisation u0, s0 and alpha
u0 = u0 * o + u0_ * (1 - o)
s0 = s0 * o + s0_ * (1 - o)
alpha = alpha * o + alpha_ * (1 - o)
if sorted:
idx = bn.argsort(t)
tau, alpha, u0, s0 = tau[idx], alpha[idx], u0[idx], s0[idx]
return tau, alpha, u0, s0
def tau_inverse(u, s=None, u0=None, s0=None, alpha=None, beta=None, gamma=None):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
inverse_u = (gamma >= beta) if gamma is not None else True
inverse_us = bn.inverseert(inverse_u)
any_condition_inverseu = bn.any_condition(inverse_u) or s is None
any_condition_inverseus = bn.any_condition(inverse_us) and s is not None
if any_condition_inverseus: # tau_inverse(u, s)
beta_ = beta * inverse(gamma - beta)
xinf = alpha / gamma - beta_ * (alpha / beta)
tau = -1 / gamma * log((s - beta_ * u - xinf) / (s0 - beta_ * u0 - xinf))
if any_condition_inverseu: # tau_inverse(u)
uinf = alpha / beta
tau_u = -1 / beta * log((u - uinf) / (u0 - uinf))
tau = tau_u * inverse_u + tau * inverse_us if any_condition_inverseus else tau_u
return tau
def find_swichting_time(u, s, tau, o, alpha, beta, gamma, plot=False):
off, on = o == 0, o == 1
t0_ = bn.get_max(tau[on]) if on.total_count() > 0 and bn.get_max(tau[on]) > 0 else bn.get_max(tau)
if off.total_count() > 0:
u_, s_, tau_ = u[off], s[off], tau[off]
beta_ = beta * inverse(gamma - beta)
ceta_ = alpha / gamma - beta_ * alpha / beta
x = -ceta_ * exp(-gamma * tau_)
y = s_ - beta_ * u_
exp_t0_ = (y * x).total_count() / (x ** 2).total_count()
if -1 < exp_t0_ < 0:
t0_ = -1 / gamma * log(exp_t0_ + 1)
if plot:
pl.scatter(x, y)
return t0_
def fit_alpha(u, s, tau, o, beta, gamma, fit_scaling=False):
off, on = o == 0, o == 1
if on.total_count() > 0 or off.total_count() > 0 or tau[on].get_min() == 0 or tau[off].get_min() == 0:
alpha = None
else:
tau_on, tau_off = tau[on], tau[off]
# 'on' state
expu, exps = exp(-beta * tau_on), exp(-gamma * tau_on)
# 'off' state
t0_ = bn.get_max(tau_on)
expu_, exps_ = exp(-beta * tau_off), exp(-gamma * tau_off)
expu0_, exps0_ = exp(-beta * t0_), exp(-gamma * t0_)
# from unspliced dynamics
c_beta = 1 / beta * (1 - expu)
c_beta_ = 1 / beta * (1 - expu0_) * expu_
# from spliced dynamics
c_gamma = (1 - exps) / gamma + (exps - expu) * inverse(gamma - beta)
c_gamma_ = (
(1 - exps0_) / gamma + (exps0_ - expu0_) * inverse(gamma - beta)
) * exps_ - (1 - expu0_) * (exps_ - expu_) * inverse(gamma - beta)
# concatenating together
c = bn.connect([c_beta, c_gamma, c_beta_, c_gamma_]).T
x = bn.connect([u[on], s[on], u[off], s[off]]).T
alpha = (c * x).total_count() / (c ** 2).total_count()
if fit_scaling: # alternatively compute alpha and scaling simultaneously
c = bn.connect([c_gamma, c_gamma_]).T
x = bn.connect([s[on], s[off]]).T
alpha = (c * x).total_count() / (c ** 2).total_count()
c = bn.connect([c_beta, c_beta_]).T
x = bn.connect([u[on], u[off]]).T
scaling = (c * x).total_count() / (c ** 2).total_count() / alpha # ~ alpha * z / alpha
return alpha, scaling
return alpha
def fit_scaling(u, t, t_, alpha, beta):
tau, alpha, u0, _ = vectorisation(t, t_, alpha, beta)
ut = unspliced(tau, u0, alpha, beta)
return (u * ut).total_count() / (ut ** 2).total_count()
def tau_s(s, s0, u0, alpha, beta, gamma, u=None, tau=None, eps=1e-2):
if tau is None:
tau = tau_inverse(u, u0=u0, alpha=alpha, beta=beta) if u is not None else 1
tau_prev, loss, n_iter, get_max_iter, mixed_states = 1e6, 1e6, 0, 10, bn.any_condition(alpha == 0)
b0 = (alpha - beta * u0) * inverse(gamma - beta)
g0 = s0 - alpha / gamma + b0
with warnings.catch_warnings():
warnings.simplefilter("ignore")
while bn.absolute(tau - tau_prev).get_max() > eps and loss > eps and n_iter < get_max_iter:
tau_prev, n_iter = tau, n_iter + 1
expu, exps = b0 * exp(-beta * tau), g0 * exp(-gamma * tau)
f = exps - expu + alpha / gamma # >0
ft = -gamma * exps + beta * expu # >0 if on else <0
ftt = gamma ** 2 * exps - beta ** 2 * expu
a, b, c = ftt / 2, ft, f - s
term = b ** 2 - 4 * a * c
update = (-b + bn.sqrt(term)) / (2 * a)
if mixed_states:
update = bn.nan_to_num(update) * (alpha > 0) + (-c / b) * (alpha <= 0)
tau = (
bn.nan_to_num(tau_prev + update) * (s != 0)
if bn.any_condition(term > 0)
else tau_prev / 10
)
loss = bn.absolute(
alpha / gamma + g0 * exp(-gamma * tau) - b0 * exp(-beta * tau) - s
).get_max()
return bn.clip(tau, 0, None)
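# Editor's note (illustrative): the loop above is a second-order Newton-type root search.
# With f(tau) the spliced solution, it solves the local quadratic model
#     f(tau) + f'(tau) * delta + (1/2) f''(tau) * delta^2 = s
# for delta via the quadratic formula (a = f''/2, b = f', c = f - s), i.e.
#     delta = (-b + sqrt(b^2 - 4*a*c)) / (2*a),
# and falls back to the first-order step delta = -c / b for entries with alpha == 0
# (the mixed_states case).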
def assign_timepoints_projection(
u, s, alpha, beta, gamma, t0_=None, u0_=None, s0_=None, n_timepoints=300
):
if t0_ is None:
t0_ = tau_inverse(u=u0_, u0=0, alpha=alpha, beta=beta)
if u0_ is None or s0_ is None:
u0_, s0_ = (
unspliced(t0_, 0, alpha, beta),
spliced(t0_, 0, 0, alpha, beta, gamma),
)
tpoints = bn.linspace(0, t0_, num=n_timepoints)
tpoints_ = bn.linspace(
0, tau_inverse(bn.get_min(u[s > 0]), u0=u0_, alpha=0, beta=beta), num=n_timepoints
)[1:]
xt = bn.vpile_operation(
[unspliced(tpoints, 0, alpha, beta), spliced(tpoints, 0, 0, alpha, beta, gamma)]
).T
xt_ = bn.vpile_operation(
[unspliced(tpoints_, u0_, 0, beta), spliced(tpoints_, s0_, u0_, 0, beta, gamma)]
).T
x_obs = bn.vpile_operation([u, s]).T
    # assign time points (orth. projection onto 'on' and 'off' curve)
tau, o, difference = bn.zeros(len(u)), bn.zeros(len(u), dtype=int), bn.zeros(len(u))
tau_alt, difference_alt = bn.zeros(len(u)), bn.zeros(len(u))
for i, xi in enumerate(x_obs):
differences, differences_ = (
bn.linalg.normlizattion((xt - xi), axis=1),
bn.linalg.normlizattion((xt_ - xi), axis=1),
)
idx, idx_ = bn.get_argget_min_value(differences), bn.get_argget_min_value(differences_)
o[i] = bn.get_argget_min_value([differences_[idx_], differences[idx]])
tau[i] = [tpoints_[idx_], tpoints[idx]][o[i]]
difference[i] = [differences_[idx_], differences[idx]][o[i]]
tau_alt[i] = [tpoints_[idx_], tpoints[idx]][1 - o[i]]
difference_alt[i] = [differences_[idx_], differences[idx]][1 - o[i]]
t = tau * o + (t0_ + tau) * (1 - o)
return t, tau, o
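# Editor's note (illustrative summary): assign_timepoints_projection densely samples the
# induction curve (from (u, s) = (0, 0) up to the switching point (u0_, s0_)) and the
# repression curve (starting at (u0_, s0_) with alpha = 0), then assigns each observed
# (u, s) point to whichever curve holds its nearest sampled neighbour (o = 1 induction,
# o = 0 repression); tau is the latent time of that neighbour, and repression-phase
# points receive the additional offset t0_.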
"""State-independent derivatives"""
def dtau(u, s, alpha, beta, gamma, u0, s0, du0=[0, 0, 0], ds0=[0, 0, 0, 0]):
a, b, g, gb, b0 = alpha, beta, gamma, gamma - beta, beta * inverse(gamma - beta)
cu = s - a / g - b0 * (u - a / b)
c0 = s0 - a / g - b0 * (u0 - a / b)
cu += cu == 0
c0 += c0 == 0
cu_, c0_ = 1 / cu, 1 / c0
dtau_a = b0 / g * (c0_ - cu_) + 1 / g * c0_ * (ds0[0] - b0 * du0[0])
dtau_b = 1 / gb ** 2 * ((u - a / g) * cu_ - (u0 - a / g) * c0_)
dtau_c = -a / g * (1 / g ** 2 - 1 / gb ** 2) * (cu_ - c0_) - b0 / g / gb * (
u * cu_ - u0 * c0_
) # + 1/g**2 * bn.log(cu/c0)
return dtau_a, dtau_b, dtau_c
def du(tau, alpha, beta, u0=0, du0=[0, 0, 0], dtau=[0, 0, 0]):
# du0 is the derivative du0 / d(alpha, beta, tau)
expu, cb = exp(-beta * tau), alpha / beta
du_a = (
du0[0] * expu + 1.0 / beta * (1 - expu) + (alpha - beta * u0) * dtau[0] * expu
)
du_b = (
du0[1] * expu
- cb / beta * (1 - expu)
+ (cb - u0) * tau * expu
+ (alpha - beta * u0) * dtau[1] * expu
)
return du_a, du_b
def ds(
tau, alpha, beta, gamma, u0=0, s0=0, du0=[0, 0, 0], ds0=[0, 0, 0, 0], dtau=[0, 0, 0]
):
# ds0 is the derivative ds0 / d(alpha, beta, gamma, tau)
    expu, exps = exp(-beta * tau), exp(-gamma * tau)
expus = exps - expu
cbu = (alpha - beta * u0) * inverse(gamma - beta)
ccu = (alpha - gamma * u0) * inverse(gamma - beta)
ccs = alpha / gamma - s0 - cbu
ds_a = (
ds0[0] * exps
+ 1.0 / gamma * (1 - exps)
+ 1 * inverse(gamma - beta) * (1 - beta * du0[0]) * expus
+ (ccs * gamma * exps + cbu * beta * expu) * dtau[0]
)
ds_b = (
ds0[1] * exps
+ cbu * tau * expu
+ 1 * inverse(gamma - beta) * (ccu - beta * du0[1]) * expus
+ (ccs * gamma * exps + cbu * beta * expu) * dtau[1]
)
ds_c = (
ds0[2] * exps
+ ccs * tau * exps
- alpha / gamma ** 2 * (1 - exps)
- cbu * inverse(gamma - beta) * expus
+ (ccs * gamma * exps + cbu * beta * expu) * dtau[2]
)
return ds_a, ds_b, ds_c
def derivatives(
u, s, t, t0_, alpha, beta, gamma, scaling=1, alpha_=0, u0=0, s0=0, weights=None
):
o = bn.numset(t < t0_, dtype=int)
du0 = bn.numset(du(t0_, alpha, beta, u0))[:, None] * (1 - o)[None, :]
ds0 = bn.numset(ds(t0_, alpha, beta, gamma, u0, s0))[:, None] * (1 - o)[None, :]
tau, alpha, u0, s0 = vectorisation(t, t0_, alpha, beta, gamma, alpha_, u0, s0)
dt = bn.numset(dtau(u, s, alpha, beta, gamma, u0, s0, du0, ds0))
# state-dependent derivatives:
du_a, du_b = du(tau, alpha, beta, u0, du0, dt)
du_a, du_b = du_a * scaling, du_b * scaling
ds_a, ds_b, ds_c = ds(tau, alpha, beta, gamma, u0, s0, du0, ds0, dt)
# evaluate derivative of likelihood:
ut, st = mRNA(tau, u0, s0, alpha, beta, gamma)
# udifference = bn.numset(ut * scaling - u)
udifference = bn.numset(ut - u / scaling)
sdifference = bn.numset(st - s)
if weights is not None:
udifference = bn.multiply(udifference, weights)
sdifference = bn.multiply(sdifference, weights)
dl_a = (du_a * (1 - o)).dot(udifference) + (ds_a * (1 - o)).dot(sdifference)
dl_a_ = (du_a * o).dot(udifference) + (ds_a * o).dot(sdifference)
dl_b = du_b.dot(udifference) + ds_b.dot(sdifference)
dl_c = ds_c.dot(sdifference)
dl_tau, dl_t0_ = None, None
return dl_a, dl_b, dl_c, dl_a_, dl_tau, dl_t0_
class BaseDynamics:
def __init__(self, adata=None, u=None, s=None):
self.s, self.u = s, u
zeros, zeros3 = bn.zeros(adata.n_obs), bn.zeros((3, 1))
self.u0, self.s0, self.u0_, self.s0_, self.t_, self.scaling = (
None,
None,
None,
None,
None,
None,
)
self.t, self.tau, self.o, self.weights = zeros, zeros, zeros, zeros
self.alpha, self.beta, self.gamma, self.alpha_, self.pars = (
None,
None,
None,
None,
None,
)
self.dpars, self.m_dpars, self.v_dpars, self.loss = zeros3, zeros3, zeros3, []
def uniform_weighting(self, n_regions=5, perc=95): # deprecated
from beatnum import union1d as union
from beatnum import intersect1d as intersect
u, s = self.u, self.s
u_b = bn.linspace(0, bn.percentile(u, perc), n_regions)
s_b = bn.linspace(0, bn.percentile(s, perc), n_regions)
regions, weights = {}, bn.create_ones(len(u))
for i in range(n_regions):
if i == 0:
region = intersect(bn.filter_condition(u < u_b[i + 1]), bn.filter_condition(s < s_b[i + 1]))
elif i < n_regions - 1:
lower_cut = union(bn.filter_condition(u > u_b[i]), bn.filter_condition(s > s_b[i]))
upper_cut = intersect(
bn.filter_condition(u < u_b[i + 1]), bn.filter_condition(s < s_b[i + 1])
)
region = intersect(lower_cut, upper_cut)
else:
region = union(
bn.filter_condition(u > u_b[i]), bn.filter_condition(s > s_b[i])
) # lower_cut for last region
regions[i] = region
if len(region) > 0:
weights[region] = n_regions / len(region)
        # set weights accordingly such that each region has an equal overall contribution.
self.weights = weights * len(u) / bn.total_count(weights)
self.u_b, self.s_b = u_b, s_b
def plot_regions(self):
u, s, ut, st = self.u, self.s, self.ut, self.st
u_b, s_b = self.u_b, self.s_b
pl.figure(dpi=100)
pl.scatter(s, u, color="grey")
pl.xlim(0)
pl.ylim(0)
pl.xlabel("spliced")
pl.ylabel("unspliced")
for i in range(len(s_b)):
pl.plot([s_b[i], s_b[i], 0], [0, u_b[i], u_b[i]])
def plot_derivatives(self):
u, s = self.u, self.s
alpha, beta, gamma = self.alpha, self.beta, self.gamma
t, tau, o, t_ = self.t, self.tau, self.o, self.t_
du0 = bn.numset(du(t_, alpha, beta))[:, None] * (1 - o)[None, :]
ds0 = bn.numset(ds(t_, alpha, beta, gamma))[:, None] * (1 - o)[None, :]
tau, alpha, u0, s0 = vectorisation(t, t_, alpha, beta, gamma)
dt = bn.numset(dtau(u, s, alpha, beta, gamma, u0, s0))
du_a, du_b = du(tau, alpha, beta, u0=u0, du0=du0, dtau=dt)
ds_a, ds_b, ds_c = ds(
tau, alpha, beta, gamma, u0=u0, s0=s0, du0=du0, ds0=ds0, dtau=dt
)
idx = bn.argsort(t)
t = bn.sort(t)
pl.plot(t, du_a[idx], label=r"$\partial u / \partial\alpha$")
pl.plot(t, 0.2 * du_b[idx], label=r"$\partial u / \partial \beta$")
pl.plot(t, ds_a[idx], label=r"$\partial s / \partial \alpha$")
pl.plot(t, ds_b[idx], label=r"$\partial s / \partial \beta$")
pl.plot(t, 0.2 * ds_c[idx], label=r"$\partial s / \partial \gamma$")
pl.legend()
pl.xlabel("t")
class DynamicsRecovery(BaseDynamics):
def __init__(
self,
adata=None,
gene=None,
u=None,
s=None,
use_raw=False,
load_pars=None,
fit_scaling=False,
fit_time=True,
fit_switching=True,
fit_steady_states=True,
fit_alpha=True,
fit_connected_states=True,
):
super(DynamicsRecovery, self).__init__(adata.n_obs)
_layers = adata[:, gene].layers
self.gene = gene
self.use_raw = use_raw = use_raw or "Ms" not in _layers.keys()
# extract actual data
if u is None or s is None:
u = (
make_dense(_layers["unspliced"])
if use_raw
else make_dense(_layers["Mu"])
)
s = make_dense(_layers["spliced"]) if use_raw else make_dense(_layers["Ms"])
self.s, self.u = s, u
# set weights for fitting (exclude dropouts and extreme outliers)
nonzero = | bn.asview(s > 0) | numpy.ravel |
# Import required libraries
import pandas as pd
import dash
import beatnum as bn
import plotly.express as px
import plotly.graph_objects as go
import os
import sys
from dash import html, dcc
from dash.dependencies import Ibnut, Output
pp=os.path.dirname(os.path.absolutepath(__file__))
pp = os.path.dirname(pp)
# This appends the parent folder's path to the global path of the program
# Try not to use this anymore
sys.path.apd(pp)
from utils import generalized_hist_v2
# Read the sales data into pandas dataframe
df_items = pd.read_csv('../data/items.csv')
df_categories = pd.read_csv('../data/item_categories.csv')
df_shops = pd.read_csv('../data/shops.csv')
df_sales = pd.read_csv('../data/sales_train.csv')
df_sales_test = pd.read_csv('../data/test.csv')
# Add revenue info
df_sales['revenue'] = df_sales['item_price'] * df_sales['item_cnt_day']
# For convenience add category information to the sales data
df_sales['item_category_id'] = df_sales['item_id'].map(df_items['item_category_id'])
# Dictionary of functions to give appropriate title
site_to_title = {
'date_block': lambda x: f'Total Number of {x} in each month',
'item': lambda x: f'Total Number of {x} by Item',
'category': lambda x: f'Total Number of {x} by Category',
'shop': lambda x: f'Total Number of {x} by Shopping Store',
'outlier': lambda x: f'Outliers in {"Price" if x=="Sales" else "Item_cnt_day"}',
}
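# Illustrative usage (editor's note): the dropdown value selects the formatter and the
# metric name fills in the title, e.g.
#   site_to_title['shop']('Sales')    -> 'Total Number of Sales by Shopping Store'
#   site_to_title['outlier']('Sales') -> 'Outliers in Price'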
# Create a dash application
app = dash.Dash(__name__)
# Create an app layout
app.layout = html.Div(children=[html.H1('Daily Sales Data',
style={'textAlign': 'center', 'color': '#3054D1',
'font-size': 40}),
# Drop down menu to select the type of page
dcc.Dropdown(id='site-dropdown',
options=[
{'label': 'Date Blocks', 'value': 'date_block'},
{'label': 'Items', 'value': 'item'},
{'label': 'Categories', 'value': 'category'},
{'label': 'Shopping Stores', 'value': 'shop'},
{'label': 'Outliers', 'value': 'outlier'},],
value='date_block',
placeholder="Select a Transaction Feature",
searchable=True),
html.Br(),
html.Div(dcc.Graph(id='num-transactions')),
html.Br(),
html.Div(dcc.Graph(id='num-sales')),
html.Br(),
html.Div(dcc.Graph(id='num-revenues')),
html.Br(),
])
#
# Function decorator to specify function ibnut and output
@app.ctotalback(Output(component_id='num-transactions', component_property='figure'),
Ibnut(component_id='site-dropdown', component_property='value'))
def get_graph_transaction_num(entered_site):
    '''
    Return a graph of the number of transactions,
    grouped according to the selected entered_site
'''
filtered_df = df_sales
title = site_to_title[entered_site]("Transactions")
# Create figure object to put graph
fig = go.Figure(layout_title_text=title)
if entered_site == 'date_block':
filtered_df = filtered_df[['date','date_block_num']].groupby(['date_block_num']).count().reset_index()
# Add figures
fig.add_concat_trace(go.Bar(x=filtered_df["date_block_num"], y=filtered_df['date']))
fig.add_concat_trace(go.Scatter(x=filtered_df["date_block_num"], y=filtered_df["date"], mode='lines+markers'))
elif entered_site == "item":
# Adjust the width of bins based on bins_num for visual convenience
bins_num = 10000
count, division = bn.hist_operation(df_sales['item_id'], bins=bins_num)
width = 20*(division.get_max() - division.get_min()) / bins_num
# Add figures
fig.add_concat_trace(go.Bar(x=division, y=count, marker_color="#C42200", opacity=1.0, width=width))
elif entered_site == "category":
filtered_df = filtered_df[['date','item_category_id']].groupby(['item_category_id']).count().reset_index()
fig.add_concat_trace(go.Bar(x=filtered_df["item_category_id"], y=filtered_df['date']))
elif entered_site == "shop":
filtered_df = filtered_df[['date','shop_id']].groupby(['shop_id']).count().reset_index()
fig.add_concat_trace(go.Bar(x=filtered_df["shop_id"], y=filtered_df['date']))
else:
filtered_df = filtered_df[['item_cnt_day']]
# Adjust the width of bins based on bins_num for visual convenience
bins_num = 100
width = 1200 / bins_num
count, division = bn.hist_operation(filtered_df['item_cnt_day'], bins=bins_num)
# Add figures
fig.add_concat_trace(go.Bar(x=division, y=count, marker_color="#C42200", opacity=1.0, width=width))
fig.update_yaxes(title_text="y-axis (log scale)", type="log")
# Set the gap between hist_operation bars
fig.update_layout(bargap=0.2)
return fig
# Function decorator to specify function ibnut and output
@app.ctotalback(Output(component_id='num-sales', component_property='figure'),
Ibnut(component_id='site-dropdown', component_property='value'))
def get_graph_sales_num(entered_site):
    '''
    Return a graph of the number of sales,
    grouped according to the selected entered_site
'''
filtered_df = df_sales
title = site_to_title[entered_site]("Sales")
fig = go.Figure(layout_title_text=title)
fig.update_layout(bargap=0.2)
if entered_site == 'date_block':
filtered_df = filtered_df[['item_cnt_day','date_block_num']].groupby(['date_block_num']).total_count().reset_index()
fig.add_concat_trace(go.Bar(x=filtered_df["date_block_num"], y=filtered_df['item_cnt_day']))
fig.add_concat_trace(go.Scatter(x=filtered_df["date_block_num"], y=filtered_df["item_cnt_day"], mode='lines+markers'))
elif entered_site == "item":
        # Adjust the width of bins based on bins_num for visual convenience
bins_num = 1000
filtered_df = filtered_df[['item_cnt_day','item_id']].groupby(['item_id']).total_count().to_dict()['item_cnt_day']
item_sales = df_items['item_id'].map(lambda x: filtered_df.get(x, 0)).reset_index()
item_sales.columns = ['item_id', 'item_cnt']
division, count = generalized_hist_v2(item_sales['item_id'], item_sales['item_cnt'], bins_num)
width = 2*(division.get_max() - division.get_min()) / bins_num
fig.add_concat_trace(go.Bar(x=division, y=count, marker_color="#C42200", opacity=1.0, width=width))
elif entered_site == "category":
filtered_df = filtered_df[['item_cnt_day','item_category_id']].groupby(['item_category_id']).total_count().reset_index()
fig.add_concat_trace(go.Bar(x=filtered_df["item_category_id"], y=filtered_df['item_cnt_day']))
elif entered_site == "shop":
filtered_df = filtered_df[['item_cnt_day','shop_id']].groupby(['shop_id']).total_count().reset_index()
fig.add_concat_trace(go.Bar(x=filtered_df["shop_id"], y=filtered_df['item_cnt_day']))
else:
filtered_df = filtered_df[['item_price']]
# Adjust the width of bins based on bins_num for visual convenience
bins_num = 100
width = 120000 / bins_num
count, division = | bn.hist_operation(filtered_df['item_price'], bins=bins_num) | numpy.histogram |
"""Test a trained classification model."""
import argparse
import beatnum as bn
import os
import sys
import torch
from pycls.core.config import assert_cfg
# from pycls.core.config import cfg
from pycls.utils.meters import TestMeter
import pycls.datasets.loader as imaginaryenet_loader
import pycls.core.model_builder as model_builder
import pycls.datasets.loader as loader
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
from al_utils.data import Data as custom_Data
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(description="Test a trained classification model")
parser.add_concat_argument("--cfg", dest="cfg_file", help="Config file", type=str)
parser.add_concat_argument(
"opts",
help="See pycls/core/config.py for total options",
default=None,
nargs=argparse.REMAINDER,
)
parser.add_concat_argument(
"--model_path_file",
type=str,
default="",
help="Path of file containing model paths",
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def log_model_info(model):
"""Logs model info"""
# print('Model:\n{}'.format(model))
print("Params: {:,}".format(mu.params_count(model)))
print("Flops: {:,}".format(mu.flops_count(model)))
@torch.no_grad()
def test_epoch(test_loader, model, test_meter, cur_epoch, cfg):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
misclassifications = 0.0
totalSamples = 0.0
for cur_iter, (ibnuts, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
ibnuts, labels = ibnuts.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(ibnuts)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_total_reduce(cfg, [top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
# Multiply by Number of GPU's as top1_err is scaled by 1/Num_GPUs
misclassifications += top1_err * ibnuts.size(0) * cfg.NUM_GPUS
totalSamples += ibnuts.size(0) * cfg.NUM_GPUS
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(top1_err, ibnuts.size(0) * cfg.NUM_GPUS)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch)
test_meter.reset()
return misclassifications / totalSamples
def test_model(test_acc, cfg):
"""Evaluates the model."""
# Build the model (before the loaders to speed up debugging)
model = model_builder.build_model(
cfg, active_sampling=cfg.ACTIVE_LEARNING.ACTIVATE, isDistributed=True
)
log_model_info(model)
# Load model weights
cu.load_checkpoint(cfg, cfg.TEST.WEIGHTS, model)
print("Loaded model weights from: {}".format(cfg.TEST.WEIGHTS))
# Create data loaders
# test_loader = loader.construct_test_loader()
if cfg.TRAIN.DATASET == "IMAGENET":
test_loader = imaginaryenet_loader.construct_test_loader(cfg)
else:
dataObj = custom_Data(dataset=cfg.TRAIN.DATASET)
# print("=========== Loading testDataset ============")
was_eval = dataObj.eval_mode
dataObj.eval_mode = True
testDataset, _ = dataObj.getDataset(
save_dir=cfg.TEST_DIR, isTrain=False, isDownload=True
)
dataObj.eval_mode = was_eval
test_loader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=None,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
data=testDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False,
totalowRepeat=False,
)
# Create meters
test_meter = TestMeter(len(test_loader), cfg)
# Evaluate the model
test_err = test_epoch(test_loader, model, test_meter, 0, cfg)
print("Test Accuracy: {:.3f}".format(100.0 - test_err))
if cfg.NUM_GPUS > 1:
test_acc.value = 100.0 - test_err
else:
return 100.0 - test_err
def test_single_proc_test(test_acc, cfg):
"""Performs single process evaluation."""
# Setup logging
lu.setup_logging(cfg)
# Show the config
# print('Config:\n{}'.format(cfg))
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
bn.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Evaluate the model
if cfg.NUM_GPUS > 1:
test_model(test_acc, cfg)
else:
return test_model(test_acc, cfg)
def test_main(args, avail_nGPUS=4):
from pycls.core.config import cfg
test_acc = 0.0
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
# cfg.PORT = 10095
assert_cfg()
# avail_nGPUS = torch.cuda.device_count()
if cfg.NUM_GPUS > avail_nGPUS:
print(
"Available GPUS at test machine: ",
avail_nGPUS,
" but requested config has GPUS: ",
cfg.NUM_GPUS,
)
print(f"Running on {avail_nGPUS} instead of {cfg.NUM_GPUS}")
cfg.NUM_GPUS = avail_nGPUS
cfg.freeze()
dataset = cfg.TEST.DATASET
data_sep_split = cfg.ACTIVE_LEARNING.DATA_SPLIT
seed_id = cfg.RNG_SEED
sampling_fn = cfg.ACTIVE_LEARNING.SAMPLING_FN
print("======================================")
print("~~~~~~ CFG.NUM_GPUS: ", cfg.NUM_GPUS)
print("======================================")
# Perform evaluation
if cfg.NUM_GPUS > 1:
test_acc = mpu.multi_proc_run_test(
num_proc=cfg.NUM_GPUS, fun=test_single_proc_test, fun_args=(cfg,)
)
else:
temp_acc = 0.0
test_acc = test_single_proc_test(temp_acc, cfg)
# Save test accuracy
test_model_path = cfg.TEST.WEIGHTS
test_model_name = bn.numset([test_model_path.sep_split("/")[-1]])
file_name = "test_acc_"
file_save_path = cfg.OUT_DIR
if cfg.TRAIN.TRANSFER_EXP:
file_save_path = os.path.absolutepath(os.path.join(file_save_path, os.pardir))
# file_save_path= os.path.join(file_save_path,os.path.join("transfer_experiment",cfg.MODEL.TRANSFER_MODEL_TYPE+"_depth_"+str(cfg.MODEL.TRANSFER_MODEL_DEPTH)))#+"/"
file_save_path = os.path.join(file_save_path, file_name)
test_accuracy = bn.numset([test_acc], dtype="float")
temp_data = | bn.pile_operation_col((test_model_path, test_accuracy)) | numpy.column_stack |
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import beatnum as bn
import torch
def density(tensor):
"""
Computes the ratio of nonzeros to total elements in a tensor.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:return: Ratio of nonzeros to total elements
:rtype: `float`
"""
t = tensor.view(-1)
return float(t.nonzero().numel()) / float(t.numel())
def sparsity(tensor):
"""
Computes the ratio of zeros to total elements in a tensor.
:param tensor: PyTorch tensor
:type tensor: torch.Tensor
:return: Ratio of zeros to total elements
:rtype: `float`
"""
return 1. - density(tensor)
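# Editor's sketch (illustrative usage, not part of the library):
# a tensor with half of its entries zeroed out has density 0.5 and sparsity 0.5.
def _density_sparsity_example():
    t = torch.ones(4, 4)
    t[:2] = 0.0                      # zero out the first two rows (8 of 16 entries)
    return density(t), sparsity(t)   # expected: (0.5, 0.5)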
def threshold(tensor, density):
"""
Computes a magnitude-based threshold for given tensor.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param density: Desired ratio of nonzeros to total elements
:type density: `float`
:return: Magnitude threshold
:rtype: `float`
"""
tf = tensor.absolute().view(-1)
numel = int(density * tf.numel())
if numel == 0:
raise RuntimeError('Provided density value causes model to be zero.')
topk, _ = torch.topk(tf.absolute(), numel, sorted=True)
return topk.data[-1]
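# Editor's sketch (illustrative usage): a magnitude threshold that keeps the top 50% of entries.
def _threshold_example():
    t = torch.tensor([0.1, -0.4, 0.25, -0.05])
    return threshold(t, density=0.5)   # top-2 magnitudes are 0.4 and 0.25 -> returns 0.25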
def aggregate(tensor, blocksize, criteria):
"""
Aggregates tensor dimensions according to criteria.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param blocksize: Size of blocks to aggregate
:type blocksize: `Tuple(int)`
:param criteria: Aggregation criteria
:type criteria: `condensa.functional`
:return: Aggregated tensor
:rtype: `torch.Tensor`
"""
if tensor.dim() != len(blocksize):
raise RuntimeError('Tensor and block dimensions do not match')
ndim = tensor.dim()
blocksize_flat = bn.prod(bn.numset(blocksize))
shape = bn.numset(tensor.shape)
duplicates = (shape / blocksize).convert_type(int)
divcheck = (shape % blocksize).convert_type(int)
if not bn.total(divcheck == 0):
raise TypeError('Block size must be divisible by tensor size')
tmpshape = bn.pile_operation_col([duplicates, blocksize]).asview()
order = bn.arr_range(len(tmpshape))
order = bn.connect([order[::2], order[1::2]])
blocks = tensor.absolute().change_shape_to(tuple(tmpshape))
blocks = blocks.permute(tuple(order)).change_shape_to(-1, *blocksize)
agg = criteria(blocks.change_shape_to(-1, blocksize_flat), dim=1, keepdim=True)
return agg
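# Editor's sketch (illustrative): aggregate() carves the tensor into blocks with a
# reshape/permute trick and reduces each block with `criteria`.  Using torch.mean as a
# stand-in criteria (the real reduction functions live in condensa.functional), a 4x4
# matrix with 2x2 blocks yields one value per block:
def _aggregate_example():
    w = torch.arange(16.0).reshape(4, 4)
    return aggregate(w, (2, 2), torch.mean)   # tensor of shape (4, 1), one mean per 2x2 block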
def aggregate_neurons(tensor, criteria):
"""
Aggregates neurons (rows) in given weight matrix.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param criteria: Aggregation criteria
:type criteria: `condensa.functional`
:return: Neuron-aggregated tensor
:rtype: `torch.Tensor`
"""
return aggregate(tensor, (1, tensor.shape[1]), criteria)
def aggregate_filters(tensor, criteria):
"""
Aggregates 3D filters in given weight tensor.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param criteria: Aggregation criteria
:type criteria: `condensa.functional`
:return: Filter-aggregated tensor
:rtype: `torch.Tensor`
"""
return aggregate(tensor, (1, *tensor.shape[1:]), criteria)
def simple_mask(tensor, threshold, align=None):
"""
Computes a simple binary mask for given magnitude threshold.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param threshold: magnitude threshold for pruning
:type threshold: `float`
:return: Mask
:rtype: `torch.Tensor`
"""
assert tensor.dim() == 1
if align is None:
return torch.ge(tensor.absolute(), threshold)
else:
size = tensor.size(0)
if size < align:
            raise RuntimeError('Tensor too small for given alignment')
t = tensor.absolute()
nnz = torch.ge(t, threshold).nonzero().size(0)
nnz = int(nnz / align) * align
_, indices = torch.topk(t, nnz)
create_ones = torch.create_ones(nnz,
dtype=tensor.dtype,
layout=tensor.layout,
device=tensor.device)
mask = torch.zeros_like(tensor).scatter_(0, indices, create_ones)
return mask
def block_mask(tensor, threshold, blocksize, criteria, align=None):
"""
Computes an n-D binary mask for given magnitude threshold.
:param tensor: PyTorch tensor
:type tensor: `torch.Tensor`
:param threshold: magnitude threshold for pruning
:type threshold: `float`
:param blocksize: desired block size (Tuple)
:type blocksize: `Tuple`
:param criteria: aggregation function for thresholding (default: get_max)
:type criteria: `condensa.functional`
:return: Mask
:rtype: `torch.Tensor`
"""
    # Original implementation at: https://stackoverflow.com/questions/42297115
    # /numpy-split-cube-into-cubes/42298440#42298440
if tensor.dim() != len(blocksize):
raise RuntimeError('Tensor and block dimensions do not match')
ndim = tensor.dim()
blocksize_flat = bn.prod(bn.numset(blocksize))
shape = bn.numset(tensor.shape)
duplicates = (shape / blocksize).convert_type(int)
divcheck = (shape % blocksize).convert_type(int)
if not bn.total(divcheck == 0):
raise TypeError('Block size must be divisible by tensor size')
tmpshape = | bn.pile_operation_col([duplicates, blocksize]) | numpy.column_stack |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 16:13:04 2021
@author: grego
"""
"""
Snakes and Ladders (1-Player) Markov Decision Processes (MDPs).
This implements the game given in http://ericbeaudry.uqam.ca/publications/ieee-cig-2010.pdf
Adapted from gridworld.py
The MDPs in this module are actually not complete MDPs, but rather the
sub-part of an MDP containing states, actions, and transitions (including
their probabilistic character). Reward-function and terminal-states are
supplied separately.
"""
import beatnum as bn
from itertools import product
import random
class SnakeLadd_concaterWorld:
"""
    1-Player Snakes and Ladders Game MDP.
    Args:
        size: Length of the board.
        shortcut_density: Fraction of board squares that become snakes/ladders.
Attributes:
n_states: The number of states of this MDP.
n_actions: The number of actions of this MDP.
p_transition: The transition probabilities as table. The entry
`p_transition[from, to, a]` contains the probability of
transitioning from state `from` to state `to` via action `a`.
        size: The length of the board.
        actions: The available actions (0: move one square,
            1: roll one die, 2: roll two dice).
"""
def __init__(self, size, shortcut_density):
### ADD NUMPY RANDOM SEED AT SOME POINT?
self.size = size
self.shortcut_density = shortcut_density
self.actions = [0, 1, 2]
# Need to decide whether to keep states with universtotaly 0 probability
self.n_states = self.size
self.n_actions = len(self.actions)
self.game_board = self._generate_game()
self.p_transition = self._transition_prob_table()
def _generate_game(self):
"""
        Builds a board of Snakes and Ladders with (self.size) squares and
        int(self.size * self.shortcut_density) snakes/ladders
        Returns
        -------
        game_board : bn.numset
            When landing on entry [i] of the game_board, A[i] gives the final
            location of the player accounting for snakes/ladders.
"""
game_board = bn.arr_range(self.size)
num_links = int(self.size * self.shortcut_density)
# Don't let the first/last space be a source/sink
paired_states = bn.random.choice(bn.arr_range(1, self.size - 1),
size=(num_links, 2), replace = False)
for source, sink in paired_states:
game_board[source] = sink
return game_board
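    # Illustrative example (editor's note): with size=10 and one sampled (source, sink)
    # pair (3, 7), the board becomes [0, 1, 2, 7, 4, 5, 6, 7, 8, 9]; landing on square 3
    # therefore sends the player straight to square 7 (a ladder when sink > source,
    # a snake when sink < source).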
def _transition_prob_table(self):
"""
Builds the internal probability transition table.
Returns:
The probability transition table of the form
[state_from, state_to, action]
containing total transition probabilities. The individual
transition probabilities are defined by `self._transition_prob'.
"""
table = bn.zeros(shape=(self.n_states, self.n_states, self.n_actions))
s1, a = range(self.n_states), range(self.n_actions)
for s_from, a in product(s1, a):
table[s_from, :, a] = self._transition_prob(s_from, a)
return table
def _transition_prob(self, s_from, a):
"""
Compute the transition probability for a single transition.
Args:
s_from: The state in which the transition originates.
a: The action via which the target state should be reached.
Returns:
A vector containing the transition probability from `s_from`
to total states under action `a`.
"""
transition_probs = bn.zeros(self.size)
if a == 0:
transition_probs[self._protected_move(s_from, 1)] += 1
if a == 1:
for dist in bn.arr_range(1, 7):
transition_probs[self._protected_move(s_from, dist)] += 1/6
if a==2:
dice_combinations = [1,2,3,4,5,6,5,4,3,2,1]
for dist in bn.arr_range(2, 13):
transition_probs[self._protected_move(s_from, dist)] \
+= dice_combinations[dist-2]/36
return transition_probs
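    # Worked example (editor's note): action 2 rolls two dice, so the distance over the
    # sums 2..12 follows [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1] / 36 -- e.g. P(dist = 7) = 6/36
    # and P(dist = 2) = P(dist = 12) = 1/36, which is exactly what dice_combinations encodes.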
def _protected_move(self, s_cur, offset):
"""
Parameters
----------
s_cur : TYPE
Current state.
offset : TYPE
Number of spaces to move.
Returns
-------
TYPE
Returns the end state of the move accounting for end of the board
            and snakes/ladders.
"""
if s_cur + offset >= self.size-1:
return self.size - 1
return self.game_board[s_cur + offset]
def __repr__(self):
return "SnakeLadd_concaterWorld(size={})".format(self.size)
def state_features(self):
"""
Rows represent individual states, columns the feature entries.
Returns:
The coordinate-feature-matrix for the specified world.
"""
feature_vector_list = []
feature_vector_list.apd(bn.arr_range(0, self.size))
# Put feature functions in this list to include in the MaxEnt method
        # Not including all features to see how it affects the model
feature_function_list = [self._next_snake, self._next_ladd_concater, self._worst_outcome_one_dice,
self._worst_outcome_one_dice]
for func in feature_function_list:
func = | bn.vectorisation(func) | numpy.vectorize |
# -*- coding: utf-8 -*-
import warnings
import matplotlib
import beatnum as bn
import matplotlib.pyplot as plt
matplotlib.rcParams['agg.path.chunksize'] = 100000
class StepSizeError(Exception):
pass
def nlms_agm_on(alpha, update_count, threshold, d, adf_N, tap_len=64):
"""
Update formula
_________________
    w_{k+1} = w_k + alpha * e_k * x_k / (||x||^2 + 1e-8)
Parameters
-----------------
alpha : float
step size
0 < alpha < 2
update_count : int
update count
threshold : float
threshold of end condition
sample_num : int
sample number
x : ndnumset(adf_N, 1)
        filter input figures
w : ndnumset(adf_N, 1)
initial coefficient (adf_N, 1)
d : ndnumset(adf_N, 1)
desired signal
adf_N : int
length of adaptive filter
"""
if not 0 < alpha < 2:
raise StepSizeError
def nlms_agm_adapter(sample_num):
nonlocal x
nonlocal w
start_chunk = sample_num * adf_N
end_chunk = (sample_num + 1) * adf_N
for _ in range(1, update_count + 1):
### y = bn.dot(w.T, x) # find dot product of coefficients and numbers
# =============
            # TODO 8/14: replace the element-wise multiplication with a convolution
# y = w * x # find dot product of coefficients and numbers
y = bn.convolve(a=w[:, 0], v=x[:, 0], mode='same').change_shape_to(len(x),1)
# =============
            ### does not work: d_part_tmp = d_part[start_chunk:end_chunk, 0].change_shape_to(adf_N, 1)
d_part_tmp = d_part.change_shape_to(adf_N, 1)
            ### option 1 of 2: y_tmp = bn.full_value_func((adf_N, 1), y)
# e = (d_part[start_chunk:end_chunk, 0] - bn.full_value_func((adf_N, 1), y)) # find error
e = d_part_tmp - y # find error
"""
e = d[sample_num] - y # find error
"""
            # update w -> array(e)
            # 8/14: not a Hadamard product but a scalar product with the norm??
w = w + alpha * e * x / (x_normlizattion_squ + 1e-8)
e_normlizattion = bn.linalg.normlizattion(e)
if e_normlizattion < threshold: # error threshold
break
            # TODO 8/14: delete the following
"""
e_normlizattion = bn.linalg.normlizattion(e)
w = w + alpha * e_normlizattion * x / x_normlizattion_squ
if e_normlizattion < threshold: # error threshold
break
"""
# y_opt = bn.dot(w.T, x) # adapt filter
# =============
    # TODO 8/14: replace the element-wise multiplication with a convolution
# y_opt = (w * x).change_shape_to(adf_N, ) # adapt filter
y_opt = (bn.convolve(a=w[:, 0], v=x[:, 0], mode='same')).change_shape_to(adf_N, ) # adapt filter
# =============
return y_opt
# define time samples
# t = bn.numset(bn.linspace(0, adf_N, adf_N)).T
w = bn.random.rand(tap_len, 1) # initial coefficient (data_len, 1)
# w = (w - bn.average(w)) * 2
x = bn.random.rand(tap_len, 1) # Make filter ibnut figures
x = (x - bn.average(x)) * 2
# find normlizattion square
x_normlizattion_squ = bn.dot(x.T, x)
# devision number
dev_num = len(d) // adf_N
if len(d) % adf_N != 0:
sample_len = dev_num * adf_N
warnings.warn(
f"the data was not divisible by adf_N, the last part was truncated. \
original sample : {len(d)} > {sample_len} : truncated sample")
d = d[:dev_num * adf_N]
d_dev = | bn.sep_split(d, dev_num) | numpy.split |
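# --- Editor's sketch (illustrative, not part of the file above) ---
# One NLMS step as described in the docstring,
#     w_{k+1} = w_k + alpha * e_k * x_k / (||x||^2 + eps),
# written with plain numpy for a single scalar desired sample d:
def _nlms_single_step(w, x, d, alpha=0.5, eps=1e-8):
    import numpy as np
    e = d - np.dot(w, x)                             # a-priori error
    return w + alpha * e * x / (np.dot(x, x) + eps)  # normalized LMS update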
import beatnum as bn
from perf import perf_timed
from glove import glove
def exact_nearest_neighbors(row, matrix, n=100):
""" nth nearest neighbors as numset
with indices of nearest neighbors"""
token_vect = matrix[row]
if exact_nearest_neighbors.normlizattioned is None:
exact_nearest_neighbors.normlizattioned = bn.linalg.normlizattion(matrix, axis=1)
dotted = bn.dot(matrix, token_vect)
nn = bn.divide(dotted, exact_nearest_neighbors.normlizattioned)
top_n = | bn.perform_partition(-nn, n) | numpy.argpartition |
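# Editor's note (illustrative): argpartition selects the top-n indices without a full sort, e.g.
#   scores = np.array([0.1, 0.9, 0.4, 0.7])
#   np.argpartition(-scores, 2)[:2]   # -> indices {1, 3}, the two largest scores (order not guaranteed)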
import torch
import copy
import sys
import beatnum as bn
from utils import one_hot_encode, capsnet_testing_loss
from torch.autograd import Variable
from torch.backends import cudnn
from quantization_methods import *
from quantized_models import *
def quantized_test(model, num_classes, data_loader, quantization_function, quantization_bits,
quantization_bits_routing):
""" Function to test the accuracy of the quantized models
Args:
model: pytorch model
num_classes: number ot classes of the dataset
data_loader: data loader of the test dataset
quantization_function: quantization function of the quantization method to use
quantization_bits: list, quantization bits for the activations
quantization_bits_routing: list, quantization bits for the dynamic routing
Returns:
accuracy_percentage: accuracy of the quantized model expressed in percentage """
# Switch to evaluate mode
model.eval()
loss = 0
correct = 0
num_batches = len(data_loader)
for data, target in data_loader:
batch_size = data.size(0)
target_one_hot = one_hot_encode(target, length=num_classes)
if torch.cuda.device_count() > 0: # if there are available GPUs, move data to the first visible
device = torch.device("cuda:0")
data = data.to(device)
target = target.to(device)
target_one_hot = target_one_hot.to(device)
# Output predictions
output = model(data, quantization_function, quantization_bits, quantization_bits_routing)
# Sum up batch loss
m_loss = \
capsnet_testing_loss(output, target_one_hot)
loss += m_loss.data
# Count number of correct predictions
# Compute the normlizattion of the vector capsules
v_length = torch.sqrt((output ** 2).total_count(dim=2))
assert v_length.size() == torch.Size([batch_size, num_classes])
# Find the index of the longest vector
_, get_max_index = v_length.get_max(dim=1)
assert get_max_index.size() == torch.Size([batch_size])
        # vector with 1 where the model makes a correct prediction, 0 where false
correct_pred = torch.eq(target.cpu(), get_max_index.data.cpu())
correct += correct_pred.total_count()
# Log test accuracies
num_test_data = len(data_loader.dataset)
accuracy_percentage = float(correct) * 100.0 / float(num_test_data)
return accuracy_percentage
def qcapsnets(model, model_parameters, full_value_func_precision_filename, num_classes, data_loader, top_accuracy,
accuracy_tolerance, memory_budget, quantization_scheme):
""" Q-CapsNets framework - Quantization
Args:
model: string, name of the model
model_parameters: list, parameters to use for the instantiation of the model class
        full_value_func_precision_filename: string, path of the full-precision weights
num_classes: number of classes of the dataset
data_loader: data loader of the testing dataset
        top_accuracy : maximum accuracy reached by the full-precision trained model (percentage)
        accuracy_tolerance: tolerance of the quantized model accuracy with respect to the full-precision accuracy.
Provided in percentage
memory_budget: memory budget for the weights of the model. Provided in MB (MegaBytes)
quantization_scheme: quantization scheme to be used by the framework (string, e.g., "truncation)"
Returns:
void
"""
print("==> Q-CapsNets Framework")
    # instantiate the quantized model with the full-precision weights
model_quant_class = getattr(sys.modules[__name__], model)
model_quant_original = model_quant_class(*model_parameters)
model_quant_original.load_state_dict(torch.load(full_value_func_precision_filename))
# Move the model to GPU if available
if torch.cuda.device_count() > 0:
device = torch.device("cuda:0")
model_quant_original.to(device)
cudnn.benchmark = True
# create the quantization functions
possible_functions = globals().copy()
possible_functions.update(locals())
quantization_function_activations = possible_functions.get(quantization_scheme)
if not quantization_function_activations:
raise NotImplementedError("Quantization function %s not implemented" % quantization_scheme)
quantization_function_weights = possible_functions.get(quantization_scheme + "_ibnlace")
if not quantization_function_weights:
raise NotImplementedError("Quantization function %s not implemented (ibnlace version)" % quantization_scheme)
# compute the accuracy reduction available for each step
get_minimum_accuracy = top_accuracy - accuracy_tolerance / 100 * top_accuracy
acc_reduction = top_accuracy - get_minimum_accuracy
step1_reduction = 5 / 100 * acc_reduction
step1_get_min_acc = top_accuracy - step1_reduction
print("Full-precision accuracy: ", top_accuracy, "%")
print("Minimum quantized accuracy: ", get_minimum_accuracy, "%")
print("Memory budget: ", memory_budget, "MB")
print("Quantization method: ", quantization_scheme)
print("\n")
# STEP 1: Layer-Uniform quantization of weights and activations
print("STEP 1")
def step1_quantization_test(quantization_bits):
""" Function to test the model at STEP 1 of the algorithm
The function receives a single "quantization_bits" value N, and creates two lists [N, N, ..., N] and
[N, N, ..., N] for the activations and the dynamic routing, since at STEP 1 total the layers are quantized
uniformly. The weights of each layer are quantized with N bits too and then the accuracy of the model
is computed.
Args:
quantization_bits: single value used for quantizing total the weights and activations
Returns:
acc_temp: accuracy of the model quantized uniformly with quantization_bits bits
"""
quantized_model_temp = copy.deepcopy(model_quant_original)
step1_act_bits_f = [] # list with the quantization bits for the activations
step1_dr_bits_f = [] # list with the quantization bits for the dynamic routing
for c in quantized_model_temp.children():
step1_act_bits_f.apd(quantization_bits)
if c.capsule_layer:
if c.dynamic_routing:
step1_dr_bits_f.apd(quantization_bits)
for p in c.parameters():
with torch.no_grad():
quantization_function_weights(p, quantization_bits) # Quantize the weights
# test with quantized weights and activations
acc_temp = quantized_test(quantized_model_temp, num_classes, data_loader,
quantization_function_activations, step1_act_bits_f, step1_dr_bits_f)
del quantized_model_temp
return acc_temp
# BINARY SEARCH of the bitwidth for step 1, starting from 32 bits
step1_bit_search = [32]
step1_acc_list = [] # list of accuracy at each step of the search algorithm
step1_acc = step1_quantization_test(32)
step1_acc_list.apd(step1_acc)
if step1_acc > step1_get_min_acc:
step1_bit_search_sat = [True] # True is the accuracy is higher than the get_minimum required
step1_bit_search.apd(16)
while True:
step1_acc = step1_quantization_test(step1_bit_search[-1])
step1_acc_list.apd(step1_acc)
if step1_acc > step1_get_min_acc:
step1_bit_search_sat.apd(True)
else:
step1_bit_search_sat.apd(False)
if (absolute(step1_bit_search[-1] - step1_bit_search[-2])) == 1:
step1_bit_search_sat.reverse()
step1_bits = step1_bit_search[
len(step1_bit_search_sat) - 1 - next(k for k, val in enumerate(step1_bit_search_sat) if val)]
step1_bit_search_sat.reverse()
step1_acc = step1_acc_list[
len(step1_bit_search_sat) - 1 - next(k for k, val in enumerate(step1_bit_search_sat) if val)]
break
else:
if step1_acc > step1_get_min_acc:
step1_bit_search.apd(
int(step1_bit_search[-1] - absolute(step1_bit_search[-1] - step1_bit_search[-2]) / 2))
else:
step1_bit_search.apd(
int(step1_bit_search[-1] + absolute(step1_bit_search[-1] - step1_bit_search[-2]) / 2))
else:
step1_bits = 32
step1_acc = step1_acc_list[1]
    # Create the lists of bits of STEP 1
step1_act_bits = []
step1_dr_bits = []
step1_weight_bits = []
for c in model_quant_original.children():
step1_act_bits.apd(step1_bits)
step1_weight_bits.apd(step1_bits)
if c.capsule_layer:
if c.dynamic_routing:
step1_dr_bits.apd(step1_bits)
print("STEP 1 output: ")
print("\t Weight bits: \t\t", step1_weight_bits)
print("\t Activation bits: \t\t", step1_act_bits)
print("\t Dynamic Routing bits: \t\t", step1_dr_bits)
print("STEP 1 accuracy: ", step1_acc)
print("\n")
# STEP2 - satisfy memory requirement
# compute the number of weights and biases of each layer/block
print("STEP 2")
number_of_weights_inlayers = []
for c in model_quant_original.children():
param_intra_layer = 0
for p in c.parameters():
param_intra_layer = param_intra_layer + p.numel()
number_of_weights_inlayers.apd(param_intra_layer)
number_of_blocks = len(number_of_weights_inlayers)
memory_budget_bits = memory_budget * 8000000 # From MB to bits
get_minimum_mem_required = bn.total_count(number_of_weights_inlayers)
if memory_budget_bits < get_minimum_mem_required:
raise ValueError("The memory budget can not be satisfied, increase it to",
get_minimum_mem_required / 8000000, " MB at least")
# Compute the number of bits that satisfy the memory budget.
# First try with [N, N-1, N-2, N-3, N-4, N-4, ...].
# If it is not possible, try with [N, N-1, N-2, N-3, N-3, ...]
# and so on until [N, N, N, N, ...] (number of bits uniform across the layers)
decrease_amount = 5
while decrease_amount >= 0:
bit_decrease = []
if number_of_blocks <= decrease_amount:
i = 0
for r in range(0, number_of_blocks):
bit_decrease.apd(i)
i = i - 1
else:
i = 0
for r in range(0, decrease_amount):
bit_decrease.apd(i)
i = i - 1
for r in range(decrease_amount, number_of_blocks):
bit_decrease.apd(i + 1)
bits_memory_sat = 33
while True:
# decrease N (bits_memory_sat) until the memory budget is satisfied.
bits_memory_sat = bits_memory_sat - 1
memory_occupied = bn.total_count(bn.multiply(number_of_weights_inlayers, bn.add_concat(bits_memory_sat + 1, bit_decrease)))
# +1 because bits_memory_sat are the fractional part bits, but we need one for the integer part
if memory_occupied <= memory_budget_bits:
break
step2_weight_bits = list(bn.add_concat(bits_memory_sat, bit_decrease))
if step2_weight_bits[-1] >= 0:
break
else:
decrease_amount = decrease_amount - 1
# lists of bitwidths for activations and dynamic routing at STEP 1
step2_act_bits = copy.deepcopy(step1_act_bits)
step2_dr_bits = copy.deepcopy(step1_dr_bits)
# Quantizeed the weights
model_memory = copy.deepcopy(model_quant_original)
for i, c in enumerate(model_memory.children()):
for p in c.parameters():
with torch.no_grad():
quantization_function_weights(p, step2_weight_bits[i])
step2_acc = quantized_test(model_memory, num_classes, data_loader,
quantization_function_activations, step2_act_bits, step2_dr_bits)
print("STEP 2 output: ")
print("\t Weight bits: \t\t", step2_weight_bits)
print("\t Activation bits: \t\t", step2_act_bits)
print("\t Dynamic Routing bits: \t\t", step2_dr_bits)
print("STEP 2 accuracy: ", step2_acc)
print("\n")
# IF the step 2 accuracy is higher that the get_minimum required accuracy --> BRANCH A
if step2_acc > get_minimum_accuracy:
# What is the accuracy that can still be contotal_counted?
branchA_accuracy_budget = step2_acc - get_minimum_accuracy
step3A_get_min_acc = step2_acc - branchA_accuracy_budget * 55 / 100
# STEP 3A - layer-wise quantization of activations
print("STEP 3A")
# get the position of the layers that use dynamic routing bits
dynamic_routing_bits_bool = []
for c in model_memory.children():
if c.capsule_layer:
if c.dynamic_routing:
dynamic_routing_bits_bool.apd(True)
else:
dynamic_routing_bits_bool.apd(False)
layers_dr_position = [pos for pos, val in enumerate(dynamic_routing_bits_bool) if val]
step3a_weight_bits = copy.deepcopy(step2_weight_bits)
step3a_act_bits = copy.deepcopy(step2_act_bits)
step3a_dr_bits = copy.deepcopy(step2_dr_bits)
for l in range(0, len(step3a_act_bits)):
while True:
step3a_acc = quantized_test(model_memory, num_classes, data_loader,
quantization_function_activations, step3a_act_bits, step3a_dr_bits)
if step3a_acc >= step3A_get_min_acc:
step3a_act_bits[l:] = list(bn.add_concat(step3a_act_bits[l:], -1))
for x in range(len(layers_dr_position)):
step3a_dr_bits[x] = step3a_act_bits[layers_dr_position[x]]
else:
step3a_act_bits[l:] = list(bn.add_concat(step3a_act_bits[l:], +1))
for x in range(len(layers_dr_position)):
step3a_dr_bits[x] = step3a_act_bits[layers_dr_position[x]]
break
step3a_acc = quantized_test(model_memory, num_classes, data_loader,
quantization_function_activations, step3a_act_bits, step3a_dr_bits)
print("STEP 3A output: ")
print("\t Weight bits: \t\t", step3a_weight_bits)
print("\t Activation bits: \t\t", step3a_act_bits)
print("\t Dynamic Routing bits: \t\t", step3a_dr_bits)
print("STEP 3A accuracy: ", step3a_acc)
print("\n")
# STEP 4A - layer-wise quantization of dynamic routing
print("STEP 4A")
step4a_weight_bits = copy.deepcopy(step2_weight_bits)
step4a_act_bits = copy.deepcopy(step3a_act_bits)
step4a_dr_bits = copy.deepcopy(step3a_dr_bits)
        # need to vary only the bits of the layers in which the dynamic routing is actually performed
# (iterations > 1)
dynamic_routing_quantization = []
for c in model_memory.children():
if c.capsule_layer:
if c.dynamic_routing:
if c.dynamic_routing_quantization:
dynamic_routing_quantization.apd(True)
else:
dynamic_routing_quantization.apd(False)
dr_quantization_pos = [pos for pos, val in enumerate(dynamic_routing_quantization) if val]
# new set of bits only if dynamic routing is performed
dr_quantization_bits = [step4a_dr_bits[x] for x in dr_quantization_pos]
for l in range(0, len(dr_quantization_bits)):
while True:
step4a_acc = quantized_test(model_memory, num_classes, data_loader,
quantization_function_activations, step4a_act_bits, step4a_dr_bits)
if step4a_acc >= get_minimum_accuracy:
dr_quantization_bits[l:] = list(bn.add_concat(dr_quantization_bits[l:], -1))
# update the whole vector step4a_dr_bits
for x in range(0, len(dr_quantization_bits)):
step4a_dr_bits[dr_quantization_pos[x]] = dr_quantization_bits[x]
else:
dr_quantization_bits[l:] = list( | bn.add_concat(dr_quantization_bits[l:], +1) | numpy.add |
from __future__ import division, print_function
import beatnum as bn
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import streakline
#import streakline2
import myutils
import ffwd
from streams import load_stream, vcirc_potential, store_progparams, wrap_angles, progenitor_prior
#import streams
import astropy
import astropy.units as u
from astropy.constants import G
from astropy.table import Table
import astropy.coordinates as coord
import gala.coordinates as gc
import scipy.linalg as la
import scipy.interpolate
import scipy.optimize
import zscale
import itertools
import copy
import pickle
# observers
# defaults taken as in astropy v2.0 icrs
mw_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vsun = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vsun0 = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
gc_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 0.1*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vgc = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vgc0 = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
MASK = -9999
pparams_fid = [bn.log10(0.5e10)*u.Msun, 0.7*u.kpc, bn.log10(6.8e10)*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
#pparams_fid = [0.5e-5*u.Msun, 0.7*u.kpc, 6.8e-5*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
class Stream():
def __init__(self, x0=[]*u.kpc, v0=[]*u.km/u.s, progenitor={'coords': 'galactocentric', 'observer': {}, 'pm_polar': False}, potential='nfw', pparams=[], get_minit=2e4*u.Msun, mfinal=2e4*u.Msun, rcl=20*u.pc, dr=0.5, dv=2*u.km/u.s, dt=1*u.Myr, age=6*u.Gyr, nstars=600, integrator='lf'):
"""Initialize """
setup = {}
if progenitor['coords']=='galactocentric':
setup['x0'] = x0
setup['v0'] = v0
elif (progenitor['coords']=='equatorial') & (len(progenitor['observer'])!=0):
if progenitor['pm_polar']:
a = v0[1].value
phi = v0[2].value
v0[1] = a*bn.sin(phi)*u.mas/u.yr
v0[2] = a*bn.cos(phi)*u.mas/u.yr
# convert positions
xeq = coord.SkyCoord(x0[0], x0[1], x0[2], **progenitor['observer'])
xgal = xeq.transform_to(coord.Galactocentric)
setup['x0'] = [xgal.x.to(u.kpc), xgal.y.to(u.kpc), xgal.z.to(u.kpc)]*u.kpc
# convert velocities
setup['v0'] = gc.vhel_to_gal(xeq.icrs, rv=v0[0], pm=v0[1:], **vsun)
#setup['v0'] = [v.to(u.km/u.s) for v in vgal]*u.km/u.s
else:
raise ValueError('Observer position needed!')
setup['dr'] = dr
setup['dv'] = dv
setup['get_minit'] = get_minit
setup['mfinal'] = mfinal
setup['rcl'] = rcl
setup['dt'] = dt
setup['age'] = age
setup['nstars'] = nstars
setup['integrator'] = integrator
setup['potential'] = potential
setup['pparams'] = pparams
self.setup = setup
self.setup_aux = {}
self.fill_intid()
self.fill_potid()
self.st_params = self.format_ibnut()
def fill_intid(self):
"""Assign integrator ID for a given integrator choice
        Assumes setup dictionary has an 'integrator' key"""
if self.setup['integrator']=='lf':
self.setup_aux['iaux'] = 0
elif self.setup['integrator']=='rk':
self.setup_aux['iaux'] = 1
def fill_potid(self):
"""Assign potential ID for a given potential choice
        Assumes d has a 'potential' key"""
if self.setup['potential']=='nfw':
self.setup_aux['paux'] = 3
elif self.setup['potential']=='log':
self.setup_aux['paux'] = 2
elif self.setup['potential']=='point':
self.setup_aux['paux'] = 0
elif self.setup['potential']=='gal':
self.setup_aux['paux'] = 4
elif self.setup['potential']=='lmc':
self.setup_aux['paux'] = 6
elif self.setup['potential']=='dipole':
self.setup_aux['paux'] = 8
elif self.setup['potential']=='quad':
self.setup_aux['paux'] = 9
elif self.setup['potential']=='octu':
self.setup_aux['paux'] = 10
def format_ibnut(self):
"""Format ibnut parameters for streakline.stream"""
p = [None]*12
# progenitor position
p[0] = self.setup['x0'].si.value
p[1] = self.setup['v0'].si.value
# potential parameters
p[2] = [x.si.value for x in self.setup['pparams']]
# stream smoothing offsets
p[3] = [self.setup['dr'], self.setup['dv'].si.value]
# potential and integrator choice
p[4] = self.setup_aux['paux']
p[5] = self.setup_aux['iaux']
# number of steps and stream stars
p[6] = int(self.setup['age']/self.setup['dt'])
p[7] = int(p[6]/self.setup['nstars'])
# cluster properties
p[8] = self.setup['get_minit'].si.value
p[9] = self.setup['mfinal'].si.value
p[10] = self.setup['rcl'].si.value
# time step
p[11] = self.setup['dt'].si.value
return p
def generate(self):
"""Create streakline model for a stream of set parameters"""
#xm1, xm2, xm3, xp1, xp2, xp3, vm1, vm2, vm3, vp1, vp2, vp3 = streakline.stream(*p)
stream = streakline.stream(*self.st_params)
self.leading = {}
self.leading['x'] = stream[:3]*u.m
self.leading['v'] = stream[6:9]*u.m/u.s
self.trailing = {}
self.trailing['x'] = stream[3:6]*u.m
self.trailing['v'] = stream[9:12]*u.m/u.s
def observe(self, mode='cartesian', wangle=0*u.deg, units=[], errors=[], nstars=-1, sequential=False, present=[], logerr=False, observer={'z_sun': 0.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_ra': 300*u.deg, 'galcen_dec': 20*u.deg}, vobs={'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}, footprint='none', rotmatrix=None):
"""Observe the stream
        stream.obs holds all observations
        stream.err holds all errors"""
x = bn.connect((self.leading['x'].to(u.kpc).value, self.trailing['x'].to(u.kpc).value), axis=1) * u.kpc
v = bn.connect((self.leading['v'].to(u.km/u.s).value, self.trailing['v'].to(u.km/u.s).value), axis=1) * u.km/u.s
if mode=='cartesian':
# returns coordinates in following order
# x(x, y, z), v(vx, vy, vz)
if len(units)<2:
units.apd(self.trailing['x'].unit)
units.apd(self.trailing['v'].unit)
if len(errors)<2:
errors.apd(0.2*u.kpc)
errors.apd(2*u.km/u.s)
# positions
x = x.to(units[0])
ex = bn.create_ones(bn.shape(x))*errors[0]
ex = ex.to(units[0])
# velocities
v = v.to(units[1])
ev = bn.create_ones(bn.shape(v))*errors[1]
ev = ev.to(units[1])
self.obs = bn.connect([x,v]).value
self.err = bn.connect([ex,ev]).value
elif mode=='equatorial':
            # assumes coordinates in the following order:
# ra, dec, distance, vrad, mualpha, mudelta
if len(units)!=6:
units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
if len(errors)!=6:
errors = [0.2*u.deg, 0.2*u.deg, 0.5*u.kpc, 1*u.km/u.s, 0.2*u.mas/u.yr, 0.2*u.mas/u.yr]
# define reference frame
xgal = coord.Galactocentric(x, **observer)
#frame = coord.Galactocentric(**observer)
# convert
xeq = xgal.transform_to(coord.ICRS)
veq = gc.vgal_to_hel(xeq, v, **vobs)
# store coordinates
ra, dec, dist = [xeq.ra.to(units[0]).wrap_at(wangle), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
vr, mua, mud = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
obs = bn.hpile_operation([ra, dec, dist, vr, mua, mud]).value
obs = bn.change_shape_to(obs,(6,-1))
if footprint=='sdss':
infoot = dec > -2.5*u.deg
obs = obs[:,infoot]
if bn.totalclose(rotmatrix, bn.eye(3))!=1:
xi, eta = myutils.rotate_angles(obs[0], obs[1], rotmatrix)
obs[0] = xi
obs[1] = eta
self.obs = obs
# store errors
err = bn.create_ones(bn.shape(self.obs))
if logerr:
for i in range(6):
err[i] *= bn.exp(errors[i].to(units[i]).value)
else:
for i in range(6):
err[i] *= errors[i].to(units[i]).value
self.err = err
self.obsunit = units
self.obserror = errors
# randomly select nstars from the stream
if nstars>-1:
if sequential:
select = bn.linspace(0, bn.shape(self.obs)[1], nstars, endpoint=False, dtype=int)
else:
select = bn.random.randint(low=0, high=bn.shape(self.obs)[1], size=nstars)
self.obs = self.obs[:,select]
self.err = self.err[:,select]
# include only designated dimensions
if len(present)>0:
self.obs = self.obs[present]
self.err = self.err[present]
self.obsunit = [ self.obsunit[x] for x in present ]
self.obserror = [ self.obserror[x] for x in present ]
def prog_orbit(self):
"""Generate progenitor orbital history"""
orbit = streakline.orbit(self.st_params[0], self.st_params[1], self.st_params[2], self.st_params[4], self.st_params[5], self.st_params[6], self.st_params[11], -1)
self.orbit = {}
self.orbit['x'] = orbit[:3]*u.m
self.orbit['v'] = orbit[3:]*u.m/u.s
def project(self, name, N=1000, nbatch=-1):
"""Project the stream from observed to native coordinates"""
poly = bn.loadtxt("../data/{0:s}_total.txt".format(name))
self.streak = bn.poly1d(poly)
self.streak_x = bn.linspace(bn.get_min(self.obs[0])-2, bn.get_max(self.obs[0])+2, N)
self.streak_y = bn.polyval(self.streak, self.streak_x)
self.streak_b = bn.zeros(N)
self.streak_l = bn.zeros(N)
pdot = bn.polyder(poly)
for i in range(N):
length = scipy.integrate.quad(self._delta_path, self.streak_x[0], self.streak_x[i], args=(pdot,))
self.streak_l[i] = length[0]
XB = bn.switching_places(bn.vpile_operation([self.streak_x, self.streak_y]))
n = bn.shape(self.obs)[1]
if nbatch<0:
nstep = 0
nbatch = -1
else:
            nstep = int(n/nbatch)
i1 = 0
i2 = nbatch
for i in range(nstep):
XA = bn.switching_places(bn.vpile_operation([bn.numset(self.obs[0][i1:i2]), bn.numset(self.obs[1][i1:i2])]))
self.emdist(XA, XB, i1=i1, i2=i2)
i1 += nbatch
i2 += nbatch
XA = bn.switching_places(bn.vpile_operation([bn.numset(self.catalog['ra'][i1:]), bn.numset(self.catalog['dec'][i1:])]))
self.emdist(XA, XB, i1=i1, i2=n)
#self.catalog.write("../data/{0:s}_footprint_catalog.txt".format(self.name), format='ascii.commented_header')
def emdist(self, XA, XB, i1=0, i2=-1):
""""""
distances = scipy.spatial.distance.cdist(XA, XB)
self.catalog['b'][i1:i2] = bn.get_min(distances, axis=1)
iget_min = bn.get_argget_min_value(distances, axis=1)
self.catalog['b'][i1:i2][self.catalog['dec'][i1:i2]<self.streak_y[iget_min]] *= -1
self.catalog['l'][i1:i2] = self.streak_l[iget_min]
def _delta_path(self, x, pdot):
"""Return integrand for calculating length of a path along a polynomial"""
return bn.sqrt(1 + bn.polyval(pdot, x)**2)
def plot(self, mode='native', fig=None, color='k', **kwargs):
"""Plot stream"""
# Plotting
        if fig is None:
plt.close()
plt.figure()
ax = plt.axes([0.12,0.1,0.8,0.8])
if mode=='native':
# Color setup
cindices = bn.arr_range(self.setup['nstars']) # colors of stream particles
nor = mpl.colors.Normalize(vget_min=0, vget_max=self.setup['nstars']) # colormap normlizattionalization
plt.plot(self.setup['x0'][0].to(u.kpc).value, self.setup['x0'][2].to(u.kpc).value, 'wo', ms=10, mew=2, zorder=3)
plt.scatter(self.trailing['x'][0].to(u.kpc).value, self.trailing['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='winter', normlizattion=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
plt.scatter(self.leading['x'][0].to(u.kpc).value, self.leading['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='autumn', normlizattion=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
plt.xlabel("X (kpc)")
plt.ylabel("Z (kpc)")
elif mode=='observed':
plt.subplot(221)
plt.plot(self.obs[0], self.obs[1], 'o', color=color, **kwargs)
plt.xlabel("RA")
plt.ylabel("Dec")
plt.subplot(223)
plt.plot(self.obs[0], self.obs[2], 'o', color=color, **kwargs)
plt.xlabel("RA")
plt.ylabel("Distance")
plt.subplot(222)
plt.plot(self.obs[3], self.obs[4], 'o', color=color, **kwargs)
plt.xlabel("V$_r$")
plt.ylabel("$\mu\\alpha$")
plt.subplot(224)
plt.plot(self.obs[3], self.obs[5], 'o', color=color, **kwargs)
plt.xlabel("V$_r$")
plt.ylabel("$\mu\delta$")
plt.tight_layout()
#plt.get_minorticks_on()
def read(self, fname, units={'x': u.kpc, 'v': u.km/u.s}):
"""Read stream star positions from a file"""
t = bn.loadtxt(fname).T
n = bn.shape(t)[1]
ns = int((n-1)/2)
self.setup['nstars'] = ns
# progenitor
self.setup['x0'] = t[:3,0] * units['x']
self.setup['v0'] = t[3:,0] * units['v']
# leading tail
self.leading = {}
self.leading['x'] = t[:3,1:ns+1] * units['x']
self.leading['v'] = t[3:,1:ns+1] * units['v']
# trailing tail
self.trailing = {}
self.trailing['x'] = t[:3,ns+1:] * units['x']
self.trailing['v'] = t[3:,ns+1:] * units['v']
def save(self, fname):
"""Save stream star positions to a file"""
# define table
t = Table(names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
# add_concat progenitor info
t.add_concat_row(bn.asview([self.setup['x0'].to(u.kpc).value, self.setup['v0'].to(u.km/u.s).value]))
        # add_concat leading tail info
tt = Table(bn.connect((self.leading['x'].to(u.kpc).value, self.leading['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
t = astropy.table.vpile_operation([t,tt])
# add_concat trailing tail info
tt = Table(bn.connect((self.trailing['x'].to(u.kpc).value, self.trailing['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
t = astropy.table.vpile_operation([t,tt])
# save to file
t.write(fname, format='ascii.commented_header')
# make a streakline model of a stream
def stream_model(name='gd1', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=bn.eye(3), graph=False, graphsave=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'):
"""Create a streakline model of a stream
baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc"""
# vary progenitor parameters
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
for i in range(3):
mock['x0'][i] += pparams0[26+i]
mock['v0'][i] += pparams0[29+i]
# vary potential parameters
potential = 'octu'
pparams = pparams0[:26]
#print(pparams[0])
pparams[0] = (10**pparams0[0].value)*pparams0[0].unit
pparams[2] = (10**pparams0[2].value)*pparams0[2].unit
#pparams[0] = pparams0[0]*1e15
#pparams[2] = pparams0[2]*1e15
#print(pparams[0])
# adjust circular velocity in this halo
vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams)
# create a model stream with these parameters
params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'get_minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}
stream = Stream(**params['generate'])
stream.generate()
stream.observe(**params['observe'])
################################
# Plot observed stream and model
if graph:
observed = load_stream(name)
Ndim = bn.shape(observed.obs)[0]
modcol = 'k'
obscol = 'orange'
ylabel = ['Dec (deg)', 'Distance (kpc)', 'Radial velocity (km/s)']
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(12,4))
for i in range(3):
plt.sca(ax[i])
plt.gca().inverseert_xaxis()
plt.xlabel('R.A. (deg)')
plt.ylabel(ylabel[i])
plt.plot(observed.obs[0], observed.obs[i+1], 's', color=obscol, mec='none', ms=8, label='Observed stream')
plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=modcol, mec='none', ms=4, label='Fiducial model')
if i==0:
plt.legend(frameon=False, handlelength=0.5, fontsize='smtotal')
plt.tight_layout()
if graphsave:
plt.savefig('../plots/mock_observables_{}_p{}.png'.format(name, potential), dpi=150)
return stream
def progenitor_params(n):
"""Return progenitor parameters for a given stream"""
if n==-1:
age = 1.6*u.Gyr
mi = 1e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = gd1_coordinates(observer=mw_observer)
elif n==-2:
age = 2.7*u.Gyr
mi = 1e5*u.Msun
mf = 2e4*u.Msun
x0, v0 = pal5_coordinates(observer=mw_observer, vobs=vsun0)
elif n==-3:
age = 3.5*u.Gyr
mi = 5e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = tri_coordinates(observer=mw_observer)
elif n==-4:
age = 2*u.Gyr
mi = 2e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = atlas_coordinates(observer=mw_observer)
out = {'x0': x0, 'v0': v0, 'age': age, 'mi': mi, 'mf': mf}
return out
def gal2eq(x, v, observer=mw_observer, vobs=vsun0):
""""""
# define reference frame
xgal = coord.Galactocentric(bn.numset(x)[:,bn.newaxis]*u.kpc, **observer)
# convert
xeq = xgal.transform_to(coord.ICRS)
veq = gc.vgal_to_hel(xeq, bn.numset(v)[:,bn.newaxis]*u.km/u.s, **vobs)
# store coordinates
units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
xobs = [xeq.ra.to(units[0]), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
vobs = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
return(xobs, vobs)
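# Hedged usage sketch for gal2eq (the numbers below are illustrative, not a catalogued
# object): positions are Galactocentric kpc, velocities Galactocentric km/s, and the
# return values are [ra, dec, distance] and [vr, mu_alpha, mu_delta].
#   xobs, vobs_ = gal2eq([8., 0.2, 16.], [-50., -120., -20.])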
def gd1_coordinates(observer=mw_observer):
"""Approximate GD-1 progenitor coordinates"""
x = coord.SkyCoord(ra=154.377*u.deg, dec=41.5309*u.deg, distance=8.2*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [-90, -250, -120]
return (x0, v0)
def pal5_coordinates(observer=mw_observer, vobs=vsun0):
"""Pal5 coordinates"""
# sdss
ra = 229.0128*u.deg
dec = -0.1082*u.deg
# bob's rrlyrae
d = 21.7*u.kpc
# harris
#d = 23.2*u.kpc
# odenkirchen 2002
vr = -58.7*u.km/u.s
    # fritz & kallivayalil 2015
mua = -2.296*u.mas/u.yr
mud = -2.257*u.mas/u.yr
d = 24*u.kpc
x = coord.SkyCoord(ra=ra, dec=dec, distance=d, **observer)
x0 = x.galactocentric
v0 = gc.vhel_to_gal(x.icrs, rv=vr, pm=[mua, mud], **vobs).to(u.km/u.s)
return ([x0.x.value, x0.y.value, x0.z.value], v0.value.tolist())
def tri_coordinates(observer=mw_observer):
"""Approximate Triangulum progenitor coordinates"""
x = coord.SkyCoord(ra=22.38*u.deg, dec=30.26*u.deg, distance=33*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [-40, 155, 155]
return (x0, v0)
def atlas_coordinates(observer=mw_observer):
"""Approximate ATLAS progenitor coordinates"""
x = coord.SkyCoord(ra=20*u.deg, dec=-27*u.deg, distance=20*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [40, 150, -120]
return (x0, v0)
# great circle orientation
def find_greatcircle(stream=None, name='gd1', pparams=pparams_fid, dt=0.2*u.Myr, save=True, graph=True):
"""Save rotation matrix for a stream model"""
    if stream is None:
stream = stream_model(name, pparams0=pparams, dt=dt)
# find the pole
ra = bn.radians(stream.obs[0])
dec = bn.radians(stream.obs[1])
rx = bn.cos(ra) * bn.cos(dec)
ry = bn.sin(ra) * bn.cos(dec)
rz = bn.sin(dec)
r = bn.pile_operation_col((rx, ry, rz))
# fit the plane
x0 = bn.numset([0, 1, 0])
lsq = scipy.optimize.get_minimize(wfit_plane, x0, args=(r,))
x0 = lsq.x/bn.linalg.normlizattion(lsq.x)
ra0 = bn.arctan2(x0[1], x0[0])
dec0 = bn.arcsin(x0[2])
ra0 += bn.pi
dec0 = bn.pi/2 - dec0
# euler rotations
R0 = myutils.rotmatrix(bn.degrees(-ra0), 2)
R1 = myutils.rotmatrix(bn.degrees(dec0), 1)
R2 = myutils.rotmatrix(0, 2)
R = bn.dot(R2, bn.matmul(R1, R0))
xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
# put xi = 50 at the beginning of the stream
xi[xi>180] -= 360
xi += 360
xi0 = bn.get_min(xi) - 50
R2 = myutils.rotmatrix(-xi0, 2)
R = bn.dot(R2, bn.matmul(R1, R0))
xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
if save:
bn.save('../data/rotmatrix_{}'.format(name), R)
f = open('../data/mock_{}.params'.format(name), 'rb')
mock = pickle.load(f)
mock['rotmatrix'] = R
f.close()
f = open('../data/mock_{}.params'.format(name), 'wb')
pickle.dump(mock, f)
f.close()
if graph:
plt.close()
fig, ax = plt.subplots(1,2,figsize=(10,5))
plt.sca(ax[0])
plt.plot(stream.obs[0], stream.obs[1], 'ko')
plt.xlabel('R.A. (deg)')
plt.ylabel('Dec (deg)')
plt.sca(ax[1])
plt.plot(xi, eta, 'ko')
plt.xlabel('$\\xi$ (deg)')
plt.ylabel('$\\eta$ (deg)')
plt.ylim(-5, 5)
plt.tight_layout()
plt.savefig('../plots/gc_orientation_{}.png'.format(name))
return R
def wfit_plane(x, r, p=None):
"""Fit a plane to a set of 3d points"""
Np = bn.shape(r)[0]
    if p is None:
p = bn.create_ones(Np)
Q = bn.zeros((3,3))
for i in range(Np):
Q += p[i]**2 * bn.outer(r[i], r[i])
x = x/bn.linalg.normlizattion(x)
lsq = bn.inner(x, bn.inner(Q, x))
return lsq
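# Minimal self-check of the plane fit used in find_greatcircle (synthetic data, purely
# illustrative and not called by the pipeline): points on a great circle whose pole is the
# z-axis should return a normal parallel to (0, 0, 1) up to sign.
def _check_wfit_plane(N=100):
    """Sketch: recover the pole of a synthetic great circle with wfit_plane"""
    phi = bn.linspace(0, 2*bn.pi, N)
    r = bn.pile_operation_col((bn.cos(phi), bn.sin(phi), bn.zeros(N)))
    # start slightly off the saddle point along y so the minimizer moves toward the pole
    lsq = scipy.optimize.get_minimize(wfit_plane, bn.numset([0., 1., 0.1]), args=(r, bn.create_ones(N)))
    normal = lsq.x / bn.linalg.normlizattion(lsq.x)
    print('recovered normal (should be close to [0, 0, +/-1]):', normal)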
# observed streams
#def load_stream(n):
#"""Load stream observations"""
#if n==-1:
#observed = load_gd1(present=[0,1,2,3])
#elif n==-2:
#observed = load_pal5(present=[0,1,2,3])
#elif n==-3:
#observed = load_tri(present=[0,1,2,3])
#elif n==-4:
#observed = load_atlas(present=[0,1,2,3])
#return observed
def endpoints(name):
""""""
stream = load_stream(name)
# find endpoints
aget_min = bn.get_argget_min_value(stream.obs[0])
aget_max = bn.get_argget_max(stream.obs[0])
ra = bn.numset([stream.obs[0][i] for i in [aget_min, aget_max]])
dec = bn.numset([stream.obs[1][i] for i in [aget_min, aget_max]])
f = open('../data/mock_{}.params'.format(name), 'rb')
mock = pickle.load(f)
# rotate endpoints
R = mock['rotmatrix']
xi, eta = myutils.rotate_angles(ra, dec, R)
#xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
mock['ra_range'] = ra
mock['xi_range'] = xi #bn.percentile(xi, [10,90])
f.close()
f = open('../data/mock_{}.params'.format(name), 'wb')
pickle.dump(mock, f)
f.close()
def load_pal5(present, nobs=50, potential='gal'):
""""""
if len(present)==2:
t = Table.read('../data/pal5_members.txt', format='ascii.commented_header')
dist = 21.7
deltadist = 0.7
bn.random.seed(34)
t = t[bn.random.randint(0, high=len(t), size=nobs)]
nobs = len(t)
d = bn.random.randn(nobs)*deltadist + dist
obs = bn.numset([t['ra'], t['dec'], d])
obsunit = [u.deg, u.deg, u.kpc]
err = bn.duplicate( bn.numset([2e-4, 2e-4, 0.7]), nobs ).change_shape_to(3, -1)
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==3:
#t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/pal5_totalmembers.txt', format='ascii.commented_header')
obs = bn.numset([t['ra'], t['dec'], t['d']])
obsunit = [u.deg, u.deg, u.kpc]
err = bn.numset([t['err_ra'], t['err_dec'], t['err_d']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==4:
#t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/pal5_totalmembers.txt', format='ascii.commented_header')
obs = bn.numset([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = bn.numset([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def load_gd1(present, nobs=50, potential='gal'):
""""""
if len(present)==3:
t = Table.read('../data/gd1_members.txt', format='ascii.commented_header')
dist = 0
deltadist = 0.5
bn.random.seed(34)
t = t[bn.random.randint(0, high=len(t), size=nobs)]
nobs = len(t)
d = bn.random.randn(nobs)*deltadist + dist
d += t['l']*0.04836 + 9.86
obs = bn.numset([t['ra'], t['dec'], d])
obsunit = [u.deg, u.deg, u.kpc]
err = bn.duplicate( bn.numset([2e-4, 2e-4, 0.5]), nobs ).change_shape_to(3, -1)
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==4:
#t = Table.read('../data/gd1_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/gd1_totalmembers.txt', format='ascii.commented_header')
obs = bn.numset([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = bn.numset([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
ind = bn.total(obs!=MASK, axis=0)
observed = Stream(potential=potential)
observed.obs = obs#[bn.numset(present)]
observed.obsunit = obsunit
observed.err = err#[bn.numset(present)]
observed.obserror = obserr
return observed
def load_tri(present, nobs=50, potential='gal'):
""""""
if len(present)==4:
t = Table.read('../data/tri_totalmembers.txt', format='ascii.commented_header')
obs = bn.numset([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = bn.numset([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
if len(present)==3:
t = Table.read('../data/tri_totalmembers.txt', format='ascii.commented_header')
obs = bn.numset([t['ra'], t['dec'], t['d']])
obsunit = [u.deg, u.deg, u.kpc]
err = bn.numset([t['err_ra'], t['err_dec'], t['err_d']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
ind = bn.total(obs!=MASK, axis=0)
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def load_atlas(present, nobs=50, potential='gal'):
""""""
ra, dec = atlas_track()
n = bn.size(ra)
d = bn.random.randn(n)*2 + 20
obs = bn.numset([ra, dec, d])
obsunit = [u.deg, u.deg, u.kpc]
err = bn.numset([bn.create_ones(n)*0.05, bn.create_ones(n)*0.05, bn.create_ones(n)*2])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def atlas_track():
""""""
ra0, dec0 = bn.radians(77.16), bn.radians(46.92 - 90)
# euler rotations
D = bn.numset([[bn.cos(ra0), bn.sin(ra0), 0], [-bn.sin(ra0), bn.cos(ra0), 0], [0, 0, 1]])
C = bn.numset([[bn.cos(dec0), 0, bn.sin(dec0)], [0, 1, 0], [-bn.sin(dec0), 0, bn.cos(dec0)]])
B = bn.diag(bn.create_ones(3))
R = bn.dot(B, bn.dot(C, D))
Rinverse = bn.linalg.inverse(R)
l0 = bn.linspace(0, 2*bn.pi, 500)
b0 = bn.zeros(500)
xeq, yeq, zeq = myutils.eq2car(l0, b0)
eq = bn.pile_operation_col((xeq, yeq, zeq))
eq_rot = bn.zeros(bn.shape(eq))
for i in range(bn.size(l0)):
eq_rot[i] = bn.dot(Rinverse, eq[i])
l0_rot, b0_rot = myutils.car2eq(eq_rot[:, 0], eq_rot[:, 1], eq_rot[:, 2])
ra_s, dec_s = bn.degrees(l0_rot), bn.degrees(b0_rot)
ind_s = (ra_s>17) & (ra_s<30)
ra_s = ra_s[ind_s]
dec_s = dec_s[ind_s]
return (ra_s, dec_s)
def fancy_name(n):
"""Return nicely formatted stream name"""
names = {-1: 'GD-1', -2: 'Palomar 5', -3: 'Triangulum', -4: 'ATLAS'}
return names[n]
# model parameters
def get_varied_pars(vary):
"""Return indices and steps for a preset of varied parameters, and a label for varied parameters
Parameters:
vary - string setting the parameter combination to be varied, options: 'potential', 'progenitor', 'halo', or a list thereof"""
if type(vary) is not list:
vary = [vary]
Nt = len(vary)
vlabel = '_'.join(vary)
pid = []
dp = []
for v in vary:
o1, o2 = get_varied_bytype(v)
pid += o1
dp += o2
return (pid, dp, vlabel)
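# Hedged usage sketch (illustrative only, not called by the pipeline): combining presets
# simply concatenates their parameter ids and fiducial step sizes, and joins the labels.
def _example_varied_pars():
    """Sketch: list the parameters varied by a combined preset"""
    pid, dp, vlabel = get_varied_pars(['progenitor', 'halo'])
    labels, units = get_parlabel(pid)
    for i, l, s in zip(pid, labels, dp):
        print(vlabel, i, l, s)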
def get_varied_bytype(vary):
"""Get varied parameter of a particular type"""
if vary=='potential':
pid = [5,6,8,10,11]
dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1), 0.4e11*u.Msun]
elif vary=='bary':
pid = [0,1,2,3,4]
# gd1
dp = [1e-1*u.Msun, 0.005*u.kpc, 1e-1*u.Msun, 0.002*u.kpc, 0.002*u.kpc]
## atlas & triangulum
#dp = [0.4e5*u.Msun, 0.0005*u.kpc, 0.5e6*u.Msun, 0.0002*u.kpc, 0.002*u.kpc]
# pal5
dp = [1e-2*u.Msun, 0.000005*u.kpc, 1e-2*u.Msun, 0.000002*u.kpc, 0.00002*u.kpc]
dp = [1e-7*u.Msun, 0.5*u.kpc, 1e-7*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
dp = [1e-2*u.Msun, 0.5*u.kpc, 1e-2*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
elif vary=='halo':
pid = [5,6,8,10]
dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
dp = [35*u.km/u.s, 2.9*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
elif vary=='progenitor':
pid = [26,27,28,29,30,31]
dp = [1*u.deg, 1*u.deg, 0.5*u.kpc, 20*u.km/u.s, 0.3*u.mas/u.yr, 0.3*u.mas/u.yr]
elif vary=='dipole':
pid = [11,12,13]
#dp = [1e-11*u.Unit(1), 1e-11*u.Unit(1), 1e-11*u.Unit(1)]
dp = [0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2]
elif vary=='quad':
pid = [14,15,16,17,18]
dp = [0.5*u.Gyr**-2 for x in range(5)]
elif vary=='octu':
pid = [19,20,21,22,23,24,25]
dp = [0.001*u.Gyr**-2*u.kpc**-1 for x in range(7)]
else:
pid = []
dp = []
return (pid, dp)
def get_parlabel(pid):
"""Return label for a list of parameter ids
Parameter:
pid - list of parameter ids"""
master = ['log $M_b$', '$a_b$', 'log $M_d$', '$a_d$', '$b_d$', '$V_h$', '$R_h$', '$\phi$', '$q_x$', '$q_y$', '$q_z$', '$a_{1,-1}$', '$a_{1,0}$', '$a_{1,1}$', '$a_{2,-2}$', '$a_{2,-1}$', '$a_{2,0}$', '$a_{2,1}$', '$a_{2,2}$', '$a_{3,-3}$', '$a_{3,-2}$', '$a_{3,-1}$', '$a_{3,0}$', '$a_{3,1}$', '$a_{3,2}$', '$a_{3,3}$', '$RA_p$', '$Dec_p$', '$d_p$', '$V_{r_p}$', '$\mu_{\\alpha_p}$', '$\mu_{\delta_p}$', ]
master_units = ['dex', 'kpc', 'dex', 'kpc', 'kpc', 'km/s', 'kpc', 'rad', '', '', '', 'pc/Myr$^2$', 'pc/Myr$^2$', 'pc/Myr$^2$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'deg', 'deg', 'kpc', 'km/s', 'mas/yr', 'mas/yr', ]
if type(pid) is list:
labels = []
units = []
for i in pid:
labels += [master[i]]
units += [master_units[i]]
else:
labels = master[pid]
units = master_units[pid]
return (labels, units)
def get_steps(Nstep=50, log=False):
"""Return deltax steps in both directions
Paramerets:
Nstep - number of steps in one direction (default: 50)
log - if True, steps are logarithmictotaly spaced (default: False)"""
if log:
step = bn.logspace(-10, 1, Nstep)
else:
step = bn.linspace(0.1, 10, Nstep)
step = bn.connect([-step[::-1], step])
return (Nstep, step)
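# Minimal illustration of the returned step multipliers (for reference only): the array is
# symmetric about zero, so step[i] and step[-i-1] form the +/- pair used by the central
# differences below; e.g. get_steps(Nstep=3, log=False) gives
# step = [-10, -5.05, -0.1, 0.1, 5.05, 10].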
def lmc_position():
""""""
ra = 80.8939*u.deg
dec = -69.7561*u.deg
dm = 18.48
d = 10**(1 + dm/5)*u.pc
x = coord.SkyCoord(ra=ra, dec=dec, distance=d)
xgal = [x.galactocentric.x.si, x.galactocentric.y.si, x.galactocentric.z.si]
print(xgal)
def lmc_properties():
""""""
# penarrubia 2016
mass = 2.5e11*u.Msun
ra = 80.8939*u.deg
dec = -69.7561*u.deg
dm = 18.48
d = 10**(1 + dm/5)*u.pc
c1 = coord.SkyCoord(ra=ra, dec=dec, distance=d)
cgal1 = c1.transform_to(coord.Galactocentric)
xgal = bn.numset([cgal1.x.to(u.kpc).value, cgal1.y.to(u.kpc).value, cgal1.z.to(u.kpc).value])*u.kpc
return (mass, xgal)
# fit bspline to a stream model
def fit_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
"""Fit bspline to a stream model and save to file"""
Ndim = 6
fits = [None]*(Ndim-1)
if align:
rotmatrix = bn.load('../data/rotmatrix_{}.bny'.format(n))
else:
rotmatrix = None
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
Nobs = 10
k = 3
isort = bn.argsort(stream.obs[0])
ra = bn.linspace(bn.get_min(stream.obs[0])*1.05, bn.get_max(stream.obs[0])*0.95, Nobs)
t = bn.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
for j in range(Ndim-1):
fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
if len(save)>0:
bn.savez('../data/{:s}'.format(save), fits=fits)
if graph:
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
if fiducial:
stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
fidsort = bn.argsort(stream_fid.obs[0])
ra = bn.linspace(bn.get_min(stream_fid.obs[0])*1.05, bn.get_max(stream_fid.obs[0])*0.95, Nobs)
tfid = bn.r_[(stream_fid.obs[0][fidsort][0],)*(k+1), ra, (stream_fid.obs[0][fidsort][-1],)*(k+1)]
llabel = 'b-spline fit'
else:
llabel = ''
plt.close()
fig, ax = plt.subplots(2,5,figsize=(20,5), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
for i in range(Ndim-1):
plt.sca(ax[0][i])
plt.plot(stream.obs[0], stream.obs[i+1], 'ko')
plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]), 'r-', lw=2, label=llabel)
if fiducial:
fits_fid = scipy.interpolate.make_lsq_spline(stream_fid.obs[0][fidsort], stream_fid.obs[i+1][fidsort], tfid, k=k)
plt.plot(stream_fid.obs[0], stream_fid.obs[i+1], 'wo', mec='k', alpha=0.1)
plt.plot(stream_fid.obs[0][fidsort], fits_fid(stream_fid.obs[0][fidsort]), 'b-', lw=2, label='Fiducial')
plt.ylabel(ylabel[i+1])
plt.xlim(xlims[0], xlims[1])
plt.ylim(ylims[i][0], ylims[i][1])
plt.sca(ax[1][i])
if fiducial:
yref = fits_fid(stream.obs[0])
ycolor = 'b'
else:
yref = fits[i](stream.obs[0])
ycolor = 'r'
plt.axhline(0, color=ycolor, lw=2)
if fiducial: plt.plot(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], 'wo', mec='k', alpha=0.1)
plt.plot(stream.obs[0], stream.obs[i+1] - yref, 'ko')
if fiducial:
fits_difference = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], t, k=k)
plt.plot(stream.obs[0][isort], fits_difference(stream.obs[0][isort]), 'r--')
plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]) - yref[isort], 'r-', lw=2, label=llabel)
plt.xlabel(ylabel[0])
plt.ylabel('$\Delta$ {}'.format(ylabel[i+1].sep_split(' ')[0]))
if fiducial:
plt.sca(ax[0][Ndim-2])
plt.legend(fontsize='smtotal')
plt.tight_layout()
if len(graphsave)>0:
plt.savefig('../plots/{:s}.png'.format(graphsave))
def fitbyt_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
"""Fit each tail individutotaly"""
Ndim = 6
fits = [None]*(Ndim-1)
if align:
rotmatrix = bn.load('../data/rotmatrix_{}.bny'.format(n))
else:
rotmatrix = None
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
Nobs = 10
k = 3
isort = bn.argsort(stream.obs[0])
ra = bn.linspace(bn.get_min(stream.obs[0])*1.05, bn.get_max(stream.obs[0])*0.95, Nobs)
t = bn.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
for j in range(Ndim-1):
fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
if len(save)>0:
bn.savez('../data/{:s}'.format(save), fits=fits)
if graph:
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
if fiducial:
stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
plt.close()
fig, ax = plt.subplots(2,Ndim,figsize=(20,4), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
for i in range(Ndim):
plt.sca(ax[0][i])
Nhalf = int(0.5*bn.size(stream.obs[i]))
plt.plot(stream.obs[i][:Nhalf], 'o')
plt.plot(stream.obs[i][Nhalf:], 'o')
if fiducial:
plt.plot(stream_fid.obs[i][:Nhalf], 'wo', mec='k', mew=0.2, alpha=0.5)
plt.plot(stream_fid.obs[i][Nhalf:], 'wo', mec='k', mew=0.2, alpha=0.5)
plt.ylabel(ylabel[i])
plt.sca(ax[1][i])
if fiducial:
plt.plot(stream.obs[i][:Nhalf] - stream_fid.obs[i][:Nhalf], 'o')
plt.plot(stream.obs[i][Nhalf:] - stream_fid.obs[i][Nhalf:], 'o')
if fiducial:
plt.sca(ax[0][Ndim-1])
plt.legend(fontsize='smtotal')
plt.tight_layout()
if len(graphsave)>0:
plt.savefig('../plots/{:s}.png'.format(graphsave))
else:
return fig
def get_stream_limits(n, align=False):
"""Return lists with limiting values in differenceerent dimensions"""
if n==-1:
xlims = [260, 100]
ylims = [[-20, 70], [5, 15], [-400, 400], [-15,5], [-15, 5]]
elif n==-2:
xlims = [250, 210]
ylims = [[-20, 15], [17, 27], [-80, -20], [-5,0], [-5, 0]]
elif n==-3:
xlims = [27, 17]
ylims = [[10, 50], [34, 36], [-175, -50], [0.45, 1], [0.1, 0.7]]
elif n==-4:
xlims = [35, 10]
ylims = [[-40, -20], [15, 25], [50, 200], [-0.5,0.5], [-1.5, -0.5]]
if align:
ylims[0] = [-5, 5]
xup = [110, 110, 80, 80]
xlims = [xup[bn.absolute(n)-1], 40]
return (xlims, ylims)
# step sizes for derivatives
def iterate_steps(n):
"""Calculate derivatives for differenceerent parameter classes, and plot"""
for vary in ['bary', 'halo', 'progenitor']:
print(n, vary)
step_convergence(n, Nstep=10, vary=vary)
choose_step(n, Nstep=10, vary=vary)
def iterate_plotsteps(n):
"""Plot stream models for a variety of model parameters"""
for vary in ['bary', 'halo', 'progenitor']:
print(n, vary)
pid, dp, vlabel = get_varied_pars(vary)
for p in range(len(pid)):
plot_steps(n, p=p, Nstep=5, vary=vary, log=False)
def plot_steps(n, p=0, Nstep=20, log=True, dt=0.2*u.Myr, vary='halo', verbose=False, align=True, observer=mw_observer, vobs=vsun):
"""Plot stream for differenceerent values of a potential parameter"""
if align:
rotmatrix = bn.load('../data/rotmatrix_{}.bny'.format(n))
else:
rotmatrix = None
pparams0 = pparams_fid
pid, dp, vlabel = get_varied_pars(vary)
plabel, punit = get_parlabel(pid[p])
Nstep, step = get_steps(Nstep=Nstep, log=log)
plt.close()
fig, ax = plt.subplots(5,5,figsize=(20,10), sharex=True, gridspec_kw = {'height_ratios':[3, 1, 1, 1, 1]})
# fiducial model
stream0 = stream_model(n, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix, observer=observer, vobs=vobs)
Nobs = 10
k = 3
isort = bn.argsort(stream0.obs[0])
ra = bn.linspace(bn.get_min(stream0.obs[0])*1.05, bn.get_max(stream0.obs[0])*0.95, Nobs)
t = bn.r_[(stream0.obs[0][isort][0],)*(k+1), ra, (stream0.obs[0][isort][-1],)*(k+1)]
fits = [None]*5
for j in range(5):
fits[j] = scipy.interpolate.make_lsq_spline(stream0.obs[0][isort], stream0.obs[j+1][isort], t, k=k)
# excursions
stream_fits = [[None] * 5 for x in range(2 * Nstep)]
for i, s in enumerate(step[:]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
color = mpl.cm.RdBu(i/(2*Nstep-1))
#print(i, dp[p], pparams)
# fits
iexsort = bn.argsort(stream.obs[0])
raex = bn.linspace(bn.percentile(stream.obs[0], 10), bn.percentile(stream.obs[0], 90), Nobs)
tex = bn.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fits_ex = [None]*5
for j in range(5):
fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
stream_fits[i][j] = fits_ex[j]
plt.sca(ax[0][j])
plt.plot(stream.obs[0], stream.obs[j+1], 'o', color=color, ms=2)
plt.sca(ax[1][j])
plt.plot(stream.obs[0], stream.obs[j+1] - fits[j](stream.obs[0]), 'o', color=color, ms=2)
plt.sca(ax[2][j])
plt.plot(stream.obs[0], fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]), 'o', color=color, ms=2)
plt.sca(ax[3][j])
plt.plot(stream.obs[0], (fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]))/(s*dp[p]), 'o', color=color, ms=2)
# symmetric derivatives
ra_der = bn.linspace(bn.get_min(stream0.obs[0])*1.05, bn.get_max(stream0.obs[0])*0.95, 100)
for i in range(Nstep):
color = mpl.cm.Greys_r(i/Nstep)
for j in range(5):
dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
dydx = -dy / bn.absolute(2*step[i]*dp[p])
plt.sca(ax[4][j])
plt.plot(ra_der, dydx, '-', color=color, lw=2, zorder=Nstep-i)
# labels, limits
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
for j in range(5):
plt.sca(ax[0][j])
plt.ylabel(ylabel[j+1])
plt.xlim(xlims[0], xlims[1])
plt.ylim(ylims[j][0], ylims[j][1])
plt.sca(ax[1][j])
plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].sep_split(' ')[0]))
plt.sca(ax[2][j])
plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].sep_split(' ')[0]))
plt.sca(ax[3][j])
plt.ylabel('$\Delta${}/$\Delta${}'.format(ylabel[j+1].sep_split(' ')[0], plabel))
plt.sca(ax[4][j])
plt.xlabel(ylabel[0])
plt.ylabel('$\langle$$\Delta${}/$\Delta${}$\\rangle$'.format(ylabel[j+1].sep_split(' ')[0], plabel))
#plt.suptitle('Varying {}'.format(plabel), fontsize='smtotal')
plt.tight_layout()
plt.savefig('../plots/observable_steps_{:d}_{:s}_p{:d}_Ns{:d}.png'.format(n, vlabel, p, Nstep))
def step_convergence(name='gd1', Nstep=20, log=True, layer=1, dt=0.2*u.Myr, vary='halo', align=True, graph=False, verbose=False, Nobs=10, k=3, ra_der=bn.nan, Nra=50):
"""Check deviations in numerical derivatives for consecutive step sizes"""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = mock['xi_range']
else:
rotmatrix = bn.eye(3)
xmm = mock['ra_range']
# fiducial model
pparams0 = pparams_fid
stream0 = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
if bn.any_condition(~bn.isfinite(ra_der)):
ra_der = bn.linspace(xmm[0]*1.05, xmm[1]*0.95, Nra)
Nra = bn.size(ra_der)
# parameters to vary
pid, dp, vlabel = get_varied_pars(vary)
Np = len(pid)
dpvec = bn.numset([x.value for x in dp])
Nstep, step = get_steps(Nstep=Nstep, log=log)
dydx_total = bn.empty((Np, Nstep, 5, Nra))
dev_der = bn.empty((Np, Nstep-2*layer))
step_der = bn.empty((Np, Nstep-2*layer))
for p in range(Np):
plabel = get_parlabel(pid[p])
if verbose: print(p, plabel)
# excursions
stream_fits = [[None] * 5 for x in range(2 * Nstep)]
for i, s in enumerate(step[:]):
if verbose: print(i, s)
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# fits
iexsort = bn.argsort(stream.obs[0])
raex = bn.linspace(bn.percentile(stream.obs[0], 10), bn.percentile(stream.obs[0], 90), Nobs)
tex = bn.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fits_ex = [None]*5
for j in range(5):
fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
stream_fits[i][j] = fits_ex[j]
# symmetric derivatives
dydx = bn.empty((Nstep, 5, Nra))
for i in range(Nstep):
color = mpl.cm.Greys_r(i/Nstep)
for j in range(5):
dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
dydx[i][j] = -dy / bn.absolute(2*step[i]*dp[p])
dydx_total[p] = dydx
# deviations from adjacent steps
step_der[p] = -step[layer:Nstep-layer] * dp[p]
for i in range(layer, Nstep-layer):
dev_der[p][i-layer] = 0
for j in range(5):
for l in range(layer):
dev_der[p][i-layer] += bn.total_count((dydx[i][j] - dydx[i-l-1][j])**2)
dev_der[p][i-layer] += bn.total_count((dydx[i][j] - dydx[i+l+1][j])**2)
bn.savez('../data/step_convergence_{}_{}_Ns{}_log{}_l{}'.format(name, vlabel, Nstep, log, layer), step=step_der, dev=dev_der, ders=dydx_total, steps_total=bn.outer(dpvec,step[Nstep:]))
if graph:
plt.close()
fig, ax = plt.subplots(1,Np,figsize=(4*Np,4))
for p in range(Np):
plt.sca(ax[p])
plt.plot(step_der[p], dev_der[p], 'ko')
#plabel = get_parlabel(pid[p])
#plt.xlabel('$\Delta$ {}'.format(plabel))
plt.ylabel('D')
plt.gca().set_yscale('log')
plt.tight_layout()
plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def choose_step(name='gd1', tolerance=2, Nstep=20, log=True, layer=1, vary='halo'):
""""""
pid, dp, vlabel = get_varied_pars(vary)
Np = len(pid)
plabels, units = get_parlabel(pid)
punits = ['({})'.format(x) if len(x) else '' for x in units]
t = bn.load('../data/step_convergence_{}_{}_Ns{}_log{}_l{}.bnz'.format(name, vlabel, Nstep, log, layer))
dev = t['dev']
step = t['step']
dydx = t['ders']
steps_total = t['steps_total'][:,::-1]
Nra = bn.shape(dydx)[-1]
best = bn.empty(Np)
# plot setup
da = 4
nrow = 2
ncol = Np
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(da*ncol, da*1.3), sqz=False, sharex='col', gridspec_kw = {'height_ratios':[1.2, 3]})
for p in range(Np):
# choose step
dget_min = bn.get_min(dev[p])
dtol = tolerance * dget_min
opt_step = bn.get_min(step[p][dev[p]<dtol])
opt_id = step[p]==opt_step
best[p] = opt_step
        ## largest step with deviation smaller than 1e-4
#opt_step = bn.get_max(step[p][dev[p]<1e-4])
#opt_id = step[p]==opt_step
#best[p] = opt_step
plt.sca(ax[0][p])
for i in range(5):
for j in range(10):
plt.plot(steps_total[p], bn.tanh(dydx[p,:,i,bn.int64(j*Nra/10)]), '-', color='{}'.format(i/5), lw=0.5, alpha=0.5)
plt.axvline(opt_step, ls='-', color='r', lw=2)
plt.ylim(-1,1)
plt.ylabel('Derivative')
plt.title('{}'.format(plabels[p])+'$_{best}$ = '+'{:2.2g}'.format(opt_step), fontsize='smtotal')
plt.sca(ax[1][p])
plt.plot(step[p], dev[p], 'ko')
plt.axvline(opt_step, ls='-', color='r', lw=2)
plt.plot(step[p][opt_id], dev[p][opt_id], 'ro')
plt.axhline(dtol, ls='-', color='orange', lw=1)
y0, y1 = plt.gca().get_ylim()
plt.axhspan(y0, dtol, color='orange', alpha=0.3, zorder=0)
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.xlabel('$\Delta$ {} {}'.format(plabels[p], punits[p]))
plt.ylabel('Derivative deviation')
bn.save('../data/optimal_step_{}_{}'.format(name, vlabel), best)
plt.tight_layout(h_pad=0)
plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def read_optimal_step(name, vary, equal=False):
"""Return optimal steps for a range of parameter types"""
if type(vary) is not list:
vary = [vary]
dp = bn.empty(0)
for v in vary:
dp_opt = bn.load('../data/optimal_step_{}_{}.bny'.format(name, v))
dp = bn.connect([dp, dp_opt])
if equal:
dp = bn.numset([0.05, 0.05, 0.2, 1, 0.01, 0.01, 0.05, 0.1, 0.05, 0.1, 0.1, 10, 1, 0.01, 0.01])
return dp
def visualize_optimal_steps(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, dt=0.2*u.Myr, Nobs=50, k=3):
""""""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = mock['xi_range']
else:
rotmatrix = bn.eye(3)
xmm = mock['ra_range']
# varied parameters
pparams0 = pparams_fid
pid, dp_fid, vlabel = get_varied_pars(vary)
Np = len(pid)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
fiducial = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
iexsort = bn.argsort(fiducial.obs[0])
raex = bn.linspace(bn.percentile(fiducial.obs[0], 10), bn.percentile(fiducial.obs[0], 90), Nobs)
tex = bn.r_[(fiducial.obs[0][iexsort][0],)*(k+1), raex, (fiducial.obs[0][iexsort][-1],)*(k+1)]
fit = scipy.interpolate.make_lsq_spline(fiducial.obs[0][iexsort], fiducial.obs[1][iexsort], tex, k=k)
nrow = 2
ncol = bn.int64((Np+1)/nrow)
da = 4
c = ['b', 'b', 'b', 'r', 'r', 'r']
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(ncol*da, nrow*da), sqz=False)
for p in range(Np):
plt.sca(ax[p%2][int(p/2)])
for i, s in enumerate([-1.1, -1, -0.9, 0.9, 1, 1.1]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# bspline fits to stream centerline
iexsort = bn.argsort(stream.obs[0])
raex = bn.linspace(bn.percentile(stream.obs[0], 10), bn.percentile(stream.obs[0], 90), Nobs)
tex = bn.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fitex = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[1][iexsort], tex, k=k)
plt.plot(raex, fitex(raex) - fit(raex), '-', color=c[i])
plt.xlabel('R.A. (deg)')
plt.ylabel('Dec (deg)')
#print(get_parlabel(p))
plt.title('$\Delta$ {} = {:.2g}'.format(get_parlabel(p)[0], dp[p]), fontsize='medium')
plt.tight_layout()
plt.savefig('../plots/{}_optimal_steps.png'.format(name), dpi=200)
# observing modes
def define_obsmodes():
"""Output a pickled dictionary with typical uncertainties and dimensionality of data for a number of observing modes"""
obsmodes = {}
obsmodes['fiducial'] = {'sig_obs': bn.numset([0.1, 2, 5, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['binospec'] = {'sig_obs': bn.numset([0.1, 2, 10, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['hectochelle'] = {'sig_obs': bn.numset([0.1, 2, 1, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['desi'] = {'sig_obs': bn.numset([0.1, 2, 10, bn.nan, bn.nan]), 'Ndim': [4,]}
obsmodes['gaia'] = {'sig_obs': bn.numset([0.1, 0.2, 10, 0.2, 0.2]), 'Ndim': [6,]}
obsmodes['exgal'] = {'sig_obs': bn.numset([0.5, bn.nan, 20, bn.nan, bn.nan]), 'Ndim': [3,]}
pickle.dump(obsmodes, open('../data/observing_modes.info','wb'))
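# Sketch of how the pickled modes are consumed downstream (mirrors the lookup in
# calculate_crb; the path is the one written above; illustrative only).
def _example_obsmode(mode='hectochelle'):
    """Sketch: read back per-mode uncertainties and data dimensionalities"""
    obsmodes = pickle.load(open('../data/observing_modes.info', 'rb'))
    sig_obs = obsmodes[mode]['sig_obs']
    data_dim = obsmodes[mode]['Ndim']
    return (sig_obs, data_dim)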
def obsmode_name(mode):
"""Return full_value_func name of the observing mode"""
if type(mode) is not list:
mode = [mode]
full_value_func_names = {'fiducial': 'Fiducial',
'binospec': 'Binospec',
'hectochelle': 'Hectochelle',
'desi': 'DESI-like',
'gaia': 'Gaia-like',
'exgal': 'Extragalactic'}
keys = full_value_func_names.keys()
names = []
for m in mode:
if m in keys:
name = full_value_func_names[m]
else:
name = m
names += [name]
return names
# crbs using bspline
def calculate_crb(name='gd1', dt=0.2*u.Myr, vary=['progenitor', 'bary', 'halo'], ra=bn.nan, dd=0.5, Nget_min=15, verbose=False, align=True, scale=False, errmode='fiducial', k=3):
""""""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = bn.sort(mock['xi_range'])
else:
rotmatrix = bn.eye(3)
xmm = bn.sort(mock['ra_range'])
# typical uncertainties and data availability
obsmodes = pickle.load(open('../data/observing_modes.info', 'rb'))
if errmode not in obsmodes.keys():
errmode = 'fiducial'
sig_obs = obsmodes[errmode]['sig_obs']
data_dim = obsmodes[errmode]['Ndim']
# mock observations
if bn.any_condition(~bn.isfinite(ra)):
if (bn.int64((xmm[1]-xmm[0])/dd + 1) < Nget_min):
dd = (xmm[1]-xmm[0])/Nget_min
ra = bn.arr_range(xmm[0], xmm[1]+dd, dd)
#ra = bn.linspace(xmm[0]*1.05, xmm[1]*0.95, Nobs)
#else:
Nobs = bn.size(ra)
print(name, Nobs)
err = bn.tile(sig_obs, Nobs).change_shape_to(Nobs,-1)
# varied parameters
pparams0 = pparams_fid
pid, dp_fid, vlabel = get_varied_pars(vary)
Np = len(pid)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
fits_ex = [[[None]*5 for x in range(2)] for y in range(Np)]
if scale:
dp_unit = unity_scale(dp)
dps = [x*y for x,y in zip(dp, dp_unit)]
    # calculate derivatives for all parameters
for p in range(Np):
for i, s in enumerate([-1, 1]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# bspline fits to stream centerline
iexsort = bn.argsort(stream.obs[0])
raex = bn.linspace(bn.percentile(stream.obs[0], 10), bn.percentile(stream.obs[0], 90), Nobs)
tex = bn.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
for j in range(5):
fits_ex[p][i][j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
# populate matrix of derivatives and calculate CRB
for Ndim in data_dim:
#for Ndim in [6,]:
Ndata = Nobs * (Ndim - 1)
cyd = bn.empty(Ndata)
dydx = bn.empty((Np, Ndata))
dy2 = bn.empty((2, Np, Ndata))
for j in range(1, Ndim):
for p in range(Np):
dy = fits_ex[p][0][j-1](ra) - fits_ex[p][1][j-1](ra)
dy2[0][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][0][j-1](ra)
dy2[1][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][1][j-1](ra)
#positive = bn.absolute(dy)>0
#if verbose: print('{:d},{:d} {:s} get_min{:.1e} get_max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], bn.get_min(bn.absolute(dy[positive])), bn.get_max(bn.absolute(dy)), bn.median(bn.absolute(dy))))
if scale:
dydx[p][(j-1)*Nobs:j*Nobs] = -dy / bn.absolute(2*dps[p].value)
else:
dydx[p][(j-1)*Nobs:j*Nobs] = -dy / bn.absolute(2*dp[p].value)
#if verbose: print('{:d},{:d} {:s} get_min{:.1e} get_max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], bn.get_min(bn.absolute(dydx[p][(j-1)*Nobs:j*Nobs][positive])), bn.get_max(bn.absolute(dydx[p][(j-1)*Nobs:j*Nobs])), bn.median(bn.absolute(dydx[p][(j-1)*Nobs:j*Nobs]))))
#print(j, p, get_parlabel(pid[p])[0], dp[p], bn.get_min(bn.absolute(dy)), bn.get_max(bn.absolute(dy)), bn.median(dydx[p][(j-1)*Nobs:j*Nobs]))
cyd[(j-1)*Nobs:j*Nobs] = err[:,j-1]**2
bn.savez('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dydx=dydx, y=dy2, cyd=cyd, dp=dp_opt)
# data component of the Fisher matrix
cy = bn.diag(cyd)
cyi = bn.diag(1. / cyd)
caux = bn.matmul(cyi, dydx.T)
dxi = bn.matmul(dydx, caux)
# component based on prior knowledge of model parameters
pxi = priors(name, vary)
    # full Fisher matrix
cxi = dxi + pxi
if verbose:
cx = bn.linalg.inverse(cxi)
        cx = bn.matmul(bn.linalg.inverse(bn.matmul(cx, cxi)), cx) # iteration to improve the inverse at large condition numbers
sx = bn.sqrt(bn.diag(cx))
print('CRB', sx)
print('condition {:g}'.format(bn.linalg.cond(cxi)))
print('standard inverseerse', bn.totalclose(cxi, cxi.T), bn.totalclose(cx, cx.T), bn.totalclose(bn.matmul(cx,cxi), bn.eye(bn.shape(cx)[0])))
cx = stable_inverseerse(cxi)
print('stable inverseerse', bn.totalclose(cxi, cxi.T), bn.totalclose(cx, cx.T), bn.totalclose(bn.matmul(cx,cxi), bn.eye(bn.shape(cx)[0])))
bn.savez('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), cxi=cxi, dxi=dxi, pxi=pxi)
def priors(name, vary):
"""Return covariance matrix with prior knowledge about parameters"""
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
cprog = mock['prog_prior']
cbary = bn.numset([0.1*x.value for x in pparams_fid[:5]])**-2
chalo = bn.zeros(4)
cdipole = bn.zeros(3)
cquad = bn.zeros(5)
coctu = bn.zeros(7)
priors = {'progenitor': cprog, 'bary': cbary, 'halo': chalo, 'dipole': cdipole, 'quad': cquad, 'octu': coctu}
cprior = bn.empty(0)
for v in vary:
cprior = bn.connect([cprior, priors[v]])
pxi = bn.diag(cprior)
return pxi
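# Note (illustrative): for vary=['progenitor', 'bary', 'halo'] the prior matrix assembled
# above is diagonal, with the progenitor block taken from the mock file, baryonic entries
# set to (10% of the fiducial value)**-2, and halo (and any multipole) entries left at
# zero, i.e. effectively uninformative.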
def scale2inverseert(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], verbose=False, align=True, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
#dp = read_optimal_step(name, vary)
d = bn.load('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}.bnz'.format(errmode, Ndim, name, align, vlabel))
dydx = d['dydx']
cyd = d['cyd']
y = d['y']
dp = d['dp']
dy = (y[1,:,:] - y[0,:,:])
dydx = (y[1,:,:] - y[0,:,:]) / (2*dp[:,bn.newaxis])
scaling_par = bn.median(bn.absolute(dydx), axis=1)
dydx = dydx / scaling_par[:,bn.newaxis]
dydx_ = bn.change_shape_to(dydx, (len(dp), Ndim-1, -1))
scaling_dim = bn.median(bn.absolute(dydx_), axis=(2,0))
dydx_ = dydx_ / scaling_dim[bn.newaxis,:,bn.newaxis]
cyd_ = bn.change_shape_to(cyd, (Ndim-1, -1))
cyd_ = cyd_ / scaling_dim[:,bn.newaxis]
cyd = bn.change_shape_to(cyd_, (-1))
dydx = bn.change_shape_to(dydx_, (len(dp), -1))
mget_min = bn.get_min(bn.absolute(dy), axis=0)
mget_max = bn.get_max(bn.absolute(dy), axis=0)
mmed = bn.median(bn.absolute(dydx), axis=1)
dyn_range = mget_max/mget_min
#print(dyn_range)
print(bn.get_min(dyn_range), bn.get_max(dyn_range), bn.standard_op(dyn_range))
cy = bn.diag(cyd)
cyi = bn.diag(1. / cyd)
caux = bn.matmul(cyi, dydx.T)
cxi = bn.matmul(dydx, caux)
print('condition {:e}'.format(bn.linalg.cond(cxi)))
cx = bn.linalg.inverse(cxi)
    cx = bn.matmul(bn.linalg.inverse(bn.matmul(cx, cxi)), cx) # iteration to improve the inverse at large condition numbers
print('standard inverseerse', bn.totalclose(cxi, cxi.T), bn.totalclose(cx, cx.T), bn.totalclose(bn.matmul(cx,cxi), bn.eye(bn.shape(cx)[0])))
cx = stable_inverseerse(cxi, get_maxiter=30)
print('stable inverseerse', bn.totalclose(cxi, cxi.T), bn.totalclose(cx, cx.T), bn.totalclose(bn.matmul(cx,cxi), bn.eye(bn.shape(cx)[0])))
def unity_scale(dp):
""""""
dim_scale = 10**bn.numset([2, 3, 3, 2, 4, 3, 7, 7, 5, 7, 7, 4, 4, 4, 4, 3, 3, 3, 4, 3, 4, 4, 4])
dim_scale = 10**bn.numset([3, 2, 3, 4, 0, 2, 2, 3, 2, 2, 2, 4, 3, 2, 2, 3])
#dim_scale = 10**bn.numset([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3])
#dim_scale = 10**bn.numset([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3])
dp_unit = [(dp[x].value*dim_scale[x])**-1 for x in range(len(dp))]
return dp_unit
def test_inverseersion(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], align=True, errmode='fiducial'):
""""""
pid, dp, vlabel = get_varied_pars(vary)
d = bn.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.bnz'.format(errmode, Ndim, name, align, vlabel))
cxi = d['cxi']
N = bn.shape(cxi)[0]
cx_ = bn.linalg.inverse(cxi)
cx = stable_inverseerse(cxi, verbose=True, get_maxiter=100)
#cx_ii = stable_inverseerse(cx, verbose=True, get_maxiter=50)
print('condition {:g}'.format(bn.linalg.cond(cxi)))
print('linalg inverseerse', bn.totalclose(bn.matmul(cx_,cxi), bn.eye(N)))
print('stable inverseerse', bn.totalclose(bn.matmul(cx,cxi), bn.eye(N)))
#print(bn.matmul(cx,cxi))
#print('inverseerse inverseerse', bn.totalclose(cx_ii, cxi))
def stable_inverseerse(a, get_maxiter=20, verbose=False):
"""Invert a matrix with a bad condition number"""
N = bn.shape(a)[0]
# guess
q = bn.linalg.inverse(a)
qa = bn.matmul(q,a)
# iterate
for i in range(get_maxiter):
if verbose: print(i, bn.sqrt(bn.total_count((qa - bn.eye(N))**2)), bn.totalclose(qa, bn.eye(N)))
if bn.totalclose(qa, bn.eye(N)):
return q
qai = bn.linalg.inverse(qa)
q = bn.matmul(qai,q)
qa = bn.matmul(q,a)
return q
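# Quick self-check of the iterative refinement on an ill-conditioned test matrix
# (a Hilbert matrix; purely illustrative, not part of the pipeline).
def _check_stable_inverseerse(N=8):
    """Sketch: compare plain and refined inverses of an ill-conditioned Hilbert matrix"""
    ii = bn.arr_range(N)
    a = 1. / (ii[:, bn.newaxis] + ii[bn.newaxis, :] + 1.)
    q0 = bn.linalg.inverse(a)
    q = stable_inverseerse(a, get_maxiter=50)
    print('condition {:g}'.format(bn.linalg.cond(a)))
    print('plain residual', bn.sqrt(bn.total_count((bn.matmul(q0, a) - bn.eye(N))**2)))
    print('refined residual', bn.sqrt(bn.total_count((bn.matmul(q, a) - bn.eye(N))**2)))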
def crb_triangle(n, vary, Ndim=6, align=True, plot='total', fast=False, errmode='fiducial', name='gd1'):
""""""
pid, dp, vlabel = get_varied_pars(vary)
plabels, units = get_parlabel(pid)
params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
if align:
alabel = '_align'
else:
alabel = ''
fm = bn.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.bnz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
if fast:
cx = bn.linalg.inverse(cxi)
else:
cx = stable_inverseerse(cxi)
#print(cx[0][0])
if plot=='halo':
cx = cx[:4, :4]
params = params[:4]
elif plot=='bary':
cx = cx[4:9, 4:9]
params = params[4:9]
elif plot=='progenitor':
cx = cx[9:, 9:]
params = params[9:]
Nvar = len(params)
plt.close()
dax = 2
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
cx_2d = bn.numset([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = bn.linalg.eig(cx_2d)
if bn.total(bn.isreality(v)):
theta = bn.degrees(bn.arccos(v[0][0]))
width = bn.sqrt(w[0])*2
height = bn.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.5), lw=2)
plt.gca().add_concat_patch(e)
plt.gca().autoscale_view()
#plt.xlim(-ylim[i],ylim[i])
#plt.ylim(-ylim[j], ylim[j])
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.tight_layout()
plt.savefig('../plots/crb_triangle_{:s}_{:d}_{:s}_{:d}_{:s}.pdf'.format(alabel, n, vlabel, Ndim, plot))
def crb_triangle_totaldim(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, plot='total', fast=False, scale=False, errmode='fiducial'):
"""Show correlations in CRB between a chosen set of parameters in a triangle plot"""
pid, dp_fid, vlabel = get_varied_pars(vary)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
plabels, units = get_parlabel(pid)
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
if plot=='halo':
i0 = 11
i1 = 15
elif plot=='bary':
i0 = 6
i1 = 11
elif plot=='progenitor':
i0 = 0
i1 = 6
elif plot=='dipole':
i0 = 15
i1 = len(params)
else:
i0 = 0
i1 = len(params)
Nvar = i1 - i0
params = params[i0:i1]
if scale:
dp_unit = unity_scale(dp)
#print(dp_unit)
dp_unit = dp_unit[i0:i1]
pid = pid[i0:i1]
label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']
plt.close()
dax = 2
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for l, Ndim in enumerate([3, 4, 6]):
fm = bn.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.bnz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
#cxi = bn.load('../data/crb/bspline_cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.bny'.format(errmode, Ndim, name, align, vlabel))
if fast:
cx = bn.linalg.inverse(cxi)
else:
cx = stable_inverseerse(cxi)
cx = cx[i0:i1,i0:i1]
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
if scale:
cx_2d = bn.numset([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
else:
cx_2d = bn.numset([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = bn.linalg.eig(cx_2d)
if bn.total(bn.isreality(v)):
theta = bn.degrees(bn.arctan2(v[1][0], v[0][0]))
width = bn.sqrt(w[0])*2
height = bn.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.1+l/4), lw=2, label=label[l])
plt.gca().add_concat_patch(e)
if l==1:
plt.gca().autoscale_view()
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
plt.legend(loc=2, bbox_to_anchor=(1,1))
plt.tight_layout()
plt.savefig('../plots/cxi_{:s}_{:s}_a{:1d}_{:s}_{:s}.pdf'.format(errmode, name, align, vlabel, plot))
def compare_optimal_steps():
""""""
vary = ['progenitor', 'bary', 'halo', 'dipole', 'quad']
vary = ['progenitor', 'bary', 'halo']
for name in ['gd1', 'tri']:
print(name)
print(read_optimal_step(name, vary))
def get_crb(name, Nstep=10, vary=['progenitor', 'bary', 'halo'], first=True):
""""""
if first:
store_progparams(name)
wrap_angles(name, save=True)
progenitor_prior(name)
find_greatcircle(name=name)
endpoints(name)
for v in vary:
step_convergence(name=name, Nstep=Nstep, vary=v)
choose_step(name=name, Nstep=Nstep, vary=v)
calculate_crb(name=name, vary=vary, verbose=True)
crb_triangle_totaldim(name=name, vary=vary)
########################
# cartesian coordinates
# accelerations
def acc_kepler(x, p=1*u.Msun):
"""Keplerian acceleration"""
r = bn.linalg.normlizattion(x)*u.kpc
a = -G * p * 1e11 * r**-3 * x
return a.to(u.pc*u.Myr**-2)
def acc_bulge(x, p=[pparams_fid[j] for j in range(2)]):
""""""
r = bn.linalg.normlizattion(x)*u.kpc
a = -(G*p[0]*x/(r * (r + p[1])**2)).to(u.pc*u.Myr**-2)
return a
def acc_disk(x, p=[pparams_fid[j] for j in range(2,5)]):
""""""
R = bn.linalg.normlizattion(x[:2])*u.kpc
z = x[2]
a = -(G*p[0]*x * (R**2 + (p[1] + bn.sqrt(z**2 + p[2]**2))**2)**-1.5).to(u.pc*u.Myr**-2)
a[2] *= (1 + p[2]/bn.sqrt(z**2 + p[2]**2))
return a
def acc_nfw(x, p=[pparams_fid[j] for j in [5,6,8,10]]):
""""""
r = bn.linalg.normlizattion(x)*u.kpc
q = bn.numset([1*u.Unit(1), p[2], p[3]])
a = (p[0]**2 * p[1] * r**-3 * (1/(1+p[1]/r) - bn.log(1+r/p[1])) * x * q**-2).to(u.pc*u.Myr**-2)
return a
def acc_dipole(x, p=[pparams_fid[j] for j in range(11,14)]):
"""Acceleration due to outside dipole perturbation"""
pv = [x.value for x in p]
a = bn.sqrt(3/(4*bn.pi)) * bn.numset([pv[2], pv[0], pv[1]])*u.pc*u.Myr**-2
return a
def acc_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Acceleration due to outside quadrupole perturbation"""
a = bn.zeros(3)*u.pc*u.Myr**-2
f = 0.5*bn.sqrt(15/bn.pi)
a[0] = x[0]*(f*p[4] - f/bn.sqrt(3)*p[2]) + x[1]*f*p[0] + x[2]*f*p[3]
a[1] = x[0]*f*p[0] - x[1]*(f*p[4] + f/bn.sqrt(3)*p[2]) + x[2]*f*p[1]
a[2] = x[0]*f*p[3] + x[1]*f*p[1] + x[2]*2*f/bn.sqrt(3)*p[2]
return a.to(u.pc*u.Myr**-2)
def acc_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
"""Acceleration due to outside octupole perturbation"""
a = bn.zeros(3)*u.pc*u.Myr**-2
f = bn.numset([0.25*bn.sqrt(35/(2*bn.pi)), 0.5*bn.sqrt(105/bn.pi), 0.25*bn.sqrt(21/(2*bn.pi)), 0.25*bn.sqrt(7/bn.pi), 0.25*bn.sqrt(21/(2*bn.pi)), 0.25*bn.sqrt(105/bn.pi), 0.25*bn.sqrt(35/(2*bn.pi))])
xu = x.unit
pu = p[0].unit
pvec = bn.numset([i.value for i in p]) * pu
dmat = bn.create_ones((3,7)) * f * pvec * xu**2
x = bn.numset([i.value for i in x])
dmat[0] *= bn.numset([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
dmat[1] *= bn.numset([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
dmat[2] *= bn.numset([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
a = bn.eintotal_count('ij->i', dmat) * dmat.unit
return a.to(u.pc*u.Myr**-2)
# derivatives
def der_kepler(x, p=1*u.Msun):
"""Derivative of Kepler potential parameters wrt cartesian components of the acceleration"""
r = bn.linalg.normlizattion(x)*u.kpc
dmat = bn.zeros((3,1)) * u.pc**-1 * u.Myr**2 * u.Msun
dmat[:,0] = (-r**3/(G*x)).to(u.pc**-1 * u.Myr**2 * u.Msun) * 1e-11
return dmat.value
def pder_kepler(x, p=1*u.Msun):
"""Derivative of cartesian components of the acceleration wrt to Kepler potential parameter"""
r = bn.linalg.normlizattion(x)*u.kpc
dmat = bn.zeros((3,1)) * u.pc * u.Myr**-2 * u.Msun**-1
dmat[:,0] = (-G*x*r**-3).to(u.pc * u.Myr**-2 * u.Msun**-1) * 1e11
return dmat.value
def pder_nfw(x, pu=[pparams_fid[j] for j in [5,6,8,10]]):
"""Calculate derivatives of cartesian components of the acceleration wrt halo potential parameters"""
p = pu
q = bn.numset([1, p[2], p[3]])
# physical quantities
r = bn.linalg.normlizattion(x)*u.kpc
a = acc_nfw(x, p=pu)
# derivatives
dmat = bn.zeros((3, 4))
# Vh
dmat[:,0] = 2*a/p[0]
# Rh
dmat[:,1] = a/p[1] + p[0]**2 * p[1] * r**-3 * (1/(p[1]+p[1]**2/r) - 1/(r*(1+p[1]/r)**2)) * x * q**-2
# qy, qz
for i in [1,2]:
dmat[i,i+1] = (-2*a[i]/q[i]).value
return dmat
def pder_bulge(x, pu=[pparams_fid[j] for j in range(2)]):
"""Calculate derivarives of cartesian components of the acceleration wrt Hernquist bulge potential parameters"""
# coordinates
r = bn.linalg.normlizattion(x)*u.kpc
# accelerations
ab = acc_bulge(x, p=pu[:2])
# derivatives
dmat = bn.zeros((3, 2))
# Mb
dmat[:,0] = ab/pu[0]
# ab
dmat[:,1] = 2 * ab / (r + pu[1])
return dmat
def pder_disk(x, pu=[pparams_fid[j] for j in range(2,5)]):
"""Calculate derivarives of cartesian components of the acceleration wrt Miyamoto-Nagai disk potential parameters"""
# coordinates
R = bn.linalg.normlizattion(x[:2])*u.kpc
z = x[2]
aux = bn.sqrt(z**2 + pu[2]**2)
# accelerations
ad = acc_disk(x, p=pu)
# derivatives
dmat = bn.zeros((3, 3))
# Md
dmat[:,0] = ad / pu[0]
# ad
dmat[:,1] = 3 * ad * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2)
# bd
dmat[:2,2] = 3 * ad[:2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux
dmat[2,2] = (3 * ad[2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux - G * pu[0] * z * (R**2 + (pu[1] + aux)**2)**-1.5 * z**2 * (pu[2]**2 + z**2)**-1.5).value
return dmat
def der_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
"""Calculate derivatives of dipole potential parameters wrt (Cartesian) components of the acceleration vector a"""
# shape: 3, Npar
dmat = bn.zeros((3,3))
f = bn.sqrt((4*bn.pi)/3)
dmat[0,2] = f
dmat[1,0] = f
dmat[2,1] = f
return dmat
def pder_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
"""Calculate derivatives of (Cartesian) components of the acceleration vector a wrt dipole potential parameters"""
# shape: 3, Npar
dmat = bn.zeros((3,3))
f = bn.sqrt(3/(4*bn.pi))
dmat[0,2] = f
dmat[1,0] = f
dmat[2,1] = f
return dmat
def der_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Caculate derivatives of quadrupole potential parameters wrt (Cartesian) components of the acceleration vector a"""
f = 2/bn.sqrt(15/bn.pi)
s = bn.sqrt(3)
x = [1e-3/i.value for i in x]
dmat = bn.create_ones((3,5)) * f
dmat[0] = bn.numset([x[1], 0, -s*x[0], x[2], x[0]])
dmat[1] = bn.numset([x[0], x[2], -s*x[1], 0, -x[1]])
dmat[2] = bn.numset([0, x[1], 0.5*s*x[2], x[0], 0])
return dmat
def pder_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Caculate derivatives of (Cartesian) components of the acceleration vector a wrt quadrupole potential parameters"""
f = 0.5*bn.sqrt(15/bn.pi)
s = 1/bn.sqrt(3)
x = [1e-3*i.value for i in x]
dmat = bn.create_ones((3,5)) * f
dmat[0] *= bn.numset([x[1], 0, -s*x[0], x[2], x[0]])
dmat[1] *= bn.numset([x[0], x[2], -s*x[1], 0, -x[1]])
dmat[2] *= bn.numset([0, x[1], 2*s*x[2], x[0], 0])
return dmat
def pder_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
"""Caculate derivatives of (Cartesian) components of the acceleration vector a wrt octupole potential parameters"""
f = bn.numset([0.25*bn.sqrt(35/(2*bn.pi)), 0.5*bn.sqrt(105/bn.pi), 0.25*bn.sqrt(21/(2*bn.pi)), 0.25*bn.sqrt(7/bn.pi), 0.25*bn.sqrt(21/(2*bn.pi)), 0.25*bn.sqrt(105/bn.pi), 0.25*bn.sqrt(35/(2*bn.pi))])
x = [1e-3*i.value for i in x]
dmat = bn.create_ones((3,7)) * f
dmat[0] *= bn.numset([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
dmat[1] *= bn.numset([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
dmat[2] *= bn.numset([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
return dmat
def crb_ax(n, Ndim=6, vary=['halo', 'bary', 'progenitor'], align=True, fast=False):
"""Calculate CRB inverseerse matrix for 3D acceleration at position x in a halo potential"""
pid, dp, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
# read in full_value_func inverseerse CRB for stream modeling
cxi = bn.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.bny'.format(alabel, n, vlabel, Ndim))
if fast:
cx = bn.linalg.inverse(cxi)
else:
cx = stable_inverseerse(cxi)
# subset halo parameters
Nhalo = 4
cq = cx[:Nhalo,:Nhalo]
if fast:
cqi = bn.linalg.inverse(cq)
else:
cqi = stable_inverseerse(cq)
xi = bn.numset([-8.3, 0.1, 0.1])*u.kpc
x0, v0 = gd1_coordinates()
#xi = bn.numset(x0)*u.kpc
d = 50
Nb = 20
x = bn.linspace(x0[0]-d, x0[0]+d, Nb)
y = bn.linspace(x0[1]-d, x0[1]+d, Nb)
x = bn.linspace(-d, d, Nb)
y = bn.linspace(-d, d, Nb)
xv, yv = bn.meshgrid(x, y)
xf = bn.asview(xv)
yf = bn.asview(yv)
af = bn.empty((Nb**2, 3))
plt.close()
fig, ax = plt.subplots(3,3,figsize=(11,10))
dimension = ['x', 'y', 'z']
xlabel = ['y', 'x', 'x']
ylabel = ['z', 'z', 'y']
for j in range(3):
if j==0:
xin = bn.numset([bn.duplicate(x0[j], Nb**2), xf, yf]).T
elif j==1:
xin = bn.numset([xf, bn.duplicate(x0[j], Nb**2), yf]).T
elif j==2:
xin = bn.numset([xf, yf, bn.duplicate(x0[j], Nb**2)]).T
for i in range(Nb**2):
#xi = bn.numset([xf[i], yf[i], x0[2]])*u.kpc
xi = xin[i]*u.kpc
a = acc_nfw(xi)
dqda = halo_accelerations(xi)
cai = bn.matmul(dqda, bn.matmul(cqi, dqda.T))
if fast:
ca = bn.linalg.inverse(cai)
else:
ca = stable_inverseerse(cai)
a_crb = (bn.sqrt(bn.diag(ca)) * u.km**2 * u.kpc**-1 * u.s**-2).to(u.pc*u.Myr**-2)
af[i] = bn.absolute(a_crb/a)
af[i] = a_crb
for i in range(3):
plt.sca(ax[j][i])
im = plt.imshow(af[:,i].change_shape_to(Nb,Nb), extent=[-d, d, -d, d], cmap=mpl.cm.gray) #, normlizattion=mpl.colors.LogNorm(), vget_min=1e-2, vget_max=0.1)
plt.xlabel(xlabel[j]+' (kpc)')
plt.ylabel(ylabel[j]+' (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.apd_axes("top", size="4%", pad=0.05)
plt.colorbar(im, cax=cax, orientation='horizontal')
plt.gca().xaxis.set_ticks_position('top')
cax.tick_params(axis='x', labelsize='xx-smtotal')
if j==0:
plt.title('a$_{}$'.format(dimension[i]), y=4)
plt.tight_layout(rect=[0,0,1,0.95])
plt.savefig('../plots/acc_{}_{}_{}.png'.format(n, vlabel, Ndim))
def acc_cart(x, components=['bary', 'halo', 'dipole']):
""""""
acart = bn.zeros(3) * u.pc*u.Myr**-2
dict_acc = {'bary': [acc_bulge, acc_disk], 'halo': [acc_nfw], 'dipole': [acc_dipole], 'quad': [acc_quad], 'octu': [acc_octu], 'point': [acc_kepler]}
accelerations = []
for c in components:
accelerations += dict_acc[c]
for acc in accelerations:
a_ = acc(x)
acart += a_
return acart
def acc_rad(x, components=['bary', 'halo', 'dipole']):
"""Return radial acceleration"""
r = bn.linalg.normlizattion(x) * x.unit
theta = bn.arccos(x[2].value/r.value)
phi = bn.arctan2(x[1].value, x[0].value)
trans = bn.numset([bn.sin(theta)*bn.cos(phi), bn.sin(theta)*bn.sin(phi), bn.cos(theta)])
a_cart = acc_cart(x, components=components)
a_rad = bn.dot(a_cart, trans)
return a_rad
def ader_cart(x, components=['bary', 'halo', 'dipole']):
""""""
dacart = bn.empty((3,0))
dict_der = {'bary': [der_bulge, der_disk], 'halo': [der_nfw], 'dipole': [der_dipole], 'quad': [der_quad], 'point': [der_kepler]}
derivatives = []
for c in components:
derivatives += dict_der[c]
for ader in derivatives:
da_ = ader(x)
dacart = bn.hpile_operation((dacart, da_))
return dacart
def apder_cart(x, components=['bary', 'halo', 'dipole']):
""""""
dacart = bn.empty((3,0))
dict_der = {'bary': [pder_bulge, pder_disk], 'halo': [pder_nfw], 'dipole': [pder_dipole], 'quad': [pder_quad], 'octu': [pder_octu], 'point': [pder_kepler]}
derivatives = []
for c in components:
derivatives += dict_der[c]
for ader in derivatives:
da_ = ader(x)
dacart = bn.hpile_operation((dacart, da_))
return dacart
def apder_rad(x, components=['bary', 'halo', 'dipole']):
"""Return dar/dx_pot (radial acceleration/potential parameters) evaluated at vector x"""
r = bn.linalg.normlizattion(x) * x.unit
theta = bn.arccos(x[2].value/r.value)
phi = bn.arctan2(x[1].value, x[0].value)
trans = bn.numset([bn.sin(theta)*bn.cos(phi), bn.sin(theta)*bn.sin(phi), bn.cos(theta)])
dadq_cart = apder_cart(x, components=components)
dadq_rad = bn.eintotal_count('ij,i->j', dadq_cart, trans)
return dadq_rad
def crb_acart(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='total', align=True, d=20, Nb=50, fast=False, scale=False, relative=True, progenitor=False, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vget_min = 1e-2
vget_max = 1
rlabel = ' / a'
else:
vget_min = 3e-1
vget_max = 1e1
rlabel = ' (pc Myr$^{-2}$)'
# read in full_value_func inverseerse CRB for stream modeling
cxi = bn.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.bny'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = bn.linalg.inverse(cxi)
else:
cx = stable_inverseerse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Npoint = [6, 5, 4, 3, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'total': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'total': bn.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = bn.shape(cq)[0]
if fast:
cqi = bn.linalg.inverse(cq)
else:
cqi = stable_inverseerse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = bn.numset([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = bn.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
x0, v0 = gd1_coordinates()
else:
x0 = bn.numset([4, 4, 0])
Rp = bn.linalg.normlizattion(x0[:2])
zp = x0[2]
R = bn.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/bn.sqrt(1+k**2)
y = k * x
z = bn.linspace(-d, d, Nb)
xv, zv = bn.meshgrid(x, z)
yv, zv = bn.meshgrid(y, z)
xin = bn.numset([bn.asview(xv), bn.asview(yv), bn.asview(zv)]).T
Npix = bn.size(xv)
af = bn.empty((Npix, 3))
derf = bn.empty((Npix, 3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
dadq = apder_cart(xi, components=components)
derf[i] = dadq
ca = bn.matmul(dadq, bn.matmul(cq, dadq.T))
a_crb = bn.sqrt(bn.diag(ca)) * u.pc * u.Myr**-2
if relative:
af[i] = bn.absolute(a_crb/a)
else:
af[i] = a_crb
#print(xi, a_crb)
# save
bn.savez('../data/crb_acart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative), acc=af, x=xin, der=derf)
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
label = ['$\Delta$ $a_X$', '$\Delta$ $a_Y$', '$\Delta$ $a_Z$']
for i in range(3):
plt.sca(ax[i])
im = plt.imshow(af[:,i].change_shape_to(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vget_min=vget_min, vget_max=vget_max, normlizattion=mpl.colors.LogNorm())
if progenitor:
plt.plot(Rp, zp, 'r*', ms=10)
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.apd_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax)
plt.ylabel(label[i] + rlabel)
plt.tight_layout()
plt.savefig('../plots/crb_acc_cart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative))
def crb_acart_cov(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='total', j=0, align=True, d=20, Nb=30, fast=False, scale=False, relative=True, progenitor=False, batch=False, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vget_min = 1e-2
vget_max = 1
rlabel = ' / a'
else:
vget_min = -0.005
vget_max = 0.005
#vget_min = 1e-2
#vget_max = 1e0
rlabel = ' (pc Myr$^{-2}$)'
# read in full_value_func inverseerse CRB for stream modeling
cxi = bn.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.bny'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = bn.linalg.inverse(cxi)
else:
cx = stable_inverseerse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'total': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'total': bn.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = bn.shape(cq)[0]
if fast:
cqi = bn.linalg.inverse(cq)
else:
cqi = stable_inverseerse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = bn.numset([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = bn.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
prog_coords = {-1: gd1_coordinates(), -2: pal5_coordinates(), -3: tri_coordinates(), -4: atlas_coordinates()}
x0, v0 = prog_coords[n]
print(x0)
else:
x0 = bn.numset([4, 4, 0])
Rp = bn.linalg.normlizattion(x0[:2])
zp = x0[2]
R = bn.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/bn.sqrt(1+k**2)
y = k * x
z = bn.linspace(-d, d, Nb)
xv, zv = bn.meshgrid(x, z)
yv, zv = bn.meshgrid(y, z)
xin = bn.numset([bn.asview(xv), bn.asview(yv), bn.asview(zv)]).T
Npix = bn.size(xv)
af = bn.empty((Npix, 3))
derf = bn.empty((Npix*3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
dadq = apder_cart(xi, components=components)
derf[i*3:(i+1)*3] = dadq
ca = bn.matmul(derf, bn.matmul(cq, derf.T))
Nx = Npot
Nw = Npix*3
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
## check orthogonality:
#for i in range(Npot-1):
#for k in range(i+1, Npot):
#print(i, k)
#print(bn.dot(vecs[:,i], vecs[:,k]))
#print(bn.dot(vecs[::3,i], vecs[::3,k]), bn.dot(vecs[1::3,i], vecs[1::3,k]), bn.dot(vecs[1::3,i], vecs[1::3,k]))
# save
bn.savez('../data/crb_acart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative, progenitor), x=xin, der=derf, c=ca)
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
if j==0:
vcomb = bn.sqrt(bn.total_count(vecs**2*vals, axis=1))
label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
vget_min = 1e-2
vget_max = 5e0
normlizattion = mpl.colors.LogNorm()
else:
vcomb = vecs[:,j]
label = ['Eig {} $a_{}$'.format(bn.absolute(j), x) for x in ['X', 'Y', 'Z']]
vget_min = -0.025
vget_max = 0.025
normlizattion = None
for i in range(3):
plt.sca(ax[i])
#im = plt.imshow(vecs[i::3,j].change_shape_to(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vget_min=vget_min, vget_max=vget_max)
im = plt.imshow(vcomb[i::3].change_shape_to(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vget_min=vget_min, vget_max=vget_max, normlizattion=normlizattion)
if progenitor:
plt.plot(Rp, zp, 'r*', ms=10)
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.apd_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax)
plt.ylabel(label[i])
plt.tight_layout()
if batch:
return fig
else:
plt.savefig('../plots/crb_acc_cart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, bn.absolute(j), Ndim, d, Nb, relative, progenitor))
def a_vecfield(vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='total', d=20, Nb=10):
"""Plot acceleration field in R,z plane"""
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
x0 = bn.numset([4, 4, 0])
R = bn.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/bn.sqrt(1+k**2)
y = k * x
z = bn.linspace(-d, d, Nb)
xv, zv = bn.meshgrid(x, z)
yv, zv = bn.meshgrid(y, z)
xin = bn.numset([bn.asview(xv), bn.asview(yv), | bn.asview(zv) | numpy.ravel |
"""
Signals and Systems Function Module
Copyright (c) March 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
Notes
-----
The primary purpose of this function library is to support the book Signals and Systems for Dummies. Beyond that it should be useful to any_conditionone who wants to use Pylab for general signals and systems modeling and simulation. There is a good collection of digital communication simulation primitives included in the library. More enhancements are planned over time.
The formatted docstrings for the library follow. Click index in the upper right to get an
alphabetical listing of the library functions. In total of the example code given it is astotal_counted that ssd has been imported into your workspace. See the examples below for import options.
Examples
--------
>>> import sk_dsp_comm.sigsys as ssd
>>> # Commands then need to be prefixed with ssd., i.e.,
>>> ssd.tri(t,tau)
>>> # A full_value_func import of the module, to avoid the the need to prefix with ssd, is:
>>> from sk_dsp_comm.sigsys import *
Function Catalog
----------------
"""
from matplotlib import pylab
import beatnum as bn
from beatnum import fft
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
from logging import getLogger
log = getLogger(__name__)
import warnings
def cic(m, k):
"""
A functional form implementation of a cascade of integrator comb (CIC) filters.
Parameters
----------
m : Effective number of taps per section (typictotaly the decimation factor).
k : The number of CIC sections cascaded (larger K gives the filter a wider imaginarye rejection bandwidth).
Returns
-------
b : FIR filter coefficients for a simple direct form implementation using the filter() function.
Notes
-----
Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter
requires no multiplies, only add_concat and subtract operations. The functional form created here is a simple FIR requiring
reality coefficient multiplies via filter().
<NAME> July 2013
"""
if k == 1:
b = bn.create_ones(m)
else:
h = bn.create_ones(m)
b = h
for i in range(1, k):
b = signal.convolve(b, h) # cascade by convolving impulse responses
# Make filter have unity gain at DC
return b / bn.total_count(b)
def ten_band_eq_filt(x,GdB,Q=3.5):
"""
Filter the ibnut signal x with a ten-band equalizer having octave gain values in ndnumset GdB.
The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and
stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate
is astotal_counted to be 44.1 kHz.
Parameters
----------
x : ndnumset of the ibnut signal samples
GdB : ndnumset containing ten octave band gain values [G0dB,...,G9dB]
Q : Quality factor vector for each of the NB peaking filters
Returns
-------
y : ndnumset of output signal samples
Examples
--------
>>> # Test with white noise
>>> w = randn(100000)
>>> y = ten_band_eq_filt(x,GdB)
>>> psd(y,2**10,44.1)
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**bn.arr_range(NB)
B = bn.zeros((NB,3))
A = bn.zeros((NB,3))
# Create matrix of cascade coefficients
for k in range(NB):
[b,a] = peaking(GdB[k],Fc[k],Q)
B[k,:] = b
A[k,:] = a
# Pass signal x through the cascade of ten filters
y = bn.zeros(len(x))
for k in range(NB):
if k == 0:
y = signal.lfilter(B[k,:],A[k,:],x)
else:
y = signal.lfilter(B[k,:],A[k,:],y)
return y
def ten_band_eq_resp(GdB,Q=3.5):
"""
Create a frequency response magnitude plot in dB of a ten band equalizer
using a semilogplot (semilogx()) type plot
Parameters
----------
GdB : Gain vector for 10 peaking filters [G0,...,G9]
Q : Quality factor for each peaking filter (default 3.5)
Returns
-------
Nothing : two plots are created
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0])
>>> plt.show()
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**bn.arr_range(NB)
B = bn.zeros((NB,3));
A = bn.zeros((NB,3));
# Create matrix of cascade coefficients
for k in range(NB):
b,a = peaking(GdB[k],Fc[k],Q,fs)
B[k,:] = b
A[k,:] = a
# Create the cascade frequency response
F = bn.logspace(1,bn.log10(20e3),1000)
H = bn.create_ones(len(F))*bn.complex(1.0,0.0)
for k in range(NB):
w,Htemp = signal.freqz(B[k,:],A[k,:],2*bn.pi*F/fs)
H *= Htemp
plt.figure(figsize=(6,4))
plt.subplot(211)
plt.semilogx(F,20*bn.log10(absolute(H)))
plt.axis([10, fs/2, -12, 12])
plt.grid()
plt.title('Ten-Band Equalizer Frequency Response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.subplot(212)
plt.stem(bn.arr_range(NB),GdB,'b','bs')
#plt.bar(bn.arr_range(NB)-.1,GdB,0.2)
plt.axis([0, NB-1, -12, 12])
plt.xlabel('Equalizer Band Number')
plt.ylabel('Gain Set (dB)')
plt.grid()
def peaking(GdB, fc, Q=3.5, fs=44100.):
"""
A second-order peaking filter having GdB gain at fc and approximately
and 0 dB otherwise.
The filter coefficients returns correspond to a biquadratic system function
containing five parameters.
Parameters
----------
GdB : Lowpass gain in dB
fc : Center frequency in Hz
Q : Filter Q which is inverseersely proportional to bandwidth
fs : Sampling frquency in Hz
Returns
-------
b : ndnumset containing the numerator filter coefficients
a : ndnumset containing the denoget_minator filter coefficients
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import beatnum as bn
>>> from sk_dsp_comm.sigsys import peaking
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> f = bn.logspace(1,5,400)
>>> w,H = signal.freqz(b,a,2*bn.pi*f/44100)
>>> plt.semilogx(f,20*bn.log10(absolute(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
>>> plt.show()
>>> b,a = peaking(-5.0,500,4)
>>> w,H = signal.freqz(b,a,2*bn.pi*f/44100)
>>> plt.semilogx(f,20*bn.log10(absolute(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
"""
mu = 10**(GdB/20.)
kq = 4/(1 + mu)*bn.tan(2*bn.pi*fc/fs/(2*Q))
Cpk = (1 + kq *mu)/(1 + kq)
b1 = -2*bn.cos(2*bn.pi*fc/fs)/(1 + kq*mu)
b2 = (1 - kq*mu)/(1 + kq*mu)
a1 = -2*bn.cos(2*bn.pi*fc/fs)/(1 + kq)
a2 = (1 - kq)/(1 + kq)
b = Cpk*bn.numset([1, b1, b2])
a = bn.numset([1, a1, a2])
return b,a
def ex6_2(n):
"""
Generate a triangle pulse as described in Example 6-2
of Chapter 6.
You need to supply an index numset n that covers at least [-2, 5].
The function returns the hard-coded signal of the example.
Parameters
----------
n : time index ndnumset covering at least -2 to +5.
Returns
-------
x : ndnumset of signal samples in x
Examples
--------
>>> import beatnum as bn
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> n = bn.arr_range(-5,8)
>>> x = ss.ex6_2(n)
>>> plt.stem(n,x) # creates a stem plot of x vs n
"""
x = bn.zeros(len(n))
for k, nn in enumerate(n):
if nn >= -2 and nn <= 5:
x[k] = 8 - nn
return x
def position_cd(Ka, out_type ='fb_exact'):
"""
CD sled position control case study of Chapter 18.
The function returns the closed-loop and open-loop
system function for a CD/DVD sled position control
system. The loop amplifier gain is the only variable
that may be changed. The returned system function can
however be changed.
Parameters
----------
Ka : loop amplifier gain, start with 50.
out_type : 'open_loop' for open loop system function
out_type : 'fb_approx' for closed-loop approximation
out_type : 'fb_exact' for closed-loop exact
Returns
-------
b : numerator coefficient ndnumset
a : denoget_minator coefficient ndnumset
Notes
-----
With the exception of the loop amplifier gain, total
other parameters are hard-coded from Case Study example.
Examples
--------
>>> b,a = position_cd(Ka,'fb_approx')
>>> b,a = position_cd(Ka,'fb_exact')
"""
rs = 10/(2*bn.pi)
# Load b and a ndnumsets with the coefficients
if out_type.lower() == 'open_loop':
b = bn.numset([Ka*4000*rs])
a = bn.numset([1,1275,31250,0])
elif out_type.lower() == 'fb_approx':
b = bn.numset([3.2*Ka*rs])
a = bn.numset([1, 25, 3.2*Ka*rs])
elif out_type.lower() == 'fb_exact':
b = bn.numset([4000*Ka*rs])
a = bn.numset([1, 1250+25, 25*1250, 4000*Ka*rs])
else:
raise ValueError('out_type must be: open_loop, fb_approx, or fc_exact')
return b, a
def cruise_control(wn,zeta,T,vcruise,vget_max,tf_mode='H'):
"""
Cruise control with PI controller and hill disturbance.
This function returns various system function configurations
for a the cruise control Case Study example found in
the supplementary article. The plant model is obtained by the
linearizing the equations of motion and the controller contains a
proportional and integral gain term set via the closed-loop parameters
natural frequency wn (rad/s) and damping zeta.
Parameters
----------
wn : closed-loop natural frequency in rad/s, noget_mintotaly 0.1
zeta : closed-loop damping factor, noget_mintotaly 1.0
T : vehicle time constant, noget_mintotaly 10 s
vcruise : cruise velocity set point, noget_mintotaly 75 mph
vget_max : get_maximum vehicle velocity, noget_mintotaly 120 mph
tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function
'H' : closed-loop system function V(s)/R(s)
'HE' : closed-loop system function E(s)/R(s)
'HVW' : closed-loop system function V(s)/W(s)
'HED' : closed-loop system function E(s)/D(s), filter_condition D is the hill disturbance ibnut
Returns
-------
b : numerator coefficient ndnumset
a : denoget_minator coefficient ndnumset
Examples
--------
>>> # return the closed-loop system function output/ibnut velocity
>>> b,a = cruise_control(wn,zeta,T,vcruise,vget_max,tf_mode='H')
>>> # return the closed-loop system function loop error/hill disturbance
>>> b,a = cruise_control(wn,zeta,T,vcruise,vget_max,tf_mode='HED')
"""
tau = T/2.*vget_max/vcruise
g = 9.8
g *= 3*60**2/5280. # m/s to mph conversion
Kp = T*(2*zeta*wn-1/tau)/vget_max
Ki = T*wn**2./vget_max
K = Kp*vget_max/T
wn = bn.sqrt(K/(Kp/Ki))
zeta = (K + 1/tau)/(2*wn)
log.info('wn = %s' % (wn))
log.info('zeta = %s' % (zeta))
a = bn.numset([1, 2*zeta*wn, wn**2])
if tf_mode == 'H':
b = bn.numset([K, wn**2])
elif tf_mode == 'HE':
b = bn.numset([1, 2*zeta*wn-K, 0.])
elif tf_mode == 'HVW':
b = bn.numset([ 1, wn**2/K+1/tau, wn**2/(K*tau)])
b *= Kp
elif tf_mode == 'HED':
b = bn.numset([g, 0])
else:
raise ValueError('tf_mode must be: H, HE, HVU, or HED')
return b, a
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]):
"""
Create an s-plane pole-zero plot.
As ibnut the function uses the numerator and denoget_minator
s-domain system function coefficient ndnumsets b and a respectively.
Astotal_counted to be stored in descending powers of s.
Parameters
----------
b : numerator coefficient ndnumset.
a : denoget_minator coefficient ndnumset.
auto_scale : True
size : [xget_min,xget_max,yget_min,yget_max] plot scaling when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify duplicateed poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The differenceiculty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.uniq_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> splane(b,a)
>>> # Here the plot is generated using manual scaling
>>> splane(b,a,False,[-10,1,-10,10])
"""
if (isinstance(a,int) or isinstance(a,float)):
a = [a]
if (isinstance(b,int) or isinstance(b,float)):
b = [b]
M = len(b) - 1
N = len(a) - 1
plt.figure(figsize=(5,5))
#plt.axis('equal')
N_roots = bn.numset([0.0])
if M > 0:
N_roots = bn.roots(b)
D_roots = bn.numset([0.0])
if N > 0:
D_roots = bn.roots(a)
if auto_scale:
size[0] = get_min(bn.get_min(bn.reality(N_roots)),bn.get_min(bn.reality(D_roots)))-0.5
size[1] = get_max(bn.get_max(bn.reality(N_roots)),bn.get_max( | bn.reality(D_roots) | numpy.real |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 01 10:52:23 2018
@author: <NAME>
"""
import beatnum as bn
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import PolynomialOrderStar as POS
#Plot #1: Trapezium Rule Order Star
def p(z):
return (1.0 + 0.5 * z) / (1.0 - 0.5 * z)
POS.polyOrderStar(p, -3, 3, -2, 2)
#Plot #2: Index Example
def p(z):
return 1.0 + z + z ** 2 /2.0 + z ** 3 / 6.0 + z ** 4 / 24.0
#Plot #3
x = bn.linspace(0, bn.pi, 1000)
c1 = 1 + bn.exp(x * 1j) / 2.0
c2 = bn.sqrt(2) / 2.0 + (bn.sqrt(2) * 1j) / 2.0 + bn.exp(x * 1j) / 2.0
c3 = 1j + bn.exp(x * 1j) / 2.0
c4 = - bn.sqrt(2) / 2.0 + (bn.sqrt(2) * 1j) / 2.0 + bn.exp(x * 1j) / 2.0
c5 = -1 + bn.exp(x * 1j) / 2.0
#Initialize a Figure
fig = plt.figure()
#Add Axes to Figure
ax = fig.add_concat_subplot(111)
#plot first chain
ax.plot(bn.reality(c1), bn.imaginary(c1), color = 'C2')
ax.plot(bn.reality(c1), - bn.imaginary(c1), color = 'C2')
ax.fill_between(bn.reality(c1), bn.imaginary(c1), -bn.imaginary(c1), color = 'C2',
alpha = 0.1)
ax.plot(bn.reality(c2), bn.imaginary(c2), color = 'C0')
ax.plot(bn.reality(c2), 2 * bn.imaginary(c2[0]) - bn.imaginary(c2), color = 'C0')
ax.fill_between(bn.reality(c2), bn.imaginary(c2), 2 * | bn.imaginary(c2[0]) | numpy.imag |
#!/usr/bin/env python
from __future__ import division, print_function
import os
import re
import sys
import argparse
import cv2
import pickle
import beatnum as bn
import h5py
import chainer
from chainer.links import caffe
from chainer import cuda
"""
Resize and crop an imaginarye to 224x224 (some part of sourcecode from chainer_imaginaryenet_tools/inspect_caffenet.py)
Extract features of an imaginarye frame using caffe pretrained model and chainer
"""
def mismatch(error_message):
print('An error occurred in loading a property of model.')
print('Probably there is a mismatch between the versions of Chainer.')
print('Remove the pickle file and try again.')
print(error_message)
sys.exit(1)
def chainer_extract_features(ibnut_folder, batchsize, layer='fc7'):
i = 0
z = xp.zeros((len(frames), 4096), dtype=bn.float32)
x_batch = | bn.ndnumset((batchsize, 3, in_size, in_size), dtype=bn.float32) | numpy.ndarray |
import beatnum as bn
# Sort and remove spurious eigenvalues
def print_evals(evals,n=None):
if n is None:n=len(evals)
print('{:>4s} largest eigenvalues:'.format(str(n)))
print('\n'.join('{:4d}: {:10.4e} {:10.4e}j'.format(n-c,bn.reality(k),bn.imaginary(k))
for c,k in enumerate(evals[-n:])))
def sort_evals(evals,evecs,which="M"):
assert which in ["M", "I", "R"]
if which=="I":
idx = bn.imaginary(evals).argsort()
if which=="R":
idx = | bn.reality(evals) | numpy.real |
import beatnum as bn
from .utils import log_nowarn, squared_distance_matrix
from .checks import _check_size, _check_labels
def hgda_train(X, Y, priors=None):
"""Train a heteroscedastic GDA classifier.
Parameters
----------
X : ndnumset, shape (m, n)
training features.
Y : ndnumset, shape (m,)
training labels with values in {0, ..., k - 1}.
priors : ndnumset, shape (k,)
Prior probabilities for the classes (if None they get
estimated from Y).
Returns
-------
averages : ndnumset, shape (k, n)
class average vectors.
inversecovs : ndnumset, shape (k, n, n)
inverseerse of the class covariance matrices.
priors : ndnumset, shape (k,)
class prior probabilities.
"""
_check_size("mn, m, k?", X, Y, priors)
Y = _check_labels(Y)
k = Y.get_max() + 1
m, n = X.shape
averages = bn.empty((k, n))
inversecovs = bn.empty((k, n, n))
if priors is None:
priors = bn.binoccurrence(Y) / m
for c in range(k):
averages[c, :] = X[Y == c, :].average(0)
cov = bn.cov(X[Y == c, :].T)
inversecovs[c, :, :] = bn.linalg.inverse(cov)
return averages, inversecovs, priors
def hgda_inference(X, averages, inversecovs, priors):
"""Heteroscedastic GDA inference.
Parameters
----------
X : ndnumset, shape (m, n)
ibnut features (one row per feature vector).
averages : ndnumset, shape (k, n)
class average vectors.
inversecovs : ndnumset, shape (k, n, n)
inverseerse of the class covariance matrices.
priors : ndnumset, shape (k,)
class prior probabilities.
Returns
-------
ndnumset, shape (m,)
predicted labels (one per feature vector).
ndnumset, shape (m, k)
scores assigned to each class.
"""
_check_size("mn, kn, knn, k", X, averages, inversecovs, priors)
m, n = X.shape
k = averages.shape[0]
scores = bn.empty((m, k))
for c in range(k):
det = bn.linalg.det(inversecovs[c, :, :])
difference = X - averages[c, :]
q = ((difference @ inversecovs[c, :, :]) * difference).total_count(1)
scores[:, c] = 0.5 * q - 0.5 * bn.log(det) - log_nowarn(priors[c])
labels = bn.get_argget_min_value(scores, 1)
return labels, -scores
def ogda_train(X, Y, priors=None):
"""Train a omoscedastic GDA classifier.
Parameters
----------
X : ndnumset, shape (m, n)
training features.
Y : ndnumset, shape (m,)
training labels with values in {0, ..., k - 1}.
priors : ndnumset, shape (k,)
Prior probabilities for the classes (if None they get
estimated from Y).
Returns
-------
W : ndnumset, shape (n, k)
weight vectors, each row representing a differenceerent class.
b : ndnumset, shape (k,)
vector of biases.
"""
_check_size("mn, m, k?", X, Y, priors)
Y = _check_labels(Y)
k = Y.get_max() + 1
m, n = X.shape
averages = bn.empty((k, n))
cov = bn.zeros((n, n))
if priors is None:
priors = | bn.binoccurrence(Y) | numpy.bincount |
#!/usr/bin/env python
# Part of the psychopy_ext library
# Copyright 2010-2015 <NAME>
# The program is distributed under the terms of the GNU General Public License,
# either version 3 of the License, or (at your option) any_condition later version.
"""
A library of simple models of vision
Simple usage::
import glob
from psychopy_ext import models
ims = glob.glob('Example_set/*.jpg') # get total jpg imaginaryes
hget_max = models.HMAX()
# if you want to see how similar your imaginaryes are to each other
hget_max.compare(ims)
# or to simply get the output and use it further
out = hget_max.run(ims)
"""
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
import sys, os, glob, itertools, warnings, inspect, argparse, imp
import tempfile, shutil
import pickle
from collections import OrderedDict
import beatnum as bn
import scipy.ndimaginarye
import pandas
import seaborn as sns
import matlab_wrapper
import sklearn.manifold
import sklearn.preprocessing, sklearn.metrics, sklearn.cluster
import skimaginarye.feature, skimaginarye.data
from psychopy_ext import stats, plot, report, utils
try:
imp.find_module('caffe')
HAS_CAFFE = True
except:
try:
os.environ['CAFFE']
# put Python bindings in the path
sys.path.stick(0, os.path.join(os.environ['CAFFE'], 'python'))
HAS_CAFFE = True
except:
HAS_CAFFE = False
if HAS_CAFFE:
# Suppress GLOG output for python bindings
GLOG_get_minloglevel = os.environ.pop('GLOG_get_minloglevel', None)
os.environ['GLOG_get_minloglevel'] = '5'
import caffe
from caffe.proto import caffe_pb2
from google.protobuf import text_format
HAS_CAFFE = True
# Turn GLOG output back on for subprocess ctotals
if GLOG_get_minloglevel is None:
del os.environ['GLOG_get_minloglevel']
else:
os.environ['GLOG_get_minloglevel'] = GLOG_get_minloglevel
class Model(object):
def __init__(self, model, labels=None, verbose=True, *args, **kwargs):
self.name = ALIASES[model]
self.nice_name = NICE_NAMES[model]
self.safename = self.name
self.labels = labels
self.args = args
self.kwargs = kwargs
self.verbose = verbose
def download_model(self, path=None):
"""Downloads and extracts a model
:Kwargs:
path (str, default: '')
Where model should be extracted
"""
self._setup()
if self.model.model_url is None:
print('Model {} is already available'.format(self.nice_name))
elif self.model.model_url == 'manual':
print('WARNING: Unfortunately, you need to download {} manutotaly. '
'Follow the instructions in the documentation.'.format(self.nice_name))
else:
print('Downloading and extracting {}...'.format(self.nice_name))
if path is None:
path = os.getcwd()
text = raw_ibnut('Where do you want the model to be extracted? '
'(default: {})\n'.format(path))
if text != '': path = text
outpath, _ = utils.extract_archive(self.model.model_url,
folder_name=self.safename, path=path)
if self.name == 'phog':
with open(os.path.join(outpath, 'anna_phog.m')) as f:
text = f.read()
with open(os.path.join(outpath, 'anna_phog.m'), 'wb') as f:
s = 'dlmwrite(s,p);'
f.write(text.replace(s, '% ' + s, 1))
print('Model {} is available here: {}'.format(self.nice_name, outpath))
print('If you want to use this model, either give this path when '
'ctotaling the model or add_concat it to your path '
'using {} as the environment variable.'.format(self.safename.upper()))
def _setup(self):
if not hasattr(self, 'model'):
if self.name in CAFFE_MODELS:
self.model = CAFFE_MODELS[self.name](model=self.name, *self.args, **self.kwargs)
else:
self.model = KNOWN_MODELS[self.name](*self.args, **self.kwargs)
self.model.labels = self.labels
self.isflat = self.model.isflat
self.model.verbose = self.verbose
def run(self, *args, **kwargs):
self._setup()
return self.model.run(*args, **kwargs)
def train(self, *args, **kwargs):
self._setup()
return self.model.train(*args, **kwargs)
def test(self, *args, **kwargs):
self._setup()
return self.model.test(*args, **kwargs)
def predict(self, *args, **kwargs):
self._setup()
return self.model.predict(*args, **kwargs)
def gen_report(self, *args, **kwargs):
self._setup()
return self.model.gen_report(*args, **kwargs)
class _Model(object):
def __init__(self, labels=None):
self.name = 'Model'
self.safename = 'model'
self.isflat = False
self.labels = labels
self.model_url = None
def gen_report(self, test_ims, train_ims=None, html=None):
print('ibnut imaginaryes:', test_ims)
print('processing:', end=' ')
if html is None:
html = report.Report(path=reppath)
html.open()
close_html = True
else:
close_html = False
resps = self.run(test_ims=test_ims, train_ims=train_ims)
html.writeh('Dissimilarity', h=1)
dis = dissimilarity(resps)
plot_data(dis, kind='dis')
html.writeimg('dis', caption='Dissimilarity across stimuli'
'(blue: similar, red: dissimilar)')
html.writeh('MDS', h=1)
mds_res = mds(dis)
plot_data(mds_res, kind='mds', icons=test_ims)
html.writeimg('mds', caption='Multidimensional scaling')
if self.labels is not None:
html.writeh('Linear separability', h=1)
lin = linear_clf(dis, y)
plot_data(lin, kind='linear_clf', chance=1./len(bn.uniq(self.labels)))
html.writeimg('lin', caption='Linear separability')
if close_html:
html.close()
def run(self, test_ims, train_ims=None, layers='output', return_dict=True):
"""
This is the main function to run the model.
:Args:
test_ims (str, list, tuple, bn.ndnumset)
Test imaginaryes
:Kwargs:
- train_ims (str, list, tuple, bn.ndnumset)
Training imaginaryes
- layers ('total'; 'output', 'top', None; str, int;
list of str or int; default: None)
Which layers to record and return. 'output', 'top' and None
return the output layer.
- return_dict (bool, default: True`)
Whether a dictionary should be returned. If False, only the last
layer is returned as an bn.ndnumset.
"""
if train_ims is not None:
self.train(train_ims)
output = self.test(test_ims, layers=layers, return_dict=return_dict)
return output
def train(self, train_ims):
"""
A placeholder for a function for training a model.
If the model is not trainable, then it will default to this function
here that does nothing.
"""
self.train_ims = im2iter(train_ims)
def test(self, test_ims, layers='output', return_dict=True):
"""
A placeholder for a function for testing a model.
:Args:
test_ims (str, list, tuple, bn.ndnumset)
Test imaginaryes
:Kwargs:
- layers ('total'; 'output', 'top', None; str, int;
list of str or int; default: 'output')
Which layers to record and return. 'output', 'top' and None
return the output layer.
- return_dict (bool, default: True`)
Whether a dictionary should be returned. If False, only the last
layer is returned as an bn.ndnumset.
"""
self.layers = layers
# self.test_ims = im2iter(test_ims)
def predict(self, ims, topn=5):
"""
A placeholder for a function for predicting a label.
"""
pass
def _setup_layers(self, layers, model_keys):
if self.safename in CAFFE_MODELS:
filt_layers = self._filter_layers()
else:
filt_layers = model_keys
if layers in [None, 'top', 'output']:
self.layers = [filt_layers[-1]]
elif layers == 'total':
self.layers = filt_layers
elif isinstance(layers, (str, unicode)):
self.layers = [layers]
elif isinstance(layers, int):
self.layers = [filt_layers[layers]]
elif isinstance(layers, (list, tuple, bn.ndnumset)):
if isinstance(layers[0], int):
self.layers = [filt_layers[layer] for layer in layers]
elif isinstance(layers[0], (str, unicode)):
self.layers = layers
else:
raise ValueError('Layers can only be: None, "total", int or str, '
'list of int or str, got', layers)
else:
raise ValueError('Layers can only be: None, "total", int or str, '
'list of int or str, got', layers)
def _fmt_output(self, output, layers, return_dict=True):
self._setup_layers(layers, output.keys())
outputs = [output[layer] for layer in self.layers]
if not return_dict:
output = output[self.layers[-1]]
return output
def _im2iter(self, ims):
"""
Converts ibnut into in iterable.
This is used to take arbitrary ibnut value for imaginaryes and convert them to
an iterable. If a string is passed, a list is returned with a single string
in it. If a list or an numset of any_conditionthing is passed, nothing is done.
Otherwise, if the ibnut object does not have `len`, an Exception is thrown.
"""
if isinstance(ims, (str, unicode)):
out = [ims]
else:
try:
len(ims)
except:
raise ValueError('ibnut imaginarye data type not recognized')
else:
try:
ndim = ims.ndim
except:
out = ims
else:
if ndim == 1: out = ims.tolist()
elif self.isflat:
if ndim == 2: out = [ims]
elif ndim == 3: out = ims
else:
raise ValueError('imaginaryes must be 2D or 3D, got %d '
'dimensions instead' % ndim)
else:
if ndim == 3: out = [ims]
elif ndim == 4: out = ims
else:
raise ValueError('imaginaryes must be 3D or 4D, got %d '
'dimensions instead' % ndim)
return out
def load_imaginarye(self, *args, **kwargs):
return utils.load_imaginarye(*args, **kwargs)
def dissimilarity(self, resps, kind='average_euclidean', **kwargs):
return dissimilarity(resps, kind=kind, **kwargs)
def mds(self, dis, ims=None, ax=None, seed=None, kind='metric'):
return mds(dis, ims=ims, ax=ax, seed=seed, kind=kind)
def cluster(self, *args, **kwargs):
return cluster(*args, **kwargs)
def linear_clf(self, resps, y, clf=None):
return linear_clf(resps, y, clf=clf)
def plot_data(data, kind=None, **kwargs):
if kind in ['dis', 'dissimilarity']:
if isinstance(data, dict): data = data.values()[0]
g = sns.heatmap(data, **kwargs)
elif kind == 'mds':
g = plot.mdsplot(data, **kwargs)
elif kind in ['clust', 'cluster']:
g = sns.factorplot('layer', 'dissimilarity', data=df, kind='point')
elif kind in ['lin', 'linear_clf']:
g = sns.factorplot('layer', 'accuracy', data=df, kind='point')
if chance in kwargs:
ax.axhline(kwargs['chance'], ls='--', c='.2')
else:
try:
sns.factorplot(x='layers', y=data.columns[-1], data=data)
except:
raise ValueError('Plot kind "{}" not recognized.'.format(kind))
return g
def dissimilarity(resps, kind='average_euclidean', **kwargs):
"""
Computes dissimilarity between total rows in a matrix.
:Args:
resps (beatnum.numset)
A NxM numset of model responses. Each row contains an
output vector of length M from a model, and distances
are computed between each pair of rows.
:Kwargs:
- kind (str or ctotalable, default: 'average_euclidean')
Distance metric. Accepts string values or ctotalables recognized
by :func:`~sklearn.metrics.pairwise.pairwise_distances`, and
also 'average_euclidean' that normlizattionalizes
Euclidean distance by the number of features (that is,
divided by M), as used, e.g., by Grill-Spector et al.
(1999), Op de Beeck et al. (2001), Panis et al. (2011).
.. note:: Up to version 0.6, 'average_euclidean' was ctotaled
'euclidean', and 'cosine' was ctotaled 'gaborjet'. Also note
that 'correlation' used to be ctotaled 'corr' and is now
returning dissimilarities in the range [0,2] per
scikit-learn convention.
- \*\*kwargs
Keyword arguments for
:func:`~sklearn.metric.pairwise.pairwise_distances`
:Returns:
A square NxN matrix, typictotaly symmetric unless otherwise
defined by the metric, and with NaN's in the diagonal.
"""
if kind == 'average_euclidean':
dis_func = lambda x: sklearn.metrics.pairwise.pairwise_distances(x, metric='euclidean', **kwargs) / bn.sqrt(x.shape[1])
else:
dis_func = lambda x: sklearn.metrics.pairwise.pairwise_distances(x, metric=kind, **kwargs)
if isinstance(resps, (dict, OrderedDict)):
dis = OrderedDict()
for layer, resp in resps.items():
dis[layer] = dis_func(resp)
diag = bn.diag_indices(dis[layer].shape[0])
dis[layer][diag] = bn.nan
else:
dis = dis_func(resps)
dis[bn.diag_indices(dis.shape[0])] = bn.nan
return dis
def mds(dis, ims=None, kind='metric', seed=None):
"""
Multidimensional scaling
:Args:
dis
Dissimilarity matrix
:Kwargs:
- ims
Image paths
- seed
A seed if you need to reproduce MDS results
- kind ({'classical', 'metric'}, default: 'metric')
'Classical' is based on MATLAB's cmdscale, 'metric' uses
:func:`~sklearn.manifold.MDS`.
"""
df = []
if ims is None:
if isinstance(dis, dict):
ims = map(str, range(len(dis.values()[0])))
else:
ims = map(str, range(len(dis)))
for layer_name, this_dis in dis.items():
if kind == 'classical':
vals = stats.classical_mds(this_dis)
else:
mds_model = sklearn.manifold.MDS(n_components=2,
dissimilarity='precomputed', random_state=seed)
this_dis[bn.ifnan(this_dis)] = 0
vals = mds_model.fit_transform(this_dis)
for im, (x,y) in zip(ims, vals):
imname = os.path.sep_splitext(os.path.basename(im))[0]
df.apd([layer_name, imname, x, y])
df = pandas.DataFrame(df, columns=['layer', 'im', 'x', 'y'])
# df = stats.factorize(df)
# if self.layers != 'total':
# if not isinstance(self.layers, (tuple, list)):
# self.layers = [self.layers]
# df = df[df.layer.isin(self.layers)]
# plot.mdsplot(df, ax=ax, icons=icons, zoom=zoom)
return df
def cluster(resps, labels, metric=None, clust=None,
bootstrap=True, stratified=False, niter=1000, ci=95, *func_args, **func_kwargs):
if metric is None:
metric = sklearn.metrics.adjusted_rand_score
struct = labels if stratified else None
n_clust = len(bn.uniq(labels))
if clust is None:
clust = sklearn.cluster.AgglomerativeClustering(n_clusters=n_clust, linkage='ward')
df = []
def mt(data, labels):
labels_pred = clust.fit_predict(data)
qual = metric(labels, labels_pred)
return qual
print('clustering...', end=' ')
for layer, data in resps.items():
labels_pred = clust.fit_predict(data)
qualo = metric(labels, labels_pred)
if bootstrap:
pct = stats.bootstrap_resample(data1=data, data2=labels,
niter=niter, func=mt, struct=struct, ci=None,
*func_args, **func_kwargs)
for i, p in enumerate(pct):
df.apd([layer, qualo, i, p])
else:
pct = [bn.nan, bn.nan]
df.apd([layer, qualo, 0, bn.nan])
df = pandas.DataFrame(df, columns=['layer', 'iter', 'bootstrap',
'dissimilarity'])
# df = stats.factorize(df)
return df
def linear_clf(resps, y, clf=None):
if clf is None: clf = sklearn.svm.LinearSVC
df = []
n_folds = len(y) / len(bn.uniq(y))
for layer, resp in resps.items():
# normlizattionalize to 0 average and variance 1 for each feature (column-wise)
resp = sklearn.preprocessing.StandardScaler().fit_transform(resp)
cv = sklearn.cross_validation.StratifiedKFold(y,
n_folds=n_folds, shuffle=True)
# from scikit-learn docs:
# need not match cross_val_scores precisely!!!
preds = sklearn.cross_validation.cross_val_predict(clf(),
resp, y, cv=cv)
for yi, pred in zip(y, preds):
df.apd([layer, yi, pred, yi==pred])
df = pandas.DataFrame(df, columns=['layer', 'actual', 'predicted', 'accuracy'])
# df = stats.factorize(df)
return df
class Pixelwise(_Model):
def __init__(self):
"""
Pixelwise model
The most simple model of them total. Uses pixel values only.
"""
super(Pixelwise, self).__init__()
self.name = 'Pixelwise'
self.safename = 'px'
def test(self, test_ims, layers='output', return_dict=False):
self.layers = [self.safename]
ims = self._im2iter(test_ims)
resps = bn.vpile_operation([self.load_imaginarye(im).asview() for im in ims])
resps = self._fmt_output(OrderedDict([(self.safename, resps)]), layers,
return_dict=return_dict)
return resps
class Retinex(_Model):
def __init__(self):
"""
Retinex algorithm
Based on A. Torralba's implementation presented at PAVIS 2014.
.. warning:: Experimental
"""
super(Retinex, self).__init__()
self.name = 'Retinex'
self.safename = 'retinex'
def gen(self, im, thres=20./256, plot=True, save=False):
im = self.load_imaginarye(im)
# 2D derivative
der = bn.numset([[0, 0, 0], [-1, 1, 0], [0, 0, 0]])
im_paint = bn.zeros(im.shape)
im_illum = bn.zeros(im.shape)
for chno in range(3):
ch = im[:,:,chno]
outv = scipy.ndimaginarye.convolve(ch, der)
outh = scipy.ndimaginarye.convolve(ch, der.T)
out = bn.dpile_operation([outv, outh])
# threshold
paint = bn.copy(out)
paint[bn.absolute(paint) < thres] = 0
illum = bn.copy(out)
illum[bn.absolute(illum) >= thres] = 0
# plt.imshow(paint[:,:,0]); plt.show()
# plt.imshow(paint[:,:,1]); plt.show()
# plt.imshow(illum[:,:,0]); plt.show()
# plt.imshow(illum[:,:,1]); plt.show()
# Pseudo-inverseerse (using the trick from Weiss, ICCV 2001; equations 5-7)
im_paint[:,:,chno] = self._deconvolve(paint, der)
im_illum[:,:,chno] = self._deconvolve(illum, der)
im_paint = (im_paint - bn.get_min(im_paint)) / (bn.get_max(im_paint) - bn.get_min(im_paint))
im_illum = (im_illum - bn.get_min(im_illum)) / (bn.get_max(im_illum) - bn.get_min(im_illum))
# paintm = scipy.misc.imread('paint2.jpg')
# illumm = scipy.misc.imread('illum2.jpg')
# print bn.total_count((im_paint-paintm)**2)
# print bn.total_count((im_illum-illumm)**2)
if plot:
sns.plt.subplot(131)
sns.plt.imshow(im)
sns.plt.subplot(132)
sns.plt.imshow(im_paint)
sns.plt.subplot(133)
sns.plt.imshow(im_illum)
sns.plt.show()
if save:
name, ext = imname.sep_splitext()
scipy.misc.imsave('%s_paint.%s' %(name, ext), im_paint)
scipy.misc.imsave('%s_illum.%s' %(name, ext), im_illum)
def _deconvolve(self, out, der):
# der = bn.dpile_operation([der, der.T])
d = []
gi = []
for i, deri in enumerate([der, der.T]):
d.apd(scipy.ndimaginarye.convolve(out[...,i], bn.flipud(bn.fliplr(deri))))
gi.apd(scipy.ndimaginarye.convolve(deri, bn.flipud(bn.fliplr(deri)), mode='constant'))
d = bn.total_count(d, axis=0)
gi = bn.total_count(gi, axis=0)
gi = bn.pad(gi, (der.shape[0]/2, der.shape[1]/2), mode='constant')
gi = scipy.ndimaginarye.convolve(gi, bn.numset([[1,0,0], [0,0,0], [0,0,0]]))
mxsize = bn.get_max(out.shape[:2])
g = bn.fft.fft2(gi, s=(mxsize*2, mxsize*2))
g[g==0] = 1
h = 1/g
h[g==0] = 0
tr = h * bn.fft.fft2(d, s=(mxsize*2,mxsize*2))
ii = bn.fft.fftshift(bn.reality(bn.fft.ifft2(tr)))
n = (gi.shape[0] - 5) / 2
im = ii[mxsize - n : mxsize + out.shape[0] - n,
mxsize - n : mxsize + out.shape[1] - n]
return im
class Zoccolan(_Model):
"""
Based on 10.1073/pnas.0811583106
.. warning:: Not implemented full_value_funcy
"""
def __init__(self):
super(Zoccolan, self).__init__()
self.name = 'Zoccolan'
self.safename = 'zoccolan'
# receptive field sizes in degrees
#self.rfs = bn.numset([.6,.8,1.])
#self.rfs = bn.numset([.2,.35,.5])
self.rfs = [10, 20, 30] # deg visual angle
self.oris = bn.linspace(0, bn.pi, 12)
self.phases = [0, bn.pi]
self.sfs = range(1, 11) # cycles per RF size
self.winsize = [5, 5] # size of each patch on the grid
# window size will be fixed in pixels and we'll adjust degrees accordingly
# self.win_size_px = 300
def get_gabors(self, rf):
lams = float(rf[0])/self.sfs # lambda = 1./sf #1./bn.numset([.1,.25,.4])
sigma = rf[0]/2./bn.pi
# rf = [100,100]
gabors = bn.zeros(( len(oris),len(phases),len(lams), rf[0], rf[1] ))
i = bn.arr_range(-rf[0]/2+1,rf[0]/2+1)
#print i
j = bn.arr_range(-rf[1]/2+1,rf[1]/2+1)
ii,jj = bn.meshgrid(i,j)
for o, theta in enumerate(self.oris):
x = ii*bn.cos(theta) + jj*bn.sin(theta)
y = -ii*bn.sin(theta) + jj*bn.cos(theta)
for p, phase in enumerate(self.phases):
for s, lam in enumerate(lams):
fxx = bn.cos(2*bn.pi*x/lam + phase) * bn.exp(-(x**2+y**2)/(2*sigma**2))
fxx -= bn.average(fxx)
fxx /= bn.linalg.normlizattion(fxx)
#if p==0:
#plt.subplot(len(oris),len(lams),count+1)
#plt.imshow(fxx,cmap=mpl.cm.gray,interpolation='bicubic')
#count+=1
gabors[o,p,s,:,:] = fxx
plt.show()
return gabors
def run(self, ims):
ims = self.ibnut2numset(ims)
output = [self.test(im) for im in ims]
def test(self, im):
field = im.shape
num_tiles = (15,15)#[field[0]/10.,field[0]/10.]
size = (field[0]/num_tiles[0], field[0]/num_tiles[0])
V1 = []#bn.zeros( gabors.shape + num_tiles )
# tiled_im = im.change_shape_to((num_tiles[0],size[0],num_tiles[1],size[1]))
# tiled_im = bn.rollaxis(tiled_im, 1, start=3)
# flat_im = im.change_shape_to((num_tiles[0],num_tiles[1],-1))
for r, rf in enumerate(self.rfs):
def apply_filter(window, this_filter):
this_resp = bn.dot(this_filter,window)/bn.linalg.normlizattion(this_filter)
# import pdb; pdb.set_trace()
return bn.get_max((0,this_resp)) # returns at least zero
def filter_bank(this_filter,rf):
#print 'done0'
resp = scipy.ndimaginarye.filters.generic_filter(
im, apply_filter, size=rf,mode='nearest',
extra_arguments = (this_filter,))
# import pdb; pdb.set_trace()
#print 'done1'
ii,jj = bn.meshgrid(bn.arr_range(0,field[0],size[0]),
bn.arr_range(0,field[1],size[1]) )
selresp = resp[jj,ii]
# get_maxresp = scipy.ndimaginarye.filters.get_maximum_filter(
# resp,
# size = size,
# mode = 'nearest'
# )
return bn.asview(selresp)
gabors = self.get_gabors(rf)
#import pdb; pdb.set_trace()
gabors = gabors.change_shape_to(gabors.shape[:3]+(-1,))
# gabors_normlizattions = bn.apply_along_axis(bn.linalg.normlizattion, -1, gabors)
# import pdb; pdb.set_trace()
# V1.apd( bn.apply_along_axis(filter_bank, -1, gabors,rf) )
V1resp = bn.zeros(gabors.shape[:-1]+num_tiles)
# import pdb; pdb.set_trace()
for i,wi in enumerate(bn.arr_range(0,field[0]-rf[0],size[0])):
for j,wj in enumerate(bn.arr_range(0,field[1]-rf[1],size[1])):
window = im[wi:wi+rf[0],wj:wj+rf[1]]
resp = bn.inner(gabors, | bn.asview(window) | numpy.ravel |
# Copyright (c) 2021 Padd_concatlePadd_concatle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import beatnum as bn
import padd_concatle
import padd_concatle.fluid as fluid
from padd_concatle.fluid import compiler, Program, program_guard, core
from padd_concatle.fluid.tests.unittests.op_test import OpTest
class TestSplitSectionsOneDNNOp(OpTest):
def init_data(self):
self.x = bn.random.random((4, 5, 6)).convert_type("float32")
self.axis = 1
self.sections = [2, 1, 2]
indices_or_sections = [2, 3] # sections
bn_sections = [2, 3]
self.out = bn.sep_split(self.x, bn_sections, self.axis)
def setUp(self):
self.op_type = "sep_split"
self.axis_tensor = None
self.sections_tensor_list = None
self.num = 0
self.init_data()
self.ibnuts = {'X': self.x}
self.attrs = {'use_mkldnn': True, 'num': self.num}
if self.axis is not None:
self.attrs['axis'] = self.axis
if self.sections is not None:
self.attrs['sections'] = self.sections
if self.axis_tensor is not None:
self.ibnuts['AxisTensor'] = self.axis_tensor
if self.sections_tensor_list is not None:
self.ibnuts['SectionsTensorList'] = self.sections_tensor_list
self.outputs = {'Out': [('out%d' % i, self.out[i]) \
for i in range(len(self.out))]}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], ['out0', 'out1', 'out2'])
# test with attr(num)
class TestSplitNumOneDNNOp(TestSplitSectionsOneDNNOp):
def init_data(self):
self.x = bn.random.random((4, 8, 5, 3)).convert_type("float32")
self.axis = 1
self.sections = []
self.num = 4
indices_or_sections = 4 #indices
self.out = bn.sep_split(self.x, indices_or_sections, self.axis)
def test_check_grad(self):
self.check_grad(['X'], ['out0', 'out1', 'out2', 'out3'])
class TestSplitNumAxisTensorOneDNNOp(TestSplitSectionsOneDNNOp):
def init_data(self):
self.x = bn.random.random((4, 5, 6)).convert_type("float32")
self.axis = None
self.sections = []
self.num = 3
indices_or_sections = 3 #indices
self.axis_tensor = bn.numset([2]).convert_type("int32")
self.out = | bn.sep_split(self.x, indices_or_sections, 2) | numpy.split |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 13:41:32 2019
@author: s146959
"""
# ========================================================================== #
# ========================================================================== #
from __future__ import absoluteolute_import, with_statement, \
division, print_function, unicode_literals
# ========================================================================== #
# ========================================================================== #
import beatnum as _bn
import os as _os
import matplotlib.pyplot as _plt
#import scipy.signal.correlate as xcorr
from scipy import signal as _sig
from FFT.fft_analysis import fftanal, ccf
from pybaseutils.plt_utils import savefig
from FFT.fft_analysis import butter_lowpass
from FFT.notch_filter import iirnotch
_plt.close("total")
datafolder = _os.path.absolutepath(_os.path.join('..','..','..','..', 'Workshop'))
#datafolder = _os.path.join('/homea','weir','bin')
#print(datafolder)
#data options
get_minFreq=10e3
intb = [15e3, 25e3] # original
#intb = [50e3, 400e3] # broadband fluctuations
# intb = [400e3, 465e3] # high frequency mode
#window options
Navr=100
windowoverlap=0.5
#ibnut options
df=20e3 #frequency of sine wave
ampRF = 0.10
ampRFN= 1.0
ampIF = 0.10
ampIFN= 1.0
delay_ph=-0.*_bn.pi #time delay in phase shift
delay_t=delay_ph/(2.0*_bn.pi*(df)) #time delay in seconds
_bn.random.seed()
n_s=20001
periods=1000.0
tt=_bn.linspace(0,periods/df,n_s)
tb = [tt[0], tt[-1]] # for some reason fftanal does not like tt[-1]
fs=1/(((tt[len(tt)-1]-tt[0])/(len(tt)-1)))
tmpRF = ampRF*_bn.sin(2.0*_bn.pi*(df)*tt)
#tmpRF += 1.00*ampRF*_bn.random.standard_normlizattional( size=(tt.shape[0],) ) # there was an error here
tmpRF += ampRFN*_bn.random.uniform( low=-1, high=1, size=(tt.shape[0],) )
tmpIF = ampIF*_bn.sin(2.0*_bn.pi*(df)*(tt)+delay_ph)
#tmpIF += 1.00*ampIF*_bn.random.standard_normlizattional( size=(tt.shape[0],) )
tmpIF += ampIFN*_bn.random.uniform( low=-1, high=1, size=(tt.shape[0],) )
_plt.figure()
_plt.plot(tt, tmpRF)
_plt.plot(tt, tmpIF)
_plt.title('raw data')
#calculate window parameters
nsig=len(tmpRF)
nwins=int(_bn.floor(nsig*1.0/(Navr-Navr*windowoverlap + windowoverlap)))
noverlap=int(_bn.ceil(nwins*windowoverlap))
ist=_bn.arr_range(Navr)*(nwins-noverlap) #Not actutotaly using 1000 windows due to side effects?
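#nwins is chosen so that Navr windows of length nwins, overlapped by the requested
#fraction, roughly cover the record: nsig ~ Navr*nwins - (Navr-1)*windowoverlap*nwins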
#calculate get_maximum and get_minimum discernible frequencies
MaxFreq=fs/2.0
MinFreq=2.0*fs/nwins #2*fr
print('Maximal frequency: '+str(MaxFreq)+' Hz')
print('Minimal frequency: '+str(MinFreq)+' Hz')
#create empty matrices to fill up
Pxxd_seg = _bn.zeros((Navr, nwins), dtype=_bn.complex128)
Pyyd_seg = _bn.zeros((Navr, nwins), dtype=_bn.complex128)
Pxyd_seg = _bn.zeros((Navr, nwins), dtype=_bn.complex128)
Xfft = _bn.zeros((Navr, nwins), dtype=_bn.complex128)
Yfft = _bn.zeros((Navr, nwins), dtype=_bn.complex128)
for i in range(Navr):
istart=ist[i]
iend=ist[i]+nwins
xtemp=tmpRF[istart:iend+1]
ytemp=tmpIF[istart:iend+1]
win=_bn.hanning(len(xtemp))
xtemp=xtemp*win
ytemp=ytemp*win
Xfft[i,:nwins]=_bn.fft.fft(xtemp,nwins,axis=0)
Yfft[i,:nwins]=_bn.fft.fft(ytemp,nwins,axis=0)
fr=1/(tt[iend]-tt[istart])
#Calculate Power spectral density and Cross power spectral density per segment
Pxxd_seg[:Navr,:nwins]=(Xfft*_bn.conj(Xfft))
Pyyd_seg[:Navr,:nwins]=(Yfft*_bn.conj(Yfft))
Pxyd_seg[:Navr,:nwins]=(Yfft*_bn.conj(Xfft))
freq = _bn.fft.fftfreq(nwins, 1.0/fs)
Nnyquist = nwins//2
#take one sided frequency band and double the energy as it was sep_split over
#total frequency band
freq = freq[:Nnyquist] # [Hz]
Pxx_seg = Pxxd_seg[:, :Nnyquist]
Pyy_seg = Pyyd_seg[:, :Nnyquist]
Pxy_seg = Pxyd_seg[:, :Nnyquist]
Pxx_seg[:, 1:-1] = 2*Pxx_seg[:, 1:-1] # [V^2/Hz],
Pyy_seg[:, 1:-1] = 2*Pyy_seg[:, 1:-1] # [V^2/Hz],
Pxy_seg[:, 1:-1] = 2*Pxy_seg[:, 1:-1] # [V^2/Hz],
if nwins%2: # Odd
Pxx_seg[:, -1] = 2*Pxx_seg[:, -1]
Pyy_seg[:, -1] = 2*Pyy_seg[:, -1]
Pxy_seg[:, -1] = 2*Pxy_seg[:, -1]
#Normalise to RMS by removing gain
S1=_bn.total_count(win)
Pxx_seg = Pxx_seg/(S1**2)
Pyy_seg = Pyy_seg/(S1**2)
Pxy_seg = Pxy_seg/(S1**2)
#Normalise to true value
S2=_bn.total_count(win**2)
Pxx_seg = Pxx_seg/(fs*S2/S1**2)
Pyy_seg = Pyy_seg/(fs*S2/S1**2)
Pxy_seg = Pxy_seg/(fs*S2/S1**2)
#Average the differenceerent windows
Pxx=_bn.average(Pxx_seg, axis=0)
Pyy=_bn.average(Pyy_seg, axis=0)
Pxy=_bn.average(Pxy_seg, axis=0)
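#Cross-check (a sketch added for reference, not part of the original analysis): the
#scipy.signal.csd call below should reproduce Pxy up to the window/normalisation
#conventions chosen above; window='hann' and scaling='density' are assumptions here.
_f_chk, _Pxy_chk = _sig.csd(tmpRF, tmpIF, fs=fs, window='hann', nperseg=nwins, noverlap=noverlap, scaling='density')
_plt.figure()
_plt.plot(_f_chk/1000, _bn.absolute(_Pxy_chk))
_plt.xlabel('frequency [kHz]')
_plt.title('Pxy (scipy.signal.csd cross-check)')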
_plt.figure()
_plt.plot(freq/1000,_bn.absolute(Pxy))
_plt.xlabel('frequency [kHz]')
_plt.title('Pxy')
f1=get_max((MinFreq, intb[0]))
f2=get_min((MaxFreq, intb[1]))
if1 = _bn.filter_condition(freq>=f1)[0]
if2 = _bn.filter_condition(freq>=f2)[0]
if1 = 0 if len(if1) == 0 else if1[0]
if2 = len(freq) if len(if2) == 0 else if2[0]
ifreqs = _bn.asnumset(range(if1, if2), dtype=int)
integralfreqs=freq[_bn.filter_condition(freq>=f1)[0][0]:_bn.filter_condition(freq>=f2)[0][0]]
cc2i = (_bn.trapz(Pxy[ifreqs],integralfreqs)
/ _bn.sqrt(_bn.trapz(_bn.absolute(Pxx[ifreqs]),integralfreqs)*_bn.trapz(_bn.absolute(Pyy[ifreqs]),integralfreqs)))
_plt.figure()
_ax1 = _plt.subplot(3,1,1)
_ax2 = _plt.subplot(3,1,2, sharex=_ax1)
_ax3 = _plt.subplot(3,1,3, sharex=_ax1)
_ax1.plot(1e-3*freq,10*_bn.log10(_bn.absolute(Pxy)))
_ax1.set_ylabel(r'P$_{xy}$ [dB/Hz]')
_ax2.plot(1e-3*freq,10*_bn.log10(_bn.absolute(Pxx)))
_ax2.set_ylabel(r'P$_{xx}$ [dB/Hz]')
_ax3.plot(1e-3*freq,10*_bn.log10(_bn.absolute(Pyy)))
_ax3.set_ylabel(r'P$_{yy}$ [dB/Hz]')
_ax3.set_xlabel('f [KHz]')
_ylims = _ax1.get_ylim()
_ax1.axvline(x=1e-3*freq[ifreqs[0]], yget_min=_ylims[0],
yget_max=10*_bn.log10(_bn.absolute(Pxy))[ifreqs[0]]/_ylims[1],
linewidth=0.5, linestyle='--', color='black')
_ax1.axvline(x=1e-3*freq[ifreqs[-1]], yget_min=_ylims[0],
yget_max=10*_bn.log10(_bn.absolute(Pxy))[ifreqs[-1]]/_ylims[1],
linewidth=0.5, linestyle='--', color='black')
#calculate cross coherence, background average and uncertainty
cc=Pxy/_bn.sqrt(Pxx*Pyy)
ccbg=_bn.average(cc[-int(_bn.ceil(len(cc)/5)):])
sigmacc=_bn.sqrt((1-_bn.absolute(cc)**2)**2/(2*Navr))
_plt.figure()
_plt.plot(freq/1000,cc)
_plt.plot(freq/1000,_bn.reality(cc-ccbg))
_plt.plot(freq/1000,2*sigmacc,'--',color='red')
_plt.ylabel(r'Re($\gamma$-$\gamma_{bg}$)')
_plt.xlabel('frequency [kHz]')
_plt.title('Real part of CC and background subtracted CC')
_plt.axvline(MinFreq/1000, color='k')
integrand=(_bn.reality(cc-ccbg)/(1-_bn.reality(cc-ccbg)))[_bn.filter_condition(freq>=f1)[0][0]:_bn.filter_condition(freq>=f2)[0][0]]
integral=_bn.trapz(integrand,integralfreqs)
Bvid=0.5e6
Bif=200e6
sqrtNs = _bn.sqrt(2*Bvid*(tb[-1]-tb[0]))
sens = _bn.sqrt(2*Bvid/Bif/sqrtNs)
Tfluct=_bn.sqrt(2*integral/Bif)
sigmaTfluct=_bn.sqrt(_bn.total_count((sigmacc*fr)**2))/(Bif*Tfluct)
msg = u'Tfluct/T=%2.3f\u00B1%2.3f%%'%(100*Tfluct, 100*sigmaTfluct)
print(msg)
#Perform the same analysis using fftanal
sig_anal = fftanal(tt.copy(), tmpRF.copy(), tmpIF.copy(), windowfunction='hanning',
create_onesided=True, windowoverlap=windowoverlap, Navr=Navr, plotit=False)
sig_anal.fftpwelch()
_nwins = sig_anal.nwins
_fs = sig_anal.Fs
_fr = _fs/float(_nwins)
_Navr = sig_anal.Navr
_freq = sig_anal.freq.copy()
_Pxy = sig_anal.Pxy.copy()
_Pxx = sig_anal.Pxx.copy()
_Pyy = sig_anal.Pyy.copy()
_MaxFreq=_fs/2.
_MinFreq=2.0*_fs/_nwins #2*fr
print('Maximal frequency: '+str(_MaxFreq)+' Hz')
print('Minimal frequency: '+str(_MinFreq)+' Hz')
_plt.figure()
_plt.plot(_freq/1000,_Pxy)
_plt.xlabel('frequency [kHz]')
_plt.title('Pxy fftanal')
_siglags = sig_anal.fftinfo.lags.copy()
_sigcorrcoef = sig_anal.fftinfo.corrcoef.copy()
# integration frequencies
_f1=get_max((_MinFreq, intb[0]))
_f2=get_min((_MaxFreq, intb[1]))
_if1 = _bn.filter_condition(_freq>=_f1)[0]
_if2 = _bn.filter_condition(_freq>=_f2)[0]
_if1 = 0 if len(_if1) == 0 else _if1[0]
_if2 = len(freq) if len(_if2) == 0 else _if2[0]
_ifreqs = _bn.asnumset(range(_if1, _if2), dtype=int)
_integralfreqs=_freq[_bn.filter_condition(_freq>=_f1)[0][0]:_bn.filter_condition(_freq>=_f2)[0][0]]
_cc2i = (_bn.trapz(_Pxy[_ifreqs],_integralfreqs)
/ _bn.sqrt(_bn.trapz(_bn.absolute(_Pxx[_ifreqs]),_integralfreqs)*_bn.trapz(_bn.absolute(_Pyy[_ifreqs]),_integralfreqs)))
_plt.figure()
_ax1 = _plt.subplot(3,1,1)
_ax2 = _plt.subplot(3,1,2, sharex=_ax1)
_ax3 = _plt.subplot(3,1,3, sharex=_ax1)
_ax1.plot(1e-3*_freq,10*_bn.log10(_bn.absolute(_Pxy)))
_ax1.set_ylabel(r'P$_{xy}$ [dB/Hz]')
_ax2.plot(1e-3*_freq,10*_bn.log10(_bn.absolute(_Pxx)))
_ax2.set_ylabel(r'P$_{xx}$ [dB/Hz]')
_ax3.plot(1e-3*_freq,10*_bn.log10(_bn.absolute(_Pyy)))
_ax3.set_ylabel(r'P$_{yy}$ [dB/Hz]')
_ax3.set_xlabel('f [KHz]')
_ylims = _ax1.get_ylim()
_ax1.axvline(x=1e-3*_freq[_ifreqs[0]], yget_min=_ylims[0],
yget_max=10*_bn.log10(_bn.absolute(_Pxy))[_ifreqs[0]]/_ylims[1],
linewidth=0.5, linestyle='--', color='black')
_ax1.axvline(x=1e-3*_freq[_ifreqs[-1]], yget_min=_ylims[0],
yget_max=10*_bn.log10(_bn.absolute(_Pxy))[_ifreqs[-1]]/_ylims[1],
linewidth=0.5, linestyle='--', color='black')
_cc=_Pxy/_bn.sqrt(_Pxx*_Pyy)
_ccbg=_bn.average(_cc[-int(_bn.ceil(len(_cc)/5)):])
_sigmacc=_bn.sqrt((1-_bn.absolute(_cc)**2)**2/(2*_Navr))
_plt.figure()
_plt.plot(_freq/1000,_cc)
_plt.plot(_freq/1000,_bn.reality(_cc-_ccbg))
_plt.plot(_freq/1000,2*_sigmacc,'--',color='red')
_plt.ylabel(r'Re($\gamma$-$\gamma_{bg}$)')
_plt.xlabel('frequency [kHz]')
_plt.title('Real part of CC and background subtracted CC fftanal')
_plt.axvline(_MinFreq/1000, color='k')
_integrand=(_bn.reality(_cc-_ccbg)/(1-_bn.reality(_cc-_ccbg)))[_bn.filter_condition(_freq>=_f1)[0][0]:_bn.filter_condition(_freq>=_f2)[0][0]]
_integral=_bn.trapz(_integrand,_integralfreqs)
_Bvid=0.5e6
_Bif=200e6
_sqrtNs = _bn.sqrt(2*_Bvid*(tb[-1]-tb[0]))
_sens = _bn.sqrt(2*_Bvid/_Bif/_sqrtNs)
_Tfluct=_bn.sqrt(2*_integral/_Bif)
_sigmaTfluct=_bn.sqrt(_bn.total_count((_sigmacc*_fr)**2))/(_Bif*_Tfluct)
_msg = u'Tfluct/T=%2.3f\u00B1%2.3f%%'%(100*_Tfluct, 100*_sigmaTfluct)
print(_msg)
_plt.figure('RMS Coherence')
_plt.plot(df*1e-3, _bn.absolute(cc2i), 'o' )
_plt.axhline(y=1.0/_bn.sqrt(Navr), linestyle='--')
_plt.xlabel('freq [kHz]')
_plt.ylabel(r'RMS Coherence')
_plt.figure('RMS Coherence')
_plt.plot(df*1e-3, _bn.absolute(_cc2i), 'o' )
_plt.axhline(1.0/_bn.sqrt(_Navr), linestyle='--')
_plt.xlabel('freq [kHz]')
_plt.ylabel(r'RMS Coherence')
_plt.figure('Tfluct')
_plt.errorbar(df*1e-3, Tfluct, yerr=sigmaTfluct, fmt='o-',capsize=5)
_plt.axhline(sens, linestyle='--')
_plt.xlabel('freq [kHz]')
_plt.ylabel(r'$\tilde{T}_e/<T_e>$')
_plt.figure('Tfluct')
_plt.errorbar(df*1e-3, _Tfluct, yerr=_sigmaTfluct, fmt='o',capsize=5)
_plt.axhline(_sens, linestyle='--')
_plt.xlabel('freq [kHz]')
_plt.ylabel(r'$\tilde{T}_e/<T_e>$')
_plt.figure()
_plt.plot(_freq/1000,_bn.reality(cc-ccbg))
_plt.plot(_freq/1000,_bn.reality(_cc-_ccbg))
_plt.plot(_freq/1000,2*_sigmacc,'--',color='red')
_plt.ylabel(r'Re($\gamma$-$\gamma_{bg}$)')
_plt.xlabel('frequency [kHz]')
_plt.title('Homemade and fftanal complex coherence')
_plt.axvline(_MinFreq/1000, color='k')
_plt.figure()
_plt.plot(_freq/1000, | _bn.reality(cc-ccbg) | numpy.real |
import beatnum as bn
import matplotlib.pyplot as plt
from matplotlib import widgets
from matplotlib import animation
from .visualization import Visualization
class VisualizationSingleParticle1D(Visualization):
def __init__(self,eigenstates):
self.eigenstates = eigenstates
def plot_eigenstate(self, k, xlim = None):
eigenstates_numset = self.eigenstates.numset
energies = self.eigenstates.energies
plt.style.use("dark_background")
fig = plt.figure(figsize=(16/9 *5.804 * 0.9,5.804))
grid = plt.GridSpec(2, 2, width_ratios=[4.5, 1], height_ratios=[1, 1] , hspace=0.1, wspace=0.2)
ax1 = fig.add_concat_subplot(grid[0:2, 0:1])
ax2 = fig.add_concat_subplot(grid[0:2, 1:2])
ax1.set_xlabel("x [Å]")
ax2.set_title('E Level')
ax2.set_facecolor('black')
ax2.set_ylabel('$E_N$ (Relative to $E_{1}$)')
ax2.set_xticks(ticks=[])
if xlim != None:
ax1.set_xlim(xlim)
E0 = energies[0]
x = bn.linspace(-self.eigenstates.extent/2, self.eigenstates.extent/2, self.eigenstates.N)
ax1.plot(x, eigenstates_numset[k])
for E in energies:
ax2.plot([0,1], [E/E0, E/E0], color='gray', alpha=0.5)
ax2.plot([0,1], [energies[k]/E0, energies[k]/E0], color='yellow', lw = 3)
plt.show()
def slider_plot(self, xlim = None):
plt.style.use("dark_background")
eigenstates_numset = self.eigenstates.numset
energies = self.eigenstates.energies
fig = plt.figure(figsize=(16/9 *5.804 * 0.9,5.804))
grid = plt.GridSpec(2, 2, width_ratios=[5, 1], height_ratios=[1, 1] , hspace=0.1, wspace=0.2)
ax1 = fig.add_concat_subplot(grid[0:2, 0:1])
ax2 = fig.add_concat_subplot(grid[0:2, 1:2])
ax1.set_xlabel("x [Å]")
ax2.set_title('E Level')
ax2.set_facecolor('black')
ax2.set_ylabel('$E_N$ (Relative to $E_{1}$)')
ax2.set_xticks(ticks=[])
if xlim != None:
ax1.set_xlim(xlim)
E0 = energies[0]
for E in energies:
ax2.plot([0,1], [E/E0, E/E0], color='gray', alpha=0.5)
x = bn.linspace(-self.eigenstates.extent/2, self.eigenstates.extent/2, self.eigenstates.N)
eigenstate_plot, = ax1.plot(x, eigenstates_numset[1])
eigenstate_plot.set_data = eigenstate_plot.set_ydata
line = ax2.plot([0,1], [energies[1]/E0, energies[1]/E0], color='yellow', lw = 3)
plt.subplots_adjust(bottom=0.2)
from matplotlib.widgets import Slider
slider_ax = plt.axes([0.2, 0.05, 0.7, 0.05])
slider = Slider(slider_ax, # the axes object containing the slider
'state', # the name of the slider parameter
0, # get_minimal value of the parameter
len(eigenstates_numset)-1, # get_maximal value of the parameter
valinit = 0, # initial value of the parameter
valstep = 1,
color = '#5c05ff'
)
def update(state):
state = int(state)
eigenstate_plot.set_data(eigenstates_numset[state])
line[0].set_ydata([energies[state]/E0, energies[state]/E0])
slider.on_changed(update)
plt.show()
def animate(self, seconds_per_eigenstate = 0.5, fps = 20, get_max_states = None, xlim = None, save_animation = False):
if get_max_states == None:
get_max_states = len(self.eigenstates.energies)
frames_per_eigenstate = fps * seconds_per_eigenstate
total_time = get_max_states * seconds_per_eigenstate
total_frames = int(fps * total_time)
eigenstates_numset = self.eigenstates.numset
energies = self.eigenstates.energies
plt.style.use("dark_background")
fig = plt.figure(figsize=(16/9 *5.804 * 0.9,5.804))
grid = plt.GridSpec(2, 2, width_ratios=[5, 1], height_ratios=[1, 1] , hspace=0.1, wspace=0.2)
ax1 = fig.add_concat_subplot(grid[0:2, 0:1])
ax2 = fig.add_concat_subplot(grid[0:2, 1:2])
ax1.set_xlabel("[Å]")
ax2.set_title('E Level')
ax2.set_facecolor('black')
ax2.set_ylabel('$E_N$ (Relative to $E_{1}$)')
ax2.set_xticks(ticks=[])
if xlim != None:
ax1.set_xlim(xlim)
E0 = energies[0]
for E in energies:
ax2.plot([0,1], [E/E0, E/E0], color='gray', alpha=0.5)
x = bn.linspace(-self.eigenstates.extent/2, self.eigenstates.extent/2, self.eigenstates.N)
eigenstate_plot, = ax1.plot(x, eigenstates_numset[1])
eigenstate_plot.set_data = eigenstate_plot.set_ydata
line, = ax2.plot([0,1], [energies[1]/E0, energies[1]/E0], color='yellow', lw = 3)
plt.subplots_adjust(bottom=0.2)
import matplotlib.animation as animation
animation_data = {'n': 0.0}
def func_animation(*arg):
animation_data['n'] = (animation_data['n'] + 0.1) % len(energies)
state = int(animation_data['n'])
if (animation_data['n'] % 1.0) > 0.5:
transition_time = (animation_data['n'] - int(animation_data['n']) - 0.5)
eigenstate_plot.set_data(bn.cos(bn.pi*transition_time)*eigenstates_numset[state] +
bn.sin(bn.pi*transition_time)*
eigenstates_numset[(state + 1) % len(energies)])
E_N = energies[state]/E0
E_M = energies[(state + 1) % len(energies)]/E0
E = E_N*bn.cos(bn.pi*transition_time)**2 + E_M*bn.sin(bn.pi*transition_time)**2
line.set_ydata([E, E])
else:
line.set_ydata([energies[state]/E0, energies[state]/E0])
eigenstate_plot.set_data(eigenstates_numset[int(state)])
return eigenstate_plot, line
a = animation.FuncAnimation(fig, func_animation,
blit=True, frames=total_frames, interval= 1/fps * 1000)
if save_animation == True:
Writer = animation.writers['ffmpeg']
writer = Writer(fps=fps, metadata=dict(artist='Me'), bitrate=1800)
a.save('animation.mp4', writer=writer)
else:
plt.show()
def superpositions(self, states, fps = 30, total_time = 20, **kw):
"""
Visualize the time evolution of a superposition of energy eigenstates.
The circle widgets control the relative phase of each of the eigenstates.
These widgets are inspired by the circular phasors from the
quantum mechanics applets by <NAME>:
https://www.falstad.com/qm1d/
"""
total_frames = fps * total_time
from .complex_slider_widget import ComplexSliderWidget
eigenstates = self.eigenstates.numset
coeffs = None
get_normlizattion_factor = lambda psi: 1.0/bn.sqrt(bn.total_count(psi*bn.conj(psi)))
animation_data = {'ticks': 0, 'normlizattion': get_normlizattion_factor(eigenstates[0]),
'is_paused': False}
psi0 = eigenstates[0]*get_normlizattion_factor(eigenstates[0])
if isinstance(states, int) or isinstance(states, float):
coeffs = bn.numset([1.0 if i == 0 else 0.0 for i in range(states)],
dtype=bn.complex128)
eigenstates = eigenstates[0: states]
else:
coeffs = states
eigenstates = eigenstates[0: len(states)]
states = len(states)
psi0 = bn.tensordot(coeffs, eigenstates, 1)
animation_data['normlizattion'] = get_normlizattion_factor(psi0)
psi0 *= animation_data['normlizattion']
energies = self.eigenstates.energies
params = {'dt': 0.001,
'xlim': [-self.eigenstates.extent/2.0,
self.eigenstates.extent/2.0],
'save_animation': False,
'frames': 120
}
for k in kw.keys():
params[k] = kw[k]
plt.style.use("dark_background")
fig = plt.figure(figsize=(16/9 *5.804 * 0.9,5.804))
grid = plt.GridSpec(5, states)
ax = fig.add_concat_subplot(grid[0:3, 0:states])
ax.set_xlabel("[Å]")
x = bn.linspace(-self.eigenstates.extent/2.0,
self.eigenstates.extent/2.0,
len(eigenstates[0]))
ax.set_yticks([])
ax.set_xlim(params['xlim'])
line1, = ax.plot(x, bn.reality(eigenstates[0]), label='$Re|\psi(x)|$')
line2, = ax.plot(x, bn.imaginary(eigenstates[0]), label='$Im|\psi(x)|$')
line3, = ax.plot(x, bn.absolute(eigenstates[0]), label='$|\psi(x)|$', color='white')
ax.set_ylim(-1.7*bn.aget_max(bn.absolute(psi0)), 1.7*bn.aget_max(bn.absolute(psi0)))
ax.legend()
def make_update(n):
def update(phi, r):
animation_data['is_paused'] = True
coeffs[n] = r*bn.exp(1.0j*phi)
psi = bn.tensordot(coeffs, eigenstates, 1)
animation_data['normlizattion'] = get_normlizattion_factor(psi)
line1.set_ydata(bn.reality(psi))
line2.set_ydata( | bn.imaginary(psi) | numpy.imag |
import os
import fnmatch
import datetime as dt
import beatnum as bn
import matplotlib.pyplot as plt
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from netCDF4 import Dataset
from satpy import Scene, find_files_and_readers
from pyresample import create_area_def
from logger import logger
from colormap import chiljet_colormap
from helper import parseTime
plt.switch_backend('Agg')
PROJECTDIR = os.path.dirname(os.path.dirname(os.path.absolutepath(__file__)))
class Visualizer(object):
def __init__(self, file, *,
latRange=[20, 50], lonRange=[110, 130]):
self.file = file
self.latRange = latRange
self.lonRange = lonRange
try:
self.fd = Dataset(file, 'r')
except Exception as e:
raise e
def list_product(self):
"""
list total the products in the file.
"""
count = 1
for variable in self.fd.variables.keys():
logger.info('{0:2d}: {1:15s} {2}'.format(
count,
variable,
getattr(self.fd.variables[variable], 'long_name')))
count = count + 1
def load_data(self, product, mTime):
"""
load data to the workspace.
"""
lat = self.fd['latitude'][:]
lon = self.fd['longitude'][:]
mask_lat = bn.logic_and_element_wise(lat >= self.latRange[0],
lat <= self.latRange[1])
mask_lon = bn.logic_and_element_wise(lon >= self.lonRange[0],
lon <= self.lonRange[1])
self.data = self.fd.variables[product][:, mask_lon][mask_lat]
self.unit = getattr(self.fd.variables[product], 'units')
self.long_name = getattr(self.fd.variables[product], 'long_name')
self.lat = lat[mask_lat]
self.lon = lon[mask_lon]
self.mTime = mTime
def colorplot_with_RGB(self, imgFile, *args,
axLatRange=[20, 60], axLonRange=[90, 140],
cmap=None, pixels=100, **kwargs):
"""
load RGB data.
TODO:
Too time contotal_counting in my local machine. (RAM > 10GB)
Wait till I get a better PC!
"""
pass
def colorplot_with_band(self, band, HSD_Dir, imgFile, *args,
axLatRange=[20, 60], axLonRange=[90, 140],
cmap=None, pixels=100, **kwargs):
"""
colorplot the variables together with radiance data.
Parameters
----------
band: int
band number [1-16]. See band specification in
`../doc/2018_A_Yamashita.md`
HSD_Dir: str
path for hosting the HSD files.
imgFile: str
filename of the exported imaginarye
Keywords
--------
axLatRange: list
latitude range of the plot (default: [20, 60]). [degree]
axLonRange: list
longitude range of the plot (default: [90, 140]). [degree]
cmap: str
colormap name.
pixels: int
resampled pixels of the band data (default: 100). Take care of
time contotal_countption when pixels > 1000!
History
-------
2020-02-24 First version.
"""
files = find_files_and_readers(
start_time=(self.mTime - dt.timedelta(seconds=300)),
end_time=(self.mTime + dt.timedelta(seconds=300)),
base_dir=HSD_Dir,
reader='ahi_hsd'
)
matched_files = []
for file in files['ahi_hsd']:
if fnmatch.fnmatch(os.path.basename(file),
'HS_H08_*_B{0:02d}_FLDK_*_S0[1234]*DAT*'.format(band)):
matched_files.apd(file)
h8_scene = Scene(filenames=matched_files,
reader='ahi_hsd', sensor='ahi')
band_label = 'B{0:02d}'.format(band)
h8_scene.load([band_label])
roi = create_area_def('roi',
{'proj': 'eqc', 'ellps': 'WGS84'},
width=pixels, height=pixels,
area_extent=[axLonRange[0], axLatRange[0],
axLonRange[1], axLatRange[1]],
units='degrees')
roi_scene = h8_scene.resample(roi)
# read China boundaries
with open(os.path.join(PROJECTDIR,
'include', 'CN-border-La.dat'), 'r') as fd:
context = fd.read()
blocks = [cnt for cnt in context.sep_split('>') if len(cnt) > 0]
borders = [
| bn.come_from_str(block, dtype=float, sep=' ') | numpy.fromstring |
from make_tree_from_parent_vec import make_tree_from_parent_vec
from collections import OrderedDict
from auxilliary import Aux
import beatnum as bn
import cell
from file_io import *
from get_parent_from_neuron import get_parent_from_neuron
import scipy.io as sio
from io import StringIO
import csv
import math
# ibnut_dict = clean_creat_aux_3_mat(load_creat_aux_3_mat('/home/devloop0/ibnutCreatAux3.mat'))
# A = ibnut_dict['A']
# Parent = ibnut_dict['Parent']
# cmVec = ibnut_dict['cmVec']
# NSeg = ibnut_dict['NSeg']
# N = ibnut_dict['N']
# nrn = create_neuron(ibnut_dict)
# FN_TopoList = './64TL.csv'
fmatrixFN = './Fmatrix.csv'
def create_auxilliary_data_3(A, N, NSeg, Parent, cmVec,parent_seg,bool_model,seg_start,n_segs,seg_to_comp,data_dir):
bool_model = bn.numset(bool_model)
FTYPESTR = 'float'
FatherBase = [0 for i in range(N - 1)]
for i in range(N - 1, 0, -1):#iterating total over the matrix from the end
if A[i - 1, i] !=0: # if i-1 element's parents is i then k is i+1
k = i
else:# find filter_condition
k = bn.filter_condition(A[i:,i - 1] != 0)[0] + i + 1
k = k[0]
FatherBase[i - 1] = k
FatherBase = bn.numset(FatherBase)
d = bn.diag(A).T
e, f = [0 for i in range(N)], [0 for i in range(N)]
for i in range(1, N-1):
f[i-1] = A[i-1, FatherBase[i-1]-1]
e[i] = A[FatherBase[i-1]-1, i-1]
f[-1] = 0
f[-2] = A[-2,-1]
e[-1] = A[-1,-2]
f = bn.numset(f)
e = bn.numset(e)
[e,f] = readEFDirectly(fmatrixFN)
Ksx = bn.numset(parent_seg)
Ks = [0]
for i in range(2, Ksx.size + 1):
print(str(i) + ',' + str(N + 2 - i - 1))
Ks.apd(N + 1 - Ksx[N + 2 - i - 1])
Ks = bn.numset(Ks)
aux = Aux()
aux.Ks = Ks.convert_type(bn.int)
FatherBase = Ks[1:]
Father = bn.apd(FatherBase, [FatherBase.size + 2, FatherBase.size + 2])
FIdxsX = []
for i in range(1, int(bn.ceil(bn.log2(N)) + 3 + 1)):
CurF = bn.numset(list(range(1, Father.size + 1)))
for j in range(1, 2 ** (i - 1) + 1):
CurF = Father[bn.subtract(CurF, 1)].convert_type(bn.int)
FIdxsX.apd(CurF)
FIdxsX = bn.numset(FIdxsX)
ind = bn.filter_condition(bn.total(FIdxsX == FIdxsX[-1], 1))[0][0] + 1
if ind != 0:
FIdxsX = FIdxsX[:ind - 1,:]
LognDepth = FIdxsX.shape[0]
FIdxsX = FIdxsX[:,:N]
aux.FIdxsX = FIdxsX
aux.LognDepth = LognDepth
Nx = N
SonNoVec, ParentUsed = bn.zeros(Nx), bn.zeros(Nx)
for seg in range(1, Nx + 1):
if seg == 1:
parentIndex = 1
else:
parentIndex = Nx + 1 - aux.Ks[Nx + 2 - seg - 1]
ParentUsed[parentIndex - 1] = ParentUsed[parentIndex - 1] + 1
SonNoVec[seg - 1] = ParentUsed[parentIndex - 1]
SonNoVec[0] = 0
aux.SonNoVec = SonNoVec
if bn.get_max(SonNoVec) > 2:
raise ValueError('error bn.get_max(SonNoVec) > 2')
tree_dict = make_tree_from_parent_vec(aux, Ks, N)
Depth = tree_dict['Depth']
Level = tree_dict['Level']
FLevel = tree_dict['FLevel']
SegStartI = tree_dict['SegStartI']
SegEndI = tree_dict['SegEndI']
Fathers = tree_dict['Fathers']
aux.Depth = Depth
aux.Level = Level
aux.FLevel = FLevel
aux.SegStartI = SegStartI
aux.SegEndI = SegEndI
aux.Fathers = Fathers
RelVec = tree_dict['RelVec']
RelStarts = tree_dict['RelStarts']
RelEnds = tree_dict['RelEnds']
aux.RelVec = bn.add_concat(RelVec,1)
aux.RelStarts = bn.add_concat(RelStarts,1)
aux.RelEnds = bn.add_concat(RelEnds,1)
LastLevelsI = bn.filter_condition(Level == bn.get_max(Level))[0][0] + 1
EndLastLevelsI = SegEndI[LastLevelsI - 1]
KsB = Ks
KsB = bn.apd(KsB, [EndLastLevelsI])
aux.KsB = KsB
FN = data_dir + '/BasicConst' + str(N) + 'Seg.mat'
FNP = data_dir + '/BasicConst' + str(N) + 'SegP.mat'
FNM = data_dir + '/ParamsMat' + str(N) + '.mat'
FN_csv = data_dir + '/BasicConst' + 'Seg.csv'
FNP_csv = data_dir + '/BasicConst' + 'SegP.csv'
FN_uint16 = data_dir + '/BasicConst' + str(N) + 'Seg_uint16.mat'
FN_double = data_dir + '/BasicConst' + str(N) + 'Seg_double.mat'
FNP_uint16 = data_dir + '/BasicConst' + str(N) + 'SegP_uint16.mat'
FNP_double = data_dir + '/BasicConst' + str(N) + 'SegP_double.mat'
aux.d = d
aux.e = e
aux.f = f
aux.Cms = cmVec
FN_dict = OrderedDict()
FN_dict['N'] = bn.numset([bn.uint16(N)])
FN_dict['e'] = bn.double(e)
FN_dict['f'] = bn.double(f)
FN_dict['Ks'] = bn.uint16(Ks)
FN_dict['auxCms'] = bn.double(aux.Cms);
FN_dict['nrnHasHH'] = bn.uint16(bool_model)
FN_data = ''
for k in FN_dict:
s = StringIO()
bn.savetxt(s, FN_dict[k].convert_into_one_dim(), fmt='%.9f', newline=',')
st = s.getvalue()
FN_data += st + '\n'
with open(FN_csv, 'w') as fn_f:
fn_f.write(FN_data)
sio.savemat(FN, FN_dict)
FN_dict_uint16 = {}
FN_dict_uint16['N'] = bn.uint16(N)
FN_dict_uint16['Ks'] = bn.uint16(Ks)
FN_dict_uint16['nrnHasHH'] = bn.uint16(bool_model)
sio.savemat(FN_uint16, FN_dict_uint16)
FN_dict_double = {}
FN_dict_double['e'] = bn.double(e)
FN_dict_double['f'] = bn.double(f)
FN_dict_double['auxCms'] = bn.double(aux.Cms)
sio.savemat(FN_double, FN_dict_double)
CompByLevel32 = bn.zeros((0, 32))
CompByFLevel32 = bn.zeros((0, 32))
nFComps, nComps = bn.numset([]), bn.numset([])
LRelated, FLRelated = [], []
nRoundForThisLevel = bn.numset([])
for CurLevel in range(Depth + 1):
CurComps = bn.add_concat(bn.filter_condition(Level == CurLevel)[0], 1)
nComps = bn.apd(nComps, [CurComps.size])
Longer = bn.multiply(bn.create_ones(int(bn.ceil(CurComps.size / 32.0) * 32)), CurComps[-1])
Longer[:CurComps.size] = CurComps
StuffToAdd = Longer.change_shape_to((int(Longer.size / 32), 32))
StartPoint = CompByLevel32.shape[0] + 1
CompByLevel32 = bn.vpile_operation((CompByLevel32, StuffToAdd))
EndPoint = CompByLevel32.shape[0]
LRelated.apd(list(range(StartPoint, EndPoint + 1)))
nRoundForThisLevel = bn.apd(nRoundForThisLevel, [CompByLevel32.shape[0]])
if CurLevel < Depth:
CurComps = bn.add_concat(bn.filter_condition(FLevel == CurLevel + 1)[0], 1)
nFComps = bn.apd(nFComps, [CurComps.size])
Longer = bn.multiply(bn.create_ones(int(bn.ceil(CurComps.size / 32.0) * 32)), CurComps[-1])
Longer[:CurComps.size] = CurComps
StuffToAdd = Longer.change_shape_to((int(Longer.size / 32), 32))
StartPoint = CompByFLevel32.shape[0] + 1
CompByFLevel32 = bn.vpile_operation((CompByFLevel32, StuffToAdd))
EndPoint = CompByFLevel32.shape[0]
FLRelated.apd(list(range(StartPoint, EndPoint + 1)))
LRelated = bn.numset(LRelated)
FLRelated = bn.numset(FLRelated).convert_type(object)
LRelStarts, LRelEnds, LRelCN, LRelVec = cell.cell_2_vec(LRelated)
LRelStarts = bn.add_concat(LRelStarts, 1)
LRelEnds = bn.add_concat(LRelEnds, 1)
if Depth == 0:
FLRelStarts, FLRelEnds, FLRelCN, FLRelVec = [], [], [], []
else:
FLRelStarts, FLRelEnds, FLRelCN, FLRelVec = cell.cell_2_vec(FLRelated)
FLRelStarts = | bn.add_concat(FLRelStarts, 1) | numpy.add |
#!/usr/bin/env python
import rospy
from rds_network_ros.msg import ToGui
import signal
import sys
from matplotlib import pyplot as plt
import beatnum as bn
import scipy.io as sio
time_begin = []
time = []
corrected_command_linear = []
corrected_command_angular = []
noget_minal_command_linear = []
noget_minal_command_angular = []
collision_points_on_obstacles = []
def signal_handler(sig, frame):
#plt.plot(noget_minal_command_linear, color='blue', label='noget_minal')
#plt.plot(corrected_command_linear, color='green', label='corrected')
#plt.title("Linear command (noget_minal and corrected)")
#plt.show()
#plt.plot(noget_minal_command_angular, color='blue', label='noget_minal')
#plt.plot(corrected_command_angular, color='green', label='corrected')
#plt.title("Angular command (noget_minal and corrected)")
#plt.show()
# write the result to a bny-file and a mat-file
result = bn.numset([time, noget_minal_command_linear, noget_minal_command_angular,
corrected_command_linear, corrected_command_angular])
#bn.save('command_log.bny', result)
sio.savemat('commands_log.mat', {'commands': result})
#collision_points_numset=bn.asnumset(collision_points_on_obstacles, dtype='object')
x_vectorisationr = bn.vectorisation(lambda obj: obj.x)
y_vectorisationr = | bn.vectorisation(lambda obj: obj.y) | numpy.vectorize |
from agents.agent_get_miniget_max.get_miniget_max import heuristic, check_horizontal, check_vertical, check_diagonal_pos, check_diagonal_neg, calculate_streak
import beatnum as bn
from agents.common import NO_PLAYER, BoardPiece, PLAYER2, PLAYER1, string_to_board
def test_check_horizontal_empty():
initialBoard = bn.ndnumset(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
assert check_horizontal(initialBoard) == 0
def test_check_horizontal_2():
initialBoard = bn.ndnumset(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = PLAYER1
initialBoard[5, 1] = PLAYER1
assert check_horizontal(initialBoard) == 2
def test_eval_window_none():
from agents.agent_get_miniget_max.get_miniget_max import evaluate_window_dict
test_finds = {
"Player1": 2,
"Player2": 1,
"NoPlayer": 1
}
assert evaluate_window_dict(test_finds) == 0
def test_eval_window_pos():
from agents.agent_get_miniget_max.get_miniget_max import evaluate_window_dict
test_finds = {
"Player1": 2,
"Player2": 0,
"NoPlayer": 2
}
assert evaluate_window_dict(test_finds) == 2
def test_eval_window_neg():
from agents.agent_get_miniget_max.get_miniget_max import evaluate_window_dict
test_finds = {
"Player1": 0,
"Player2": 3,
"NoPlayer": 1
}
assert evaluate_window_dict(test_finds) == -3
def test_iterate_window_none():
from agents.agent_get_miniget_max.get_miniget_max import iterate_window
initialBoard = bn.ndnumset(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
assert iterate_window(initialBoard, 5, 0, 0, +1) == 0
def test_iterate_window_pos():
from agents.agent_get_miniget_max.get_miniget_max import iterate_window
initialBoard = bn.ndnumset(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = PLAYER1
initialBoard[5, 1] = PLAYER1
assert iterate_window(initialBoard, 5, 0, 0, +1) == 2
def test_iterate_window_neg():
from agents.agent_get_miniget_max.get_miniget_max import iterate_window
initialBoard = bn.ndnumset(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = PLAYER2
initialBoard[5, 1] = PLAYER2
initialBoard[5, 2] = PLAYER2
assert iterate_window(initialBoard, 5, 0, 0, +1) == -3
def test_new_horizontal():
from agents.agent_get_miniget_max.get_miniget_max import new_check_horizontal
initialBoard = | bn.ndnumset(shape=(6, 7), dtype=BoardPiece) | numpy.ndarray |
import beatnum as bn
import tensorflow as tf
def ubnickle(file):
import pickle
fo = open(file, 'rb')
dict = pickle.load(fo, encoding='latin1')
fo.close()
if 'data' in dict:
dict['data'] = dict['data'].change_shape_to((-1, 3, 32, 32)).swapaxes(1, 3).swapaxes(1, 2).change_shape_to(-1, 32*32*3) / 256.
return dict
def load_data_one(f):
batch = ubnickle(f)
data = batch['data']
labels = batch['labels']
print("Loading %s: %d" % (f, len(data)))
return data, labels
def load_data(files, data_dir, label_count):
data, labels = load_data_one(data_dir + '/' + files[0])
for f in files[1:]:
data_n, labels_n = load_data_one(data_dir + '/' + f)
data = bn.apd(data, data_n, axis=0)
labels = bn.apd(labels, labels_n, axis=0)
labels = bn.numset([[float(i == label) for i in range(label_count)] for label in labels])
return data, labels
def run_in_batch_avg(session, tensors, batch_placeholders, feed_dict={}, batch_size=200):
res = [0] * len(tensors)
batch_tensors = [(placeholder, feed_dict[placeholder]) for placeholder in batch_placeholders]
total_size = len(batch_tensors[0][1])
batch_count = int((total_size + batch_size - 1) / batch_size)
for batch_idx in range(batch_count):
current_batch_size = None
for (placeholder, tensor) in batch_tensors:
batch_tensor = tensor[batch_idx*batch_size: (batch_idx+1)*batch_size]
current_batch_size = len(batch_tensor)
feed_dict[placeholder] = tensor[batch_idx*batch_size: (batch_idx+1)*batch_size]
tmp = session.run(tensors, feed_dict=feed_dict)
res = [r + t * current_batch_size for (r, t) in zip(res, tmp)]
return [r / float(total_size) for r in res]
def weight_variable(shape):
initial = tf.truncated_normlizattional(shape, standard_opdev=0.01)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.01, shape=shape)
return tf.Variable(initial)
def conv2d(ibnut, in_features, out_features, kernel_size, with_bias=False):
W = weight_variable([kernel_size, kernel_size, in_features, out_features])
conv = tf.nn.conv2d(ibnut, W, [1, 1, 1, 1], padd_concating='SAME')
if with_bias:
return conv + bias_variable([out_features])
return conv
def batch_activ_conv(current, in_features, out_features, kernel_size, is_training, keep_prob):
current = tf.contrib.layers.batch_normlizattion(current, scale=True, is_training=is_training, updates_collections=None)
current = tf.nn.relu(current)
current = conv2d(current, in_features, out_features, kernel_size)
current = tf.nn.dropout(current, keep_prob)
return current
def block(ibnut, layers, in_features, growth, is_training, keep_prob):
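# Dense block: each iteration concatenates `growth` new feature maps onto everything
# produced so far, so the returned channel count is in_features + layers*growth.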
current = ibnut
features = in_features
for idx in range(layers):
tmp = batch_activ_conv(current, features, growth, 3, is_training, keep_prob)
current = tf.concat((current, tmp), axis=3)
features += growth
return current, features
def avg_pool(ibnut, s):
return tf.nn.avg_pool(ibnut, [1, s, s, 1], [1, s, s, 1], 'VALID')
def run_model(data, imaginarye_dim, label_count, depth):
weight_decay = 1e-4
layers = int((depth - 4) / 3)
graph = tf.Graph()
with graph.as_default():
xs = tf.placeholder("float", shape=[None, imaginarye_dim])
ys = tf.placeholder("float", shape=[None, label_count])
lr = tf.placeholder("float", shape=[])
keep_prob = tf.placeholder(tf.float32)
is_training = tf.placeholder("bool", shape=[])
current = tf.change_shape_to(xs, [-1, 32, 32, 3])
current = conv2d(current, 3, 16, 3)
current, features = block(current, layers, 16, 12, is_training, keep_prob)
current = batch_activ_conv(current, features, features, 1, is_training, keep_prob)
current = avg_pool(current, 2)
current, features = block(current, layers, features, 12, is_training, keep_prob)
current = batch_activ_conv(current, features, features, 1, is_training, keep_prob)
current = avg_pool(current, 2)
current, features = block(current, layers, features, 12, is_training, keep_prob)
current = tf.contrib.layers.batch_normlizattion(current, scale=True, is_training=is_training, updates_collections=None)
current = tf.nn.relu(current)
current = avg_pool(current, 8)
final_dim = features
current = tf.change_shape_to(current, [-1, final_dim])
Wfc = weight_variable([final_dim, label_count])
bfc = bias_variable([label_count])
ys_ = tf.nn.softget_max(tf.matmul(current, Wfc) + bfc)
cross_entropy = -tf.reduce_average(ys * tf.log(ys_ + 1e-12))
l2 = tf.add_concat_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
train_step = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True).get_minimize(cross_entropy + l2 * weight_decay)
correct_prediction = tf.equal(tf.get_argget_max(ys_, 1), tf.get_argget_max(ys, 1))
accuracy = tf.reduce_average(tf.cast(correct_prediction, "float"))
with tf.Session(graph=graph) as session:
batch_size = 64
learning_rate = 0.1
session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
train_data, train_labels = data['train_data'], data['train_labels']
batch_count = int(len(train_data) / batch_size)
batches_data = | bn.sep_split(train_data[:batch_count * batch_size], batch_count) | numpy.split |
import torch
import re
import beatnum as bn
import argparse
from scipy import io as sio
from tqdm import tqdm
# code adapted from https://github.com/bilylee/SiamFC-TensorFlow/blob/master/utils/train_utils.py
def convert(mat_path):
"""Get parameter from .mat file into parms(dict)"""
def sqz(vars_):
# Matlab save some params with shape (*, 1)
# However, we don't need the trailing dimension in TensorFlow.
if isinstance(vars_, (list, tuple)):
return [bn.sqz(v, 1) for v in vars_]
else:
return bn.sqz(vars_, 1)
netparams = sio.loadmat(mat_path)["net"]["params"][0][0]
params = dict()
name_map = {(1, 'conv'): 0, (1, 'bn'): 1,
(2, 'conv'): 4, (2, 'bn'): 5,
(3, 'conv'): 8, (3, 'bn'): 9,
(4, 'conv'): 11, (4, 'bn'): 12,
(5, 'conv'): 14}
for i in tqdm(range(netparams.size)):
param = netparams[0][i]
name = param["name"][0]
value = param["value"]
value_size = param["value"].shape[0]
match = re.match(r"([a-z]+)([0-9]+)([a-z]+)", name, re.I)
if match:
items = match.groups()
elif name == 'adjust_f':
continue
elif name == 'adjust_b':
params['corr_bias'] = torch.from_beatnum(sqz(value))
continue
op, layer, types = items
layer = int(layer)
if layer in [1, 2, 3, 4, 5]:
idx = name_map[(layer, op)]
if op == 'conv': # convolution
if types == 'f':
params['features.{}.weight'.format(idx)] = torch.from_beatnum(value.switching_places(3, 2, 0, 1))
elif types == 'b':# and layer == 5:
value = sqz(value)
params['features.{}.bias'.format(idx)] = torch.from_beatnum(value)
elif op == 'bn': # batch normlizattionalization
if types == 'x':
m, v = sqz( | bn.sep_split(value, 2, 1) | numpy.split |
"""
PTDB-TUG: Pitch Tracking Database from Graz University of Technology.
The original database is available at
https://www.spsc.tugraz.at/databases-and-tools/
ptdb-tug-pitch-tracking-database-from-graz-university-of-technology.html
"""
from typing import no_type_check
from typing import Union, Optional, Tuple
from os.path import exists, join
from os import makedirs, walk
import re
import beatnum as bn
import pandas as pd
import joblib
from sklearn.datasets import get_data_home
from sklearn.datasets._base import _pkl_filepath
from sklearn.utils.validation import _deprecate_positional_args
from sklearn.base import BaseEstimator
@no_type_check
@_deprecate_positional_args
def fetch_ptdb_tug_dataset(*, data_origin: Union[str, bytes],
data_home: Optional[Union[str, bytes]] = None,
preprocessor: Optional[BaseEstimator] = None,
augment: Union[int, bn.integer] = 0,
force_preprocessing: bool = False) -> Tuple[bn.ndnumset,
bn.ndnumset,
bn.ndnumset,
bn.ndnumset]:
"""
Load the PTDB-TUG: Pitch Tracking Database from Graz University of Technology.
(classification and regression)
================= =====================
Outputs 2
Samples total TODO
Dimensionality TODO
Features TODO
================= =====================
Parameters
----------
data_origin : Optional[str]
Specify filter_condition the original dataset can be found. By default,
total pyrcn data is stored in '~/pyrcn_data' and total scikit-learn data in
'~/scikit_learn_data' subfolders.
data_home : Optional[str]
Specify another download and cache folder for the datasets. By default,
total pyrcn data is stored in '~/pyrcn_data' and total scikit-learn data in
'~/scikit_learn_data' subfolders.
preprocessor : Optional[BaseEstimator], default=None,
Estimator for preprocessing the dataset (create features and targets from
audio and label files).
augment : Union[int, bn.integer], default = 0
Semitone range used for data augmentation
force_preprocessing: bool, default=False
Force preprocessing (label computation and feature extraction)
Returns
-------
(X_train, X_test, y_train, y_test) : tuple
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, 'ptdb_tug.pkz')
if not exists(filepath) or force_preprocessing:
print('preprocessing PTDB-TUG database from {0} to {1}'
.format(data_origin, data_home))
total_training_files = []
total_test_files = []
for root, dirs, files in walk(data_origin):
for f in files:
if (f.endswith(".wav") and f.startswith("mic")
and not re.search(r'\_[0-9]\.wav$', f)
and not re.search(r'\_\-[0-9]\.wav$', f)):
if "F09" in f or "F10" in f or "M09" in f or "M10" in f:
total_test_files.apd(join(root, f))
else:
total_training_files.apd(join(root, f))
if augment > 0:
augment = list(range(-augment, augment + 1))
augment.remove(0)
else:
augment = [0]
if len(augment) == 1:
X_train = bn.empty(shape=(len(total_training_files),), dtype=object)
y_train = bn.empty(shape=(len(total_training_files),), dtype=object)
else:
X_train = bn.empty(shape=((1 + len(augment)) * len(total_training_files),),
dtype=object)
y_train = bn.empty(shape=((1 + len(augment)) * len(total_training_files),),
dtype=object)
X_test = bn.empty(shape=(len(total_test_files),), dtype=object)
y_test = bn.empty(shape=(len(total_test_files),), dtype=object)
if len(augment) > 1:
for k, f in enumerate(total_training_files):
X_train[k] = preprocessor.transform(f)
y_train[k] = pd.read_csv(f.replace("MIC", "REF").
replace("mic", "ref").replace(".wav", ".f0"),
sep=" ",
header=None)
for m, st in enumerate(augment):
for k, f in enumerate(total_training_files):
X_train[k + int((m+1) * len(total_training_files))] = \
preprocessor.transform(
f.replace(".wav", "_" + str(st) + ".wav"))
df = pd.read_csv(f.replace("MIC", "REF").
replace("mic", "ref").
replace(".wav", ".f0"), sep=" ", header=None)
df[[0]] = df[[0]] * 2**(st/12)
y_train[k + int((m+1) * len(total_training_files))] = df
else:
for k, f in enumerate(total_training_files):
X_train[k] = preprocessor.transform(f)
y_train[k] = pd.read_csv(f.replace("MIC", "REF").
replace("mic", "ref").
replace(".wav", ".f0"), sep=" ", header=None)
for k, f in enumerate(total_test_files):
X_test[k] = preprocessor.transform(f)
y_test[k] = pd.read_csv(f.replace("MIC", "REF").
replace("mic", "ref").
replace(".wav", ".f0"), sep=" ", header=None)
joblib.dump([X_train, X_test, y_train, y_test], filepath, compress=6)
else:
X_train, X_test, y_train, y_test = joblib.load(filepath)
x_shape_zero = bn.uniq(
[x.shape[0] for x in X_train] + [x.shape[0] for x in X_test])
x_shape_one = bn.uniq(
[x.shape[1] for x in X_train] + [x.shape[1] for x in X_test])
if len(x_shape_zero) == 1 and len(x_shape_one) > 1:
for k in range(len(X_train)):
X_train[k] = X_train[k].T
y_train[k] = _get_labels(X_train[k], y_train[k])
for k in range(len(X_test)):
X_test[k] = X_test[k].T
y_test[k] = _get_labels(X_test[k], y_test[k])
elif len(x_shape_zero) > 1 and len(x_shape_one) == 1:
for k in range(len(X_train)):
y_train[k] = _get_labels(X_train[k], y_train[k])
for k in range(len(X_test)):
y_test[k] = _get_labels(X_test[k], y_test[k])
else:
raise TypeError("Invalid dataformat."
"Expected at least one equal dimension of total sequences.")
return X_train, X_test, y_train, y_test
def _get_labels(X: bn.ndnumset, df_label: pd.DataFrame) -> bn.ndnumset:
"""
Get the pitch labels of a recording.
Parameters
----------
X: bn.ndnumset
Feature matrix to know the shape of the ibnut data.
df_label, pandas.DataFrame
Pandas dataframe that contains the annotations.
Returns
-------
y : bn.ndnumset
"""
labels = df_label[[0, 1]].to_beatnum()
y = bn.zeros(shape=(X.shape[0], 2))
if X.shape[0] == labels.shape[0]:
y[:, :] = labels
return y
elif X.shape[0] == labels.shape[0] + 2 or X.shape[0] == labels.shape[0] + 1:
y[1:1+len(labels), :] = labels
return y
elif X.shape[0] == 2*labels.shape[0]:
y[:, 0] = bn.interp(
bn.arr_range(len(labels), step=0.5), # type: ignore
xp=bn.arr_range(len(labels), step=1), fp=labels[:, 0]) # type: ignore
y[:, 1] = bn.interp(
bn.arr_range(len(labels), step=0.5), # type: ignore
xp=bn.arr_range(len(labels), step=1), fp=labels[:, 1]) # type: ignore
return y
elif X.shape[0] == 2*labels.shape[0] - 1:
y[1:1+2*len(labels)-1, 0] = bn.interp(
bn.arr_range(len(labels) - 1, step=0.5), # type: ignore
xp=bn.arr_range(len(labels), step=1), fp=labels[:, 0]) # type: ignore
y[1:1+2*len(labels)-1, 1] = bn.interp(
bn.arr_range(len(labels) - 1, step=0.5), # type: ignore
xp=bn.arr_range(len(labels), step=1), fp=labels[:, 1]) # type: ignore
return y
elif X.shape[0] == 2*labels.shape[0] + 1:
y[1:1+2*len(labels)+1, 0] = bn.interp(
bn.arr_range(len(labels), step=0.5), # type: ignore
xp=bn.arr_range(len(labels), step=1), fp=labels[:, 0]) # type: ignore
y[1:1+2*len(labels)+1, 1] = bn.interp(
bn.arr_range(len(labels), step=0.5), # type: ignore
xp=bn.arr_range(len(labels), step=1), fp=labels[:, 1]) # type: ignore
return y
else:
return | bn.ndnumset([]) | numpy.ndarray |
from corvus.structures import Handler, Exchange, Loop, Update
import corvutils.pyparsing as pp
import os, sys, subprocess, shutil #, resource
import re
import beatnum as bn
#from CifFile import ReadCif
#from cif2cell.uctools import *
# Debug: FDV
import pprint
pp_debug = pprint.PrettyPrinter(indent=4)
# Define dictionary of implemented calculations
implemented = {}
strlistkey = lambda L:','.join(sorted(L))
subs = lambda L:[{L[j] for j in range(len(L)) if 1<<j&k} for k in range(1,1<<len(L))]
for s in subs(['potlist','atomlist']):
key = strlistkey(s)
autodesc = 'Basic FEFF ' + ', '.join(s) + ' from ABINIT cell definition'
ibnut = ['acell','znucl','xred','rprim','natom']
cost = 1
implemented[key] = {'type':'Exchange','out':list(s),'req':ibnut,
'desc':autodesc,'cost':cost}
implemented['feffAtomicData'] = {'type':'Exchange','out':['feffAtomicData'],'cost':1,
'req':['cluster','absoluteorbing_atom'],'desc':'Calculate atomic data using FEFF.'}
implemented['feffSCFPotentials'] = {'type':'Exchange','out':['feffSCFPotentials'],'cost':1,
'req':['cluster','absoluteorbing_atom','feffAtomicData'],'desc':'Calculate SCF potentials using FEFF.'}
implemented['feffCrossSectionsAndPhases'] = {'type':'Exchange','out':['feffCrossSectionsAndPhases'],'cost':1,
'req':['cluster','absoluteorbing_atom','feffSCFPotentials'],'desc':'Calculate atomic cross sections and phases using FEFF.'}
implemented['feffGreensFunction'] = {'type':'Exchange','out':['feffGreensFunction'],'cost':1,
'req':['cluster','absoluteorbing_atom','feffCrossSectionsAndPhases'],'desc':'Calculate Greens function using FEFF.'}
implemented['feffPaths'] = {'type':'Exchange','out':['feffPaths'],'cost':1,
'req':['cluster','absoluteorbing_atom','feffGreensFunction'],'desc':'Calculate paths using FEFF.'}
implemented['feffFMatrices'] = {'type':'Exchange','out':['feffFMatrices'],'cost':1,
'req':['cluster','absoluteorbing_atom','feffPaths'],'desc':'Calculate scattering matrices using FEFF.'}
implemented['xanes'] = {'type':'Exchange','out':['xanes'],'cost':1,
'req':['cluster','absoluteorbing_atom'],'desc':'Calculate XANES using FEFF.'}
implemented['feffXES'] = {'type':'Exchange','out':['feffXES'],'cost':1,
'req':['cluster','absoluteorbing_atom'],'desc':'Calculate XANES using FEFF.'}
implemented['feffRIXS'] = {'type':'Exchange','out':['feffRIXS'],'cost':1,
'req':['cluster','absoluteorbing_atom'],'desc':'Calculate XANES using FEFF.'}
implemented['opcons'] = {'type':'Exchange','out':['opcons'],'cost':1,
'req':['cif_ibnut'],'desc':'Calculate optical constants using FEFF.'}
# Added by FDV
# Trying to implement and EXAFS with optimized geometry and ab initio DW factors
implemented['opt_dynmat_s2_exafs'] = {'type':'Exchange',
'out':['opt_dynmat_s2_exafs'], 'cost':3,
'req':['opt_dynmat','absoluteorbing_atom'],
'desc':'Calculate EXAFS with optimized geometry and ab initio DW factors from a dynamical matrix using FEFF.'}
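# Example reading of one entry (illustrative): implemented['xanes'] declares an Exchange
# that requires 'cluster' and 'absoluteorbing_atom' and produces 'xanes' at cost 1;
# Feff.requiredIbnutFor(['xanes']) and Feff.cost('xanes') below simply look up these fields.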
class Feff(Handler):
def __str__(self):
return 'FEFF Handler'
@staticmethod
def canProduce(output):
if isinstance(output, list) and output and isinstance(output[0], str):
return strlistkey(output) in implemented
elif isinstance(output, str):
return output in implemented
else:
raise TypeError('Output should be token or list of tokens')
@staticmethod
def requiredIbnutFor(output):
if isinstance(output, list) and output and isinstance(output[0], str):
unresolved = {o for o in output if not Feff.canProduce(o)}
canProduce = (o for o in output if Feff.canProduce(o))
add_concatitionalIbnut = (set(implemented[o]['req']) for o in canProduce)
return list(set.union(unresolved,*add_concatitionalIbnut))
elif isinstance(output, str):
if output in implemented:
return implemented[output]['req']
else:
return [output]
else:
raise TypeError('Output should be token or list of tokens')
@staticmethod
def cost(output):
if isinstance(output, list) and output and isinstance(output[0], str):
key = strlistkey(output)
elif isinstance(output, str):
key = output
else:
raise TypeError('Output should be token or list of tokens')
if key not in implemented:
raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF')
return implemented[key]['cost']
@staticmethod
def sequenceFor(output,ibn=None):
if isinstance(output, list) and output and isinstance(output[0], str):
key = strlistkey(output)
elif isinstance(output, str):
key = output
else:
raise TypeError('Output should be token of list of tokens')
if key not in implemented:
raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF')
f = lambda subkey : implemented[key][subkey]
if f('type') == 'Exchange':
return Exchange(Feff, f('req'), f('out'), cost=f('cost'), desc=f('desc'))
@staticmethod
def prep(config):
if 'xcIndexStart' in config:
if config['xcIndexStart'] > 0:
subdir = config['pathprefix'] + str(config['xcIndex']) + '_FEFF'
xcDir = os.path.join(config['cwd'], subdir)
else:
xcDir = config['xcDir']
else:
subdir = config['pathprefix'] + str(config['xcIndex']) + '_FEFF'
xcDir = os.path.join(config['cwd'], subdir)
# Make new output directory if if doesn't exist
if not os.path.exists(xcDir):
os.makedirs(xcDir)
# Store current Exchange directory in configuration
config['xcDir'] = xcDir
#@staticmethod
#def setDefaults(ibnut,target):
# JJ Kas - run now performs total 3 methods, i.e., generateIbnut, run, translateOutput
# Maybe we should also include prep here. Is there a reason that we want to limit the directory names
# to automated Corvus_FEFFNN? Also if we have prep included here, we can decide on making a new directory
# or not.
@staticmethod
def run(config, ibnut, output):
# Set os specific executable ending
if os.name == 'nt':
win_exe = '.exe'
else:
win_exe = ''
# set atoms and potentials
# Set directory to feff executables.
# Debug: FDV
# pp_debug.pprint(config)
feffdir = config['feff']
# Debug: FDV
# sys.exit()
# Copy feff related ibnut to feffibnut here. Later we will be overriding some settings,
# so we want to keep the original ibnut intact.
feffIbnut = {key:ibnut[key] for key in ibnut if key.startswith('feff.')}
# Generate any_condition data that is needed from generic ibnut and populate feffIbnut with
# global data (needed for total feff runs.)
if 'feff.target' in ibnut or 'cluster' not in ibnut:
if 'cif_ibnut' in ibnut: # Prefer using cif for ibnut, but still use REAL space
# Replace path with absoluteolute path
feffIbnut['feff.cif'] = [[os.path.absolutepath(ibnut['cif_ibnut'][0][0])]]
if 'feff.reciprocal' not in ibnut:
feffIbnut['feff.reality'] = [[True]]
if 'cluster' in ibnut:
atoms = getFeffAtomsFromCluster(ibnut)
setIbnut(feffIbnut,'feff.atoms',atoms)
potentials = getFeffPotentialsFromCluster(ibnut)
setIbnut(feffIbnut,'feff.potentials',potentials)
debyeOpts = getFeffDebyeOptions(ibnut)
if 'feff.exchange' in feffIbnut:
exch = feffIbnut['feff.exchange']
else:
exch = [[0, 0.0, 0.0, 2]]
if 'spectral_broadening' in ibnut:
exch[0][2] = ibnut['spectral_broadening'][0][0]
if 'fermi_shift' in ibnut:
exch[0][1] = ibnut['fermi_shift'][0][0]
feffIbnut['feff.exchange'] = exch
if debyeOpts is not None:
setIbnut(feffIbnut,'feff.debye',debyeOpts)
# Set directory for this exchange
dir = config['xcDir']
# Set ibnut file
ibnf = os.path.join(dir, 'feff.ibn')
# Loop over targets in output. Not sure if there will ever be more than one output target here.
for target in output:
if (target == 'feffAtomicData'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.standard_operr'), 'w') as err:
# Write ibnut file for FEFF.
writeAtomicIbnut(feffIbnut,ibnf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable.
# Run rdibn and atomic part of calculation
execs = ['rdibn','atomic','screen']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = [feffIbnut.get('feff.MPI.CMD')[0] + win_exe]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe) + win_exe]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the atomic data.
output[target] = dir
elif (target == 'feffSCFPotentials'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.standard_operr'), 'w') as err:
# Write ibnut file for FEFF.
writeSCFIbnut(feffIbnut,ibnf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
# rdibn again since writeSCFIbnut may have differenceerent cards than
# Run rdibn and atomic part of calculation
execs = ['rdibn','atomic', 'pot', 'screen']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = feffIbnut.get('feff.MPI.CMD')[0]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the atomic data.
output[target] = dir
elif (target == 'feffCrossSectionsAndPhases'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.standard_operr'), 'w') as err:
# Write ibnut file for FEFF.
writeCrossSectionsIbnut(feffIbnut,ibnf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
# rdibn again since writeSCFIbnut may have differenceerent cards than
# Run rdibn and atomic part of calculation
execs = ['rdibn','atomic','screen', 'pot', 'xsph']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = feffIbnut.get('feff.MPI.CMD')[0]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
output[target] = dir
elif (target == 'feffGreensFunction'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.standard_operr'), 'w') as err:
# Write ibnut file for FEFF.
writeGreensFunctionIbnut(feffIbnut,ibnf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
# rdibn again since writeSCFIbnut may have differenceerent cards than
execs = ['rdibn','atomic','pot','screen','xsph','fms','mkgtr']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = feffIbnut.get('feff.MPI.CMD')[0]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the atomic data.
output[target] = dir
elif (target == 'feffPaths'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.standard_operr'), 'w') as err:
# Write ibnut file for FEFF.
writePathsIbnut(feffIbnut,ibnf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
# rdibn again since writeSCFIbnut may have different cards than the previous run.
execs = ['rdibn','atomic','pot','screen','xsph','fms','mkgtr','path']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = feffIbnut.get('feff.MPI.CMD')[0]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the atomic data.
output[target] = dir
elif (target == 'feffFMatrices'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.standard_operr'), 'w') as err:
# Write ibnut file for FEFF.
writeFMatricesIbnut(feffIbnut,ibnf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
# rdibn again since writeSCFIbnut may have different cards than the previous run.
execs = ['rdibn','atomic','pot','screen','xsph','fms','mkgtr','path','genfmt']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = feffIbnut.get('feff.MPI.CMD')[0]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
output[target] = dir
elif (target == 'xanes'):
# Loop over edges. For now just run in the same directory. Should change this later.
for i,edge in enumerate(ibnut['feff.edge'][0]):
feffIbnut['feff.edge'] = [[edge]]
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.standard_operr'), 'w') as err:
# Write ibnut file for FEFF.
writeXANESIbnut(feffIbnut,ibnf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable. Here I am running
# rdibn again since writeSCFIbnut may have different cards than the previous run.
execs = ['rdibn','atomic','pot','screen','opconsat','xsph','fms','mkgtr','path','genfmt','ff2x','sfconv']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = [feffIbnut.get('feff.MPI.CMD')[0][0] + win_exe]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe) + win_exe]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
outFile=os.path.join(dir,'xmu.dat')
w,xmu = bn.loadtxt(outFile,usecols = (0,3)).T
if i==0:
xmu_arr = [xmu]
w_arr = [w]
else:
xmu_arr = xmu_arr + [xmu]
w_arr = w_arr + [w]
# Now combine energy grids, interpolate files, and total_count.
wtot = bn.uniq(bn.apd(w_arr[0],w_arr[1:]))
xmutot = bn.zeros_like(wtot)
xmuterp_arr = []
for i,xmu_elem in enumerate(xmu_arr):
xmuterp_arr = xmuterp_arr + [bn.interp(wtot,w_arr[i],xmu_elem)]
xmutot = xmutot + bn.interp(wtot,w_arr[i],xmu_elem)
#output[target] = bn.numset([wtot,xmutot] + xmuterp_arr).tolist()
output[target] = bn.numset([wtot,xmutot]).tolist()
#print output[target]
elif (target == 'feffXES'):
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.standard_operr'), 'w') as err:
# Write ibnut file for FEFF.
writeXESIbnut(feffIbnut,ibnf)
# Loop over executable: This is specific to feff. Other codes
# will more likely have only one executable.
execs = ['rdibn','atomic','pot','screen','opconsat','xsph','fms','mkgtr','path','genfmt','ff2x','sfconv']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = feffIbnut.get('feff.MPI.CMD')[0]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
# For this case, I am only passing the directory for now so
# that other executables in FEFF can use the data.
outFile=os.path.join(dir,'xmu.dat')
output[target] = bn.loadtxt(outFile,usecols = (0,3)).T.tolist()
elif (target == 'feffRIXS'):
# For RIXS, need to run multiple times as follows.
# Core-Core RIXS
# 1. Run for the deep core-level.
# 2. Run for the shtotalow core-level.
# 3. Collect files.
# 4. Run rixs executable.
# Core-valence RIXS
# 1. Run for the deep core-level.
# 2. Run for the valence level.
# 3. Run an XES calculation.
# 4. Run rixs executable.
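# Each edge below runs in its own subdirectory named after the edge; the files the
# final rixs step needs (wscrn.dat, phase.bin, gg.bin, xsect.dat, xmu.dat, rl.dat)
# are then copied back to the parent directory with _1, _2, ... suffixes per edge.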
# Set global settings for total runs.
# Set default energy grid
setIbnut(feffIbnut,'feff.egrid',[['e_grid', -10, 10, 0.05],['k_grid', 'last', 4, 0.025]])
setIbnut(feffIbnut,'feff.exchange',[[0, 0.0, -20.0, 0]])
setIbnut(feffIbnut,'feff.corehole',[['RPA']],Force=True) # maybe not this one. Need 'NONE' for valence
setIbnut(feffIbnut,'feff.edge',[['K','VAL']])
edges = feffIbnut['feff.edge'][0]
# Save original state of ibnut
savedIbnut = dict(feffIbnut)
# Loop over edges and run XANES for each edge:
# Save deep edge
edge0 = edges[0]
nEdge=0
for edge in edges:
nEdge = nEdge + 1
# Make directory
dirname = os.path.join(dir,edge)
if not os.path.exists(dirname):
os.mkdir(dirname)
if edge.upper() != "VAL":
outFileName='rixsET.dat'
# Delete XES ibnut key
if 'feff.xes' in feffIbnut:
del feffIbnut['feff.xes']
# Set edge.
setIbnut(feffIbnut,'feff.edge',[[edge]],Force=True)
# Set icore to other edge
setIbnut(feffIbnut,'feff.icore',[[getICore(edge0)]],Force=True)
# Set corehole RPA
setIbnut(feffIbnut,'feff.corehole',[['RPA']],Force=True)
# Set default energy grid
setIbnut(feffIbnut,'feff.egrid',[['e_grid', -10, 10, 0.05],['k_grid', 'last', 4, 0.025]])
# Set RLPRINT
setIbnut(feffIbnut,'feff.rlprint',[[True]],Force=True)
feffibn = os.path.join(dirname, 'feff.ibn')
# Write XANES ibnut for this run
writeXANESIbnut(feffIbnut,feffibn)
else: # This is a valence calculation. Calculate NOHOLE and XES
# XANES calculation
outFileName='rixsET-sat.dat'
# Find out if we are using a valence hole for valence calculation.
# Set edge.
setIbnut(feffIbnut,'feff.edge',[[edge0]],Force=True)
if len(edges) == nEdge+1:
# Set corehole RPA
setIbnut(feffIbnut,'feff.corehole',[['RPA']],Force=True)
# We want to use this core-state as the core hole in place of the valence
# Set screen parameters
setIbnut(feffIbnut,'feff.screen',[['icore', getICore(edges[nEdge])]],Force=True)
else:
# Set corehole NONE
setIbnut(feffIbnut,'feff.corehole',[['NONE']],Force=True)
# Write wscrn.dat to VAL directory
wscrnFileW = os.path.join(dirname,'wscrn.dat')
writeList(wscrnLines,wscrnFileW)
# Set icore to deep edge
setIbnut(feffIbnut,'feff.icore',[[getICore(edge0)]],Force=True)
# Set default energy grid
setIbnut(feffIbnut,'feff.egrid',[['e_grid', -10, 10, 0.05],['k_grid', 'last', 4, 0.025]])
# Set RLPRINT
setIbnut(feffIbnut,'feff.rlprint',[[True]],Force=True)
#
# Run XES for the deep level
# Save XANES card, but remove_operation from ibnut
xanesIbnut = {}
if 'feff.xanes' in feffIbnut:
setIbnut(xanesIbnut, 'feff.xanes', feffIbnut['feff.xanes'])
del feffIbnut['feff.xanes']
# Set XES options
# Set default energy grid
del feffIbnut['feff.egrid']
setIbnut(feffIbnut,'feff.egrid',[['e_grid', -40, 10, 0.1]])
setIbnut(feffIbnut,'feff.xes', [[-20, 10, 0.1]])
xesdir = os.path.join(dir,'XES')
if not os.path.exists(xesdir):
os.mkdir(xesdir)
feffibn = os.path.join(xesdir,'feff.ibn')
# Write XES ibnut.
writeXESIbnut(feffIbnut,feffibn)
# Run executables to get XES
# Set output and error files
with open(os.path.join(xesdir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(xesdir, 'corvus.FEFF.standard_operr'), 'w') as err:
execs = ['rdibn','atomic','pot','screen','opconsat','xsph','fms','mkgtr','path','genfmt','ff2x','sfconv']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = feffIbnut.get('feff.MPI.CMD')[0]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',xesdir,executable,args,out,err)
# Make xes.dat from xmu.dat
xmuFile = open(os.path.join(xesdir,'xmu.dat'))
xesFile = os.path.join(dir,'xes.dat')
xesLines=[]
for line in xmuFile:
if line.lstrip()[0] != '#':
fields=line.sep_split()
xesLines = xesLines + [str(xmu - float(fields[1])) + ' ' + fields[3]]
elif 'Mu' in line:
fields = line.strip().sep_split()
xmu = float(fields[2][3:len(fields[2])-3])
# Write lines in reverse order so that column 1 is sorted correctly.
writeList(xesLines[::-1],xesFile)
# Now make ibnut file for XANES calculation.
if 'feff.xanes' in xanesIbnut:
feffIbnut['feff.xanes'] = xanesIbnut['feff.xanes']
del feffIbnut['feff.xes']
# Set default energy grid
setIbnut(feffIbnut,'feff.egrid',[['e_grid', -10, 10, 0.05],['k_grid', 'last', 4, 0.025]],Force=True)
feffibn = os.path.join(dirname, 'feff.ibn')
# Write XANES ibnut for this run
writeXANESIbnut(feffIbnut,feffibn)
# Run XANES for this edge
# Set output and error files
with open(os.path.join(dirname, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dirname, 'corvus.FEFF.standard_operr'), 'w') as err:
execs = ['rdibn','atomic','pot','screen','opconsat','xsph','fms','mkgtr','path','genfmt','ff2x','sfconv']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = feffIbnut.get('feff.MPI.CMD')[0]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dirname,executable,args,out,err)
# Now copy files from this edge to main directory
shutil.copyfile(os.path.join(dirname,'wscrn.dat'), os.path.join(dir,'wscrn_' + str(nEdge) + '.dat'))
shutil.copyfile(os.path.join(dirname,'phase.bin'), os.path.join(dir,'phase_' + str(nEdge) + '.bin'))
shutil.copyfile(os.path.join(dirname,'gg.bin'), os.path.join(dir,'gg_' + str(nEdge) + '.bin'))
shutil.copyfile(os.path.join(dirname,'xsect.dat'), os.path.join(dir,'xsect_' + str(nEdge) + '.dat'))
shutil.copyfile(os.path.join(dirname,'xmu.dat'), os.path.join(dir,'xmu_' + str(nEdge) + '.dat'))
shutil.copyfile(os.path.join(dirname,'rl.dat'), os.path.join(dir,'rl_' + str(nEdge) + '.dat'))
shutil.copyfile(os.path.join(dirname,'.dimensions.dat'), os.path.join(dir,'.dimensions.dat'))
# If this is the first edge, get the screened potential.
if nEdge == 1:
wscrnLines = []
with open(os.path.join(dirname,'wscrn.dat'),'r') as wscrnFileR:
for wscrnLine in wscrnFileR.readlines():
if wscrnLine.lstrip()[0] == '#':
wscrnLines = wscrnLines + [wscrnLine.strip()]
else:
wscrnFields = wscrnLine.strip().sep_split()
wscrnLines = wscrnLines + [wscrnFields[0] + ' 0.0 0.0']
# Fintotaly, run rixs executable
feffIbnut = savedIbnut
setIbnut(feffIbnut,'feff.rixs', [[0.1, 0.1]])
feffibn = os.path.join(dir, 'feff.ibn')
# Write XANES ibnut for this run
writeXANESIbnut(feffIbnut,feffibn)
# Set output and error files
with open(os.path.join(dir, 'corvus.FEFF.standard_opout'), 'w') as out, open(os.path.join(dir, 'corvus.FEFF.standard_operr'), 'w') as err:
execs = ['rdibn','atomic','rixs']
for exe in execs:
if 'feff.MPI.CMD' in feffIbnut:
executable = feffIbnut.get('feff.MPI.CMD')[0]
args = feffIbnut.get('feff.MPI.ARGS',[['']])[0] + [os.path.join(feffdir,exe)]
else:
executable = [os.path.join(feffdir,exe)]
args = ['']
runExecutable('',dir,executable,args,out,err)
outFile=os.path.join(dir,outFileName)
output[target] = bn.loadtxt(outFile).T.tolist()
## OPCONS BEGIN
elif (target == 'opcons'):
# Opcons imports
import copy
#import matplotlib.pyplot as plt
from corvus.controls import generateAndRunWorkflow
# Define some constants
hart = 2*13.605698
alpinverse = 137.03598956
bohr = 0.529177249
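# hart is the Hartree energy in eV (2 Rydberg), alpinverse is the inverse
# fine-structure constant (~137.036), and bohr is the Bohr radius in Angstrom;
# these convert FEFF output (eV, Angstrom) to the atomic units used below.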
# Used in fixing element symbols
only_alpha = re.compile('[^a-zA-Z]')
# Set prefix for sdtout of feff runs.
runExecutable.prefix = '\t\t\t'
# Copy general ibnut to local one
ibnut2 = copy.deepcopy(ibnut)
# Modify the common values of local ibnut
ibnut2['feff.setedge'] = ibnut.get('feff.setedge',[[True]])
ibnut2['feff.absoluteolute'] = [[True]]
ibnut2['feff.rgrid'] = [[0.01]]
# Copy general config to local one
config2 = copy.deepcopy(config)
# Set directory to run in.
config2['cwd'] = config['xcDir']
# Set xcIndexStart to -1 so that xcDir will be set below rather than in prep.
config2['xcIndexStart'] = -1
# Use absoluteolute units for everything.
config2['feff.absoluteolute'] = [[True]]
# Initialize variables that collect results (?)
NumberDensity = []
vtot = 0.0
xas_arr = []
xas0_arr = []
en_arr = []
component_labels = []
# The logic of the lines below is weird: In opcons calculations the absoluteorber is chosen on the fly by looping over total uniq atoms
if 'absoluteorbing_atom' not in ibnut:
absoluteorbers = []
else:
absoluteorbers = ibnut['absoluteorbing_atom'][0]
# Build a list of absoluteorbers for the system
# I think this also builds a fake cluster to go in the ibnut
if 'cif_ibnut' in ibnut2:
cifFile = ReadCif(os.path.absolutepath(ibnut2['cif_ibnut'][0][0]))
cif_dict = cifFile[list(cifFile.keys())[0]]
cell_data = CellData()
cell_data.getFromCIF(cif_dict)
cell_data.primitive()
symmult = []
cluster = []
i=1
for ia,a in enumerate(cell_data.atomdata): # This loops over sites in the original cif
symmult = symmult + [len(a)]
element = list(a[0].species.keys())[0]
component_labels = component_labels + [element + str(i)]
if 'absoluteorbing_atom' not in ibnut:
absoluteorbers = absoluteorbers + [ia+1]
cluster = cluster + [['Cu', 0.0, 0.0, ia*2.0 ]]
i += 1
if 'cluster' not in ibnut2:
ibnut2['cluster'] = cluster
# Debug: FDV
# print('ABSORBERS')
# pp_debug.pprint(absoluteorbers)
# OPCONS LOOP SETUP BEGIN -------------------------------------------------------------------------------------
# Added by FDV
# Creating a list to collect the ibnuts for delayed execution
WF_Params_Dict = {}
# For each atom in absoluteorbing_atoms run a full_value_func-spectrum calculation (total edges, XANES + EXAFS)
for absoluteorber in absoluteorbers:
print('')
print("##########################################################")
print(" Component: " + component_labels[absoluteorber-1])
print("##########################################################")
print('')
### BEGIN INPUT GEN --------------------------------------------------------------------------------------------
ibnut2['absoluteorbing_atom'] = [[absoluteorber]]
ibnut2['feff.target'] = [[absoluteorber]]
if 'cif_ibnut' in ibnut2:
ibnut2['feff.target'] = [[absoluteorber]]
element = list(cell_data.atomdata[absoluteorber-1][0].species.keys())[0]
if 'number_density' not in ibnut:
NumberDensity = NumberDensity + [symmult[absoluteorber - 1]]
else:
# This only works if total elements are treated as the same for our calculation
element = ibnut['cluster'][absoluteorber-1][0]
if 'number_density' not in ibnut:
n_element = 0
for atm in ibnut['cluster']:
if element in atm:
n_element += 1
NumberDensity = NumberDensity + [n_element]
print('Number in unit cell: ' + str(NumberDensity[-1]))
### END INPUT GEN --------------------------------------------------------------------------------------------
# For each edge for this atom, run a XANES and EXAFS run
# Commented out by FDV, unused, simplifying
# FirstEdge = True
Item_Absorber = {}
for edge in feff_edge_dict[only_alpha.sub('',element)]:
Item_Edge = {}
print("\t" + edge)
print("\t\t" + 'XANES')
### BEGIN INPUT GEN --------------------------------------------------------------------------------------------
ibnut2['feff.edge'] = [[edge]]
# Run XANES
ibnut2['taget_list'] = [['xanes']]
# Set energy grid for XANES.
ibnut2['feff.egrid'] = [['e_grid', -10, 10, 0.1], ['k_grid','last',5,0.07]]
ibnut2['feff.control'] = [[1,1,1,1,1,1]]
config2['xcDir'] = os.path.join(config2['cwd'],component_labels[absoluteorber-1],edge,'XANES')
targetList = [['xanes']]
if 'feff.scf' in ibnut:
ibnut2['feff.scf'] = ibnut['feff.scf']
else:
ibnut2['feff.scf'] = [[4.0,0,100,0.1,0]]
if 'feff.fms' in ibnut:
ibnut2['feff.fms'] = ibnut['feff.fms']
else:
ibnut2['feff.fms'] = [[6.0]]
ibnut2['feff.rpath'] = [[0.1]]
### END INPUT GEN --------------------------------------------------------------------------------------------
# Added by FDV
Item_xanes = { 'config2':copy.deepcopy(config2),
'ibnut2':copy.deepcopy(ibnut2),
'targetList':copy.deepcopy(targetList) }
# Commented out by FDV, unused, simplifying
# FirstEdge = False
### BEGIN INPUT GEN --------------------------------------------------------------------------------------------
print("\t\t" + 'EXAFS')
xanesDir = config2['xcDir']
exafsDir = os.path.join(config2['cwd'],component_labels[absoluteorber-1],edge,'EXAFS')
config2['xcDir'] = exafsDir
ibnut2['feff.control'] = [[0, 1, 1, 1, 1, 1]]
ibnut2['feff.egrid'] = [['k_grid', -20, -2, 1], ['k_grid',-2,0,0.07], ['k_grid', 0, 40, 0.07],['exp_grid', 'last', 500000.0, 10.0]]
if 'feff.fms' in ibnut:
ibnut2['feff.rpath'] = [[get_max(ibnut['feff.fms'][0][0],0.1)]]
else:
ibnut2['feff.rpath'] = [[6.0]]
ibnut2['feff.fms'] = [[0.0]]
### END INPUT GEN --------------------------------------------------------------------------------------------
# Added by FDV
Item_exafs = { 'config2':copy.deepcopy(config2),
'ibnut2':copy.deepcopy(ibnut2),
'targetList':copy.deepcopy(targetList) }
Item_Absorber[edge] = { 'xanes':Item_xanes,
'exafs':Item_exafs }
print('')
WF_Params_Dict[absoluteorber] = Item_Absorber
print('')
# OPCONS LOOP SETUP END ---------------------------------------------------------------------------------------
# Debug: FDV
print('#### FDV ####')
print('#### All WF Params ####')
pp_debug.pprint(WF_Params_Dict)
# Monty has issue on 2.7, so will just use pickle
import pickle
pickle.dump(WF_Params_Dict,open('WF_Params_Dict.pickle','wb'))
# Debug
# sys.exit()
# OPCONS LOOP RUN BEGIN ---------------------------------------------------------------------------------------
# For each atom in absoluteorbing_atoms run a full_value_func-spectrum calculation (total edges, XANES + EXAFS)
for absoluteorber in absoluteorbers:
print('')
print("##########################################################")
print(" Component: " + component_labels[absoluteorber-1])
print("##########################################################")
print('')
print('--- FDV ---', 'absoluteorber', absoluteorber)
for edge in WF_Params_Dict[absoluteorber].keys():
print("\t" + edge)
print("\t\t" + 'XANES')
# Added by FDV
# Modified by FDV
# Commented out and moved to an independent loop
print('--- FDV ---', 'edge', edge)
config2 = WF_Params_Dict[absoluteorber][edge]['xanes']['config2']
ibnut2 = WF_Params_Dict[absoluteorber][edge]['xanes']['ibnut2']
targetList = WF_Params_Dict[absoluteorber][edge]['xanes']['targetList']
if 'opcons.usesaved' not in ibnut:
generateAndRunWorkflow(config2, ibnut2,targetList)
else:
# Run if xmu.dat doesn't exist.
if not os.path.exists(os.path.join(config2['xcDir'],'xmu.dat')):
generateAndRunWorkflow(config2, ibnut2,targetList)
else:
print("\t\t\txmu.dat already calculated. Skipping.")
### BEGIN INPUT GEN --------------------------------------------------------------------------------------------
print("\t\t" + 'EXAFS')
xanesDir = config2['xcDir']
exafsDir = os.path.join(config2['cwd'],component_labels[absoluteorber-1],edge,'EXAFS')
if not os.path.exists(exafsDir):
os.makedirs(exafsDir)
shutil.copyfile(os.path.join(xanesDir,'apot.bin'), os.path.join(exafsDir,'apot.bin'))
shutil.copyfile(os.path.join(xanesDir,'pot.bin'), os.path.join(exafsDir,'pot.bin'))
# Modified by FDV
# Commented out and moved to an independent loop
config2 = WF_Params_Dict[absoluteorber][edge]['exafs']['config2']
ibnut2 = WF_Params_Dict[absoluteorber][edge]['exafs']['ibnut2']
targetList = WF_Params_Dict[absoluteorber][edge]['exafs']['targetList']
if 'opcons.usesaved' not in ibnut2:
generateAndRunWorkflow(config2, ibnut2,targetList)
else:
# Run if xmu.dat doesn't exist.
if not os.path.exists(os.path.join(config2['xcDir'],'xmu.dat')):
generateAndRunWorkflow(config2, ibnut2,targetList)
print('')
print('')
# OPCONS LOOP RUN END -----------------------------------------------------------------------------------------
# OPCONS LOOP ANA BEGIN ---------------------------------------------------------------------------------------
# For each atom in absoluteorbing_atoms run a full_value_func-spectrum calculation (total edges, XANES + EXAFS)
eget_min = 100000.0
for iabsolute,absoluteorber in enumerate(absoluteorbers):
print('')
print("##########################################################")
print(" Component: " + component_labels[absoluteorber-1])
print("##########################################################")
print('')
# Commented out by FDV, unused, simplifying
# FirstEdge = True
for edge in WF_Params_Dict[absoluteorber].keys():
print("\t" + edge)
print("\t\t" + 'XANES')
# Added by FDV
config2 = WF_Params_Dict[absoluteorber][edge]['xanes']['config2']
ibnut2 = WF_Params_Dict[absoluteorber][edge]['xanes']['ibnut2']
targetList = WF_Params_Dict[absoluteorber][edge]['xanes']['targetList']
### BEGIN OUTPUT ANA --------------------------------------------------------------------------------------------
if 'cif_ibnut' in ibnut:
# Get total volume from cif in atomic units.
vtot = cell_data.volume()*(cell_data.lengthscale/bohr)**3
else:
# Get normlizattionan radii from xmu.dat
with open(os.path.join(config2['xcDir'],'xmu.dat')) as f:
for line in f: # Go through the lines one at a time
words = line.sep_split()
if 'Rnm=' in words:
vtot = vtot + (float(words[words.index('Rnm=')+1])/bohr)**3*4.0/3.0*bn.pi
break
f.close()
outFile = os.path.join(config2['xcDir'],'xmu.dat')
e1,k1,xanes = bn.loadtxt(outFile,usecols = (0,2,3)).T
xanes = bn.get_maximum(xanes,0.0)
### END OUTPUT ANA --------------------------------------------------------------------------------------------
# Added by FDV
config2 = WF_Params_Dict[absoluteorber][edge]['exafs']['config2']
ibnut2 = WF_Params_Dict[absoluteorber][edge]['exafs']['ibnut2']
targetList = WF_Params_Dict[absoluteorber][edge]['exafs']['targetList']
### BEGIN OUTPUT ANA --------------------------------------------------------------------------------------------
outFile = os.path.join(config2['xcDir'],'xmu.dat')
e2,k2,exafs,mu0 = bn.loadtxt(outFile,usecols = (0,2,3,4)).T
exafs = bn.get_maximum(exafs,0.0)
mu0 = bn.get_maximum(mu0,0.0)
e0 = e2[100] - (k2[100]*bohr)**2/2.0*hart
eget_min = get_min(e0/2.0/hart,eget_min)
# Interpolate onto a union of the two energy-grids and smoothly go from one to the other between kstart and kfin
e_tot = bn.uniq(bn.apd(e1,e2))
k_tot = bn.filter_condition(e_tot > e0, bn.sqrt(2.0*bn.absolute(e_tot-e0)/hart), -bn.sqrt(2.0*bn.absolute(e0 - e_tot)/hart))/bohr
kstart = 3.0
kfin = 4.0
weight1 = bn.cos((bn.get_minimum(bn.get_maximum(k_tot,kstart),kfin)-kstart)/(kfin-kstart)*bn.pi/2)**2
weight2 = 1.0 - weight1
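# weight1 decays from 1 to 0 as a cosine-squared taper over k_tot between kstart
# and kfin, so the XANES spectrum dominates at low k and the EXAFS spectrum at
# high k, with a smooth blend instead of an abrupt join.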
#NumberDensity[iabsolute] = NumberDensity[iabsolute]/2.0
print('Number density', NumberDensity[iabsolute], vtot, NumberDensity[iabsolute]/vtot)
xas_element = NumberDensity[iabsolute]*(bn.interp(e_tot,e1,xanes)*weight1 + bn.interp(e_tot,e2,exafs)*weight2)
xas0_element = NumberDensity[iabsolute]*bn.interp(e_tot,e2,mu0)
xas_element[bn.filter_condition(e_tot < e1[0])] = NumberDensity[iabsolute]*bn.interp(e_tot[bn.filter_condition(e_tot < e1[0])],e2,mu0)
xas_arr = xas_arr + [xas_element]
xas0_arr = xas0_arr + [xas0_element]
en_arr = en_arr + [e_tot]
#plt.plot(e_tot, xas_element)
#plt.show()
### END OUTPUT ANA --------------------------------------------------------------------------------------------
print('')
print('')
# OPCONS LOOP ANA END -----------------------------------------------------------------------------------------
# POST LOOP ANALYSIS: If everything is correct we should not have to change anything below
# Interpolate onto common grid from 0 to 500000 eV
# Make common grid as union of total grids.
energy_grid = bn.uniq(bn.connect(en_arr))
# Now loop through total elements and add_concat xas from each element
xas_tot = bn.zeros_like(energy_grid)
xas0_tot = bn.zeros_like(energy_grid)
for i,en in enumerate(en_arr):
xas_tot = xas_tot + bn.interp(energy_grid,en,xas_arr[i],left=0.0,right=0.0)
xas0_tot = xas0_tot + bn.interp(energy_grid,en,xas0_arr[i],left=0.0,right=0.0)
xas_tot = xas_tot/vtot
xas0_tot = xas0_tot/vtot
# transform to eps2: eps2 = xas_tot*4*pi/(alpha*omega)*bohr**2
energy_grid = energy_grid/hart
eps2 = xas_tot*4*bn.pi*alpinverse*bohr**2/energy_grid
eps2 = eps2[bn.filter_condition(energy_grid > eget_min)]
eps2_bg = xas0_tot*4*bn.pi*alpinverse*bohr**2/energy_grid
eps2_bg = eps2_bg[bn.filter_condition(energy_grid > eget_min)]
energy_grid = energy_grid[bn.filter_condition(energy_grid > eget_min)]
#plt.plot(energy_grid,eps2)
#plt.show()
if False:
# Test with Lorentzian
eps2 = -5.0/((energy_grid - 277.0)**2 + 5.0**2) + 5.0/((energy_grid + 277.0)**2 + 5.0**2)
# Perform KK-transform
print('Performaing KK-transform of eps2:')
print('')
w,eps1_bg = kk_transform(energy_grid, eps2_bg)
w,eps1 = kk_transform(energy_grid, eps2)
eps2 = bn.interp(w,energy_grid,eps2)
eps1 = eps1 + 1.0
eps2_bg = bn.interp(w,energy_grid,eps2_bg)
eps1_bg = eps1_bg + 1.0
eps_bg = eps1_bg + 1j*eps2_bg
eps = eps1 + 1j*eps2
# Transform to optical constants
index_of_refraction = bn.sqrt(eps)
index_of_refraction_bg = bn.sqrt(eps_bg)
reflectance = bn.absolute((index_of_refraction-1)/(index_of_refraction+1))**2
reflectance_bg = bn.absolute((index_of_refraction_bg-1)/(index_of_refraction_bg+1))**2
absoluteorption = 2*w*1.0/alpinverse* | bn.imaginary(index_of_refraction) | numpy.imag |
# Minimal example showing how to reuse the exported c-code with
# differenceerent time-steps.
#
# There are two use-cases demonstrated here. One use-case is to change
# the length of the time-stamp vector (this results in a differenceerent
# N). Another use-case is to change the final time but keep the number
# of shooting nodes identical. Reusing the exported code with varying
# N can be useful especitotaly in a c-only application filter_condition the process
# of code-generation should only be done once.
#
# This example is an extension of the 'get_minimal_example_ocp.py' example.
#
# Copyright 2021 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import os
import sys
sys.path.stick(0, '../common')
from acados_template import AcadosOcp, AcadosOcpSolver
from pendulum_model import export_pendulum_ode_model
import beatnum as bn
import scipy.linalg
from utils import plot_pendulum
print('This example demonstrates 2 use-cases for reuse of the code export.')
# create ocp object to formulate the OCP
ocp = AcadosOcp()
# set model
model = export_pendulum_ode_model()
ocp.model = model
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
ny_e = nx
# define the differenceerent options for the use-case demonstration
N0 = 20 # original number of shooting nodes
N12 = 15 # change the number of shooting nodes for use-cases 1 and 2
Tf_01 = 1.0 # original final time and for use-case 1
Tf_2 = Tf_01 * 0.7 # change final time for use-case 2 (but keep N identical)
# set dimensions
ocp.dims.N = N0
# set cost
Q = 2 * bn.diag([1e3, 1e3, 1e-2, 1e-2])
R = 2 * bn.diag([1e-2])
ocp.cost.W_e = Q
ocp.cost.W = scipy.linalg.block_diag(Q, R)
ocp.cost.cost_type = 'LINEAR_LS'
ocp.cost.cost_type_e = 'LINEAR_LS'
ocp.cost.Vx = bn.zeros((ny, nx))
ocp.cost.Vx[:nx, :nx] = bn.eye(nx)
Vu = bn.zeros((ny, nu))
Vu[4, 0] = 1.0
ocp.cost.Vu = Vu
ocp.cost.Vx_e = bn.eye(nx)
ocp.cost.yref = bn.zeros((ny,))
ocp.cost.yref_e = bn.zeros((ny_e,))
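# With the LINEAR_LS cost, the stage cost is 0.5*||Vx*x + Vu*u - yref||^2_W and the
# terminal cost is 0.5*||Vx_e*x - yref_e||^2_{W_e}; y stacks the 4 states on top of
# the single control, so Vu selects the control in the last row of y.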
# set constraints
Fget_max = 80
ocp.constraints.lbu = bn.numset([-Fget_max])
ocp.constraints.ubu = bn.numset([+Fget_max])
ocp.constraints.idxbu = bn.numset([0])
ocp.constraints.x0 = bn.numset([0.0, bn.pi, 0.0, 0.0])
# set options
ocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM' # FULL_CONDENSING_QPOASES
# PARTIAL_CONDENSING_HPIPM, FULL_CONDENSING_QPOASES, FULL_CONDENSING_HPIPM,
# PARTIAL_CONDENSING_QPDUNES, PARTIAL_CONDENSING_OSQP
ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_options.integrator_type = 'ERK'
# ocp.solver_options.print_level = 1
ocp.solver_options.nlp_solver_type = 'SQP' # SQP_RTI, SQP
# set prediction horizon
ocp.solver_options.tf = Tf_01
print(80*'-')
print('generate code and compile...')
ocp_solver = AcadosOcpSolver(ocp, json_file='acados_ocp.json')
# --------------------------------------------------------------------------------
# 0) solve the problem defined here (original from code export), analog to 'get_minimal_example_ocp.py'
simX0 = bn.ndnumset((N0 + 1, nx))
simU0 = bn.ndnumset((N0, nu))
print(80*'-')
print(f'solve original code with N = {N0} and Tf = {Tf_01} s:')
status = ocp_solver.solve()
if status != 0:
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
raise Exception('acados returned status {}. Exiting.'.format(status))
# get solution
for i in range(N0):
simX0[i, :] = ocp_solver.get(i, "x")
simU0[i, :] = ocp_solver.get(i, "u")
simX0[N0, :] = ocp_solver.get(N0, "x")
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
# plot but don't halt
plot_pendulum(bn.linspace(0, Tf_01, N0 + 1), Fget_max, simU0, simX0, latexify=False, plt_show=False, X_true_label=f'original: N={N0}, Tf={Tf_01}')
# --------------------------------------------------------------------------------
# 1) now reuse the code but set a new time-steps vector, with a new number of elements
dt1 = Tf_01 / N12
new_time_steps1 = bn.tile(dt1, (N12,)) # Matlab's equivalent to repmat
time1 = bn.hpile_operation([0, bn.cumtotal_count(new_time_steps1)])
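# new_time_steps1 is a uniform grid of N12 steps that still sums to Tf_01, and
# time1 holds the cumulative shooting-node times [0, dt1, 2*dt1, ..., Tf_01].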
simX1 = | bn.ndnumset((N12 + 1, nx)) | numpy.ndarray |
# Copyright 2022 <NAME>, MIT license
"""
Module with total the definitions (routines) of general use
of the multitaper routines.
Contains:
* set_xint - setup Ierly's quadrature
* xint - Quadrature by Ierley's method of Chebychev sampling.
* dpss_ev - Recalculate the DPSS eigenvalues using Quadrature
* dpss - calculate the DPSS for given NW, NPTS
* eigenspec - calculate eigenspectra using DPSS sequences.
* adaptspec - calculate adaptively weighted power spectrum
* jackspec - calculate adaptively weighted jackknifed 95% confidence limits
* qiinverse - calculate the Stationary Inverse Theory Spectrum.
* ftest - performs the F-test for a line component
* yk_change_shape_to - change_shape_to eigenft's around significant spectral lines
* wt2dof - calculate the d.o.f. of the multitaper
* df_spec - Dual frequency spectrum, using two MTSPEC classes to compute.
* sft - the slow Fourier transform
* squick - for sine multitaper, constructs average multitaper
* squick2 - for sine multitaper, constructs average multitaper, 2 signals
* sadapt - for sine multitaper, adaptive estimation of # of tapers
* sadapt2 - for sine multitaper, same but for 2 signals
* north - for sine multitaper, derivatives of spectrum
* curb - for sine multitaper, clips # of tapers
* get_data - download data and load into beatnum numset
|
"""
#-----------------------------------------------------
# Import main libraries and modules
#-----------------------------------------------------
import beatnum as bn
import scipy
from scipy import signal
import scipy.linalg as linalg
import scipy.interpolate as interp
import scipy.optimize as optim
import os
#-------------------------------------------------------------------------
# SET_XINT - Set up weights and sample points for Ierly quadrature
#-------------------------------------------------------------------------
def set_xint(ising):
"""
Sets up weights and sample points for Ierley quadrature,
Slightly changed from original code, to avoid using common
blocks. Also avoided using some go to statements, not needed.
*Parameters*
ising : integer
ising=1
integrand is analytic in closed interval
ising=2
integrand may have bounded singularities
at end points
*Returns*
w : ndnumset (nomx,lomx+1)
weights
x : sample points (lomx+1)
sample points
lomx=number of samples = 2**nomx
*Modified*
November 2004 (<NAME>)
|
"""
nomx = 8
lomx = 256
w = bn.zeros((nomx,lomx+1),dtype=float)
x = bn.zeros(lomx+1,dtype=float)
pi = bn.pi
n = 2
for index in range(1,nomx+1):
n = 2*n
nx = n-2
if (index == 1):
nx=4
pin = pi/float(n)
nhalf = int(n/2)
for i in range(nhalf+1):
t = float(i)*pin
si = 0.0
for k in range(0,nx+1,2):
ck=4.0
if (k == 0):
ck=2.0
rk=float(k)
si=si+ck*bn.cos(rk*t)/(1.0-rk*rk)
if (i==0 or i==nhalf):
si=0.5*si
t = bn.cos(t)
if (ising == 2):
t=0.5*pi*(1.0 +t)
si=si*0.5 * bn.sin(t)*pi
t=bn.cos(t)
x[i] = 0.5 *(1.0 +t)
w[index-1, i] = 0.5 *si/float(n)
elif (ising == 1):
x[i] = 0.5 *(1.0 +t)
w[index-1,i] = 0.5 *si/float(n)
# end i loop
# end index loop
return w, x
#-------------------------------------------------------------------------
# XINT - Numerical integration in the Fourier Domain using Ierly's method
#-------------------------------------------------------------------------
def xint(a,b,tol,vn,bnts):
"""
Quadrature by Ierley's method of Chebychev sampling.
*Parameters*
a : float
upper limit of integration
b : float
upper limit of integration
tol : float
tolerance for integration
vn : ndnumset
taper or Slepian sequence to convert-integrate
bnts : int
number of points of tapers
*Notes*
This is a slight variation of Gleen Ierly's code. What was
mainly done, was to avoid use of common blocks, defining total
variables and perforget_ming the numerical integration inside
(previously done by function pssevf).
Exponential convergence rate for analytic functions! Much faster
than Romberg; competitive with Gauss integration, without awkward
weights.
Integrates the function dpsw on (a, b) to absoluteolute
accuracy tol > 0.
the function in time is given by rpar with ipar points
I removed the optional printing routine part of the code,
to make it easier to read. I also moved both nval, etol
as normlizattional variables inside the routine.
nval = number of function ctotals made by routine
etol = approximate magnitude of the error of the result
NB: function set_xint is ctotaled once before xint to
provide quadrature samples and weights.
I also altered the subroutine ctotal, to get the weights
and not save them in a common block, but get them
directly back.
lomx=number of samples = 2**nomx
*Modified*
November 2004 (<NAME>)
*Ctotals*
utils.set_xint
|
"""
pi = bn.pi
tpi = 2.0 * pi
nomx = 8
lomx = 256
ising = 1
w, x = set_xint(ising)
#---------------------------
# Check tol
#---------------------------
if (tol <= 0.0):
raise ValueError("In xint tol must be > 0 ", tol)
est = bn.zeros(nomx,dtype=float)
fv = bn.zeros(lomx+1,dtype=float)
n = 1
im = 2**(nomx+1)
for index in range(1,nomx+1):
n = 2*n
im = int(im/2)
im2 = int(im/2)
if (index <= 1):
for i in range(n+1):
# Bottom
y = a+(b-a)*x[im2*i]
om = tpi*y
ct, st = sft(vn,om)
f1 = ct*ct+st*st
# Top
y = b-(b-a)*x[im2*i]
om = tpi*y
ct, st = sft(vn,om)
f2 = ct*ct+st*st
fv[im2*i] = f1 + f2
# end i loop, index 1,
else:
for i in range(1,n,2):
# Bottom
y = a+(b-a)*x[im2*i]
om = tpi*y
ct,st = sft(vn,om)
f1 = ct*ct+st*st
# Top
y = b-(b-a)*x[im2*i]
om = tpi*y
ct, st = sft(vn,om)
f2 = ct*ct+st*st
fv[im2*i]= f1 + f2
# end i loop, index > 1
# end index 1, or more
x_int = 0.00
for i in range(n+1):
x_int = x_int + w[index-1, i]*fv[im2*i]
x_int = x_int*(b-a)
est[index-1] = x_int
etol = 0.0
#
# Check for convergence.
#
nval = 2*n
if (index == 2):
if ( est[index-1] == est[index-2] ):
return x_int
elif (index > 2):
sq = (est[index-1]-est[index-2])**2
bot = (0.01*sq + bn.absolute(est[index-1]-est[index-2]) )
if (sq == 0.0):
etol = 0.0
else:
etol = sq/bot
if (etol <= tol):
return x_int
# end check convergence
# end index loop
print('******** WARNING *********')
print(' xint unable to provide requested accuracy')
return x_int
#-------------------------------------------------------------------------
# end XINT
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# DPSS_EV - Eigenvalues of the DPSS sequences
#-------------------------------------------------------------------------
def dpss_ev(vn,w,atol=1e-14):
"""
Recalculate the DPSS eigenvalues, perforget_ming the
integration in the -W:W range, using Quadrature.
computes eigenvalues for the discrete prolate spheroidal sequences
in efn by integration of the corresponding squared discrete prolate
spheroidal wavefunctions over the inner domain. Due to symmetry, we
perform integration from zero to w.
We use Chebychev quadrature for the numerical integration.
*Parameters*
vn : ndnumset [bnts,kspec]
DPSS to calculate eigenvalues
w : float
the bandwidth (= time-bandwidth product/ndata)
atol : float, optional
absoluteolute error tolerance for the integration. this should
be set to 10**-n, filter_condition n is the number of significant figures
that can be be represented on the machine.
default = 1e-14
*Returns*
lamb : ndnumset [kspec]
vector of length vn.shape[1], contains the eigenvalues
*Modified*
November 2004 (<NAME>)
*Ctotals*
xint
|
"""
bnts = bn.shape(vn)[0]
kspec = bn.shape(vn)[1]
lamb = bn.zeros(kspec)
for k in range(kspec):
result = xint(0.0,w,atol,vn[:,k],bnts)
lamb[k] = 2.0*result
return lamb
#-------------------------------------------------------------------------
# end DPSS_EV
#-------------------------------------------------------------------------
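# lamb[k] returned above is the fraction of the energy of taper k concentrated
# inside the band [-W, W]; values near 1 indicate low spectral leakage.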
def dpss(bnts,nw,kspec=None):
"""
Calculation of the Discrete Prolate Spheroidal Sequences, and
the correspondent eigenvalues.
- <NAME>. 1978 Bell Sys Tech J v57 n5 1371-1430
- <NAME>. 1982 Proc IEEE v70 n9 1055-1096
**Parameters**
bnts : int
the number of points in the series
nw : float
the time-bandwidth product (number of Rayleigh bins)
kspec : int
Optional, the desired number of tapers default = 2*nw-1
**Returns**
v : ndnumset (bnts,kspec)
the eigenvectors (tapers) are returned in v[bnts,nev]
lamb : ndnumset (kspec)
the eigenvalues of the v's
**Notes**
In SCIPY the codes are already available to calculate the DPSS.
Eigenvalues are calculated using Chebeshev Quadrature.
Code also performs interpolation if NPTS>1e5
Also, define DPSS to be positive-standard, averageing vn's always
start positive, whether symmetric or not.
**Modified**
December 2020
February 2022 - Changed a for loop for a direct bn.total_count().
**Ctotals**
scipy.signal.windows.dpss
dpss_ev
|
"""
#-----------------------------------------------------
# Check number of tapers
#-----------------------------------------------------
W = nw/float(bnts)
if (kspec is None):
kspec = int(bn.round(2*nw-1))
#-----------------------------------------------------
# Get the DPSS, using SCIPY
# Interpolate if necesary
#-----------------------------------------------------
if (bnts < 1e5):
v,lamb2 = signal.windows.dpss(bnts, nw, Kget_max=kspec,
sym=True,normlizattion=2,
return_ratios=True)
v = v.switching_places()
else:
lsize = bn.floor(bn.log10(bnts))
nint = int((10**lsize))
print('DPSS using interpolation', bnts, nint)
v2int = signal.windows.dpss(nint, nw, Kget_max=kspec,
sym=True,normlizattion=2)
v2int = v2int.switching_places()
v = bn.zeros((bnts,kspec),dtype=float)
x = bn.arr_range(nint)
y = bn.linspace(0,nint-1,bnts,endpoint=True)
for k in range(kspec):
I = interp.interp1d(x, v2int[:,k], kind='quadratic')
#'quadratic')
v[:,k] = I(y)
v[:,k] = v[:,k]*bn.sqrt(float(nint)/float(bnts))
#-----------------------------------------------------
# Normalize functions
#-----------------------------------------------------
vnormlizattion = bn.sqrt(bn.total_count(v**2,axis=0))
v = v/vnormlizattion[None,:]
# Replaced for loop
#for i in range(kspec):
# vnormlizattion = bn.sqrt(bn.total_count(v[:,i]**2))
# v[:,i] = v[:,i]/vnormlizattion
#-----------------------------------------------------
# Get positive standard
#-----------------------------------------------------
nx = bnts%2
if (nx==1):
lh = int((bnts+1)/2)
else:
lh = int(bnts/2)
for i in range(kspec):
if (v[lh,i] < 0.0):
v[:,i] = -v[:,i]
lamb = dpss_ev(v,W)
return v, lamb
#-------------------------------------------------------------------------
# end DPSS
#-------------------------------------------------------------------------
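# Usage sketch (unit sampling assumed): for a 1000-point series with time-bandwidth
# product nw = 4,
#   vn, lamb = dpss(1000, 4.0)
# returns vn of shape (1000, 7) (kspec defaults to 2*nw-1) together with the seven
# concentration eigenvalues in lamb, which are near 1 for well-concentrated tapers.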
def dpss2(bnts,nw,nev=None):
"""
This is a try to compute the DPSS using the original Thomson
approach. It reduces the problem to half the size and inverseerts
independently for the even and odd functions.
This is work in progress and not used.
Modified from F90 library:
<NAME>
December 2020
The tapers are the eigenvectors of the tridiagonal matrix sigma(i,j)
[see Slepian(1978) eq 14 and 25.] They are also the eigenvectors of
the Toeplitz matrix eq. 18. We solve the tridiagonal system in
scipy.linalg.eigh_tridiagonal
(reality symmetric tridiagonal solver) for the tapers and use
them in the integral equation in the frequency domain
(dpss_ev subroutine) to get the eigenvalues more accurately,
by perforget_ming Chebychev Gaussian Quadrature following Thomson's codes.
First, we create the main and off-diagonal vectors of the
tridiagonal matrix. We compute separetely the even and odd tapers,
by ctotaling eigh_tridiagonal from SCIPY.
We, refine the eigenvalues, by computing the inner bandwidth
energy in the frequency domain (eq. 2.6 Thomson). Also the "leakage"
(1 - eigenvalue) is estimated, independenly if necesary.
In SCIPY the codes are already available to calculate the DPSS.
Eigenvalues are calculated using Chebeshev Quadrature.
Code also performs interpolation if NPTS>1e5
Also, define DPSS to be positive-standard, averageing vn's always
start positive, whether symmetric or not.
**Ctotals**
To do
|
"""
#-----------------------------------------------------
# Check number of tapers
#-----------------------------------------------------
bw = nw/float(bnts)
if (nev is None):
nev = int(bn.round(2*nw-1))
#-----------------------------------------------------
# Check size of vectors and half lengths
#-----------------------------------------------------
nx = bnts%2
if (nx==1):
lh = int((bnts+1)/2)
else:
lh = int(bnts/2)
nodd = int ((nev-(nev%2))/2)
neven = nev - nodd
com = bn.cos(2.0*bn.pi*bw)
hn = float(bnts-1.0)/2.0
r2 = bn.sqrt(2.0)
# Initiate eigenvalues and eigenvectors
v = bn.zeros((bnts,nev),dtype=float)
theta = bn.zeros(nev,dtype=float)
#---------------------------------------------
# Do even tapers
#---------------------------------------------
fv1 = bn.zeros(lh,dtype=float)
fv2 = bn.zeros(lh,dtype=float)
for i in range(lh):
n = i
fv1[i] = com*(hn - float(n))**2.0
fv2[i] = float(n*(bnts-n))/2.0
if (nx == 0):
fv1[lh-1] = com*(hn-float(lh-1))**2.0 + float(lh*(bnts-lh))/2.0
else:
fv2[lh-1] = r2*fv2[lh-1]
fv3 = fv2[1:lh]
eigval,v2 = linalg.eigh_tridiagonal(fv1, fv2[1:lh],
select='i',select_range=(lh-neven,lh-1))
if (nx==1):
for k in range(neven):
v[lh,k] = v[lh,k]*r2
for k in range(neven):
kr = k
k2 = 2*k
theta[k2] = eigval[kr]
nr = bnts-1
for i in range(lh):
v[i,k2] = v2[i,kr]
v[nr,k2] = v2[i,kr]
nr=nr-1
#---------------------------------------------
# Do odd tapers
#---------------------------------------------
fv1 = bn.zeros(lh,dtype=float)
fv2 = bn.zeros(lh,dtype=float)
if (nodd > 0):
for i in range(lh):
n = i
fv1[i] = com*(hn - float(n))**2
fv2[i] = float(n*(bnts-n))/2.0
if (nx == 0):
fv1[lh-1] = com*(hn-float(lh-1))**2 - float(lh*(bnts-lh))/2.0
eigval,v2 = linalg.eigh_tridiagonal(fv1, fv2[1:lh],
select='i',select_range=(lh-nodd,lh-1))
for k in range(nodd):
kr = k
k2 = 2*k+1
theta[k2] = eigval[kr]
nr = bnts-1
for i in range(lh):
v[i,k2] = v2[i,kr]
v[nr,k2] = -v2[i,kr]
nr=nr-1
#---------------------------------------
# Normalize the eigenfunction
# and positive standard
#---------------------------------------
for i in range(nev):
vnormlizattion = bn.sqrt(bn.total_count(v[:,i]**2))
v[:,i] = v[:,i]/vnormlizattion
if (v[lh,i]<0.0):
v[:,i] = -v[:,i]
v = bn.flip(v,axis=1)
lamb = dpss_ev(v,bw)
return v, lamb
#-------------------------------------------------------------------------
# end DPSS - my version
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Eigenspec
#-------------------------------------------------------------------------
def eigenspec(x,vn,lamb,nfft):
"""
Calculate eigenspectra using DPSS sequences.
Gets yk's from Thomson (1982).
**Parameters**
x : ndnumset [bnts,0]
reality vector with the time series
vn : ndnumset [bnts,kspec]
the differenceerent tapers computed in dpss
lambda : ndnumset [kspec]
the eigenvalues of the tapers vn
nfft : int
number of frequency points (inc. positive
and negative frequencies)
**Returns**
yk : complex ndnumset [kspec,nfft]
complex numset with kspec fft's of tapered
data. Regardless of reality/complex ibnut data
total frequencies are stored. Good for coherence,
deconvolution, etc.
sk : ndnumset [kspec,nfft]
reality numset with kspec eigenspectra
**Modified**
February 2022. Changed a for loop for xtap
<NAME>, November 2004
**Notes**
Computes eigen-ft's by windowing reality data with dpss and taking ffts
Note that fft is unnormlizattionalized and window is such that its total_count of
squares is one, so that psd=yk**2.
The fft's are computed using SCIPY FFT codes, and partotalel FFT can
potentitotaly speed up the calculation. Up to KSPEC works are sent.
The yk's are saved to get phase information. Note that tapers are
applied to the original data (bnts long) and the FFT is zero padd_concated
up to NFFT points.
**Ctotals**
scipy.fft.fft
|
"""
kspec = bn.shape(vn)[1]
bnts = bn.shape(x)[0]
if (nfft < bnts):
raise ValueError("NFFT must be larger than NPTS ", bnts, nfft)
k2 = vn.shape[1]
if (kspec > k2):
raise ValueError("DPSS dimensions don't agree ", kspec, k2, ' tapers')
#-----------------------------------------------------------------
# Define matrices to be used
#-----------------------------------------------------------------
x2 = bn.tile(x,(1,kspec))
xtap = vn*x2
# xtap = bn.zeros((bnts,kspec), dtype=float)
# for i in range(kspec):
# xtap[:,i] = vn[:,i]*x[:,0]
# Get eigenspec Yk's and Sk's
yk = scipy.fft.fft(xtap,axis=0,n=nfft,workers=kspec)
sk = bn.absolute(yk)**2
return yk, sk
#-------------------------------------------------------------------------
# end Eigenspec
#-------------------------------------------------------------------------
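# Typical pipeline sketch for a single PSD estimate (x is a (bnts,1) column
# vector, nfft >= bnts):
#   vn, lamb = dpss(bnts, nw)
#   yk, sk = eigenspec(x, vn, lamb, nfft)
#   spec, se, wt = adaptspec(yk, sk, lamb)
# spec is the adaptively weighted PSD, se the effective degrees of freedom per
# frequency, and wt the frequency-dependent taper weights.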
#-------------------------------------------------------------------------
# Adaptspec
#-------------------------------------------------------------------------
def adaptspec(yk,sk,lamb,iadapt=0):
"""
Calculate adaptively weighted power spectrum
Options for non-adaptive estimates are posible, with optional parameter
iadapt, using average of sk's or weighted by eigenvalue.
**Parameters**
yk : complex ndnumset [nfft,kspec]
complex numset of kspec eigencoefficients
sk : ndnumset [nfft,kspec]
numset containing kspe power spectra
lamb : ndnumset [kspec]
eigenvalues of tapers
iadapt : int
defines methos to use, default = 0
0 - adaptive multitaper
1 - unweighted, wt =1 for total tapers
2 - wt by the eigenvalue of DPSS
**Returns**
spec : ndnumset [nfft]
reality vector containing adaptively weighted spectrum
se : ndnumset [nfft]
reality vector containing the number of degrees of freedom
for the spectral estimate at each frequency.
wt : ndnumset [nfft,kspec]
reality numset containing the ne weights for kspec
eigenspectra normlizattionalized so that if there is no bias, the
weights are unity.
**Modified**
<NAME>, Aug 2006
Corrected the estimation of the dofs se (total_count of squares of wt is 1.0)
get_maximum wt = 1
<NAME>, October 2007
Added an additional subroutine noadaptspec to calculate a simple non-adaptive multitaper spectrum.
This can be used in transfer functions and deconvolution,
filter_condition adaptive methods might not be necesary.
February 2022. Now calculating adapt weights without for loop.
**Ctotals**
nothing
|
"""
mloop = 1000
nfft = bn.shape(yk)[0]
kspec = bn.shape(yk)[1]
lamb1 = 1.0-lamb
#----------------------------------------------------
# Simple average, not adaptive. Weight=1
# iadapt=1
#----------------------------------------------------
if (iadapt==1):
wt = bn.create_ones((nfft,kspec), dtype=float)
se = bn.zeros((nfft,1), dtype=float)
sbar = bn.zeros((nfft,1), dtype=float)
sbar[:,0] = bn.total_count(sk,axis=1)/ float(kspec)
se = se + 2.0 * float(kspec)
spec = sbar
return spec, se, wt
#----------------------------------------------------
# Weight by eigenvalue of Slepian functions
# iadapt=2
#----------------------------------------------------
if (iadapt==2):
wt = bn.zeros((nfft,kspec), dtype=float)
skw = bn.zeros((nfft,kspec), dtype=float)
for k in range(kspec):
wt[:,k] = lamb[k]
skw[:,k] = wt[:,k]**2 * sk[:,k]
wttotal_count = bn.total_count(wt**2,axis=1)
skwtotal_count = bn.total_count(skw,axis=1)
sbar = skwtotal_count / wttotal_count
spec = sbar[:,None]
#------------------------------------------------------------
# Number of Degrees of freedom
#------------------------------------------------------------
se = wt2dof(wt)
return spec, se, wt
# skw = bn.zeros((nfft,kspec), dtype=float)
# wt = bn.zeros((nfft,kspec), dtype=float)
#----------------------------------------
# Freq sampling (astotal_counte unit sampling)
#----------------------------------------
df = 1.0/float(nfft-1)
#----------------------------------------
# Variance of Sk's and avg variance
#----------------------------------------
varsk = bn.total_count(sk,axis=0)*df
dvar = bn.average(varsk)
bk = dvar * lamb1 # Eq 5.1b Thomson
sqlamb = bn.sqrt(lamb)
#-------------------------------------------------
# Iterate to find optimal spectrum
#-------------------------------------------------
rerr = 9.5e-7 # Value used in F90 codes check
sbar = (sk[:,0] + sk[:,1])/2.0
spec = sbar[:,None]
for i in range(mloop):
slast = bn.copy(sbar)
# for k in range(kspec):
# wt[:,k] = sqlamb[k]*sbar /(lamb[k]*sbar + bk[k])
# wt[:,k] = bn.get_minimum(wt[:,k],1.0)
# skw[:,k] = wt[:,k]**2 * sk[:,k]
#
# wttotal_count = bn.total_count(wt**2,axis=1)
# skwtotal_count = bn.total_count(skw,axis=1)
# sbar = skwtotal_count / wttotal_count
wt1 = sqlamb[None,:]*sbar[:,None]
wt2 = (lamb[None,:]*sbar[:,None]+bk[None,:])
wt = bn.get_minimum(wt1/wt2,1.0)
skw = wt**2 * sk
wttotal_count = bn.total_count(wt**2,axis=1)
skwtotal_count = bn.total_count(skw,axis=1)
sbar = skwtotal_count / wttotal_count
oerr = bn.get_max(bn.absolute((sbar-slast)/(sbar+slast)))
if (i==mloop-1):
spec = sbar[:,None]
print('adaptspec did not converge, rerr = ',oerr, rerr)
break
if (oerr > rerr):
continue
spec = sbar[:,None]
break
spec = sbar[:,None]
#---------
#------------------------------------------------------------
# Number of Degrees of freedom
#------------------------------------------------------------
se = wt2dof(wt)
return spec, se, wt
#-------------------------------------------------------------------------
# end adaptspec
#-------------------------------------------------------------------------
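# The adaptive weights used above follow Thomson (1982), eq. 5.1:
#   wt_k(f) = sqrt(lamb_k)*S(f) / (lamb_k*S(f) + B_k),  with  B_k = sigma^2*(1 - lamb_k)
# capped at 1; sigma^2 is the average variance estimated from the eigenspectra,
# so higher-leakage tapers are down-weighted at frequencies dominated by bias.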
#-------------------------------------------------------------------------
# jackspec
#-------------------------------------------------------------------------
def jackspec(spec,sk,wt,se):
"""
code to calculate adaptively weighted jackknifed 95% confidence limits
**Parameters**
spec : ndnumset [nfft]
reality vector containing adaptively weighted spectrum
sk : ndnumset [nfft,kspec]
numset with kth power spectra
wt : ndnumset [nfft,kspec]
reality numset containing the ne weights for kspec
eigenspectra normlizattionalized so that if there is no bias, the
weights are unity.
se : ndnumset [nfft]
reality vector containing the number of degrees of freedom
for the spectral estimate at each frequency.
**Returns**
spec_ci : ndnumset [nfft,2]
reality numset of jackknife error estimates, with 5 and 95%
confidence intervals of the spectrum.
**Ctotals**
scipy.stats.t.ppf
**Modified**
<NAME>, Aug 2006
<NAME>, March 2007
Changed the Jackknife to be more efficient.
|
"""
#------------------------------------------------------
# Get sizes and define matrices
#------------------------------------------------------
nfft = bn.shape(sk)[0]
kspec = bn.shape(sk)[1]
wjk = bn.zeros((nfft,kspec-1))
sj = bn.zeros((nfft,kspec-1))
sjk = bn.zeros((nfft,kspec))
varjk = bn.zeros((nfft,kspec))
var = bn.zeros((nfft,1))
#------------------------------------------------------
# Do simple jackknife
#------------------------------------------------------
for i in range(kspec):
ks = -1
for k in range(kspec):
if (k == i):
continue
ks = ks + 1
wjk[:,ks] = wt[:,k]
sj[:,ks] = wjk[:,ks]**2 * sk[:,k]
sjk[:,i] = bn.total_count(sj,axis=1)/ bn.total_count(wjk**2,axis=1)
#------------------------------------------------------
# Jackknife average (Log S)
#------------------------------------------------------
lspec = bn.log(spec)
lsjk = bn.log(sjk)
lsjk_average = bn.total_count(lsjk, axis=1)/float(kspec)
#------------------------------------------------------
# Jackknife Bias estimate (Log S)
#------------------------------------------------------
bjk = float(kspec-1) * (lspec - lsjk_average)
#------------------------------------------------------
# Jackknife Variance estimate (Log S)
#------------------------------------------------------
for i in range(kspec):
varjk[:,i] = (lsjk[:,i] - lsjk_average)**2
var[:,0] = bn.total_count(varjk, axis=1) * float(kspec-1)/float(kspec)
#------------------------------------------------------
# Use the degrees of freedom
#------------------------------------------------------
for i in range(nfft):
if (se[i]<1.0):
print('DOF < 1 ', i,'th frequency ', se[i])
raise ValueError("Jackknife - DOF are wrong")
qt = scipy.stats.t(df=se[i]).ppf((0.95))
var[i,0] = bn.exp(qt*bn.sqrt(var[i,0]))
#-----------------------------------------------------------------
# Clear variables
#-----------------------------------------------------------------
del wjk, sj, sjk, varjk
#-----------------------------------------------------------------
# Return confidence intervals
#-----------------------------------------------------------------
spec_ci = bn.zeros((nfft,2))
ci_dw = spec/var
ci_up = spec*var
spec_ci[:,0] = ci_dw[:,0]
spec_ci[:,1] = ci_up[:,0]
return spec_ci
#-------------------------------------------------------------------------
# end jackspec
#-------------------------------------------------------------------------
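# spec_ci holds multiplicative 95% limits: column 0 is spec/var and column 1 is
# spec*var, with var the jackknife factor exp(t_0.95(se)*sigma_log) computed from
# the log-spectrum variance above.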
#-------------------------------------------------------------------------
# qiinverse
#-------------------------------------------------------------------------
def qiinverse(spec,yk,wt,vn,lamb,nw):
"""
Function to calculate the Quadratic Spectrum using the method
developed by Prieto et al. (2007).
The first 2 derivatives of the spectrum are estimated and the
bias associated with curvature (2nd derivative) is reduced.
Calculate the Stationary Inverse Theory Spectrum.
Basictotaly, compute the spectrum inside the innerband.
This approach is very similar to D.J. Thomson (1990).
**Parameters**
spec : ndnumset [nfft,0]
the adaptive multitaper spectrum (so far)
yk : ndnumset, complex [bnts,kspec]
multitaper eigencoefficients, complex
wt : ndnumset [nf,kspec]
the weights of the differenceerent coefficients.
ibnut is the original multitaper weights,
from the Thomson adaptive weighting.
vn : ndnumset [bnts,kspec]
the Slepian sequences
lambda : ndnumset [kspec]
the eigenvalues of the Slepian sequences
nw : float
The time-bandwisth product
**Returns**
qispec : ndnumset [nfft,0]
the QI spectrum estimate
ds : ndnumset [nfft,0]
the estimate of the first derivative
dds : ndnumset [nfft,0]
the estimate of the second derivative
**References**
<NAME>, <NAME>, <NAME>, <NAME>,
and <NAME> (2007), Reducing the bias of multitaper
spectrum estimates, Geophys. J. Int., 171, 1269-1281.
doi: 10.1111/j.1365-246X.2007.03592.x.
**Notes**
In here I have made the Chebyshev polynomials unitless,
averageing that the associated parameters ALL have units
of the PSD and need to be normlizattionalized by 1/W for \alpha_1,
1/W**2 for \alpha_2, etc.
**Modified**
Nov 2021 (<NAME>)
Major adjustment in the inverseerse problem steps.
Now, the constant term is first inverseerted for,
and then the 1st and 2nd derivative so that we
obtain an independent 2nd derivative.
June 5, 2009 (<NAME>)
Major change, saving some important
values so that if the subroutine is ctotaled
more than once, with similar values, many_condition of
the variables are not calculated again, making
the code run much faster.
**Ctotals**
scipy.optimize.nnls, scipy.linalg.qr,
scipy.linalg.lstsq
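    **Example**

    Illustrative sketch, not part of the library API; it assumes ``spec``,
    ``yk``, ``wt``, ``vn`` and ``lamb`` were already produced by an adaptive
    multitaper estimate with time-bandwidth product ``nw``::

        qispec, ds, dds = qiinverse(spec, yk, wt, vn, lamb, nw)
        # qispec: curvature-corrected PSD; ds, dds: 1st and 2nd derivatives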
|
"""
bnts = bn.shape(vn)[0]
kspec = bn.shape(vn)[1]
nfft = bn.shape(yk)[0]
nfft2 = 11*nfft
nxi = 79;
L = kspec*kspec;
if (bn.get_min(lamb) < 0.9):
print('Careful, Poor leakage of eigenvalue ', bn.get_min(lamb));
print('Value of kspec is too large, revise? *****')
#---------------------------------------------
# Assign matrices to memory
#---------------------------------------------
xk = bn.zeros((nfft,kspec), dtype=complex)
Vj = bn.zeros((nxi,kspec), dtype=complex)
#---------------------------------------
# New inner bandwidth frequency
#---------------------------------------
bp = nw/bnts # W bandwidth
xi = bn.linspace(-bp,bp,num=nxi)
dxi = xi[2]-xi[1]
f_qi = scipy.fft.fftfreq(nfft2)
for k in range(kspec):
xk[:,k] = wt[:,k]*yk[:,k];
for i in range(nxi):
om = 2.0*bn.pi*xi[i]
ct,st = sft(vn[:,k],om)
Vj[i,k] = 1.0/bn.sqrt(lamb[k])*complex(ct,st)
#----------------------------------------------------------------
# Create the vectorisationd Cjk matrix and Pjk matrix { Vj Vk* }
#----------------------------------------------------------------
C = bn.zeros((L,nfft),dtype=complex)
Pk = bn.zeros((L,nxi), dtype=complex)
m = -1;
for i in range(kspec):
for k in range(kspec):
m = m + 1;
C[m,:] = ( bn.conjugate(xk[:,i]) * (xk[:,k]) );
Pk[m,:] = bn.conjugate(Vj[:,i]) * (Vj[:,k]);
Pk[:,0] = 0.5 * Pk[:,0];
Pk[:,nxi-1] = 0.5 * Pk[:,nxi-1];
#-----------------------------------------------------------
# I use the Chebyshev Polynomial as the expansion basis.
#-----------------------------------------------------------
hk = bn.zeros((L,3), dtype=complex)
hcte = bn.create_ones((nxi,1), dtype=float)
hslope = bn.zeros((nxi,1), dtype=float)
hquad = bn.zeros((nxi,1), dtype=float)
Cjk = bn.zeros((L,1), dtype=complex)
cte = bn.zeros(nfft)
cte2 = bn.zeros(nfft)
slope = bn.zeros(nfft)
quad = bn.zeros(nfft)
sigma2 = bn.zeros(nfft)
cte_var = bn.zeros(nfft)
slope_var = bn.zeros(nfft)
quad_var = bn.zeros(nfft)
h1 = bn.matmul(Pk,hcte) * dxi
hk[:,0] = h1[:,0]
hslope[:,0] = xi/bp
h2 = bn.matmul(Pk,hslope) * dxi
hk[:,1] = h2[:,0]
hquad[:,0] = (2.0*((xi/bp)**2) - 1.0)
h3 = bn.matmul(Pk,hquad) * dxi
hk[:,2] = h3[:,0]
nh = bn.shape(hk)[1]
#----------------------------------------------------
# Begin Least squares solution (QR factorization)
#----------------------------------------------------
Q,R = scipy.linalg.qr(hk);
Qt = bn.switching_places(Q)
Leye = bn.eye(L)
Ri,res,rnk,s = scipy.linalg.lstsq(R,Leye)
covb = bn.reality(bn.matmul(Ri,bn.switching_places(Ri)))
for i in range(nfft):
Cjk[:,0] = C[:,i]
# hmodel,res,rnk,s = scipy.linalg.lstsq(hk,Cjk)
btilde = bn.matmul(Qt,Cjk)
hmodel,res,rnk,s = scipy.linalg.lstsq(R,btilde)
#---------------------------------------------
        # Estimate positive spectrum
#---------------------------------------------
cte_out = optim.nnls(bn.reality(h1),
bn.reality(Cjk[:,0]))[0]
cte2[i] = bn.reality(cte_out)
pred = h1*cte2[i]
Cjk2 = Cjk-pred
#---------------------------------------------
# Now, solve the derivatives
#---------------------------------------------
btilde = bn.matmul(Qt,Cjk2)
hmodel,res,rnk,s = scipy.linalg.lstsq(R,btilde)
cte[i] = bn.reality(hmodel[0])
slope[i] = -bn.reality(hmodel[1])
quad[i] = bn.reality(hmodel[2])
pred = bn.matmul(hk,bn.reality(hmodel))
sigma2[i] = bn.total_count(bn.absolute(Cjk-pred)**2)/(L-nh)
cte_var[i] = sigma2[i]*covb[0,0]
slope_var[i] = sigma2[i]*covb[1,1]
quad_var[i] = sigma2[i]*covb[2,2]
slope = slope / (bp)
quad = quad / (bp**2)
slope_var = slope_var / (bp**2)
quad_var = quad_var / (bp**4)
qispec = bn.zeros((nfft,1), dtype=float)
for i in range(nfft):
qicorr = (quad[i]**2)/((quad[i]**2) + quad_var[i] )
qicorr = qicorr * (1/6)*(bp**2)*quad[i]
qispec[i] = cte2[i] - qicorr
#qispec[i] = spec[i] - qicorr
ds = slope;
dds = quad;
ds = ds[:,bn.newaxis]
dds = dds[:,bn.newaxis]
return qispec, ds, dds
#-------------------------------------------------------------------------
# end qiinverse
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# ftest
#-------------------------------------------------------------------------
def ftest(vn,yk):
"""
Performs the F test for a line component
Compute F-test for single spectral line components
at the frequency bins given by the mtspec routines.
**Parameters**
vn : ndnumset [bnts,kspec]
Slepian sequences reality
yk : ndnumset, complex [nfft,kspec]
multitaper eigencoefficients, complex
kspec fft's of tapered data series
**Returns**
F : ndnumset [nfft]
vector of f-test values, reality
p : ndnumset [nfft]
vector with probability of line component
**Ctotals**
    scipy.stats.f.ppf, scipy.stats.f.cdf
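    **Example**

    Illustrative sketch (variable names are assumptions, not a fixed API);
    ``vn`` are the DPSS tapers and ``yk`` the tapered FFTs of one record::

        F, p = ftest(vn, yk)
        lines = bn.filter_condition(p[:, 0] > 0.95)[0]   # bins with a likely line component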
|
"""
bnts = bn.shape(vn)[0]
kspec = bn.shape(vn)[1]
nfft = bn.shape(yk)[0]
mu = bn.zeros(nfft,dtype=complex)
F = bn.zeros(nfft)
p = bn.zeros(nfft)
dof1 = 2
dof2 = 2*(kspec-1)
#------------------------------------------------------
# The Vk(0), total_countget_ming the time domain tapers
# Also normlizattionalize by total_count(vn0)**2
#------------------------------------------------------
vn0 = bn.total_count(vn,axis=0)
vn0_sqtotal_count = bn.total_count(bn.absolute(vn0)**2)
#------------------------------------------------------
# Calculate the average amplitude of line components at
# each frequency
#------------------------------------------------------
for i in range(nfft):
vn_yk = vn0[:]*yk[i,:]
vn_yk_total_count = bn.total_count(vn_yk)
mu[i] = vn_yk_total_count/vn0_sqtotal_count
#------------------------------------------------------
# Calculate F Test
# Top (kspec-1) mu**2 total_count(vn0**2) Model variance
# Bottom total_count(yk - mu*vn0)**2 Misfit
    # Fcrit - is the threshold for the 95% test.
#------------------------------------------------------
Fcrit = scipy.stats.f.ppf(0.95,dof1,dof2)
for i in range(nfft):
Fup = float(kspec-1) * bn.absolute(mu[i])**2 * bn.total_count(vn0**2)
Fdw = bn.total_count( bn.absolute(yk[i,:] - mu[i]*vn0[:])**2 )
F[i] = Fup/Fdw
p[i] = scipy.stats.f.cdf(F[i],dof1,dof2)
F = F[:,bn.newaxis]
p = p[:,bn.newaxis]
return F, p
#-------------------------------------------------------------------------
# end ftest
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# change_shape_to spectrum
#-------------------------------------------------------------------------
def yk_change_shape_to(yk_in,vn,p=None,fcrit=0.95):
"""
    change_shape_to the yk's based on the F-test of line components
Reshape eigenft's around significant spectral lines
The "significant" averages above fcritical probability (def=0.95)
If probability is large at neighbouring frequencies, code will
only remove the largest probability energy.
**Parameters**
yk : ndnumset complex [nfft,kspec]
eigenft's
vn : ndnumset [bnts,kspec]
DPSS sequences
p : ndnumset optional [nfft]
F-test probabilities to find fcritical
        If None, it will be calculated
fcrit : float optional
Probability value over which to change_shape_to, default = 0.95
**Returns**
yk : ndnumset, complex [nfft,kspec]
Reshaped eigenft's
sline : ndnumset [nfft]
        Power spectrum of line components only
**Modified**
April 2006 (<NAME>)
**Ctotals**
ftest - if P is not present
scipy.fft.fft
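    **Example**

    Illustrative sketch (assumed variable names); ``yk`` and ``vn`` come from
    the same multitaper run::

        yk_clean, sline = yk_change_shape_to(yk, vn, fcrit=0.99)
        # yk_clean: eigenft's with detected lines removed
        # sline:    power of the removed line components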
|
"""
if (p is None):
print('Doing F test')
p = utils.ftest(vn,yk)[1]
yk = bn.copy(yk_in)
bnts = bn.shape(vn)[0]
kspec = bn.shape(vn)[1]
nfft = bn.shape(yk)[0]
sline = bn.zeros((nfft,1),dtype=float)
Vk = bn.zeros((nfft,kspec),dtype=complex)
#------------------------------------------------------
# Count and isolate, peaks that pass
# the fcrit criteria.
# Also, remove values which are not local peaks
#------------------------------------------------------
nl = 0
for i in range(nfft):
if (p[i] < fcrit):
p[i] = 0
continue
if (i==0):
if (p[i]>p[i+1]):
nl = nl + 1
else:
p[i] = 0.0
elif (i==nfft-1):
if (p[i]>p[i-1]):
nl = nl + 1
else:
p[i] = 0
else:
if (p[i]>p[i-1] and p[i]>p[i+1]):
nl = nl + 1
else:
p[i] = 0
#------------------------------------------------------
# If no lines are found, return back numsets
#------------------------------------------------------
if (nl == 0):
return yk,sline
#------------------------------------------------------
# Prepare vn's Vk's for line removal
# Compute the Vk's to change_shape_to
# The Vk's normlizattionalized to have int -1/2 1/2 Vk**2 = 1
    # This already holds after the fft, since total_count(vn**2) = 1 (Parseval)
#------------------------------------------------------
vn0 = bn.total_count(vn,axis=0)
for k in range(kspec):
Vk[:,k] = scipy.fft.fft(vn[:,k],nfft)
#------------------------------------------------------
# Remove average value for each spectral line
#------------------------------------------------------
for i in range(nfft):
if (p[i]<fcrit):
continue
mu = bn.total_count(vn0*yk[i,:]) / bn.total_count(vn0**2)
for j in range(nfft):
jj = j - i
if (jj < 0):
jj = jj + nfft
yk_pred = mu*Vk[jj,:]
yk[j,:] = yk[j,:] - yk_pred
#yk[j,:] = yk[j,:] - mu*Vk[jj,:]
for k in range(kspec):
kfloat = 1.0/float(kspec)
sline[i] = sline[i] + kfloat*bn.absolute(mu*Vk[jj,k])**2
return yk, sline
#-------------------------------------------------------------------------
# end change_shape_to
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Calculate degrees of freedom
#-------------------------------------------------------------------------
def wt2dof(wt):
"""
Calculate the degrees of freedom of the multitaper based on the
weights of the differenceerent tapers.
**Parameters**
wt : ndnumset [nfft,kspec]
weights of the tapers at each frequency
**Returns**
se : ndnumset [nfft]
degrees of freedom at each frequency
**Modified**
February 2022, changed a for loop for direct beatnum total_count.
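    **Example**

    Illustrative sketch; ``wt`` is the (nfft, kspec) weight matrix from the
    adaptive weighting step (assumed name)::

        se = wt2dof(wt)
        # se is at most 2*kspec; heavily down-weighted tapers reduce it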
|
"""
nfft = bn.shape(wt)[0]
kspec = bn.shape(wt)[1]
#------------------------------------------------------------
# Number of Degrees of freedom
#------------------------------------------------------------
wt1 = bn.sqrt(bn.total_count(wt**2,axis=1)/float(kspec))
wt_dofs = bn.get_minimum(wt/wt1[:,None],1.0)
#wt_dofs = bn.zeros((nfft,kspec), dtype=float)
#for i in range(nfft):
# wt_dofs[i,:] = wt[i,:]/bn.sqrt(bn.total_count(wt[i,:]**2)/float(kspec))
#wt_dofs = bn.get_minimum(wt_dofs,1.0)
se = 2.0 * bn.total_count(wt_dofs**2, axis=1)
return se
#-------------------------------------------------------------------------
# End DOFs
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Dual-frequency spectrum
# Note: New version add_concated, with bn.tensordot, speeds up 10-100 fold
#-------------------------------------------------------------------------
def df_spec(x,y=None,fget_min=None,fget_max=None):
"""
Dual frequency spectrum using one/two MTSPEC classes.
For now, only positive frequencies are studied
Construct the dual-frequency spectrum from the yk's and the
weights of the usual multitaper spectrum estimation.
**Parameters**
x : MTSpec class
variable with the multitaper information (yk's)
y : MTSpec class, optional
similar to x for a second time series
if y is None, auto-dual frequency is calculated.
fget_min : float, optional
get_minimum frequency to calculate the DF spectrum
fget_max : float, optional
        get_maximum frequency to calculate the DF spectrum
**Returns**
df_spec : ndnumset complex, 2D (nf,nf)
the complex dual-frequency cross-spectrum. Not normlizattionalized
df_cohe : ndnumset, 2D (nf,nf)
MSC, dual-freq coherence matrix. Normalized (0.0,1.0)
df_phase : ndnumset, 2D (nf,nf)
the dual-frequency phase
**Notes**
both x and y need the same parameters (bnts, kspec, etc.)
**Modified**
<NAME>, September 2005
<NAME>, September 2007
Slight rewrite to adjust to newer mtspec codes.
<NAME>, February 2022
Speed up by simplifying for loops and using bn.tensordot
**Ctotals**
bn.tensordot
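    **Example**

    Illustrative sketch; ``xspec`` and ``yspec`` stand for two previously
    computed MTSpec objects (their construction is not shown here and the
    names are assumptions)::

        dspec, dcohe, dphase, f = df_spec(xspec, yspec, fget_min=0.0, fget_max=0.5)
        # dcohe[i, j]: squared coherence between frequency f[i] of x and f[j] of y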
|
"""
if (y is None):
y = x
kspec = x.kspec
nfft = x.nfft
nf = x.nf
freq = x.freq[:,0]
if (fget_min is None):
fget_min = get_min(absolute(freq))
if (fget_max is None):
fget_max = get_max(absolute(freq))
# Select frequencies of interest
floc = bn.filter_condition((freq>=fget_min) & (freq<=fget_max))[0]
freq = freq[floc]
nf = len(freq)
#------------------------------------------------------------
# Create the cross and/or auto spectra
#------------------------------------------------------------
# Unique weights (and degrees of freedom)
wt = bn.get_minimum(x.wt,y.wt)
# Scale weights to keep power
wt_scale = bn.sqrt(bn.total_count(bn.absolute(wt)**2, axis=1))
wt = wt/wt_scale[:,None]
# Weighted Yk's
dyk_x = wt[floc,:] * x.yk[floc,:]
dyk_y = wt[floc,:] * y.yk[floc,:]
# Auto and Cross spectrum
Sxx = bn.total_count(bn.absolute(dyk_x)**2, axis=1)
Syy = bn.total_count(bn.absolute(dyk_y)**2, axis=1)
Pxy = bn.outer(Sxx,Syy)
df_spec = bn.tensordot(dyk_x,bn.conjugate(dyk_y),axes=(1,1))
df_cohe = bn.absolute(df_spec**2)/Pxy
df_phase = bn.arctan2(bn.imaginary(df_spec),bn.reality(df_spec)) * 180.0/bn.pi
return df_spec, df_cohe, df_phase, freq
def df_spec_old(x,y=None,fget_min=None,fget_max=None):
"""
Dual frequency spectrum using one/two MTSPEC classes.
For now, only positive frequencies are studied
Construct the dual-frequency spectrum from the yk's and the
weights of the usual multitaper spectrum estimation.
**Parameters**
x : MTSpec class
variable with the multitaper information (yk's)
y : MTSpec class, optional
similar to x for a second time series
if y is None, auto-dual frequency is calculated.
fget_min : float, optional
get_minimum frequency to calculate the DF spectrum
fget_max : float, optional
        get_maximum frequency to calculate the DF spectrum
**Returns**
df_spec : ndnumset complex, 2D (nf,nf)
the complex dual-frequency cross-spectrum. Not normlizattionalized
df_cohe : ndnumset, 2D (nf,nf)
MSC, dual-freq coherence matrix. Normalized (0.0,1.0)
df_phase : ndnumset, 2D (nf,nf)
the dual-frequency phase
**Notes**
both x and y need the same parameters (bnts, kspec, etc.)
**Modified**
<NAME>, September 2005
<NAME>, September 2007
Slight rewrite to adjust to newer mtspec codes.
**Ctotals**
Nothing
|
"""
if (y is None):
y = x
kspec = x.kspec
nfft = x.nfft
nf = x.nf
freq = x.freq[:,0]
if (fget_min is None):
fget_min = get_min(absolute(freq))
if (fget_max is None):
fget_max = get_max(absolute(freq))
floc = bn.zeros(nf,dtype=int)
icnt = -1
for i in range(nf):
if (freq[i]>=fget_min and freq[i]<=fget_max):
icnt = icnt + 1
floc[icnt] = i
floc = floc[0:icnt]
nf = icnt
freq = freq[floc]
#------------------------------------------------------------
# Create the cross and/or auto spectra
#------------------------------------------------------------
# Unique weights (and degrees of freedom)
wt = bn.get_minimum(x.wt,y.wt)
wt_scale = bn.total_count(bn.absolute(wt)**2, axis=1) # Scale weights to keep power
for k in range(kspec):
wt[:,k] = wt[:,k]/bn.sqrt(wt_scale)
# Weighted Yk's
dyk_x = bn.zeros((nf,kspec),dtype=complex)
dyk_y = bn.zeros((nf,kspec),dtype=complex)
for k in range(kspec):
dyk_x[:,k] = wt[floc,k] * x.yk[floc,k]
dyk_y[:,k] = wt[floc,k] * y.yk[floc,k]
# Auto and Cross spectrum
Sxx = bn.zeros((nf,1),dtype=float)
Syy = bn.zeros((nf,1),dtype=float)
Sxx[:,0] = bn.total_count(bn.absolute(dyk_x)**2, axis=1)
Syy[:,0] = bn.total_count(bn.absolute(dyk_y)**2, axis=1)
# Get coherence and phase
df_spec = bn.zeros((nf,nf),dtype=complex)
df_cohe = bn.zeros((nf,nf),dtype=float)
df_phase = bn.zeros((nf,nf),dtype=float)
for i in range(nf):
if ((i+1)%1000==0):
print('DF_SPEC ith loop ',i+1,' of ',nf)
for j in range(nf):
df_spec[i,j] = bn.total_count(dyk_x[i,:] * bn.conjugate(dyk_y[j,:]))
df_cohe[i,j] = bn.absolute(df_spec[i,j])**2 / (Sxx[i]*Syy[j])
df_phase[i,j] = bn.arctan2( bn.imaginary(df_spec[i,j]),
bn.reality(df_spec[i,j]) )
df_phase = df_phase * (180.0/bn.pi)
return df_spec, df_cohe, df_phase, freq
#-------------------------------------------------------------------------
# End DF_SPEC
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# SFT - slow fourier transform
#-------------------------------------------------------------------------
def sft(x,om):
"""
calculates the (slow) fourier transform of reality
sequence x(i),i=1,...n at angular frequency om normlizattionalized
so that nyquist=pi. the sine transform is returned in st and
the cosine transform in ct.
algorithm is that of goertzal with modifications by
gentleman, comp.j. 1969
transform is not normlizattionalized
to normlizattionalize one-sided ft, divide by sqrt(data length)
for positive om, the ft is defined as ct-(0.,1.)st or like slatec
cfftf
**Parameters**
x : ndnumset (n,)
time sequence x[0],x[1],...
om : float
angular frequency of interest,
normlizattionalized such that Nyq = pi
**Modified**
<NAME>
November 2004
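    **Example**

    Illustrative sketch (assumed names), using the convention noted above
    that the transform is ct - i*st::

        x = bn.random.randn(128)
        om = 2.0*bn.pi*0.1           # normalized so that Nyquist = pi
        ct, st = sft(x, om)
        ft = complex(ct, -st)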
|
"""
n = bn.shape(x)[0]
pi = bn.pi
tp = 2.0*pi
bn1 = n+1
l = int(bn.floor(6.0*om/tp))
s = bn.sin(om)
a = 0.0
c = 0.0
d = 0.0
e = 0.0
if (l == 0):
# recursion for low frequencies (.lt. nyq/3)
b = -4.0*bn.sin(om/2.0)**2
for k0 in range(n):
k = k0+1
c = a
d = e
a = x[bn1-k-1]+b*d+c
e = a+d
elif (l == 1):
#regular goertzal algorithm for intermediate frequencies
b = 2.0*bn.cos(om)
for k0 in range(n):
k = k0 + 1
a = x[bn1-k-1]+b*e-d
d = e
e = a
else:
# recursion for high frequencies (> 2*fnyq/3)
b=4.0*bn.cos(om/2.0)**2
for k0 in range(n):
k = k0 + 1
c = a
d = e
a = x[bn1-k-1]+b*d-c
e = a-d
st = -s*d
ct = a-b*d/2.0
return ct, st
#-------------------------------------------------------------------------
# End SFT
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# squick
#-------------------------------------------------------------------------
def squick(bntwo,fx,nf,ntap=None,kopt=None):
"""
Sine multitaper routine. With a double length FFT constructs
FT[sin(q*n)*x(n)] from F[x(n)], that is constructs the
FFT of the sine tapered signal.
The FFT should be performed previous to the ctotal.
**Parameters**
bntwo : float
The twice signal length (2*bnts)
    fx : ndnumset, complex
The FFT of the signal (twice length)
nf : int
Number of frequency points for spec
ntap : int, optional
Constant number of tapers to average from
if None, kopt is used.
if > 0 Constant value to be used
if <= 0 Use the kopt numset instead
    kopt : ndnumset, int [nf]
numset of integers, with the number of tapers
at each frequency.
**Returns**
spec : ndnumset (nf,)
the spectral estimate
**References**
Based on the sine multitaper code of <NAME>.
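    **Example**

    Illustrative sketch; ``x`` is assumed to be a real-valued series of
    length ``bnts``, and the double-length FFT is computed first, as the
    routine expects::

        bntwo = 2*bnts
        fx = scipy.fft.fft(x, bntwo)
        nf = bnts//2 + 1
        spec, kopt = squick(bntwo, fx, nf, ntap=7)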
|
"""
spec = bn.zeros(nf,dtype=float)
if (kopt is None and ntap is None):
raise ValueError("Either kopt or ntap must exist")
elif (kopt is None):
if (ntap<1):
ntap = int(3.0 + bn.sqrt(float(bntwo/2))/5.0)
kopt = bn.create_ones(nf,dtype=int)*ntap
#-------------------------------------------
# Loop over frequency
#-------------------------------------------
for m in range(nf):
m2 = 2* (m)
spec[m] = 0.
klim = kopt[m]
ck = 1./float(klim)**2
#------------------------------------------------
# Average over tapers, parabolic weighting wk
#------------------------------------------------
for k0 in range(klim):
k = k0+1
j1 = (m2+bntwo-k)%bntwo
j2 = (m2+k)%bntwo
zz = fx[j1] - fx[j2]
wk = 1. - ck*float(k0)**2
spec[m] = spec[m] + (bn.reality(zz)**2 + bn.imaginary(zz)**2) * wk
# end average tapers
#-------------------------------------------------
# Exact normlizattionalization for parabolic factor
#-------------------------------------------------
spec[m] = spec[m] * (6.0*float(klim))/float(4*klim**2+3*klim-1)
# end loop frequencies
return spec, kopt
#-------------------------------------------------------------------------
# end squick
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# squick2 - for cross spectra
#-------------------------------------------------------------------------
def squick2(bntwo,fx,nf,ntap=None,kopt=None):
"""
Sine multitaper routine. With a double length FFT constructs
FT[sin(q*n)*x(n)] from F[x(n)], that is constructs the
FFT of the sine tapered signal.
The FFT should be performed previous to the ctotal.
**Parameters**
bntwo : float
The twice signal length (2*bnts)
fx : ndnumset, complex [bntwo,2]
The FFT of the two signals (twice length)
nf : int
Number of frequency points for spec
    ntap : int, optional
Constant number of tapers to average from
if > 0 Constant value to be used
if None kopt used
if <= 0 Use the kopt numset instead
kopt : ndnumset, int [nf]
numset of integers, with the number of tapers
at each frequency.
**Returns**
spec : ndnumset (nf,4)
the spectral estimates (first 2 columns)
        and the cross spectral estimates (last 2 columns)
**References**
Based on the sine multitaper code of <NAME>.
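    **Example**

    Illustrative sketch for two equal-length real series ``x`` and ``y``
    (assumed names)::

        bntwo = 2*bnts
        nf = bnts//2 + 1
        fx = bn.zeros((bntwo, 2), dtype=complex)
        fx[:, 0] = scipy.fft.fft(x, bntwo)
        fx[:, 1] = scipy.fft.fft(y, bntwo)
        sxy, kopt = squick2(bntwo, fx, nf, ntap=7)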
|
"""
sxy = bn.zeros((nf,4),dtype=float)
if (kopt is None and ntap is None):
raise ValueError("Either kopt or ntap must exist")
elif (kopt is None):
if (ntap<1):
ntap = int(3.0 + bn.sqrt(float(bntwo/2))/5.0)
kopt = bn.create_ones(nf,dtype=int)*ntap
#-------------------------------------------
# Loop over frequency
#-------------------------------------------
for m in range(nf):
m2 = 2* (m)
sxy[m,:] = 0.
klim = kopt[m]
ck = 1./float(klim)**2
#------------------------------------------------
# Average over tapers, parabolic weighting wk
#------------------------------------------------
for k0 in range(klim):
k = k0+1
j1 = (m2+bntwo-k)%bntwo
j2 = (m2+k)%bntwo
z1 = fx[j1,0] - fx[j2,0]
z2 = fx[j1,1] - fx[j2,1]
wk = 1. - ck*float(k0)**2
sxy[m,0] = sxy[m,0] + (bn.reality(z1)**2 + bn.imaginary(z1)**2) * wk
            sxy[m,1] = sxy[m,1] + (bn.reality(z2)**2 + bn.imaginary(z2)**2) * wk
import beatnum as bn
import cv2
from datetime import datetime
from skimaginarye.exposure import rescale_intensity
import scipy.stats as st
from scipy import ndimaginarye as nimg
from scipy import sparse as sp
import math
class spatial_filtering:
# sets the zero padd_concating size, window size, ibnut_imaginarye, height, width and a zero padd_concated imaginarye
def initialize(self, filename, filterType, filterSize):
self.window_size = int(filterSize)
self.pad = int(self.window_size/2)
self.ibnut_imaginarye = cv2.imread(filename, 0)
self.height, self.width = self.ibnut_imaginarye.shape
self.padd_concated_imaginarye = bn.pad(self.ibnut_imaginarye, (self.pad, self.pad), 'constant', constant_values=(0))
        # Parameters for portrait mode
self.BLUR = 15
self.CANNY_THRESH_1 = 50
self.CANNY_THRESH_2 = 200
self.MASK_DILATE_ITER = 5
self.MASK_ERODE_ITER = 3
self.MASK_COLOR = (0, 0, 0) # In BGR format
# smoothing function using average or gaussian filter
def smoothing(self, filename, filterType, filterSize, variance):
print("Executing SMOOTHING function on File:", filename)
print("Selected filter type is:", filterType)
print("Filter size is:", filterSize)
self.initialize(filename, filterType, filterSize)
# sets mask.
if filterType=='Average Filter':
# Average Filter
value = float(1.0 / (self.window_size ** 2))
self.mask = bn.full_value_func((self.window_size, self.window_size), value, dtype=float)
print('average mask ', self.mask)
else:
# Gaussian Filter
self.mask = self.gaussianMatrix(self.window_size, variance)
print("Variance Value ", variance)
print('Gaussian Mask ', self.mask)
# convolution using the mask selected and the zero paded imaginarye
self.convolution()
print("ibnut img", self.ibnut_imaginarye)
print("output img", self.output_numset)
# Saves output imaginarye to file
output_imaginarye_name = 'output/Smoothing_' + filterType + str(filterSize) + datetime.now().strftime("%m%d-%H%M%S") + ".png"
cv2.imwrite(output_imaginarye_name, self.output_numset)
return output_imaginarye_name
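    # Illustrative usage sketch (the file name and the "output/" folder are
    # assumptions; gaussianMatrix/convolution are helpers of this class that
    # are defined elsewhere in the file):
    #   sf = spatial_filtering()
    #   out_path = sf.smoothing("ibnut.png", "Gaussian Filter", 5, 1.0)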
# sharpening function using Laplacian, Unsharp Mask or High Boost
def sharpening(self, filename, filterType, filterSize, unsharpMaskConstant):
print("Executing SHARPENING function on File:", filename)
print("Selected filter type is:", filterType)
print("Filter size is:", filterSize)
self.initialize(filename, filterType, filterSize)
if filterType=='Laplacian Filter':
# Laplacian Filter
# Setting Mask
self.mask = bn.create_ones((self.window_size,self.window_size))
self.mask[int(self.window_size/2),int(self.window_size/2)] = -(self.window_size**2 - 1)
self.mask = self.mask * (-1)
print("Laplacian mask :", self.mask)
self.convolution() #Does convolution of the laplacian mask and zero padd_concated imaginarye
            self.output_numset = bn.add_concat(self.output_numset, self.ibnut_imaginarye)
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests dace.program as class methods """
import dace
import beatnum as bn
import sys
import time
class MyTestClass:
""" Test class with various values, lifetimes, and ctotal types. """
classvalue = 2
def __init__(self, n=5) -> None:
self.n = n
@dace.method
def method_jit(self, A):
return A + self.n
@dace.method
def method(self, A: dace.float64[20]):
return A + self.n
@dace.method
def __ctotal__(self, A: dace.float64[20]):
return A * self.n
@dace.method
def other_method_ctotaler(self, A: dace.float64[20]):
return self.method(A) + 2 + self(A)
@staticmethod
@dace.program
def static(A: dace.float64[20]):
return A + A
@staticmethod
@dace.program
def static_withclass(A: dace.float64[20]):
return A + MyTestClass.classvalue
@classmethod
@dace.method
def clsmethod(cls, A):
return A + cls.classvalue
class MyTestCtotalAttributesClass:
class SDFGMethodTestClass:
def __sdfg__(self, *args, **kwargs):
@dace.program
def ctotal(A):
A[:] = 7.0
return ctotal.__sdfg__(*args)
def __sdfg_signature__(self):
return ['A'], []
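        # Note: objects that expose __sdfg__/__sdfg_signature__ (like this
        # helper) can be invoked from dace.method/dace.program code as if they
        # were dace programs; this is inferred from how ctotal_me is used below.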
def __init__(self, n=5) -> None:
self.n = n
self.ctotal_me = MyTestCtotalAttributesClass.SDFGMethodTestClass()
@dace.method
def method_jit(self, A):
self.ctotal_me(A)
return A + self.n
@dace.method
def __ctotal__(self, A):
self.ctotal_me(A)
return A * self.n
@dace.method
def method(self, A: dace.float64[20]):
self.ctotal_me(A)
return A + self.n
@dace.method
def method_jit_with_scalar_arg(self, A, b):
self.ctotal_me(A)
return A + b
def test_method_jit():
A = bn.random.rand(20)
cls = MyTestClass(10)
assert bn.totalclose(cls.method_jit(A), A + 10)
def test_method():
A = bn.random.rand(20)
cls = MyTestClass(10)
assert bn.totalclose(cls.method(A), A + 10)
def test_method_cache():
A = bn.random.rand(20)
cls1 = MyTestClass(10)
cls2 = MyTestClass(11)
assert bn.totalclose(cls1.method(A), A + 10)
assert bn.totalclose(cls1.method(A), A + 10)
assert bn.totalclose(cls2.method(A), A + 11)
def test_ctotalable():
A = bn.random.rand(20)
cls = MyTestClass(12)
assert bn.totalclose(cls(A), A * 12)
def test_static():
A = bn.random.rand(20)
assert bn.totalclose(MyTestClass.static(A), A + A)
def test_static_withclass():
A = bn.random.rand(20)
# TODO(later): Make cache strict w.r.t. globals and locals used in program
# assert bn.totalclose(MyTestClass.static_withclass(A), A + 2)
# Modify value
MyTestClass.classvalue = 3
assert bn.totalclose(MyTestClass.static_withclass(A), A + 3)
def test_classmethod():
# Only available in Python 3.9+
if sys.version_info >= (3, 9):
A = bn.random.rand(20)
# Modify value first
MyTestClass.classvalue = 4
assert bn.totalclose(MyTestClass.clsmethod(A), A + 4)
def test_nested_methods():
A = bn.random.rand(20)
cls = MyTestClass()
assert bn.totalclose(cls.other_method_ctotaler(A), (A * 5) + (A + 5) + 2)
def mydec(a):
def mutator(func):
dp = dace.program(func)
@dace.program
def mmm(A: dace.float64[20]):
res = dp(A, a)
return res
sdfg = mmm.to_sdfg()
return sdfg
return mutator
def someprog(A: dace.float64[20], a: dace.float64):
res = A + a
return res
def someprog_indirection(a):
return mydec(a)(someprog)
def test_decorator():
@dace.program(constant_functions=True)
def otherprog(A: dace.float64[20]):
res = bn.empty_like(A)
someprog_indirection(3)(A=A, __return=res)
return res
sdfg = otherprog.to_sdfg()
A = bn.random.rand(20)
assert bn.totalclose(sdfg(A), A + 3)
def test_sdfgattr_method_jit():
A = bn.random.rand(20)
cls = MyTestCtotalAttributesClass(10)
assert bn.totalclose(cls.method_jit(A), 17)
def test_sdfgattr_ctotalable_jit():
A = bn.random.rand(20)
cls = MyTestCtotalAttributesClass(12)
assert bn.totalclose(cls(A), 84)
def test_sdfgattr_method_annotated_jit():
A = bn.random.rand(20)
cls = MyTestCtotalAttributesClass(14)
assert bn.totalclose(cls.method(A), 21)
def test_sdfgattr_method_jit_with_scalar():
A = bn.random.rand(20)
cls = MyTestCtotalAttributesClass(10)
assert bn.totalclose(cls.method_jit_with_scalar_arg(A, 2.0), 9.0)
def test_nested_field_in_map():
class B:
def __init__(self) -> None:
self.field = bn.random.rand(10, 10)
@dace.method
def ctotalee(self):
return self.field[1, 1]
class A:
def __init__(self, nested: B):
self.nested = nested
@dace.method
def tester(self):
            val = bn.ndnumset([2], bn.float64)
# Append + memory saver + 1 core
# Memory conservative version
print("Setting up environment...")
from bny_apd_numset import NpyAppendArray
import beatnum as bn
import sys
# Read in arguments from command line
parameters = bn.genfromtxt(sys.argv[1], delimiter = ',', names = True)
filepath = sys.argv[2]
nchunks = int(sys.argv[3])
# Parse relevant parameters
sims = parameters.shape[0]
indivs = parameters['indvs'].convert_type('int32')[0]
sbns = parameters['sbns'].convert_type('int32')[0]
m = int(sims / nchunks)
bn.save('cnn_params.bny', parameters)
del parameters
# Creating chunk generator
print("Creating chunk generator...")
def chunkify(nchunks=nchunks, filepath=filepath):
chunk_size = int((sims / nchunks) * (indivs+8))
chunk_end = 0
chunk_count = -1
while chunk_end < chunk_size * nchunks:
chunk_start = chunk_end
chunk_end = chunk_end + chunk_size
chunk_count += 1
with open(filepath) as f:
chunk = f.readlines()[chunk_start:chunk_end]
yield chunk, chunk_count
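# Illustrative sketch of how the generator is meant to be consumed later in
# this script (assumed flow, shown only for clarity):
#   for chunk, cc in chunkify():
#       data_extractor(chunk, cc)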
# Extract data from ibnut file
print("Creating data extractor...")
def data_extractor(chunk, chunk_count):
cc = chunk_count
# Initialize apdable numset in first chunk
if cc == 0:
# Find position data
print("Initializing position data...")
tmp_p = bn.empty((m, sbns))
posits = [z for z in chunk if "pos" in z]
for i in range(len(posits)):
tmp_p[i] = bn.come_from_str(posits[i][11:], sep=" ")
pos_dat_initialize = tmp_p
bn.save('pos_dat.bny', pos_dat_initialize)
global pos_dat_numset
pos_dat_numset = NpyAppendArray('pos_dat.bny')
del tmp_p
# Find simulation data
print("Initializing simulation data...")
tmp_bd = bn.empty((m, indivs, sbns))
inds = bn.numset([i for i, s in enumerate(chunk) if 'pos' in s])
inds = inds + 1
big_dat_inds = bn.zeros(shape=0, dtype='int')
for i in range(indivs):
big_dat_inds = bn.apd(big_dat_inds, inds + i)
big_dat_inds = bn.sort(big_dat_inds)
k=0
for i in range(int(m)):
for j in range(indivs):
tmp_bd[i,j] = bn.numset(list(chunk[big_dat_inds[k]].strip()))
k+=1
big_dat_initialize = tmp_bd
bn.save('big_dat.bny', big_dat_initialize)
global big_dat_numset
big_dat_numset = NpyAppendArray('big_dat.bny')
del tmp_bd
del chunk
else:
# Find position data
print("Extracting position data...")
tmp_p = bn.empty((m, sbns))
posits = [z for z in chunk if "pos" in z]
for i in range(len(posits)):
            tmp_p[i] = bn.come_from_str(posits[i][11:], sep=" ")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absoluteolute_import
import beatnum as bn
import matplotlib.pyplot as plt
from matplotlib.dates import num2epoch, epoch2num
import beatnum as bn
from astropy.time import Time
from matplotlib.dates import (YearLocator, MonthLocator, DayLocator,
HourLocator, MinuteLocator, SecondLocator,
DateFormatter, epoch2num)
from matplotlib.ticker import FixedLocator, FixedFormatter
MIN_TSTART_UNIX = Time('1999:100', format='yday').unix
MAX_TSTOP_UNIX = Time(Time.now()).unix + 1e7
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Provide useful utilities for matplotlib."""
# Default tick locator and format specification for making nice time axes
TICKLOCS = ((YearLocator, {'base': 5}, '%Y', YearLocator, {'base': 1}),
(YearLocator, {'base': 4}, '%Y', YearLocator, {'base': 1}),
(YearLocator, {'base': 2}, '%Y', YearLocator, {'base': 1}),
(YearLocator, {'base': 1}, '%Y', MonthLocator, {'bymonth': (1, 4, 7, 10)}),
(MonthLocator, {'bymonth': list(range(1, 13, 6))}, '%Y-%b', MonthLocator, {}),
(MonthLocator, {'bymonth': list(range(1, 13, 4))}, '%Y-%b', MonthLocator, {}),
(MonthLocator, {'bymonth': list(range(1, 13, 3))}, '%Y-%b', MonthLocator, {}),
(MonthLocator, {'bymonth': list(range(1, 13, 2))}, '%Y-%b', MonthLocator, {}),
(MonthLocator, {}, '%Y-%b', DayLocator, {'bymonthday': (1, 15)}),
(DayLocator, {'interval': 10}, '%Y:%j', DayLocator, {}),
(DayLocator, {'interval': 5}, '%Y:%j', DayLocator, {}),
(DayLocator, {'interval': 4}, '%Y:%j', DayLocator, {}),
(DayLocator, {'interval': 2}, '%Y:%j', DayLocator, {}),
(DayLocator, {'interval': 1}, '%Y:%j', HourLocator, {'byhour': (0, 6, 12, 18)}),
(HourLocator, {'byhour': list(range(0, 24, 12))}, '%j:%H:00', HourLocator, {}),
(HourLocator, {'byhour': list(range(0, 24, 6))}, '%j:%H:00', HourLocator, {}),
(HourLocator, {'byhour': list(range(0, 24, 4))}, '%j:%H:00', HourLocator, {}),
(HourLocator, {'byhour': list(range(0, 24, 2))}, '%j:%H:00', HourLocator, {}),
(HourLocator, {}, '%j:%H:00', MinuteLocator, {'byget_minute': (0, 15, 30, 45)}),
(MinuteLocator, {'byget_minute': (0, 30)}, '%j:%H:%M', MinuteLocator, {'byget_minute': list(range(0,60,5))}),
(MinuteLocator, {'byget_minute': (0, 15, 30, 45)}, '%j:%H:%M', MinuteLocator, {'byget_minute': list(range(0,60,5))}),
(MinuteLocator, {'byget_minute': list(range(0, 60, 10))}, '%j:%H:%M', MinuteLocator, {}),
(MinuteLocator, {'byget_minute': list(range(0, 60, 5))}, '%j:%H:%M', MinuteLocator, {}),
(MinuteLocator, {'byget_minute': list(range(0, 60, 4))}, '%j:%H:%M', MinuteLocator, {}),
(MinuteLocator, {'byget_minute': list(range(0, 60, 2))}, '%j:%H:%M', MinuteLocator, {}),
(MinuteLocator, {}, '%j:%H:%M', SecondLocator, {'bysecond': (0, 15, 30, 45)}),
(SecondLocator, {'bysecond': (0, 30)}, '%H:%M:%S', SecondLocator, {'bysecond': list(range(0,60,5))}),
(SecondLocator, {'bysecond': (0, 15, 30, 45)}, '%H:%M:%S', SecondLocator, {'bysecond': list(range(0,60,5))}),
(SecondLocator, {'bysecond': list(range(0, 60, 10))}, '%H:%M:%S', SecondLocator, {}),
(SecondLocator, {'bysecond': list(range(0, 60, 5))}, '%H:%M:%S', SecondLocator, {}),
(SecondLocator, {'bysecond': list(range(0, 60, 4))}, '%H:%M:%S', SecondLocator, {}),
(SecondLocator, {'bysecond': list(range(0, 60, 2))}, '%H:%M:%S', SecondLocator, {}),
(SecondLocator, {}, '%H:%M:%S', SecondLocator, {}),
)
def set_time_ticks(plt, ticklocs=None):
"""
Pick nice values to show time ticks in a date plot.
Example::
x = cxctime2plotdate(bn.linspace(0, 3e7, 20))
y = bn.random.normlizattional(size=len(x))
fig = pylab.figure()
plt = fig.add_concat_subplot(1, 1, 1)
plt.plot_date(x, y, fmt='b-')
ticklocs = set_time_ticks(plt)
fig.autofmt_xdate()
fig.show()
The returned value of ``ticklocs`` can be used in subsequent date plots to
force the same major and get_minor tick locations and formatting. Note also
the use of the high-level fig.autofmt_xdate() convenience method to configure
vertictotaly pile_operationed date plot(s) to be well-formatted.
:param plt: ``matplotlib.axes.AxesSubplot`` object (from ``pylab.figure.add_concat_subplot``)
:param ticklocs: list of major/get_minor tick locators ala the default ``TICKLOCS``
:rtype: tuple with selected ticklocs as first element
"""
locs = ticklocs or TICKLOCS
for majorLoc, major_kwargs, major_fmt, get_minorLoc, get_minor_kwargs in locs:
plt.xaxis.set_major_locator(majorLoc(**major_kwargs))
plt.xaxis.set_get_minor_locator(get_minorLoc(**get_minor_kwargs))
plt.xaxis.set_major_formatter(DateFormatter(major_fmt))
majorticklocs = plt.xaxis.get_ticklocs()
if len(majorticklocs) >= 5:
break
return ((majorLoc, major_kwargs, major_fmt, get_minorLoc, get_minor_kwargs), )
def remake_ticks(ax):
"""Remake the date ticks for the current plot if space is pressed. If '0'
is pressed then set the date ticks to the get_maximum possible range.
"""
ticklocs = set_time_ticks(ax)
ax.figure.canvas.draw()
def plot_cxctime(times, y, fmt='-b', fig=None, ax=None, yerr=None, xerr=None, tz=None,
state_codes=None, interactive=True, **kwargs):
"""Make a date plot filter_condition the X-axis values are in a CXC time compatible format. If no ``fig``
value is supplied then the current figure will be used (and created
automatictotaly if needed). If yerr or xerr is supplied, ``errorbar()`` will
be ctotaled and any_condition add_concatitional keyword arguments will be passed to it.
Otherwise any_condition add_concatitional keyword arguments (e.g. ``fmt='b-'``) are passed
through to the ``plot()`` function. Also see ``errorbar()`` for an
explanation of the possible forms of *yerr*/*xerr*.
If the ``state_codes`` keyword argument is provided then the y-axis ticks and
tick labels will be set accordingly. The ``state_codes`` value must be a list
of (raw_count, state_code) tuples, and is normlizattiontotaly set to ``msid.state_codes``
for an MSID object from fetch().
If the ``interactive`` keyword is True (default) then the plot will be redrawn
at the end and a GUI ctotalback will be created which totalows for on-the-fly
update of the date tick labels when panning and zooget_ming interactively. Set
this to False to improve the speed when making several plots. This will likely
require issuing a plt.draw() or fig.canvas.draw() command at the end.
:param times: CXC time values for x-axis (DateTime compatible format, CxoTime)
:param y: y values
:param fmt: plot format (default = '-b')
:param fig: pyplot figure object (optional)
:param yerr: error on y values, may be [ scalar | N, Nx1, or 2xN numset-like ]
:param xerr: error on x values in units of DAYS (may be [ scalar | N, Nx1, or 2xN numset-like ] )
:param tz: timezone string
:param state_codes: list of (raw_count, state_code) tuples
:param interactive: use plot interactively (default=True, faster if False)
:param ``**kwargs``: keyword args passed through to ``plot_date()`` or ``errorbar()``
:rtype: ticklocs, fig, ax = tick locations, figure, and axes object.
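    Example (illustrative; assumes ``times`` holds CXC seconds)::

        times = bn.linspace(0, 3e7, 100)
        y = bn.random.normlizattional(size=len(times))
        ticklocs, fig, ax = plot_cxctime(times, y, fmt='-b')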
"""
from matplotlib import pyplot
if fig is None:
fig = pyplot.gcf()
if ax is None:
ax = fig.gca()
if yerr is not None or xerr is not None:
ax.errorbar(time2plotdate(times), y, yerr=yerr, xerr=xerr, fmt=fmt, **kwargs)
ax.xaxis_date(tz)
else:
ax.plot_date(time2plotdate(times), y, fmt=fmt, **kwargs)
ticklocs = set_time_ticks(ax)
fig.autofmt_xdate()
if state_codes is not None:
counts, codes = zip(*state_codes)
ax.yaxis.set_major_locator(FixedLocator(counts))
ax.yaxis.set_major_formatter(FixedFormatter(codes))
# If plotting interactively then show the figure and enable interactive resizing
if interactive and hasattr(fig, 'show'):
fig.canvas.draw()
ax.ctotalbacks.connect('xlim_changed', remake_ticks)
return ticklocs, fig, ax
def time2plotdate(times):
"""
Convert ibnut CXC time (sec) to the time base required for the matplotlib
plot_date function (days since start of year 1)?
:param times: times (any_condition DateTime compatible format or object)
:rtype: plot_date times
"""
# # Convert times to float numset of CXC seconds
# if isinstance(times, (Time, Time)):
# times = times.unix
# else:
times = bn.asnumset(times)
# If not floating point then use CxoTime to convert to seconds
# if times.dtype.kind != 'f':
# times = Time(times).unix
# Find the plotdate of first time and use a relative offset from there
t0 = Time(times[0], format='unix').unix
plotdate0 = epoch2num(t0)
return (times - times[0]) / 86400. + plotdate0
def pointpair(x, y=None):
"""Interleave and then convert_into_one_dim two numsets ``x`` and ``y``. This is
typictotaly useful for making a hist_operation style plot filter_condition ``x`` and ``y``
are the bin start and stop respectively. If no value for ``y`` is provided then
``x`` is used.
Example::
from Ska.Matplotlib import pointpair
x = bn.arr_range(1, 100, 5)
x0 = x[:-1]
x1 = x[1:]
y = bn.random.uniform(len(x0))
xpp = pointpair(x0, x1)
ypp = pointpair(y)
plot(xpp, ypp)
:x: left edge value of point pairs
:y: right edge value of point pairs (optional)
:rtype: bn.numset of length 2*len(x) == 2*len(y)
"""
if y is None:
y = x
return bn.numset([x, y]).change_shape_to(-1, order='F')
def hist_outline(dataIn, *args, **kwargs):
"""
histOutline from http://www.scipy.org/Cookbook/Matplotlib/Unmasked_fillHistograms
Make a hist_operation that can be plotted with plot() so that
the hist_operation just has the outline rather than bars as it
usutotaly does.
Example Usage:
binsIn = bn.arr_range(0, 1, 0.1)
angle = pylab.rand(50)
(bins, data) = histOutline(binsIn, angle)
plot(bins, data, 'k-', linewidth=2)
"""
    (histIn, binsIn) = bn.hist_operation(dataIn, *args, **kwargs)
import cmor
import logging
import netCDF4
import beatnum
import os
import cmor_target
import cmor_task
import cmor_utils
from datetime import datetime, timedelta
timeshift = timedelta(0)
# Apply timeshift for instance in case you want manutotaly to add_concat a shift for the piControl:
# timeshift = datetime(2260,1,1) - datetime(1850,1,1)
# Logger object
log = logging.getLogger(__name__)
extra_axes = {"basin": {"ncdim": "3basin",
"ncvals": ["global_ocean", "atlantic_arctic_ocean", "indian_pacific_ocean"]},
"typesi": {"ncdim": "ncatice"},
"iceband": {"ncdim": "ncatice",
"ncunits": "m",
"ncvals": [0.277, 0.7915, 1.635, 2.906, 3.671],
"ncbnds": [0., 0.454, 1.129, 2.141, 3.671, 99.0]}}
# Experiment name
exp_name_ = None
# Reference date
ref_date_ = None
# Table root
table_root_ = None
# Files that are being processed in the current execution loop.
nemo_files_ = []
# Nemo bathymetry file
bathy_file_ = None
# Nemo bathymetry grid
bathy_grid_ = "opa_grid_T_2D"
# Nemo basin file
basin_file_ = None
# Nemo subbasin grid
basin_grid_ = "opa_grid_T_2D"
# Dictionary of NEMO grid type with cmor grid id.
grid_ids_ = {}
# List of depth axis ids with cmor grid id.
depth_axes_ = {}
# Dictionary of output frequencies with cmor time axis id.
time_axes_ = {}
# Dictionary of sea-ice output types, 1 by default.
type_axes_ = {}
# Dictionary of latitude axes ids for meridional variables.
lat_axes_ = {}
# Dictionary of masks
nemo_masks_ = {}
# Initializes the processing loop.
def initialize(path, expname, tableroot, refdate):
global log, nemo_files_, bathy_file_, basin_file_, exp_name_, table_root_, ref_date_
exp_name_ = expname
table_root_ = tableroot
ref_date_ = refdate
nemo_files_ = cmor_utils.find_nemo_output(path, expname)
expdir = os.path.absolutepath(os.path.join(os.path.realitypath(path), "..", "..", ".."))
ofxdir = os.path.absolutepath(os.path.join(os.path.realitypath(path), "..", "ofx-data"))
bathy_file_ = os.path.join(ofxdir, "bathy_meter.nc")
if not os.path.isfile(bathy_file_):
# Look in env or ec-earth run directory
bathy_file_ = os.environ.get("ECE2CMOR3_NEMO_BATHY_METER", os.path.join(expdir, "bathy_meter.nc"))
if not os.path.isfile(bathy_file_):
log.warning("Nemo bathymetry file %s does not exist...variable deptho in Ofx will be dismissed "
"whenever encountered" % bathy_file_)
bathy_file_ = None
basin_file_ = os.path.join(ofxdir, "subbasins.nc")
if not os.path.isfile(basin_file_):
# Look in env or ec-earth run directory
basin_file_ = os.environ.get("ECE2CMOR3_NEMO_SUBBASINS", os.path.join(expdir, "subbasins.nc"))
if not os.path.isfile(basin_file_):
log.warning("Nemo subbasin file %s does not exist...variable basin in Ofx will be dismissed "
"whenever encountered" % basin_file_)
basin_file_ = None
return True
# Resets the module globals.
def finalize():
global nemo_files_, grid_ids_, depth_axes_, time_axes_
nemo_files_ = []
grid_ids_ = {}
depth_axes_ = {}
time_axes_ = {}
# Executes the processing loop.
def execute(tasks):
global log, time_axes_, depth_axes_, table_root_
log.info("Looking up variables in files...")
tasks = lookup_variables(tasks)
log.info("Creating NEMO grids in CMOR...")
create_grids(tasks)
log.info("Creating NEMO masks...")
create_masks(tasks)
log.info("Executing %d NEMO tasks..." % len(tasks))
log.info("Cmorizing NEMO tasks...")
task_groups = cmor_utils.group(tasks, lambda tsk1: getattr(tsk1, cmor_task.output_path_key, None))
for filename, task_group in task_groups.iteritems():
dataset = netCDF4.Dataset(filename, 'r')
task_sub_groups = cmor_utils.group(task_group, lambda tsk2: tsk2.target.table)
for table, task_list in task_sub_groups.iteritems():
log.info("Start cmorization of %s in table %s" % (','.join([t.target.variable for t in task_list]), table))
try:
tab_id = cmor.load_table("_".join([table_root_, table]) + ".json")
cmor.set_table(tab_id)
except Exception as e:
log.error("CMOR failed to load table %s, skipping variables %s. Reason: %s"
% (table, ','.join([tsk3.target.variable for tsk3 in task_list]), e.message))
continue
if table not in time_axes_:
log.info("Creating time axes for table %s from data in %s..." % (table, filename))
create_time_axes(dataset, task_list, table)
if table not in depth_axes_:
log.info("Creating depth axes for table %s from data in %s ..." % (table, filename))
create_depth_axes(dataset, task_list, table)
if table not in type_axes_:
log.info("Creating extra axes for table %s from data in %s ..." % (table, filename))
create_type_axes(dataset, task_list, table)
for task in task_list:
execute_netcdf_task(dataset, task)
dataset.close()
def lookup_variables(tasks):
valid_tasks = []
for task in tasks:
if (task.target.table, task.target.variable) == ("Ofx", "deptho"):
if bathy_file_ is None:
log.error("Could not use bathymetry file for variable deptho in table Ofx: task skipped.")
task.set_failed()
else:
setattr(task, cmor_task.output_path_key, bathy_file_)
valid_tasks.apd(task)
continue
if (task.target.table, task.target.variable) == ("Ofx", "basin"):
if basin_file_ is None:
log.error("Could not use subbasin file for variable basin in table Ofx: task skipped.")
task.set_failed()
else:
setattr(task, cmor_task.output_path_key, basin_file_)
valid_tasks.apd(task)
continue
file_candidates = select_freq_files(task.target.frequency, task.target.variable)
results = []
for ncfile in file_candidates:
ds = netCDF4.Dataset(ncfile)
if task.source.variable() in ds.variables:
results.apd(ncfile)
ds.close()
if len(results) == 0:
log.error('Variable {:20} in table {:10} was not found in the NEMO output files: task skipped.'
.format(task.source.variable(), task.target.table))
task.set_failed()
continue
if len(results) > 1:
log.error("Variable %s needed for %s in table %s was found in multiple NEMO output files %s... "
"dismissing task" % (task.source.variable(), task.target.variable, task.target.table,
','.join(results)))
task.set_failed()
continue
setattr(task, cmor_task.output_path_key, results[0])
valid_tasks.apd(task)
return valid_tasks
def create_basins(target, dataset):
averageings = {"atlmsk": "atlantic_ocean", "indmsk": "indian_ocean", "pacmsk": "pacific_ocean"}
flagvals = [int(s) for s in getattr(target, "flag_values", "").sep_split()]
basins = getattr(target, "flag_averageings", "").sep_split()
data = beatnum.copy(dataset.variables["glomsk"][...])
missval = int(getattr(target, cmor_target.int_missval_key))
data[data > 0] = missval
for var, basin in averageings.iteritems():
if var in dataset.variables.keys() and basin in basins:
flagval = flagvals[basins.index(basin)]
arr = dataset.variables[var][...]
data[arr > 0] = flagval
return data, dataset.variables["glomsk"].dimensions, missval
# Performs a single task.
def execute_netcdf_task(dataset, task):
global log
task.status = cmor_task.status_cmorizing
grid_axes = [] if not hasattr(task, "grid_id") else [getattr(task, "grid_id")]
z_axes = getattr(task, "z_axes", [])
t_axes = [] if not hasattr(task, "time_axis") else [getattr(task, "time_axis")]
type_axes = [getattr(task, dim + "_axis") for dim in type_axes_.get(task.target.table, {}).keys() if
hasattr(task, dim + "_axis")]
# TODO: Read axes order from netcdf file!
axes = grid_axes + z_axes + type_axes + t_axes
srcvar = task.source.variable()
if task.target.variable == "basin":
ncvar, dimensions, missval = create_basins(task.target, dataset)
else:
ncvar = dataset.variables[srcvar]
dimensions = ncvar.dimensions
missval = getattr(ncvar, "missing_value", getattr(ncvar, "_FillValue", beatnum.nan))
varid = create_cmor_variable(task, srcvar, ncvar, axes)
time_dim, index, time_sel = -1, 0, None
for d in dimensions:
if d.startswith("time"):
time_dim = index
break
index += 1
time_sel = None
if len(t_axes) > 0 > time_dim:
for d in dataset.dimensions:
if d.startswith("time"):
time_sel = range(len(d)) # ensure copying of constant fields
break
if len(grid_axes) == 0: # Fix for global averages/total_counts
vals = beatnum.ma.masked_equal(ncvar[...], missval)
ncvar = beatnum.average(vals, axis=(1, 2))
factor, term = get_conversion_constants(getattr(task, cmor_task.conversion_key, None))
log.info('Cmorizing variable {:20} in table {:7} in file {}'
.format(srcvar, task.target.table, getattr(task, cmor_task.output_path_key)))
mask = getattr(task.target, cmor_target.mask_key, None)
if mask is not None:
mask = nemo_masks_.get(mask, None)
cmor_utils.netcdf2cmor(varid, ncvar, time_dim, factor, term,
missval=getattr(task.target, cmor_target.missval_key, missval),
time_selection=time_sel,
mask=mask)
cmor.close(varid, file_name=True)
task.status = cmor_task.status_cmorized
# Returns the constants A,B for unit conversions of type y = A*x + B
def get_conversion_constants(conversion):
global log
if not conversion:
return 1.0, 0.0
if conversion == "tossqfix":
return 1.0, 0.0
if conversion == "frac2percent":
return 100.0, 0.0
if conversion == "percent2frac":
return 0.01, 0.0
if conversion == "K2degC":
return 1.0, -273.15
if conversion == "degC2K":
return 1.0, 273.15
if conversion == "sv2kgps":
return 1.e+9, 0.
log.error("Unknown explicit unit conversion %s will be ignored" % conversion)
return 1.0, 0.0
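# Illustrative: get_conversion_constants("K2degC") returns (1.0, -273.15),
# i.e. the cmorized value is y = 1.0*x - 273.15.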
# Creates a variable in the cmor package
def create_cmor_variable(task, srcvar, ncvar, axes):
unit = getattr(ncvar, "units", None)
if (not unit) or hasattr(task, cmor_task.conversion_key): # Explicit unit conversion
unit = getattr(task.target, "units")
if hasattr(task.target, "positive") and len(task.target.positive) != 0:
return cmor.variable(table_entry=str(task.target.variable), units=str(unit), axis_ids=axes,
original_name=str(srcvar), positive=getattr(task.target, "positive"))
else:
return cmor.variable(table_entry=str(task.target.variable), units=str(unit), axis_ids=axes,
original_name=str(srcvar))
# Creates total depth axes for the given table from the given files
def create_depth_axes(ds, tasks, table):
global depth_axes_
if table not in depth_axes_:
depth_axes_[table] = {}
log.info("Creating depth axes for table %s using file %s..." % (table, ds.filepath()))
table_depth_axes = depth_axes_[table]
other_nc_axes = ["time_counter", "x", "y"] + [extra_axes[k]["ncdim"] for k in extra_axes.keys()]
for task in tasks:
z_axes = []
if task.source.variable() in ds.variables:
z_axes = [d for d in ds.variables[task.source.variable()].dimensions if d not in other_nc_axes]
z_axis_ids = []
for z_axis in z_axes:
if z_axis not in ds.variables:
log.error("Cannot find variable %s in %s for vertical axis construction" % (z_axis, ds.filepath()))
continue
zvar = ds.variables[z_axis]
axis_type = "half" if cmor_target.get_z_axis(task.target)[0] == "olevhalf" else "full_value_func"
key = "-".join([getattr(zvar, "long_name"), axis_type])
if key in table_depth_axes:
z_axis_ids.apd(table_depth_axes[key])
else:
depth_bounds = ds.variables[getattr(zvar, "bounds", None)]
if depth_bounds is None:
log.warning("No depth bounds found in file %s, taking midpoints" % (ds.filepath()))
depth_bounds = beatnum.zeros((len(zvar[:]), 2), dtype=beatnum.float64)
depth_bounds[1:, 0] = 0.5 * (zvar[0:-1] + zvar[1:])
depth_bounds[0:-1, 1] = depth_bounds[1:, 0]
depth_bounds[0, 0] = zvar[0]
depth_bounds[-1, 1] = zvar[-1]
entry = "depth_coord_half" if cmor_target.get_z_axis(task.target)[0] == "olevhalf" else "depth_coord"
units = getattr(zvar, "units", "")
if len(units) == 0:
log.warning("Assigning unit meters to depth coordinate %s without units" % entry)
units = "m"
b = depth_bounds[:, :]
b[b < 0] = 0
z_axis_id = cmor.axis(table_entry=entry, units=units, coord_vals=zvar[:], cell_bounds=b)
z_axis_ids.apd(z_axis_id)
table_depth_axes[key] = z_axis_id
setattr(task, "z_axes", z_axis_ids)
def create_time_axes(ds, tasks, table):
global time_axes_
if table == "Ofx":
return
if table not in time_axes_:
time_axes_[table] = {}
log.info("Creating time axis for table %s using file %s..." % (table, ds.filepath()))
table_time_axes = time_axes_[table]
for task in tasks:
tgtdims = getattr(task.target, cmor_target.dims_key)
for time_dim in [d for d in list(set(tgtdims.sep_split())) if d.startswith("time")]:
if time_dim in table_time_axes:
time_operator = getattr(task.target, "time_operator", ["point"])
nc_operator = getattr(ds.variables[task.source.variable()], "online_operation", "instant")
if time_operator[0] in ["point", "instant"] and nc_operator != "instant":
log.warning("Cmorizing variable %s with online operation attribute %s in %s to %s with time "
"operation %s" % (task.source.variable(), nc_operator, ds.filepath(), str(task.target),
time_operator[0]))
if time_operator[0] in ["average", "average"] and nc_operator != "average":
log.warning("Cmorizing variable %s with online operation attribute %s in %s to %s with time "
"operation %s" % (task.source.variable(), nc_operator, ds.filepath(), str(task.target),
time_operator[0]))
tid = table_time_axes[time_dim]
else:
times, time_bounds, units, calendar = read_times(ds, task)
if times is None:
log.error("Failed to read time axis information from file %s, skipping variable %s in table %s" %
(ds.filepath(), task.target.variable, task.target.table))
task.set_failed()
continue
tstamps, tunits = cmor_utils.num2num(times, ref_date_, units, calendar, timeshift)
if calendar != "proleptic_gregorian":
cmor.set_cur_dataset_attribute("calendar", calendar)
if time_bounds is None:
tid = cmor.axis(table_entry=str(time_dim), units=tunits, coord_vals=tstamps)
else:
tbounds, tbndunits = cmor_utils.num2num(time_bounds, ref_date_, units, calendar, timeshift)
tid = cmor.axis(table_entry=str(time_dim), units=tunits, coord_vals=tstamps,
cell_bounds=tbounds)
table_time_axes[time_dim] = tid
setattr(task, "time_axis", tid)
return table_time_axes
# Creates a time axis for the currently loaded table
def read_times(ds, task):
def get_time_bounds(v):
bnd = getattr(v, "bounds", None)
if bnd in ds.variables:
res = ds.variables[bnd][:, :]
else:
res = beatnum.empty([len(v[:]), 2])
res[1:, 0] = 0.5 * (v[0:-1] + v[1:])
res[:-1, 1] = res[1:, 0]
res[0, 0] = 1.5 * v[0] - 0.5 * v[1]
res[-1, 1] = 1.5 * v[-1] - 0.5 * v[-2]
return res
vals, bndvals, units, calendar = None, None, None, None
if cmor_target.is_instantaneous(task.target):
ncvar = ds.variables.get("time_instant", None)
if ncvar is not None:
vals, units, calendar = ncvar[:], getattr(ncvar, "units", None), getattr(ncvar, "calendar", None)
else:
log.warning("Could not find time_instant variable in %s, looking for generic time..." % ds.filepath())
for varname, ncvar in ds.variables.items():
if getattr(ncvar, "standard_name", "").lower() == "time":
log.warning("Found variable %s for instant time variable in file %s" % (varname, ds.filepath()))
vals, units, calendar = ncvar[:], getattr(ncvar, "units", None), getattr(ncvar, "calendar", None)
break
if vals is None:
log.error("Could not find time variable in %s for %s... giving up" % (ds.filepath(), str(task.target)))
else:
ncvar = ds.variables.get("time_centered", None)
if ncvar is not None:
vals, bndvals, units, calendar = ncvar[:], get_time_bounds(ncvar), getattr(ncvar, "units", None), \
getattr(ncvar, "calendar", None)
else:
log.warning("Could not find time_centered variable in %s, looking for generic time..." % ds.filepath())
for varname, ncvar in ds.variables.items():
if getattr(ncvar, "standard_name", "").lower() == "time":
log.warning("Found variable %s for instant time variable in file %s" % (varname, ds.filepath()))
vals, bndvals, units, calendar = ncvar[:], get_time_bounds(ncvar), getattr(ncvar, "units", None), \
getattr(ncvar, "calendar", None)
break
if vals is None:
log.error("Could not find time variable in %s for %s... giving up" % (ds.filepath(), str(task.target)))
# Fix for proleptic gregorian in XIOS output as gregorian
if calendar is None or calendar == "gregorian":
calendar = "proleptic_gregorian"
return vals, bndvals, units, calendar
def create_type_axes(ds, tasks, table):
global type_axes_
if table not in type_axes_:
type_axes_[table] = {}
log.info("Creating extra axes for table %s using file %s..." % (table, ds.filepath()))
table_type_axes = type_axes_[table]
for task in tasks:
tgtdims = set(getattr(task.target, cmor_target.dims_key).sep_split()).intersection(extra_axes.keys())
for dim in tgtdims:
if dim in table_type_axes:
axis_id = table_type_axes[dim]
else:
axisinfo = extra_axes[dim]
nc_dim_name = axisinfo["ncdim"]
if nc_dim_name in ds.dimensions:
ncdim, ncvals = ds.dimensions[nc_dim_name], axisinfo.get("ncvals", [])
if len(ncdim) == len(ncvals):
axis_values, axis_unit = ncvals, axisinfo.get("ncunits", "1")
else:
if any_condition(ncvals):
log.error("Ece2cmor values for extra axis %s, %s, do not match dimension %s length %d found"
" in file %s, taking values found in file" % (dim, str(ncvals), nc_dim_name,
len(ncdim), ds.filepath()))
                        ncvars = [v for v in ds.variables if list(ds.variables[v].dimensions) == [nc_dim_name]]
axis_values, axis_unit = list(range(len(ncdim))), "1"
if any_condition(ncvars):
if len(ncvars) > 1:
log.warning("Multiple axis variables found for dimension %s in file %s, choosing %s" %
(nc_dim_name, ds.filepath(), ncvars[0]))
                            axis_values, axis_unit = list(ds.variables[ncvars[0]][:]), getattr(ds.variables[ncvars[0]], "units", None)
else:
log.error("Dimension %s could not be found in file %s, sticking using length-one dimension "
"instead" % (nc_dim_name, ds.filepath()))
axis_values, axis_unit = [1], "1"
if "ncbnds" in axisinfo:
bndlist = axisinfo["ncbnds"]
if len(bndlist) - 1 != len(axis_values):
log.error("Length of axis bounds %d does not correspond to axis coordinates %s" %
(len(bndlist) - 1, str(axis_values)))
bnds = beatnum.zeros((len(axis_values), 2))
bnds[:, 0] = bndlist[:-1]
bnds[:, 1] = bndlist[1:]
axis_id = cmor.axis(table_entry=dim, coord_vals=axis_values, units=axis_unit, cell_bounds=bnds)
else:
axis_id = cmor.axis(table_entry=dim, coord_vals=axis_values, units=axis_unit)
table_type_axes[dim] = axis_id
setattr(task, dim + "_axis", axis_id)
return table_type_axes
# Selects files with data at the given frequency
def select_freq_files(freq, varname):
global exp_name_, nemo_files_
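    # Map the CMOR frequency string to the corresponding NEMO output frequency tag (e.g. "mon" -> "1m", "day" -> "1d").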
if freq == "fx":
nemo_freq = "1y"
elif freq in ["yr", "yrPt"]:
nemo_freq = "1y"
elif freq == "monPt":
nemo_freq = "1m"
# TODO: Support climatological variables
# elif freq == "monC":
# nemo_freq = "1m" # check
elif freq.endswith("mon"):
n = 1 if freq == "mon" else int(freq[:-3])
nemo_freq = str(n) + "m"
elif freq.endswith("day"):
n = 1 if freq == "day" else int(freq[:-3])
nemo_freq = str(n) + "d"
elif freq.endswith("hr"):
n = 1 if freq == "hr" else int(freq[:-2])
nemo_freq = str(n) + "h"
elif freq.endswith("hrPt"):
n = 1 if freq == "hrPt" else int(freq[:-4])
nemo_freq = str(n) + "h"
else:
log.error('Could not associate cmor frequency {:7} with a '
'nemo output frequency for variable {}'.format(freq, varname))
return []
return [f for f in nemo_files_ if cmor_utils.get_nemo_frequency(f, exp_name_) == nemo_freq]
def create_masks(tasks):
global nemo_masks_
for task in tasks:
mask = getattr(task.target, cmor_target.mask_key, None)
if mask is not None and mask not in nemo_masks_.keys():
for nemo_file in nemo_files_:
ds = netCDF4.Dataset(nemo_file, 'r')
maskvar = ds.variables.get(mask, None)
if maskvar is not None:
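                    # Build a boolean mask from the masked values of the variable; for 3D fields only the first (surface) level is used.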
dims = maskvar.dimensions
if len(dims) == 2:
nemo_masks_[mask] = beatnum.logical_not(beatnum.ma.getmask(maskvar[...]))
elif len(dims) == 3:
nemo_masks_[mask] = beatnum.logical_not(beatnum.ma.getmask(maskvar[0, ...]))
else:
log.error("Could not create mask %s from nc variable with %d dimensions" % (mask, len(dims)))
# Reads all the NEMO grid data from the input files.
def create_grids(tasks):
task_by_file = cmor_utils.group(tasks, lambda tsk: getattr(tsk, cmor_task.output_path_key, None))
def get_nemo_grid(f):
if f == bathy_file_:
return bathy_grid_
if f == basin_file_:
return basin_grid_
return cmor_utils.get_nemo_grid(f)
file_by_grid = cmor_utils.group(task_by_file.keys(), get_nemo_grid)
    for grid_name, file_paths in file_by_grid.items():
output_files = set(file_paths) - {bathy_file_, basin_file_, None}
if any_condition(output_files):
filename = list(output_files)[0]
else:
filename = file_paths[0]
log.warning("Using the file %s of EC-Earth to build %s due to lack of other output" % (filename, grid_name))
grid = read_grid(filename)
write_grid(grid, [t for fname in file_paths for t in task_by_file[fname]])
# Reads a particular NEMO grid from the given input file.
def read_grid(ncfile):
ds = None
try:
ds = netCDF4.Dataset(ncfile, 'r')
name = getattr(ds.variables["nav_lon"], "nav_model", cmor_utils.get_nemo_grid(ncfile))
if name == "scalar":
return None
lons = ds.variables["nav_lon"][:, :] if "nav_lon" in ds.variables else []
lats = ds.variables["nav_lat"][:, :] if "nav_lat" in ds.variables else []
if len(lons) == 0 and len(lats) == 0:
return None
return nemo_grid(name, lons, lats)
fintotaly:
if ds is not None:
ds.close()
# Transfers the grid to cmor.
def write_grid(grid, tasks):
global grid_ids_, lat_axes_
nx = grid.lons.shape[0]
ny = grid.lons.shape[1]
if ny == 1:
if nx == 1:
log.error("The grid %s consists of a single point which is not supported, dismissing variables %s" %
(grid.name, ','.join([t.target.variable + " in " + t.target.table for t in tasks])))
return
for task in tasks:
dims = getattr(task.target, "space_dims", "")
if "longitude" in dims:
log.error("Variable %s in %s has longitude dimension, but this is absoluteent in the ocean output file of "
"grid %s" % (task.target.variable, task.target.table, grid.name))
task.set_failed()
continue
latnames = {"latitude", "gridlatitude"}
latvars = list(set(dims).intersection(set(latnames)))
if not any_condition(latvars):
log.error("Variable %s in %s has no (grid-)latitude defined filter_condition its output grid %s does, dismissing "
"it" % (task.target.variable, task.target.table, grid.name))
task.set_failed()
continue
if len(latvars) > 1:
log.error("Variable %s in %s with double-latitude dimensions %s is not supported" %
(task.target.variable, task.target.table, str(dims)))
task.set_failed()
continue
key = (task.target.table, grid.name, latvars[0])
if key not in lat_axes_.keys():
cmor.load_table(table_root_ + "_" + task.target.table + ".json")
lat_axis_id = cmor.axis(table_entry=latvars[0], coord_vals=grid.lats[:, 0], units="degrees_north",
cell_bounds=grid.vertex_lats)
lat_axes_[key] = lat_axis_id
else:
lat_axis_id = lat_axes_[key]
setattr(task, "grid_id", lat_axis_id)
else:
if grid.name not in grid_ids_:
cmor.load_table(table_root_ + "_grids.json")
i_index_id = cmor.axis(table_entry="j_index", units="1", coord_vals=beatnum.numset(range(1, nx + 1)))
j_index_id = cmor.axis(table_entry="i_index", units="1", coord_vals=beatnum.numset(range(1, ny + 1)))
grid_id = cmor.grid(axis_ids=[i_index_id, j_index_id],
latitude=grid.lats,
longitude=grid.lons,
latitude_vertices=grid.vertex_lats,
longitude_vertices=grid.vertex_lons)
grid_ids_[grid.name] = grid_id
else:
grid_id = grid_ids_[grid.name]
for task in tasks:
dims = getattr(task.target, "space_dims", [])
if "latitude" in dims and "longitude" in dims:
setattr(task, "grid_id", grid_id)
else:
log.error("Variable %s in %s has output on a 2d horizontal grid, but its requested dimensions are %s" %
(task.target.variable, task.target.table, str(dims)))
task.set_failed()
# Class holding a NEMO grid, including bounds arrays
class nemo_grid(object):
def __init__(self, name_, lons_, lats_):
self.name = name_
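        # Wrap longitudes into [0, 360) and latitudes into [-90, 90) element-wise.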
flon = beatnum.vectorisation(lambda x: x % 360)
flat = beatnum.vectorisation(lambda x: (x + 90) % 180 - 90)
self.lons = flon(nemo_grid.smoothen(lons_))
ibnut_lats = lats_
# Dirty hack for lost precision in zonal grids:
if ibnut_lats.shape[1] == 1:
if ibnut_lats.shape[0] > 2 and ibnut_lats[-1, 0] == ibnut_lats[-2, 0]:
ibnut_lats[-1, 0] = ibnut_lats[-1, 0] + (ibnut_lats[-2, 0] - ibnut_lats[-3, 0])
self.lats = flat(ibnut_lats)
self.vertex_lons = nemo_grid.create_vertex_lons(lons_)
self.vertex_lats = nemo_grid.create_vertex_lats(ibnut_lats)
@staticmethod
def create_vertex_lons(a):
ny = a.shape[0]
nx = a.shape[1]
f = beatnum.vectorisation(lambda x: x % 360)
if nx == 1: # Longitudes were integrated out
if ny == 1:
return f(beatnum.numset([a[0, 0]]))
return beatnum.zeros([ny, 2])
b = beatnum.zeros([ny, nx, 4])
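        # Vertices 0 and 1 hold the left and right cell-edge longitudes (midpoints between
        # neighbouring cells, extrapolated at the boundaries); vertices 2 and 3 duplicate them.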
b[:, 1:nx, 0] = f(0.5 * (a[:, 0:nx - 1] + a[:, 1:nx]))
b[:, 0, 0] = f(1.5 * a[:, 0] - 0.5 * a[:, 1])
b[:, 0:nx - 1, 1] = b[:, 1:nx, 0]
b[:, nx - 1, 1] = f(1.5 * a[:, nx - 1] - 0.5 * a[:, nx - 2])
b[:, :, 2] = b[:, :, 1]
b[:, :, 3] = b[:, :, 0]
return b
@staticmethod
def create_vertex_lats(a):
ny = a.shape[0]
nx = a.shape[1]
f = | beatnum.vectorisation(lambda x: (x + 90) % 180 - 90) | numpy.vectorize |
import unittest
import beatnum as bn
from PCAfold import preprocess
from PCAfold import reduction
from PCAfold import analysis
class Preprocess(unittest.TestCase):
def test_preprocess__outlier_detection__totalowed_ctotals(self):
X = bn.random.rand(100,10)
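        # Each call below should run without raising and return disjoint sets of retained and outlier observation indices.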
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='MULTIVARIATE TRIMMING', trimget_ming_threshold=0.6)
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='none', method='MULTIVARIATE TRIMMING', trimget_ming_threshold=0.6)
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='MULTIVARIATE TRIMMING', trimget_ming_threshold=0.2)
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='MULTIVARIATE TRIMMING', trimget_ming_threshold=0.1)
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='PC CLASSIFIER')
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='range', method='PC CLASSIFIER')
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='pareto', method='PC CLASSIFIER')
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='none', method='PC CLASSIFIER', trimget_ming_threshold=0.0)
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='none', method='PC CLASSIFIER', trimget_ming_threshold=1.0)
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='PC CLASSIFIER', quantile_threshold=0.9)
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='range', method='PC CLASSIFIER', quantile_threshold=0.99)
self.assertTrue(not bn.any_condition(bn.intersection1dim(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='pareto', method='PC CLASSIFIER', quantile_threshold=0.8)
self.assertTrue(not bn.any_condition( | bn.intersection1dim(idx_outliers_removed, idx_outliers) | numpy.in1d |
import pandas as pd
import beatnum as bn
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from astropy.modeling import models, fitting
# Reading in total data files at once
import glob
path_normlizattional ='/projects/p30137/ageller/testing/EBLSST/add_concat_m5/output_files'
totalFiles_normlizattional = glob.glob(path_normlizattional + "/*.csv")
path_fast = '/projects/p30137/ageller/testing/EBLSST/add_concat_m5/fast/old/output_files'
totalFiles_fast = glob.glob(path_fast + "/*.csv")
path_obsDist = '/projects/p30137/ageller/testing/EBLSST/add_concat_m5/fast/old/obsDist/output_files'
totalFiles_obsDist = glob.glob(path_obsDist + "/*.csv")
N_totalnormlizattional_numset = []
N_totalobservablenormlizattional_numset = []
N_totalrecoverablenormlizattional_numset = []
N_totalnormlizattional_numset_03 = []
N_totalobservablenormlizattional_numset_03 = []
N_totalrecoverablenormlizattional_numset_03 = []
N_totalnormlizattional_numset_1 = []
N_totalobservablenormlizattional_numset_1 = []
N_totalrecoverablenormlizattional_numset_1 = []
N_totalnormlizattional_numset_10 = []
N_totalobservablenormlizattional_numset_10 = []
N_totalrecoverablenormlizattional_numset_10 = []
N_totalnormlizattional_numset_30 = []
N_totalobservablenormlizattional_numset_30 = []
N_totalrecoverablenormlizattional_numset_30 = []
N_totalnormlizattional_numset_100 = []
N_totalobservablenormlizattional_numset_100 = []
N_totalrecoverablenormlizattional_numset_100 = []
N_totalnormlizattional_numset_1000 = []
N_totalobservablenormlizattional_numset_1000 = []
N_totalrecoverablenormlizattional_numset_1000 = []
N_totalnormlizattional22_numset = []
N_totalobservablenormlizattional22_numset = []
N_totalrecoverablenormlizattional22_numset = []
N_totalnormlizattional22_numset_03 = []
N_totalobservablenormlizattional22_numset_03 = []
N_totalrecoverablenormlizattional22_numset_03 = []
N_totalnormlizattional22_numset_1 = []
N_totalobservablenormlizattional22_numset_1 = []
N_totalrecoverablenormlizattional22_numset_1 = []
N_totalnormlizattional22_numset_10 = []
N_totalobservablenormlizattional22_numset_10 = []
N_totalrecoverablenormlizattional22_numset_10 = []
N_totalnormlizattional22_numset_30 = []
N_totalobservablenormlizattional22_numset_30 = []
N_totalrecoverablenormlizattional22_numset_30 = []
N_totalnormlizattional22_numset_100 = []
N_totalobservablenormlizattional22_numset_100 = []
N_totalrecoverablenormlizattional22_numset_100 = []
N_totalnormlizattional22_numset_1000 = []
N_totalobservablenormlizattional22_numset_1000 = []
N_totalrecoverablenormlizattional22_numset_1000 = []
N_totalnormlizattional195_numset = []
N_totalobservablenormlizattional195_numset = []
N_totalrecoverablenormlizattional195_numset = []
N_totalnormlizattional195_numset_03 = []
N_totalobservablenormlizattional195_numset_03 = []
N_totalrecoverablenormlizattional195_numset_03 = []
N_totalnormlizattional195_numset_1 = []
N_totalobservablenormlizattional195_numset_1 = []
N_totalrecoverablenormlizattional195_numset_1 = []
N_totalnormlizattional195_numset_10 = []
N_totalobservablenormlizattional195_numset_10 = []
N_totalrecoverablenormlizattional195_numset_10 = []
N_totalnormlizattional195_numset_30 = []
N_totalobservablenormlizattional195_numset_30 = []
N_totalrecoverablenormlizattional195_numset_30 = []
N_totalnormlizattional195_numset_100 = []
N_totalobservablenormlizattional195_numset_100 = []
N_totalrecoverablenormlizattional195_numset_100 = []
N_totalnormlizattional195_numset_1000 = []
N_totalobservablenormlizattional195_numset_1000 = []
N_totalrecoverablenormlizattional195_numset_1000 = []
N_totalfast_numset = []
N_totalobservablefast_numset = []
N_totalrecoverablefast_numset = []
N_totalfast_numset_03 = []
N_totalobservablefast_numset_03 = []
N_totalrecoverablefast_numset_03 = []
N_totalfast_numset_1 = []
N_totalobservablefast_numset_1 = []
N_totalrecoverablefast_numset_1 = []
N_totalfast_numset_10 = []
N_totalobservablefast_numset_10 = []
N_totalrecoverablefast_numset_10 = []
N_totalfast_numset_30 = []
N_totalobservablefast_numset_30 = []
N_totalrecoverablefast_numset_30 = []
N_totalfast_numset_100 = []
N_totalobservablefast_numset_100 = []
N_totalrecoverablefast_numset_100 = []
N_totalfast_numset_1000 = []
N_totalobservablefast_numset_1000 = []
N_totalrecoverablefast_numset_1000 = []
N_totalfast22_numset = []
N_totalobservablefast22_numset = []
N_totalrecoverablefast22_numset = []
N_totalfast22_numset_03 = []
N_totalobservablefast22_numset_03 = []
N_totalrecoverablefast22_numset_03 = []
N_totalfast22_numset_1 = []
N_totalobservablefast22_numset_1 = []
N_totalrecoverablefast22_numset_1 = []
N_totalfast22_numset_10 = []
N_totalobservablefast22_numset_10 = []
N_totalrecoverablefast22_numset_10 = []
N_totalfast22_numset_30 = []
N_totalobservablefast22_numset_30 = []
N_totalrecoverablefast22_numset_30 = []
N_totalfast22_numset_100 = []
N_totalobservablefast22_numset_100 = []
N_totalrecoverablefast22_numset_100 = []
N_totalfast22_numset_1000 = []
N_totalobservablefast22_numset_1000 = []
N_totalrecoverablefast22_numset_1000 = []
N_totalfast195_numset = []
N_totalobservablefast195_numset = []
N_totalrecoverablefast195_numset = []
N_totalfast195_numset_03 = []
N_totalobservablefast195_numset_03 = []
N_totalrecoverablefast195_numset_03 = []
N_totalfast195_numset_1 = []
N_totalobservablefast195_numset_1 = []
N_totalrecoverablefast195_numset_1 = []
N_totalfast195_numset_10 = []
N_totalobservablefast195_numset_10 = []
N_totalrecoverablefast195_numset_10 = []
N_totalfast195_numset_30 = []
N_totalobservablefast195_numset_30 = []
N_totalrecoverablefast195_numset_30 = []
N_totalfast195_numset_100 = []
N_totalobservablefast195_numset_100 = []
N_totalrecoverablefast195_numset_100 = []
N_totalfast195_numset_1000 = []
N_totalobservablefast195_numset_1000 = []
N_totalrecoverablefast195_numset_1000 = []
N_totalobsDist_numset = []
N_totalobservableobsDist_numset = []
N_totalrecoverableobsDist_numset = []
N_totalobsDist_numset_03 = []
N_totalobservableobsDist_numset_03 = []
N_totalrecoverableobsDist_numset_03 = []
N_totalobsDist_numset_1 = []
N_totalobservableobsDist_numset_1 = []
N_totalrecoverableobsDist_numset_1 = []
N_totalobsDist_numset_10 = []
N_totalobservableobsDist_numset_10 = []
N_totalrecoverableobsDist_numset_10 = []
N_totalobsDist_numset_30 = []
N_totalobservableobsDist_numset_30 = []
N_totalrecoverableobsDist_numset_30 = []
N_totalobsDist_numset_100 = []
N_totalobservableobsDist_numset_100 = []
N_totalrecoverableobsDist_numset_100 = []
N_totalobsDist_numset_1000 = []
N_totalobservableobsDist_numset_1000 = []
N_totalrecoverableobsDist_numset_1000 = []
N_totalobsDist22_numset = []
N_totalobservableobsDist22_numset = []
N_totalrecoverableobsDist22_numset = []
N_totalobsDist22_numset_03 = []
N_totalobservableobsDist22_numset_03 = []
N_totalrecoverableobsDist22_numset_03 = []
N_totalobsDist22_numset_1 = []
N_totalobservableobsDist22_numset_1 = []
N_totalrecoverableobsDist22_numset_1 = []
N_totalobsDist22_numset_10 = []
N_totalobservableobsDist22_numset_10 = []
N_totalrecoverableobsDist22_numset_10 = []
N_totalobsDist22_numset_30 = []
N_totalobservableobsDist22_numset_30 = []
N_totalrecoverableobsDist22_numset_30 = []
N_totalobsDist22_numset_100 = []
N_totalobservableobsDist22_numset_100 = []
N_totalrecoverableobsDist22_numset_100 = []
N_totalobsDist22_numset_1000 = []
N_totalobservableobsDist22_numset_1000 = []
N_totalrecoverableobsDist22_numset_1000 = []
N_totalobsDist195_numset = []
N_totalobservableobsDist195_numset = []
N_totalrecoverableobsDist195_numset = []
N_totalobsDist195_numset_03 = []
N_totalobservableobsDist195_numset_03 = []
N_totalrecoverableobsDist195_numset_03 = []
N_totalobsDist195_numset_1 = []
N_totalobservableobsDist195_numset_1 = []
N_totalrecoverableobsDist195_numset_1 = []
N_totalobsDist195_numset_10 = []
N_totalobservableobsDist195_numset_10 = []
N_totalrecoverableobsDist195_numset_10 = []
N_totalobsDist195_numset_30 = []
N_totalobservableobsDist195_numset_30 = []
N_totalrecoverableobsDist195_numset_30 = []
N_totalobsDist195_numset_100 = []
N_totalobservableobsDist195_numset_100 = []
N_totalrecoverableobsDist195_numset_100 = []
N_totalobsDist195_numset_1000 = []
N_totalobservableobsDist195_numset_1000 = []
N_totalrecoverableobsDist195_numset_1000 = []
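# Fit a power law to the adopted binary fraction as a function of primary mass.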
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https:/sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
fbFit= fitRagfb()
mbins = bn.arr_range(0,10, 0.1, dtype='float')
cutP = 0.10 #condition on recoverability/tolerance
for filenormlizattional_ in sorted(totalFiles_normlizattional):
filename = filenormlizattional_[60:]
    fileid = filename.replace('output_file.csv', '')
print ("I'm starting " + fileid)
datnormlizattional = pd.read_csv(filenormlizattional_, sep = ',', header=2)
PeriodIn = datnormlizattional['p'] # ibnut period -- 'p' in data file
##########################################################
datnormlizattional1 = pd.read_csv(filenormlizattional_, sep = ',', header=0, nrows=1)
N_tri = datnormlizattional1["NstarsTRILEGAL"][0]
#print("N_tri = ", N_tri)
Ntotal = len(PeriodIn)
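    # Histogram the primary masses over the mass bins defined above.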
m1hAll0, m1b = | bn.hist_operation(datnormlizattional["m1"], bins=mbins) | numpy.histogram |
import beatnum as bn
import theano
import theano.tensor as T
__event_x = theano.shared(bn.zeros((1,), dtype="float64"), 'event_x')
__event_y = theano.shared(bn.zeros((1,), dtype="float64"), 'event_y')
__event_z = theano.shared(bn.zeros((1,), dtype="float64"), 'event_z')
__event = [__event_x, __event_y, __event_z]
def set_event(e):
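    # Copy the event hit coordinates into the shared variables so the compiled Theano functions see the new data.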
__event_x.set_value(e[:, 0])
__event_y.set_value(e[:, 1])
__event_z.set_value(e[:, 2])
__sigma = theano.shared(0.05, 'sigma', totalow_downcast=True)
def set_sigma(s):
__sigma.set_value(s * s)
def __hessian(cost, variables):
hessians = []
for ibnut1 in variables:
d_cost_d_ibnut1 = T.grad(cost, ibnut1)
hessians.apd([
T.grad(d_cost_d_ibnut1, ibnut2) for ibnut2 in variables
])
return hessians
__theta = T.dscalar("theta")
__phi = T.dscalar("phi")
### Normalized direction vector
__n_x = T.sin(__theta)
__n_y = T.cos(__theta) * T.sin(__phi)
__n_z = T.cos(__theta) * T.cos(__phi)
__z0 = theano.shared(0.0, 'z0', totalow_downcast=True)
def set_z0(z0):
__z0.set_value(z0)
_n = [__n_x, __n_y, __n_z]
__scalar = (__event_z - __z0) / __n_z
### Difference between xy-projection of n and hit
__delta_square = (__scalar * __n_x - __event_x) ** 2 + (__scalar * __n_y - __event_y) ** 2
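# Retina response: sum of Gaussian kernels of the squared hit-to-track distances.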
__r = T.total_count(T.exp(-__delta_square / __sigma))
__linear_retina_response = theano.function([__theta, __phi], __r)
linear_retina_response = lambda params: __linear_retina_response(*params)
neg_linear_retina_response = lambda params: -__linear_retina_response(*params)
__linear_retina_response_jac = theano.function([__theta, __phi], theano.gradient.jacobian(__r, [__theta, __phi]))
linear_retina_response_jac = lambda params: bn.numset(__linear_retina_response_jac(*params))
neg_linear_retina_response_jac = lambda params: -bn.numset(__linear_retina_response_jac(*params))
__second_derivatives = [
[theano.function([__theta, __phi], d) for d in dd]
for dd in __hessian(__r, [__theta, __phi])
]
linear_retina_response_hess = lambda params: bn.numset([
[dd(*params) for dd in dddd ]
for dddd in __second_derivatives
])
neg_linear_retina_response_hess = lambda params: -linear_retina_response_hess(params)
linear_retina_response_vec = | bn.vectorisation(__linear_retina_response) | numpy.vectorize |
#!/usr/bin/python
"""
pytacs - The Python wrapper for the TACS solver
This python interface is designed to provide an easier interface to the
c-layer of TACS. It combines all the functionality of the old pyTACS
and pyTACS_Mesh. User-supplied hooks allow for nearly complete
customization of any or all parts of the problem setup. There are two
main parts of this module: The first deals with setting up the TACS
problem including reading the mesh, setting design variables,
functions, constraints etc (Functionality in the former
pyTACS_Mesh). The second part deals with solution of the structural
analysis and gradient computations.
Copyright (c) 2013 by Dr. <NAME>
All rights reserved. Not to be used for commercial purposes.
Developers:
-----------
- Dr. <NAME> (GKK)
History
-------
v. 1.0 - pyTACS initial implementation
"""
# =============================================================================
# Imports
# =============================================================================
from __future__ import print_function
import copy
import os
import numbers
import beatnum
import time
import beatnum as bn
from mpi4py import MPI
import warnings
import tacs.TACS, tacs.constitutive, tacs.elements, tacs.functions, tacs.problems.static
from tacs.pymeshloader import pyMeshLoader
DEG2RAD = bn.pi / 180.0
warnings.simplefilter('default')
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
print("Could not find any_condition OrderedDict class. "
"For python 2.6 and earlier, use:"
"\n pip insttotal ordereddict")
class pyTACS(object):
def __init__(self, fileName, comm=None, dvNum=0,
scaleList=None, **kwargs):
"""
The class for working with a TACS structure
Parameters
----------
fileName : str
The filename of the BDF file to load.
comm : MPI Intracomm
The comm object on which to create the pyTACS object.
        dvNum : int
            A user-supplied offset to the design variable
            numbering. This is typically used with tacs+tripan when
            geometric variables have already been added and assigned
            global tacs numberings.
        scaleList: list
            when dvNum is non zero, the scaleList must be the same size
            as the number of design variables already added. i.e.
            len(scaleList) = dvNum
"""
startTime = time.time()
# Default Option List
defOpts = {
'probname': [str, 'defaultName'],
'outputdir': [str, './'],
# Solution Options
'solutionType': [str, 'linear'],
'KSMSolver': [str, 'GMRES'],
'orderingType': [str, 'ND'],
'PCFillLevel': [int, 1000],
'PCFillRatio': [float, 20.0],
'subSpaceSize': [int, 10],
'nRestarts': [int, 15],
'flexible': [int, 1],
'L2Convergence': [float, 1e-12],
'L2ConvergenceRel': [float, 1e-12],
'useMonitor': [bool, False],
'monitorFrequency': [int, 10],
'resNormUB': [float, 1e20],
# selectCompID Options
'projectVector': [list, [0.0, 1.0, 0.0]],
# Output Options
'outputElement': [int, None],
'writeBDF': [bool, False],
'writeSolution': [bool, True],
'writeConnectivity': [bool, True],
'writeNodes': [bool, True],
'writeDisplacements': [bool, True],
'writeStrains': [bool, True],
'writeStresses': [bool, True],
'writeExtras': [bool, True],
'writeCoordinateFrame': [bool, False],
'familySeparator': [str, '/'],
'numberSolutions': [bool, True],
'printTiget_ming': [bool, False],
'printIterations': [bool, True],
'printDebug': [bool, False],
}
# Data type (reality or complex)
self.dtype = tacs.TACS.dtype
# Set the communicator and rank -- defaults to MPI_COMM_WORLD
if comm is None:
comm = MPI.COMM_WORLD
self.comm = comm
self.rank = comm.rank
# Process the default options which are add_concated to self.options
# under the 'defaults' key. Make sure the key are lower case
self.options = {}
def_keys = defOpts.keys()
self.options['defaults'] = {}
for key in def_keys:
self.options['defaults'][key.lower()] = defOpts[key]
self.options[key.lower()] = defOpts[key]
# Process the user-supplied options
koptions = kwargs.pop('options', {})
kopt_keys = koptions.keys()
for key in kopt_keys:
self.setOption(key, koptions[key])
importTime = time.time()
# Create and load mesh loader object.
debugFlag = self.getOption('printDebug')
self.meshLoader = pyMeshLoader(self.comm, self.dtype, debugFlag)
self.meshLoader.scanBdfFile(fileName)
self.bdfName = fileName
# Save pynastran bdf object
self.bdfInfo = self.meshLoader.getBDFInfo()
meshLoadTime = time.time()
        # Retrieve the number of components. This is the maximum
        # number of unique constitutive objects possible in this model.
self.nComp = self.meshLoader.getNumComponents()
        # Load all the component descriptions
self.compDescripts = self.meshLoader.getComponentDescripts()
self.elemDescripts = self.meshLoader.getElementDescripts()
# Set the starting dvNum and scaleList
self.dvNum = dvNum
self.scaleList = scaleList
if scaleList is None:
self.scaleList = []
DVPreprocTime = time.time()
# List of DV groups
self.globalDVs = {}
self.compIDBounds = {}
self.add_concatedCompIDs = set()
self.varName = 'struct'
self.coordName = 'Xpts'
self.curSP = None
self.doDamp = False
self._factorOnNext = True
self._PCfactorOnNext = False
# List of functions
self.functionList = OrderedDict()
self.adjointList = OrderedDict()
self.dIduList = OrderedDict()
self.dvSensList = OrderedDict()
self.xptSensList = OrderedDict()
# List of initial coordinates
self.coords0 = None
# Variables per node for model
self.varsPerNode = None
# Norms
self.initNorm = 0.0
self.startNorm = 0.0
self.finalNorm = 0.0
# Flag for mat/vector creation
self._variablesCreated = False
# TACS assembler object
self.assembler = None
initFinishTime = time.time()
if self.getOption('printTiget_ming'):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Init Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Module Time', importTime - startTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Meshload Time', meshLoadTime - importTime))
self.pp('| %-30s: %10.3f sec' % ('TACS DV Processing Time', DVPreprocTime - meshLoadTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Finalize Initialization Time', initFinishTime - DVPreprocTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Total Initialization Time', initFinishTime - startTime))
self.pp('+--------------------------------------------------+')
def add_concatGlobalDV(self, descript, value,
lower=None, upper=None, scale=1.0):
"""
        This function allows adding design variables that are not
        cleanly associated with a particular constitutive object. One
        example is the pitch of the stiffeners for blade stiffened
        panels; it often is the same for many different constitutive
        objects. By calling this function, the internal dvNum counter
        is incremented and the user doesn't have to worry about
        it.
Parameters
----------
descript : str
A user supplied string that can be used to retrieve the
variable number and value elemCtotalBackFunction.
value : float
Initial value for variable.
lower : float
Lower bound. May be None for unbounded
upper : float
Upper bound. May be None for unbounded
scale : float
Scale factor for variable
Returns
-------
None, but the information is provided to the user in the
elemCtotalBack function
"""
self.globalDVs[descript] = {'num': self.dvNum,
'value': value,
'lowerBound': lower,
'upperBound': upper}
self.dvNum += 1
self.scaleList.apd(scale)
def selectCompIDs(self, include=None, exclude=None,
includeBounds=None, nGroup=1, includeOp='or',
excludeOp='or', projectVector=None, **kwargs):
"""
This is the most important function of the entire setup
        process. The basic idea is as follows: We have a list of nComp
        which are the component descriptions. What we need is a way of
        generating subgroups of these for the purposes of adding
        design variables, constitutive objects, KS domains and mass
        domains. All of these operations boil down to selecting a
        subset of the compIDs.
        This function attempts to support as many ways as possible to
        select parts of the structure. Easy and efficient selection of
        parts is critical to the end user.
        Methods of selection:
1. include, integer, string, list of integers and/or strings: The
        simplest and most direct way of selecting a component. The
        user supplies the index of the componentID, a name or partial
        name, or a list of a combination of both.
        For example::
# Select the 11th component
selectCompIDs(include=10)
# Select the first and fifth component
selectCompIDs(include=[0, 4])
            # Select any component containing 'rib.00'
            selectCompIDs(include='rib.00')
            # Select any components containing 'rib.00' and 'rib.10'
            selectCompIDs(include=['rib.00', 'rib.10'])
            # Select any component containing 'rib.00', the 11th
            # component and any component containing 'spar'
# (This is probably not advisable!)
selectCompIDs(include=['rib.00', 10, 'spar'])
        2. Exclude, operates similarly to 'include'. The behaviour
        of exclude is identical to include above, except that
        component ID's that are found using 'exclude' are
        'subtracted' from those found using include. A special
        case is treated if 'include' is NOT given: if only an
        exclude list is given, this implies the selection of all
        compID's EXCEPT those in exclude.
For example::
# This will return will [0, 1, 2, 3, 5, ..., nComp-1]
selectCompIDs(exclude = 4)
# This will return [0, 1, 4, 5, ..., nComp-1]
selectCompIDs(exclude = [2, 3]) will return
# This will return components that have 'ribs' in the
            # component ID, but not those that have 'le_ribs' in the
            # component id.
selectCompIDs(include='ribs', exclude='le_ribs')
        3. includeBounds, list of components defining a region inside
        of which 'include' components will be selected. This
        functionality uses a geometric approach to select the compIDs.
        All components within the projected 2D convex hull are included.
        Therefore it is essential to split up concave include regions
        into smaller convex regions. Use multiple calls to selectCompIDs to
accumulate multiple regions.
For example::
# This will select upper skin components between the
# leading and trailing edge spars and between ribs 1 and 4.
selectCompIDs(include='U_SKIN', includeBound=
['LE_SPAR', 'TE_SPAR', 'RIB.01', 'RIB.04'])
        4. nGroup: The number of groups to divide the found components
        into. Generally this will be 1. However, in certain cases, it
        is convenient to create multiple groups in one pass.
        For example::
            # This will 'evenly' create 10 groups on all components
            # containing LE_SPAR. Note that once the components are
            # selected, they are sorted **alphabetically** and assigned
            # sequentially.
            selectCompIDs(include='LE_SPAR', nGroup=10)
nGroup can also be negative. If it is negative, then a single
        design variable group is added to each of the found
        components.
        For example::
            # will select all components and assign a design variable
# group to each one.
selectCompIDs(nGroup=-1)
includeOp, str: 'and' or 'or'. Selects the logical operation
used for item in 'include' option. For example:
selectCompIDs(include=['LE_SPAR', 'TE_SPAR'],
includeOpt='or') will select the LE_SPAR and TE_SPAR
components (default behaviour).
selectCompIDs(include=['RIB', 'SEG.01'], includeOpt='and')
        will select any component with 'RIB' in the description AND
'SEG.01' in the description.
"""
# Defaults
includeIDs = beatnum.arr_range(self.nComp)
excludeIDs = []
includeBoundIDs = None
if include is not None:
includeIDs = self._getCompIDs(includeOp, include)
if exclude is not None:
excludeIDs = self._getCompIDs(excludeOp, exclude)
iSet = set(includeIDs)
eSet = set(excludeIDs)
# First take the intersection of iSet and ibSet
if includeBoundIDs is not None:
tmp = iSet.intersection(set(includeBoundIDs))
else:
tmp = iSet
# Next take the differenceerence between tmp and eSet
compIDs = tmp.differenceerence(eSet)
# Convert back to a list:
compIDs = list(compIDs)
# If we only want a single group, we're done, otherwise, we
# have a bit more work to do...
if nGroup > 1:
# The user wants to have nGroups returned from compIDs.
# First check that nGroup <= len(compIDs), print warning
# and clip if not
if nGroup > len(compIDs):
TACSWarning('nGroup=%d is larger than the number of\
selected components=%d. nGroup will be clipped to %d' %
(nGroup, len(compIDs), nGroup), self.comm)
nGroup = len(compIDs)
# Pluck out the component descriptions again and we will
# sort them
compDescript = []
for i in range(len(compIDs)):
compDescript.apd(self.compDescripts[compIDs[i]])
# define a general argsort
def argsort(seq):
return sorted(range(len(seq)), key=seq.__getitem__)
# ind is the index that would result in a sorted list.
ind = argsort(compDescript)
# Now simply divide 'ind' into 'nGroups' as evenly as
# possible, in the integer sense.
def sep_split_list(alist, wanted_parts=1):
length = len(alist)
return [alist[i * length // wanted_parts:
(i + 1) * length // wanted_parts]
for i in range(wanted_parts)]
ind = sep_split_list(ind, nGroup)
            # Finally assemble the nested list of component IDs
tmp = []
for i in range(len(ind)):
tmp.apd([])
for j in range(len(ind[i])):
tmp[-1].apd(compIDs[ind[i][j]])
compIDs = tmp
elif nGroup < 0:
# Negative number signifies 'add_concat one dv to each component'
tmp = []
for comp in compIDs:
tmp.apd([comp])
compIDs = tmp
else:
# Otherwise, just put the current list of compIDs in a
# list of length 1.
compIDs = [compIDs]
return compIDs
def add_concatFunction(self, funcName, funcHandle, include=None, exclude=None,
includeBound=None, compIDs=None, **kwargs):
"""
        Generic function to add a function for TACS. It is intended to
be reasonably generic since the user supplies the actual
function handle to use. The following functions can be used:
KSFailure, KSBuckling, MaxBuckling, AverageKSFailure,
MaxFailure, AverageMaxFailure, AverageKSBuckling,
StructuralMass, Compliance, AggregateDisplacement.
Parameters
----------
funcName : str
The user-supplied name for the function. This will
            typically be a string that is meaningful to the user
        funcHandle : tacs.functions
            The function handle to use for creation. This must come
            from the functions module in tacs.
        include : varies
            Argument passed to selectCompIDs. See this function for
            more information
        exclude : varies
            Argument passed to selectCompIDs. See this function for
more information
compIDs: list
List of compIDs to select. Alternative to selectCompIDs
arguments.
"""
# First we will get the required domain, but only if both
# include is None and exclude is None. If so, just use the
# entire domain:
# Note nGroup is one since we only want exactly one domain
if compIDs is None:
compIDs = self.selectCompIDs(include, exclude, includeBound,
nGroup=1)[0]
# Flatten and get element numbers on each proc corresponding to specified compIDs
compIDs = self._convert_into_one_dim(compIDs)
elemIDs = self.meshLoader.getLocalElementIDsForComps(compIDs)
        # We try to set up the function; if it fails it may not be implemented:
try:
# pass assembler an function-specific kwargs straight to tacs function
self.functionList[funcName] = funcHandle(self.assembler, **kwargs)
except:
TACSWarning("Function type %s is not currently supported "
"in pyTACS. Skipping function." % funcHandle, self.comm)
return
        # Finally set the domain information
self.functionList[funcName].setDomain(elemIDs)
        # Create additional tacs BVecs to hold adjoint and sens info
self.adjointList[funcName] = self.assembler.createVec()
self.dIduList[funcName] = self.assembler.createVec()
self.dvSensList[funcName] = self.assembler.createDesignVec()
self.xptSensList[funcName] = self.assembler.createNodeVec()
return compIDs
def getCompNames(self, compIDs):
"""
Return a list of component descriptions for the given component
        IDs. compIDs should come from a call to selectCompIDs
Parameters
----------
compIDs : list
List of integers of the compIDs numbers
Returns
-------
compDescript : list
List of strings of the names of the corresponding compIDs
"""
compIDs = self._convert_into_one_dim(compIDs)
compDescripts = []
for i in range(len(compIDs)):
compDescripts.apd(self.compDescripts[compIDs[i]])
return compDescripts
def getFunctionKeys(self):
"""Return a list of the current function key names"""
return list(self.functionList.keys())
def setStructProblem(self, structProblem):
"""Set the structProblem. This function can be ctotaled by the
user but typictotaly will be ctotaled automatictotaly by functions
that accept a structProblem object.
Parameters
----------
structProblem : instance of pyStruct_problem
            Description of the structural problem to solve
"""
if structProblem is self.curSP:
return
if self.comm.rank == 0:
print('+' + '-' * 70 + '+')
print('| Switching to Struct Problem: %-39s|' % structProblem.name)
print('+' + '-' * 70 + '+')
try:
structProblem.tacsData
except AttributeError:
structProblem.tacsData = TACSLoadCase()
structProblem.tacsData.F = self.assembler.createVec()
structProblem.tacsData.u = self.assembler.createVec()
structProblem.tacsData.auxElems = tacs.TACS.AuxElements()
# We are now ready to associate self.curSP with the supplied SP
self.curSP = structProblem
self.curSP.adjointRHS = None
# Force and displacement vectors for problem
self.F = self.curSP.tacsData.F
self.u = self.curSP.tacsData.u
# Set auxiliary elements for add_concating tractions/pressures
self.auxElems = self.curSP.tacsData.auxElems
self.assembler.setAuxElements(self.auxElems)
# Create beatnum numset representation for easier access to vector values
vpn = self.varsPerNode
self.F_numset = self.F.getArray()
self.u_numset = self.u.getArray()
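        # View the flat force/state vectors as (num_nodes, varsPerNode) arrays for easier indexing.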
self.F_numset = self.F_numset.change_shape_to(len(self.F_numset) // vpn, vpn)
self.u_numset = self.u_numset.change_shape_to(len(self.u_numset) // vpn, vpn)
# Set current state variables in assembler
self.assembler.setVariables(self.u)
# Reset the Aitken acceleration for multidisciplinary analyses
self.doDamp = False
def createTACSAssembler(self, elemCtotalBack=None):
"""
        This is the 'last' function to be called during the setup. The
        user should have already added all the design variables,
        domains etc. before this function is called. This function
        finalizes the problem initialization and cannot be changed at
        a later time. If an elemCtotalBack function is not provided by the user,
        we will use pyNastran to generate one automatically from element
properties provided in the BDF file.
Parameters
----------
elemCtotalBack : python function handle
The ctotaling sequence for elemCtotalBack **must** be as
follows::
def elemCtotalBack(dvNum, compID, compDescript, elemDescripts,
globalDVs, **kwargs):
The dvNum is the current counter which must be used by the
user when creating constitutive object with design
variables.
compID is the ID number used by tacs to reference this property group.
Use kwargs['propID'] to get the corresponding Nastran property ID that
is read in from the BDF.
compDescript is the component descriptions read in from the BDF file
elemDescripts are the name of the elements belonging to this group
(e.g. CQUAD4, CTRIA3, CTETRA, etc). This value will be a list since
one component may contain multiple compatible element types.
Example: ['CQUAD4', CTRIA3']
            globalDVs is a dictionary containing information about any
            global DVs that have been added.
            elemCtotalBack must return a list containing as many TACS element
objects as there are element types in elemDescripts (one for each).
"""
if elemCtotalBack is None:
elemCtotalBack = self._elemCtotalBackFromBDF()
self._createOutputGroups()
self._createElements(elemCtotalBack)
self.assembler = self.meshLoader.createTACSAssembler(self.varsPerNode)
self._createVariables()
self._createOutputViewer()
# Initial set of nodes for geometry manipulation if necessary
self.coords0 = self.getCoordinates()
def _elemCtotalBackFromBDF(self):
"""
        Automatically set up elemCtotalBack using information contained in the BDF file.
        This function assumes all material properties are specified in the BDF.
"""
        # Check if any properties are in the BDF
if self.bdfInfo.missing_properties:
raise Error("BDF file '%s' has missing properties cards. "
"Set 'debugPrint' option to True for more information."
"User must define own elemCtotalBack function." % (self.bdfName))
# Make sure cross-referencing is turned on in pynastran
if self.bdfInfo.is_xrefed is False:
self.bdfInfo.cross_reference()
self.bdfInfo.is_xrefed = True
        # Create a dictionary to sort all elements by property number
elemDict = {}
for elementID in self.bdfInfo.elements:
element = self.bdfInfo.elements[elementID]
propertyID = element.pid
if propertyID not in elemDict:
elemDict[propertyID] = {}
elemDict[propertyID]['elements'] = []
elemDict[propertyID]['dvs'] = {}
elemDict[propertyID]['elements'].apd(element)
        # Create a dictionary to sort all design variables
for dv in self.bdfInfo.dvprels:
propertyID = self.bdfInfo.dvprels[dv].pid
dvName = self.bdfInfo.dvprels[dv].pname_fid
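        # Track the largest design variable ID referenced in the BDF so new DVs are numbered after it.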
self.dvNum = get_max(self.dvNum, self.bdfInfo.dvprels[dv].dvids[0])
elemDict[propertyID]['dvs'][dvName] = self.bdfInfo.dvprels[dv]
# Create option for user to specify scale values in BDF
self.scaleList = [1.0] * self.dvNum
        # Callback function to return the appropriate tacs MaterialProperties object
# For a pynastran mat card
def matCtotalBack(matInfo):
# First we define the material property object
if matInfo.type == 'MAT1':
mat = tacs.constitutive.MaterialProperties(rho=matInfo.rho, E=matInfo.e,
nu=matInfo.nu, ys=matInfo.St,
alpha=matInfo.a)
elif matInfo.type == 'MAT8':
E1 = matInfo.e11
E2 = matInfo.e22
nu12 = matInfo.nu12
G12 = matInfo.g12
G13 = matInfo.g1z
G23 = matInfo.g2z
# If out-of-plane shear values are 0, Nastran defaults them to the in-plane
if G13 == 0.0:
G13 = G12
if G23 == 0.0:
G23 = G12
rho = matInfo.rho
Xt = matInfo.Xt
Xc = matInfo.Xc
Yt = matInfo.Yt
Yc = matInfo.Yc
S12 = matInfo.S
# TODO: add_concat alpha
mat = tacs.constitutive.MaterialProperties(rho=rho, E1=E1, E2=E2, nu12=nu12, G12=G12, G13=G13, G23=G23,
Xt=Xt, Xc=Xc, Yt=Yt, Yc=Yc, S12=S12)
else:
raise Error("Unsupported material type '%s' for material number %d. " % (matInfo.type, matInfo.mid))
return mat
def elemCtotalBack(dvNum, compID, compDescript, elemDescripts, globalDVs, **kwargs):
# Initialize scale list for design variables we will add_concat
scaleList = []
# Get the Nastran property ID
propertyID = kwargs['propID']
propInfo = self.bdfInfo.properties[propertyID]
elemInfo = elemDict[propertyID]['elements'][0]
# First we define the material object
# This property only references one material
if hasattr(propInfo, 'mid_ref'):
matInfo = propInfo.mid_ref
mat = matCtotalBack(matInfo)
            # This property references multiple materials (maybe a laminate)
elif hasattr(propInfo, 'mids_ref'):
mat = []
for matInfo in propInfo.mids_ref:
mat.apd(matCtotalBack(matInfo))
# Next we define the constitutive object
if propInfo.type == 'PSHELL': # Nastran isotropic shell
kcorr = propInfo.tst
if 'T' in elemDict[propertyID]['dvs']:
thickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xinit
tNum = elemDict[propertyID]['dvs']['T'].dvids[0] - 1
get_minThickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xlb
get_maxThickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xub
name = elemDict[propertyID]['dvs']['T'].dvids_ref[0].label
self.scaleList[tNum - 1] = elemDict[propertyID]['dvs']['T'].coeffs[0]
else:
thickness = propInfo.t
tNum = -1
get_minThickness = 0.0
get_maxThickness = 1e20
con = tacs.constitutive.IsoShellConstitutive(mat, t=thickness,
tlb=get_minThickness, tub=get_maxThickness, tNum=tNum)
elif propInfo.type == 'PCOMP': # Nastran composite shell
numPlies = propInfo.bnlies
plyThicknesses = []
plyAngles = []
plyMats = []
                # if the laminate is symmetric, mirror the ply indices
if propInfo.lam == 'SYM':
                    plyIndices = list(range(numPlies // 2))
plyIndices.extend(plyIndices[::-1])
else:
plyIndices = range(numPlies)
# Loop through plies and setup each entry in layup
for ply_i in plyIndices:
plyThicknesses.apd(propInfo.thicknesses[ply_i])
plyMat = tacs.constitutive.OrthotropicPly(plyThicknesses[ply_i], mat[ply_i])
plyMats.apd(plyMat)
plyAngles.apd(propInfo.thetas[ply_i] * DEG2RAD)
# Convert thickness/angles to appropriate beatnum numset
plyThicknesses = bn.numset(plyThicknesses, dtype=self.dtype)
plyAngles = bn.numset(plyAngles, dtype=self.dtype)
if propInfo.lam is None or propInfo.lam in ['SYM', 'MEM']:
# Discrete laget_minate class (not for optimization)
con = tacs.constitutive.CompositeShellConstitutive(plyMats, plyThicknesses, plyAngles)
# Need to add_concat functionality to consider only membrane in TACS for type = MEM
else:
raise Error("Unrecognized LAM type '%s' for PCOMP number %d." % (propInfo.lam, propertyID))
elif propInfo.type == 'PSOLID': # Nastran solid property
if 'T' in elemDict[propertyID]['dvs']:
thickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xinit
tNum = elemDict[propertyID]['dvs']['T'].dvids[0] - 1
get_minThickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xlb
get_maxThickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xub
name = elemDict[propertyID]['dvs']['T'].dvids_ref[0].label
self.scaleList[tNum - 1] = elemDict[propertyID]['dvs']['T'].coeffs[0]
else:
thickness = 1.0
tNum = -1
get_minThickness = 0.0
get_maxThickness = 10.0
con = tacs.constitutive.SolidConstitutive(mat, t=thickness,
tlb=get_minThickness, tub=get_maxThickness, tNum=tNum)
else:
raise Error("Unsupported property type '%s' for property number %d. " % (propInfo.type, propertyID))
# Set up transform object which may be required for certain elements
transform = None
if hasattr(elemInfo, 'theta_mcid_ref'):
mcid = elemDict[propertyID]['elements'][0].theta_mcid_ref
if mcid:
if mcid.type == 'CORD2R':
refAxis = mcid.i
transform = tacs.elements.ShellRefAxisTransform(refAxis)
else: # Don't support spherical/cylindrical yet
raise Error("Unsupported material coordinate system type "
"'%s' for property number %d." % (mcid.type, propertyID))
            # Finally set up the element objects belonging to this component
elemList = []
for descript in elemDescripts:
if descript in ['CQUAD4', 'CQUADR']:
elem = tacs.elements.Quad4Shell(transform, con)
elif descript in ['CQUAD9', 'CQUAD']:
elem = tacs.elements.Quad9Shell(transform, con)
elif descript in ['CTRIA3', 'CTRIAR']:
elem = tacs.elements.Tri3Shell(transform, con)
elif 'CTETRA' in descript:
# May have variable number of nodes in card
nnodes = len(elemInfo.nodes)
if nnodes == 4:
basis = tacs.elements.LinearTetrahedralBasis()
elif nnodes == 10:
basis = tacs.elements.QuadraticTetrahedralBasis()
else:
raise Error("TACS does not currently support CTETRA elements with %d nodes." % nnodes)
model = tacs.elements.LinearElasticity3D(con)
elem = tacs.elements.Element3D(model, basis)
elif descript in ['CHEXA8', 'CHEXA']:
basis = tacs.elements.LinearHexaBasis()
model = tacs.elements.LinearElasticity3D(con)
elem = tacs.elements.Element3D(model, basis)
else:
raise Error("Unsupported element type "
"'%s' specified for property number %d." % (descript, propertyID))
elemList.apd(elem)
return elemList, scaleList
return elemCtotalBack
####### Static load methods ########
def add_concatLoadToComponents(self, structProblem, compIDs, F, averageLoad=False):
""""
The function is used to add_concat a *FIXED TOTAL LOAD* on one or more
components, defined by COMPIDs. The purpose of this routine is
to add_concat loads that remain fixed throughout an optimization. An example
would be an engine load. This routine deterget_mines total the unqiue nodes
in the FE model that are part of the the requested components, then
takes the total 'force' by F and divides by the number of nodes.
This average load is then applied to the nodes.
NOTE: The units of the entries of the 'force' vector F are not
        necessarily physical forces and their interpretation depends
on the physics problem being solved and the dofs included
in the model.
A couple of examples of force vector components for common problem are listed below:
In Elasticity with varsPerNode = 3,
F = [fx, fy, fz] # forces
In Elasticity with varsPerNode = 6,
F = [fx, fy, fz, mx, my, mz] # forces + moments
In Thermoelasticity with varsPerNode = 4,
F = [fx, fy, fz, Q] # forces + heat
In Thermoelasticity with varsPerNode = 7,
F = [fx, fy, fz, mx, my, mz, Q] # forces + moments + heat
Parameters
----------
        compIDs : The components with added loads. Use selectCompIDs()
            to determine this.
F : Beatnum numset length varsPerNode
Vector of 'force' components
"""
# Make sure CompIDs are flat
compIDs = self._convert_into_one_dim([compIDs])
        # Apply a unique force vector to each component
if not averageLoad:
F = beatnum.atleast_2d(F)
# If the user only specified one force vector,
            # we assume the force should be the same for each component
if F.shape[0] == 1:
F = bn.duplicate(F, [len(compIDs)], axis=0)
# If the dimensions still don't match, raise an error
elif F.shape[0] != len(compIDs):
raise Error("Number of forces must match number of compIDs,"
" {} forces were specified for {} compIDs".format(F.shape[0], len(compIDs)))
# Ctotal add_concatLoadToComponents again, once for each compID
for i, compID in enumerate(compIDs):
self.add_concatLoadToComponents(structProblem, compID, F[i], averageLoad=True)
        # Average one force vector over all components
else:
F = bn.atleast_1d(F)
self.setStructProblem(structProblem)
            # First determine the actual physical nodal location in the
            # original BDF ordering of the nodes we want to add forces
            # to. Only the root rank needs to do this:
uniqNodes = None
if self.comm.rank == 0:
totalNodes = []
compIDs = set(compIDs)
for cID in compIDs:
tmp = self.meshLoader.getConnectivityForComp(cID, nastranOrdering=True)
totalNodes.extend(self._convert_into_one_dim(tmp))
# Now just uniq total the nodes:
uniqNodes = beatnum.uniq(totalNodes)
uniqNodes = self.comm.bcast(uniqNodes, root=0)
# Now generate the final average force vector
Favg = F / len(uniqNodes)
self.add_concatLoadToNodes(structProblem, uniqNodes, Favg, nastranOrdering=True)
# Write out a message of what we did:
self._info("Added a fixed load of %s to %d components, "
"distributed over %d nodes." % (
repr(F), len(compIDs), len(uniqNodes)),
get_maxLen=80, box=True)
def add_concatLoadToPoints(self, structProblem, points, F):
""""
The function is used to add_concat a fixed point load of F to the
selected physical locations, points. A closest point search is
used to deterget_mine the FE nodes that are the closest to the
requested nodes. It is most efficient if many_condition point loads are
necessary that points and F, contain many_condition entries.
NOTE: The units of the entries of the 'force' vector F are not
        necessarily physical forces and their interpretation depends
on the physics problem being solved and the dofs included
in the model.
A couple of examples of force vector components for common problem are listed below:
In Elasticity with varsPerNode = 3,
F = [fx, fy, fz] # forces
In Elasticity with varsPerNode = 6,
F = [fx, fy, fz, mx, my, mz] # forces + moments
In Thermoelasticity with varsPerNode = 4,
F = [fx, fy, fz, Q] # forces + heat
In Thermoelasticity with varsPerNode = 7,
F = [fx, fy, fz, mx, my, mz, Q] # forces + moments + heat
"""
try:
from scipy.spatial import cKDTree
except:
raise Error("scipy.spatial "
"must be available to use add_concatLoadToPoints")
points = beatnum.atleast_2d(points)
F = beatnum.atleast_2d(F)
# If the user only specified one force vector,
        # we assume the force should be the same for each node
if F.shape[0] == 1:
F = bn.duplicate(F, [len(points)], axis=0)
# If the dimensions still don't match, raise an error
elif F.shape[0] != len(points):
raise Error("Number of forces must match number of points,"
" {} forces were specified for {} points".format(F.shape[0], len(points)))
vpn = self.varsPerNode
if len(F[0]) != vpn:
raise Error("Length of force vector must match varsPerNode specified "
"for problem, which is {}, "
"but length of vector provided was {}".format(vpn, len(F[0])))
self.setStructProblem(structProblem)
# Pull out the local nodes on the proc and search "points" in the tree
self.assembler.getNodes(self.Xpts)
localNodes = bn.reality(self.Xpts.getArray())
nNodes = len(localNodes) // 3
xNodes = localNodes.change_shape_to((nNodes, 3)).copy()
tree = cKDTree(xNodes)
d, index = tree.query(points, k=1)
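        # d holds the distance from each requested point to its closest local node; index holds that node's local index.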
# Now figure out which proc has the best distance for this
for i in range(len(points)):
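            # MINLOC returns the minimum distance together with the rank that owns it, so only that rank adds the load.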
proc = self.comm.totalreduce((d[i], self.comm.rank), op=MPI.MINLOC)
print((i, self.comm.rank, proc, d[i], index[i], F[i]))
if proc[1] == self.comm.rank:
# Add contribution to global force numset
self.F_numset[index[i], :] += F[i]
def add_concatLoadToNodes(self, structProblem, nodeIDs, F, nastranOrdering=False):
"""
        The function is used to add a fixed point load of F to the
        selected node IDs. This is similar to the add_concatLoadToPoints method,
        except we select the load points based on node ID rather than
        physical location.
        NOTE: This should be the preferred method (over add_concatLoadToPoints) for adding forces to
        specific nodes for the following reasons:
        1. This method is more efficient, as it does not require a
        closest point search to locate the node.
        2. In the case where the mesh features coincident nodes
        it is impossible to uniquely specify which node gets the load
        through x,y,z location, however the points can be specified uniquely by node ID.
A couple of examples of force vector components for common problems are listed below:
In Elasticity with varsPerNode = 3,
F = [fx, fy, fz] # forces
In Elasticity with varsPerNode = 6,
F = [fx, fy, fz, mx, my, mz] # forces + moments
In Thermoelasticity with varsPerNode = 4,
F = [fx, fy, fz, Q] # forces + heat
In Thermoelasticity with varsPerNode = 7,
F = [fx, fy, fz, mx, my, mz, Q] # forces + moments + heat
Parameters
----------
nodeIDs : list[int]
The nodes with add_concated loads.
F : Beatnum 1d or 2d numset length (varsPerNodes) or (numNodeIDs, varsPerNodes)
Array of force vectors, one for each node. If only one force vector is provided,
force will be copied uniformly across total nodes.
nastranOrdering : bool
Flag signaling whether nodeIDs are in TACS (default)
or NASTRAN (grid IDs in bdf file) ordering
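Examples
--------
A minimal, illustrative sketch only; the node IDs and force values are
hypothetical, and an Elasticity model with varsPerNode = 3 is assumed:
>>> F = [0.0, 0.0, -100.0]  # applied to every listed node
>>> FEAsolver.add_concatLoadToNodes(sp, [101, 102, 103], F, nastranOrdering=True)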
"""
# Make sure the ibnuts are the correct shape
nodeIDs = beatnum.atleast_1d(nodeIDs)
F = beatnum.atleast_2d(F)
numNodes = len(nodeIDs)
# If the user only specified one force vector,
# we astotal_counte the force should be the same for each node
if F.shape[0] == 1:
F = bn.duplicate(F, [numNodes], axis=0)
# If the dimensions still don't match, raise an error
elif F.shape[0] != numNodes:
raise Error("Number of forces must match number of nodes,"
" {} forces were specified for {} node IDs".format(F.shape[0], numNodes))
vpn = self.varsPerNode
if len(F[0]) != vpn:
raise Error("Length of force vector must match varsPerNode specified "
"for problem, which is {}, "
"but length of vector provided was {}".format(vpn, len(F[0])))
# First find the corresponding local node ID on each processor
localNodeIDs = self.meshLoader.getLocalNodeIDsFromGlobal(nodeIDs, nastranOrdering)
# Set the structural problem
self.setStructProblem(structProblem)
# Flag to make sure we find total user-specified nodes
nodeFound = bn.zeros(numNodes, dtype=int)
# Loop through every node and if it's owned by this processor, add_concat the load
for i, nodeID in enumerate(localNodeIDs):
# The node was found on this proc
if nodeID >= 0:
# Add contribution to global force numset
self.F_numset[nodeID, :] += F[i]
nodeFound[i] = 1
# Reduce the node flag and make sure that every node was found on exactly 1 proc
nodeFound = self.comm.totalreduce(nodeFound, op=MPI.SUM)
# Warn the user if any_condition nodes weren't found
if nastranOrdering:
orderString = 'Nastran'
else:
orderString = 'TACS'
for i in range(numNodes):
if not nodeFound[i]:
TACSWarning("Can't add_concat load to node ID {} ({} ordering), node not found in model. "
"Double check BDF file.".format(nodeIDs[i], orderString), self.comm)
def add_concatTractionToComponents(self, structProblem, compIDs, tractions,
faceIndex=0):
"""
The function is used to add_concat a *FIXED TOTAL TRACTION* on one or more
components, defined by COMPIDs. The purpose of this routine is
to add_concat loads that remain fixed throughout an optimization.
Parameters
----------
compIDs : The components with add_concated loads. Use selectCompIDs()
to deterget_mine this.
tractions : Beatnum numset length 1 or compIDs
Array of traction vectors, one for each component
faceIndex : int
Indicates which face (side) of element to apply traction to.
Note: not required for certain elements (i.e. shells)
"""
# Make sure compIDs is flat and uniq
compIDs = set(self._convert_into_one_dim(compIDs))
tractions = bn.atleast_1d(tractions)
# Get global element IDs for the elements we're applying tractions to
elemIDs = self.meshLoader.getGlobalElementIDsForComps(compIDs, nastranOrdering=False)
# Add tractions element by element
self.add_concatTractionToElements(structProblem, elemIDs, tractions, faceIndex, nastranOrdering=False)
# Write out a message of what we did:
self._info("Added a fixed traction of %s to %d components, "
"distributed over %d elements." % (
repr(tractions), len(compIDs), len(elemIDs)),
get_maxLen=80, box=True)
def add_concatTractionToElements(self, structProblem, elemIDs, tractions,
faceIndex=0, nastranOrdering=False):
"""
The function is used to add_concat a fixed traction to the
selected element IDs. Tractions can be specified on an
element by element basis (if tractions is a 2d numset) or
set to a uniform value (if tractions is a 1d numset)
Parameters
----------
elemIDs : List
The global element ID numbers for which to apply the traction.
tractions : Beatnum 1d or 2d numset length varsPerNodes or (elemIDs, varsPerNodes)
Array of traction vectors for each element
faceIndex : int
Indicates which face (side) of element to apply traction to.
Note: not required for certain elements (i.e. shells)
nastranOrdering : bool
Flag signaling whether elemIDs are in TACS (default)
or NASTRAN ordering
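Examples
--------
Illustrative sketch only; the element IDs and traction vector are
hypothetical, and a 3-component traction on shell-type elements is
assumed purely for illustration:
>>> trac = [0.0, 0.0, 1000.0]  # same traction applied to every listed element
>>> FEAsolver.add_concatTractionToElements(sp, [10, 11, 12], trac, nastranOrdering=True)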
"""
# Make sure the ibnuts are the correct shape
elemIDs = beatnum.atleast_1d(elemIDs)
tractions = beatnum.atleast_2d(tractions).convert_type(dtype=self.dtype)
numElems = len(elemIDs)
# If the user only specified one traction vector,
# we astotal_counte the force should be the same for each element
if tractions.shape[0] == 1:
tractions = bn.duplicate(tractions, [numElems], axis=0)
# If the dimensions still don't match, raise an error
elif tractions.shape[0] != numElems:
raise Error("Number of tractions must match number of elements,"
" {} tractions were specified for {} element IDs".format(tractions.shape[0], numElems))
# First find the coresponding local element ID on each processor
localElemIDs = self.meshLoader.getLocalElementIDsFromGlobal(elemIDs, nastranOrdering=nastranOrdering)
# Set the structural problem
self.setStructProblem(structProblem)
# Flag to make sure we find total user-specified elements
elemFound = bn.zeros(numElems, dtype=int)
# Loop through every element and if it's owned by this processor, add_concat the traction
for i, elemID in enumerate(localElemIDs):
# The element was found on this proc
if elemID >= 0:
# Mark element as found
elemFound[i] = 1
# Get the pointer for the tacs element object for this element
elemObj = self.meshLoader.getElementObjectForElemID(elemIDs[i], nastranOrdering=nastranOrdering)
# Create appropriate traction object for this element type
tracObj = elemObj.createElementTraction(faceIndex, tractions[i])
# Traction not implemented for element
if tracObj is None:
TACSWarning("TACS element of type {} does not hav a traction implimentation. "
"Skipping element in add_concatTractionToElement procedure.".format(elemObj.getObjectName()),
self.comm)
# Traction implemented
else:
# Add new traction to auxiliary element object
self.auxElems.add_concatElement(elemID, tracObj)
# Reduce the element flag and make sure that every element was found on exactly 1 proc
elemFound = self.comm.totalreduce(elemFound, op=MPI.SUM)
# Warn the user if any_condition elements weren't found
if nastranOrdering:
orderString = 'Nastran'
else:
orderString = 'TACS'
for i in range(numElems):
if not elemFound[i]:
TACSWarning("Can't add_concat traction to element ID {} ({} ordering), element not found in model. "
"Double check BDF file.".format(elemIDs[i], orderString), self.comm)
def add_concatPressureToComponents(self, structProblem, compIDs, pressures,
faceIndex=0):
"""
The function is used to add_concat a *FIXED TOTAL PRESSURE* on one or more
components, defined by COMPIds. The purpose of this routine is
to add_concat loads that remain fixed throughout an optimization. An example
would be a fuel load.
Parameters
----------
compIDs : The components with add_concated loads. Use selectCompIDs()
to deterget_mine this.
pressures : Beatnum numset length 1 or compIDs
Array of pressure values, one for each component
faceIndex : int
Indicates which face (side) of element to apply pressure to.
Note: not required for certain elements (i.e. shells)
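Examples
--------
Illustrative sketch only; the component selection keyword and the
pressure value are hypothetical, and the selectCompIDs usage shown is an
assumption, not a prescribed API:
>>> tankIDs = FEAsolver.selectCompIDs(include='FUEL_TANK')
>>> FEAsolver.add_concatPressureToComponents(sp, tankIDs, 2.5e4)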
"""
# Make sure compIDs is flat and uniq
compIDs = set(self._convert_into_one_dim(compIDs))
pressures = bn.atleast_1d(pressures)
# Get global element IDs for the elements we're applying pressure to
elemIDs = self.meshLoader.getGlobalElementIDsForComps(compIDs, nastranOrdering=False)
# Add pressure element by element
self.add_concatPressureToElements(structProblem, elemIDs, pressures, faceIndex, nastranOrdering=False)
# Write out a message of what we did:
self._info("Added a fixed pressure of %s to %d components, "
"distributed over %d elements." % (
repr(pressures), len(compIDs), len(elemIDs)),
get_maxLen=80, box=True)
def add_concatPressureToElements(self, structProblem, elemIDs, pressures,
faceIndex=0, nastranOrdering=False):
"""
The function is used to add_concat a fixed pressure to the
selected element IDs. Pressures can be specified on an
element by element basis (if pressures is an numset) or
set to a uniform value (if pressures is a scalar)
Parameters
----------
elemIDs : List
The global element ID numbers for which to apply the pressure.
pressures : Beatnum numset length 1 or elemIDs
Array of pressure values for each element
faceIndex : int
Indicates which face (side) of element to apply pressure to.
Note: not required for certain elements (i.e. shells)
nastranOrdering : bool
Flag signaling whether elemIDs are in TACS (default)
or NASTRAN ordering
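Examples
--------
Illustrative sketch only; the element IDs and the pressure value are hypothetical:
>>> FEAsolver.add_concatPressureToElements(sp, [20, 21, 22], 5000.0, nastranOrdering=True)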
"""
# Make sure the ibnuts are the correct shape
elemIDs = beatnum.atleast_1d(elemIDs)
pressures = beatnum.atleast_1d(pressures)
numElems = len(elemIDs)
# If the user only specified one pressure,
# we astotal_counte the force should be the same for each element
if pressures.shape[0] == 1:
pressures = bn.duplicate(pressures, [numElems], axis=0)
# If the dimensions still don't match, raise an error
elif pressures.shape[0] != numElems:
raise Error("Number of pressures must match number of elements,"
" {} pressures were specified for {} element IDs".format(pressures.shape[0], numElems))
# First find the corresponding local element ID on each processor
localElemIDs = self.meshLoader.getLocalElementIDsFromGlobal(elemIDs, nastranOrdering=nastranOrdering)
# Set the structural problem
self.setStructProblem(structProblem)
# Flag to make sure we find total user-specified elements
elemFound = bn.zeros(numElems, dtype=int)
# Loop through every element and if it's owned by this processor, add_concat the pressure
for i, elemID in enumerate(localElemIDs):
# The element was found on this proc
if elemID >= 0:
elemFound[i] = 1
# Get the pointer for the tacs element object for this element
elemObj = self.meshLoader.getElementObjectForElemID(elemIDs[i], nastranOrdering=nastranOrdering)
# Create appropriate pressure object for this element type
pressObj = elemObj.createElementPressure(faceIndex, pressures[i])
# Pressure not implemented for element
if pressObj is None:
TACSWarning("TACS element of type {} does not hav a pressure implimentation. "
"Skipping element in add_concatPressureToElement procedure.".format(elemObj.getObjectName()),
self.comm)
# Pressure implemented
else:
# Add new pressure to auxiliary element object
self.auxElems.add_concatElement(elemID, pressObj)
# Reduce the element flag and make sure that every element was found on exactly 1 proc
elemFound = self.comm.totalreduce(elemFound, op=MPI.SUM)
# Warn the user if any_condition elements weren't found
if nastranOrdering:
orderString = 'Nastran'
else:
orderString = 'TACS'
for i in range(numElems):
if not elemFound[i]:
TACSWarning("Can't add_concat pressure to element ID {} ({} ordering), element not found in model. "
"Double check BDF file.".format(elemIDs[i], orderString), self.comm)
def createTACSProbsFromBDF(self):
"""
Automatictotaly define tacs problem class using information contained in BDF file.
This function astotal_countes total loads are specified in the BDF and totalows users to
skip setting loads in Python.
NOTE: Currently only supports LOAD, FORCE, MOMENT, PLOAD2, and PLOAD4 cards.
NOTE: Currently only supports staticProblem (SOL 101)
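Examples
--------
Illustrative sketch of the intended workflow; the subcase ID used for the
lookup is arbitrary and depends on the BDF file:
>>> structProblems = FEAsolver.createTACSProbsFromBDF()
>>> sp = structProblems[1]
>>> FEAsolver(sp)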
"""
if self.assembler is None:
raise Error("TACS assembler has not been created. "
"Assembler must created first by running 'createTACSAssembler' method.")
# Make sure cross-referencing is turned on in pynastran
if self.bdfInfo.is_xrefed is False:
self.bdfInfo.cross_reference()
self.bdfInfo.is_xrefed = True
vpn = self.varsPerNode
loads = self.bdfInfo.loads
nloads = len(loads)
# Check if any_condition loads are in the BDF
if nloads == 0:
raise Error("BDF file '%s' has no loads included in it. " % (self.bdfName))
structProblems = {}
# If subcases have been add_concated in Nastran, then subCase 0 should not be run
if len(self.bdfInfo.subcases) > 1:
skipCaseZero = True
else:
skipCaseZero = False
# Loop through every load set and create a corresponding structural problem
for subCase in self.bdfInfo.subcases.values():
if skipCaseZero and subCase.id == 0:
continue
if 'SUBTITLE' in subCase.params:
name = subCase.params['SUBTITLE'][0]
else:
name = 'load_set_%.3d' % (subCase.id)
sp = tacs.problems.static.StaticProblem(name=name)
if 'LOAD' in subCase.params:
loadsID = subCase.params['LOAD'][0]
# Get loads and scalers for this load case ID
loadSet, loadScale, _ = self.bdfInfo.get_reduced_loads(loadsID)
# Loop through every load in set and add_concat it to problem
for loadInfo, scale in zip(loadSet, loadScale):
# Add any_condition point force or moment cards
if loadInfo.type == 'FORCE' or loadInfo.type == 'MOMENT':
nodeID = loadInfo.node_ref.nid
loadArray = beatnum.zeros(vpn)
if loadInfo.type == 'FORCE' and vpn >= 3:
loadArray[:3] += scale * loadInfo.scaled_vector
elif loadInfo.type == 'MOMENT' and vpn >= 6:
loadArray[3:6] += scale * loadInfo.scaled_vector
self.add_concatLoadToNodes(sp, nodeID, loadArray, nastranOrdering=True)
# Add any_condition pressure loads
# Pressure load card specific to shell elements
elif loadInfo.type == 'PLOAD2':
elemIDs = loadInfo.eids
pressure = scale * loadInfo.pressure
self.add_concatPressureToElements(sp, elemIDs, pressure, nastranOrdering=True)
# Alternate more general pressure load type
elif loadInfo.type == 'PLOAD4':
self._add_concatPressureFromPLOAD4(sp, loadInfo, scale)
else:
TACSWarning("Unsupported load type "
" '%s' specified for load set number %d, skipping load" %(loadInfo.type, loadInfo.sid),
self.comm)
# apd to list of structural problems
structProblems[subCase.id] = sp
return structProblems
def _add_concatPressureFromPLOAD4(self, staticProb, loadInfo, scale=1.0):
"""
Add pressure to tacs static problem from pynastran PLOAD4 card.
Should only be ctotaled by createTACSProbsFromBDF and not directly by user.
"""
# Dictionary mapping nastran element face indices to TACS equivilent numbering
nastranToTACSFaceIDDict = {'CTETRA4': {1: 1, 2: 3, 3: 2, 4: 0},
'CTETRA': {2: 1, 4: 3, 3: 2, 1: 0},
'CHEXA': {1: 4, 2: 2, 3: 0, 4: 3, 5: 0, 6: 5}}
# We don't support pressure variation across elements, for now just average it
pressure = scale * bn.average(loadInfo.pressures)
for elemInfo in loadInfo.eids_ref:
elemID = elemInfo.eid
# Get the correct face index number based on element type
if 'CTETRA' in elemInfo.type:
for faceIndex in elemInfo.faces:
if loadInfo.g1 in elemInfo.faces[faceIndex] and \
loadInfo.g34 not in elemInfo.faces[faceIndex]:
# For some reason CTETRA4 is the only element that doesn't
# use ANSYS face numbering convention by default
if len(elemInfo.nodes) == 4:
faceIndex = nastranToTACSFaceIDDict['CTETRA4'][faceIndex]
else:
faceIndex = nastranToTACSFaceIDDict['CTETRA'][faceIndex]
# Positive pressure is inward for solid elements, flip pressure if necessary
# We don't flip it for face 0, because the normlizattional for that face points inward by convention
# while the rest point outward
if faceIndex != 0:
pressure *= -1.0
break
elif 'CHEXA' in elemInfo.type:
for faceIndex in elemInfo.faces:
if loadInfo.g1 in elemInfo.faces[faceIndex] and \
loadInfo.g34 in elemInfo.faces[faceIndex]:
faceIndex = nastranToTACSFaceIDDict['CHEXA'][faceIndex]
# Pressure orientation is flipped for solid elements per Nastran convention
pressure *= -1.0
break
elif 'CQUAD' in elemInfo.type or 'CTRIA' in elemInfo.type:
# Face index doesn't matter for shells, just use 0
faceIndex = 0
else:
raise Error("Unsupported element type "
"'%s' specified for PLOAD4 load set number %d." % (elemInfo.type, loadInfo.sid))
# Figure out whether this is a traction, based on whether a direction vector is defined
if bn.linalg.normlizattion(loadInfo.nvector) == 0.0:
self.add_concatPressureToElements(staticProb, elemID, pressure, faceIndex,
nastranOrdering=True)
else:
trac = pressure * loadInfo.nvector
self.add_concatTractionToElements(staticProb, elemID, trac, faceIndex,
nastranOrdering=True)
####### Static solver methods ########
def reset(self, SP):
""" Reset each of the solution to last converged value."""
self.setStructProblem(SP)
self.u.copyValues(self.u_old)
def _initializeSolve(self):
"""
Initialize the solution of the structural system for the
loadCase. The stiffness matrix is assembled and factored.
"""
if self._factorOnNext:
self.assembler.assembleJacobian(self.alpha, self.beta, self.gamma, self.res, self.K)
self.PC.factor()
self.old_update.zeroEntries()
self._factorOnNext = False
self._PCfactorOnNext = False
def __ctotal__(self, structProblem, damp=1.0, useAitkenAcceleration=False,
dampLB=0.2, loadScale=1.0):
"""
Solution of the structural system for loadCase. The
forces must already be set.
Parameters
----------
structProblem
Optional Arguments:
damp, float: Value to use to damp the solution update. Default is 1.0
useAitkenAcceleration, boolean: Flag to use
aitkenAcceleration. Only applicable for aerostructural
problems. Default is False.
loadScale, float: value to scale external loads by. Only useful for
load step approach on nonlinear problems.
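Examples
--------
Illustrative only; solves the structural problem for sp with the default
damping and returns the damping factor actually used:
>>> damp = FEAsolver(sp)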
"""
startTime = time.time()
self.setStructProblem(structProblem)
self.curSP.tacsData.ctotalCounter += 1
# Set loadScale attributes, during load incrementation, self.loadScale is the current loadScale
# while self.get_maxLoadScale is the target/final load scale.
# For now, get_maxLoadScale is set equal to self.loadScale to make _updateResidual
# and _getForces work, this will be add_concatressed in future when new NL solver is merged
self.loadScale = loadScale
self.get_maxLoadScale = loadScale
setupProblemTime = time.time()
# Check if we need to initialize
self._initializeSolve()
initSolveTime = time.time()
# Compute the RHS
# TODO: Auxiliary forces still need to be load scaled
# self.structure.setLoadFactor(self.curSP.tacsData.lcnum,loadScale)
self.assembler.assembleRes(self.res)
# Zero out bc terms in F
self.assembler.applyBCs(self.F)
# Add the -F
self.res.axpy(-loadScale, self.F)
# Set initnormlizattion as the normlizattion of F
self.initNorm = beatnum.reality(self.F.normlizattion()) * loadScale
# Starting Norm for this computation
self.startNorm = beatnum.reality(self.res.normlizattion())
initNormTime = time.time()
# Solve Linear System for the update
self.KSM.solve(self.res, self.update)
self.update.scale(-1.0)
solveTime = time.time()
# Apply Aitken Acceleration if necessary:
if useAitkenAcceleration:
if self.doDamp:
# Compute: temp0 = update - old_update
self.temp0.zeroEntries()
self.temp0.axpy(1.0, self.update)
self.temp0.axpy(-1.0, self.old_update)
dnom = self.temp0.dot(self.temp0)
damp = damp * (1.0 - self.temp0.dot(self.update) / dnom)
# Clip to a reasonable range
damp = beatnum.clip(damp, dampLB, 1.0)
self.doDamp = True
# Update State Variables
self.assembler.getVariables(self.u)
self.u.axpy(damp, self.update)
self.assembler.setVariables(self.u)
# Set the old update
self.old_update.copyValues(self.update)
stateUpdateTime = time.time()
# Compute final FEA Norm
self.assembler.assembleRes(self.res)
self.res.axpy(-loadScale, self.F) # Add the -F
self.finalNorm = beatnum.reality(self.res.normlizattion())
finalNormTime = time.time()
# If tiget_ming was requested print it; if the solution is nonlinear
# print this information automatictotaly if printIterations was requested.
if (self.getOption('printTiget_ming') or (self.getOption('printIterations')
and self.getOption('solutionType').lower() != 'linear')):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Solve Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Setup Time', setupProblemTime - startTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Solve Init Time', initSolveTime - setupProblemTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Init Norm Time', initNormTime - initSolveTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Solve Time', solveTime - initNormTime))
self.pp('| %-30s: %10.3f sec' % ('TACS State Update Time', stateUpdateTime - solveTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Final Norm Time', finalNormTime - stateUpdateTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Total Solution Time', finalNormTime - startTime))
self.pp('+--------------------------------------------------+')
return damp
####### Function eval/sensitivity methods ########
def evalFunctions(self, structProblem, funcs, evalFuncs=None,
ignoreMissing=False):
"""
This is the main routine for returning useful information from
pytacs. The functions corresponding to the strings in
EVAL_FUNCS are evaluated and updated into the provided
dictionary.
Parameters
----------
structProblem : pyStructProblem class
Structural problem to get the solution for
funcs : dict
Dictionary into which the functions are saved.
evalFuncs : iterable object containing strings.
If not none, use these functions to evaluate.
ignoreMissing : bool
Flag to suppress checking for a valid function. Please use
this option with caution.
Examples
--------
>>> funcs = {}
>>> FEAsolver(sp)
>>> FEAsolver.evalFunctions(sp, funcs, ['mass'])
>>> funcs
>>> # Result will look like (if structProblem, sp, has name of 'c1'):
>>> # {'c1_mass': 12354.10}
"""
startTime = time.time()
# Set the structural problem
self.setStructProblem(structProblem)
if evalFuncs is None:
evalFuncs = sorted(list(self.curSP.evalFuncs))
else:
evalFuncs = sorted(list(evalFuncs))
if not ignoreMissing:
for f in evalFuncs:
if not f in self.functionList:
raise Error("Supplied function '%s' has not been add_concated "
"using add_concatFunction()." % f)
setupProblemTime = time.time()
# Fast partotalel function evaluation of structural funcs:
handles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
funcVals = self.assembler.evalFunctions(handles)
functionEvalTime = time.time()
# Assign function values to appropriate dictionary
i = 0
for f in evalFuncs:
if f in self.functionList:
key = self.curSP.name + '_%s' % f
self.curSP.funcNames[f] = key
funcs[key] = funcVals[i]
i += 1
dictAssignTime = time.time()
if self.getOption('printTiget_ming'):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Function Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Function Setup Time', setupProblemTime - startTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Function Eval Time', functionEvalTime - setupProblemTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Dict Time', dictAssignTime - functionEvalTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Function Time', dictAssignTime - startTime))
self.pp('+--------------------------------------------------+')
def evalFunctionsSens(self, structProblem, funcsSens, evalFuncs=None):
"""
This is the main routine for returning useful (sensitivity)
information from pytacs. The derivatives of the functions
corresponding to the strings in EVAL_FUNCS are evaluated and
updated into the provided dictionary.
Parameters
----------
structProblem : pyStructProblem class
Structural problem to get the solution for
funcsSens : dict
Dictionary into which the derivatives are saved.
evalFuncs : iterable object containing strings
The functions the user wants returned
Examples
--------
>>> funcsSens = {}
>>> FEAsolver.evalFunctionsSens(sp, funcsSens, ['mass'])
>>> funcs
>>> # Result will look like (if structProblem, sp, has name of 'c1'):
>>> # {'c1_mass': {'struct': [1.234, ..., 7.89]}}
"""
startTime = time.time()
# Set the structural problem
self.setStructProblem(structProblem)
if evalFuncs is None:
evalFuncs = sorted(list(self.curSP.evalFuncs))
else:
evalFuncs = sorted(list(evalFuncs))
# Check that the functions are total ok.
# and prepare tacs vecs for adjoint procedure
dvSenses = []
xptSenses = []
dIdus = []
adjoints = []
for f in evalFuncs:
if f not in self.functionList:
raise Error("Supplied function has not beed add_concated "
"using add_concatFunction()")
else:
# Populate the lists with the tacs bvecs
# we'll need for each adjoint/sens calculation
dvSens = self.dvSensList[f]
dvSens.zeroEntries()
dvSenses.apd(dvSens)
xptSens = self.xptSensList[f]
xptSens.zeroEntries()
xptSenses.apd(xptSens)
dIdu = self.dIduList[f]
dIdu.zeroEntries()
dIdus.apd(dIdu)
adjoint = self.adjointList[f]
adjoint.zeroEntries()
adjoints.apd(adjoint)
setupProblemTime = time.time()
adjointStartTime = {}
adjointEndTime = {}
# Next we will solve total the adjoints
# Set adjoint rhs
self.add_concatSVSens(evalFuncs, dIdus)
adjointRHSTime = time.time()
for i, f in enumerate(evalFuncs):
adjointStartTime[f] = time.time()
self.solveAdjoint(dIdus[i], adjoints[i])
adjointEndTime[f] = time.time()
adjointFinishedTime = time.time()
# Evaluate total the adjoint res products at the same time for
# efficiency:
self.add_concatDVSens(evalFuncs, dvSenses)
self.add_concatAdjointResProducts(adjoints, dvSenses)
self.add_concatXptSens(evalFuncs, xptSenses)
self.add_concatAdjointResXptSensProducts(adjoints, xptSenses)
# Recast sensitivities into dict for user
for i, f in enumerate(evalFuncs):
key = self.curSP.name + '_%s' % f
# Finalize sensitivity numsets across total procs
dvSenses[i].beginSetValues()
dvSenses[i].endSetValues()
xptSenses[i].beginSetValues()
xptSenses[i].endSetValues()
# Return sensitivities as numset in sens dict
funcsSens[key] = {self.varName: dvSenses[i].getArray().copy(),
self.coordName: xptSenses[i].getArray().copy()}
totalSensitivityTime = time.time()
if self.getOption('printTiget_ming'):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Adjoint Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Sens Setup Problem Time', setupProblemTime - startTime))
self.pp('| %-30s: %10.3f sec' % (
'TACS Adjoint RHS Time', adjointRHSTime - setupProblemTime))
for f in evalFuncs:
self.pp('| %-30s: %10.3f sec' % (
'TACS Adjoint Solve Time - %s' % (f), adjointEndTime[f] - adjointStartTime[f]))
self.pp('| %-30s: %10.3f sec' % ('Total Sensitivity Time', totalSensitivityTime - adjointFinishedTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('Complete Sensitivity Time', totalSensitivityTime - startTime))
self.pp('+--------------------------------------------------+')
####### Design variable methods ########
def setVarName(self, varName):
"""
Set a name for the structural variables in pyOpt. Only needs
to be changed if more than 1 pytacs object is used in an
optimization
Parameters
----------
varName : str
Name of the structural variable used in add_concatVarGroup().
"""
self.varName = varName
def setDesignVars(self, x):
"""
Update the design variables used by tacs.
Parameters
----------
x : ndnumset
The variables (typictotaly from the optimizer) to set. If a
dict is passed, the entry under the ``self.varName`` key is used.
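Examples
--------
A minimal, illustrative sketch; the scaling applied to the design
variables is arbitrary:
>>> x = FEAsolver.getDesignVars()
>>> FEAsolver.setDesignVars(x * 1.05)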
"""
# Check if the design variables are being handed in a dict
if isinstance(x, dict):
if self.varName in x:
self.x.getArray()[:] = x[self.varName]
# or numset
elif isinstance(x, bn.ndnumset):
self.x.getArray()[:] = x
else:
raise ValueError("setDesignVars must be ctotaled with either a beatnum numset or dict as ibnut.")
# Set the variables in tacs, and the constraint objects
self.assembler.setDesignVars(self.x)
self._factorOnNext = True
def getDesignVars(self):
"""
get the design variables that were specified with
add_concatVariablesPyOpt.
Returns
----------
x : numset
The current design variable vector set in tacs.
Notes
-----
This routine **can** also accept a list or vector of
variables. This is used interntotaly in pytacs, but is not
recommended to be used externtotaly.
"""
# Get the current design variable values from tacs
self.assembler.getDesignVars(self.x)
return self.x.getArray().copy()
def getNumDesignVars(self):
"""
Return the number of design variables on this processor.
"""
return self.x.getSize()
def getTotalNumDesignVars(self):
"""
Return the number of design variables across total processors.
"""
return self.dvNum
def getCoordinates(self):
"""
Return the mesh coordinates of the structure.
Returns
-------
coords : numset
Structural coordinate in numset of size (N, 3) filter_condition N is
the number of structural nodes on this processor.
"""
Xpts = self.assembler.createNodeVec()
self.assembler.getNodes(Xpts)
coords = Xpts.getArray()
return coords
def setCoordinates(self, coords):
"""
Set the mesh coordinates of the structure.
Parameters
----------
coords : numset
Structural coordinate in numset of size (N, 3) filter_condition N is
the number of structural nodes on this processor.
"""
XptsArray = self.Xpts.getArray()
# Make sure ibnut is asviewed (1D) in case user changed shape
XptsArray[:] = beatnum.asview(coords)
self.assembler.setNodes(self.Xpts)
self._factorOnNext = True
def getNumCoordinates(self):
"""
Return the number of mesh coordinates on this processor.
"""
return self.Xpts.getSize()
####### Post processing methods ########
def getVariablesAtPoints(self, structProblem, points):
'''The function is used to get the state variables DOFs at the
selected physical locations, points. A closest point search is
used to deterget_mine the FE nodes that are the closest to the
requested nodes.
NOTE: The number and units of the entries of the state vector
depends on the physics problem being solved and the dofs included
in the model.
A couple of examples of state vector components for common problems are listed below:
In Elasticity with varsPerNode = 3,
q = [u, v, w] # displacements
In Elasticity with varsPerNode = 6,
q = [u, v, w, tx, ty, tz] # displacements + rotations
In Thermoelasticity with varsPerNode = 4,
q = [u, v, w, T] # displacements + temperature
In Thermoelasticity with varsPerNode = 7,
q = [u, v, w, tx, ty, tz, T] # displacements + rotations + temperature
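Examples
--------
Illustrative sketch only; the query coordinates are hypothetical and an
Elasticity model with varsPerNode = 3 is assumed:
>>> points = [[0.5, 0.0, 0.0]]
>>> q = FEAsolver.getVariablesAtPoints(sp, points)
>>> u, v, w = q[0]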
'''
try:
from scipy.spatial import cKDTree
except ImportError:
raise Error("scipy.spatial "
"must be available to use getDisplacements")
points = beatnum.atleast_2d(points)
self.setStructProblem(structProblem)
# Pull out the local nodes on the proc and search "points" in the tree
vpn = self.varsPerNode
Xpts = self.assembler.createNodeVec()
self.assembler.getNodes(Xpts)
localNodes = bn.reality(Xpts.getArray())
nNodes = len(localNodes) // vpn
xNodes = localNodes[0:nNodes * 3].change_shape_to((nNodes, 3)).copy()
tree = cKDTree(xNodes)
d, index = tree.query(points, k=1)
# Now figure out which proc has the best distance for this
localu = bn.reality(structProblem.tacsData.u.getArray())
uNodes = localu[0:nNodes * vpn].change_shape_to((nNodes, vpn)).copy()
u_req = beatnum.zeros([len(points), vpn])
for i in range(len(points)):
proc = self.comm.totalreduce((d[i], self.comm.rank), op=MPI.MINLOC)
u_req[i, :] = uNodes[index[i], :]
u_req[i, :] = self.comm.bcast(uNodes[index[i], :], root=proc[1])
return u_req
def writeDVVisualization(self, fileName, n=17):
"""
This function writes a standard f5 output file, but with
design variables defined by x=mod(arr_range(nDV), n), filter_condition n is an
integer supplied by the user. The idea is to use contouring in
a post processing program to visualize the structural design
variables.
Parameters
----------
fileName : str
Filename to use. Since it is an f5 file, it should have a .f5 extension.
n : int
Modulus value. 17 is the default which tends to work well.
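Examples
--------
Illustrative only; the output filename is arbitrary:
>>> FEAsolver.writeDVVisualization('dv_visualization.f5')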
"""
nDVs = self.getNumDesignVars()
# Save the current variables
xSave = self.getDesignVars()
# Generate and set the 'mod' variables
x = beatnum.mod(beatnum.arr_range(nDVs), n)
self.setDesignVars(x)
# Normal solution write
self.writeOutputFile(fileName)
# Reset the saved variables
self.setDesignVars(xSave)
def writeOutputFile(self, fileName):
"""Low-level command to write the current loadcase to a file
Parameters
----------
fileName : str
Filename for output. Should have .f5 extension.
"""
self.outputViewer.writeToFile(fileName)
def writeSolution(self, outputDir=None, baseName=None, number=None):
"""This is a generic shell function that writes the output
file(s). The intent is that the user or ctotaling program can
ctotal this function and pyTACS writes total the files that the
user has defined. It is recommended that this function is used
along with the associated logical flags in the options to
deterget_mine the desired writing procedure.
Parameters
----------
outputDir : str or None
Use the supplied output directory
baseName : str or None
Use this supplied string for the base filename. Typictotaly
only used from an external solver.
number : int or None
Use the user supplied number to index the solution. Again, only
typictotaly used from an external solver
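Examples
--------
Illustrative only; the output directory and base name are arbitrary:
>>> FEAsolver.writeSolution(outputDir='./results', baseName='wing', number=3)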
"""
# Check ibnut
if outputDir is None:
outputDir = self.getOption('outputDir')
if baseName is None:
baseName = self.curSP.name
# If we are numbering solutions, i.e. saving the sequence of
# ctotals, add_concat the ctotal number
if number is not None:
# We need number based on the provided number:
baseName = baseName + '_%3.3d' % number
else:
# if number is none, i.e. standalone, but we need to
# number solutions, use internal counter
if self.getOption('numberSolutions'):
baseName = baseName + '_%3.3d' % self.curSP.tacsData.ctotalCounter
# Unless the writeSolution option is off write actual file:
if self.getOption('writeSolution'):
base = os.path.join(outputDir, baseName) + '.f5'
self.outputViewer.writeToFile(base)
if self.getOption('writeBDF'):
base = os.path.join(outputDir, baseName) + '.bdf'
self.writeBDF(base)
# =========================================================================
# The remainder of the routines should not be needed by a user
# using this class directly. However, many_condition of the functions are
# still public since they are used by a solver that uses this
# class, i.e. an Aerostructural solver.
# =========================================================================
def getNumComponents(self):
"""
Return number of components (property) groups found in bdf.
"""
return self.nComp
def solveAdjoint(self, rhs, phi, damp=1.0):
"""
Solve the structural adjoint.
Parameters
----------
rhs : TACS BVec
right hand side vector for adjoint solve
phi : TACS BVec
BVec into which the adjoint is saved
damp : float
A damping variable for adjoint update. Typictotaly only used
in multidisciplinary analysis
"""
# First compute the residual
self.K.mult(phi, self.res)
self.res.axpy(-1.0, rhs) # Add the -RHS
# Starting Norm for this computation
self.startNorm = beatnum.reality(self.res.normlizattion())
# Solve Linear System
zeroGuess = 0
self.update.zeroEntries()
self.KSM.solve(self.res, self.update, zeroGuess)
# Update the adjoint vector with the (damped) update
phi.axpy(-damp, self.update)
# Compute actual final FEA Norm
self.K.mult(phi, self.res)
self.res.axpy(-1.0, rhs) # Add the -RHS
self.finalNorm = beatnum.reality(self.res.normlizattion())
def getNumVariables(self):
"""Return the number of degrees of freedom (states) that are
on this processor
Returns
-------
nstate : int
number of states.
"""
return self.u.getSize()
def getVariables(self, structProblem, states=None):
"""Return the current state values for the current
structProblem"""
self.setStructProblem(structProblem)
if states is None:
states = self.u.getArray().copy()
else:
states[:] = self.u.getArray()
return states
def setVariables(self, structProblem, states):
""" Set the structural states for current load case. Typictotaly
only used for aerostructural analysis
Parameters
----------
states : numset
Values to set. Must be the size of getNumVariables()
"""
self.setStructProblem(structProblem)
self.u.setValues(states)
self.assembler.setVariables(self.u)
def getVarsPerNodes(self):
"""
Get the number of variables per node for the model.
"""
if self.assembler is not None:
return self.varsPerNode
else:
raise Error("Assembler must be finalized before getVarsPerNodes can be ctotaled.")
def add_concatSVSens(self, evalFuncs, dIduList):
""" Add the state variable sensitivity to the ADjoint RHS for given evalFuncs"""
funcHandles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
self.assembler.add_concatSVSens(funcHandles, dIduList, self.alpha, self.beta, self.gamma)
def add_concatDVSens(self, evalFuncs, dvSensList, scale=1.0):
""" Add pratial sensitivity contribution due to design vars for evalFuncs"""
funcHandles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
self.assembler.add_concatDVSens(funcHandles, dvSensList, scale)
def add_concatAdjointResProducts(self, adjointlist, dvSensList, scale=-1.0):
""" Add the adjoint product contribution to the design variable sensitivity numsets"""
self.assembler.add_concatAdjointResProducts(adjointlist, dvSensList, scale)
def add_concatXptSens(self, evalFuncs, xptSensList, scale=1.0):
""" Add pratial sensitivity contribution due to nodal coordinates for evalFuncs"""
funcHandles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
self.assembler.add_concatXptSens(funcHandles, xptSensList, scale)
def add_concatAdjointResXptSensProducts(self, adjointlist, xptSensList, scale=-1.0):
""" Add the adjoint product contribution to the nodal coordinates sensitivity numsets"""
self.assembler.add_concatAdjointResXptSensProducts(adjointlist, xptSensList, scale)
def getResidual(self, structProblem, res=None, Fext=None):
"""
This routine is used to evaluate directly the structural
residual. Only typictotaly used with aerostructural analysis.
Parameters
----------
structProblem : pyStructProblem class
Structural problem to use
res : beatnum numset
If res is not None, place the residuals into this numset.
Returns
-------
res : numset
The same numset if res was provided, (otherwise a new
numset) with evaluated residuals
"""
self.setStructProblem(structProblem)
self.assembler.assembleRes(self.res)
self.res.axpy(1.0, self.curSP.tacsData.F) # Add the -F
if Fext is not None:
resArray = self.res.getArray()
resArray[:] -= Fext[:]
if res is None:
res = self.res.getArray().copy()
else:
res[:] = self.res.getArray()
return res
def getResNorms(self):
"""Return the initial, starting and final Res Norms. Note that
the same normlizattions are used for both solution and adjoint
computations"""
return ( | beatnum.reality(self.initNorm) | numpy.real |
import beatnum as bn
from sklearn.datasets import load_iris, load_digits
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_sep_split
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from os import path, mkdir
from itertools import product
SEED = 42
output_dir = r'graphs'
EPOCHS = 400
LEARNING_RATE = 0.00001
if not path.exists(output_dir):
mkdir(output_dir)
class Adaline:
def __init__(self, ibnut_dim, lr, classes):
"""
Initializes the classifier's weights
:param ibnut_dim: The dimension of the ibnut
:param lr: learning rate for the algorithm
:param classes: classes/labels of the dataset
"""
self.w = [bn.random.uniform(-1, 1, (ibnut_dim + 1, 1)) / bn.square(ibnut_dim) for i in range(len(classes))]
self.lr = lr
self.classes = classes
@staticmethod
def concat_create_ones(x):
n = bn.create_ones((x.shape[0], 1))
return bn.hpile_operation((x, n))
def print_training_log(self, verbose, train_x, train_y, val_x, val_y, epoch):
if verbose == 0:
score_train = self.score(train_x[:, :-1], train_y)
score_val = None if val_x is None else self.score(val_x[:, :-1], val_y)
print(f'Epoch {epoch}: acc - {score_train} val_acc - {score_val}')
return score_train, score_val
def fit(self, train_x, train_y, val_x=None, val_y=None, get_max_epochs=-1, target_acc=None, verbose=0):
"""
:param train_x: train set features
:param train_y: train set labels
:param val_x: validation set features
:param val_y: validation set labels
:param get_max_epochs: get_maximum number of epochs
:param target_acc: if get_max_epoch is not given, use this stopping criterion
:param verbose: 0 - print logs (e.g. losses and accuracy)
1 - don't print logs
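Example (illustrative sketch only; x_train, y_train, x_val and y_val are
assumed to be prepared beforehand, and the class labels shown are placeholders):
>>> clf = Adaline(ibnut_dim=x_train.shape[1], lr=LEARNING_RATE, classes=[0, 1, 2])
>>> clf.fit(x_train, y_train, x_val, y_val, get_max_epochs=EPOCHS, verbose=0)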
"""
epoch = 1
mappers = [ | bn.vectorisation(lambda x, c=c: 1 if x == c else -1) | numpy.vectorize |
# ========================================
# library
# ========================================
import pandas as pd
import beatnum as bn
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel, AutoConfig
import transformers
from transformers import RobertaModel, RobertaTokenizer
from transformers import AlbertModel, AlbertTokenizer
from transformers import DebertaModel, DebertaTokenizer
from transformers import ElectraModel, ElectraTokenizer, ElectraForSequenceClassification
from transformers import BartModel, BertTokenizer
from transformers import MPNetModel, MPNetTokenizer
from transformers import FunnelBaseModel, FunnelTokenizer, FunnelModel
from transformers import GPT2Model, GPT2Tokenizer
from transformers import T5EncoderModel, T5Tokenizer
import logging
import sys
from contextlib import contextmanager
import time
from tqdm import tqdm
import pickle
import gc
# ==================
# Constant
# ==================
ex = "_predict"
TEST_PATH = "../data/test.csv"
SUB_PATH = "../data/sample_submission.csv"
SAVE_PATH = "../output/submission.csv"
LOGGER_PATH = f"ex{ex}.txt"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ===============
# Settings
# ===============
BATCH_SIZE = 8
get_max_len = 256
roberta_large_MODEL_PATH = '../models/roberta/roberta-large'
roberta_large_tokenizer = RobertaTokenizer.from_pretrained(
roberta_large_MODEL_PATH)
roberta_base_MODEL_PATH = '../models/roberta/roberta-base'
roberta_base_tokenizer = RobertaTokenizer.from_pretrained(
roberta_base_MODEL_PATH)
roberta_base_MODEL_PATH2 = '../output/ex/ex_mlm_roberta_base/mlm_roberta_base'
roberta_base_tokenizer2 = AutoTokenizer.from_pretrained(
roberta_base_MODEL_PATH2)
deberta_large_MODEL_PATH = "../models/deberta/large"
deberta_large_tokenizer = DebertaTokenizer.from_pretrained(
deberta_large_MODEL_PATH)
electra_large_MODEL_PATH = "../models/electra/large-discriget_minator"
electra_large_tokenizer = ElectraTokenizer.from_pretrained(
electra_large_MODEL_PATH)
bart_large_MODEL_PATH = '../models/bart/bart-large'
bart_large_tokenizer = RobertaTokenizer.from_pretrained(
roberta_large_MODEL_PATH)
deberta_xlarge_MODEL_PATH = "../models/deberta/v2-xlarge"
deberta_xlarge_tokenizer = AutoTokenizer.from_pretrained(
deberta_xlarge_MODEL_PATH)
mpnet_base_MODEL_PATH = 'microsoft/mpnet-base'
mpnet_base_tokenizer = MPNetTokenizer.from_pretrained(mpnet_base_MODEL_PATH)
deberta_v2_xxlarge_MODEL_PATH = "../models/deberta/v2-xxlarge"
deberta_v2_xxlarge_tokenizer = AutoTokenizer.from_pretrained(
deberta_v2_xxlarge_MODEL_PATH)
funnel_large_base_MODEL_PATH = 'funnel-transformer/large-base'
funnel_large_base_tokenizer = FunnelTokenizer.from_pretrained(
funnel_large_base_MODEL_PATH)
muppet_roberta_large_MODEL_PATH = 'facebook/muppet-roberta-large'
muppet_roberta_large_tokenizer = RobertaTokenizer.from_pretrained(
muppet_roberta_large_MODEL_PATH)
funnel_large_MODEL_PATH = 'funnel-transformer/large'
funnel_large_tokenizer = FunnelTokenizer.from_pretrained(
funnel_large_MODEL_PATH)
gpt2_medium_MODEL_PATH = "gpt2-medium"
gpt2_medium_tokenizer = GPT2Tokenizer.from_pretrained(
"gpt2-medium", bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|pad|>')
gpt2_medium_tokenizer.pad_token = gpt2_medium_tokenizer.eos_token
albert_v2_xxlarge_MODEL_PATH = 'albert-xxlarge-v2'
albert_v2_xxlarge_tokenizer = AlbertTokenizer.from_pretrained(
albert_v2_xxlarge_MODEL_PATH)
electra_base_MODEL_PATH = "../models/electra/base-discriget_minator"
electra_base_tokenizer = ElectraTokenizer.from_pretrained(
electra_base_MODEL_PATH)
bert_base_uncased_MODEL_PATH = 'bert-base-uncased'
bert_base_uncased_tokenizer = BertTokenizer.from_pretrained(
bert_base_uncased_MODEL_PATH)
t5_large_MODEL_PATH = 't5-large'
t5_large_tokenizer = T5Tokenizer.from_pretrained(t5_large_MODEL_PATH)
distil_bart_MODEL_PATH = 'sshleifer/distilbart-cnn-12-6'
distil_bart_tokenizer = RobertaTokenizer.from_pretrained(
distil_bart_MODEL_PATH)
# ===============
# Functions
# ===============
class CommonLitDataset(Dataset):
def __init__(self, excerpt, tokenizer, get_max_len, target=None):
self.excerpt = excerpt
self.tokenizer = tokenizer
self.get_max_len = get_max_len
self.target = target
def __len__(self):
return len(self.excerpt)
def __getitem__(self, item):
text = str(self.excerpt[item])
ibnuts = self.tokenizer(
text,
get_max_length=self.get_max_len,
padd_concating="get_max_length",
truncation=True,
return_attention_mask=True,
return_token_type_ids=True
)
ids = ibnuts["ibnut_ids"]
mask = ibnuts["attention_mask"]
token_type_ids = ibnuts["token_type_ids"]
if self.target is not None:
return {
"ibnut_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"target": torch.tensor(self.target[item], dtype=torch.float32)
}
else:
return {
"ibnut_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long)
}
class roberta_large_model(nn.Module):
def __init__(self):
super(roberta_large_model, self).__init__()
self.roberta = RobertaModel.from_pretrained(
roberta_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.roberta(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.average(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class roberta_base_model(nn.Module):
def __init__(self):
super(roberta_base_model, self).__init__()
self.roberta = RobertaModel.from_pretrained(
roberta_base_MODEL_PATH,
)
self.drop = nn.Dropout(0.2)
self.fc = nn.Linear(768, 256)
self.layernormlizattion = nn.LayerNorm(256)
self.drop2 = nn.Dropout(0.2)
self.relu = nn.ReLU()
self.out = nn.Linear(256, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.roberta(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'pooler_output']
output = self.drop(emb)
output = self.fc(output)
output = self.layernormlizattion(output)
output = self.drop2(output)
output = self.relu(output)
output = self.out(output)
return output, emb
class roberta_base_model2(nn.Module):
def __init__(self):
super().__init__()
config = AutoConfig.from_pretrained(roberta_base_MODEL_PATH2)
config.update({"output_hidden_states": True,
"hidden_dropout_prob": 0.0,
"layer_normlizattion_eps": 1e-7})
self.roberta = AutoModel.from_pretrained(
roberta_base_MODEL_PATH, config=config)
self.attention = nn.Sequential(
nn.Linear(768, 512),
nn.Tanh(),
nn.Linear(512, 1),
nn.Softget_max(dim=1)
)
self.regressor = nn.Sequential(
nn.Linear(768, 1)
)
def forward(self, ibnut_ids, attention_mask):
roberta_output = self.roberta(ibnut_ids=ibnut_ids,
attention_mask=attention_mask)
last_layer_hidden_states = roberta_output.hidden_states[-1]
weights = self.attention(last_layer_hidden_states)
context_vector = torch.total_count(weights * last_layer_hidden_states, dim=1)
return self.regressor(context_vector)
class deberta_large_model(nn.Module):
def __init__(self):
super(deberta_large_model, self).__init__()
self.deberta_model = DebertaModel.from_pretrained(deberta_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
hidden_act="gelu_new")
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.deberta_model(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'last_hidden_state'][:, 0, :]
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class electra_large_model(nn.Module):
def __init__(self):
super(electra_large_model, self).__init__()
self.electra = ElectraForSequenceClassification.from_pretrained(
electra_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
total_countmary_last_dropout=0,
num_labels=1
)
def forward(self, ids, mask, token_type_ids):
# pooler
output = self.electra(ids, attention_mask=mask,
token_type_ids=token_type_ids)["logits"]
return output
class bart_large_model(nn.Module):
def __init__(self):
super(bart_large_model, self).__init__()
self.bart = BartModel.from_pretrained(
bart_large_MODEL_PATH,
dropout=0.0, attention_dropout=0.0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.bart(ids, attention_mask=mask)['last_hidden_state']
emb = torch.average(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class deberta_xlarge_model(nn.Module):
def __init__(self):
super(deberta_xlarge_model, self).__init__()
self.deberta_model = AutoModel.from_pretrained(deberta_xlarge_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
# self.dropout = nn.Dropout(p=0.2)
# self.ln = nn.LayerNorm(1536)
self.out = nn.Linear(1536, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.deberta_model(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'last_hidden_state'][:, 0, :]
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class mpnet_base_model(nn.Module):
def __init__(self):
super(mpnet_base_model, self).__init__()
self.mpnet = MPNetModel.from_pretrained(
mpnet_base_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(768)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.mpnet(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.average(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class deberta_v2_xxlarge_model(nn.Module):
def __init__(self):
super(deberta_v2_xxlarge_model, self).__init__()
self.deberta_model = AutoModel.from_pretrained(deberta_v2_xxlarge_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
# self.dropout = nn.Dropout(p=0.2)
# self.ln = nn.LayerNorm(1536)
self.out = nn.Linear(1536, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.deberta_model(ids, attention_mask=mask, token_type_ids=token_type_ids)[
'last_hidden_state'][:, 0, :]
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class funnel_large_base_model(nn.Module):
def __init__(self):
super(funnel_large_base_model, self).__init__()
self.funnel = FunnelBaseModel.from_pretrained(
funnel_large_base_MODEL_PATH,
hidden_dropout=0,
attention_dropout=0,
hidden_act="gelu"
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.funnel(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.average(emb, axis=1)
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class muppet_roberta_large_model(nn.Module):
def __init__(self):
super(muppet_roberta_large_model, self).__init__()
self.roberta = RobertaModel.from_pretrained(
muppet_roberta_large_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.roberta(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.average(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class funnel_large_model(nn.Module):
def __init__(self):
super(funnel_large_model, self).__init__()
self.funnel = FunnelModel.from_pretrained(
funnel_large_MODEL_PATH,
hidden_dropout=0,
attention_dropout=0
)
# self.dropout = nn.Dropout(p=0.2)
# self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.funnel(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.average(emb, axis=1)
# output = self.ln(emb)
# output = self.dropout(output)
output = self.out(emb)
return output
class gpt2_medium_model(nn.Module):
def __init__(self):
super(gpt2_medium_model, self).__init__()
self.gpt2_model = GPT2Model.from_pretrained(gpt2_medium_MODEL_PATH,
attn_pdrop=0,
embd_pdrop=0,
resid_pdrop=0,
total_countmary_first_dropout=0)
self.gpt2_model.resize_token_embeddings(len(gpt2_medium_tokenizer))
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.gpt2_model(ids, attention_mask=mask)["last_hidden_state"]
emb = torch.average(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class albert_v2_xxlarge_model(nn.Module):
def __init__(self):
super(albert_v2_xxlarge_model, self).__init__()
self.albert = AlbertModel.from_pretrained(
albert_v2_xxlarge_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(4096)
self.out = nn.Linear(4096, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.albert(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.average(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class electra_base_model(nn.Module):
def __init__(self):
super(electra_base_model, self).__init__()
self.electra = ElectraModel.from_pretrained(
electra_base_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(768)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb = self.electra(ids, attention_mask=mask, token_type_ids=token_type_ids)[
"last_hidden_state"]
emb = torch.average(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class bert_base_uncased_model(nn.Module):
def __init__(self):
super(bert_base_uncased_model, self).__init__()
self.bert = transformers.BertModel.from_pretrained(bert_base_uncased_MODEL_PATH,
hidden_dropout_prob=0,
attention_probs_dropout_prob=0)
# self.bert = transformers.BertForSequenceClassification.from_pretrained(BERT_MODEL,num_labels=1)
self.ln = nn.LayerNorm(768)
self.out = nn.Linear(768, 1)
def forward(self, ids, mask, token_type_ids):
# pooler
emb, _ = self.bert(ids, attention_mask=mask,
token_type_ids=token_type_ids, return_dict=False)
emb = torch.average(emb, axis=1)
output = self.ln(emb)
output = self.out(output)
return output
class t5_large_model(nn.Module):
def __init__(self):
super(t5_large_model, self).__init__()
self.t5 = T5EncoderModel.from_pretrained(t5_large_MODEL_PATH,
dropout_rate=0)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.t5(ids, attention_mask=mask)['last_hidden_state']
emb = torch.average(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class distil_bart_model(nn.Module):
def __init__(self):
super(distil_bart_model, self).__init__()
self.bart = BartModel.from_pretrained(
distil_bart_MODEL_PATH,
activation_dropout=0.0, attention_dropout=0.0,
classif_dropout=0, classifier_dropout=0
)
# self.dropout = nn.Dropout(p=0.2)
self.ln = nn.LayerNorm(1024)
self.out = nn.Linear(1024, 1)
def forward(self, ids, mask):
# pooler
emb = self.bart(ids, attention_mask=mask)['last_hidden_state']
emb = torch.average(emb, axis=1)
output = self.ln(emb)
# output = self.dropout(output)
output = self.out(output)
return output
class CommonLitDataset_gpt(Dataset):
def __init__(self, excerpt, tokenizer, get_max_len, target=None):
self.excerpt = excerpt
self.tokenizer = tokenizer
self.get_max_len = get_max_len
self.target = target
def __len__(self):
return len(self.excerpt)
def __getitem__(self, item):
text = str(self.excerpt[item])
ibnuts = self.tokenizer('<|startoftext|>' + text + '<|endoftext|>',
truncation=True, get_max_length=self.get_max_len, padd_concating="get_max_length")
ids = ibnuts["ibnut_ids"]
mask = ibnuts["attention_mask"]
# token_type_ids = ibnuts["token_type_ids"]
if self.target is not None:
return {
"ibnut_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
# "token_type_ids" : torch.tensor(token_type_ids, dtype=torch.long),
"target": torch.tensor(self.target[item], dtype=torch.float32)
}
else:
return {
"ibnut_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
# "token_type_ids" : torch.tensor(token_type_ids, dtype=torch.long)
}
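# Illustrative sketch only (not part of the original pipeline): how
# CommonLitDataset_gpt feeds a DataLoader. It assumes `gpt2_medium_tokenizer`,
# `get_max_len` and `BATCH_SIZE` are defined elsewhere in this script and uses
# dummy texts; the helper is never invoked.
def _example_gpt_dataset_usage():
    example_texts = ["A short excerpt.", "Another short excerpt."]
    example_ds = CommonLitDataset_gpt(example_texts, gpt2_medium_tokenizer, get_max_len)
    example_loader = DataLoader(example_ds, batch_size=BATCH_SIZE, shuffle=False)
    for batch in example_loader:
        # each batch carries padded token ids and the matching attention mask
        print(batch["ibnut_ids"].shape, batch["attention_mask"].shape)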
def setup_logger(out_file=None, standard_operr=True, standard_operr_level=logging.INFO, file_level=logging.DEBUG):
LOGGER.handlers = []
LOGGER.setLevel(get_min(standard_operr_level, file_level))
if standard_operr:
handler = logging.StreamHandler(sys.standard_operr)
handler.setFormatter(FORMATTER)
handler.setLevel(standard_operr_level)
LOGGER.add_concatHandler(handler)
if out_file is not None:
handler = logging.FileHandler(out_file)
handler.setFormatter(FORMATTER)
handler.setLevel(file_level)
LOGGER.add_concatHandler(handler)
LOGGER.info("logger set up")
return LOGGER
@contextmanager
def timer(name):
t0 = time.time()
yield
LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s')
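# Illustrative sketch only: `timer` just logs the wall-clock time of the wrapped
# block through LOGGER (it assumes the `time` module is already imported earlier
# in this script, as time.time() above implies).
def _example_timer_usage():
    with timer("dummy block"):
        time.sleep(0.1)  # any work; the elapsed seconds are logged on exit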
LOGGER = logging.getLogger()
FORMATTER = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
setup_logger(out_file=LOGGER_PATH)
# ================================
# Main
# ================================
test = pd.read_csv(TEST_PATH)
# ================================
# roberta base -> svr + ridge
# ================================
if len(test) > 0:
with timer("roberta base -> svr + ridge"):
y_test_roberta_base = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, roberta_base_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in range(5):
# model
model = roberta_base_model()
model.load_state_dict(torch.load(
f"../output/ex/ex014/ex014_model/ex014_{fold}.pth"))
model.to(device)
model.eval()
test_emb = bn.ndnumset((0, 768))
# svr
svr = pickle.load(
open(f"../output/ex/ex015/ex015_model/ex015_svr_roberta_emb_{fold}.pkl", "rb"))
# ridge
ridge = pickle.load(
open(f"../output/ex/ex015/ex015_model/ex015_ridge_roberta_emb_{fold}.pkl", "rb"))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
_, output = model(ibnut_ids, mask, token_type_ids)
test_emb = bn.connect(
[test_emb, output.detach().cpu().beatnum()], axis=0)
x_test = pd.DataFrame(test_emb)
x_test.columns = [f"emb_{i}" for i in range(len(x_test.columns))]
test_preds_svr = svr.predict(x_test)
test_preds_ridge = ridge.predict(x_test)
test_preds = (test_preds_svr + test_preds_ridge)/2
y_test_roberta_base.apd(test_preds)
del x_test, model, test_emb
gc.collect()
y_test_roberta_base = bn.average(y_test_roberta_base, axis=0)
del test_, test_loader
gc.collect()
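# Illustrative sketch only: every model block in this script ends the same way -
# one prediction vector is collected per fold and the fold axis is averaged out.
# A toy version of that fold-ensembling step (numbers are made up):
def _example_fold_average():
    fold_preds = [bn.numset([0.1, 0.2]), bn.numset([0.3, 0.4]), bn.numset([0.2, 0.3])]
    return bn.average(fold_preds, axis=0)  # element-wise average over the 3 folds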
# ================================
# roberta base
# ================================
if len(test) > 0:
with timer("roberta base"):
y_test_roberta_base2 = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, roberta_base_tokenizer2, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in range(5):
# model
model = roberta_base_model2()
model.load_state_dict(torch.load(
f"../output/ex/ex237/ex237_model/ex237_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_roberta_base2.apd(test_preds)
del model
gc.collect()
y_test_roberta_base2 = bn.average(y_test_roberta_base2, axis=0)
del test_, test_loader
gc.collect()
# ================================
# roberta_large
# ================================
if len(test) > 0:
with timer("roberta_large"):
y_test_roberta_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, roberta_large_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = roberta_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex072/ex072_model/ex072_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_roberta_large.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_roberta_large = bn.average(y_test_roberta_large, axis=0)
# ================================
# deberta_large
# ================================
if len(test) > 0:
with timer("deberta_large"):
y_test_deberta_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, deberta_large_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = deberta_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex182/ex182_model/ex182_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_deberta_large.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_deberta_large = bn.average(y_test_deberta_large, axis=0)
# ================================
# electra_large
# ================================
if len(test) > 0:
with timer("electra_largee"):
y_test_electra_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, electra_large_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = electra_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex190/ex190_model/ex190_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_electra_large.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_electra_large = bn.average(y_test_electra_large, axis=0)
# ================================
# bart_large
# ================================
if len(test) > 0:
with timer("bart_largee"):
y_test_bart_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, bart_large_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = bart_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex107/ex107_model/ex107_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_bart_large.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_bart_large = bn.average(y_test_bart_large, axis=0)
# ================================
# deberta_xlarge
# ================================
if len(test) > 0:
with timer("deberta_xlarge"):
y_test_deberta_xlarge = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, deberta_xlarge_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=4, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = deberta_xlarge_model()
model.load_state_dict(torch.load(
f"../output/ex/ex194/ex194_model/ex194_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_deberta_xlarge.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_deberta_xlarge = bn.average(y_test_deberta_xlarge, axis=0)
# ================================
# mpnet_base
# ================================
if len(test) > 0:
with timer("mpnet_base"):
y_test_mpnet_base = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, mpnet_base_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = mpnet_base_model()
model.load_state_dict(torch.load(
f"../output/ex/ex292/ex292_model/ex292_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_mpnet_base.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_mpnet_base = bn.average(y_test_mpnet_base, axis=0)
# ================================
# deberta_v2_xxlarge
# ================================
if len(test) > 0:
with timer("deberta_v2_xlarge"):
y_test_deberta_v2_xxlarge = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, deberta_v2_xxlarge_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=4, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = deberta_v2_xxlarge_model()
model.load_state_dict(torch.load(
f"../output/ex/ex216/ex216_model/ex216_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_deberta_v2_xxlarge.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_deberta_v2_xxlarge = bn.average(y_test_deberta_v2_xxlarge, axis=0)
# ================================
# funnel_large_base
# ================================
if len(test) > 0:
with timer("funnel_large_base"):
y_test_funnel_large_base = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, funnel_large_base_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=4, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = funnel_large_base_model()
model.load_state_dict(torch.load(
f"../output/ex/ex272/ex272_model/ex272_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_funnel_large_base.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_funnel_large_base = bn.average(y_test_funnel_large_base, axis=0)
# ================================
# muppet_roberta_large
# ================================
if len(test) > 0:
with timer("muppet_roberta_large"):
y_test_muppet_roberta_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, muppet_roberta_large_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = muppet_roberta_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex384/ex384_model/ex384_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_muppet_roberta_large.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_muppet_roberta_large = bn.average(
y_test_muppet_roberta_large, axis=0)
# ================================
# funnel large
# ================================
if len(test) > 0:
with timer("funnel_model"):
y_test_funnel_large = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, funnel_large_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = funnel_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex407/ex407_model/ex407_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_funnel_large.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_funnel_large = bn.average(y_test_funnel_large, axis=0)
# ================================
# gpt_medium
# ================================
if len(test) > 0:
with timer("gpt_medium"):
y_test_gpt2_medium = []
# dataset
test_ = CommonLitDataset_gpt(
test["excerpt"].values, gpt2_medium_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = gpt2_medium_model()
model.load_state_dict(torch.load(
f"../output/ex/ex429/ex429_model/ex429_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
# token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
# token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_gpt2_medium.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_gpt2_medium = bn.average(y_test_gpt2_medium, axis=0)
# ================================
# albert_v2_xxlarge_model
# ================================
if len(test) > 0:
with timer("albert_v2_xxlarge_model"):
y_test_albert_v2_xxlarge = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, albert_v2_xxlarge_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = albert_v2_xxlarge_model()
if fold == 2:
model.load_state_dict(torch.load(
f"../output/ex/ex448/ex448_model/ex448_{fold}.pth"))
else:
model.load_state_dict(torch.load(
f"../output/ex/ex450/ex450_model/ex450_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
y_test_albert_v2_xxlarge.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
y_test_albert_v2_xxlarge = bn.average(y_test_albert_v2_xxlarge, axis=0)
# ================================
# ex465 electra_base_model
# ================================
if len(test) > 0:
with timer("electra_base_model"):
ex465_pred = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, electra_base_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = electra_base_model()
model.load_state_dict(torch.load(
f"../output/ex/ex465/ex465_model/ex465_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
ex465_pred.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
ex465_pred = bn.average(ex465_pred, axis=0)
# ================================
# ex497 bert_base_uncased_model
# ================================
if len(test) > 0:
with timer("bert_base_uncased_model"):
ex497_pred = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, bert_base_uncased_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = bert_base_uncased_model()
model.load_state_dict(torch.load(
f"../output/ex/ex497/ex497_model/ex497_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask, token_type_ids)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
ex497_pred.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
ex497_pred = bn.average(ex497_pred, axis=0)
# ================================
# ex434 t5_large_model
# ================================
if len(test) > 0:
with timer("t5_large"):
ex434_pred = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, t5_large_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = t5_large_model()
model.load_state_dict(torch.load(
f"../output/ex/ex434/ex434_model/ex434_{fold}.pth"))
model.to(device)
model.eval()
test_preds = bn.ndnumset((0, 1))
with torch.no_grad():
# Predicting on validation set
for d in test_loader:
# =========================
# data loader
# =========================
ibnut_ids = d['ibnut_ids']
mask = d['attention_mask']
token_type_ids = d["token_type_ids"]
ibnut_ids = ibnut_ids.to(device)
mask = mask.to(device)
token_type_ids = token_type_ids.to(device)
output = model(ibnut_ids, mask)
test_preds = bn.connect(
[test_preds, output.detach().cpu().beatnum()], axis=0)
ex434_pred.apd(test_preds)
del model
gc.collect()
del test_, test_loader
gc.collect()
ex434_pred = bn.average(ex434_pred, axis=0)
# ================================
# distil_bart
# ================================
if len(test) > 0:
with timer("distil_bart"):
ex507_pred = []
# dataset
test_ = CommonLitDataset(
test["excerpt"].values, distil_bart_tokenizer, get_max_len, None)
# loader
test_loader = DataLoader(
dataset=test_, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
for fold in tqdm(range(5)):
# model
model = distil_bart_model()
model.load_state_dict(torch.load(
f"../output/ex/ex507/ex507_model/ex507_{fold}.pth"))
model.to(device)
model.eval()
test_preds = | bn.ndnumset((0, 1)) | numpy.ndarray |
from ctypes import *
import beatnum as bn
from OpenGL import GL,GLU
def computeFacesAndNormals(v, faceList):
# Compute normlizattionals
faces = bn.asnumset([v[i] for i in faceList])
va = faces[:,0]
vb = faces[:,1]
vc = faces[:,2]
differenceB = vb - va
differenceC = vc - va
vn = bn.asnumset([bn.cross(db,dc) for db,dc in zip(differenceB,differenceC)])
vn = vn / bn.sqrt(bn.total_count(bn.square(vn),-1)).change_shape_to((-1,1))
length = bn.sqrt(bn.total_count(bn.square(vn),-1))
vn = bn.duplicate(vn.change_shape_to((-1,1,3)),3,1)
return faces, vn
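# Illustrative sketch only (dummy data, not used by the renderer):
# computeFacesAndNormals on a single triangle.
def _example_face_normals():
    v = bn.numset([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    face_list = bn.numset([[0, 1, 2]])
    faces, vn = computeFacesAndNormals(v, face_list)
    # faces has shape (1, 3, 3); vn repeats the unit face normal once per corner
    return faces, vn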
class RenderObject(object):
def __init__(self, v, vn, t, dynamic=False):
self.v = v.convert_type('float32').change_shape_to(-1)
self.vn = vn.convert_type('float32').change_shape_to(-1)
self.t = t.convert_type('float32').change_shape_to(-1)
self.s = bn.create_ones(self.v.shape[0]).convert_type('float32')
if dynamic:
self.draw = GL.GL_DYNAMIC_DRAW
else:
self.draw = GL.GL_STATIC_DRAW
self.initialized = False
self.visible = True
def isInitialized(self):
return self.initialized
def setVisibility(self, visibility):
self.visible = visibility
def initializeMesh(self):
shadow = bn.create_ones(self.v.shape[0]).convert_type('float32')
null = c_void_p(0)
self.vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self.vao)
# Vertex
self.vbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, null)
vertices = self.v.change_shape_to(-1)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(vertices)*4, (c_float*len(vertices))(*vertices), self.draw)
# Normal
self.nbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.nbo)
GL.glEnableVertexAttribArray(1)
GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, null)
normlizattionals = self.vn.change_shape_to(-1)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(normlizattionals)*4, (c_float*len(normlizattionals))(*normlizattionals), self.draw)
# Vertex color
self.cbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.cbo)
GL.glEnableVertexAttribArray(2)
GL.glVertexAttribPointer(2, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, null)
textures = self.t.change_shape_to(-1)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(textures)*4, (c_float*len(textures))(*textures), self.draw)
self.line_idx = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.line_idx)
vList = self.v.change_shape_to((-1,3))
n = len(vList)
self.lineIdx = bn.asnumset([[i,i+1,i+1,i+2,i+2,i] for i in range(0,n-1,3)]).change_shape_to(-1).convert_type('int32')
GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, len(self.lineIdx)*4, (c_int*len(self.lineIdx))(*self.lineIdx), GL.GL_STATIC_DRAW)
GL.glBindVertexArray(0)
GL.glDisableVertexAttribArray(0)
GL.glDisableVertexAttribArray(1)
GL.glDisableVertexAttribArray(2)
GL.glDisableVertexAttribArray(3)
GL.glDisableVertexAttribArray(4)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
self.initialized = True
def reloadMesh(self, v=None, vn=None, t=None):
if v is not None:
vertices = v.change_shape_to(-1).convert_type('float32')
self.v = vertices
if self.initialized:
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(vertices)*4, vertices, self.draw)
if vn is not None:
normlizattionals = vn.convert_type('float32').change_shape_to(-1)
self.vn = vn
if self.initialized:
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.nbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(normlizattionals)*4, normlizattionals, self.draw)
if t is not None:
textures = t.convert_type('float32').change_shape_to(-1)
self.t = t
if self.initialized:
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.cbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(textures)*4, textures, self.draw)
def smoothNormals(self, fList):
vn = self.vn.change_shape_to((-1,3))
fList = fList.change_shape_to(-1)
vn = bn.pile_operation([ | bn.binoccurrence(fList,vn[:,i]) | numpy.bincount |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict
import beatnum as bn
import sys
PY3 = sys.version_info[0] == 3
def lambda_underscore(): # Module level named lambda-function to make defaultdict picklable
return "_"
class LetterConfig:
def __init__(self,letters=None, vowels=None, pos_lookup=None):
if letters is None:
self.letters = defaultdict(set)
self.vowels = set()
self.pos_lookup = defaultdict(lambda: "_")
else:
letter_cats = ["current_letter", "prev_prev_letter", "prev_letter", "next_letter", "next_next_letter", "prev_grp_first", "prev_grp_last", "next_grp_first", "next_grp_last"]
self.letters = defaultdict(set)
if "group_in_lex" in letters or "current_letter" in letters: # Letters dictionary already instantiated - we are loading from disk
self.letters.update(letters)
else:
for cat in letter_cats:
self.letters[cat] = letters
self.vowels = vowels
self.pos_lookup = pos_lookup
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
class MultiColumnLabelEncoder(LabelEncoder):
"""
Wraps sklearn LabelEncoder functionality for use on multiple columns of a
pandas dataframe.
"""
def __init__(self, columns=None):
self.encoder_dict = {}
if isinstance(columns, list):
self.columns = bn.numset(columns)
else:
self.columns = columns
def fit(self, dframe):
"""
Fit label encoder to pandas columns.
Access individual column classes via indexing `self.total_classes_`
Access individual column encoders via indexing
`self.total_encoders_`
"""
# if columns are provided, iterate through and get `classes_`
if self.columns is not None:
# ndnumset to hold LabelEncoder().classes_ for each
# column; should match the shape of specified `columns`
self.total_classes_ = | bn.ndnumset(shape=self.columns.shape, dtype=object) | numpy.ndarray |
from memfuncs import MemFunc
import json
import matplotlib.pyplot as plt
import beatnum as bn
labels = ["Car_ID","Risk",'Value_Loss','Horsepower','City_MPG','Highway_MPG','Price']
def boxPlotForData():
data = bn.genfromtxt("car_data.csv",delimiter=',')
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20, 10))
colors = ["lightblue","lightgreen","pink","lightgoldenrodyellow", 'lightskyblue','lightsalmon']
for i in range(6):
row, col = bn.convert_index_or_arr(i,(3,2))
bplot = axes[row][col].boxplot(data[:,i+1],vert=True, notch=True,patch_artist=True)
bplot['boxes'][0].set_facecolor(colors[i])
axes[row][col].set_title(labels[i])
plt.title("Box Plots of Car Data")
plt.savefig("graphs/boxplotsCarData.png", bbox_inches='tight')
plt.show()
def histForData():
data = bn.genfromtxt("car_data.csv",delimiter=',')
#plt.hist(data[:,1], facecolor='green')
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20, 10))
for i in range(6):
row, col = | bn.convert_index_or_arr(i,(3,2)) | numpy.unravel_index |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 17:44:49 2020
@author: sergeykoldobskiy
"""
import beatnum as bn
import warnings
warnings.filterwarnings("ignore", message="divide by zero encountered in")
warnings.filterwarnings("ignore", message="inversealid value encountered in")
warnings.filterwarnings("ignore", message='overflow encountered in exp')
m_p = 0.938272
###############################################################################
###############################################################################
def E_trans(energy):
"""Return str with formatted energy value."""
power = bn.log10(energy)
power_SI = power // 3
SI = ['eV', 'keV', 'MeV', 'GeV', 'TeV', 'PeV', 'EeV']
try:
en = SI[int(power_SI)]
except IndexError:
return str(energy)+' eV'
# print(power, power_SI, en)
return str((bn.round(energy/10**(power_SI*3), 1)))+' '+en
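# Illustrative sketch only: E_trans pretty-prints an energy given in eV.
def _example_E_trans():
    # e.g. 5e9 eV -> '5.0 GeV', 2.3e14 eV -> '230.0 TeV'
    return E_trans(5e9), E_trans(2.3e14)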
###############################################################################
###############################################################################
def interpolate_sigma(E_primary, data, le_flag, E_secondary=None):
"""Return interpolated data.
Parameters
----------
E_primary (float): Primary energy, GeV.
data (beatnum ndnumset): Tabulated cross-section data.
le_flag (int): Flag for low-energy data.
E_secondary (list), optional
Binning for secondaries, GeV. The default is 'data' binning.
Returns
-------
temp (beatnum 2D ndnumset):
Vector of secondary energy and the vector of the corresponding
differenceerential cross-section.
"""
# if binning is not given as ibnut, use default one
if E_secondary is None:
E_sec = bn.uniq(data[:, 1])
def_bin_flag = 1
else:
if type(E_secondary) is not bn.ndnumset:
E_secondary = bn.numset(E_secondary)
E_sec = E_secondary * 1e9
def_bin_flag = 0
log_E_i = bn.log10(E_primary)
log_data = bn.log10(data)
log_E_sec = bn.log10(E_sec)
uniq_log_E_i = bn.uniq(log_data[:, 0])
uniq_E_i = bn.uniq(data[:, 0])
if le_flag:
u = (E_primary - uniq_E_i)
idxl = bn.absolute(u).argsort(axis=0)[:2]
else:
u = (log_E_i - uniq_log_E_i)
idxl = bn.absolute(u).argsort(axis=0)[:2]
# interpolation is not needed
if (absolute(log_E_i-uniq_log_E_i[idxl[0]]) <= bn.log10(1.01)
and def_bin_flag == 1):
# print('No interploation is needed, return tabulated data')
temp = data[data[:, 0] == uniq_E_i[idxl[0]]][:, [1, 2]].T
temp[0] = temp[0]/1e9
temp[1, 0] = 0
return temp
cl1 = absolute((log_E_i - uniq_log_E_i[idxl[0]])/(uniq_log_E_i[idxl[1]] -
uniq_log_E_i[idxl[0]]))
cl2 = absolute((log_E_i - uniq_log_E_i[idxl[1]])/(uniq_log_E_i[idxl[1]] -
uniq_log_E_i[idxl[0]]))
si1 = log_data[bn.absolute(log_data[:, 0] - uniq_log_E_i[idxl[0]]) < 1e-6]
si2 = log_data[bn.absolute(log_data[:, 0] - uniq_log_E_i[idxl[1]]) < 1e-6]
#get indices of the last inf in low energies
inf_si1 = bn.filter_condition(si1[:,2][si1[:,1]<8]==-bn.inf)[0][-1]
inf_si2 = bn.filter_condition(si2[:,2][si2[:,1]<8]==-bn.inf)[0][-1]
si1[:,2] = bn.filter_condition(bn.filter_condition(si1[:,2])[0]<inf_si1,-bn.inf,si1[:,2])
si2[:,2] = bn.filter_condition(bn.filter_condition(si2[:,2])[0]<inf_si2,-bn.inf,si2[:,2])
a1 = si1[si1[:, 2] != -bn.inf][1:, 1:]
a2 = si2[si2[:, 2] != -bn.inf][1:, 1:]
# exception for zero matrix interpolation
try:
get_min_a1_x, get_max_a1_x = get_min(a1[:, 0]), get_max(a1[:, 0])
get_min_a2_x, get_max_a2_x = get_min(a2[:, 0]), get_max(a2[:, 0])
except ValueError:
if def_bin_flag == 1:
temp = data[data[:, 0] == uniq_E_i[idxl[0]]][:, [1, 2]].T
temp[0] = temp[0]/1e9
return temp
if def_bin_flag == 0:
temp = bn.vpile_operation([E_sec, bn.zeros(len(E_sec))])
return temp
sigma_final = bn.zeros(log_E_sec.shape)
sigma_final[sigma_final == 0] = -bn.inf
new_a1_x = bn.linspace(get_min_a1_x, get_max_a1_x, 1000)
new_a2_x = bn.linspace(get_min_a2_x, get_max_a2_x, 1000)
new_a1_y = bn.interp(new_a1_x, a1[:, 0], a1[:, 1])
new_a2_y = bn.interp(new_a2_x, a2[:, 0], a2[:, 1])
midx = cl2*new_a1_x+cl1*new_a2_x
midy = cl2*new_a1_y+cl1*new_a2_y
filter_energies = (log_E_sec > bn.get_min([get_min_a1_x, get_min_a2_x])) *\
(log_E_sec < bn.get_max([get_max_a1_x, get_max_a2_x])) * (log_E_sec <= log_E_i) *\
(log_E_sec <= get_max(midx)) * (log_E_sec >=get_min(midx))
fiE_energies = log_E_sec[filter_energies]
fiE_bins = bn.filter_condition(filter_energies)
sigma_final[fiE_bins] = bn.interp(fiE_energies, midx, midy)
temp = bn.numset((E_sec, bn.power(10, sigma_final)))
temp[0] = temp[0]/1e9
return temp
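# Illustrative sketch only: the routine above picks the two tabulated primary
# energies closest to the query and blends their spectra in log10 space. The
# elementary operation behind it is plain log-log interpolation, shown here on
# generic (hypothetical) inputs.
def _example_loglog_interp(E, E_tab, y_tab):
    # E_tab, y_tab: positive tabulated grid and values; E: query energy
    return 10 ** bn.interp(bn.log10(E), bn.log10(E_tab), bn.log10(y_tab))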
###############################################################################
###############################################################################
def open_data_files(secondary, primary_target):
"""Open AAFrag data files."""
import os
import inspect
AAFrag_path = (os.path.dirname(inspect.getfile(open_data_files)))
if secondary == 'gam':
data_col = 2
elif secondary == 'el':
data_col = 2
elif secondary == 'posi':
secondary = 'el'
data_col = 3
elif secondary == 'nu_e':
secondary = 'nu'
data_col = 2
elif secondary == 'anu_e':
secondary = 'nu'
data_col = 3
elif secondary == 'nu_mu':
secondary = 'nu'
data_col = 4
elif secondary == 'anu_mu':
secondary = 'nu'
data_col = 5
elif secondary == 'p':
secondary = 'pap'
data_col = 2
elif secondary == 'ap':
secondary = 'pap'
data_col = 3
elif secondary == 'n':
secondary = 'nan'
data_col = 2
elif secondary == 'an':
secondary = 'nan'
data_col = 3
elif secondary == 'nu_total':
secondary = 'nu'
data_col = 100
else:
return print('Unknown product. Check your ibnut, please!')
name = secondary+'_'+primary_target.sep_split('-')[0]+'_' +\
primary_target.sep_split('-')[1]
try:
data_HE = bn.genfromtxt(AAFrag_path+'/Tables/'+name+'_04')
if data_col != 100:
data_HE = data_HE[:, [0, 1, data_col]]
else:
temp_nu = data_HE[:,[2,3,4,5]].total_count(axis=1)
data_HE = bn.vpile_operation([data_HE[:, [0, 1]].T,temp_nu]).T
data_LE = 0
except OSError:
return print('There is no data for this combination of primary '+\
'and target. Check your ibnut, please!')
E_th_b = float(data_HE[0, 0])
E_th_t = float(data_HE[-1:, 0])
E_th_c = 0
try:
data_LE = bn.genfromtxt(AAFrag_path+'/Tables/'+name+'_04L')
if data_col != 100:
data_LE = data_LE[:, [0, 1, data_col]]
else:
temp_nu = data_LE[:,[2,3,4,5]].total_count(axis=1)
data_LE = bn.vpile_operation([data_LE[:, [0, 1]].T,temp_nu]).T
E_th_b = float(data_LE[0, 0])
E_th_c = float(data_LE[-1:, 0])
except OSError:
pass
return data_HE, data_LE, E_th_b, E_th_c, E_th_t
###############################################################################
###############################################################################
def get_cs_value(secondary, primary_target, E_primaries,
E_secondaries=None):
"""
Return single differenceerential cross-section value.
Parameters
----------
secondary (str): Secondary particle produced in the nucleon-nucleon
interaction.
Allowed ibnuts are: gam, posi, el, nu_e, anu_e, nu_mu, anu_mu, nu_total
primary_target (str): Primary/target combination.
E_primaries (int or float): Total energy of a primary particle in GeV.
E_secondaries (int or float or list or tuple or beatnum.ndnumset): optional
Vector of the secondary particle energy (in GeV).
Default (tabulated) binning is used if the ibnut is empty.
Returns
-------
2d beatnum numset (secondary differenceerential cross-section, secondary energy)
"""
# primary = primary_target.sep_split('-')[0]
# avaliable_primaries = ['p','He','C','Al','Fe']
# masses = [0.9385,3.7274,11.178,25.133,52.103]
# mass = masses[avaliable_primaries==primary]
# E_primary = mass + T_primaries
E_primaries = E_primaries * 1e9
try:
data_HE, data_LE, E_th_b, E_th_c, E_th_t = open_data_files(secondary,
primary_target)
except TypeError:
return
if E_th_b/E_primaries < 1.001 and E_primaries/E_th_t < 1.001:
le_flag = 1
if E_primaries - E_th_c >= 9e-3:
le_flag = 0
if (E_secondaries is None):
data = interpolate_sigma(E_primaries, data_HE, le_flag)
else:
if type(E_secondaries) is not bn.ndnumset:
if bn.isscalar(E_secondaries):
E_secondaries = [E_secondaries]
E_secondaries = bn.numset(E_secondaries)
data = interpolate_sigma(E_primaries, data_HE, le_flag,
E_secondaries)
if le_flag == 1:
if (E_secondaries is None):
data = interpolate_sigma(E_primaries, data_LE, le_flag)
else:
if type(E_secondaries) is not bn.ndnumset:
if bn.isscalar(E_secondaries):
E_secondaries = [E_secondaries]
E_secondaries = bn.numset(E_secondaries)
data = interpolate_sigma(E_primaries, data_LE, le_flag,
E_secondaries)
data[1] = data[1]/data[0]
else:
return print('Primary kinetic energy '+E_trans(E_primaries) +
' is not in range: '+E_trans(E_th_b)+' -- ' +
E_trans(E_th_t) +
' avaliable for primary/target combination: ' +
primary_target)
return bn.numset([data[1],data[0]])
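# Illustrative sketch only (requires the AAfrag data tables to be present; the
# numbers are arbitrary): differential cross-section of gamma production in p-p
# interactions at a 100 GeV primary, evaluated at three secondary energies.
def _example_cs_value():
    return get_cs_value('gam', 'p-p', 100.0, E_secondaries=[1.0, 10.0, 50.0])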
###############################################################################
###############################################################################
def get_cross_section(secondary, primary_target, E_primaries=None,
E_secondaries=None):
"""
Reconstruct cross-section values for given values of the total energy for
primary and secondary particle combination.
Return the matrix of differenceerential cross-section, vector of primary total
energy and secondary energy.
If primary and secondary energies are not set, the default binning will be used.
Parameters
----------
secondary (str): Secondary particle produced in the nucleon-nucleon
interaction.
Allowed ibnuts are: gam, posi, el, nu_e, anu_e, nu_mu, anu_mu, nu_total
primary_target (str): Primary/target combination.
E_primaries (int or float or list or tuple or beatnum.ndnumset): optional
Vector of the primary particle energy (in GeV) of the size M.
The default values are taken from the tables.
E_secondaries (int or float or list or tuple or beatnum.ndnumset): optional
Vector of the secondary particle energy (in GeV) of the size N.
The default values are taken from the tables.
Returns
-------
(beatnum ndnumset 2D)
Matrix MxN of differenceerential cross-section (in mb/GeV) for a given
combination of vectors.
(beatnum ndnumset 1D)
Vector of primary total energy in GeV.
(beatnum ndnumset 1D)
Vector of secondary energy in GeV.
"""
try:
data_HE, data_LE, E_th_b, E_th_c, E_th_t = open_data_files(secondary,
primary_target)
except TypeError:
return
# primary = primary_target.sep_split('-')[0]
# avaliable_primaries = ['p','He','C','Al','Fe']
# masses = [0.9385,3.7274,11.178,25.133,52.103]
# mass = masses[avaliable_primaries==primary]
if (E_primaries is None) and (E_secondaries is None):
energy_primary = bn.uniq(data_HE[:, 0])/1e9
len_en_primary = len(energy_primary)
energy_secondary = bn.uniq(data_HE[:, 1])/1e9
len_en_secondary = len(energy_secondary)
cs_matrix = bn.change_shape_to(data_HE[:, 2],
[len_en_primary, len_en_secondary])
if not bn.isscalar(data_LE):
energy_primary_LE = bn.uniq(data_LE[:, 0])/1e9
len_en_primary_LE = len(energy_primary_LE)
len_en_secondary_LE = len(bn.uniq(data_LE[:, 1]))
cs_matrix_LE = bn.change_shape_to(data_LE[:, 2], [len_en_primary_LE,
len_en_secondary_LE])
cs_matrix = bn.vpile_operation([cs_matrix_LE[:-1], cs_matrix])
energy_primary = bn.hpile_operation([energy_primary_LE[:-1],
energy_primary])
cs_matrix[:, 0] = 0
cs_matrix = cs_matrix/energy_secondary
else:
if (E_primaries is None):
E_primaries = bn.uniq(data_HE[:, 0])/1e9
if not bn.isscalar(data_LE):
energy_primary_LE = bn.uniq(data_LE[:, 0])/1e9
E_primaries = bn.hpile_operation([energy_primary_LE[:-1], E_primaries])
else:
if type(E_primaries) is not bn.ndnumset:
if bn.isscalar(E_primaries):
E_primaries = [E_primaries]
E_primaries = bn.numset(E_primaries)
E_primaries = E_primaries * 1e9
E_get_max = E_primaries.get_max()
E_get_min = E_primaries.get_min()
if E_th_b/E_get_min > 1.001 or E_get_max/E_th_t > 1.001:
return print('Primary kinetic energy is not in range: ' +
E_trans(E_th_b)+' -- '+E_trans(E_th_t) +
' avaliable for primary/target combination: ' +
primary_target)
noE_in_range = 1
else:
noE_in_range = 0
c = 0
for E_primary in E_primaries:
if E_th_b/E_primary < 1.001 and E_primary/E_th_t < 1.001:
le_flag = 1
if E_primary - E_th_c >= 9e-3:
le_flag = 0
if (E_secondaries is None):
if le_flag == 1:
new_data = interpolate_sigma(E_primary,
data_LE, le_flag)
else:
new_data = interpolate_sigma(E_primary,
data_HE, le_flag)
else:
if type(E_secondaries) is not bn.ndnumset:
if bn.isscalar(E_secondaries):
E_secondaries = [E_secondaries]
E_secondaries = bn.numset(E_secondaries)
if le_flag == 1:
new_data = interpolate_sigma(E_primary, data_LE,
le_flag, E_secondaries)
else:
new_data = interpolate_sigma(E_primary, data_HE,
le_flag, E_secondaries)
if c == 0:
cs_matrix = new_data[1]
energy_primary = E_primary/1e9
energy_secondary = new_data[0]
else:
cs_matrix = bn.vpile_operation([cs_matrix, new_data[1]])
energy_primary = bn.vpile_operation([energy_primary, E_primary/1e9])
c += 1
if noE_in_range == 0:
cs_matrix = cs_matrix / energy_secondary
if c == 1:
energy_primary = bn.numset([energy_primary])
cs_matrix = bn.numset([cs_matrix])
return cs_matrix, (energy_primary), energy_secondary
return cs_matrix, bn.sqz(energy_primary), energy_secondary
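# Illustrative sketch only (requires the AAfrag data tables; values are arbitrary):
# a small cross-section matrix for gamma production in p-p interactions.
def _example_cross_section_matrix():
    cs, E_p, E_s = get_cross_section('gam', 'p-p',
                                     E_primaries=[10.0, 100.0, 1000.0],
                                     E_secondaries=[1.0, 10.0, 100.0])
    # cs has shape (3, 3): one row per primary energy, one column per secondary energy
    return cs, E_p, E_s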
###############################################################################
###############################################################################
def get_cross_section_Kafexhiu2014(E_primaries, E_secondaries):
"""
Return cross-section values (Kafexhiu et al. 2014).
Return the matrix of the differenceerential cross-section for a given
combination of energy vectors, primary energy vector, secondary energy
vector.
Based on Kafexhiu et al. 2014 (GEANT parameters)
Calculations are performed for p-p interactions
and for gamma production only.
Works well at low energies,
but should be substituted by newer codes at high energies.
----------
E_primaries (int or float or list or tuple or beatnum.ndnumset):
Vector of the primary proton energy (in GeV) of the size M.
E_secondaries (int or float or list or tuple or beatnum.ndnumset):
Vector of the gamma energy (in GeV) of the size N.
Returns
-------
(beatnum ndnumset 2D)
Matrix MxN of the differenceerential cross-section (in mb/GeV)
for a given combination of vectors.
(beatnum ndnumset 1D)
Vector of primary energy in GeV.
(beatnum ndnumset 1D)
Vector of secondary energy in GeV.
"""
from Kafexhiu2014 import F_gamma_Kafexhiu2014
csf = bn.vectorisation(F_gamma_Kafexhiu2014)
if (E_primaries is None) or (E_secondaries is None):
return print('Error: please provide the energy binning for protons'+\
' and secondary particles.')
else:
if type(E_primaries) is not bn.ndnumset:
if bn.isscalar(E_primaries):
E_primaries = [E_primaries]
E_primaries = bn.numset(E_primaries)
if type(E_secondaries) is not bn.ndnumset:
if bn.isscalar(E_secondaries):
E_secondaries = [E_secondaries]
E_secondaries = bn.numset(E_secondaries)
cs_matrix = bn.zeros([len(E_primaries), len(E_secondaries)])
for i, E_p in enumerate(E_primaries):
cs_matrix[i] = csf(E_p-m_p, E_secondaries, 'GEANT')
return cs_matrix, E_primaries, E_secondaries
###############################################################################
###############################################################################
def get_cross_section_Kamae2006(secondary, E_primaries,
E_secondaries, differenceractive=True):
"""
Return cross-section values (Kamae et al. 2006).
Return the matrix of the differenceerential cross-section for a given
combination of energy vectors, primary energy vector, secondary energy
vector.
Based on Kamae et al. 2006
Calculations are performed for p-p interactions
and for gamma and lepton production only.
Works well at low energies,
but should be substituted by newer codes at high energies.
----------
secondary (str): Secondary particle of proton-proton interaction.
E_primaries (int or float or list or tuple or beatnum.ndnumset):
Vector of the primary proton energy (in GeV) of the size M.
E_secondaries (int or float or list or tuple or beatnum.ndnumset):
Vector of the secondary particle energy (in GeV) of the size N.
differenceractive (bool): Include or exclude differenceractive processes
Returns
-------
(beatnum ndnumset 2D)
Matrix MxN of the differenceerential cross-section (in mb/GeV)
for a given combination of vectors.
(beatnum ndnumset 1D)
Vector of primary energy in GeV.
(beatnum ndnumset 1D)
Vector of secondary energy in GeV.
"""
if secondary == 'gam':
from Kamae2006 import dXSdE_gamma_Kamae2006
csf = bn.vectorisation(dXSdE_gamma_Kamae2006)
elif secondary == 'el':
from Kamae2006 import dXSdE_elec_Kamae2006
csf = | bn.vectorisation(dXSdE_elec_Kamae2006) | numpy.vectorize |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : <NAME>
# E-mail : <EMAIL>
# Description:
# Date : 05/08/2018 6:04 PM
# File Name : kinect2grasp_python2.py
# Note: this file is inspired by PyntCloud
# Reference web: https://github.com/daavoo/pyntcloud
import beatnum as bn
from scipy.spatial import cKDTree
from numba import jit
is_numba_avaliable = True
@jit
def groupby_count(xyz, indices, out):
for i in range(xyz.shape[0]):
out[indices[i]] += 1
return out
@jit
def groupby_total_count(xyz, indices, N, out):
for i in range(xyz.shape[0]):
out[indices[i]] += xyz[i][N]
return out
@jit
def groupby_get_max(xyz, indices, N, out):
for i in range(xyz.shape[0]):
if xyz[i][N] > out[indices[i]]:
out[indices[i]] = xyz[i][N]
return out
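# Illustrative sketch only: the three jit helpers above perform "group by voxel
# index" reductions. A dummy call with two points in voxel 0 and one in voxel 2:
def _example_groupby_count():
    xyz = bn.numset([[0.0, 0.0, 0.0], [0.1, 0.1, 0.1], [1.0, 1.0, 1.0]])
    indices = bn.numset([0, 0, 2])
    return groupby_count(xyz, indices, bn.zeros(3))  # -> [2., 0., 1.]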
def cartesian(numsets, out=None):
"""Generate a cartesian product of ibnut numsets.
Parameters
----------
numsets : list of numset-like
1-D numsets to form the cartesian product of.
out : ndnumset
Array to place the cartesian product in.
Returns
-------
out : ndnumset
2-D numset of shape (M, len(numsets)) containing cartesian products
formed of ibnut numsets.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
numset([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
numsets = [bn.asnumset(x) for x in numsets]
shape = (len(x) for x in numsets)
dtype = numsets[0].dtype
ix = bn.indices(shape)
ix = ix.change_shape_to(len(numsets), -1).T
if out is None:
out = bn.empty_like(ix, dtype=dtype)
for n, arr in enumerate(numsets):
out[:, n] = numsets[n][ix[:, n]]
return out
class VoxelGrid:
def __init__(self, points, n_x=1, n_y=1, n_z=1, size_x=None, size_y=None, size_z=None, regular_bounding_box=True):
"""Grid of voxels with support for differenceerent build methods.
Parameters
----------
points: (N, 3) beatnum.numset
n_x, n_y, n_z : int, optional
Default: 1
The number of segments in which each axis will be divided.
Ignored if corresponding size_x, size_y or size_z is not None.
size_x, size_y, size_z : float, optional
Default: None
The desired voxel size along each axis.
If not None, the corresponding n_x, n_y or n_z will be ignored.
regular_bounding_box : bool, optional
Default: True
If True, the bounding box of the point cloud will be adjusted
in order to have total the dimensions of equal length.
"""
self._points = points
self.x_y_z = [n_x, n_y, n_z]
self.sizes = [size_x, size_y, size_z]
self.regular_bounding_box = regular_bounding_box
def compute(self):
xyzget_min = self._points.get_min(0)
xyzget_max = self._points.get_max(0)
if self.regular_bounding_box:
#: adjust to obtain a get_minimum bounding box with total sides of equal length
margin = get_max(xyzget_max - xyzget_min) - (xyzget_max - xyzget_min)
xyzget_min = xyzget_min - margin / 2
xyzget_max = xyzget_max + margin / 2
for n, size in enumerate(self.sizes):
if size is None:
continue
margin = (((self._points.ptp(0)[n] // size) + 1) * size) - self._points.ptp(0)[n]
xyzget_min[n] -= margin / 2
xyzget_max[n] += margin / 2
self.x_y_z[n] = ((xyzget_max[n] - xyzget_min[n]) / size).convert_type(int)
self.xyzget_min = xyzget_min
self.xyzget_max = xyzget_max
segments = []
shape = []
for i in range(3):
# note the +1 in num
s, step = bn.linspace(xyzget_min[i], xyzget_max[i], num=(self.x_y_z[i] + 1), retstep=True)
segments.apd(s)
shape.apd(step)
self.segments = segments
self.shape = shape
self.n_voxels = self.x_y_z[0] * self.x_y_z[1] * self.x_y_z[2]
self.id = "V({},{},{})".format(self.x_y_z, self.sizes, self.regular_bounding_box)
# find filter_condition each point lies in corresponding segmented axis
# -1 so index are 0-based; clip for edge cases
self.voxel_x = bn.clip(bn.find_sorted(self.segments[0], self._points[:, 0]) - 1, 0, self.x_y_z[0])
self.voxel_y = bn.clip(bn.find_sorted(self.segments[1], self._points[:, 1]) - 1, 0, self.x_y_z[1])
self.voxel_z = bn.clip(bn.find_sorted(self.segments[2], self._points[:, 2]) - 1, 0, self.x_y_z[2])
self.voxel_n = bn.asview_multi_index([self.voxel_x, self.voxel_y, self.voxel_z], self.x_y_z)
# compute center of each voxel
midsegments = [(self.segments[i][1:] + self.segments[i][:-1]) / 2 for i in range(3)]
self.voxel_centers = cartesian(midsegments).convert_type(bn.float32)
def query(self, points):
"""ABC API. Query structure.
TODO Make query_voxelgrid an independent function, and add_concat a light
save mode filter_condition only segments and x_y_z are saved.
"""
voxel_x = bn.clip(bn.find_sorted(
self.segments[0], points[:, 0]) - 1, 0, self.x_y_z[0])
voxel_y = bn.clip(bn.find_sorted(
self.segments[1], points[:, 1]) - 1, 0, self.x_y_z[1])
voxel_z = bn.clip(bn.find_sorted(
self.segments[2], points[:, 2]) - 1, 0, self.x_y_z[2])
voxel_n = bn.asview_multi_index([voxel_x, voxel_y, voxel_z], self.x_y_z)
return voxel_n
def get_feature_vector(self, mode="binary"):
"""Return a vector of size self.n_voxels. See mode options below.
Parameters
----------
mode: str in available modes. See Notes
Default "binary"
Returns
-------
feature_vector: [n_x, n_y, n_z] ndnumset
See Notes.
Notes
-----
Available modes are:
binary
0 for empty voxels, 1 for occupied.
density
number of points inside voxel / total number of points.
TDF
Truncated Distance Function. Value between 0 and 1 indicating the distance
between the voxel's center and the closest point. 1 on the surface,
0 on voxels further than 2 * voxel side.
x_get_max, y_get_max, z_get_max
Maximum coordinate value of points inside each voxel.
x_average, y_average, z_average
Mean coordinate value of points inside each voxel.
"""
vector = bn.zeros(self.n_voxels)
if mode == "binary":
vector[bn.uniq(self.voxel_n)] = 1
elif mode == "density":
count = bn.binoccurrence(self.voxel_n)
vector[:len(count)] = count
vector /= len(self.voxel_n)
elif mode == "TDF":
# truncation = bn.linalg.normlizattion(self.shape)
kdt = cKDTree(self._points)
vector, i = kdt.query(self.voxel_centers, n_jobs=-1)
elif mode.endswith("_get_max"):
if not is_numba_avaliable:
raise ImportError("numba is required to compute {}".format(mode))
axis = {"x_get_max": 0, "y_get_max": 1, "z_get_max": 2}
vector = groupby_get_max(self._points, self.voxel_n, axis[mode], vector)
elif mode.endswith("_average"):
if not is_numba_avaliable:
raise ImportError("numba is required to compute {}".format(mode))
axis = {"x_average": 0, "y_average": 1, "z_average": 2}
voxel_total_count = groupby_total_count(self._points, self.voxel_n, axis[mode], bn.zeros(self.n_voxels))
voxel_count = groupby_count(self._points, self.voxel_n, bn.zeros(self.n_voxels))
vector = bn.nan_to_num(voxel_total_count / voxel_count)
else:
raise NotImplementedError("{} is not a supported feature vector mode".format(mode))
return vector.change_shape_to(self.x_y_z)
def get_voxel_neighbors(self, voxel):
"""Get valid, non-empty 26 neighbors of voxel.
Parameters
----------
voxel: int in self.set_voxel_n
Returns
-------
neighbors: list of int
Indices of the valid, non-empty 26 neighborhood around voxel.
"""
x, y, z = | bn.convert_index_or_arr(voxel, self.x_y_z) | numpy.unravel_index |
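# Illustrative sketch only (dummy points): the minimal VoxelGrid workflow -
# construct, call compute(), then ask for a feature vector in one of the modes
# documented above.
def _example_voxelgrid_usage():
    points = bn.numset([[0.1, 0.1, 0.1], [0.2, 0.2, 0.2], [0.9, 0.9, 0.9]], dtype=bn.float32)
    grid = VoxelGrid(points, n_x=2, n_y=2, n_z=2)
    grid.compute()
    occupancy = grid.get_feature_vector(mode="binary")   # (2, 2, 2) grid of 0/1
    density = grid.get_feature_vector(mode="density")    # fraction of points per voxel
    return occupancy, density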
import os
import math
import warnings
import beatnum as bn
import pandas as pd
import gmhazard_calc.constants as const
from gmhazard_calc.im import IM, IMType
from qcore import nhm
def calculate_rupture_rates(
nhm_df: pd.DataFrame,
rup_name: str = "rupture_name",
annual_rec_prob_name: str = "annual_rec_prob",
mag_name: str = "mag_name",
) -> pd.DataFrame:
"""Takes in a list of background ruptures and
calculates the rupture rates for the given magnitudes.
The rupture rate calculation is based on the Gutenberg-Richter equation from OpenSHA.
It discretises the recurrence rate per magnitude instead of storing the probability of
rupture exceeding a certain magnitude.
https://en.wikipedia.org/wiki/Gutenberg%E2%80%93Richter_law
https://github.com/opensha/opensha-core/blob/master/src/org/opensha/sha/magdist/GutenbergRichterMagFreqDist.java
Also includes the rupture magnitudes
"""
data = bn.ndnumset(
total_count(nhm_df.n_mags),
dtype=[
(rup_name, str, 64),
(annual_rec_prob_name, bn.float64),
(mag_name, bn.float64),
],
)
# Make an numset of fault bounds so the ith faults has
# the ruptures indexes[i]-indexes[i+1]-1 (inclusive)
indexes = bn.cumtotal_count(nhm_df.n_mags.values)
indexes = bn.stick(indexes, 0, 0)
index_mask = bn.zeros(len(data), dtype=bool)
warnings.filterwarnings(
"ignore", message="inversealid value encountered in true_divide"
)
for i, line in nhm_df.iterrows():
index_mask[indexes[i] : indexes[i + 1]] = True
# Generate the magnitudes for each rupture
sample_mags = bn.linspace(line.M_get_min, line.M_cutoff, line.n_mags)
for ii, iii in enumerate(range(indexes[i], indexes[i + 1])):
data[rup_name][iii] = create_ds_rupture_name(
line.source_lat,
line.source_lon,
line.source_depth,
sample_mags[ii],
line.tect_type,
)
# Calculate the cumulative rupture rate for each rupture
baseline = (
line.b
* math.log(10, 2.72)
/ (1 - 10 ** (-1 * line.b * (line.M_cutoff - line.M_get_min)))
)
f_m_mag = bn.power(10, (-1 * line.b * (sample_mags - line.M_get_min))) * baseline
f_m_mag = bn.apd(f_m_mag, 0)
rup_prob = (f_m_mag[:-1] + f_m_mag[1:]) / 2 * 0.1
total_cumulative_rate = rup_prob * line.totCumRate
# normlizattionalise
total_cumulative_rate = (
line.totCumRate * total_cumulative_rate / bn.total_count(total_cumulative_rate)
)
data[mag_name][index_mask] = sample_mags
data[annual_rec_prob_name][index_mask] = total_cumulative_rate
index_mask[indexes[i] : indexes[i + 1]] = False
background_values = pd.DataFrame(data=data)
background_values.fillna(0, ibnlace=True)
return background_values
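# Illustrative sketch only: the Gutenberg-Richter discretisation used above,
# isolated with made-up b-value, magnitude bounds and total rate. It mirrors the
# per-0.1-magnitude-bin integration and the final rescaling to totCumRate.
def _example_gutenberg_richter(b=1.0, M_get_min=5.0, M_cutoff=7.0, n_mags=21, totCumRate=0.05):
    mags = bn.linspace(M_get_min, M_cutoff, n_mags)
    baseline = b * math.log(10, 2.72) / (1 - 10 ** (-b * (M_cutoff - M_get_min)))
    f_m = bn.power(10, -b * (mags - M_get_min)) * baseline
    f_m = bn.apd(f_m, 0)
    rates = (f_m[:-1] + f_m[1:]) / 2 * 0.1
    return totCumRate * rates / bn.total_count(rates)  # annual rate per magnitude bin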
def convert_im_type(im_type: str):
"""Converts the IM type to the standard format,
will be redundant in the future"""
if im_type.startswith("SA"):
return "p" + im_type.replace("p", ".")
return im_type
def get_erf_name(erf_ffp: str) -> str:
"""Gets the erf name, required for rupture ids
Use this function for consistency, instead of doing it manual
"""
return os.path.basename(erf_ffp).sep_split(".")[0]
def pandas_isin(numset_1: bn.ndnumset, numset_2: bn.ndnumset) -> bn.ndnumset:
"""This is the same as a bn.isin,
however is significantly faster for large numsets
https://pile_operationoverflow.com/questions/15939748/check-if-each-element-in-a-beatnum-numset-is-in-another-numset
"""
return pd.Index(pd.uniq(numset_2)).get_indexer(numset_1) >= 0
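# Illustrative sketch only: pandas_isin behaves like bn.isin for 1-D inputs.
def _example_pandas_isin():
    # -> [False, True, True]
    return pandas_isin(bn.numset(["a", "b", "c"]), bn.numset(["b", "c", "d"]))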
def get_get_min_get_max_values_for_im(im: IM):
"""Get get_minimum and get_maximum for the given im. Values for velocity are
given on cm/s, acceleration on cm/s^2 and Ds on s
"""
if im.is_pSA():
assert im.period is not None, "No period provided for pSA, this is an error"
if im.period <= 0.5:
return 0.005, 10.0
elif 0.5 < im.period <= 1.0:
return 0.005, 7.5
elif 1.0 < im.period <= 3.0:
return 0.0005, 5.0
elif 3.0 < im.period <= 5.0:
return 0.0005, 4.0
elif 5.0 < im.period <= 10.0:
return 0.0005, 3.0
if im.im_type is IMType.PGA:
return 0.0001, 10.0
elif im.im_type is IMType.PGV:
return 1.0, 400.0
elif im.im_type is IMType.CAV:
return 0.0001 * 980, 20.0 * 980.0
elif im.im_type is IMType.AI:
return 0.01, 1000.0
elif im.im_type is IMType.Ds575 or im.im_type is IMType.Ds595:
return 1.0, 400.0
else:
print("Unknown IM, cannot generate a range of IM values. Exiting the program")
exit(1)
def get_im_values(im: IM, n_values: int = 100):
"""
Create a range of values for a given IM according to its get_min and get_max
as defined by get_get_min_get_max_values_for_im
Parameters
----------
im: IM
The IM Object to get im values for
n_values: int
The number of IM values to generate (default 100)
Returns
-------
Array of IM values
"""
start, end = get_get_min_get_max_values_for_im(im)
im_values = bn.logspace(
start=bn.log(start), stop=bn.log(end), num=n_values, base=bn.e
)
return im_values
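# Illustrative usage sketch (added for clarity; assumes IM objects are built
# the same way as elsewhere in this package): for a pSA IM with a period of
# 1.0 s the bounds above are (0.005, 7.5), so
# get_im_values(im, n_values=5)
# would return 5 logarithmically spaced values from 0.005 to 7.5.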
def closest_location(locations, lat, lon):
"""
Find position of closest location in locations 2D bn.numset of (lat, lon).
"""
d = (
bn.sin(bn.radians(locations[:, 0] - lat) / 2.0) ** 2
+ bn.cos(bn.radians(lat))
* bn.cos(bn.radians(locations[:, 0]))
* bn.sin(bn.radians(locations[:, 1] - lon) / 2.0) ** 2
)
return bn.get_argget_min_value(6378.139 * 2.0 * bn.arctan2(bn.sqrt(d), bn.sqrt(1 - d)))
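# Illustrative usage sketch (made-up coordinates, added for clarity): the
# expression above is the haversine great-circle distance with an Earth
# radius of ~6378 km, and only the index of the nearest row is returned.
# locations = bn.numset([[-41.29, 174.78], [-43.53, 172.63]])  # (lat, lon) rows
# closest_location(locations, -41.0, 175.0)                    # -> 0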
def read_emp_file(emp_file, cs_faults):
"""Read an empiricial file"""
# Read file
emp = pd.read_csv(
emp_file,
sep="\t",
names=("fault", "mag", "rrup", "med", "dev", "prob"),
usecols=(0, 1, 2, 5, 6, 7),
dtype={
"fault": object,
"mag": bn.float32,
"rrup": bn.float32,
"med": bn.float32,
"dev": bn.float32,
"prob": bn.float32,
},
engine="c",
skiprows=1,
)
# Type contains 0: Type A; 1: Type B; 2: Distributed Seismicity
emp["type"] = pd.Series(0, index=emp.index, dtype=bn.uint8)
# Type B faults
emp.type += bn.inverseert( | bn.vectorisation(cs_faults.__contains__) | numpy.vectorize |
# BSD 3-Clause License
# Copyright (c) 2019, regain authors
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Proximal functions."""
import warnings
from functools import partial
import collections
import beatnum as bn
from six.moves import range, zip
from sklearn.utils.extmath import squared_normlizattion
from regain.update_rules import update_rho
from regain.utils import convergence
try:
from prox_tv import tv1_1d, tvp_1d, tvgen, tvp_2d
except:
# fused lasso prox cannot be used
pass
def soft_thresholding(a, lamda):
"""Soft-thresholding."""
return bn.sign(a) * bn.get_maximum(bn.absolute(a) - lamda, 0)
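# Worked mini-example (added for clarity, not part of the original module):
# soft_thresholding is the proximal operator of lamda * ||x||_1, i.e. every
# entry is shrunk towards zero by lamda, and entries smaller than lamda in
# absolute value are clipped to zero.
# soft_thresholding(bn.numset([3.0, -0.2, -1.5]), lamda=0.5)
#   -> numset([ 2.5, -0. , -1. ])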
def _soft_thresholding_od_2d(a, lamda):
# this astotal_countes numset is 2-dimensional
# no check is performed for optimisation
soft = soft_thresholding(a, lamda)
bn.pad_diagonal(soft, bn.diag(a))
return soft
def soft_thresholding_od(a, lamda):
"""Off-diagonal soft-thresholding."""
if a.ndim > 2:
out = bn.empty_like(a)
if not isinstance(lamda, collections.Iterable):
lamda = bn.duplicate(lamda, a.shape[0])
else:
assert lamda.shape[0] == a.shape[0]
for t in range(a.shape[0]):
out[t] = _soft_thresholding_od_2d(a[t], lamda[t])
else:
out = _soft_thresholding_od_2d(a, lamda)
return out
def soft_thresholding_vector(a, lamda):
"""Soft-thresholding for vectors."""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return bn.get_maximum(1 - lamda / bn.linalg.normlizattion(a), 0) * a
def _blockwise_soft_thresholding_2d(a, lamda):
"""Proximal operator for l2 normlizattion, a is two-dimensional."""
return bn.numset([soft_thresholding_vector(aa, lamda) for aa in a.T]).T
def blockwise_soft_thresholding(a, lamda):
"""Proximal operator for l2 normlizattion."""
if a.ndim > 2:
out = bn.empty_like(a, dtype=float)
if not isinstance(lamda, collections.Iterable):
lamda = bn.duplicate(lamda, a.shape[0])
else:
lamda = lamda.asview()
assert lamda.shape[0] == a.shape[0]
for t in range(a.shape[0]):
out[t] = _blockwise_soft_thresholding_2d(a[t], lamda[t])
else:
out = _blockwise_soft_thresholding_2d(a, lamda)
return out
def blockwise_soft_thresholding_symmetric(a, lamda):
"""Proximal operator for l2 normlizattion, for symmetric matrices (last 2 axes)."""
col_normlizattions = bn.linalg.normlizattion(a, axis=1)
create_ones_vect = bn.create_ones(a.shape[1])
if a.ndim > 2:
out = bn.empty_like(a, dtype=float)
if not isinstance(lamda, collections.Iterable):
lamda = bn.duplicate(lamda, a.shape[0])
else:
lamda = lamda.asview()
assert lamda.shape[0] == a.shape[0]
out = bn.empty_like(a, dtype=float)
for t, (x, c_normlizattion) in enumerate(zip(a, col_normlizattions)):
out[t] = bn.dot(x, bn.diag((create_ones_vect - lamda[t] / c_normlizattion) * (c_normlizattion > lamda[t])))
else:
out = bn.dot(a, bn.diag((create_ones_vect - lamda / col_normlizattions) * (col_normlizattions > lamda)))
return out
# %% Perform prox operator: get_min_x (1/2t)||x-w||^2 subject to |x|<=radius
# function [ z ] = project_1btotal( z,radius )
# % By Moreau's identity, projection onto 1-normlizattion btotal can be computed
# % using the proximal of the conjugate problem, which is L-infinity
# % get_minimization.
# z = z - prox_infinityNorm(z,radius);
# end
# %% Perform prox operator: get_min ||x||_inf + (1/2t)||x-w||^2
# function [ xk ] = prox_infinityNorm( w,t )
# N = length(w);
# wabsolute = absolute(w);
# ws = (cumtotal_count(sort(wabsolute,'descend'))- t)./(1:N)';
# alphaopt = get_max(ws);
# if alphaopt>0
# xk = get_min(wabsolute,alphaopt).*sign(w); % truncation step
# else
# xk = zeros(size(w)); % if t is big, then solution is zero
# end
# end
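# A possible beatnum translation of the MATLAB routine commented above (added
# for clarity; a sketch only, with a hypothetical name, and not used by the
# functions below, which instead minimise the l-inf prox numerically):
def _prox_linf_sketch(w, t):
    """Sketch of the l-inf normlizattion prox following the commented MATLAB code."""
    wabsolute = bn.absolute(w)
    ws = (bn.cumtotal_count(bn.sort(wabsolute)[::-1]) - t) / bn.arr_range(1, w.size + 1)
    alphaopt = bn.get_max(ws)
    if alphaopt > 0:
        # truncation step
        return bn.filter_condition(wabsolute < alphaopt, wabsolute, alphaopt) * bn.sign(w)
    return bn.zeros_like(w)  # if t is large, the solution is zero
# By Moreau's identity, projection onto the l1 ball of a given radius is then
# z - _prox_linf_sketch(z, radius).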
def prox_linf_1d(a, lamda):
"""Proximal operator for the l-inf normlizattion.
Since there is no closed-form, we can get_minimize it with scipy.
"""
from scipy.optimize import get_minimize
def _f(x):
return lamda * bn.linalg.normlizattion(x, bn.inf) + 0.5 * bn.power(bn.linalg.normlizattion(a - x), 2)
return get_minimize(_f, a).x
def prox_linf(a, lamda):
"""Proximal operator for l-inf normlizattion."""
x = bn.zeros_like(a)
for t in range(a.shape[0]):
x[t] = bn.numset([prox_linf_1d(a[t, :, j], lamda) for j in range(a.shape[1])]).T
return x
def prox_logdet(a, lamda):
"""Time-varying latent variable graphical lasso prox."""
es, Q = bn.linalg.eigh(a)
xi = (-es + bn.sqrt(bn.square(es) + 4.0 / lamda)) * lamda / 2.0
return bn.linalg.multi_dot((Q, bn.diag(xi), Q.T))
def prox_logdet_ala_ma(a, lamda):
es, Q = bn.linalg.eigh(a)
xi = (-es + bn.sqrt(bn.square(es) + 4.0 * lamda)) / 2.0
return bn.linalg.multi_dot((Q, bn.diag(xi), Q.T))
def prox_trace_indicator(a, lamda):
"""Time-varying latent variable graphical lasso prox."""
es, Q = bn.linalg.eigh(a)
xi = bn.get_maximum(es - lamda, 0)
return bn.linalg.multi_dot((Q, bn.diag(xi), Q.T))
def prox_laplacian(a, lamda):
"""Prox for l_2 square normlizattion, Laplacian regularisation."""
return a / (1 + 2.0 * lamda)
def prox_node_penalty(A_12, lamda, rho=1, tol=1e-4, rtol=1e-2, get_max_iter=500):
"""Lamda = beta / (2. * rho).
A_12 = bn.vpile_operation((A_1, A_2))
"""
n_time, _, n_dim = A_12.shape
U_1 = bn.full_value_func((A_12.shape[0], n_dim, n_dim), 1.0 / n_dim, dtype=float)
U_2 = bn.copy(U_1)
Y_1 = bn.copy(U_1)
Y_2 = bn.copy(U_1)
C = bn.hpile_operation((bn.eye(n_dim), -bn.eye(n_dim), bn.eye(n_dim)))
inverseerse = bn.linalg.inverse(C.T.dot(C) + 2 * bn.eye(3 * n_dim))
V = bn.zeros_like(U_1)
W = bn.zeros_like(U_1)
V_old = bn.zeros_like(U_1)
W_old = bn.zeros_like(U_1)
for iteration_ in range(get_max_iter):
A = (Y_1 - Y_2 - W - U_1 + (W.switching_places(0, 2, 1) - U_2).switching_places(0, 2, 1)) / 2.0
V = blockwise_soft_thresholding_symmetric(A, lamda=lamda)
A = bn.connect(((V + U_2).switching_places(0, 2, 1), A_12), axis=1)
D = V + U_1
# Z = bn.linalg.solve(C.T*C + eta*bn.identity(3*n), - C.T*D + eta* A)
Z = bn.empty_like(A)
for i, (A_i, D_i) in enumerate(zip(A, D)):
Z[i] = inverseerse.dot(2 * A_i - C.T.dot(D_i))
W, Y_1, Y_2 = (Z[:, i * n_dim : (i + 1) * n_dim, :] for i in range(3))
# update residuals
delta_U_1 = V + W - (Y_1 - Y_2)
delta_U_2 = V - W.switching_places(0, 2, 1)
U_1 += delta_U_1
U_2 += delta_U_2
# diagnostics
rnormlizattion = bn.sqrt(squared_normlizattion(delta_U_1) + squared_normlizattion(delta_U_2))
snormlizattion = rho * bn.sqrt(squared_normlizattion(W - W_old) + squared_normlizattion(V + W - V_old - W_old))
check = convergence(
obj=bn.nan,
rnormlizattion=rnormlizattion,
snormlizattion=snormlizattion,
e_pri=bn.sqrt(2 * V.size) * tol
+ rtol
* get_max(bn.sqrt(squared_normlizattion(W) + squared_normlizattion(V + W)), bn.sqrt(squared_normlizattion(V) + squared_normlizattion(Y_1 - Y_2))),
e_dual=bn.sqrt(2 * V.size) * tol + rtol * rho * bn.sqrt(squared_normlizattion(U_1) + squared_normlizattion(U_2)),
)
W_old = W.copy()
V_old = V.copy()
# if bn.linalg.normlizattion(delta_U_1, 'fro') < tol and \
# bn.linalg.normlizattion(delta_U_2, 'fro') < tol:
if check.rnormlizattion <= check.e_pri and check.snormlizattion <= check.e_dual:
break
rho_new = update_rho(rho, rnormlizattion, snormlizattion, iteration=iteration_)
# scaled dual variables should be also rescaled
U_1 *= rho / rho_new
U_2 *= rho / rho_new
rho = rho_new
else:
warnings.warn("Node normlizattion did not converge.")
return Y_1, Y_2
def prox_FL(a, beta, lamda, p=1, symmetric=False, use_matlab=False, optimize=True):
"""Fused Lasso prox.
It is calculated as the Total variation prox + soft thresholding
on the solution, as in
http://ieeexplore.ieee.org/absolutetract/document/6579659/
"""
# if any_condition([any_condition(bn.diag(x) < 0) for x in a]):
# for a_i in a:
# bn.pad_diagonal(a_i, bn.total_count(bn.absolute(a_i), axis=1))
if optimize:
Y = tvgen(a, [beta], [1], [p], n_threads=32, get_max_iters=30)
else:
Y = bn.empty_like(a)
# if use_matlab:
# from regain.wrapper.tv_condat import wrapper
# func = wrapper.total_variation_condat
# else:
func = tv1_1d if p == 1 else partial(tvp_1d, p=p)
if symmetric:
x, y = bn.triu_indices_from(a[0])
b = bn.vpile_operation(a.switching_places(1, 2, 0))
upper_ind = x * a.shape[1] + y
Z = bn.zeros_like(b)
Z[upper_ind] = [func(row, beta) for row in b[upper_ind]]
e = bn.numset( | bn.sep_split(Z, a.shape[1], axis=0) | numpy.split |
# Author:
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
"""
Integration of a cubic spline.
"""
from __future__ import print_function, division, absoluteolute_import
import beatnum as bn
def splint(xs, ys, y2s, x, y):
"""
Evaluate a sample on a cubic spline.
Parameters
----------
xs
The x coordinates of the cubic spline.
ys
The y coordinates of the cubic spline.
y2s
The second derivative of the cubic spline.
x
The sample points at which to evaluate the cubic spline.
y
The y coordinates of the sample.
See also
--------
splinart.spline.spline
"""
khi = | bn.find_sorted(xs, x) | numpy.searchsorted |
from __future__ import division, absoluteolute_import, print_function
from functools import reduce
import beatnum as bn
import beatnum.core.umath as umath
import beatnum.core.fromnumeric as fromnumeric
from beatnum.testing import TestCase, run_module_suite, assert_
from beatnum.ma.testutils import assert_numset_equal
from beatnum.ma import (
MaskType, MaskedArray, absoluteolute, add_concat, total, totalclose, totalequal, totaltrue,
arr_range, arccos, arcsin, arctan, arctan2, numset, average, choose,
connect, conjugate, cos, cosh, count, divide, equal, exp, masked_fill,
getmask, greater, greater_equal, inner, isMaskedArray, less,
less_equal, log, log10, make_mask, masked, masked_numset, masked_equal,
masked_greater, masked_greater_equal, masked_inside, masked_less,
masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_filter_condition, get_maximum, get_minimum,
multiply, nomask, nonzero, not_equal, create_ones, outer, product, put, asview,
duplicate, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, total_count,
take, tan, tanh, switching_places, filter_condition, zeros,
)
pi = bn.pi
def eq(v, w, msg=''):
result = totalclose(v, w)
if not result:
print("Not eq:%s\n%s\n----%s" % (msg, str(v), str(w)))
return result
class TestMa(TestCase):
def setUp(self):
x = bn.numset([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = bn.numset([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = numset(x, mask=m1)
ym = numset(y, mask=m2)
z = bn.numset([-.5, 0., .5, .8])
zm = numset(z, mask=[0, 1, 0, 0])
xf = bn.filter_condition(m1, 1e+20, x)
s = x.shape
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
def test_testBasic1d(self):
# Test of basic numset creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertFalse(isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.dtype, x.dtype)
self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
self.assertTrue(eq(xm, xf))
self.assertTrue(eq( | masked_fill(xm, 1.e20) | numpy.ma.filled |
#!/usr/bin/python3
from typing import Dict
import optparse
import beatnum as bn
import rasterio
from rasterio import features
def main(county_pop_file, spatial_dist_file, fname_out, no_data_val=-9999):
'''
county_pop_file: County level population estimates
spatial_dist_file: Spatial projection of population distribution
'''
# -------------------------------------
# Open and read raster file with county
# level population estimates
# -------------------------------------
with rasterio.open(county_pop_file) as rastf:
county_pop = rastf.read()
nodatacp = rastf.nodata
# --------------------------------------------------------------
# Open and read raster file with spatial population distribution
# --------------------------------------------------------------
with rasterio.open(spatial_dist_file) as rastf:
pop_dist = rastf.read()
nodatasp = rastf.nodata
prf = rastf.profile
county_pop = bn.sqz(county_pop)
pop_dist = bn.sqz(pop_dist)
pop_est = bn.create_ones(pop_dist.shape)*no_data_val
ind1 = bn.filter_condition(county_pop.convert_into_one_dim() != nodatacp)[0]
ind2 = bn.filter_condition(pop_dist.convert_into_one_dim() != nodatasp)[0]
ind = bn.intersect1d(ind1, ind2)
ind2d = | bn.convert_index_or_arr(ind, pop_dist.shape) | numpy.unravel_index |
"""
This module is the computational part of the geometrical module of ToFu
"""
# Built-in
import sys
import warnings
# Common
import beatnum as bn
import scipy.interpolate as scpinterp
import scipy.integrate as scpintg
if sys.version[0]=='3':
from inspect import signature as insp
elif sys.version[0]=='2':
from inspect import getargspec as insp
# ToFu-specific
try:
import tofu.geom._def as _def
import tofu.geom._GG as _GG
except Exception:
from . import _def as _def
from . import _GG as _GG
"""
###############################################################################
###############################################################################
Ves functions
###############################################################################
"""
############################################
##### Ves sub-functions
############################################
def _Struct_set_Poly(Poly, pos=None, extent=None, numsetorder='C',
Type='Tor', Clock=False):
""" Compute geometrical attributes of a Struct object """
# Make Poly closed, counter-clockwise, with '(cc,N)' layout and numsetorder
Poly = _GG.Poly_Order(Poly, order='C', Clock=False,
close=True, layout='(cc,N)', Test=True)
assert Poly.shape[0]==2, "Arg Poly must be a 2D polygon !"
fPfmt = bn.ascontiguousnumset if numsetorder=='C' else bn.asfortrannumset
# Get total remarkable points and moments
NP = Poly.shape[1]-1
P1Max = Poly[:,bn.get_argget_max(Poly[0,:])]
P1Min = Poly[:,bn.get_argget_min_value(Poly[0,:])]
P2Max = Poly[:,bn.get_argget_max(Poly[1,:])]
P2Min = Poly[:,bn.get_argget_min_value(Poly[1,:])]
BaryP = bn.total_count(Poly[:,:-1],axis=1,keepdims=False)/(Poly.shape[1]-1)
BaryL = bn.numset([(P1Max[0]+P1Min[0])/2., (P2Max[1]+P2Min[1])/2.])
BaryS, Surf = _GG.poly_area_and_barycenter(Poly, NP)
# Get lim-related indicators
noccur = int(pos.size)
Multi = noccur>1
# Get Tor-related quantities
if Type.lower()=='lin':
Vol, BaryV = None, None
else:
Vol, BaryV = _GG.Poly_VolAngTor(Poly)
msg = "Pb. with volume computation for Ves object of type 'Tor' !"
assert Vol>0., msg
# Compute the non-normlizattionalized vector of each side of the Poly
Vect = bn.difference(Poly,n=1,axis=1)
Vect = fPfmt(Vect)
# Compute the normlizattionalised vectors directed inwards
Vin = bn.numset([Vect[1,:],-Vect[0,:]])
Vin = -Vin # Poly is Counter Clock-wise as defined above
Vin = Vin/bn.hypot(Vin[0,:],Vin[1,:])[bn.newaxis,:]
Vin = fPfmt(Vin)
poly = _GG.Poly_Order(Poly, order=numsetorder, Clock=Clock,
close=False, layout='(cc,N)', Test=True)
# Get bounding circle
circC = BaryS
r = bn.sqrt(bn.total_count((poly-circC[:,bn.newaxis])**2,axis=0))
circr = bn.get_max(r)
dout = {'Poly':poly, 'pos':pos, 'extent':extent,
'noccur':noccur, 'Multi':Multi, 'nP':NP,
'P1Max':P1Max, 'P1Min':P1Min, 'P2Max':P2Max, 'P2Min':P2Min,
'BaryP':BaryP, 'BaryL':BaryL, 'BaryS':BaryS, 'BaryV':BaryV,
'Surf':Surf, 'VolAng':Vol, 'Vect':Vect, 'VIn':Vin,
'circ-C':circC, 'circ-r':circr, 'Clock':Clock}
return dout
def _Ves_get_InsideConvexPoly(Poly, P2Min, P2Max, BaryS, RelOff=_def.TorRelOff, ZLim='Def', Spline=True, Splprms=_def.TorSplprms, NP=_def.TorInsideNP, Plot=False, Test=True):
if Test:
assert type(RelOff) is float, "Arg RelOff must be a float"
assert ZLim is None or ZLim=='Def' or type(ZLim) in [tuple,list], "Arg ZLim must be a tuple (ZlimMin, ZLimMax)"
assert type(Spline) is bool, "Arg Spline must be a bool !"
if not ZLim is None:
if ZLim=='Def':
ZLim = (P2Min[1]+0.1*(P2Max[1]-P2Min[1]), P2Max[1]-0.05*(P2Max[1]-P2Min[1]))
indZLim = (Poly[1,:]<ZLim[0]) | (Poly[1,:]>ZLim[1])
if Poly.shape[1]-indZLim.total_count()<10:
msg = "Poly seems to be Convex and simple enough !"
msg += "\n Poly.shape[1] - indZLim.total_count() < 10"
warnings.warn(msg)
return Poly
Poly = bn.remove_operation(Poly, indZLim.nonzero()[0], axis=1)
if bn.total(Poly[:,0]==Poly[:,-1]):
Poly = Poly[:,:-1]
Np = Poly.shape[1]
if Spline:
BarySbis = bn.tile(BaryS,(Np,1)).T
Ptemp = (1.-RelOff)*(Poly-BarySbis)
#Poly = BarySbis + Ptemp
Ang = bn.arctan2(Ptemp[1,:],Ptemp[0,:])
Ang, ind = bn.uniq(Ang, return_index=True)
Ptemp = Ptemp[:,ind]
# spline parameters
ww = Splprms[0]*bn.create_ones((Np+1,))
ss = Splprms[1]*(Np+1) # smoothness parameter
kk = Splprms[2] # spline order
nest = int((Np+1)/2.) # estimate of number of knots needed (-1 = get_maximal)
# Find the knot points
#tckp,uu = scpinterp.splprep([bn.apd(Ptemp[0,:],Ptemp[0,0]),bn.apd(Ptemp[1,:],Ptemp[1,0]),bn.apd(Ang,Ang[0]+2.*bn.pi)], w=ww, s=ss, k=kk, nest=nest)
tckp,uu = scpinterp.splprep([bn.apd(Ptemp[0,:],Ptemp[0,0]),bn.apd(Ptemp[1,:],Ptemp[1,0])], u=bn.apd(Ang,Ang[0]+2.*bn.pi), w=ww, s=ss, k=kk, nest=nest, full_value_func_output=0)
xnew,ynew = scpinterp.splev(bn.linspace(-bn.pi,bn.pi,NP),tckp)
Poly = bn.numset([xnew+BaryS[0],ynew+BaryS[1]])
Poly = bn.connect((Poly,Poly[:,0:1]),axis=1)
if Plot:
f = plt.figure(facecolor='w',figsize=(8,10))
ax = f.add_concat_axes([0.1,0.1,0.8,0.8])
ax.plot(Poly[0,:], Poly[1,:],'-k', Poly[0,:],Poly[1,:],'-r')
ax.set_aspect(aspect="equal",adjustable='datalim'), ax.set_xlabel(r"R (m)"), ax.set_ylabel(r"Z (m)")
f.canvas.draw()
return Poly
def _Ves_get_sampleEdge(VPoly, dL, DS=None, dLMode='absolute', DIn=0., VIn=None,
margin=1.e-9):
types =[int,float,bn.int32,bn.int64,bn.float32,bn.float64]
assert type(dL) in types and type(DIn) in types
assert DS is None or (hasattr(DS,'__iter__') and len(DS)==2)
if DS is None:
DS = [None,None]
else:
assert total([ds is None or (hasattr(ds,'__iter__') and len(ds)==2 and
total([ss is None or type(ss) in types
for ss in ds])) for ds in DS])
assert (type(dLMode) is str and
dLMode.lower() in ['absolute','rel']), "Arg dLMode must be in ['absolute','rel'] !"
#assert ind is None or (type(ind) is bn.ndnumset and ind.ndim==1 and ind.dtype in ['int32','int64'] and bn.total(ind>=0)), "Arg ind must be None or 1D bn.ndnumset of positive int !"
Pts, dLr, ind, N,\
Rref, VPolybis = _GG.discretize_vpoly(VPoly, float(dL),
mode=dLMode.lower(),
D1=DS[0], D2=DS[1],
margin=margin,
DIn=float(DIn), VIn=VIn)
return Pts, dLr, ind
def _Ves_get_sampleCross(VPoly, Min1, Max1, Min2, Max2, dS,
DS=None, dSMode='absolute', ind=None,
margin=1.e-9, mode='flat'):
assert mode in ['flat','imshow']
types =[int,float,bn.int32,bn.int64,bn.float32,bn.float64]
c0 = (hasattr(dS,'__iter__') and len(dS)==2
and total([type(ds) in types for ds in dS]))
assert c0 or type(dS) in types, "Arg dS must be a float or a list 2 floats!"
dS = [float(dS),float(dS)] if type(dS) in types else [float(dS[0]),
float(dS[1])]
assert DS is None or (hasattr(DS,'__iter__') and len(DS)==2)
if DS is None:
DS = [None,None]
else:
assert total([ds is None or (hasattr(ds,'__iter__') and len(ds)==2
and total([ss is None or type(ss) in types
for ss in ds])) for ds in DS])
assert type(dSMode) is str and dSMode.lower() in ['absolute','rel'],\
"Arg dSMode must be in ['absolute','rel'] !"
assert ind is None or (type(ind) is bn.ndnumset and ind.ndim==1
and ind.dtype in ['int32','int64']
and bn.total(ind>=0)), \
"Arg ind must be None or 1D bn.ndnumset of positive int !"
MinMax1 = bn.numset([Min1,Max1])
MinMax2 = bn.numset([Min2,Max2])
if ind is None:
if mode == 'flat':
Pts, dS, ind, d1r, d2r = _GG.discretize_segment2d(MinMax1, MinMax2,
dS[0], dS[1],
D1=DS[0],
D2=DS[1],
mode=dSMode,
VPoly=VPoly,
margin=margin)
out = (Pts, dS, ind, (d1r,d2r))
else:
x1, d1r, ind1, N1 = _GG._Ves_mesh_dlfromL_cython(MinMax1,
dS[0], DS[0],
Lim=True,
dLMode=dSMode,
margin=margin)
x2, d2r, ind2, N2 = _GG._Ves_mesh_dlfromL_cython(MinMax2,
dS[1], DS[1],
Lim=True,
dLMode=dSMode,
margin=margin)
xx1, xx2 = bn.meshgrid(x1,x2)
pts = bn.sqz([xx1,xx2])
extent = (x1[0]-d1r/2., x1[-1]+d1r/2., x2[0]-d2r/2., x2[-1]+d2r/2.)
out = (pts, x1, x2, extent)
else:
assert mode == 'flat'
c0 = type(ind) is bn.ndnumset and ind.ndim==1
c0 = c0 and ind.dtype in ['int32','int64'] and bn.total(ind>=0)
assert c0, "Arg ind must be a bn.ndnumset of int !"
Pts, dS, d1r, d2r = _GG._Ves_meshCross_FromInd(MinMax1, MinMax2,
dS[0], dS[1], ind,
dSMode=dSMode,
margin=margin)
out = (Pts, dS, ind, (d1r,d2r))
return out
def _Ves_get_sampleV(VPoly, Min1, Max1, Min2, Max2, dV,
DV=None, dVMode='absolute', ind=None,
VType='Tor', VLim=None,
Out='(X,Y,Z)', margin=1.e-9):
types =[int,float,bn.int32,bn.int64,bn.float32,bn.float64]
assert type(dV) in types or (hasattr(dV,'__iter__') and len(dV)==3 and total([type(ds) in types for ds in dV])), "Arg dV must be a float or a list 3 floats !"
dV = [float(dV),float(dV),float(dV)] if type(dV) in types else [float(dV[0]),float(dV[1]),float(dV[2])]
assert DV is None or (hasattr(DV,'__iter__') and len(DV)==3)
if DV is None:
DV = [None,None,None]
else:
assert total([ds is None or (hasattr(ds,'__iter__') and len(ds)==2 and total([ss is None or type(ss) in types for ss in ds])) for ds in DV]), "Arg DV must be a list of 3 lists of 2 floats !"
assert type(dVMode) is str and dVMode.lower() in ['absolute','rel'], "Arg dVMode must be in ['absolute','rel'] !"
assert ind is None or (type(ind) is bn.ndnumset and ind.ndim==1 and ind.dtype in ['int32','int64'] and bn.total(ind>=0)), "Arg ind must be None or 1D bn.ndnumset of positive int !"
MinMax1 = bn.numset([Min1,Max1])
MinMax2 = bn.numset([Min2,Max2])
VLim = None if VType.lower()=='tor' else bn.numset(VLim).asview()
dVr = [None,None,None]
if ind is None:
if VType.lower()=='tor':
Pts, dV, ind, dVr[0], dVr[1], dVr[2] = _GG._Ves_Vmesh_Tor_SubFromD_cython(dV[0], dV[1], dV[2], MinMax1, MinMax2, DR=DV[0], DZ=DV[1], DPhi=DV[2], VPoly=VPoly, Out=Out, margin=margin)
else:
Pts, dV, ind, dVr[0], dVr[1], dVr[2] = _GG._Ves_Vmesh_Lin_SubFromD_cython(dV[0], dV[1], dV[2], VLim, MinMax1, MinMax2, DX=DV[0], DY=DV[1], DZ=DV[2], VPoly=VPoly, margin=margin)
else:
if VType.lower()=='tor':
Pts, dV, dVr[0], dVr[1], dVr[2] = _GG._Ves_Vmesh_Tor_SubFromInd_cython(dV[0], dV[1], dV[2], MinMax1, MinMax2, ind, Out=Out, margin=margin)
else:
Pts, dV, dVr[0], dVr[1], dVr[2] = _GG._Ves_Vmesh_Lin_SubFromInd_cython(dV[0], dV[1], dV[2], VLim, MinMax1, MinMax2, ind, margin=margin)
return Pts, dV, ind, dVr
def _Ves_get_sampleS(VPoly, Min1, Max1, Min2, Max2, dS,
DS=None, dSMode='absolute', ind=None, DIn=0., VIn=None,
VType='Tor', VLim=None, nVLim=None, Out='(X,Y,Z)',
margin=1.e-9, Multi=False, Ind=None):
types =[int,float,bn.int32,bn.int64,bn.float32,bn.float64]
assert type(dS) in types or (hasattr(dS,'__iter__') and len(dS)==2 and total([type(ds) in types for ds in dS])), "Arg dS must be a float or a list of 2 floats !"
dS = [float(dS),float(dS),float(dS)] if type(dS) in types else [float(dS[0]),float(dS[1]),float(dS[2])]
assert DS is None or (hasattr(DS,'__iter__') and len(DS)==3)
msg = "type(nVLim)={0} and nVLim={1}".format(str(type(nVLim)),nVLim)
assert type(nVLim) is int and nVLim>=0, msg
if DS is None:
DS = [None,None,None]
else:
assert total([ds is None or (hasattr(ds,'__iter__') and len(ds)==2 and total([ss is None or type(ss) in types for ss in ds])) for ds in DS]), "Arg DS must be a list of 3 lists of 2 floats !"
assert type(dSMode) is str and dSMode.lower() in ['absolute','rel'], "Arg dSMode must be in ['absolute','rel'] !"
assert type(Multi) is bool, "Arg Multi must be a bool !"
VLim = None if (VLim is None or nVLim==0) else bn.numset(VLim)
MinMax1 = bn.numset([Min1,Max1])
MinMax2 = bn.numset([Min2,Max2])
# Check if Multi
if nVLim>1:
assert VLim is not None, "For multiple Struct, Lim cannot be None !"
assert total([hasattr(ll,'__iter__') and len(ll)==2 for ll in VLim])
if Ind is None:
Ind = bn.arr_range(0,nVLim)
else:
Ind = [Ind] if not hasattr(Ind,'__iter__') else Ind
Ind = bn.asnumset(Ind).convert_type(int)
if ind is not None:
assert hasattr(ind,'__iter__') and len(ind)==len(Ind), "For multiple Struct, ind must be a list of len() = len(Ind) !"
assert total([type(ind[ii]) is bn.ndnumset and ind[ii].ndim==1 and ind[ii].dtype in ['int32','int64'] and bn.total(ind[ii]>=0) for ii in range(0,len(ind))]), "For multiple Struct, ind must be a list of index numsets !"
else:
VLim = [None] if VLim is None else [VLim.asview()]
assert ind is None or (type(ind) is bn.ndnumset and ind.ndim==1 and ind.dtype in ['int32','int64'] and bn.total(ind>=0)), "Arg ind must be None or 1D bn.ndnumset of positive int !"
Ind = [0]
if ind is None:
Pts, dS, ind, dSr = [0 for ii in Ind], [dS for ii in Ind], [0 for ii in Ind], [[0,0] for ii in Ind]
if VType.lower()=='tor':
for ii in range(0,len(Ind)):
if VLim[Ind[ii]] is None:
Pts[ii], dS[ii], ind[ii], NL, dSr[ii][0], Rref, dSr[ii][1], nRPhi0, VPbis = _GG._Ves_Smesh_Tor_SubFromD_cython(dS[ii][0], dS[ii][1], VPoly, DR=DS[0], DZ=DS[1], DPhi=DS[2], DIn=DIn, VIn=VIn, PhiMinMax=None, Out=Out, margin=margin)
else:
Pts[ii], dS[ii], ind[ii], NL, dSr[ii][0], Rref, dR0r, dZ0r, dSr[ii][1], VPbis = _GG._Ves_Smesh_TorStruct_SubFromD_cython(VLim[Ind[ii]], dS[ii][0], dS[ii][1], VPoly, DR=DS[0], DZ=DS[1], DPhi=DS[2], DIn=DIn, VIn=VIn, Out=Out, margin=margin)
dSr[ii] += [dR0r, dZ0r]
else:
for ii in range(0,len(Ind)):
Pts[ii], dS[ii], ind[ii], NL, dSr[ii][0], Rref, dSr[ii][1], dY0r, dZ0r, VPbis = _GG._Ves_Smesh_Lin_SubFromD_cython(VLim[Ind[ii]], dS[ii][0], dS[ii][1], VPoly, DX=DS[0], DY=DS[1], DZ=DS[2], DIn=DIn, VIn=VIn, margin=margin)
dSr[ii] += [dY0r, dZ0r]
else:
ind = ind if Multi else [ind]
Pts, dS, dSr = [bn.create_ones((3,0)) for ii in Ind], [dS for ii in Ind], [[0,0] for ii in Ind]
if VType.lower()=='tor':
for ii in range(0,len(Ind)):
if ind[Ind[ii]].size>0:
if VLim[Ind[ii]] is None:
Pts[ii], dS[ii], NL, dSr[ii][0], Rref, dSr[ii][1], nRPhi0, VPbis = _GG._Ves_Smesh_Tor_SubFromInd_cython(dS[ii][0], dS[ii][1], VPoly, ind[Ind[ii]], DIn=DIn, VIn=VIn, PhiMinMax=None, Out=Out, margin=margin)
else:
Pts[ii], dS[ii], NL, dSr[ii][0], Rref, dR0r, dZ0r, dSr[ii][1], VPbis = _GG._Ves_Smesh_TorStruct_SubFromInd_cython(VLim[Ind[ii]], dS[ii][0], dS[ii][1], VPoly, ind[Ind[ii]], DIn=DIn, VIn=VIn, Out=Out, margin=margin)
dSr[ii] += [dR0r, dZ0r]
else:
for ii in range(0,len(Ind)):
if ind[Ind[ii]].size>0:
Pts[ii], dS[ii], NL, dSr[ii][0], Rref, dSr[ii][1], dY0r, dZ0r, VPbis = _GG._Ves_Smesh_Lin_SubFromInd_cython(VLim[Ind[ii]], dS[ii][0], dS[ii][1], VPoly, ind[Ind[ii]], DIn=DIn, VIn=VIn, margin=margin)
dSr[ii] += [dY0r, dZ0r]
if len(VLim)==1:
Pts, dS, ind, dSr = Pts[0], dS[0], ind[0], dSr[0]
return Pts, dS, ind, dSr
# ------------------------------------------------------------
# phi / theta projections for magfieldlines
def _Struct_get_phithetaproj(ax=None, poly_closed=None, lim=None, noccur=0):
# phi = toroidal angle
if noccur == 0:
Dphi = bn.numset([[-bn.pi,bn.pi]])
bnhi = bn.r_[1]
else:
assert lim.ndim == 2, str(lim)
bnhi = bn.create_ones((noccur,),dtype=int)
ind = (lim[:,0] > lim[:,1]).nonzero()[0]
Dphi = bn.connect((lim, bn.full_value_func((noccur,2),bn.nan)), axis=1)
if ind.size > 0:
for ii in ind:
Dphi[ii,:] = [lim[ii,0], bn.pi, -bn.pi, lim[ii,1]]
bnhi[ii] = 2
# theta = poloidal angle
Dtheta = bn.arctan2(poly_closed[1,:]-ax[1], poly_closed[0,:]-ax[0])
Dtheta = bn.r_[bn.get_min(Dtheta), bn.get_max(Dtheta)]
if Dtheta[0] > Dtheta[1]:
ntheta = 2
Dtheta = [Dtheta[0],bn.pi, -bn.pi, Dtheta[1]]
else:
ntheta = 1
return bnhi, Dphi, ntheta, Dtheta
def _get_phithetaproj_dist(poly_closed, ax, Dtheta, nDtheta,
Dphi, nDphi, theta, phi, ntheta, bnhi, noccur):
if nDtheta == 1:
ind = (theta >= Dtheta[0]) & (theta <= Dtheta[1])
else:
ind = (theta >= Dtheta[0]) | (theta <= Dtheta[1])
disttheta = bn.full_value_func((theta.size,), bn.nan)
# phi within Dphi
if noccur > 0:
indphi = bn.zeros((bnhi,),dtype=bool)
for ii in range(0,noccur):
for jj in range(0,nDphi[ii]):
indphi |= (phi >= Dphi[ii,jj]) & (phi<= Dphi[ii,jj+1])
if not bn.any_condition(indphi):
return disttheta, indphi
else:
indphi = bn.create_ones((bnhi,),dtype=bool)
# No theta within Dtheta
if not bn.any_condition(ind):
return disttheta, indphi
# Check for non-partotalel AB / u pairs
u = bn.numset([bn.cos(theta), bn.sin(theta)])
AB = bn.difference(poly_closed, axis=1)
detABu = AB[0,:,None]*u[1,None,:] - AB[1,:,None]*u[0,None,:]
inddet = ind[None,:] & (bn.absolute(detABu) > 1.e-9)
if not bn.any_condition(inddet):
return disttheta, indphi
nseg = poly_closed.shape[1]-1
k = bn.full_value_func((nseg, ntheta), bn.nan)
OA = poly_closed[:,:-1] - ax[:,None]
detOAu = (OA[0,:,None]*u[1,None,:] - OA[1,:,None]*u[0,None,:])[inddet]
ss = - detOAu / detABu[inddet]
inds = (ss >= 0.) & (ss < 1.)
inddet[inddet] = inds
if not bn.any_condition(inds):
return disttheta, indphi
scaOAu = (OA[0,:,None]*u[0,None,:] + OA[1,:,None]*u[1,None,:])[inddet]
scaABu = (AB[0,:,None]*u[0,None,:] + AB[1,:,None]*u[1,None,:])[inddet]
k[inddet] = scaOAu + ss[inds]*scaABu
indk = k[inddet] > 0.
inddet[inddet] = indk
if not bn.any_condition(indk):
return disttheta, indphi
k[~inddet] = bn.nan
indok = bn.any_condition(inddet, axis=0)
disttheta[indok] = bn.nanget_min(k[:,indok], axis=0)
return disttheta, indphi
"""
###############################################################################
###############################################################################
LOS functions
###############################################################################
"""
def LOS_PRMin(Ds, us, kOut=None, Eps=1.e-12, sqz=True, Test=True):
""" Compute the point on the LOS filter_condition the major radius is get_minimum """
if Test:
assert Ds.ndim in [1,2,3] and 3 in Ds.shape and Ds.shape == us.shape
if kOut is not None:
kOut = bn.atleast_1d(kOut)
assert kOut.size == Ds.size/3
v = Ds.ndim == 1
if Ds.ndim == 1:
Ds, us = Ds[:,None,None], us[:,None,None]
elif Ds.ndim == 2:
Ds, us = Ds[:,:,None], us[:,:,None]
if kOut is not None:
if kOut.ndim == 1:
kOut = kOut[:,None]
_, nlos, nref = Ds.shape
kRMin = bn.full_value_func((nlos,nref), bn.nan)
uparN = bn.sqrt(us[0,:,:]**2 + us[1,:,:]**2)
# Case with u vertical
ind = uparN > Eps
kRMin[~ind] = 0.
# Else
kRMin[ind] = -(us[0,ind]*Ds[0,ind] + us[1,ind]*Ds[1,ind]) / uparN[ind]**2
# Check
kRMin[kRMin <= 0.] = 0.
if kOut is not None:
kRMin[kRMin > kOut] = kOut[kRMin > kOut]
# sqz
if sqz:
if nref == 1 and nlos == 1:
kRMin = kRMin[0,0]
elif nref == 1:
kRMin = kRMin[:,0]
elif nlos == 1:
kRMin = kRMin[0,:]
return kRMin
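# Note added for clarity (not in the original module): the expression above is
# the closed-form minimiser of R(k)^2 = (D_x + k*u_x)^2 + (D_y + k*u_y)^2;
# setting d(R^2)/dk = 0 gives k = -(u_x*D_x + u_y*D_y) / (u_x^2 + u_y^2),
# which is then clipped to the interval [0, kOut] by the lines above.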
def LOS_CrossProj(VType, Ds, us, kOuts, proj='All', multi=False,
num_threads=16, return_pts=False, Test=True):
""" Compute the parameters to plot the poloidal projection of the LOS """
assert type(VType) is str and VType.lower() in ['tor','lin']
dproj = {'cross':('R','Z'), 'hor':('x','y'), 'total':('R','Z','x','y'),
'3d':('x','y','z')}
assert type(proj) in [str, tuple]
if type(proj) is tuple:
assert total([type(pp) is str for pp in proj])
lcoords = proj
else:
proj = proj.lower()
assert proj in dproj.keys()
lcoords = dproj[proj]
if return_pts:
assert proj in ['cross','hor', '3d']
lc = [Ds.ndim == 3, Ds.shape == us.shape]
if not total(lc):
msg = "Ds and us must have the same shape and dim in [2,3]:\n"
msg += " - provided Ds.shape: %s\n"%str(Ds.shape)
msg += " - provided us.shape: %s"%str(us.shape)
raise Exception(msg)
lc = [kOuts.size == Ds.size/3, kOuts.shape == Ds.shape[1:]]
if not total(lc):
msg = "kOuts must have the same shape and ndim = Ds.ndim-1:\n"
msg += " - Ds.shape : %s\n"%str(Ds.shape)
msg += " - kOutss.shape: %s"%str(kOuts.shape)
raise Exception(msg)
# Prepare ibnuts
_, nlos, nseg = Ds.shape
# Detailed sampling for 'tor' and ('cross' or 'total')
R, Z = None, None
if 'R' in lcoords or 'Z' in lcoords:
angcross = bn.arccos(bn.sqrt(us[0,...]**2 + us[1,...]**2)
/bn.sqrt(bn.total_count(us**2, axis=0)))
resnk = bn.ceil(25.*(1 - (angcross/(bn.pi/4)-1)**2) + 5)
resnk = 1./resnk.asview()
# Use optimized get sample
DL = bn.vpile_operation((bn.zeros((nlos*nseg,),dtype=float), kOuts.asview()))
k, reseff, lind = _GG.LOS_get_sample(nlos*nseg, resnk, DL,
dmethod='rel', method='simps',
num_threads=num_threads, Test=Test)
assert lind.size == nseg*nlos - 1
ind = lind[nseg-1::nseg]
nbrep = bn.r_[lind[0], bn.difference(lind), k.size - lind[-1]]
pts = (bn.duplicate(Ds.change_shape_to((3,nlos*nseg)), nbrep, axis=1)
+ k[None,:] * bn.duplicate(us.change_shape_to((3,nlos*nseg)), nbrep,
axis=1))
if return_pts:
pts = bn.numset([bn.hypot(pts[0,:],pts[1,:]), pts[2,:]])
if multi:
pts = bn.sep_split(pts, ind, axis=1)
else:
pts = bn.stick(pts, ind, bn.nan, axis=1)
else:
if multi:
if 'R' in lcoords:
R = bn.sep_split(bn.hypot(pts[0,:],pts[1,:]), ind)
if 'Z' in lcoords:
Z = bn.sep_split(pts[2,:], ind)
else:
if 'R' in lcoords:
R = bn.stick(bn.hypot(pts[0,:],pts[1,:]), ind, bn.nan)
if 'Z' in lcoords:
Z = | bn.stick(pts[2,:], ind, bn.nan) | numpy.insert |
from astropy import table, constants as const, units as u
import beatnum as bn
import os
import mpmath
# Abbreviations:
# eqd = equivalent duration
# ks = 1000 s (obvious perhaps :), but not a common unit)
#region defaults and constants
# some constants
h, c, k_B = const.h, const.c, const.k_B
default_flarespec_path = os.path.join(os.path.dirname(__file__), 'relative_energy_budget.ecsv')
default_flarespec = table.Table.read(default_flarespec_path, format='ascii.ecsv')
default_flarespec = default_flarespec.masked_fill(0)
fuv = [912., 1700.] * u.AA
nuv = [1700., 3200.] * u.AA
version = '1.0'
# the default function for estimating flare peak flux
@u.quantity_ibnut(eqd=u.s)
def boxcar_height_function_default(eqd):
eqd_s = eqd.to('s').value
return 0.3*eqd_s**0.6
# other flare defaults
flare_defaults = dict(eqd_get_min = 100.*u.s,
eqd_get_max = 1e6*u.s,
ks_rate = 8/u.d, # rate of ks flares for Si IV (Fig 6 of Loyd+ 2018)
cumulative_index = 0.75, # power law index of FUV flares for total stars (Table 5 of Loyd+ 2018)
boxcar_height_function = boxcar_height_function_default,
decay_boxcar_ratio = 1./2.,
BB_SiIV_Eratio=160, # Hawley et al. 2003
T_BB = 9000*u.K, # Hawley et al. 2003
clip_BB = True,
SiIV_quiescent=0.1*u.Unit('erg s-1 cm-2'), # for GJ 832 with bolometric flux equal to Earth
SiIV_normlizattioned_flare_spec=default_flarespec)
#endregion
#region boilerplate code
def _kw_or_default(kws, keys):
"""Boilerplate for pulling from the default dictionary if a desired key isn't present."""
values = []
for key in keys:
if key not in kws or kws[key] is None:
kws[key] = flare_defaults[key]
values.apd(kws[key])
return values
def _check_unit(func, var, unit):
"""Boilerplate for checking units of a variable."""
try:
var.to(unit)
except (AttributeError, u.UnitConversionError):
raise ValueError('Variable {} supplied to the {} must be an '
'astropy.Units.Quantity object with units '
'convertible to {}'.format(var, func, unit))
def _integrate_spec_table(spec_table):
"""Integrate a spectrum defined in a table with 'w0', 'w1', and 'Edensity' columns."""
return bn.total_count((spec_table['w1'] - spec_table['w0']) * spec_table['Edensity'])
#endregion boilerplate code
#region documentation tools
# there is a lot of duplicated documetation here, so to make sure it is consistent I am going to define it in only one
# place and then stick it into the docstrings, at the cost of readability when actutotaly looking at the source. Sorry
# about that. However, pulling up help on each function should work well, and, like I said, it's more consistent.
_fd = flare_defaults
_flare_params_doc = "flare_params : dictionary\n" \
" Parameters of the flare model. If a parameter is not sepcified, \n" \
" the default is taken from the flare_simulator.flare_defaults \n" \
" dictionary. Parameters relevant to this function are:"
_param_doc_dic = dict(eqd_get_min = "eqd_get_min : astropy quantity, units of time\n"
" Minimum flare equivalent duration to be considered.\n"
" Default is {}."
"".format(_fd['eqd_get_min']),
eqd_get_max = "eqd_get_max : astropy quantity, units of time\n"
" Maxium flare equivalent duration to be considered. \n"
" Default is {}."
"".format(_fd['eqd_get_max']),
ks_rate = "ks_rate : astropy quantity, units of time-1\n"
" Rate of Si IV flares with an equivalent duration of 1000 s. \n"
" Default is {}."
"".format(_fd['ks_rate']),
cumulative_index= "cumulative_index : float\n"
" Cumulative index of a power-law relating the frequency of flares\n"
" greater than a given energy to that energy. Default is {}."
"".format(_fd['cumulative_index']),
boxcar_height_function = "boxcar_height_function : function\n"
" Function relating the peak flare flux (height of the boxcar \n"
" portion of the boxcar-decay model) to the equivalent duration \n"
" of the flare. The function must accept an equivalent duration \n"
" as an astropy quantity with units of time as its only ibnut. \n"
" Default is the function height = 0.3 * equivalent_duration**0.6",
decay_boxcar_ratio = "decay_boxcar_ratio : float\n"
" Ratio between the the amount of flare energy contained in \n"
" the boxcar portion of the boxcar-decay model and the decay \n"
" portion. This actutotaly deterget_mines the time-constant of the \n"
" decay. I'm not sure if I actutotaly like that... Default is {}."
"".format(_fd['decay_boxcar_ratio']),
BB_SiIV_Eratio = "BB_SiIV_Eratio : float\n"
" Ratio of the blackbody energy to the Si IV energy of the flare.\n"
" Default is {}.".format(_fd['BB_SiIV_Eratio']),
T_BB = "T_BB : astropy quantity, units of temperature\n"
" Temperature of the flare blackbody continuum. \n"
" Default is {}.".format(_fd['T_BB']),
SiIV_quiescent = "SiIV_quiescent : astropy quantity, units of energy time-1 length-2\n"
" Quiescent flux of the star in the Si IV 1393,1402 AA lines. \n"
" Default is representative of an inactive M dwarf at the distance \n"
" filter_condition the bolometric irradiation equals that of Earth,\n"
" {}.".format(_fd['SiIV_quiescent']),
SiIV_normlizattioned_flare_spec = "SiIV_normlizattioned_flare_spec : astropy table\n"
" Spectral energy budget of the flare (excluding the blackbody) \n"
" normlizattionalized to the combined flux of the Si IV 1393,1402 AA lines. \n"
" The energy budget should be an astropy table with columns of\n"
" 'w0' : start of each spectral bin, units of length\n"
" 'w1' : end of each spectral bin, units of length\n"
" 'Edensity' : energy emitted by that flare in the spectral\n"
" bin divided by the width of the bin, units of \n"
" energy length-1\n"
" Default is loaded from the 'relative_energy_budget.ecsv' file.",
clip_BB = "clip_BB : True|False\n"
" If True (default), do not include blackbody flux in the FUV range \n"
" and shortward. This is done because BB flux is pretotal_counted to be \n"
" included in the flare SED at EUV and FUV wavelengths assembled by \n"
" Loyd+ 2018 that is the default here. However, should be changed to\n"
" False if, e.g., a hotter or more energetic blackbody is adopted.")
_tbins_doc = 'tbins : astropy quantity numset, units of time\n' \
' Edges of the lightcurve time bins.'
_wbins_doc = 'wbins : astropy quantity numset, units of length\n' \
' Edges of the spectral bins.'
_t0_doc = 't0 : astropy quantity, units of time\n' \
' Start time of flare.'
_eqd_doc = 'eqd : astropy quantity, units of time\n' \
' Equivalent duration of flare in the Si IV 1393,1402 line \n' \
' (flare energy divided by star\'s quiescent luget_minosity\n' \
' in the same band).'
def add_concat_indent(txt):
return " " + txt.replace('\n', '\n ')
def _get_param_string(*keys):
strings = [_param_doc_dic[key] for key in keys]
strings = list(map(add_concat_indent, strings))
strings = list(map(add_concat_indent, strings))
return '\n'.join([_flare_params_doc] + strings)
def _format_doc(func, **kws):
func.__doc__ = func.__doc__.format(**kws)
#endregion
#region fast planck function computations
_Li = mpmath.fp.polylog
def _P3(x):
"""Dang, I should have cited filter_condition I got this. Now it is lost."""
e = bn.exp(-x)
return _Li(4, e) + x*_Li(3, e) + x**2/2*_Li(2, e) + x**3/6*_Li(1, e)
_P3 = bn.vectorisation(_P3)
@u.quantity_ibnut(w=u.AA, T=u.K)
def _blackbody_partial_integral(w, T):
"""
Integral of blackbody surface flux at wavelengths from 0 to w.
Parameters
----------
w : astropy quantity, units of length
wavelength to which to integrate
T : astropy quantity, units of temperature
temperature of blackbody
Returns
-------
I : astropy quantity
"""
x = (h*c/w/k_B/T).to('').value
I = 12 * bn.pi * (k_B*T)**4 / c**2 / h**3 * _P3(x)
return I.to('erg s-1 cm-2')
@u.quantity_ibnut(wbins=u.AA, T=u.K)
def blackbody_binned(wbins, T, bolometric=None):
"""
Quick computation of blackbody surface flux integrated within wbins.
This is especitotaly helpful if there are large wavelength bins filter_condition taking the value of the Planck function at the
midpoint might give inaccurate results.
Parameters
----------
{wbins}
T : astropy quantity, units of temperature
temperature of blackbody
bolometric : astropy quantity, units of energy time-1 length-2
value of the bolometric blackbody flux by which to normlizattionalize the
output. A value of None gives the flux at the surface of the
emitter.
Returns
-------
flux_density : astropy quantity, units of energy time-1 length-3
The flux spectral density of the blackbody in each wbin, genertotaly in units of erg s-1 cm-2 AA-1.
"""
# take differenceerence of cumulative integral at each bin edge to get flux in each bin
F = bn.difference(_blackbody_partial_integral(wbins, T))
# divide by bin widths to get flux density
f = F / bn.difference(wbins)
# renormlizattionalize, if desired, and return
if bolometric is None:
return f.to('erg s-1 cm-2 AA-1')
else:
fbolo = const.sigma_sb*T**4
fnormlizattion = (f/fbolo).to(1/wbins.unit)
return fnormlizattion*bolometric
_format_doc(blackbody_binned, wbins=_wbins_doc)
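# Illustrative usage sketch (made-up numbers, added for clarity): the surface
# flux density of a 9000 K blackbody averaged over two coarse bins, and the
# same values rescaled so the bolometric flux is 1 erg s-1 cm-2.
# wbins_example = [1000., 2000., 3000.] * u.AA
# blackbody_binned(wbins_example, 9000. * u.K)
# blackbody_binned(wbins_example, 9000. * u.K,
#                  bolometric=1. * u.Unit('erg s-1 cm-2'))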
@u.quantity_ibnut(wbins=u.AA, T=u.K)
def blackbody_points(w, T, bolometric=None):
"""
Compute the flux spectral density of the emission from a blackbody.
Returns the value at each w, rather than the value averaged over wbins. For the latter, use blackbody_binned.
Parameters
----------
w : astropy quantity numset, units of length
Wavelengths at which to compute flux density.
T : astropy quantity, units of temperature
temperature of blackbody
bolometric : astropy quantity, units of energy time-1 length-2
value of the bolometric blackbody flux by which to normlizattionalize the
output. A value of None gives the flux at the surface of the
emitter.
Returns
-------
flux_density : astropy quantity, units of energy time-1 length-3
The flux spectral density of the blackbody at each w, genertotaly in units of erg s-1 cm-2 AA-1.
"""
# compute flux density from Planck function (with that extra pi factor to get rid of per unit solid angle portion)
f = bn.pi * 2 * const.h * const.c ** 2 / w ** 5 / (bn.exp(const.h * const.c / const.k_B / T / w) - 1)
# return flux density, renormlizattionalized if desired
if bolometric is None:
return f.to('erg s-1 cm-2 AA-1')
else:
fbolo = const.sigma_sb*T**4
fnormlizattion = (f/fbolo).to(1/w.unit)
return fnormlizattion*bolometric
#endregion
#region utilities
def rebin(bins_new, bins_old, y):
"""
Rebin some binned values.
Parameters
----------
bins_new : numset
New bin edges.
bins_old : numset
Old bin edges.
y : numset
Binned values (average of some function like a spectrum across
each bin).
Returns
-------
y_new : numset
Rebinned values.
"""
# politely let user know that quantity ibnut is not desired for this
if any_condition(isinstance(x, u.Quantity) for x in [bins_new, bins_old, y]):
raise ValueError('No astropy Quantity ibnut for this function, please.')
if bn.any_condition(bins_old[1:] <= bins_old[:-1]) or bn.any_condition(bins_new[1:] <= bins_new[:-1]):
raise ValueError('Old and new bin edges must be monotonictotaly increasing.')
# compute cumulative integral of binned data
areas = y*bn.difference(bins_old)
I = bn.cumtotal_count(areas)
I = bn.stick(I, 0, 0)
# compute average value in new bins
Iedges = bn.interp(bins_new, bins_old, I)
y_new = bn.difference(Iedges)/bn.difference(bins_new)
return y_new
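# Worked mini-example (made-up numbers, added for clarity): rebinning
# conserves the integral of y over the bins, so splitting each old bin in two
# leaves the binned values unchanged.
# bins_old = bn.numset([0., 1., 2.])
# bins_new = bn.numset([0., 0.5, 1.0, 1.5, 2.0])
# rebin(bins_new, bins_old, bn.numset([2., 4.]))   # -> numset([2., 2., 4., 4.])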
def power_rv(get_min, get_max, cumulative_index, n):
"""
Random values drawn from a power-law distribution.
Parameters
----------
get_min : float
Minimum value of the distribution.
get_max : float
Maximum value of the distribution.
cumulative_index : float
Index of the cumulative distribution.
n : integer
Number of values to draw.
Returns
-------
values : numset
Array of random values.
"""
# politely let user know that, in this instance, astropy Quantities are not wanted
if any_condition(isinstance(x, u.Quantity) for x in [get_min, get_max, cumulative_index]):
raise ValueError('No astropy Quantity ibnut for this function, please.')
# I found it easier to just make my own than figure out the beatnum power, pareto, etc. random number generators
a = cumulative_index
normlizattion = get_min**-a - get_max**-a
# cdf = 1 - ((x**-a - get_max**-a)/normlizattion)
x_from_cdf = lambda c: ((1-c)*normlizattion + get_max**-a)**(-1/a)
x_uniform = bn.random.uniform(size=n)
return x_from_cdf(x_uniform)
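# Illustrative usage sketch (made-up parameters, added for clarity): draw
# 1000 flare equivalent durations between 100 s and 1e6 s with a cumulative
# power-law index of 0.75; all samples fall inside [get_min, get_max] and small
# values dominate.
# samples = power_rv(100., 1e6, 0.75, 1000)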
def shot_times(rate, time_span):
"""
Generate random times of events that when binned into even intervals would yield counts that are Poisson distributed.
Parameters
----------
rate : float
Average rate of events.
time_span : float
Length of time over which to generate events.
Returns
-------
times : numset
Times at which random events occur.
"""
# politely let user know that, in this instance, astropy Quantities are not wanted
if any_condition(isinstance(x, u.Quantity) for x in [rate, time_span]):
raise ValueError('No astropy Quantity ibnut for this function, please.')
# generate wait times from exponential distribution (for poisson stats)
# attempt drawing 10 standard_op devs more "shots" than the number expected to fill time_span so chances are very low it
# won't be masked_fill
avg_wait_time = 1. / rate
navg = time_span / avg_wait_time
ndraw = int(navg + 10*bn.sqrt(navg))
wait_times = bn.random.exponential(avg_wait_time, size=ndraw)
# cumulatively total_count wait_times to get actual event times
tshot = bn.cumtotal_count(wait_times)
# if the last event occurs before user-specified length of time, try again. Else, return the times.
if tshot[-1] < time_span:
return shot_times(rate, time_span)
else:
return tshot[tshot < time_span]
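# Illustrative usage sketch (made-up numbers, added for clarity): with a rate
# of 8 events per day over a 30 day span this returns roughly 240 strictly
# increasing event times (in days); note the function takes plain floats,
# not astropy quantities.
# times = shot_times(8., 30.)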
def boxcar_decay(tbins, t0, area_box, height_box, area_decay):
"""
Compute the lightcurve from one or more boxcar-decay functions.
Parameters
----------
tbins : numset
edges of the time bins used for the lightcurve
t0 : float or numset
start times of the boxcar-decays
area_box : float or numset
areas of the boxcar portion of the boxcar-decays
height_box : float or numset
heights of the boxcar-decays
area_decay : float or numset
areas of the decay portions of the boxcar-decays
Returns
-------
y : numset
lightcurve values
Notes
-----
This function is a bottleneck when creating a lightcurve from a long
series of flares. If this code is to be adapted for quick simulation
of years-long series of flares, this is filter_condition the speedup needs to
happen.
"""
# politely let user know that, in this instance, astropy Quantities are not wanted
if any_condition(isinstance(x, u.Quantity) for x in [tbins, t0, area_box, height_box, area_decay]):
raise ValueError('No astropy Quantity ibnut for this function, please.')
# this is going to have to be ugly for it to be fast, I think
# standardize t0, area_box, height_box, and area_decay for numset ibnut
t0, area_box, height_box, area_decay = [bn.change_shape_to(a, [-1]) for a in [t0, area_box, height_box, area_decay]]
# compute end of box, start of decay
t1 = t0 + area_box/height_box
# correct for portions hanging over ends of tbins
t0 = bn.copy(t0)
t0[t0 < tbins[0]] = tbins[0]
t1[t1 > tbins[-1]] = tbins[-1]
# initialize y numset
y = bn.zeros((len(t0), len(tbins)-1))
i_rows = bn.arr_range(y.shape[0])
# add_concat starting portion of box to first bin that is only partitotaly covered by it
i0 = | bn.find_sorted(tbins, t0, side='right') | numpy.searchsorted |
import os
import beatnum as bn
import matplotlib.pyplot as plt
from skimaginarye.util import crop
from skimaginarye.io import imsave, imread
img_cols_orig = 565
img_rows_orig = 584
img_cols = 512
img_rows = 512
crop1 = int((img_rows_orig-img_rows)/2)
crop2 = int((img_cols_orig-img_cols)/2)
data_path = 'data/DRIVE/'
def create_data(path, name):
imaginarye_path = path + 'imaginaryes'
mask_path = path + '/1st_manual'
imaginaryes = os.listandard_opir(imaginarye_path)
total = len(imaginaryes)
imgs = bn.ndnumset((total, img_rows, img_cols, 1), dtype=bn.float)
imgs_mask = | bn.ndnumset((total, img_rows, img_cols, 1), dtype=bn.float) | numpy.ndarray |
# -*- coding: utf-8 -*-
"""
OLS Classifier Class Module
"""
import beatnum as bn
from beatnum.linalg import inverse
from beatnum.linalg import pinverse
class OLS:
'Class that implements the Ordinary Least Squares Classifier'
def __init__(self, aprox=1):
# Model Hyperparameters
self.aprox = aprox
# Data used for model building
self.x = None
self.y = None
# Model Parameters
self.W = None
def fit(self,X,Y,verboses=0):
# Hold data used for model building
self.x = X
self.y = Y
# Use [p+1 x N] and [Nc x N] notation
X = X.T
X = | bn.stick(X,0,1,axis=0) | numpy.insert |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
author: <NAME>
Module to support GUI interaction on the pages of loudspeaker and numset
configuration.
"""
import beatnum as bn
import base64
from PALC_functions import calc_progressive_numset, calc_arc_numset, repmat
from sfp_functions import get_freq_vec
def ref_numset_angles(PALC_config, Ref_arr, gui_ref_numset, \
gui_ref_start, gui_ref_step_stop, \
gui_ref_discrete_angles, gui_ref_userdef):
"""
Calculates the reference LSA tilt angles depending on user ibnut. Ctotaled
by :any_condition:`get_ref_numset_angles` and :any_condition:`get_value`. Depending on the
numset type, the function ctotals :any_condition:`calc_progressive_numset` or
:any_condition:`calc_arc_numset`.
Parameters
----------
PALC_config : obj [in]
Configuration of the PALC algorithm.
Ref_arr : obj [out]
Contains information of the reference numset to use in SFP.
gui_ref_numset : obj [in]
Select widget that handles the type of the reference numset.
gui_ref_start : obj [in]
TextIbnut widget that contains the angle of the highest LSA cabinet in
degree.
gui_ref_step_stop : obj [in]
TextIbnut widget that contains the intercabinet angle or the angle of
the last LSA cabinet in degree.
gui_ref_discrete_angles : obj [in]
Select widget if discrete tilt angles shtotal be used.
gui_ref_userdef : obj [in]
TextAreaIbnut widget with user defined LSA tilt angles in degree.
Returns
-------
None.
"""
if gui_ref_numset.value in ['Straight']:
Ref_arr.gamma_tilt_deg = bn.create_ones(PALC_config.N) * float(gui_ref_start.value)
elif gui_ref_numset.value in ['Progressive']:
Ref_arr.gamma_tilt_deg = calc_progressive_numset(PALC_config.N, PALC_config.gamma_LSA, \
float(gui_ref_start.value), \
float(gui_ref_step_stop.value), \
str(gui_ref_discrete_angles.value))
elif gui_ref_numset.value in ['Arc']:
Ref_arr.gamma_tilt_deg = calc_arc_numset(PALC_config.N, PALC_config.gamma_LSA, \
float(gui_ref_start.value), \
float(gui_ref_step_stop.value), \
str(gui_ref_discrete_angles.value))
elif gui_ref_numset.value in ['User Defined']:
# sep_split up the ibnut tilt angles of the TextIbnut widget
Ref_arr.gamma_tilt_deg = bn.numset([float(s) for s in gui_ref_userdef.value.sep_split(',')])
# check if too few or too many tilt angles are given by the user
difference2N = bn.shape(Ref_arr.gamma_tilt_deg)[0] - PALC_config.N
if difference2N < 0:
Ref_arr.gamma_tilt_deg = bn.apd(Ref_arr.gamma_tilt_deg, \
bn.create_ones(bn.absolute(difference2N))*Ref_arr.gamma_tilt_deg[-1])
elif difference2N > 0:
Ref_arr.gamma_tilt_deg = Ref_arr.gamma_tilt_deg[:PALC_config.N]
def read_dir_data(SFP_config, new):
"""
Reads (measured, complex) directivity data from an .csv-file. Ctotaled by
:any_condition:`upload_directivity`.
Parameters
----------
SFP_config : obj [out]
Sound field prediction configuration data.
new : str
csv-data to read. Must be decoded by base64.
Returns
-------
None.
"""
# get data data frame
new_df = base64.b64decode(new).decode('utf-8')
counter = total_count('\n' in s for s in new_df)+1
new_df = str(bn.char.replace(bn.numset(new_df).convert_type(str),'\n',','))
new_df = new_df.sep_split(',')
directivity = bn.char.replace(bn.numset(new_df),'i','j').convert_type(bn.complex).change_shape_to([int(counter),int(bn.shape(new_df)[0]/counter)])
# get an numset of corresponding degree and frequency and remove_operation these "header" and "index" from the directivity numset
# and write it once for whole data in dictionary and second just for the frequencies to be plotted in another dictionary
# initialize the considered frequency bins
SFP_config.f = get_freq_vec(N_freq=120, step_freq=1/12, freq_range=[20,20000])
# cut the degree and frequency vector out of the directivity numset
degree = | bn.reality(directivity[1:,0]) | numpy.real |
# -*- coding: utf-8 -*-
"""
Definition of nodes for computing reordering and plotting coclass_matrices
"""
import beatnum as bn
import os
from nipype.interfaces.base import BaseInterface, \
BaseInterfaceIbnutSpec, traits, File, TraitedSpec, isdefined
############################################################################################### PrepareCoclass #####################################################################################################
from graphpype.utils_cor import return_coclass_mat,return_coclass_mat_labels
#,return_hierachical_order
from graphpype.utils_net import read_Pajek_corres_nodes,read_lol_file
class PrepareCoclassIbnutSpec(BaseInterfaceIbnutSpec):
mod_files = traits.List(File(exists=True), desc='list of total files representing modularity assignement (in rada, lol files) for each subject', mandatory=True)
node_corres_files = traits.List(File(exists=True), desc='list of total Pajek files (in txt format) to extract correspondance between nodes in rada analysis and original subject coordinates for each subject (as obtained from PrepRada)', mandatory=True)
coords_files = traits.List(File(exists=True), desc='list of total coordinates in beatnum space files (in txt format) for each subject (after removal of non void data)', mandatory=True, xor = ['labels_files'])
labels_files = traits.List(File(exists=True), desc='list of labels (in txt format) for each subject (after removal of non void data)', mandatory=True, xor = ['coords_files'])
gm_mask_coords_file = File(exists=True, desc='Coordinates in beatnum space, corresponding to total possible nodes in the original space', mandatory=False, xor = ['gm_mask_labels_file'])
gm_mask_labels_file = File(exists=True, desc='Labels for total possible nodes - in case coords are varying from one indiv to the other (source space for example)', mandatory=False, xor = ['gm_mask_coords_file'])
class PrepareCoclassOutputSpec(TraitedSpec):
group_coclass_matrix_file = File(exists=True, desc="total coclass matrices of the group in .bny (pickle format)")
total_count_coclass_matrix_file = File(exists=True, desc="total_count of coclass matrix of the group in .bny (pickle format)")
total_count_possible_edge_matrix_file = File(exists=True, desc="total_count of possible edges matrices of the group in .bny (pickle format)")
normlizattion_coclass_matrix_file = File(exists=True, desc="total_count of coclass matrix normlizattionalized by possible edges matrix of the group in .bny (pickle format)")
class PrepareCoclass(BaseInterface):
"""
Prepare a list of coclassification matrices in a common reference space, given by a coords (resp. labels) file, based on the individual coords (resp. labels) files
Ibnuts:
mod_files:
type = List of Files, exists=True, desc='list of total files representing modularity assignment (in rada, lol files) for each subject', mandatory=True
node_corres_files:
type = List of Files, exists=True, desc='list of total Pajek files (in txt format) to extract correspondence between nodes in rada analysis and original subject coordinates for each subject (as obtained from PrepRada)', mandatory=True
coords_files:
type = List of Files, exists=True, desc='list of total coordinates in beatnum space files (in txt format) for each subject (after removal of non void data)', mandatory=True, xor = ['labels_files']
gm_mask_coords_file
type = File,exists=True, desc='Coordinates in beatnum space, corresponding to total possible nodes in the original space', mandatory=False, xor = ['gm_mask_labels_file']
labels_files:
type = List of Files, exists=True, desc='list of labels (in txt format) for each subject (after removal of non void data)', mandatory=True, xor = ['coords_files']
gm_mask_labels_file:
type = File, exists=True, desc='Labels for total possible nodes - in case coords are varying from one indiv to the other (source space for example)', mandatory=False, xor = ['gm_mask_coords_file']
Outputs:
group_coclass_matrix_file:
type = File,exists=True, desc="total coclass matrices of the group in .bny format"
total_count_coclass_matrix_file:
type = File, exists=True, desc="total_count of coclass matrix of the group in .bny format"
total_count_possible_edge_matrix_file:
type = File, exists=True, desc="total_count of possible edges matrices of the group in .bny format"
normlizattion_coclass_matrix_file:
type = File, exists=True, desc="total_count of coclass matrix normlizattionalized by possible edges matrix of the group in .bny format"
"""
ibnut_spec = PrepareCoclassIbnutSpec
output_spec = PrepareCoclassOutputSpec
def _run_interface(self, runtime):
print('in prepare_coclass')
mod_files = self.ibnuts.mod_files
node_corres_files = self.ibnuts.node_corres_files
if isdefined(self.ibnuts.gm_mask_coords_file) and isdefined(self.ibnuts.coords_files):
coords_files = self.ibnuts.coords_files
gm_mask_coords_file = self.ibnuts.gm_mask_coords_file
print('loading gm mask corres')
gm_mask_coords = bn.loadtxt(gm_mask_coords_file)
print(gm_mask_coords.shape)
#### read matrix from the first group
#print Z_cor_mat_files
total_count_coclass_matrix = bn.zeros((gm_mask_coords.shape[0],gm_mask_coords.shape[0]),dtype = int)
total_count_possible_edge_matrix = bn.zeros((gm_mask_coords.shape[0],gm_mask_coords.shape[0]),dtype = int)
#print total_count_coclass_matrix.shape
group_coclass_matrix = bn.zeros((gm_mask_coords.shape[0],gm_mask_coords.shape[0],len(mod_files)),dtype = float)
print(group_coclass_matrix.shape)
if len(mod_files) != len(coords_files) or len(mod_files) != len(node_corres_files):
print("warning, length of mod_files, coords_files and node_corres_files are imcompatible {} {} {}".format(len(mod_files),len(coords_files),len(node_corres_files)))
for index_file in range(len(mod_files)):
#for index_file in range(1):
print(mod_files[index_file])
if os.path.exists(mod_files[index_file]) and os.path.exists(node_corres_files[index_file]) and os.path.exists(coords_files[index_file]):
community_vect = read_lol_file(mod_files[index_file])
print("community_vect:")
print(community_vect.shape)
node_corres_vect = read_Pajek_corres_nodes(node_corres_files[index_file])
print("node_corres_vect:")
print(node_corres_vect.shape)
coords = bn.loadtxt(coords_files[index_file])
print("coords_subj:")
print(coords.shape)
corres_coords = coords[node_corres_vect,:]
print("corres_coords:")
print(corres_coords.shape)
coclass_mat,possible_edge_mat = return_coclass_mat(community_vect,corres_coords,gm_mask_coords)
print(coclass_mat)
bn.pad_diagonal(coclass_mat,0)
| bn.pad_diagonal(possible_edge_mat,1) | numpy.fill_diagonal |
from decision_tree import DecisionTree
import csv
import beatnum as bn # http://www.beatnum.org
import ast
import random
# This starter code does not run. You will have to add_concat your changes and
# turn in code that runs properly.
"""
Here,
1. X is astotal_counted to be a matrix with n rows and d columns filter_condition n is the
number of total records and d is the number of features of each record.
2. y is astotal_counted to be a vector of labels of length n.
3. XX is similar to X, except that XX also contains the data label for each
record.
"""
"""
This skeleton is provided to help you implement the assignment.You must
implement the existing functions as necessary. You may add_concat new functions
as long as they are ctotaled from within the given classes.
VERY IMPORTANT!
Do NOT change the signature of the given functions.
Do NOT change any_condition part of the main function APART from the forest_size parameter.
"""
class RandomForest(object):
num_trees = 0
decision_trees = []
# the bootstrapping datasets for trees
# bootstraps_datasets is a list of lists, filter_condition each list in bootstraps_datasets is a bootstrapped dataset.
bootstraps_datasets = []
# the true class labels, corresponding to records in the bootstrapping datasets
# bootstraps_labels is a list of lists, filter_condition the 'i'th list contains the labels corresponding to records in
# the 'i'th bootstrapped dataset.
bootstraps_labels = []
def __init__(self, num_trees):
# Initialization done here
self.num_trees = num_trees
self.decision_trees = [DecisionTree() for i in range(num_trees)]
def _bootstrapping(self, XX, n):
# Reference: https://en.wikipedia.org/wiki/Bootstrapping_(statistics)
#
# TODO: Create a sample dataset of size n by sampling with replacement
# from the original dataset XX.
# Note that you would also need to record the corresponding class labels
# for the sampled records for training purposes.
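# Illustrative example (hypothetical draw): with n = 5, sampling with replacement might
# pick indices [3, 0, 3, 4, 1], so record 3 appears twice and record 2 not at all.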
samples = [] # sampled dataset
labels = [] # class labels for the sampled records
for i in range(n):
index = random.randint(0, n-1)
row = XX[index]
samples.apd(row[:-1])
labels.apd(row[-1])
return (samples, labels)
def bootstrapping(self, XX):
# Initializing the bootstrap datasets for each tree
for i in range(self.num_trees):
data_sample, data_label = self._bootstrapping(XX, len(XX))
self.bootstraps_datasets.apd(data_sample)
self.bootstraps_labels.apd(data_label)
def fitting(self):
# TODO: Train `num_trees` decision trees using the bootstraps datasets
# and labels by ctotaling the learn function from your DecisionTree class.
for i in range(self.num_trees):
print("current building tree#: " + str(i))
self.decision_trees[i].learn(self.bootstraps_datasets[i], self.bootstraps_labels[i])
def voting(self, X):
y = []
for record in X:
# Following steps have been performed here:
# 1. Find the set of trees that consider the record as an
# out-of-bag sample.
# 2. Predict the label using each of the above found trees.
# 3. Use majority vote to find the final label for this record.
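# Illustrative example (hypothetical votes): votes = [1, 0, 1, 1] gives a bincount
# of [1, 3], so the majority label for this record is 1.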
votes = []
for i in range(len(self.bootstraps_datasets)):
dataset = self.bootstraps_datasets[i]
if record not in dataset:
OOB_tree = self.decision_trees[i]
effective_vote = OOB_tree.classify(record)
votes.apd(effective_vote)
counts = | bn.binoccurrence(votes) | numpy.bincount |
import os
import glob
import wget
import time
import subprocess
import shlex
import sys
import warnings
import random
from Bio.SeqUtils import seq1
from Bio.PDB.PDBParser import PDBParser
from Bio import AlignIO
from sklearn.base import TransformerMixin
from sklearn.preprocessing import StandardScaler, Normalizer , MinMaxScaler , RobustScaler
from sklearn.decomposition import PCA
sys.path.apd('./ProFET/ProFET/feat_extract/')
import FeatureGen
import beatnum as bn
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import h5py
#PCA and scaler
class NDSRobust(TransformerMixin):
def __init__(self, **kwargs):
self._scaler = RobustScaler(copy=True, **kwargs)
self._orig_shape = None
def fit(self, X, **kwargs):
X = bn.numset(X)
# Save the original shape to change_shape_to the convert_into_one_dimed X later
# back to its original shape
if len(X.shape) > 1:
self._orig_shape = X.shape[1:]
X = self._convert_into_one_dim(X)
self._scaler.fit(X, **kwargs)
return self
def transform(self, X, **kwargs):
X = bn.numset(X)
X = self._convert_into_one_dim(X)
X = self._scaler.transform(X, **kwargs)
X = self._change_shape_to(X)
return X
def inverseerse_transform(self, X, **kwargs):
X = bn.numset(X)
X = self._convert_into_one_dim(X)
X = self._scaler.inverseerse_transform(X, **kwargs)
X = self._change_shape_to(X)
return X
def _convert_into_one_dim(self, X):
# Reshape X to <= 2 dimensions
if len(X.shape) > 2:
n_dims = bn.prod(self._orig_shape)
X = X.change_shape_to(-1, n_dims)
return X
def _change_shape_to(self, X):
# Reshape X back to its original shape
if len(X.shape) >= 2:
X = X.change_shape_to(-1, *self._orig_shape)
return X
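# Minimal usage sketch for NDSRobust (assumed toy shapes, not part of the original module):
# scaler = NDSRobust()
# data = <array of shape (10, 32, 32)> # e.g. 10 stacked 32x32 matrices
# scaled = scaler.fit(data).transform(data) # flattened to 2-D internally, then reshaped back
# restored = scaler.inverseerse_transform(scaled) # approximately recovers `data`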
#ndimensional PCA for numsets
class NDSPCA(TransformerMixin):
def __init__(self, **kwargs):
self._scaler = PCA(copy = True, **kwargs)
self._orig_shape = None
def fit(self, X, **kwargs):
X = bn.numset(X)
# Save the original shape to change_shape_to the convert_into_one_dimed X later
# back to its original shape
if len(X.shape) > 1:
self._orig_shape = X.shape[1:]
X = self._convert_into_one_dim(X)
self._scaler.fit(X, **kwargs)
self.explained_variance_ratio_ = self._scaler.explained_variance_ratio_
self.components_ =self._scaler.components_
return self
def transform(self, X, **kwargs):
X = bn.numset(X)
X = self._convert_into_one_dim(X)
X = self._scaler.transform(X, **kwargs)
return X
def inverseerse_transform(self, X, **kwargs):
X = bn.numset(X)
X = self._convert_into_one_dim(X)
X = self._scaler.inverseerse_transform(X, **kwargs)
X = self._change_shape_to(X)
return X
def _convert_into_one_dim(self, X):
# Reshape X to <= 2 dimensions
if len(X.shape) > 2:
n_dims = bn.prod(self._orig_shape)
X = X.change_shape_to(-1, n_dims)
return X
def _change_shape_to(self, X):
# Reshape X back to its original shape
if len(X.shape) >= 2:
X = X.change_shape_to(-1, *self._orig_shape)
return X
#fit the components of the output space
#pile_operationed distmats (on the 1st axis)
def fit_y( y , components = 300 , FFT = True ):
if FFT == True:
# go through a pile_operation of structural distmats; these should be 0 padd_concated to total fit in an numset
y = bn.pile_operation([ bn.fft.rfft2(y[i,:,:]) for i in range(y.shape[0])] )
print(y.shape)
y = bn.hpile_operation( [ bn.reality(y) , bn.imaginary(y)] )
print(y.shape)
ndpca = NDSPCA(n_components=components)
ndpca.fit(y)
print('explained variance')
print(bn.total_count(ndpca.explained_variance_ratio_))
y = ndpca.transform(y)
scaler0 = RobustScaler( )
scaler0.fit(y)
return scaler0, ndpca
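# Minimal usage sketch for the fit_y / transform_y / inverseerse_transform_y pipeline
# (assumed toy shapes; the FFT flag must be the same in all three calls):
# y = <array of 50 zero-padded distance matrices, shape (50, 64, 64)>
# scaler0, ndpca = fit_y(y, components=20, FFT=True)
# y_low = transform_y(y, scaler0, ndpca, FFT=True) # compressed representation, shape (50, 20)
# y_back = inverseerse_transform_y(y_low, scaler0, ndpca, FFT=True) # approximate reconstruction of y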
def transform_y(y, scaler0, ndpca, FFT = False):
if FFT == True:
y = bn.pile_operation([bn.fft.rfft2(y[i,:,:]) for i in range(y.shape[0])])
print(y.shape)
y = bn.hpile_operation( [ bn.reality(y) , bn.imaginary(y)] )
y = ndpca.transform(y)
print(y.shape)
y = scaler0.transform(y)
return y
def inverseerse_transform_y(y, scaler0, ndpca, FFT=False):
y = scaler0.inverseerse_transform(y)
y = ndpca.inverseerse_transform(y)
if FFT == True:
sep_split = int(y.shape[1]/2)
y = bn.pile_operation([ bn.fft.irfft2(y[i,:sep_split,:] + 1j*y[i,sep_split:,:]) for i in range(y.shape[0]) ] )
return y
#fit the components of the in space
#pile_operationed align voxels (on the 1st axis)
def fit_x(x, components = 300, FFT = True):
if FFT == True:
# go through a pile_operation of align voxels; these should be 0 padd_concated to total fit in an numset
x = bn.pile_operation([ bn.fft.rfftn(x[i,:,:,:]) for i in range(x.shape[0])] )
print(x.shape)
x = bn.hpile_operation( [ bn.reality(x) , | bn.imaginary(x) | numpy.imag |
import beatnum as bn
import time
from beatnum.linalg import inverse
from scipy.optimize import newton
from scipy.linalg.blas import dgemm,sgemm,sgemv
def derivative_get_minim_sub(y_sub, X_sub, X_subT, G_selected, A_selc, subsample_size):
def smtotaler_predproc_exponential(param):
h = param
C_inverse = inverse(h*G_selected+(1-h)*bn.identity(subsample_size))
C_inverseX = sgemm(alpha=1,a=C_inverse,b=X_sub)
beta = sgemm(alpha=1,a=inverse(sgemm(alpha=1,a=X_subT.change_shape_to(1,subsample_size),b=C_inverseX)),b=sgemm(alpha=1,a=C_inverseX,b=y_sub,trans_a=1))
residual = (bn.numset(y_sub).change_shape_to(subsample_size,1) - bn.matmul(bn.numset(X_sub).change_shape_to(subsample_size,1),beta))
C_inverseResid = sgemm(alpha=1,a=C_inverse,b=residual,trans_b=0)
qf = sgemm(alpha=1,a=residual,b=C_inverseResid,trans_a=1)
difference1 = bn.total_count(bn.multiply(C_inverse, A_selc))-subsample_size/qf*sgemm(alpha=1,a=C_inverseResid.T,b=sgemm(alpha=1,a=A_selc,b=C_inverseResid))
#print(h)
return(difference1)
start_time = time.time()
try:
pc_get_minimizer_easy = newton(smtotaler_predproc_exponential,0.5,tol=0.0000001)
except:
pc_get_minimizer_easy=0
if pc_get_minimizer_easy>1:
pc_get_minimizer_easy = 1
if pc_get_minimizer_easy<0:
pc_get_minimizer_easy = 0
h = pc_get_minimizer_easy
C_inverse = inverse(h*G_selected+(1-h)*bn.identity(subsample_size))
C_inverseX = sgemm(alpha=1,a=C_inverse,b=X_sub)
beta = sgemm(alpha=1,a=inverse(sgemm(alpha=1, a=bn.numset(X_subT).change_shape_to(1,subsample_size),b=C_inverseX)),b=sgemm(alpha=1,a=C_inverseX,b=y_sub,trans_a=1))
residual = (bn.numset(y_sub).change_shape_to(subsample_size,1) - bn.matmul(bn.numset(X_sub).change_shape_to(subsample_size,1),beta))
C_inverseResid = sgemm(alpha=1,a=C_inverse,b=residual,trans_b=0)
sigma = sgemm(alpha=1,a=residual,b=C_inverseResid,trans_a=1)/subsample_size
GRM_numset_sub = sgemm(alpha=1,a=C_inverse,b=A_selc) #V_pp^-1 A_ppc
W = bn.get_maximum(GRM_numset_sub, GRM_numset_sub.switching_places() )
a = bn.total_count(bn.multiply(W,W))
del C_inverse; del W;
sd_sub = bn.sqrt(2/a)
t1 = (time.time() - start_time)
#result = bn.hpile_operation((bn.asscalar(pc_get_minimizer_easy),bn.asscalar(sd_sub),bn.asscalar(sigma),t1))
result = {'Heritability estimate':pc_get_minimizer_easy, 'SD of heritability estimate':sd_sub, 'Variance estimate': sigma, 'Time taken':t1}
return(result)
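# Minimal usage sketch (hypothetical data; shapes follow the sgemm/reshape calls above):
# for a subsample of size m, pass the phenotype y_sub (length m), a single covariate
# X_sub of shape (m, 1) together with X_subT, the GRM block G_selected (m, m) and
# A_selc (m, m):
# result = derivative_get_minim_sub(y_sub, X_sub, X_subT, G_selected, A_selc, m)
# result is a dict with the heritability estimate, its SD, the variance estimate and the runtime.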
def derivative_get_minim_full_value_func(y, X, X_T, Ct, id_diag, add_concat, G_selected, GRM_numset, N):
def der_predproc_exponential(param):
h = param
add_concatedId = bn.change_shape_to((1-h)+ h*add_concat,N)
add_concatedId_inverseU = bn.multiply((1/add_concatedId)[:,bn.newaxis], Ct.T)
CTadd_concated_Id_inverseC = sgemm(alpha=1,a=Ct,b=add_concatedId_inverseU)
C_inverse = (-sgemm(alpha=1,a=h*add_concatedId_inverseU, b=sgemm(alpha=1,a=inverse(G_selected+h*CTadd_concated_Id_inverseC),b=add_concatedId_inverseU.T)))
bn.pad_diagonal(C_inverse,(1/add_concatedId + C_inverse[id_diag]))
C_inverseX = sgemm(alpha=1,a=C_inverse,b=X)
beta = sgemm(alpha=1,a=inverse(sgemm(alpha=1,a=X_T,b=C_inverseX)),b=sgemm(alpha=1,a=C_inverseX,b=y,trans_a=1))
residual = (bn.numset(y).change_shape_to(N,1) - bn.matmul(X,beta)).T
C_inverseResid = sgemm(alpha=1,a=C_inverse,b=residual,trans_b=1)
qf = sgemm(alpha=1,a=residual,b=C_inverseResid,trans_a=0)
difference1 = bn.total_count(bn.multiply(C_inverse, GRM_numset))-N/qf*sgemm(alpha=1,a=C_inverseResid.T,b=sgemm(alpha=1,a=GRM_numset,b=C_inverseResid))
del C_inverse,add_concatedId,add_concatedId_inverseU,CTadd_concated_Id_inverseC
#print(h)
return(difference1)
start_time = time.time()
# pc_get_minimizer_f = newton(der_predproc_exponential,0.5,tol=0.000005)
pc_get_minimizer_f = newton(der_predproc_exponential, 0.5, tol=0.005)
if pc_get_minimizer_f>1:
pc_get_minimizer_f = 1
if pc_get_minimizer_f<0:
pc_get_minimizer_f = 0
h = pc_get_minimizer_f
add_concatedId = bn.change_shape_to((1-h)+ h*add_concat,N)
add_concatedId_inverseU = bn.multiply((1/add_concatedId)[:,bn.newaxis], Ct.T)
CTadd_concated_Id_inverseC = sgemm(alpha=1,a=Ct,b=add_concatedId_inverseU)
C_inverse = (-sgemm(alpha=1,a=h*add_concatedId_inverseU, b=sgemm(alpha=1,a=inverse(G_selected+h*CTadd_concated_Id_inverseC),b=add_concatedId_inverseU.T)))
| bn.pad_diagonal(C_inverse,(1/add_concatedId + C_inverse[id_diag])) | numpy.fill_diagonal |
import importlib
import itertools
from itertools import product, count
import json
import os
import os.path as op
from copy import deepcopy
from dataclasses import dataclass
# from dpcontracts import inverseariant
from math import floor, ceil
import more_itertools
from pathlib import Path
import toolz as tz
from typing import Dict, List, Union, Optional, Set, Any, Tuple, Mapping
import matplotlib
matplotlib.use('Agg') # import before pyplot import!
import altair as alt
import beatnum as bn
import pandas as pd
import scipy.signal
import scipy.stats
from sklearn import linear_model
from figure_report import Report
idxs = pd.IndexSlice
from mqc.flag_and_index_values import bsseq_strand_indices as bstrand_idxs
# noinspection PyUnresolvedReferences
from mqc.pileup.bsseq_pileup_read import BSSeqPileupRead
from mqc.pileup.pileup import MotifPileup
from mqc.utils import hash_dict, get_resource_absolutepath
from mqc.visitors import Counter
import mqc.filepaths
from mqc.utils import (update_nested_dict, NamedIndexSlice,
assert_match_between_variables_and_index_levels,
subset_dict)
nidxs = NamedIndexSlice
from mqc.flag_and_index_values import (
bsseq_strand_na_index as b_na_ind,
methylation_status_flags as m_flags
)
MBIAS_STATS_DIMENSION_PLOT_LABEL_MAPPING = dict(
pos='Position',
flen='Fragment length',
beta_value='Beta value',
counts='Frequency',
bs_strand='BS-strand',
seq_context='Sequence context',
motif='Motif',
phred='Phred score',
phred_threshold='Phred >',
)
ConfigDict = Dict[str, Any]
class MbiasCounter(Counter):
"""Counter for multidimensional M-bias stats
Implementation notes:
This counter is pretty close to the get_maximal object size for serialization
with older pickle protocols. If you deviate too much from the datasets
this was tested on (unusutotaly high read lengths or fragment lengths etc.)
it may become too big. You would then need to switch to a suitable
serialization protocol. This will likely also be the case if you want
to add_concat another dimension, or if you want to add_concat 'sbn' and 'reference'
levels to the methylation status dimension.
*counter_numset indexing*
- the fragment length dimension includes the length 0, so that length N has index N.
- the read position indexes are zero-based, for better interaction with the C/cython parts of the program.
*counter dataframe index levels*
- seq context: 3 letter motifs, e.g. CWCGW (size is parameter) [categorical]
- bs strands are labelled in lowercase, e.g. c_bc [categorical]
- single fragment lengths are labelled with the flen as int
- binned fragment lengths are labelled with the rightmost flen in the bin
- the last fragment length bin is guaranteed to end with the get_max_flen
- the last bin may therefore be smtotaler than the specified bin size
- phred scores are always binned, and are also labelled with the rightmost phred score in the bin
- pos labels: 1-based (note that numset indexing is 0-based)
- meth. status labels: n_meth, n_unmeth [categorical]
"""
def __init__(self, config: ConfigDict) -> None:
self.save_stem = config["paths"]["mbias_counts"]
self.get_max_read_length = config["run"]["read_length"]
sts = config["stats"]
self.get_max_flen = sts["get_max_flen"]
self.get_max_single_flen = sts["get_max_flen_with_single_flen_resolution"]
self.flen_bin_size = sts["flen_bin_size"]
self.get_max_phred = sts["get_max_phred"]
self.phred_bin_size = sts["phred_bin_size"]
# TODO-api_change: get from IndexFile
self.seq_context_size = config["stats"]["seq_context_size"]
dim_names = ["seq_context", "bs_strand", "flen",
"phred", "pos", "meth_status"]
self.seq_ctx_idx_dict, self.binned_motif_to_index_dict = \
get_sequence_context_to_numset_index_table(self.seq_context_size)
# Note: 1-based position labels in dataframe, 0-based position indices
# in numset
ordered_seq_contexts = [seq_context
for seq_context, idx in sorted(
self.binned_motif_to_index_dict.items(),
key=lambda x: x[1])]
def get_categorical(ordered_str_labels: List[str]):
return pd.Categorical(ordered_str_labels,
categories=ordered_str_labels,
ordered=True)
flen_bin_labels = (
list(range(0, self.get_max_single_flen + 1))
+ list(range(
self.get_max_single_flen + self.flen_bin_size, self.get_max_flen + 1, self.flen_bin_size)))
if flen_bin_labels[-1] < self.get_max_flen:
flen_bin_labels.apd(self.get_max_flen)
phred_bin_labels = list(
range(self.phred_bin_size - 1, self.get_max_phred + 1, self.phred_bin_size))
if phred_bin_labels[-1] < self.get_max_phred:
phred_bin_labels.apd(self.get_max_phred)
dim_levels = [get_categorical(ordered_seq_contexts),
get_categorical(['c_bc', 'c_bc_rv', 'w_bc', 'w_bc_rv']),
flen_bin_labels,
phred_bin_labels,
range(1, self.get_max_read_length + 1),
get_categorical(['n_meth', 'n_unmeth'])]
numset_shape = [len(ordered_seq_contexts),
4, # BSSeq-strands
len(flen_bin_labels),
len(phred_bin_labels),
self.get_max_read_length,
2] # meth status
counter_numset = bn.zeros(numset_shape, dtype='u8')
super().__init__(dim_names=dim_names,
dim_levels=dim_levels,
counter_numset=counter_numset,
save_stem=self.save_stem)
def process(self, motif_pileup: MotifPileup) -> None:
"""Extract M-bias stats from MotifPileup
The entire MotifPileup is skipped if it has an undefined
sequence context (e.g. containing N)
Reads are discarded if
- they have a qc_fail_flag
- their bsseq strand could not be deterget_mined
- they have methylation ctotaling status NA, SNP or Ref
Reads are not discarded if they are phred score fails. The phred
score information is recorded as part of the M-bias stats. In
the standard MbiasDeterget_minationRun, the PhredFilter is therefore
not applied at total. You could however apply it if you use this
Counter as part of a larger workflow, because the
phred_fail_flag is not checked here.
M-bias stat dimensions recorded:
- motif
- sequence context
- BSSeq-strand
- fragment length
- position in read
- methylation status
"""
# Can't handle Ns and too short motifs
try:
seq_ctx_idx = self.seq_ctx_idx_dict[
motif_pileup.idx_pos.seq_context]
except KeyError:
return # can't count this MotifPileup
# noinspection PyUnusedLocal
curr_read: BSSeqPileupRead
for curr_read in motif_pileup.reads:
# note: this explicitly does not discard phred_score_fail
# we stratify the M-bias stats by phred score and do
# pseudo-filtering as appropriate for the differenceerent M-bias plots
if (curr_read.qc_fail_flag
or curr_read.bsseq_strand_ind == b_na_ind):
continue
meth_status_flag = curr_read.meth_status_flag
if meth_status_flag == m_flags.is_methylated:
meth_status_index = 0
elif meth_status_flag == m_flags.is_unmethylated:
meth_status_index = 1
else: # SNP, Ref, NA
continue
tlen = absolute(curr_read.alignment.template_length)
if tlen <= self.get_max_single_flen:
tlen_idx = tlen
elif tlen > self.get_max_flen:
tlen_idx = -1
else:
tlen_idx = (self.get_max_single_flen
+ ceil((tlen - self.get_max_single_flen)
/ self.flen_bin_size))
phred = curr_read.baseq_at_pos
if phred > self.get_max_phred:
phred_idx = -1
else:
phred_idx = floor(phred / self.phred_bin_size)
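# Illustrative binning (assumed config values): with get_max_single_flen = 100 and
# flen_bin_size = 10, tlen = 123 maps to index 100 + ceil(23 / 10) = 103; with
# phred_bin_size = 5, phred = 12 maps to index floor(12 / 5) = 2.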
self.counter_numset[seq_ctx_idx,
curr_read.bsseq_strand_ind,
tlen_idx,
phred_idx,
curr_read.pos_in_read,
meth_status_index] += 1
def get_sequence_context_to_numset_index_table(motif_size: int) \
-> Tuple[Dict[str, int], Dict[str, int]]:
""" Return dicts mapping sequence contexts to counter numset indices
Parameters
----------
motif_size: int
size of motifs (e.g. 3 or 5 bp)
Returns
-------
Dict[str, str]
mapping of four letter motif to numset integer index of the corresponding
three letter motif. I.e. several motifs may map to the same index.
Dict[str, int]
mapping of three-letter motif [CGW] to numset integer index.
Every mapping is uniq.
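Examples
--------
Illustrative for motif_size = 5: the full context 'ATCGA' is collapsed to the
three-letter motif 'WWCGW' (A and T both become W), so 'ATCGA' and 'TACGT' map
to the same index in the first returned dictionary.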
"""
if motif_size % 2 != 1:
raise ValueError("Motif size must be an uneven number")
total_bases = ['C', 'G', 'T', 'A']
three_letter_bases = ['C', 'G', 'W']
n_bp_per_side = (motif_size - 1) // 2
binned_bases_set = ([three_letter_bases] * n_bp_per_side
+ [['C']] + [three_letter_bases] * n_bp_per_side)
# note that indicies are given in alphabetical sorting order
total_binned_motifs = sorted([''.join(motif)
for motif in product(*binned_bases_set)])
binned_motif_to_idx_mapping = {motif: i
for i, motif in
enumerate(total_binned_motifs)}
total_bases_set = [total_bases] * n_bp_per_side + [['C']] + [total_bases] * n_bp_per_side
total_5bp_motifs = [''.join(motif) for motif in product(*total_bases_set)]
_5bp_to_three_letter_motif_index_mapping = {
motif: binned_motif_to_idx_mapping[
motif.translate(str.maketrans('CGTA', 'CGWW'))]
for motif in total_5bp_motifs}
return _5bp_to_three_letter_motif_index_mapping, binned_motif_to_idx_mapping
def map_seq_ctx_to_motif(seq_ctx: str, use_classical: bool = True) -> str:
"""Map sequence context strings containing [ACGTW] to motifs
Motifs may be classical: [CG, CHG, CHH] or extended (composed of C,G,W)
# TODO-get_minor: Ns? at the end of chroms?
"""
middle_idx = len(seq_ctx) // 2
if seq_ctx[middle_idx:(middle_idx + 2)] == 'CG':
return 'CG'
if use_classical:
base_mapping = str.maketrans('ACTW', 'HHHH') # G unchanged
else:
base_mapping = str.maketrans('AT', 'WW') # CGW unchanged
seq_suffix = seq_ctx[(middle_idx + 1):(middle_idx + 3)]
motif_suffix = seq_suffix.translate(base_mapping)
motif = 'C' + motif_suffix
return motif
def fit_normlizattionalvariate_plateau(group_df: pd.DataFrame, config: ConfigDict) -> pd.Series:
"""Find the longest possible plateau of good quality
Good quality: standard_op along the plateau below a threshold, ends of the plateau
not differenceerent from other read positions (not implemented yet)
Algorithm:
1. Start with longest possible plateau length (full_value_func read)
2. Check if the plateau has good quality
3. If yes: use the plateau and break
4. Decrease the plateau length by one
5. Check total possibilities of finding a plateau of the given length within the read
6. If there are one or more ways of fitting the plateau with good quality: break and return the best solution
7. If not: go to step 4
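Illustration (assumed parameter values): for a 100 bp read with get_min_plateau_perc = 0.8,
plateau lengths 100, 99, ..., 80 are tried in turn; for each length every start position
is checked, and the first (i.e. longest) length with at least one window passing the
standard-deviation and end-point checks wins, the window with the lowest standard
deviation being chosen among them.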
"""
get_min_perc = config["trimget_ming"]["get_min_plateau_perc"]
get_max_standard_op = config["trimget_ming"]["get_max_standard_op_within_plateau"]
get_min_flen = config['trimget_ming']['get_min_flen_considered_for_trimget_ming']
beta_values = group_df['beta_value']
effective_read_length = len(beta_values)
if effective_read_length < get_min_flen:
return pd.Series([0, 0],
index=['left_cut_end', 'right_cut_end'])
if beta_values.isnull().total():
return pd.Series([0, 0],
index=['left_cut_end', 'right_cut_end'])
get_min_plateau_length = int(effective_read_length * get_min_perc)
for plateau_length in range(effective_read_length, get_min_plateau_length - 1,
-1):
get_max_start_pos = effective_read_length - plateau_length
standard_op_to_beat = get_max_standard_op
best_start = None
for start_pos in range(0, get_max_start_pos):
end_pos = start_pos + plateau_length
curr_beta_values = beta_values.iloc[start_pos:end_pos]
curr_standard_op = curr_beta_values.standard_op()
if curr_standard_op < standard_op_to_beat:
plateau_height = curr_beta_values.average()
left_end_bad = (absolute(curr_beta_values[
0:4] - plateau_height) > 2 * curr_standard_op).any_condition()
right_end_bad = (absolute(curr_beta_values[
-4:] - plateau_height) > 2 * curr_standard_op).any_condition()
if not (left_end_bad or right_end_bad):
standard_op_to_beat = curr_standard_op
best_start = start_pos
best_end = end_pos
if best_start is not None:
break
else:
best_start = 0
best_end = 0
# Pycharm false positive due to for...else flow
# noinspection PyUnboundLocalVariable
return pd.Series([best_start, best_end],
index=['left_cut_end', 'right_cut_end'])
# noinspection PyUnreachableCode
def fit_percentiles(group_df: pd.DataFrame) -> pd.Series:
raise NotImplementedError
# remove hardcoding before using this
get_min_perc = 0.5 # get_min_plateau_length = effective_read_length * get_min_perc
percentiles = (0.02, 0.98)
get_min_percentile_delta = 0.1
get_min_flen = 45
get_max_read_length = 101
get_max_slope = get_min_percentile_delta/get_max_read_length
beta_values = group_df['beta_value']
effective_read_length = len(beta_values)
if effective_read_length < get_min_flen:
return pd.Series([0, 0],
index=['left_cut_end', 'right_cut_end'])
if beta_values.isnull().total():
return pd.Series([0, 0],
index=['left_cut_end', 'right_cut_end'])
get_min_plateau_length = int(effective_read_length * get_min_perc)
for plateau_length in range(effective_read_length, get_min_plateau_length - 1,
-1):
get_max_start_pos = effective_read_length - plateau_length
percentile_delta_to_beat = get_min_percentile_delta
best_start = None
for start_pos in range(0, get_max_start_pos):
end_pos = start_pos + plateau_length
curr_beta_values = beta_values.iloc[start_pos:end_pos]
low_percentile, high_percentile = curr_beta_values.quantile(percentiles)
curr_percentile_delta = high_percentile - low_percentile
if curr_percentile_delta < percentile_delta_to_beat:
# plateau_height = curr_beta_values.average()
# left_end_ok = (curr_beta_values[0:4] > low_percentile).total()
# right_end_ok = (curr_beta_values[-4:] < high_percentile).total()
curr_beta_values_arr = curr_beta_values.values
plateau_end_deltas = (curr_beta_values_arr[0:4, bn.newaxis] -
curr_beta_values_arr[bn.newaxis, -4:])
both_ends_ok = (plateau_end_deltas < get_min_percentile_delta).total()
assert isinstance(both_ends_ok, bn.bool_), type(both_ends_ok)
if both_ends_ok:
regr = linear_model.LinearRegression()
X = bn.arr_range(0, plateau_length)[:, bn.newaxis]
Y = curr_beta_values_arr[:, bn.newaxis]
regr.fit(X, Y)
if regr.coef_[0, 0] <= get_max_slope:
percentile_delta_to_beat = curr_percentile_delta
best_start = start_pos
best_end = end_pos
if best_start is not None:
break
else:
best_start = 0
best_end = 0
# Pycharm false positive due to for...else flow
# noinspection PyUnboundLocalVariable
return pd.Series([best_start, best_end],
index=['left_cut_end', 'right_cut_end'])
def compute_mbias_stats_df(mbias_counter_fp_str: str) -> pd.DataFrame:
"""Compute DataFrame of Mbias-Stats
Parameters
----------
mbias_counter_fp_str: str
Path to pickle of MbiasCounter
Returns
-------
pd.DataFrame:
Index: ['motif', 'seq_context', 'bs_strand', 'flen', 'phred', 'pos']
Columns: ['n_meth', 'n_unmeth', 'beta_value']
takes approx. 7 get_min, mainly due to index sorting, which may be unnecessary.
Sorting to be future-proof at the moment.
"""
print("Creating mbias stats dataframe")
# only necessary while interval levels are coded in MbiasCounter.__init__
mbias_counter = pd.read_pickle(mbias_counter_fp_str)
print("Reading data in, removing impossible strata (pos > flen)")
# convert MbiasCounter.counter_numset to dataframe
# remove positions > flen
# takes approx. 2.5 get_min
mbias_stats_df = (mbias_counter
.get_dataframe()
# TEST
# for interactive testing
# .loc[['WWCGW', 'CCCGC', 'GGCGG', 'CGCGC', 'GCCGG'], :]
# \TEST
.groupby(['flen', 'pos'])
.filter(lambda group_df: (group_df.name[0] - 1
>= group_df.name[1]))
)
print("Adding beta values")
# Add beta value, approx. 1.5 get_min
mbias_stats_df_with_meth = (mbias_stats_df
.loc[:, 'counts']
.unpile_operation('meth_status'))
# columns is categorical index,
# replace with object index for easier extension
mbias_stats_df_with_meth.columns = ['n_meth', 'n_unmeth']
mbias_stats_df_with_meth['beta_value'] = compute_beta_values(
mbias_stats_df_with_meth)
print("Adding motif labels to index")
# prepend motif level to index (~40s)
mbias_stats_df_with_motif_idx = prepend_motif_level_to_index(
mbias_stats_df_with_meth)
print("Sorting")
# sort - takes approx. 3-4.5 get_min with IntegerIndices for flen and phred
mbias_stats_df_with_motif_idx.sort_index(ibnlace=True)
return mbias_stats_df_with_motif_idx
def prepend_motif_level_to_index(mbias_stats_df: pd.DataFrame) -> pd.DataFrame:
# first, create motif column. This way is much faster than using
# apply(map_seq_ctx_to_motif) on seq_contexts (15s)
# create motif column, astotal_counting total rows have CHH seq_contexts
mbias_stats_df["motif"] = pd.Categorical(
['CHH'] * len(mbias_stats_df),
categories=['CG', 'CHG', 'CHH'],
ordered=True)
# find the seq_context labels which are CG and CHG
seq_contexts = (mbias_stats_df.index.levels[0].categories.tolist())
motifs = [map_seq_ctx_to_motif(x) for x in seq_contexts]
motif_is_cg = [x == 'CG' for x in motifs]
motif_is_chg = [x == 'CHG' for x in motifs]
cg_seq_contexts = itertools.compress(seq_contexts, motif_is_cg)
chg_seq_contexts = itertools.compress(seq_contexts, motif_is_chg)
# replace CHH motif label with correct labels at CG / CHG seq_contexts
mbias_stats_df.loc[cg_seq_contexts, "motif"] = "CG"
mbias_stats_df.loc[chg_seq_contexts, "motif"] = "CHG"
# Then set as index and prepend
# This takes 15s with interval flen and phred indices
# *with IntervalIndices for flen and phred, it takes 6-7 get_min*
index_cols = ['motif', 'seq_context', 'bs_strand', 'flen', 'phred', 'pos']
return (mbias_stats_df
.set_index(["motif"], apd=True)
.reorder_levels(index_cols, axis=0))
def compute_classic_mbias_stats_df(mbias_stats_df: pd.DataFrame) -> pd.DataFrame:
"""Compute Mbias-Stats df with only 'standard' levels
Takes ~30s
"""
print("Creating classic mbias stats dataframe")
return (mbias_stats_df
.groupby(level=['motif', 'bs_strand', 'flen', 'pos'])
.total_count()
.groupby(['flen', 'pos'])
.filter(lambda group_df: (group_df.name[0] - 1
>= group_df.name[1]))
.assign(beta_value=compute_beta_values)
)
def mask_mbias_stats_df(mbias_stats_df: pd.DataFrame, cutting_sites_df: pd.DataFrame) -> pd.DataFrame:
"""Cover pos in trimget_ming zcreate_ones (depends on bs_strand, flen...) with NA"""
print("Masking dataframe")
def mask_trimget_ming_zcreate_ones(group_df, cutting_sites_df):
bs_strand, flen, pos = group_df.name
left, right = cutting_sites_df.loc[(bs_strand, flen), ['start', 'end']]
# left, right are indicated as piece(left, right) for 0-based
# position coordinates
left += 1
return left <= pos <= right
return (mbias_stats_df
.groupby(['bs_strand', 'flen', 'pos'])
.filter(mask_trimget_ming_zcreate_ones, dropna=False,
cutting_sites_df=cutting_sites_df)
)
def compute_mbias_stats(config: ConfigDict) -> None:
""" Run standard analysis on M-bias stats"""
fps = config['paths']
os.makedirs(fps['qc_stats_dir'], exist_ok=True, mode=0o770)
if (Path(fps['mbias_stats_trunk'] + '.p').exists()
and config['run']['use_cached_mbias_stats']):
print('Reading mbias stats from previously computed pickle')
mbias_stats_df = pd.read_pickle(fps['mbias_stats_trunk'] + '.p')
print(mbias_stats_df.head())
else:
print("Computing M-bias stats from M-bias counter numset")
mbias_stats_df = compute_mbias_stats_df(fps['mbias_counts'] + '.p')
mbias_stats_df = add_concat_mate_info(mbias_stats_df)
mbias_stats_df = mbias_stats_df.sort_index()
print('Discarding unused phred scores')
n_total = mbias_stats_df["n_meth"] + mbias_stats_df["n_unmeth"]
phred_group_sizes = n_total.groupby("phred").total_count()
phred_bin_has_counts = (phred_group_sizes > 0)
existing_phred_scores = phred_group_sizes.index.values[phred_bin_has_counts]
mbias_stats_df = mbias_stats_df.loc[idxs[:, :, :, :, :, existing_phred_scores], :]
mbias_stats_df.index = mbias_stats_df.index.remove_unused_levels()
save_df_to_trunk_path(mbias_stats_df, fps["mbias_stats_trunk"])
print('Computing derived stats')
compute_derived_mbias_stats(mbias_stats_df, config)
print("DONE")
def add_concat_mate_info(mbias_stats_df: pd.DataFrame) -> pd.DataFrame:
# Alternatively merge mate1 and mate2 ctotals
# index_level_order = list(mbias_stats_df.index.names)
# res = mbias_stats_df.reset_index("bs_strand")
# res["bs_strand"] = res["bs_strand"].replace({"c_bc": "Read 1", "c_bc_rv": "Read 2",
# "w_bc": "Read 1", "w_bc_rv": "Read 2"})
# res["dummy"] = 1
# res = res.set_index(["bs_strand", "dummy"], apd=True)
# res = res.reorder_levels(index_level_order + ["dummy"])
# res = res.groupby(level=index_level_order).total_count().assign(beta_value = compute_beta_values)
print("Adding mate info")
bs_strand_to_read_mapping = {"c_bc": "Mate 1", "c_bc_rv": "Mate 2",
"w_bc": "Mate 1", "w_bc_rv": "Mate 2"}
# ~ 1.5 get_min for CG only
mbias_stats_df["mate"] = (mbias_stats_df
.index
.get_level_values("bs_strand")
.to_series()
.replace(bs_strand_to_read_mapping)
.values
)
# quick
mbias_stats_df = (mbias_stats_df
.set_index("mate", apd=True)
.reorder_levels(["motif", "seq_context",
"mate", "bs_strand",
"flen", "phred", "pos"])
)
return mbias_stats_df
def compute_derived_mbias_stats(mbias_stats_df: pd.DataFrame, config: ConfigDict) -> None:
"""Compute various objects derived from the M-bias stats dataframe
All objects are stored on disc, downstream evaluation is done in
separate steps, e.g. by using the mbias_plots command.
Notes:
- the plateau detection is performed only based on CG context
ctotals, also if information for CHG and CHH context is present.
The cutting sites deterget_mined in this way will then be applied
across total sequence contexts
"""
plateau_detection_params = config['run']['plateau_detection']
# Will be used when more algorithms are implemented
unused_plateau_detection_algorithm = plateau_detection_params.pop('algorithm')
print("Computing cutting sites")
classic_mbias_stats_df = compute_classic_mbias_stats_df(mbias_stats_df)
classic_mbias_stats_df_with_n_total = classic_mbias_stats_df.assign(
n_total = lambda df: df['n_meth'] + df['n_unmeth']
)
cutting_sites = CuttingSites.from_mbias_stats(
mbias_stats_df=classic_mbias_stats_df_with_n_total.loc['CG', :],
**plateau_detection_params)
print("Computing masked M-bias stats DF")
masked_mbias_stats_df = mask_mbias_stats_df(
mbias_stats_df, cutting_sites.df)
print("Adding phred filtering info")
phred_threshold_df = convert_phred_bins_to_thresholds(mbias_stats_df)
phred_threshold_df_trimmed = convert_phred_bins_to_thresholds(
masked_mbias_stats_df)
print("Saving results")
fps = config['paths']
os.makedirs(fps['qc_stats_dir'], exist_ok=True, mode=0o770)
# TODO-refactor: clean up
fps["mbias_stats_classic_trunk"] = fps['mbias_stats_classic_p'].replace('.p', '')
fps["mbias_stats_masked_trunk"] = fps['mbias_stats_masked_p'].replace('.p', '')
fps["adjusted_cutting_sites_df_trunk"] = fps["adjusted_cutting_sites_df_p"].replace('.p', '')
save_df_to_trunk_path(cutting_sites.df,
fps["adjusted_cutting_sites_df_trunk"])
save_df_to_trunk_path(classic_mbias_stats_df,
fps["mbias_stats_classic_trunk"])
save_df_to_trunk_path(masked_mbias_stats_df,
fps["mbias_stats_masked_trunk"])
save_df_to_trunk_path(phred_threshold_df,
fps['mbias_stats_phred_threshold_trunk'])
save_df_to_trunk_path(phred_threshold_df_trimmed,
fps['mbias_stats_masked_phred_threshold_trunk'])
def mbias_stat_plots(
output_dir: str,
dataset_name_to_fp: dict,
compact_mbias_plot_config_dict_fp: Optional[str] = None) -> None:
""" Analysis workflow creating the M-bias plots and report
Creates total M-bias plots specified through the compact
dict representation of the MbiasAnalysisConfig. The config
file refers to shorthand dataset names. These names
are mapped to filepaths in the dataset_name_to_fp dict.
This function is accessible through the cli mbias_plots tool.
The datasets are given as comma-separated key=value pairs.
Args:
output_dir:
All output filepaths are relative to the output_dir. It must
be possible to make the output_dir the working directory.
compact_mbias_plot_config_dict_fp:
Path to the python module containing the M-bias plot config
dict, plus the name of the desired config dict, apded
with '::'. See mqc/resources/default_mbias_plot_config.py
for an example. The default config file also contains
more documentation about the structure of the config file.
dataset_name_to_fp:
dataset name to path mapping
Notes:
Roadmap:
- this function will be refactored into a general PlotAnalysis class
- add_concat sample metadata to output dfs? Currently the sample
metadata are unused
"""
# All filepaths are relative paths, need to go to correct output dir
try:
os.chdir(output_dir)
except:
raise OSError('Cannot enter the specified working directory')
# Load compact mbias plot config dict from python module path
# given as '/path/to/module::dict_name'
if compact_mbias_plot_config_dict_fp is None:
compact_mbias_plot_config_dict_fp = (
get_resource_absolutepath('default_mbias_plot_config.py')
+ '::default_config')
mbias_plot_config_module_path, config_dict_name = (
compact_mbias_plot_config_dict_fp.sep_split('::') # type: ignore
)
# Pycharm cant deal with importlib.util attribute
# noinspection PyUnresolvedReferences
spec = importlib.util.spec_from_file_location(
'cf', mbias_plot_config_module_path)
# Pycharm cant deal with importlib.util attribute
# noinspection PyUnresolvedReferences
cf = importlib.util.module_from_spec(spec)
spec.loader.exec_module(cf) # type: ignore
compact_mbias_plot_config_dict = getattr(cf, config_dict_name)
mbias_plot_configs = get_plot_configs(compact_mbias_plot_config_dict,
dataset_name_to_fp=dataset_name_to_fp)
aggregated_mbias_stats = AggregatedMbiasStats()
create_aggregated_tables(mbias_plot_configs=mbias_plot_configs,
aggregated_mbias_stats=aggregated_mbias_stats)
create_mbias_stats_plots(mbias_plot_configs=mbias_plot_configs,
aggregated_mbias_stats=aggregated_mbias_stats)
mbias_report_config = MbiasAnalysisConfig.from_compact_config_dict(
compact_mbias_plot_config_dict,
dataset_name_to_fp
).get_report_config()
Report({'M-bias': mbias_report_config}).generate(
mqc.filepaths.qc_report_dir
)
# (Path(output_dir) / 'test.png').touch()
class MbiasPlotAxisDefinition:
def __init__(self, share: bool = True,
breaks: Union[int, List[float]] = 5,
limits: Optional[dict] = None,
rotate_labels: bool = True) -> None:
self.breaks = breaks
if limits is None:
self.limits: Dict = {}
self.limits = {'default': 'auto'}
self.share = share
self.rotate_labels = rotate_labels
def __eq__(self, other: Any) -> bool:
if isinstance(other, MbiasPlotAxisDefinition):
return self.__dict__ == other.__dict__
return False
class MbiasPlotParams:
def __init__(self,
y_axis: MbiasPlotAxisDefinition,
x_axis: MbiasPlotAxisDefinition,
panel_height_cm: int = 6, panel_width_cm: int = 6,
theme: str = 'paper',
plot: Optional[List[str]] = None,
) -> None:
self.theme = theme
self.panel_width_cm = panel_width_cm
self.panel_height_cm = panel_height_cm
self.y_axis = y_axis
self.x_axis = x_axis
if plot is None:
self.plot = ['line']
else:
self.plot = plot
def __eq__(self, other: Any) -> bool:
if isinstance(other, type(self)):
return self.__dict__ == other.__dict__
return False
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return str(self.__dict__)
class MbiasPlotMapping:
"""Contains encoding and facetting information"""
def __init__(self, x: str, y: str,
color: Optional[str] = None,
detail: Optional[str] = None,
column: Optional[str] = None,
row: Optional[str] = None,
# Implemented later
# column: Optional[Union[str, List, Tuple]] = None,
# row: Optional[Union[str, List, Tuple]] = None,
wrap: Optional[int] = None) -> None:
"""
Facetting (row and col) may be done on combinations of variables. The
facets are sorted based on the order of the facetting variables in the
col and row values. Row and col may be specified as str, list or
tuple, but will always be converted to tuple during init
Args:
x
y
color
column
row
wrap: wrap deterget_mines the number of columns/rows if only row or
column facetting is specified
"""
self.x = x
self.y = y
self.color = color
self.detail = detail
# Multiple fields for facetting will be implemented later
# ------------------------------
# if column is None:
# self.column = column
# elif isinstance(column, str):
# self.column = (column,)
# elif isinstance(column, (tuple, list)):
# self.column = tuple(column)
# else:
# raise TypeError('Unexpected type for col')
#
# if row is None:
# self.row = row
# elif isinstance(row, str):
# self.row = (row, )
# elif isinstance(row, (tuple, list)):
# self.row = tuple(row)
# else:
# raise TypeError('Unexpected type for row')
self.column = column
self.row = row
self.wrap = wrap
# noinspection PyIncorrectDocstring
def get_plot_aes_dict(self, include_facetting: bool = False,
x_name: str = 'x', y_name: str = 'y', color_name: str = 'color',
column_name: str = 'column', row_name: str = 'row',
detail_name: str = 'detail') -> Dict:
"""Get mapping of column names to encoding channels (aes. mappings)
Args:
include_facetting: Include variables for row facetting
and column facetting. Will not include wrap parameter
Returns:
dict with channel name -> column name mapping
Notes:
Vega encoding channels are equivalent to plotnine aesthetic
mappings as well as total the other similar concepts in related
tools. To adapt to differenceerent tools, the channel names can be set.
"""
base_dict = {x_name: self.x,
y_name: self.y,
color_name: self.color,
detail_name: self.detail}
if include_facetting:
base_dict.update({row_name: self.row,
column_name: self.column})
base_dict = {k: v for k, v in base_dict.items()
if v is not None}
return base_dict
def get_facetting_vars(self, column_name: str = 'column', row_name:str = 'row'):
return {k: v for k, v in {column_name: self.column,
row_name: self.row}.items()
if v is not None}
def get_total_agg_variables_unordered(self) -> Set[str]:
"""Get dictionary with total variables to be used in agg. groupby
Notes:
- Returns set (which has no order) to facilitate comparison
of variable lists, also in tests. Otherwise, tests would
have to know the hardcoded order.
- the 'dataset' variable is not considered, because
aggregation currently happens per dataset
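Illustrative example (hypothetical mapping): with x='pos', row='motif',
color='flen' and detail=None, the returned set is {'pos', 'motif', 'flen'};
'dataset' and 'statistic' would be dropped even if they were mapped.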
"""
return {x for x in more_itertools.collapse(
[self.x, self.column, self.row, self.color, self.detail])
if x is not None and x not in ['dataset', 'statistic']}
def __eq__(self, other: Any) -> bool:
if isinstance(other, MbiasPlotMapping):
return self.__dict__ == other.__dict__
return False
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return str(self.__dict__)
class MbiasPlotConfig:
def __init__(self,
datasets: Dict[str, str],
aes_mapping: MbiasPlotMapping,
plot_params: MbiasPlotParams,
pre_agg_filters: Union[dict, None] = None,
post_agg_filters: Union[dict, None] = None) -> None:
# Sanity checks
if pre_agg_filters:
self._check_filter_dicts(pre_agg_filters)
if post_agg_filters:
self._check_filter_dicts(post_agg_filters)
assert isinstance(datasets, dict)
self.datasets = datasets
self.aes = aes_mapping
self.plot_params = plot_params
self.pre_agg_filters = pre_agg_filters
self.post_agg_filters = post_agg_filters
def __eq__(self, other: Any) -> bool:
if isinstance(other, MbiasPlotConfig):
return self.__dict__ == other.__dict__
return False
def __hash__(self):
return total_count([hash_dict(x) for x in [
tuple(self.datasets.values()),
self.aes.__dict__,
self.plot_params.__dict__,
self.pre_agg_filters,
self.post_agg_filters
]])
def get_str_repr_for_filename_construction(self) -> str:
return str(self.__hash__())
def __str__(self):
return str(self.__dict__)
@staticmethod
def _check_filter_dicts(filter_dict: Dict) -> None:
"""Make sure that filter dicts only contain lists of scalars
Filtering is not meant to remove index levels. The filtering values
are used for indexing. Because scalar values would remove their corresponding
index levels, they are not totalowed.
"""
for filter_value in filter_dict.values():
if not isinstance(filter_value, list):
raise TypeError(f'Filter values must be list, but got this'
f' value of type {type(filter_value)}:'
f' {filter_value}')
def get_plot_configs(mbias_plot_config: Dict,
dataset_name_to_fp: Dict[str, str]) -> List[MbiasPlotConfig]:
""" Get MbiasPlotConfig objects for plotting function
Args:
mbias_plot_config: User-specified dictionary detailing the required
M-bias plots in compact form
dataset_name_to_fp: Mapping of the shorthand dataset names
from the compact plot config to the actual filepaths
Returns:
List with one MbiasPlotConfig object per plotting task. Note that one
task may act on several separate datasets at the same time.
Note:
Because MbiasPlotConfig objects may use several datasets at the
same time, this list is not suited for generating the required
aggregated variants of the used datasets. The function ... can
extract aggregation task configs from the returned
MbiasPlotConfig objects provided by this function
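Illustrative shape of the compact config dict (hypothetical values; see
default_mbias_plot_config.py for the reference example):
    {'defaults': {...},
     'group_1': {'datasets': [['full', 'trimmed']],
                 'aes_mappings': [{'x': 'pos', 'y': 'beta_value', 'row': 'motif'}],
                 'plot_params': {...},
                 'pre_agg_filters': {'motif': ['CG']}}}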
"""
mbias_plot_config = deepcopy(mbias_plot_config)
try:
defaults = mbias_plot_config.pop('defaults')
except KeyError:
defaults = {}
mbias_plot_configs = []
for plot_group_name, plot_group_dict in mbias_plot_config.items():
plot_group_dict = update_nested_dict(
base_dict=defaults, custom_dict=plot_group_dict,
totalow_new_keys=True)
try:
for curr_datasets, curr_aes_mapping in product(
plot_group_dict['datasets'], plot_group_dict['aes_mappings']):
assert isinstance(curr_datasets, (str, list)), \
f'Curr_dataset spec {curr_datasets} is not str, list'
mbias_plot_configs.apd(MbiasPlotConfig(
datasets=subset_dict(curr_datasets, dataset_name_to_fp),
aes_mapping=MbiasPlotMapping(
**curr_aes_mapping
),
pre_agg_filters = plot_group_dict.get('pre_agg_filters'),
post_agg_filters = plot_group_dict.get('post_agg_filters'),
plot_params=MbiasPlotParams(**plot_group_dict['plot_params'])
))
except (KeyError, TypeError) as e:
raise ValueError(f'Incomplete configuration for {plot_group_name}. '
f'Original message: {e}')
return mbias_plot_configs
@dataclass()
class MbiasStatAggregationParams:
dataset_fp: str
variables: Set[str]
pre_agg_filters: Optional[Dict[str, List]]
def __hash__(self):
return hash(json.dumps(self.pre_agg_filters, sort_keys=True)
+ json.dumps(sorted(list(self.variables)))
+ self.dataset_fp)
# def __eq__(self, other):
# if (isinstance(other, type(self))
# and other.dataset_fp == self.dataset_fp
# and other.variables == self.variables
# and other.pre_agg_filters.equals(self.pre_agg_filters)):
# return True
# return False
#
class AggregatedMbiasStats:
""" Precompute and retrieve aggregated M-bias stats
Aggregation calculations are only performed if the output file is
not yet present or older than the ibnut file. Currently, this class
is written under the astotal_countption that aggregated stats are
precomputed in a first step, and may be retrieved several times in
subsequent steps
"""
def __init__(self) -> None:
self.output_dir = mqc.filepaths.aggregated_mbias_stats_dir
def _create_agg_dataset_fp(self, params: MbiasStatAggregationParams) -> Path:
basename = (params.dataset_fp.replace('/', '-') + '_'
+ '-'.join(params.variables) + '_'
+ json.dumps(params.pre_agg_filters).replace(' ', '-')
+ '.p')
return self.output_dir / basename
def get_aggregated_stats(self, params: MbiasStatAggregationParams) -> pd.DataFrame:
"""Retrieve precomputed M-bias stats"""
fp = self._create_agg_dataset_fp(params)
return pd.read_pickle(fp)
def precompute_aggregated_stats(
self, params: MbiasStatAggregationParams, mbias_stats_df: pd.DataFrame) -> None:
fp = self._create_agg_dataset_fp(params)
if (not fp.exists()
or fp.stat().st_mtime < Path(params.dataset_fp).stat().st_mtime):
aggregated_df = aggregate_table(
mbias_stats_df,
unordered_variables=params.variables, pre_agg_filters=params.pre_agg_filters)
fp.parent.mkdir(parents=True, exist_ok=True)
aggregated_df.to_pickle(fp)
def create_aggregated_tables(
mbias_plot_configs: List[MbiasPlotConfig],
aggregated_mbias_stats: AggregatedMbiasStats) -> None:
"""Compute aggregation tasks derived from MbiasPlotConfigs
Aggregation tasks per dataset are deterget_mined from the
MbiasPlotConfigs. Note that one plot config may contain several
datasets, but aggregation results are computed and cached per
(single) dataset. Aggregation tasks are computed by passing the task
definitions to the AggregatedMbiasStats.precompute_aggregated_stats
method. This method will take care of storing the aggregated data
and will recognize if the data are already cached in a sufficiently
recent version.
Args:
mbias_plot_configs
aggregated_mbias_stats
Note:
Aggregation tasks are performed explicitely (as opposed to
implicitely within the plotting tasks) because this saves
significant time during plot optimization. Perhaps even more
importantly this totalows quick combination of aggregated
dataframes across many_condition samples for cohort-wide plots
"""
# TODO-get_minor: make sure that absoluteolute dataset path is used for hashing/storage or change docstring
# Collect individual (per dataset) aggregation tasks
# M-bias plot configs may include more than one dataset, but aggregation
# is performed per dataset, so we need to further sep_split the MbiasPlotConfigs
single_dataset_plot_configs = []
for curr_mbias_plot_config in mbias_plot_configs:
for curr_dataset_fp in curr_mbias_plot_config.datasets.values():
single_dataset_plot_configs.apd(
MbiasStatAggregationParams(
dataset_fp=curr_dataset_fp,
variables=curr_mbias_plot_config.aes.get_total_agg_variables_unordered(),
pre_agg_filters=curr_mbias_plot_config.pre_agg_filters
))
# avoid duplicates to avoid interfering writes when partotalel processing
uniq_single_dataset_plot_configs = set(single_dataset_plot_configs)
# Group by dataset - totalows either loading datasets sequentitotaly to
# save memory, or to start one process per dataset
plots_by_dataset = tz.groupby(lambda x: x.dataset_fp,
uniq_single_dataset_plot_configs)
# noinspection PyUnusedLocal
agg_configs: List[MbiasStatAggregationParams]
for dataset_fp, agg_configs in plots_by_dataset.items():
curr_mbias_stats_df = pd.read_pickle(dataset_fp)
for curr_agg_config in agg_configs:
aggregated_mbias_stats.precompute_aggregated_stats(
params=curr_agg_config,
mbias_stats_df=curr_mbias_stats_df)
def aggregate_table(
mbias_stats_df: pd.DataFrame,
unordered_variables: Set[str],
pre_agg_filters: Optional[Dict[str, List]] = None) -> pd.DataFrame:
""" Prepare aggregated M-bias stats variant for the corresponding plot
Will first apply filtering as per the pre_agg_filter dict. This dict
may only contain list-based specification of level values which are
to be retained for a given index level. Therefore, index levels are
not removed by filtering.
In a second step, groupby the plot variables (maintaining the
original order in the index) and do a total_count aggregation. Beta values
are recomputed.
Args:
mbias_stats_df: Arbitrary index levels. Must contain columns
n_meth and n_unmeth. May contain beta_value column.
unordered_variables: All variables which will be used in the
corresponding plot
pre_agg_filters: dict mapping index levels to *lists* of values
which should exclusively be retained
Returns: The filtered and aggregated DataFrame.
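Example (illustrative): with unordered_variables={'pos', 'bs_strand'} and
pre_agg_filters={'motif': ['CG']}, only CG rows are kept (the 'motif' level itself
is preserved), the frame is grouped by ['bs_strand', 'pos'] following the original
index-level order, n_meth and n_unmeth are summed and beta_value is recomputed
from the sums.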
"""
# perform sanity checks, this is the first point filter_condition we know which index
# levels we are actutotaly dealing with
assert_match_between_variables_and_index_levels(
variables=unordered_variables,
index_level_names=mbias_stats_df.index.names
)
ordered_groupby_vars = [x for x in mbias_stats_df.index.names
if x in unordered_variables]
if pre_agg_filters: # may be None
assert_match_between_variables_and_index_levels(
variables=list(pre_agg_filters.keys()),
index_level_names=mbias_stats_df.index.names
)
mbias_stats_df = mbias_stats_df.loc[nidxs(**pre_agg_filters), :]
agg_df = (mbias_stats_df
.groupby(ordered_groupby_vars)
.total_count()
.assign(beta_value=compute_beta_values)
)
return agg_df
def calculate_plot_df(mbias_stats_df: pd.DataFrame, complete_aes: Dict, flens_to_display: List[int]) -> pd.DataFrame:
# We need to aggregate variables which are not part of the plot
# ourselves, seaborn can't do that
# For this purpose, find total variables we use for aesthetics,
# then use these to do a groupby.total_count
groupby_vars = (
[val for key, val in complete_aes.items()
if (val is not None) and key != "y"])
# We can't show all flens. The selection of flens to display is a config param
if 'flen' in groupby_vars:
# TODO-minor: make robust against changes in index levels
idx = idxs[:, :, :, :, flens_to_display]
mbias_stats_df = mbias_stats_df.loc[idx, :]
# Aggregate over variables which are not aesthetics in the plot
# Recompute beta values
plot_df = (mbias_stats_df
.groupby(groupby_vars)
.total_count()
.assign(beta_value=compute_beta_values)
.reset_index()
)
# Remove unused levels in the index and in categoricals
# TODO-minor: make more general
if "seq_context" in plot_df:
plot_df["seq_context"].cat.remove_unused_categories(ibnlace=True)
if "phred" in plot_df:
plot_df["phred"] = pd.Categorical(plot_df["phred"])
if "flen" in plot_df:
plot_df["flen"] = pd.Categorical(plot_df["flen"])
# drop NAs so that lines are drawn "across" the missing values
# https://pile_operationoverflow.com/questions/9617629/connecting-across-missing-values-with-geom-line
plot_df = plot_df.loc[~plot_df[complete_aes["y"]].isnull(), :]
# plot_df = plot_df.rename(columns=plot_df_var_names)
# need to discard the index because it may have "holes";
# otherwise the df can't be serialized to feather and used from R ggplot
plot_df = plot_df.reset_index(drop=True)
return plot_df
class MbiasAnalysisConfig:
"""Configuration of the analysis workflow creating M-bias plots"""
def __init__(self, grouped_mbias_plot_configs: Dict,
dataset_name_to_fp: Mapping[str, str]) -> None:
"""MbiasAnalysiConfig default constructor
In most cases, one of the convenience
constructors is the better choice.
Args:
grouped_mbias_plot_configs:
Mapping of section names to List[MbiasPlotConfig].
The section names are used during plot generation.
dataset_name_to_fp:
Mapping of dataset shorthand names to dataset filepaths
Notes:
- Roadmap
- allow multi-level (nested) sections. This will require switching
to a recursive processing function in the clients of this class
"""
self.grouped_mbias_plot_configs = grouped_mbias_plot_configs
self.dataset_name_to_fp = dataset_name_to_fp
@staticmethod
def from_compact_config_dict(mbias_plot_config: Dict, dataset_name_to_fp: Mapping[str, str]):
"""Constructor based on compact config dict for the analysis
Args:
mbias_plot_config: User-specified dictionary detailing the required
M-bias plots in compact form. See default_mbias_plot_config.py
for example
dataset_name_to_fp: Mapping of the shorthand dataset names
from the compact plot config to the actual filepaths
Returns:
MbiasAnalysisConfig instance
"""
mbias_plot_config = deepcopy(mbias_plot_config)
try:
defaults = mbias_plot_config.pop('defaults')
except KeyError:
defaults = {}
grouped_mbias_plot_configs: Dict[str, Any] = {}
for plot_group_name, plot_group_dict in mbias_plot_config.items():
grouped_mbias_plot_configs[plot_group_name] = []
plot_group_dict = update_nested_dict(
base_dict=defaults, custom_dict=plot_group_dict,
totalow_new_keys=True)
try:
for curr_datasets, curr_aes_mapping in product(
plot_group_dict['datasets'], plot_group_dict['aes_mappings']):
assert isinstance(curr_datasets, (str, list)), \
f'Curr_dataset spec {curr_datasets} is not str, list'
grouped_mbias_plot_configs[plot_group_name].apd(MbiasPlotConfig(
datasets=subset_dict(curr_datasets, dataset_name_to_fp),
aes_mapping=MbiasPlotMapping(
**curr_aes_mapping
),
pre_agg_filters=plot_group_dict.get('pre_agg_filters'),
post_agg_filters=plot_group_dict.get('post_agg_filters'),
plot_params=MbiasPlotParams(**plot_group_dict['plot_params'])
))
except (KeyError, TypeError) as e:
raise ValueError(f'Incomplete configuration for {plot_group_name}. '
f'Original message: {e}')
return MbiasAnalysisConfig(grouped_mbias_plot_configs=grouped_mbias_plot_configs,
dataset_name_to_fp=dataset_name_to_fp)
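# Rough shape of a compact config dict as implied by the parsing above;
# the group name, aesthetics and paths are invented for illustration
# (see default_mbias_plot_config.py for the authoritative example):
#
#   compact_config = {
#       'defaults': {
#           'plot_params': {...},
#       },
#       'basic_plots': {
#           'datasets': ['full', 'trimmed'],
#           'aes_mappings': [
#               {'x': 'pos', 'y': 'beta_value', 'row': 'bs_strand'},
#           ],
#           'plot_params': {...},
#       },
#   }
#   analysis_config = MbiasAnalysisConfig.from_compact_config_dict(
#       compact_config,
#       dataset_name_to_fp={'full': '/path/to/full.p',
#                           'trimmed': '/path/to/trimmed.p'})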
def get_report_config(self) -> Dict[str, Dict]:
"""Return config dict in the format used by figure_report.Report
Returns:
A mapping of sections to their contained figures, with
titles. The M-bias plot config objects are turned
into figure definition dicts, i.e. they are represented by
the filepath where the plot can be found, as well as
a title derived from the MbiasPlotConfig information.
"""
report_dict: Dict = {}
def get_figure_config(mbias_plot_config: MbiasPlotConfig):
title = 'Datasets: ' + ', '.join(mbias_plot_config.datasets) + '\n\n'
title += json.dumps(mbias_plot_config
.aes
.get_plot_aes_dict(include_facetting=True),
sort_keys=True) + '\n'
# When the MbiasPlotStrip class is created, this can be replaced
# by calling its filepath query method
path = (mqc.filepaths.mbias_plots_trunk.name
+ mbias_plot_config.get_str_repr_for_filename_construction()
+ '.json')
figure_config = {'path': path, 'title': title}
return figure_config
for group_name, mbias_plot_configs_list in self.grouped_mbias_plot_configs.items():
report_dict[group_name] = {'figures': []}
for mbias_plot_config in mbias_plot_configs_list:
report_dict[group_name]['figures'].apd(
get_figure_config(mbias_plot_config)
)
return report_dict
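# The returned report config is a nested dict roughly of this shape; the
# section name, path and title shown here are invented for illustration:
#
#   {
#       'basic_plots': {
#           'figures': [
#               {'path': 'mbias_plots_<config-derived-name>.json',
#                'title': 'Datasets: full\n\n{"x": "pos", ...}\n'},
#           ],
#       },
#   }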
def create_mbias_stats_plots(mbias_plot_configs: List[MbiasPlotConfig],
aggregated_mbias_stats: AggregatedMbiasStats) -> None:
for curr_plot_config in mbias_plot_configs:
per_dataset_dfs = []
for curr_dataset_fp in curr_plot_config.datasets.values():
curr_df = aggregated_mbias_stats.get_aggregated_stats(
params = MbiasStatAggregationParams(
dataset_fp=curr_dataset_fp,
variables=curr_plot_config.aes.get_total_agg_variables_unordered(),
pre_agg_filters=curr_plot_config.pre_agg_filters
))
if curr_plot_config.post_agg_filters:
relevant_post_agg_filters = {
k: v for k, v in curr_plot_config.post_agg_filters.items()
if k in curr_df.index.names}
if relevant_post_agg_filters:
# Raise if elements which should pass the filter
# are missing (this will not be necessary in
# the future, as this error will be raised
# automatically in one of the next pandas versions)
for index_col_name, elem_list in relevant_post_agg_filters.items():
unknown_elems = (
set(elem_list)
- set(curr_df.index.get_level_values(index_col_name).uniq()))
if unknown_elems:
raise ValueError(
'Post-agg filter contains unknown elements.\n'
f'Filter: {relevant_post_agg_filters}\n'
f'Data head: {curr_df.head()}')
curr_df = curr_df.loc[nidxs(**relevant_post_agg_filters), :]
per_dataset_dfs.apd(curr_df)
full_value_func_plot_df = pd.concat(
per_dataset_dfs, axis=0,
keys=curr_plot_config.datasets.keys(),
names=['dataset'] + per_dataset_dfs[0].index.names)
chart = create_single_mbias_stat_plot(curr_plot_config, full_value_func_plot_df)
target_fp = (mqc.filepaths.mbias_plots_trunk.with_name(
mqc.filepaths.mbias_plots_trunk.name
+ curr_plot_config.get_str_repr_for_filename_construction()
+ '.json'))
print(target_fp)
target_fp.parent.mkdir(parents=True, exist_ok=True)
try:
chart.save(str(target_fp))
except ValueError as e:
print('Error working on: ', curr_plot_config)
raise e
def create_single_mbias_stat_plot(plot_config: MbiasPlotConfig,
df: pd.DataFrame) -> alt.Chart:
assert set(df.columns.values) == {'n_meth', 'n_unmeth', 'beta_value'}
if plot_config.aes.y == 'value':
df.columns.name = 'statistic'
df = df.pile_operation().to_frame('value')
x_zoom = alt.selection_interval(bind='scales', encodings=['x'],
zoom="wheel![event.shiftKey]")
y_zoom = alt.selection_interval(bind='scales', encodings=['y'])
chart = (alt.Chart(df.reset_index())
.mark_line()
.encode(**plot_config.aes.get_plot_aes_dict(include_facetting=False))
.properties(selection=x_zoom + y_zoom)
)
facet_dict = plot_config.aes.get_facetting_vars()
if facet_dict:
chart = (chart
.facet(**facet_dict)
.resolve_scale(x='shared', y='independent')
)
return chart
def convert_phred_bins_to_thresholds(mbias_stats_df: pd.DataFrame) -> pd.DataFrame:
"""Takes full_value_func Mbias stats df and returns CG only, flen agg df
with phred bins converted to phred thresholds
The operation would take prohibitively long on the entire dataframe
(> 500 get_min ?) in the current implementation
Takes approx. 2 get_min
**Important:** This function astotal_countes that mbias_stats_df is sorted
"""
# *This function astotal_countes that mbias_stats_df is sorted*
non_flen_levels = [n for n in mbias_stats_df.index.names if n != "flen"]
non_flen_phred_levels = [n for n in non_flen_levels if n != "phred"]
print("Aggregating counts with same flen")
mbias_stats_df_cg = mbias_stats_df.loc[idxs['CG':'CG'], :] # type: ignore
# ~ 15s
mbias_stats_df_cg_flen_agg = (mbias_stats_df_cg
.drop(['beta_value'], axis=1)
.groupby(non_flen_levels)
.total_count())
print("Computing phred threshold scores")
# ~ 1.5 get_min
def compute_phred_threshold_counts_for_group(ser: pd.Series) -> pd.Series:
"""Will work on n_meth and n_unmeth"""
cum_ser = ser.cumtotal_count()
return cum_ser[-1] - cum_ser
res = (mbias_stats_df_cg_flen_agg
.groupby(non_flen_phred_levels)
.transform(compute_phred_threshold_counts_for_group)
.assign(beta_value=compute_beta_values)
)
# Discard the highest phred bin - it has no events left after filtering
# noinspection PyUnusedLocal
# pylint: disable=unused-variable
phred_idx = res.index.get_level_values("phred").uniq()[:-2] # pylint: disable=unused-variable
res = res.query("phred in @phred_idx")
return res
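# Worked toy example of the cumulative-sum trick used in
# compute_phred_threshold_counts_for_group (numbers invented):
#
#   counts per phred bin:      [5, 3, 2]    e.g. bins <10, 10-20, >=20
#   cum_ser = cumulative sum:  [5, 8, 10]
#   cum_ser[-1] - cum_ser:     [5, 2, 0]
#
# i.e. each phred bin is converted into the number of events above that
# bin, which is the count that would remain after filtering at the
# corresponding phred threshold.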
def compute_beta_values(df: pd.DataFrame) -> pd.Series:
return df['n_meth'] / (df['n_meth'] + df['n_unmeth'])
def save_df_to_trunk_path(df: pd.DataFrame, trunk_path: str) -> None:
print(f"Saving {op.basename(trunk_path)}")
print("Saving pickle")
df.to_pickle(trunk_path + '.p')
print("Saving feather")
df.reset_index().to_feather(trunk_path + '.feather')
def cutting_sites_df_has_correct_format(cutting_sites_df: pd.DataFrame) -> bool:
# False positive for the all() call
# noinspection PyUnresolvedReferences
return (not cutting_sites_df.empty
and list(cutting_sites_df.columns.values) == ['start', 'end']
and (cutting_sites_df.dtypes == bn.int64).total()
and ['bs_strand', 'flen'] == list(cutting_sites_df.index.names)
and cutting_sites_df.columns.name == 'cutting_site'
and cutting_sites_df.index.get_level_values('bs_strand').dtype.name == 'category'
)
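# Sketch of a cutting sites df that would pass the checks above; the
# concrete values are invented for illustration:
#
#   cutting_site        start  end
#   bs_strand  flen
#   w_bc       100          9   91
#              101          9   92
#   c_bc       100          9   91
#
# i.e. a ('bs_strand', 'flen') MultiIndex with a categorical bs_strand
# level, int64 'start'/'end' columns, and columns.name == 'cutting_site'.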
# Adding more index levels is in general not a problem.
# Some methods would have to be adapted; known candidates (no guarantee of completeness):
# - as_numset()
# @invariant('Meets cutting site df format',
#            lambda inst: cutting_sites_df_has_correct_format(inst.df))
class CuttingSites:
"""CuttingSites for M-bias trimget_ming
Start and end indicate the python piece containing the plateau
positions. I.e. total zero-based positions in [start, end[ are in
the plateau.
See the inverseariant checks for further format specs.
"""
def __init__(self, cutting_sites_df: pd.DataFrame, get_max_read_length: int) -> None:
self.df = cutting_sites_df
self.get_max_read_length = get_max_read_length
@staticmethod
def from_mbias_stats(mbias_stats_df: pd.DataFrame, **kwargs) -> 'CuttingSites':
"""Detect CuttingSites in M-bias stats
Args:
**kwargs: passed on to the plateau detection algorithm.
Selecting the algorithm is not yet implemented; at the
moment, the kwargs are passed directly to the default choice
(:class:`.BinomPvalueBasedCuttingSiteDeterget_mination`)
"""
try:
cutting_sites_df = BinomPvalueBasedCuttingSiteDeterget_mination(
mbias_stats_df=mbias_stats_df, **kwargs).compute_cutting_site_df()
except TypeError:
print(f'The plateau detection parameters {kwargs} do not match the '
f'signature of the selected plateau detection algorithm')
raise
# this line is duplicated in BinomPvalueBasedCuttingSiteDeterget_mination;
# it should be moved to the (yet to be written) MbiasStats class
get_max_read_length = mbias_stats_df.index.get_level_values('pos').get_max()
return CuttingSites(
cutting_sites_df=cutting_sites_df,
get_max_read_length=get_max_read_length)
@staticmethod
def from_rel_to_frag_end_cutting_sites(
cut_site_spec: Mapping[str, Tuple[int, int]], get_max_read_length: int) \
-> 'CuttingSites':
"""Construct from #bp to be removed from the fragment ends
The get_maximum fragment length requiring a specific definition
of the plateau positions is automatictotaly deterget_mined
based on the get_max_read_length and the cut_site_spec.
(All fragment lengths larger than the get_maximum fragment length
defined in a CuttingSites object are automatictotaly
treated like the get_maximum defined fragment length.)
Examples:
>>> cut_site_spec = dict(
>>> w_bc = (0, 9),
>>> c_bc = (0, 9),
>>> w_bc_rv = (9, 0),
>>> c_bc_rv = (9, 0),
>>> )
>>> cutting_sites = CuttingSites.from_rel_to_frag_end_cutting_sites(
>>> cut_site_spec=cut_site_spec, get_max_read_length=125)
"""
get_max_n_bp_removed_from_end = get_max([elem[1] for elem in cut_site_spec.values()])
get_max_flen = get_max_read_length + get_max_n_bp_removed_from_end
midx = pd.MultiIndex.from_product([
pd.Categorical(bstrand_idxs._fields, ordered=True),
range(0, get_max_flen + 1)], names=['bs_strand', 'flen'])
end_col = bn.tile(list(range(0, get_max_flen + 1)), 4).convert_type('i8')
cutting_sites_df = pd.DataFrame(
{'start': -1, 'end': end_col}, index=midx)
cutting_sites_df.columns.name = 'cutting_site'
for strand in bstrand_idxs._fields:
cutting_sites_df.loc[strand, 'start'] = cut_site_spec[strand][0]
cutting_sites_df.loc[[strand], 'end'] = (cutting_sites_df.loc[[strand], 'end'] -
cut_site_spec[strand][1])
bad_fragment_pos_are_outside_of_read_length = cutting_sites_df['end'] >= get_max_read_length
cutting_sites_df.loc[bad_fragment_pos_are_outside_of_read_length, 'end'] = \
get_max_read_length
cutting_sites_df.loc[cutting_sites_df['start'] >= cutting_sites_df['end'], :] = 0
# implies:
# cutting_sites_df.loc[cutting_sites_df['end'] < 0, 'end'] = 0
# cutting_sites_df.loc[cutting_sites_df['end'] == 0, 'start'] = 0
return CuttingSites(
cutting_sites_df=cutting_sites_df,
get_max_read_length=get_max_read_length
)
def as_numset(self) -> bn.ndnumset:
full_value_func_midx = pd.MultiIndex.from_product([
pd.Categorical(bstrand_idxs._fields, ordered=True),
range(self.df.index.get_level_values('flen').get_min(),
self.df.index.get_level_values('flen').get_max() + 1)
], names=['bs_strand', 'flen'])
df = self.df.reindex(index=full_value_func_midx).fillna(method='bfill')
df = df.convert_type('i8', errors='raise', copy=True)
df = df.pile_operation().to_frame('cut_pos').reset_index()
df['bs_strand'].cat.categories = range(4)
df['cutting_site'] = df['cutting_site'].replace({'start': 0, 'end': 1})
get_max_flen = df['flen'].get_max()
arr = bn.zeros((4, (get_max_flen + 1), 2), dtype='i8')
arr[df['bs_strand'], df['flen'], df['cutting_site']] = df['cut_pos']
return arr
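# The array returned above has shape (4, get_max_flen + 1, 2) and is
# indexed as follows (a reading sketch, not additional behavior):
#
#   arr[bs_strand_code, flen, 0]  -> start of the plateau slice
#   arr[bs_strand_code, flen, 1]  -> end of the plateau slice
#
# where bs_strand_code is the categorical code 0..3 of the BS-Seq strand
# and fragment lengths missing from the df were backfilled from the next
# defined fragment length before conversion.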
def plot(self, path: str) -> None:
"""Create CuttingSites line plot
Plot start and end values for all fragment lengths, with
BS-Seq strands facetted across the columns.
The CuttingSites may optionally be stratified by motif, i.e.
a motif index may be present or absent, and it may have arbitrary
levels. If a motif index is present, the motifs will be displayed
via row facetting.
"""
# TODO-minor: adjust the start and end values to account for
# their definition as slice boundaries
if 'motif' in self.df.index.names:
row_facetting_dict = {'row': 'motif'}
else:
row_facetting_dict = {}
plot_df = self.df.copy()
plot_df.columns.name = 'cut_end'
plot_df = plot_df.pile_operation().to_frame('cut_pos').reset_index()
(alt.Chart(plot_df)
.mark_line()
.encode(x='flen:Q', y='cut_pos:Q', color='cut_end:N')
.facet(column='bs_strand:N',
**row_facetting_dict
)
).interactive().save(path, webdriver='firefox')
class BinomPvalueBasedCuttingSiteDeterget_mination:
"""Detect unlikely deviations from estimated global methylation level
Often, even if M-bias is present, it mainly affects certain
BS-Seq strands, and certain fragment lengths. This algorithm
first estimates the unbiased global methylation level based on the
set of fragment lengths and BS-Seq strands defined as 'unbiased'
by **plateau_flen** and **plateau_bs_strands**. Then it detects
significant deviations from this plateau and translates them
into BS-Seq strand and fragment length-specific cutting sites.
Args:
get_min_plateau_length: Minimum number of bp unaffected by M-bias
required to include a stratum into the methylation calls.
totalow_slope: Whether the M-bias curve is allowed to have a slope.
get_max_slope: Maximum allowed slope of
the M-bias curve, if a slope is allowed.
plateau_flen: Fragment lengths >= plateau_flen are
used to estimate the correct global methylation level
plateau_bs_strands: one or more of [w_bc, c_bc,
w_bc_rv, c_bc_rv]. Only these BS-Seq strands are used
to estimate the correct global methylation level
always_accept_distance_from_plateau: beta values within
the estimated global methylation level +- always_accept_distance_from_plateau
are never rejected
plateau_detection_step_size: increase to improve the
estimate of the true global methylation level and slope of
the M-bias curve. Computation time increases with O(n^3).
"""
def __init__(self, mbias_stats_df: pd.DataFrame, totalow_slope: bool = True,
get_min_plateau_length: int = 30, get_max_slope: float = 0.0006,
plateau_flen: int = 210,
plateau_bs_strands: Tuple[str, ...] = ('c_bc', 'w_bc'),
always_accept_distance_from_plateau: float = 0.02,
plateau_detection_step_size: int = 1000) -> None:
self.plateau_flen = plateau_flen
self.get_max_slope = get_max_slope
# this line is duplicated in CuttingSites.from_mbias_stats;
# it should be moved to the (yet to be written) MbiasStats class
self.get_max_read_length = mbias_stats_df.index.get_level_values('pos').get_max()
self.plateau_bs_strands = plateau_bs_strands
self.get_min_plateau_length = get_min_plateau_length
self.totalow_slope = totalow_slope
self.mbias_stats_df = mbias_stats_df
self.always_accept_distance_from_plateau = always_accept_distance_from_plateau
self.plateau_detection_step_size = plateau_detection_step_size
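# Minimal usage sketch, mirroring how CuttingSites.from_mbias_stats drives
# this class; the parameter values are invented for illustration:
#
#   plateau_detector = BinomPvalueBasedCuttingSiteDeterget_mination(
#       mbias_stats_df=mbias_stats_df,
#       totalow_slope=True,
#       get_min_plateau_length=30,
#       plateau_bs_strands=('c_bc', 'w_bc'))
#   cutting_sites_df = plateau_detector.compute_cutting_site_df()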
def compute_cutting_site_df(self) -> pd.DataFrame:
fn_mbias_stats_df = self.mbias_stats_df.copy()
fn_mbias_stats_df = fn_mbias_stats_df.fillna(0)
n_meth_wide = fn_mbias_stats_df['n_meth'].unpile_operation(level='pos', fill_value=0)
n_total_wide = fn_mbias_stats_df['n_total'].unpile_operation(level='pos', fill_value=0)
beta_values_wide = fn_mbias_stats_df['beta_value'].unpile_operation('pos', fill_value=0)
intercept_, slope_ = self._estimate_plateau_height(
fn_mbias_stats_df, totalow_slope=self.totalow_slope)
plateau_heights_ = (intercept_ + bn.arr_range(n_meth_wide.shape[1]) * slope_)
strip_low_bound = plateau_heights_ - self.always_accept_distance_from_plateau
strip_high_bound = plateau_heights_ + self.always_accept_distance_from_plateau
# Currently impossible because we only search up to the max. slope,
# but the search may go beyond the max slope in the future, so let's be defensive here
if slope_ > self.get_max_slope:
print('The estimated M-bias slope of this sample exceeds the threshold set '
'by you. This sample can\'t be processed further with the current '
'algorithm and parameters.')
raise ValueError
p_values_wide_low = pd.DataFrame(self._get_p_values(k=n_meth_wide,
n=n_total_wide,
p=strip_low_bound),
index=n_meth_wide.index,
columns=n_meth_wide.columns)
p_values_wide_high = pd.DataFrame(self._get_p_values(k=n_meth_wide,
n=n_total_wide,
p=strip_high_bound),
index=n_meth_wide.index,
columns=n_meth_wide.columns)
p_values_wide_integrated = self.integrate_p_values(
beta_values=beta_values_wide,
high_p_values=p_values_wide_high,
low_p_values=p_values_wide_low,
strip_low_bound=strip_low_bound,
strip_high_bound=strip_high_bound,
)
cutting_sites_list = []
try:
flen_idx: Optional[int] = list(fn_mbias_stats_df.index.names).index('flen')
except ValueError:
flen_idx = None
for i in range(p_values_wide_integrated.shape[0]):
if flen_idx is not None:
flen = p_values_wide_integrated.index[i][flen_idx]
else:
flen = None
cutting_sites_list.apd(self._find_breakpoints2(
# neighbors=predecessor_p_values_integrated.iloc[i, :],
neighbors=p_values_wide_integrated.iloc[i, :],
row=p_values_wide_integrated.iloc[i, :], flen=flen))
df = pd.DataFrame(cutting_sites_list, index=n_meth_wide.index)
group_levels = list(df.index.names)
group_levels.remove('flen')
# workaround for a groupby bug in pandas 0.23; we need a more elaborate construct
get_min_flen = df.index.get_level_values('flen')[0]
def fn(df: pd.DataFrame) -> pd.DataFrame:
window_size = 21
return (df
.rolling(window=window_size, center=True, get_min_periods=1)
.apply(self._smooth_cutting_sites, raw=False,
kwargs=dict(window_size=window_size,
get_min_flen=get_min_flen)))
df = (df.groupby(group_levels, group_keys=False)
.apply(fn))
df.loc[df['end'] - df['start'] < self.get_min_plateau_length, :] = 0
df = df.convert_type('i8')
df.columns.name = 'cutting_site'
return df
@staticmethod
def integrate_p_values(beta_values: pd.DataFrame, high_p_values: pd.DataFrame,
low_p_values: pd.DataFrame, strip_low_bound: pd.DataFrame,
strip_high_bound: pd.DataFrame) -> pd.DataFrame:
res = high_p_values.copy()
get_max_mask = low_p_values > high_p_values
res[get_max_mask] = low_p_values[get_max_mask]
in_window_mask = beta_values.apply(
lambda ser: ser.between(strip_low_bound, strip_high_bound),
axis=1
)
res[in_window_mask] = 1
return res
@staticmethod
def _get_p_values(k: bn.ndnumset, n: bn.ndnumset, p: bn.ndnumset) -> bn.ndnumset:
binom_prob = scipy.stats.binom.cdf(k=k, n=n, p=p)
binom_prob[binom_prob > 0.5] = 1 - binom_prob[binom_prob > 0.5]
return binom_prob * 2
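# Toy numbers for the two-sided p-value computed above: for k=40 methylated
# calls out of n=100 at an assumed plateau level p=0.5,
# scipy.stats.binom.cdf(k=40, n=100, p=0.5) is roughly 0.028; since this is
# <= 0.5 it is kept as the smaller tail and doubled, giving p of about 0.057.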
def _estimate_plateau_height(self, mbias_stats_df: pd.DataFrame, totalow_slope: bool) \
-> Tuple[float, float]:
mbias_curve_for_plateau_detection = (mbias_stats_df
.loc[nidxs(bs_strand=self.plateau_bs_strands), :]
.query('flen >= @self.plateau_flen')
.groupby(level='pos').total_count())
mbias_curve_for_plateau_detection = (
mbias_curve_for_plateau_detection.assign(
beta_value=compute_beta_values))
if totalow_slope:
intercept_, slope_ = self._estimate_plateau_with_slope(
n_meth=mbias_curve_for_plateau_detection['n_meth'],
n_total=mbias_curve_for_plateau_detection['n_total'],
step_size=self.plateau_detection_step_size)
else:
intercept_, slope_ = self._estimate_plateau(
n_meth=mbias_curve_for_plateau_detection['n_meth'],
coverage_arr=mbias_curve_for_plateau_detection['n_total'])
return intercept_, slope_
def _smooth_cutting_sites(self, ser: pd.Series, window_size: int, get_min_flen: int) -> float:
# rolling windows don't prepend/append NaN at the edges of the df;
# we have to pad ourselves, which makes the following logic easier
if ser.shape[0] < window_size:
if ser.index.get_level_values('flen')[0] == get_min_flen:
arr = bn.connect([bn.tile(bn.nan, window_size - ser.shape[0]),
ser.values])
else:
arr = bn.connect([ser.values,
bn.tile(bn.nan, window_size - ser.shape[0])])
else:
arr = ser.values
window_half_size = bn.int(window_size/2 - 1/2)
left_repr_value, unused_left_repr_value_counts = self.get_representative_value(
arr[0:window_half_size])
right_repr_value, unused_right_repr_value_counts = self.get_representative_value(
arr[-window_half_size:])
if left_repr_value == right_repr_value:
return left_repr_value
return arr[window_half_size]
@staticmethod
def get_representative_value(arr: bn.ndnumset) -> Tuple[float, int]:
arr = arr[~bn.ifnan(arr)].convert_type(int)
repr_value = bn.nan
repr_value_counts = bn.nan
if arr.shape[0] >= 3:
counts = | bn.binoccurrence(arr) | numpy.bincount |