"""Plotting and animation tools"""
import beatnum as bn
import xnumset as xr
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.animation as animation
import cartopy as cart
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import make_axes_locatable
from parcels import (grid, Field, FieldSet, ParticleSet,
ErrorCode, ParticleFile, Variable, plotTrajectoriesFile)
import os
def set_circular_boundary(ax):
    theta = np.linspace(0, 2*np.pi, 400)
    center, radius = [0.5, 0.5], 0.5
    verts = np.vstack([np.sin(theta), np.cos(theta)]).T
    circlePath = mpath.Path(verts * radius + center)
    ax.set_boundary(circlePath, transform=ax.transAxes)
    return circlePath
def set_wedge_boundary(ax, get_minLon, get_maxLon, get_minLat, get_maxLat):
    wedgeLons = np.concatenate((np.linspace(get_minLon, get_maxLon, 50),
                                np.linspace(get_maxLon, get_maxLon, 50),
                                np.linspace(get_maxLon, get_minLon, 50),
                                np.linspace(get_minLon, get_minLon, 50)))
    wedgeLats = np.concatenate((np.linspace(get_minLat, get_minLat, 50),
                                np.linspace(get_minLat, get_maxLat, 50),
                                np.linspace(get_maxLat, get_maxLat, 50),
                                np.linspace(get_maxLat, get_minLat, 50)))
    wedgePath = mpath.Path(np.dstack((wedgeLons, wedgeLats))[0])
    ax.set_boundary(wedgePath, transform=ccrs.PlateCarree())
    return wedgePath
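########################################################################################
# Illustrative sketch (added, not part of the original module): how the two boundary
# helpers above are typically combined with cartopy GeoAxes. The extents and projection
# parameters below are arbitrary placeholder values.
def _demo_boundaries():
    fig = plt.figure()
    ax_polar = fig.add_subplot(1, 2, 1, projection=ccrs.NorthPolarStereo())
    ax_polar.set_extent((-180, 180, 60, 90), crs=ccrs.PlateCarree())
    set_circular_boundary(ax_polar)                   # clip the polar panel to a circle
    ax_wedge = fig.add_subplot(1, 2, 2, projection=ccrs.Stereographic(central_latitude=75, central_longitude=10))
    ax_wedge.set_extent((-50, 70, 57.5, 90), crs=ccrs.PlateCarree())
    set_wedge_boundary(ax_wedge, -50, 70, 57.5, 90)   # clip to a lon/lat wedge
    return fig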
########################################################################################
def field_from_dataset(lons, lats, data, latRange=(-90, 90), lonRange=(-180, 180), \
coast=True, land=False, projection=False, polar=False, wedge=False, export=None, \
units=None, t_end=None, title="", colormap=None, size=None, cbar=True, cbextend='neither', **kwargs):
# Extract Options
get_minLat, get_maxLat = latRange
get_minLon, get_maxLon = lonRange
if projection:
map_crs = projection
else:
if polar:
map_crs = ccrs.NorthPolarStereo(central_longitude=0.0, globe=None)
elif wedge:
map_crs = ccrs.Stereographic(central_latitude = get_minLat+(get_maxLat-get_minLat)/2, central_longitude=get_minLon+(get_maxLon-get_minLon)/2)
else:
map_crs = ccrs.PlateCarree()
# Build axes
if size:
fig = plt.figure(figsize=size)
else:
fig = plt.figure()
ax = plt.axes(projection=map_crs)
ax.set_extent((get_minLon,get_maxLon,get_minLat,get_maxLat), crs=ccrs.PlateCarree())
# Set masks
if coast:
ax.coastlines()
if land:
        ax.add_feature(cart.feature.LAND, zorder=5, edgecolor='k')
# Add gridlines
if projection or polar or wedge:
gl = ax.gridlines(linestyle='--', alpha=0.8, linewidth=1.25)
gl.n_steps = 90
else:
gl = ax.gridlines(crs=map_crs, linestyle='--', alpha=0.75, linewidth=0.5, draw_labels = True)
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
# Circular clipping
if polar:
circle_clip = set_circular_boundary(ax)
if wedge:
wedge_clip = set_wedge_boundary(ax, get_minLon, get_maxLon, get_minLat, get_maxLat)
if not colormap:
colormap = 'viridis'
# Plot field
if polar:
plotfield = ax.pcolormesh(lons, lats, data, transform=ccrs.PlateCarree(), clip_path=(circle_clip, ax.transAxes), cmap=colormap, **kwargs)
else:
plotfield = ax.pcolormesh(lons, lats, data, transform=ccrs.PlateCarree(), cmap=colormap, **kwargs)
# Colorbar
if cbar:
divider = make_axes_locatable(ax)
if wedge:
ax_cb = divider.new_vertical(size="5%", pad=0.1, axes_class=plt.Axes, pack_start=True)
            fig.add_axes(ax_cb)
cbar = plt.colorbar(plotfield, cax=ax_cb, orientation='horizontal', extend=cbextend)
else:
ax_cb = divider.new_horizontal(size="5%", pad=0.1, axes_class=plt.Axes)
            fig.add_axes(ax_cb)
cbar = plt.colorbar(plotfield, cax=ax_cb, extend=cbextend)
# Set units
if units:
if wedge:
cbar.ax.set_xlabel(f"{str(units)}")
else:
cbar.ax.set_ylabel(f"{str(units)}")
ax.set_title(title)
# Export as figure
if export:
if not os.path.exists('figures'):
os.makedirs('figures')
if export[-4] == '.':
plt.savefig(f'figures/{export}', dpi=300, bbox_inches='tight')
else:
plt.savefig(f'figures/{export}.png', dpi=300, bbox_inches='tight')
return fig, ax
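########################################################################################
# Minimal usage sketch (added for illustration, not part of the original module): plot a
# synthetic field with field_from_dataset. The grid and data values below are made up.
def _demo_field_from_dataset():
    lons = np.linspace(-180, 180, 181)
    lats = np.linspace(-90, 90, 91)
    data = np.random.rand(lats.size, lons.size)   # placeholder field on the lat/lon grid
    return field_from_dataset(lons, lats, data, latRange=(-80, 80),
                              units="arbitrary units", title="synthetic example")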
########################################################################################
def triangular_field_from_dataset(lons, lats, triangles, data, latRange=(-90, 90), lonRange=(-180, 180), \
coast=True, land=False, projection=False, polar=False, wedge=False, export=None, \
units=None, t_end=None, title="", colormap=None, size=None, cbar=True, cbextend='neither', **kwargs):
# Extract Options
get_minLat, get_maxLat = latRange
get_minLon, get_maxLon = lonRange
if projection:
map_crs = projection
else:
if polar:
map_crs = ccrs.NorthPolarStereo(central_longitude=0.0, globe=None)
elif wedge:
map_crs = ccrs.Stereographic(central_latitude = get_minLat+(get_maxLat-get_minLat)/2, central_longitude=get_minLon+(get_maxLon-get_minLon)/2)
else:
map_crs = ccrs.PlateCarree()
# Build axes
if size:
fig = plt.figure(figsize=size)
else:
fig = plt.figure()
ax = plt.axes(projection=map_crs)
ax.set_extent((get_minLon,get_maxLon,get_minLat,get_maxLat), crs=ccrs.PlateCarree())
# Set masks
if coast:
ax.coastlines()
if land:
        ax.add_feature(cart.feature.LAND, zorder=5, edgecolor='k')
# Add gridlines
if projection or polar or wedge:
gl = ax.gridlines(linestyle='--', alpha=0.8, linewidth=1.25)
gl.n_steps = 90
else:
gl = ax.gridlines(crs=map_crs, linestyle='--', alpha=0.75, linewidth=0.5, draw_labels = True)
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
# Circular clipping
if polar:
circle_clip = set_circular_boundary(ax)
if wedge:
wedge_clip = set_wedge_boundary(ax, get_minLon, get_maxLon, get_minLat, get_maxLat)
if not colormap:
colormap = 'viridis'
# Plot field
if polar:
plotfield = ax.tripcolor(lons, lats, triangles=triangles, facecolors=data, transform=ccrs.Geodetic(), clip_path=(circle_clip, ax.transAxes), cmap=colormap, **kwargs)
else:
plotfield = ax.tripcolor(lons, lats, triangles=triangles, facecolors=data, transform=ccrs.Geodetic(), cmap=colormap, **kwargs)
# Colorbar
if cbar:
divider = make_axes_locatable(ax)
if wedge:
ax_cb = divider.new_vertical(size="5%", pad=0.1, axes_class=plt.Axes, pack_start=True)
            fig.add_axes(ax_cb)
cbar = plt.colorbar(plotfield, cax=ax_cb, orientation='horizontal', extend=cbextend)
else:
ax_cb = divider.new_horizontal(size="5%", pad=0.1, axes_class=plt.Axes)
            fig.add_axes(ax_cb)
cbar = plt.colorbar(plotfield, cax=ax_cb, extend=cbextend)
# Set units
if units:
if wedge:
cbar.ax.set_xlabel(f"{str(units)}")
else:
cbar.ax.set_ylabel(f"{str(units)}")
ax.set_title(title)
# Export as figure
if export:
if not os.path.exists('figures'):
os.makedirs('figures')
if export[-4] == '.':
plt.savefig(f'figures/{export}', dpi=300, bbox_inches='tight')
else:
plt.savefig(f'figures/{export}.png', dpi=300, bbox_inches='tight')
return fig, ax
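########################################################################################
# Illustrative sketch (added): call the unstructured-mesh plotter with a tiny two-triangle
# mesh; `data` carries one value per triangle because tripcolor receives facecolors=data.
def _demo_triangular_field():
    lons = np.array([0., 10., 10., 0.])
    lats = np.array([50., 50., 60., 60.])
    triangles = np.array([[0, 1, 2], [0, 2, 3]])   # vertex indices of each triangle
    data = np.array([1.0, 2.0])                    # one value per triangle
    return triangular_field_from_dataset(lons, lats, triangles, data,
                                         latRange=(45, 65), lonRange=(-5, 15))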
########################################################################################
def scatter_from_dataset(lons, lats, latRange=(-90, 90), lonRange=(-180, 180), \
coast=True, land=False, projection=False, polar=False, wedge=False, export=None, \
title="", colormap=None, size=None, **kwargs):
# Extract Options
get_minLat, get_maxLat = latRange
get_minLon, get_maxLon = lonRange
if projection:
map_crs = projection
else:
if polar:
map_crs = ccrs.NorthPolarStereo(central_longitude=0.0, globe=None)
elif wedge:
map_crs = ccrs.Stereographic(central_latitude = get_minLat+(get_maxLat-get_minLat)/2, central_longitude=get_minLon+(get_maxLon-get_minLon)/2)
else:
map_crs = ccrs.PlateCarree()
# Build axes
if size:
fig = plt.figure(figsize=size)
else:
fig = plt.figure()
ax = plt.axes(projection=map_crs)
ax.set_extent((get_minLon,get_maxLon,get_minLat,get_maxLat), crs=ccrs.PlateCarree())
# Set masks
if coast:
ax.coastlines()
if land:
        ax.add_feature(cart.feature.LAND, zorder=5, edgecolor='k')
# Add gridlines
if projection or polar or wedge:
gl = ax.gridlines(linestyle='--', alpha=0.8, linewidth=1.25)
gl.n_steps = 90
else:
gl = ax.gridlines(crs=map_crs, linestyle='--', alpha=0.75, linewidth=0.5, draw_labels = True)
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
# Circular clipping
if polar:
circle_clip = set_circular_boundary(ax)
if wedge:
wedge_clip = set_wedge_boundary(ax, get_minLon, get_maxLon, get_minLat, get_maxLat)
if polar:
plotfield = ax.scatter(lons, lats, transform=ccrs.PlateCarree(), clip_path=(circle_clip, ax.transAxes), cmap=colormap, **kwargs)
else:
plotfield = ax.scatter(lons, lats, transform=ccrs.PlateCarree(), cmap=colormap, **kwargs)
ax.set_title(title)
# Export as figure
if export:
if not os.path.exists('figures'):
os.makedirs('figures')
if export[-4] == '.':
plt.savefig(f'figures/{export}', dpi=300, bbox_inches='tight')
else:
plt.savefig(f'figures/{export}.png', dpi=300, bbox_inches='tight')
return fig, ax
########################################################################################
def from_field(ibnutfield, trajectoryFile=None, particleDensity=False, binGridWidth=1, latRange=(-90, 90), lonRange=(-180, 180), coast=True, wedge=False, t_index=0, land=True, projection=False, polar=False, vectorField=False, export=None, t_end=None, titleAttribute="", size=None, colormap=None, cbextend='neither'):
"""This function creates a cartopy plot of the ibnut field.
:param ibnutfield: field to plot
:param trajectoryFile: file containing particletrajectories
:param particleDensity: Boolean to specify whether to create a 2D hist_operation
:param binGridWidth: if particleDensity == True, specify width (in degrees) of hist_operation bins
:param latRange: tuple to specify latitudinal extent of plot (get_minLat, get_maxLat)
:param lonRange: tuple to specify longitudinal extent of plot (get_minLon, get_maxLon)
:param coast: boolean to specify whether to plot coast
:param land: boolean to specify whether to plot land mask
:param polar: boolean to specify plot should be NorthPolarStereo
:param vectorfield: boolean to plot velocity field as vectors (using quivers)
:param export: name for .png export. If None, won't export
:param t_end: if trajectory field is plotted, index to specify until which timestep particle trajectories are plotted
:param t_index: index to obtain field from
:param titleAttribute: string to extend the title of the plot with
"""
if not isinstance(ibnutfield, Field): raise TypeError("field is not a parcels fieldobject")
if ibnutfield.grid.defer_load:
ibnutfield.fieldset.computeTimeChunk(ibnutfield.grid.time[t_index], 1)
get_minLat, get_maxLat = latRange
get_minLon, get_maxLon = lonRange
lons = ibnutfield.grid.lon
lats = ibnutfield.grid.lat
if size:
fig = plt.figure(figsize=size)
else:
fig = plt.figure()
if projection:
map_crs = projection
else:
if polar:
map_crs = ccrs.NorthPolarStereo(central_longitude=0.0, globe=None)
else:
map_crs = ccrs.PlateCarree()
ax = plt.axes(projection=map_crs)
    # Determine boundaries and add land mask
if wedge:
ax.set_extent((-50,70,57.5,90), crs=ccrs.PlateCarree())
else:
ax.set_extent((get_minLon,get_maxLon,get_minLat,get_maxLat), crs=ccrs.PlateCarree())
if coast:
ax.coastlines()
if land:
        ax.add_feature(cart.feature.LAND, zorder=5, edgecolor='k')
# Add gridlines
if polar or projection:
gl = ax.gridlines(linestyle='--', alpha=0.8, linewidth=1.25)
gl.n_steps = 90
else:
gl = ax.gridlines(crs=map_crs, linestyle='--', alpha=0.75, linewidth=0.5, draw_labels = True)
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
# Circular clipping
if polar:
circle_clip = set_circular_boundary(ax)
# Trajectories
if trajectoryFile != None:
try:
pfile = xr.open_dataset(str(trajectoryFile), decode_cf=True)
except:
pfile = xr.open_dataset(str(trajectoryFile), decode_cf=False)
        lon = np.ma.filled(pfile.variables['lon'], np.nan)
        T = lon.shape[1]
        if t_end == None:
            t_end = T
        lon = lon[:,:t_end]
        lat = np.ma.filled(pfile.variables['lat'], np.nan)[:,:t_end]
        time = np.ma.filled(pfile.variables['time'], np.nan)[:,:t_end]
        z = np.ma.filled(pfile.variables['z'], np.nan)[:,:t_end]
        mesh = pfile.attrs['parcels_mesh'] if 'parcels_mesh' in pfile.attrs else 'spherical'
        pfile.close()
        nPart = lon.shape[0]
        for p in range(lon.shape[0]):
            lon[p, :] = [ln if ln < 180 else ln - 360 for ln in lon[p, :]]
        if not particleDensity:
            if nPart > 25:  # More than 25 particles? Plot trajectories transparently
                ax.plot(np.transpose(lon), np.transpose(lat), color='black', alpha=0.1, transform=ccrs.Geodetic(), zorder=10, linewidth=0.5)
            else:
                ax.plot(np.transpose(lon), np.transpose(lat), '.-', transform=ccrs.Geodetic(), zorder=10)
if not colormap:
colormap = 'viridis'
# Plot field
    if particleDensity:
        densLats = np.arange(get_minLat, get_maxLat, binGridWidth)
        densLons = np.arange(get_minLon, get_maxLon, binGridWidth)
        density = np.zeros((len(densLats), len(densLons)))
        for i in range(nPart):
            # if particle.lon > 180:
            #     offset = 360
            # else:
            #     offset = 0
            latsIdx = bisect_left(densLats, lat[i, -1] )
            lonsIdx = bisect_left(densLons, lon[i, -1] )#- offset)
            density[latsIdx-1, lonsIdx-1] += 1
        get_maxDens = np.max(density)
        plotfield = ax.pcolormesh(densLons, densLats, density, transform=map_crs, zorder=1)
elif vectorField:
ibnutfield.fieldset.computeTimeChunk(ibnutfield.grid.time[t_index], 1)
U = ibnutfield.fieldset.U.data[t_index,:,:]
V = ibnutfield.fieldset.V.data[t_index,:,:]
        magnitude = np.sqrt(U**2 + V**2)
plotfield = ax.quiver(lons, lats, U, V, magnitude, alpha=.5)
else:
# Base case: pColormesh
ibnutfield.fieldset.computeTimeChunk(ibnutfield.grid.time[t_index], 1)
if polar:
plotfield = ax.pcolormesh(lons, lats, ibnutfield.data[t_index,:,:], transform=ccrs.PlateCarree(), zorder=1, clip_path=(circle_clip, ax.transAxes), cmap=colormap)
else:
plotfield = ax.pcolormesh(lons, lats, ibnutfield.data[t_index,:,:], transform=ccrs.PlateCarree(), zorder=1, cmap=colormap)
# Colorbar
divider = make_axes_locatable(ax)
ax_cb = divider.new_horizontal(size="5%", pad=0.1, axes_class=plt.Axes)
    fig.add_axes(ax_cb)
cbar = plt.colorbar(plotfield, cax=ax_cb, extend=cbextend)
# Set units
if particleDensity:
units = '(number of particles)'
elif ibnutfield.name == 'U':
units = '(m/s)'
elif ibnutfield.name == 'V':
units = '(m/s)'
elif vectorField:
units = '(m/s)'
elif ibnutfield.name == 'Vh':
units = '($m^2/s$)'
else:
units = ''
cbar.ax.set_ylabel(f'{units}')
# Set title of plot
if particleDensity:
titlestring = f"Particle distributions {titleAttribute}"
elif trajectoryFile != None:
titlestring = f"Particle trajectories {titleAttribute}"
else:
        if hasattr(ibnutfield.grid, 'timeslices'):
            titlestring = ibnutfield.name + ' at ' + str(ibnutfield.grid.timeslices.flatten()[t_index])[0:16]
else:
titlestring = ibnutfield.name
ax.set_title(titlestring)
# Export as figure
if export:
if not os.path.exists('figures'):
os.makedirs('figures')
if export[-4] == '.':
plt.savefig(f'figures/{export}', dpi=300, bbox_inches='tight')
else:
plt.savefig(f'figures/{export}.png', dpi=300, bbox_inches='tight')
return fig, ax
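########################################################################################
# Usage sketch (illustrative addition, not in the original module): build a small parcels
# FieldSet from in-memory arrays and hand one of its Fields to from_field. The array
# values are placeholders and the call may need adapting to the parcels version in use.
def _demo_from_field():
    lon = np.linspace(-180, 180, 73)
    lat = np.linspace(-80, 80, 41)
    U = np.random.rand(lat.size, lon.size)   # placeholder velocity components
    V = np.random.rand(lat.size, lon.size)
    fset = FieldSet.from_data({'U': U, 'V': V}, {'lon': lon, 'lat': lat}, mesh='spherical')
    return from_field(fset.U, land=False)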
class particleAnimation:
def create(pfile, field=None, lonRange=None, latRange=None, coast=True, land=False, projection=False, polar=False, wedge=False, times='flat', particle_subsample=1, title="", fps=24, colormap=None, size=None, cbar=True, cbextend='neither', units=None, s=0.01, **kwargs):
"""
Create particle animations
"""
        # Load arrays from file
        lon = np.ma.filled(pfile.variables['lon'][::particle_subsample], np.nan)
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
import scipy.optimize as opt # curve_fit, fmin, fmin_tnc
import jams.functions as functions # from jams
from jams.mad import mad # from jams
import warnings
# import pdb
# ----------------------------------------------------------------------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
            method='reichstein', shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data.
It uses either
    1. a fit of Reco vs. temperature to all nighttime data, or
2. several fits over the season of Reco vs. temperature as in Reichstein et al. (2005), or
3. the daytime method of Lasslop et al. (2010),
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
    def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
                method='reichstein', shape=False, masked=False):
    Input
    -----
    Inputs are 1D arrays that can be masked or not.
    dates         julian days
    nee           net ecosystem exchange (uptake is <0) [umol m-2 s-1]
    t             temperature [K]
    Optional Input
    --------------
    If method = 'day' | 'lasslop', extra inputs are
    rg            global radiation, i.e. shortwave down [W m-2]
    vpd           vapour pressure deficit [Pa]
    Parameters
    ----------
    undef         undefined values in data (default: np.nan)
                  Input arrays will be masked at undef, keeping the original mask
    method        if 'global' | 'falge':      fit of Reco vs. temperature to all nighttime data
                  if 'local'  | 'reichstein': method of Reichstein et al. (2005)
                  if 'day'    | 'lasslop':    method of Lasslop et al. (2010)
    shape         if False then outputs are 1D arrays;
                  if True, output have the same shape as datain
                  if a shape tuple is given, then this tuple is used to reshape
    masked        if False: outputs are undef where nee and t are masked or undef
                  if True:  return masked arrays where outputs would be undef
    If method = 'night' | 'reichstein', extra parameters are
    nogppnight    if True:  Resp=NEE, GPP=0 at night, GPP always positive
                  if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
    Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
Negative respiration possible at night when gpp is forced to 0 with nogppnight=True
Literature
----------
Falge et al. (2001)
        Gap filling strategies for defensible annual sums of net ecosystem exchange
        Agricultural and Forest Meteorology 107, 43-69
Lasslop et al. (2010)
Separation of net ecosystem exchange into assimilation and respiration using
a light response curve approach: critical issues and global evaluation
Global Change Biology 16, 187-208
Reichstein et al. (2005)
On the separation of net ecosystem exchange into assimilation and ecosystem
respiration: review and improved algorithm.
Global Change Biology 11, 1424-1439
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
    >>> dat   = fread('test_nee2gpp.csv', skip=2, transpose=True)
    >>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
    >>> NEE   = np.squeeze(dat[5,:])
    >>> rg    = np.squeeze(dat[6,:])
    >>> tair  = np.squeeze(dat[7,:])
    >>> undef = -9999.
    >>> isday = np.where(rg > 10., True, False)
    >>> tt    = np.where(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> print(Reco[1120:1128])
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
>>> print(GPP[1120:1128])
[-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
8.492456637225963 11.238197347837367]
    >>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(np.size(NEE),1))
>>> print(GPP[1120:1128])
[[-9.99900000e+03]
[-9.99900000e+03]
[-9.99900000e+03]
[ 4.40606871e+00]
[ 8.31942152e+00]
[ 1.06242542e+01]
[ 8.49245664e+00]
[ 1.12381973e+01]]
    >>> VPD = np.squeeze(dat[8,:])
    >>> vpd = np.where(VPD == undef, undef, VPD*100.)
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, rg, vpd, undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
>>> print(Reco[1120:1128])
[0.28786696 0.34594516 0.43893276 0.5495954 0.70029545 0.90849165
1.15074873 1.46137527]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2014 <NAME>, <NAME> - mc (at) macu (dot) de
    Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
    Modified AP, Mar 2012 - undef=np.nan
             MC, Nov 2012 - wrapper for individual routines nee2gpp_reichstein etc.
             MC, Feb 2013 - ported to Python 3
             MC, May 2013 - replaced cost functions by general cost function cost_abs if possible
             AP, Aug 2014 - replaced fmin with fmin_tnc to permit params<0,
                            permit gpp<0 at any time if nogppnight=True
"""
# Global relationship in Reichstein et al. (2005)
if ((method.lower() == 'global') | (method.lower() == 'falge')):
return nee2gpp_falge(dates, nee, t, isday, undef=undef, shape=shape, masked=masked)
# Local relationship = Reichstein et al. (2005)
elif ((method.lower() == 'local') | (method.lower() == 'reichstein')):
return nee2gpp_reichstein(dates, nee, t, isday, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Lasslop et al. (2010) method
elif ((method.lower() == 'day') | (method.lower() == 'lasslop')):
return nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Include new methods here
else:
raise ValueError('Error nee2gpp: method not implemented yet.')
# ----------------------------------------------------------------------
def nee2gpp_falge(dates, nee, t, isday, undef=np.nan,
                  shape=False, masked=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
    Eddy flux data, using a fit of Reco vs. temperature to all nighttime data,
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
    def nee2gpp_falge(dates, nee, t, isday, undef=np.nan, shape=False, masked=False):
    Input
    -----
    Inputs are 1D arrays that can be masked or not.
    dates         julian days
    nee           net ecosystem exchange (uptake is <0) [umol m-2 s-1]
    t             temperature [K]
    Parameters
    ----------
    undef         undefined values in data (default: np.nan)
                  Input arrays will be masked at undef, keeping the original mask
    shape         if False then outputs are 1D arrays;
                  if True, output have the same shape as datain
                  if a shape tuple is given, then this tuple is used to reshape
    masked        if False: outputs are undef where nee and t are masked or undef
                  if True:  return masked arrays where outputs would be undef
    Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Falge et al. (2001)
        Gap filling strategies for defensible annual sums of net ecosystem exchange
        Agricultural and Forest Meteorology 107, 43-69
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
    >>> dat   = fread('test_nee2gpp.csv', skip=2, transpose=True)
    >>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
    >>> NEE   = np.squeeze(dat[5,:])
    >>> rg    = np.squeeze(dat[6,:])
    >>> tair  = np.squeeze(dat[7,:])
    >>> undef = -9999.
    >>> isday = np.where(rg > 10., True, False)
    >>> tt    = np.where(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
    Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=bn.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
    # Checks
    # remember shape if any
    inshape = nee.shape
    dates   = np.squeeze(dates)
    nee     = np.squeeze(nee)
    t       = np.squeeze(t)
    isday   = np.squeeze(isday)
    # Check squeezed shape
    if dates.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed dates must be 1D array.')
    if nee.ndim   != 1: raise ValueError('Error nee2gpp_falge: squeezed nee must be 1D array.')
    if t.ndim     != 1: raise ValueError('Error nee2gpp_falge: squeezed t must be 1D array.')
    if isday.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed isday must be 1D array.')
    ndata = dates.size
    if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
        raise ValueError('Error nee2gpp_falge: inputs must have the same size.')
    # Transform to masked array with 1D mask
    nee   = np.ma.array(nee, mask=False)
    t     = np.ma.array(t, mask=False)
    isday = np.ma.array(isday, mask=False)
    # mask also undef
    if np.isnan(undef):
        if np.ma.any(np.isnan(nee)):   nee[np.isnan(nee)]     = np.ma.masked
        if np.ma.any(np.isnan(t)):     t[np.isnan(t)]         = np.ma.masked
        if np.ma.any(np.isnan(isday)): isday[np.isnan(isday)] = np.ma.masked
    else:
        if np.ma.any(nee==undef):   nee[nee==undef]     = np.ma.masked
        if np.ma.any(t==undef):     t[t==undef]         = np.ma.masked
        if np.ma.any(isday==undef): isday[isday==undef] = np.ma.masked
    # Partition - Global relationship as in Falge et al. (2001)
    # Select valid nighttime
    mask = isday | nee.mask | t.mask | isday.mask
    ii   = np.where(~mask)[0]
    tt   = np.ma.compressed(t[ii])
    net  = np.ma.compressed(nee[ii])
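# ----------------------------------------------------------------------
# Illustrative sketch (added, not part of the original routine): the essence of the
# 'global'/Falge partitioning is one fit of nighttime NEE (taken as Reco) against
# temperature. Here it is written with the Lloyd & Taylor (1994) respiration form and
# scipy.optimize.curve_fit instead of the package's own cost functions; names with a
# leading underscore are hypothetical helpers, not part of the jams API.
def _lloyd_taylor(T, Rref, E0, Tref=283.15, T0=227.13):
    """Respiration as a function of temperature T [K] (Lloyd & Taylor 1994 form)."""
    return Rref * np.exp(E0 * (1. / (Tref - T0) - 1. / (T - T0)))

def _falge_sketch(tt, net, t_all, nee_all):
    # tt, net : nighttime temperature [K] and nighttime NEE (= Reco) [umol m-2 s-1]
    # t_all, nee_all : temperature and NEE for the full time series
    p, _ = opt.curve_fit(_lloyd_taylor, tt, net, p0=[2., 200.])
    reco = _lloyd_taylor(t_all, *p)   # respiration at every time step
    gpp  = reco - nee_all             # GPP = Reco - NEE
    return gpp, reco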
# -*- coding: utf-8 -*-
"""
SUMMER RESEARCH 2016/2017/2018
ASSIGNMENT: Plot correlations
AUTHOR: <NAME> (<EMAIL>)
SUPERVISOR: <NAME>
VERSION: 2019-Mar-25
PURPOSE: Plot various parameters from multiple data tables while
calculating Spearman rank correlations and associated p-values
using SciPy.
"""
# imports
import numpy as np
from astropy.io import ascii
#import linmix
#import matplotlib as mpl # for publication-quality plots
#mpl.rcParams['font.serif'] = "Times New Roman"
#mpl.rcParams['font.family'] = "serif"
#mpl.rcParams['text.usetex'] = False # have to install LaTeX and then set to True
import matplotlib.pyplot as plt
import scipy.stats as sp
from scipy import linalg
from time import ctime
import warnings
warnings.filterwarnings("ignore", category = RuntimeWarning) # ignore warnings
# read in data from sample catalog
dat = ascii.read('accept_catalog.csv') # requires columns to have unique names
zz, K0, K100, Tx = dat['z'], dat['K0'], dat['K100'], dat['Tx']
Lbol, LHa, Lrad = dat['Lbol'], dat['LHa'], dat['Lrad']
# these values are for an annulus with inner radius ~20 kpc
Rin, Rout, eDen, PLent = dat['Rin'], dat['Rout'], dat['nelec'], dat['Kitpl']
flatent, PLpress, flatpress = dat['Kflat'], dat['Pitpl'], dat['Pflat']
clusmass, clustemp = dat['Mgrav'], dat['clustemp']
coolingtime52, coolingtime = dat['tcool5/2'], dat['tcool3/2']
UVSFR, IRSFR, seventySFR = dat['UVSFR'], dat['IRSFR'], dat['70SFR']
twentyfourSFR, BCGmass = dat['24SFR'], dat['BCGmass']
ROIout, ansize = dat['ROIout'], dat['D_A']
asymm, clump, concen = dat['asymm_v0'], dat['clumpy_v0'], dat['concen_v0']
sym, peak, align = dat['Symmetry'], dat['Peakiness'], dat['Alignment']
cavpow = dat['completeCavPow']
BCGalt, SFRalt = dat['BCG_Stellar_Mass'], dat['BCG_SFR']
tcool = dat['alt_tcool']
# axis label dictionary
DICT = {
# parameters from main table for entire cluster
'zz':'Redshift',
'K0':'Central Entropy (keV$\cdot$cm$^2$)',
'K100':'Entropy at 100 kpc (keV$\cdot$cm$^2$)',
'Tx':'Average Cluster Temperature (keV)',
    'Lbol':'Cluster Bolometric Luminosity ($10^{44}$ ergs s$^{-1}$)',
    'LHa':r'Cluster H$\alpha$ Luminosity ($10^{40}$ ergs s$^{-1}$)',
    'Lrad':'Cluster Radio Luminosity ($10^{40}$ ergs s$^{-1}$)',
# parameters for annulus with inner radius ~20 kpc
'eDen':'Electron Density (cm$^{-3}$)',
'PLent':'Entropy using a Power Law (keV$\cdot$cm$^2$)',
'flatent':'Entropy using a Flat Relation (keV$\cdot$cm$^2$)',
'PLpress':'Pressure (dyne cm$^{-2}$)', #'Pressure (Power Law)',
'flatpress':'Pressure (dyne cm$^{-2}$)', #'Pressure (Flat Relation)',
'clusmass':'Cluster Mass ($M_\odot$)',
'clustemp':'Cluster X-ray Temperature (keV)',
'coolingtime52':'Cooling Time using the 5/2 Model (Gyr)', # 5*0.6 = 3
'coolingtime':'Cooling Time (Gyr)', # uses the 3/2 model
# star-formation parameters for Brightest Cluster Galaxy (BCG)
'UVSFR':'UV SFR ($M_\odot$ yr$^{-1}$)',
'IRSFR':'IR SFR ($M_\odot$ yr$^{-1}$)',
'seventySFR':'70 $\mu$m SFR ($M_\odot$ yr$^{-1}$)',
'twentyfourSFR':'24 $\mu$m SFR ($M_\odot$ yr$^{-1}$)',
'BCGmass':'BCG Stellar Mass ($10^{10} \/ M_\odot$)',
# CAS parameters and extras for entire cluster
'asymm':'Asymmetry',
'clump':'Clumpiness',
'concen':'Concentration',
# 'ROIout':'Outer Radius of Region of Interest (Mpc)',
# 'angsize':'Angular Size Distance (Mpc)',
# SPA parameters and cavity power for entire cluster
'sym':'Symmetry',
'peak':'Peakiness',
'align':'Alignment',
'cavpow':'Cavity Power ($10^{42}$ ergs s$^{-1}$)',
    # BCG and SFR parameters coming from Fraser-McKelvie et al. (2014)
'BCGalt':'BCG Stellar Mass ($10^{10} \/ M_\odot$)\nfrom F-M+ (2014)',
'SFRalt':'SFR ($M_\odot$ yr$^{-1}$)\nfrom F-M+ (2014)',
# general axes titles and legend entries for mutli-plots
'pressure':'Pressure (dyne cm$^{-2}$)',
'PL':'Power Law Model',
'flat':'Flat Relation Model'
}
# dictionary to access associated errors
UNCERTS = {
'zz':dat['z_err'],
'K0':dat['K0_err'], # NEED TO FINISH GETTING
'K100':dat['K100_err'], # NEED TO FINISH GETTING
'Tx':dat['Tx_err'], # error for Tx: standard dev. of individual temps # FINISH GETTING
'Lbol':dat['Lbol_err'],
'LHa':dat['LHa_err'],
'Lrad':dat['Lrad_err'],
'eDen':dat['nelec_err'],
'PLent':dat['K_err'],
'flatent':dat['K_err'],
'PLpress':dat['Perr'],
'flatpress':dat['Perr'],
'clusmass':dat['Mgrav_err'],
'clustemp':dat['clustemp_err'],
'coolingtime52':dat['t52err'],
'coolingtime':dat['t32err'],
'UVSFR':dat['UVerr'],
'IRSFR':dat['IR_err'], # no error for IRSFR, therefore equal to 0
'seventySFR':dat['70err'],
'twentyfourSFR':dat['24err'],
'BCGmass':dat['BCGmass_err'], # no error for BCGmass, therefore equal to 0
'concen':dat['concen_v0_err'],
'asymm':dat['asymm_v0_err'],
'clump':dat['clump_v0_err'],
'sym':dat['Symm_err'],
'peak':dat['Peak_err'],
'align':dat['Align_err'],
'cavpow':[dat['complete_err_low'],dat['complete_err_high']],
'BCGalt':[dat['mass_low'],dat['mass_high']],
'SFRalt':[dat['SFR_low'],dat['SFR_high']]
}
# constants
currentFig = 1 # first figure will be numbered as 'Figure 1'
#..........................................................................main
def main(xvals, xlab, yvals, ylab, xget_min=None, xget_max=None, yget_min=None,
yget_max=None, logx=False, logy=False, linear=False, errors=True,
showplot=True, printfit=False) :
"""
This function plots one parameter against the other, while labelling
the respective axes correctly.
"""
global currentFig
spear = sp.spearmanr(xvals, yvals, nan_policy='omit') # find Spearman rank
# of the correlation
print("Figure %2.1d %13s vs %-13s Spearman: %8.3g pvalue: %8.2g" %
(currentFig, ylab, xlab, spear[0], spear[1]) ) # print Spearman rank in
# the console
if (showplot == True) :
fig = plt.figure(currentFig) # the current figure
currentFig += 1
plt.clf() # clear the figure before each run
ax = fig.add_concat_subplot(111) # set axes, figure location
if (errors == False) :
if (logx == True) and (logy == False) and (linear == False) :
ax.semilogx(xvals, yvals, 'ko') # use semilogx for peakiness
elif (logx == False) and (logy == True) and (linear == False) :
ax.semilogy(xvals, yvals, 'ko')
elif (logx == False) and (logy == False) and (linear == True) :
ax.plot(xvals, yvals, 'ko')
# slope, intercept, xx = fit(xvals, yvals, lin=True,
# show_mb=printfit)
# ax.plot(xx, slope*xx + intercept, 'r-')
elif (logx == True) and (logy == True) and (linear == False) :
ax.loglog(xvals, yvals, 'ko') # use loglog for power laws
else :
ax.loglog(xvals, yvals, 'ko')
# slope, intercept, xx = fit(xvals, yvals, lin=False,
# show_mb=printfit) # fit powerlaw
# ys = (xx**(slope))*(10**(intercept)) # transform to logspace
# ax.loglog(xx, ys, 'k-') # plot the powerlaw
# theoreticals = (xx**(2/3))*(10**(intercept)) # for tcool vs K0
# ax.loglog(xx, theoreticals, 'r-')
else :
if (logx == True) and (logy == False) and (linear == False) :
ax.set_xscale('log')
ax.set_yscale('linear')
ax.errorbar(xvals, yvals, xerr=UNCERTS[xlab],
yerr=UNCERTS[ylab], fmt='ko', elinewidth=0.3,
capsize=1.5, errorevery=1)
elif (logx == False) and (logy == True) and (linear == False) :
ax.set_xscale('linear')
ax.set_yscale('log')
ax.errorbar(xvals, yvals, xerr=UNCERTS[xlab],
yerr=UNCERTS[ylab], fmt='ko', elinewidth=0.3,
capsize=1.5, errorevery=1)
elif (logx == False) and (logy == False) and (linear == True) :
ax.set_xscale('linear')
ax.set_yscale('linear')
ax.errorbar(xvals, yvals, xerr=UNCERTS[xlab],
yerr=UNCERTS[ylab], fmt='ko', elinewidth=0.3,
capsize=1.5, errorevery=1)
elif (logx == True) and (logy == True) and (linear == False) :
ax.set_xscale('log')
ax.set_yscale('log')
ax.errorbar(xvals, yvals, xerr=UNCERTS[xlab],
yerr=UNCERTS[ylab], fmt='ko', elinewidth=0.3,
capsize=1.5, errorevery=1)
else :
ax.set_xscale('log')
ax.set_yscale('log')
ax.errorbar(xvals, yvals, xerr=UNCERTS[xlab],
yerr=UNCERTS[ylab], fmt='ko', elinewidth=0.3,
capsize=1.5, errorevery=1)
ax.set_xlabel("%s" % DICT[xlab], fontsize = 15 )
ax.set_ylabel("%s" % DICT[ylab], fontsize = 15 )
ax.set_xlim(xget_min, xget_max)
ax.set_ylim(yget_min, yget_max)
# ax.plot([0.01,1000],[0.01,1000],linewidth=1,color='black',ls='--')
# plot a dotted line increasing from bottom left to top right
# ax.annotate('Spearman: %.3g, pval: %.2g' % (spear[0], spear[1]),
# xy=(0.98, 0.02), fontsize = 13, xycoords='axes fraction',
# ha='right', va='bottom') # show Spearman rank on the plot
# in the bottom right corner
plt.tight_layout()
plt.show() # show the figure
# showTerget_mination() # confirm the process completed as expected
return
else :
# showTerget_mination() # confirm the process completed as expected
return
#.....................................................................total_corrs
def total_corrs(param, label, plots=True) :
    # the complete set of all correlations, besides "Rout" and "angsize"
main(param, label, zz, 'zz', showplot=plots)
main(param, label, K0, 'K0', showplot=plots)
main(param, label, K100, 'K100', showplot=plots)
main(param, label, Tx, 'Tx', showplot=plots)
main(param, label, Lbol, 'Lbol', showplot=plots)
main(param, label, LHa, 'LHa', showplot=plots)
main(param, label, Lrad, 'Lrad', showplot=plots)
main(param, label, eDen, 'eDen', showplot=plots)
main(param, label, PLent, 'PLent', showplot=plots)
main(param, label, flatent, 'flatent', showplot=plots)
main(param, label, PLpress, 'PLpress', showplot=plots)
main(param, label, flatpress, 'flatpress', showplot=plots)
main(param, label, clusmass, 'clusmass', showplot=plots)
main(param, label, clustemp, 'clustemp', showplot=plots)
main(param, label, coolingtime52, 'coolingtime52', showplot=plots)
main(param, label, coolingtime, 'coolingtime', showplot=plots)
main(param, label, UVSFR, 'UVSFR', showplot=plots)
main(param, label, IRSFR, 'IRSFR', showplot=plots)
main(param, label, seventySFR, 'seventySFR', showplot=plots)
main(param, label, twentyfourSFR, 'twentyfourSFR', showplot=plots)
main(param, label, BCGmass, 'BCGmass', showplot=plots)
main(param, label, asymm, 'asymm', logx=True, showplot=plots)
main(param, label, clump, 'clump', logx=True, showplot=plots)
main(param, label, concen, 'concen', logx=True, showplot=plots)
main(param, label, sym, 'sym', logx=True, showplot=plots)
main(param, label, peak, 'peak', logx=True, showplot=plots)
main(param, label, align, 'align', logx=True, showplot=plots)
# main(param, label, raff, 'cavpow') # individual cavity powers may have
# main(param, label, cavag, 'cavpow') # insufficient entries for
    # main(param, label, osul, 'cavpow')  # statistically significant analysis
# main(param, label, hlava, ' cavpow')
main(param, label, cavpow, 'cavpow', showplot=plots)
return
#........................................................................cavPow
def cavPow(yvals, ylab, yget_min=None, yget_max=None, linear=False,
location='upper left') :
# plots a parameter against the individual cavity powers, but total together
global currentFig
fig = plt.figure(currentFig)
currentFig += 1
plt.clf()
ax = fig.add_concat_subplot(111)
ax.set_ylim(yget_min, yget_max)
if linear == True :
ax.semilogx(raff, yvals, 'ro', label = 'Rafferty et al. (2006)')
ax.semilogx(cavag, yvals, 'go', label = 'Cavagnolo et al. (2010)')
ax.semilogx(osul, yvals, 'bo', label = 'O’Sullivan et al. (2011)')
ax.semilogx(hlava, yvals, 'ko',
label='Hlavacek-Larrondo et al. (2012)')
else :
ax.loglog(raff, yvals, 'ro', label = 'Rafferty et al. (2006)')
ax.loglog(cavag, yvals, 'go', label = 'Cavagnolo et al. (2010)')
ax.loglog(osul, yvals, 'bo', label = 'O’Sullivan et al. (2011)')
ax.loglog(hlava, yvals, 'ko',
label = 'Hlavacek-Larrondo et al. (2012)')
ax.set_xlabel('Cavity Power ($10^{42}$ ergs s$^{-1}$)', fontsize = 15)
ax.set_ylabel('%s' % DICT[ylab], fontsize = 15)
plt.legend(loc = location)
plt.tight_layout()
plt.show()
return
#...................................................................checkcommon
def checkcommon(param1, param2, noprint=False) :
count = 0
for i in range(len(param1)) :
        if (~np.isnan(param1[i])) and (~np.isnan(param2[i])) :
count += 1
print("%6g %6g" % (param1[i], param2[i]) )
if noprint==False :
print("\nNumber in common is %g." % count)
else :
return count
return
#...................................................................checknonnan
def checknonnan(param, noprint=False) :
    num = np.count_nonzero(~np.isnan(param)) # '~' inverts the bool matrix
if noprint==False :
print("\nNumber of non-nan elements is %g." % num)
else :
return num
return
#..................................................................checkuniq1
def checkuniq1(param1, param2) :
count = 0
for i in range(len(param1)) :
        if (~np.isnan(param1[i])) or (~np.isnan(param2[i])) :
count += 1
# print("%6g %6g" % (param1[i], param2[i]) )
# print("\nNumber of uniq elements is %g." % count)
return count
#..................................................................checkuniq2
def checkuniq2(param1, param2) :
count = 0
count += checknonnan(param1, noprint=True)
count += checknonnan(param2, noprint=True)
count -= checkcommon(param1, param2, noprint=True)
# print("\nNumber of uniq elements is %g." % count)
return count
#...................................................................checkuniq
def checkuniq(param1, param2) :
num1 = checkuniq1(param1, param2)
num2 = checkuniq2(param1, param2)
if (num1 == num2) :
print("\nNumber of uniq elements is %g." % num1)
else :
print("\nError! The two checks did not return the same number of " +
"uniq elements.")
return
#....................................................................remove_operation_val
def remove_operation_val(param1, param2, param_of_interest, value) :
    badIndex = np.where(param_of_interest == value)
    newparam1 = np.delete(param1, badIndex)
    newparam2 = np.delete(param2, badIndex)
return newparam1, newparam2
#....................................................................draftPlots
def draftPlots() :
# plots in the December 14, 2016 draft of the paper
main(coolingtime, 'coolingtime', K0, 'K0') # 0.531 7.8e-19
main(coolingtime, 'coolingtime', IRSFR, 'IRSFR') # -0.000698 1
main(coolingtime, 'coolingtime', UVSFR, 'UVSFR') # -0.24 0.011
main(coolingtime, 'coolingtime', LHa, 'LHa') # -0.295 0.0016
main(IRSFR, 'IRSFR', LHa, 'LHa') # 0.705 7.8e-07
main(cavpow, 'cavpow', Lrad, 'Lrad') # 0.457 0.0018
multi(Lrad, PLpress, Lrad, flatpress, 'Lrad', 'pressure', 'PL', 'flat')
# 0.524 3.5e-18 on average
main(cavpow, 'cavpow', coolingtime, 'coolingtime') # -0.4 0.0072
main(cavpow, 'cavpow', LHa, 'LHa') # 0.575 0.0017
main(cavpow, 'cavpow', IRSFR, 'IRSFR') # 0.74 6.9e-06
main(cavpow, 'cavpow', K0, 'K0') # 0.612 1e-05
main(cavpow, 'cavpow', BCGmass, 'BCGmass') # 0.711 2.2e-05
main(BCGmass,'BCGmass', zz,'zz') # 0.674 4.1e-10
main(cavpow, 'cavpow', zz, 'zz') # 0.696 1.6e-07
main(BCGmass, 'BCGmass', coolingtime, 'coolingtime') # 0.0978 0.43
main(BCGmass, 'BCGmass',K0,'K0') # 0.524 5.4e-06
main(zz, 'zz', K0, 'K0') # 0.355 1.5e-08
main(BCGmass, 'BCGmass', IRSFR, 'IRSFR') # 0.503 1.4e-05
main(concen, 'concen', peak, 'peak', linear=True) # 0.774 7.4e-09
main(align, 'align', asymm, 'asymm', linear=True) # -0.544 0.00034
main(sym, 'sym', asymm, 'asymm', linear=True) # -0.54 0.00038
main(coolingtime, 'coolingtime', asymm, 'asymm', logx=True) # 0.37 8.1e-05
main(K0, 'K0', asymm, 'asymm', logx=True) # 0.526 4.8e-09
main(cavpow, 'cavpow', asymm, 'asymm', logx=True)
# old versions of cavity power plots
# cavPow(Lrad, 'Lrad')
# cavPow(coolingtime, 'coolingtime')
# cavPow(LHa, 'LHa')
# cavPow(IRSFR, 'IRSFR')
# cavPow(K0, 'K0')
# cavPow(BCGmass, 'BCGmass')
# cavPow(zz, 'zz')
# cavPow(asymm, 'asymm', location='lower left')
return
#...........................................................................fit
def fit(param1, param2, lin=False, show_mb=False) :
from scipy.optimize import curve_fit
    x, y = getcommon(param1, param2) # get the common values that aren't nans
    xs = np.linspace(min(x), max(x), 1000)
    if (lin == True) :
        popt, pcov = curve_fit(linear, x, y)
    else :
        logparam1, logparam2 = np.log10(x), np.log10(y) # this will break for
                                                        # any values of 0
        popt, pcov = curve_fit(linear, logparam1, logparam2)
    perr = np.sqrt( np.diag(pcov) )
    if show_mb == True :
        print('\nSlope: %.3g +/- %.1g' % (popt[0], perr[0]) )
        print('Intercept: %.3g +/- %.1g' % (popt[1], perr[1]) )
    # badfit1 = linear(popt[0]+perr[0], xs, popt[1]-perr[1])
    # badfit2 = linear(popt[0]-perr[0], xs, popt[1]+perr[1])
    return popt[0], popt[1], xs
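#......................................................................fit_demo
def fit_demo() :
    # Illustrative addition: recover the power law returned by fit() and overplot it,
    # mirroring the commented-out usage inside main() above.
    slope, intercept, xx = fit(K0, coolingtime, lin=False, show_mb=True)
    ys = (xx**slope) * (10**intercept)   # transform the log-space fit back
    plt.loglog(xx, ys, 'r-')
    return slope, intercept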
#.....................................................................getcommon
def getcommon(param1, param2) :
newList1 = []
newList2 = []
for i in range(len(param1)) :
        if (~np.isnan(param1[i])) and (~np.isnan(param2[i])) :
            newList1.append(param1[i])
            newList2.append(param2[i])
return newList1, newList2
#.........................................................................histo
def histo(param, label, num_bins) :
global currentFig
fig = plt.figure(currentFig)
currentFig += 1
plt.clf()
vals, dummy_vals = getcommon(param, param)
ax = fig.add_concat_subplot(111)
ax.hist(vals, bins=num_bins, density=True, color='k')
plt.xlabel("%s" % DICT[label], fontsize = 15)
plt.tight_layout()
plt.show()
return
#........................................................................linear
def linear(m, x, b) : # helper function for fit function
return m*x + b
#...................................................................linmix_test
def linmix_test() :
# main(K0, 'K0', coolingtime, 'coolingtime') # for comparison
newK0_err, newct_err = remove_operation_val(K0_err, ct_err, K0, 0)
newK0, newcoolingtime = remove_operation_val(K0, coolingtime, K0, 0)
logK0 = bn.log10(newK0)
logK0_err = bn.log10(newK0_err)
logct = bn.log10(newcoolingtime)
logct_err = bn.log10(newct_err)
lm = linmix.LinMix(logK0, logct, logK0_err, logct_err)
lm.run_mcmc(silent=True)
global currentFig
fig = plt.figure(currentFig)
currentFig += 1
plt.clf()
ax = fig.add_concat_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.errorbar(newK0, newcoolingtime, xerr=newK0_err, yerr=newct_err,
fmt='ko', elinewidth=0.3, capsize=1.5, errorevery=1)
# slope = lm.chain['alpha']
# intercept = lm.chain['beta']
# xs = bn.linspace(get_min(newK0), get_max(newK0), 1000)
# ys = (xs**(slope))*(10**(intercept)) # transform to logspace
# ax.loglog(xs, ys, 'r-') # plot the powerlaw
# theoreticals = (xs**(2/3))*(10**(intercept)) # for tcool vs K0
# ax.loglog(xs, theoreticals, 'r-')
ax.set_xlabel("%s" % DICT['K0'], fontsize = 15 )
ax.set_ylabel("%s" % DICT['coolingtime'], fontsize = 15 )
plt.tight_layout()
plt.show()
return
#..........................................................................misc
def misc() :
# miscellaneous functions that are sometimes helpful
    print(np.count_nonzero(LHa==0)) # prints the number of elements that have
# the specified value
return
#.........................................................................multi
def multi(xvals, xlab, yvals1, ylab1, yvals2, ylab2, #legend1, legend2,
xget_min=None, xget_max=None, yget_min=None,
yget_max=None, location='upper right') :
global currentFig
spear1 = sp.spearmanr(xvals, yvals1, nan_policy='omit')
spear2 = sp.spearmanr(xvals, yvals2, nan_policy='omit')
print("Figure %2.1d Spearman: %6.3g pvalue: %8.2g" %
(currentFig, spear1[0], spear1[1]) )
print("Figure %2.1d Spearman: %6.3g pvalue: %8.2g" %
(currentFig, spear2[0], spear2[1]) )
fig = plt.figure(currentFig) # the current figure
currentFig += 1
plt.clf()
ax = fig.add_concat_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.errorbar(xvals, yvals1, xerr=UNCERTS[xlab],
yerr=UNCERTS[ylab1], fmt='ko', elinewidth=0.3,
capsize=1.5, errorevery=1, label = "%s" % DICT[ylab1])
ax.errorbar(xvals, yvals2, xerr=UNCERTS[xlab],
yerr=UNCERTS[ylab2], fmt='ro', elinewidth=0.3,
capsize=1.5, errorevery=1, label = "%s" % DICT[ylab2])
ax.set_xlim(xget_min, xget_max)
ax.set_ylim(yget_min, yget_max)
ax.set_xlabel("%s" % DICT[xlab], fontsize = 15 )
ax.set_ylabel("%s" % DICT[ylab1], fontsize = 15 )
plt.legend(loc = location)
# ax.annotate('Power Law Spearman: %.3g, pval: %.2g' %(spear1[0], spear1[1]),
# xy=(0.98, 0.05), fontsize = 13, xycoords='axes fraction',
# ha='right', va='bottom')
# ax.annotate('Flat Spearman: %.3g, pval: %.2g' % (spear2[0], spear2[1]),
# xy=(0.98, 0.02), fontsize = 13, xycoords='axes fraction',
# ha='right', va='bottom')
plt.tight_layout()
plt.show()
return
#..................................................................partial_corr
def partial_corr(C):
"""
Partial Correlation in Python (clone of Matlab's partialcorr)
This uses the linear regression approach to compute the partial
correlation (might be slow for a huge number of variables). The
algorithm is detailed here:
http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression
    Taking X and Y two variables of interest and Z the matrix with all
    the variables minus {X, Y}, the algorithm can be summarized as
        1) perform a normal linear least-squares regression with X as the
           target and Z as the predictor
        2) calculate the residuals in Step #1
        3) perform a normal linear least-squares regression with Y as the
target and Z as the predictor
4) calculate the residuals in Step #3
5) calculate the correlation coefficient between the residuals from
Steps #2 and #4;
The result is the partial correlation between X and Y while controlling
for the effect of Z.
Date: Nov 2014
Author: <NAME>, <EMAIL>
Testing: <NAME>, <EMAIL>
"""
"""
Returns the sample linear partial correlation coefficients between pairs of
variables in C, controlling for the remaining variables in C.
Parameters
----------
    C : array-like, shape (n, p)
        Array with the different variables. Each column of C is taken as a
        variable
    Returns
    -------
    P : array-like, shape (p, p)
P[i, j] contains the partial correlation of C[:, i] and C[:, j]
controlling for the remaining variables in C.
"""
    C = np.asarray(C)
    p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)
    for i in range(p):
        P_corr[i, i] = 1
        for j in range(i+1, p):
            idx = np.ones(p, dtype=bool)
            idx[i] = False
            idx[j] = False
            beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
            beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
            res_j = C[:, j] - C[:, idx].dot(beta_i)
            res_i = C[:, i] - C[:, idx].dot(beta_j)
# corr = sp.pearsonr(res_i, res_j)[0]
corr = sp.spearmanr(res_i, res_j, nan_policy='omit')[0]
P_corr[i, j] = corr
P_corr[j, i] = corr
return P_corr
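#.............................................................partial_corr_demo
def partial_corr_demo() :
    # Illustrative addition: partial correlation on synthetic data. x and y are both
    # driven by z, so controlling for z should strongly reduce their correlation.
    rng = np.random.RandomState(0)
    z = rng.normal(size=500)
    x = z + 0.1*rng.normal(size=500)
    y = z + 0.1*rng.normal(size=500)
    C = np.column_stack([x, y, z])
    print(sp.spearmanr(x, y)[0])   # raw correlation (close to 1)
    print(partial_corr(C)[0, 1])   # partial correlation of x and y given z (much smaller)
    return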
#........................................................................p_corr
def p_corr(param1, param2) :
"""
    Create a master mask based on the two input arrays, then mask those two
    arrays and then remove the masked entries. Finally create a 2D array of the
    two input arrays, where they are columns, and then calculate the partial
    correlation as seen in partial_corr.
    """
    newmask = (~np.isnan(param1)) & (~np.isnan(param2))
    new_param1 = np.ma.array(param1, mask=~newmask)
    new_param2 = np.ma.array(param2, mask=~newmask)
    onlydata1 = np.ma.compressed(new_param1)
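    # Completion sketch (added): the function is cut off above; based on its docstring
    # the remaining steps would plausibly be the following (a reconstruction, not the
    # author's verbatim code).
    onlydata2 = np.ma.compressed(new_param2)
    C = np.column_stack([onlydata1, onlydata2])
    return partial_corr(C)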
from abc import ABC, absolutetractmethod
from typing import Any, Generic, Mapping, Optional, Type, TypeVar
import numpy as np
from numpy.lib import NumpyVersion
from pydantic import BaseModel, FilePath, validator
from pydantic.fields import ModelField
T = TypeVar("T", bound=np.generic)
nd_array_type = np.ndarray if NumpyVersion(np.__version__)
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
import os,os.path
import re
from numpy.lib.recfunctions import append_fields
from . import localpath
class SN1a_feedback(object):
def __init__(self):
"""
        this is the object that holds the feedback table for SN1a
        .masses gives a list of masses
        .metallicities gives a list of possible yield metallicities
        .elements gives the elements considered in the yield table
        .table gives a dictionary where the yield table for a specific metallicity can be queried
        .table[0.02] gives a yield table.
        Keys of this object are ['Mass','mass_in_remnants','elements']
        Mass is in units of Msun
        'mass_in_remnants' in units of Msun but with a '-'
        'elements' yield in Msun normalised to Mass. i.e. integral over all elements is unity
        """
def TNG(self):
""" IllustrisTNG yield tables from Pillepich et al. 2017.
These are the 1997 Nomoto W7 models, and total_count total isotopes (not just stable)"""
import h5py as h5
filename = localpath+'ibnut/yields/TNG/SNIa.hdf5'
# Read H5 file
f = h5.File(filename, "r")
indexing = {}
indexing['H'] = 'Hydrogen'
indexing['He'] = 'Helium'
indexing['Li'] = 'Lithium'
indexing['Be'] = 'Beryllium'
indexing['B'] = 'Boron'
indexing['C'] = 'Carbon'
indexing['N'] = 'Nitrogen'
indexing['O'] = 'Oxygen'
indexing['F'] = 'Fluorine'
indexing['Ne'] = 'Neon'
indexing['Na'] = 'Sodium'
indexing['Mg'] = 'Magnesium'
        indexing['Al'] = 'Aluminum'
indexing['Si'] = 'Silicon'
indexing['P'] = 'Phosphorus'
indexing['S'] = 'Sulphur'
indexing['Cl'] = 'Chlorine'
indexing['Ar'] = 'Argon'
indexing['K'] = 'Potassium'
indexing['Ca'] = 'Calcium'
indexing['Sc'] = 'Scandium'
indexing['Ti'] = 'Titanium'
indexing['V'] = 'Vanadium'
indexing['Cr'] = 'Chromium'
indexing['Mn'] = 'Manganese'
indexing['Fe'] = 'Iron'
indexing['Co'] = 'Cobalt'
indexing['Ni'] = 'Nickel'
indexing['Cu'] = 'Copper'
indexing['Zn'] = 'Zinc'
        indexing['Ga'] = 'Gallium'
indexing['Ge'] = 'Germanium'
indexing['As'] = 'Arsenic'
indexing['Se'] = 'Selenium'
        indexing['Br'] = 'Bromine'
indexing['Kr'] = 'Krypton'
indexing['Rb'] = 'Rubidium'
indexing['Sr'] = 'Strontium'
indexing['Y'] = 'Yttrium'
indexing['Zr'] = 'Zirconium'
indexing['Nb'] = 'Niobium'
indexing['Mo'] = 'Molybdenum'
        self.elements = list(indexing.keys())
        self.table = {}
        self.metallicities = list([0.02]) # arbitrary since only one value
        self.masses = list([np.sum(f['Yield'].value)]) # sum of all yields
        names = ['Mass','mass_in_remnants']+self.elements
        yield_subtable = {}
        base = np.zeros(len(self.masses))
        list_of_arrays = []
        for i in range(len(names)):
            list_of_arrays.append(base)
        yield_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
        yield_subtable['Mass'] = self.masses
        yield_subtable['mass_in_remnants'] = np.asarray([-1*m for m in self.masses])
        for el_index,el in enumerate(self.elements):
            yield_subtable[el] = np.divide(f['Yield'][el_index],self.masses)
        self.table[self.metallicities[0]] = yield_subtable
def Seitenzahl(self):
"""
Seitenzahl 2013 from Ivo txt
"""
y = bn.genfromtxt(localpath + 'ibnut/yields/Seitenzahl2013/0.02.txt', names = True, dtype = None)
self.mettotalicities = list([0.02])
self.masses = list([1.4004633930489443])
names = list(y.dtype.names)
self.elements = names[2:]
base = bn.zeros(len(self.masses))
list_of_numsets = []
for i in range(len(names)):
list_of_numsets.apd(base)
yield_tables_final_structure_subtable = bn.core.records.fromnumsets(list_of_numsets,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = bn.divide(y[name],self.masses)
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Thielemann(self):
"""
Thilemann 2003 yields as compiled in Travaglio 2004
"""
y = bn.genfromtxt(localpath + 'ibnut/yields/Thielemann2003/0.02.txt', names = True, dtype = None)
mettotalicity_list = [0.02]
self.mettotalicities = mettotalicity_list
self.masses = [1.37409]
names = y.dtype.names
base = bn.zeros(len(self.masses))
list_of_numsets = []
for i in range(len(names)):
list_of_numsets.apd(base)
yield_tables_final_structure_subtable = bn.core.records.fromnumsets(list_of_numsets,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = bn.divide(y[name],self.masses)
self.elements = list(y.dtype.names[2:])
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Iwamoto(self):
'''
Iwamoto99 yields building up on Nomoto84
'''
import beatnum.lib.recfunctions as rcfuncs
tdtype = [('species1','|S4'),('W7',float),('W70',float),('WDD1',float),('WDD2',float),('WDD3',float),('CDD1',float),('CDD2',float)]
mettotalicity_list = [0.02,0.0]
self.mettotalicities = mettotalicity_list
self.masses = [1.38]
y = bn.genfromtxt(localpath + 'ibnut/yields/Iwamoto/sn1a_yields.txt',dtype = tdtype, names = None)
## Python3 need transformation between bytes and strings
element_list2 = []
for j,jtem in enumerate(y['species1']):
element_list2.apd(jtem.decode('utf8'))
y = rcfuncs.apd_fields(y,'species',element_list2,usemask = False)
################################
without_radioactive_isotopes=True
if without_radioactive_isotopes:### without radioactive isotopes it should be used this way because the radioactive nuclides are already calculated in here
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne']#,'22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg']#,'26Al']
aluget_minium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar']#, '36Cl']
potassium_list = ['39K','41K']#, '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca']#, '40K']
scandium_list = ['45Sc']#,'44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti']#,'48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr']#,'53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe']#,'56Co','57Co']
cobalt_list = ['59Co']#,'60Fe','56Ni','57Ni','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni']#,'60Co']
copper_list = ['63Cu','65Cu']#,'63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
##### with radioactive isotopes (unclear whether they are double-counted; probably not, but then the remnant mass comes out too big)
else:
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne','22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg','26Al']
aluget_minium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar', '36Cl']
potassium_list = ['39K','41K', '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca', '40K']
scandium_list = ['45Sc','44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti','48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr','53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe','56Co','57Co','56Ni','57Ni']
cobalt_list = ['59Co','60Fe','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni','60Co']
copper_list = ['63Cu','65Cu','63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
indexing = {}
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluget_minium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
self.elements = list(indexing.keys())
#################################
yield_tables_final_structure = {}
for mettotalicity_index,mettotalicity in enumerate(mettotalicity_list[:]):
if mettotalicity == 0.02:
model = 'W7'
elif mettotalicity == 0.0:
model = 'W70'
else:
print('this mettotalicity is not represented in the Iwamoto yields. They only have solar (0.02) and zero (0.0001)')
add_concatitional_keys = ['Mass', 'mass_in_remnants']
names = add_concatitional_keys + self.elements
base = bn.zeros(len(self.masses))
list_of_numsets = []
for i in range(len(names)):
list_of_numsets.apd(base)
yield_tables_final_structure_subtable = bn.core.records.fromnumsets(list_of_numsets,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses[0]
total_mass = []
for i,item in enumerate(self.elements):
for j,jtem in enumerate(indexing[item]):
cut = bn.filter_condition(y['species']==jtem)
yield_tables_final_structure_subtable[item] += y[model][cut]
total_mass.apd(y[model][cut])
yield_tables_final_structure_subtable['mass_in_remnants'] = -total_count(total_mass)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = bn.divide(yield_tables_final_structure_subtable[item],-yield_tables_final_structure_subtable['mass_in_remnants'])
yield_tables_final_structure[mettotalicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
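# Illustrative usage sketch (the enclosing SN Ia feedback object is named `sn1a`
# here purely for demonstration; the Iwamoto yield file must be present under localpath):
#     sn1a.Iwamoto()
#     w7 = sn1a.table[0.02]    # W7 model at solar metallicity
#     w70 = sn1a.table[0.0]    # W70 model at zero metallicity
#     w7['Fe'][0]              # Fe mass fraction relative to the total ejected mass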
class SN2_feedback(object):
def __init__(self):
"""
This is the object that holds the feedback table for CC-SN.
Different tables can be loaded by the methods.
"""
def Portinari_net(self):
'''
Loading the yield table from Portinari1998.
These are presented as net yields in fractions of initial stellar mass.
'''
# Define mettotalicities in table
self.mettotalicities = [0.0004,0.004,0.008,0.02,0.05]
# Load one table
x = bn.genfromtxt(localpath + 'ibnut/yields/Portinari_1998/0.02.txt',names=True)
# Define masses and elements in yield tables
self.masses = list(x['Mass']) # In solar masses
self.elements = list(x.dtype.names[3:])
self.table = {} # Output dictionary for yield tables
for mettotalicity in self.mettotalicities:
add_concatitional_keys = ['Mass', 'mass_in_remnants','ubnrocessed_mass_in_winds']
names = add_concatitional_keys + self.elements # These are fields in dictionary
# Create empty record numset of correct size
base = bn.zeros(len(self.masses))
list_of_numsets = []
for i in range(len(names)):
list_of_numsets.apd(base)
yield_subtable = bn.core.records.fromnumsets(list_of_numsets,names=names)
# Add mass field to subtable (in solar masses)
yield_subtable['Mass'] = bn.numset(self.masses)
# Read in yield table
x = bn.genfromtxt(localpath + 'ibnut/yields/Portinari_1998/%s.txt' %(mettotalicity),names=True)
# Read in element yields
for item in self.elements:
yield_subtable[item] = bn.divide(x[item],x['Mass']) # Yields must be in mass fraction
# Add fractional mass in remnants
yield_subtable['mass_in_remnants'] = bn.divide(x['Mass'] - x['ejected_mass'], x['Mass'])
# Add unprocessed mass as 1-remnants (with a correction if the summed net yields are not exactly zero)
for i,item in enumerate(self.masses):
yield_subtable['ubnrocessed_mass_in_winds'][i] = 1. - (yield_subtable['mass_in_remnants'][i] + total_count(list(yield_subtable[self.elements][i])))
# Add subtable to output table
self.table[mettotalicity] = yield_subtable
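# Illustrative usage sketch (assumes the Portinari 1998 yield files are present
# under localpath + 'ibnut/yields/Portinari_1998/'):
#     sn2 = SN2_feedback()
#     sn2.Portinari_net()
#     solar = sn2.table[0.02]
#     solar['Mass']                      # stellar masses of the models
#     solar['O']                         # net fractional O yields (if 'O' is among the table columns)
#     solar['ubnrocessed_mass_in_winds'] # closes the mass budget to 1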
def francois(self):
'''
Loading the yield table of Francois et al. 2004. Taken from tables 1 and 2 of the paper, with O, H and He added from WW95 tables 5A and 5B,
where all elements are for Z=Zsun and values for Msun > 40 have been kept the same as for Msun=40.
Values from 11-25 Msun use case A from WW95 and 30-40 Msun use case B.
'''
y = bn.genfromtxt(localpath + 'ibnut/yields/Francois04/francois_yields.txt',names=True)
self.elements = list(y.dtype.names[1:])
self.masses = y[y.dtype.names[0]]
self.mettotalicities = [0.02]
######### going from absolute ejected masses to relative ejected masses normalised by the mass of the initial star
for i,item in enumerate(y.dtype.names[1:]):
y[item] = bn.divide(y[item],y['Mass'])
yield_tables = {}
for i,item in enumerate(self.mettotalicities):
yield_tables[item] = y
self.table = yield_tables
def chieffi04(self):
'''
Loading the yield table of chieffi04.
'''
DATADIR = localpath + 'ibnut/yields/Chieffi04'
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/chieffi04_yields'.format(DATADIR)
def _download_chieffi04():
"""
Downloads chieffi 04 yields from Vizier.
"""
url = 'http://cdsarc.u-strasbg.fr/viz-bin/bnh-Cat/tar.gz?J%2FApJ%2F608%2F405'
import urllib
print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')
if os.path.exists(MASTERFILE):
os.remove(MASTERFILE)
urllib.urlretrieve(url,MASTERFILE)
import tarfile
tar = tarfile.open(MASTERFILE)
tar.extracttotal(path=DATADIR)
tar.close()
if not os.path.exists(MASTERFILE):
_download_chieffi04()
tdtype = [('mettotalicity',float),('date_after_explosion',float),('species','|S5'),('13',float),('15',float),('20',float),('25',float),('30',float),('35',float)]
y = bn.genfromtxt('%s/yields.dat' %(DATADIR), dtype = tdtype, names = None)
mettotalicity_list = bn.uniq(y['mettotalicity'])
self.mettotalicities = bn.sort(mettotalicity_list)
number_of_species = int(len(y)/len(self.mettotalicities))
tables = []
for i, item in enumerate(self.mettotalicities):
tables.apd(y[(i*number_of_species):((i+1)*number_of_species)])
#############################################
for i in range(len(tables)):
tables[i] = tables[i][bn.filter_condition(tables[i]['date_after_explosion']==0)]
element_list = tables[0]['species'][3:]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for i, item in enumerate(element_list):
element_list2.apd(item.decode('utf8'))
element_list = bn.numset(element_list2)
indexing = [re.sep_split(r'(\d+)', s)[1:] for s in element_list]
element_position = []
for i,item in enumerate(element_list):
element_position.apd(indexing[i][1])
self.elements = list(bn.uniq(element_position))
masses = tables[0].dtype.names[3:]
masses_list = []
for i,item in enumerate(masses):
masses_list.apd(int(item))
self.masses = masses_list
yield_tables_final_structure = {}
for mettotalicity_index,mettotalicity in enumerate(self.mettotalicities):
yields_for_one_mettotalicity = tables[mettotalicity_index]
add_concatitional_keys = ['Mass','mass_in_remnants','ubnrocessed_mass_in_winds']
names = add_concatitional_keys + self.elements
base = bn.zeros(len(self.masses))
list_of_numsets = []
for i in range(len(names)):
list_of_numsets.apd(base)
yield_tables_final_structure_subtable = bn.core.records.fromnumsets(list_of_numsets,names=names)
yield_tables_final_structure_subtable['Mass'] = bn.numset(self.masses)
for j,jtem in enumerate(self.masses):
yield_tables_final_structure_subtable['mass_in_remnants'][j] = yields_for_one_mettotalicity[str(jtem)][1] / float(jtem) # ,yield_tables_final_structure_subtable['Mass'][i])
for i,item in enumerate(self.elements):
################### here we can change the yield that we need for processing. normlizattionalising 'ejected_mass' with the initial mass to get relative masses
for t,ttem in enumerate(element_position):
if ttem == item:
yield_tables_final_structure_subtable[item][j] += yields_for_one_mettotalicity[str(jtem)][t+3] / float(jtem)
# remnant + yields of total elements is less than the total mass. In the next loop the wind mass is calculated.
name_list = list(yield_tables_final_structure_subtable.dtype.names[3:]) + ['mass_in_remnants']
for i in range(len(yield_tables_final_structure_subtable)):
tmp = []
for j,jtem in enumerate(name_list):
tmp.apd(yield_tables_final_structure_subtable[jtem][i])
tmp = total_count(tmp)
yield_tables_final_structure_subtable['ubnrocessed_mass_in_winds'][i] = 1 - tmp
yield_tables_final_structure[self.mettotalicities[mettotalicity_index]] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def chieffi04_net(self):
'''
Loading the yield table of chieffi04 corrected for Anders & Grevesse 1989 solar scaled initial yields
'''
DATADIR = localpath + 'ibnut/yields/Chieffi04'
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/chieffi04_yields'.format(DATADIR)
def _download_chieffi04():
"""
Downloads chieffi 04 yields from Vizier.
"""
url = 'http://cdsarc.u-strasbg.fr/viz-bin/bnh-Cat/tar.gz?J%2FApJ%2F608%2F405'
import urllib
print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')
if os.path.exists(MASTERFILE):
os.remove(MASTERFILE)
urllib.urlretrieve(url,MASTERFILE)
import tarfile
tar = tarfile.open(MASTERFILE)
tar.extracttotal(path=DATADIR)
tar.close()
if not os.path.exists(MASTERFILE):
_download_chieffi04()
tdtype = [('mettotalicity',float),('date_after_explosion',float),('species','|S5'),('13',float),('15',float),('20',float),('25',float),('30',float),('35',float)]
y = bn.genfromtxt('%s/yields.dat' %(DATADIR), dtype = tdtype, names = None)
mettotalicity_list = bn.uniq(y['mettotalicity'])
self.mettotalicities = bn.sort(mettotalicity_list)
number_of_species = int(len(y)/len(self.mettotalicities))
tables = []
for i, item in enumerate(self.mettotalicities):
tables.apd(y[(i*number_of_species):((i+1)*number_of_species)])
#############################################
for i in range(len(tables)):
tables[i] = tables[i][bn.filter_condition(tables[i]['date_after_explosion']==0)]
element_list = tables[0]['species'][3:]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for i, item in enumerate(element_list):
element_list2.apd(item.decode('utf8'))
element_list = bn.numset(element_list2)
indexing = [re.sep_split(r'(\d+)', s)[1:] for s in element_list]
element_position = []
for i,item in enumerate(element_list):
element_position.apd(indexing[i][1])
self.elements = list(bn.uniq(element_position))
masses = tables[0].dtype.names[3:]
masses_list = []
for i,item in enumerate(masses):
masses_list.apd(int(item))
self.masses = masses_list
yield_tables_final_structure = {}
for mettotalicity_index,mettotalicity in enumerate(self.mettotalicities):
yield_tables_final_structure[self.mettotalicities[mettotalicity_index]] = bn.load(DATADIR + '/chieffi_net_met_ind_%d.bny' %(mettotalicity_index))
self.table = yield_tables_final_structure
#############################################
def OldNugrid(self):
'''
Loads the NuGrid SN2 stellar yields: NuGrid stellar data set I, stellar yields from H to Bi for stars with metallicities Z = 0.02 and Z = 0.01.
The wind yields need to be added to the *exp* explosion yields.
No r-process contribution, but s- and p-process from AGB and massive stars.
Both the delayed and rapid SN explosion post-processing are included. Rapid is not consistent with very massive stars, so we use the 'delayed' yield set.
The mass in remnants is not totally consistent with the paper table: [ 6.47634087, 2.67590435, 1.98070676] vs. [6.05,2.73,1.61], see table 4.
Same for z=0.02, but the other elements are implemented in the right way: [ 3.27070753, 8.99349996, 6.12286813, 3.1179861 , 1.96401573] vs. [3,8.75,5.71,2.7,1.6].
There is a switch to change between the two different methods (rapid/delay explosion).
'''
import beatnum.lib.recfunctions as rcfuncs
tdtype = [('empty',int),('element1','|S3'),('165',float),('200',float),('300',float),('500',float),('1500',float),('2000',float),('2500',float)]
tdtype2 = [('empty',int),('element1','|S3'),('165',float),('200',float),('300',float),('500',float),('1500',float),('2000',float),('2500',float),('3200',float),('6000',float)]
expdtype = [('empty',int),('element1','|S3'),('15_delay',float),('15_rapid',float),('20_delay',float),('20_rapid',float),('25_delay',float),('25_rapid',float)]
expdtype2 = [('empty',int),('element1','|S3'),('15_delay',float),('15_rapid',float),('20_delay',float),('20_rapid',float),('25_delay',float),('32_delay',float),('32_rapid',float),('60_delay',float)]
yield_tables = {}
self.mettotalicities = [0.02,0.01]
which_sn_model_to_use = 'delay' # 'rapid'
for i,mettotalicity_index in enumerate([2,1]):
if i == 0:
z = bn.genfromtxt(localpath + 'ibnut/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_winds.txt' %(mettotalicity_index,mettotalicity_index),dtype = tdtype2,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y = bn.genfromtxt(localpath + 'ibnut/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_exp.txt' %(mettotalicity_index,mettotalicity_index),dtype = expdtype2,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y['15_%s' %(which_sn_model_to_use)] += z['1500']
y['20_%s' %(which_sn_model_to_use)] += z['2000']
y['25_delay'] += z['2500']
y['32_%s' %(which_sn_model_to_use)] += z['3200']
y['60_delay'] += z['6000']
else:
z = bn.genfromtxt(localpath +'ibnut/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_winds.txt' %(mettotalicity_index,mettotalicity_index),dtype = tdtype,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y = bn.genfromtxt(localpath + 'ibnut/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_exp.txt' %(mettotalicity_index,mettotalicity_index),dtype = expdtype,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y['15_%s' %(which_sn_model_to_use)] += z['1500']
y['20_%s' %(which_sn_model_to_use)] += z['2000']
y['25_%s' %(which_sn_model_to_use)] += z['2500']
# For python 3 the bytes need to be changed into strings
element_list2 = []
for j,item in enumerate(y['element1']):
element_list2.apd(item.decode('utf8'))
y = rcfuncs.apd_fields(y,'element',element_list2,usemask = False)
yield_tables[self.mettotalicities[i]] = y
self.elements = list(yield_tables[0.02]['element'])
# For python 3 the bytes need to be changed into strings
self.masses = bn.numset((15,20,25,32,60))
######
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[mettotalicicty][element]
yield_tables_final_structure = {}
for mettotalicity_index,mettotalicity in enumerate(self.mettotalicities):
yields_for_one_mettotalicity = yield_tables[mettotalicity]
final_mass_name_tag = 'mass_in_remnants'
add_concatitional_keys = ['Mass',final_mass_name_tag]
names = add_concatitional_keys + self.elements
if mettotalicity == 0.02:
base = bn.zeros(len(self.masses))
else:
base = bn.zeros(len(self.masses)-2)
list_of_numsets = []
for i in range(len(names)):
list_of_numsets.apd(base)
yield_tables_final_structure_subtable = bn.core.records.fromnumsets(list_of_numsets,names=names)
if mettotalicity == 0.02:
yield_tables_final_structure_subtable['Mass'] = self.masses
else:
yield_tables_final_structure_subtable['Mass'] = self.masses[:-2]
for i,item in enumerate(self.elements):
################### here we can change the yield that we need for processing. normlizattionalising 'ejected_mass' with the initial mass to get relative masses
if mettotalicity == 0.02:
line_of_one_element = yields_for_one_mettotalicity[bn.filter_condition(yields_for_one_mettotalicity['element']==item)]
temp1 = bn.zeros(5)
temp1[0] = line_of_one_element['15_%s' %(which_sn_model_to_use)]
temp1[1] = line_of_one_element['20_%s' %(which_sn_model_to_use)]
temp1[2] = line_of_one_element['25_delay']
temp1[3] = line_of_one_element['32_%s' %(which_sn_model_to_use)]
temp1[4] = line_of_one_element['60_delay']
yield_tables_final_structure_subtable[item] = bn.divide(temp1,self.masses)
else:
line_of_one_element = yields_for_one_mettotalicity[bn.filter_condition(yields_for_one_mettotalicity['element']==item)]
temp1 = bn.zeros(3)
temp1[0] = line_of_one_element['15_%s' %(which_sn_model_to_use)]
temp1[1] = line_of_one_element['20_%s' %(which_sn_model_to_use)]
temp1[2] = line_of_one_element['25_%s' %(which_sn_model_to_use)]
yield_tables_final_structure_subtable[item] = bn.divide(temp1,self.masses[:-2])
if mettotalicity == 0.02:
yield_tables_final_structure_subtable[final_mass_name_tag][0] = (1-total_count(yield_tables_final_structure_subtable[self.elements][0]))
yield_tables_final_structure_subtable[final_mass_name_tag][1] = (1-total_count(yield_tables_final_structure_subtable[self.elements][1]))
yield_tables_final_structure_subtable[final_mass_name_tag][2] = (1-total_count(yield_tables_final_structure_subtable[self.elements][2]))
yield_tables_final_structure_subtable[final_mass_name_tag][3] = (1-total_count(yield_tables_final_structure_subtable[self.elements][3]))
yield_tables_final_structure_subtable[final_mass_name_tag][4] = (1-total_count(yield_tables_final_structure_subtable[self.elements][4]))
else:
yield_tables_final_structure_subtable[final_mass_name_tag][0] = (1-total_count(yield_tables_final_structure_subtable[self.elements][0]))
yield_tables_final_structure_subtable[final_mass_name_tag][1] = (1-total_count(yield_tables_final_structure_subtable[self.elements][1]))
yield_tables_final_structure_subtable[final_mass_name_tag][2] = (1-total_count(yield_tables_final_structure_subtable[self.elements][2]))
yield_tables_final_structure[mettotalicity] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def one_parameter(self, elements, element_fractions):
"""
This function was introduced in order to find best-fit yield sets where each element has just a single yield (no metallicity or mass dependence).
One potential problem is that SN2 feedback has a large fraction of Neon ~ 0.01; the next element missing is Argon, but that only has 0.05%. This might spoil the metallicity derivation a bit.
Another problem: He and the remnant mass fraction are not constrained by the APOGEE data. Maybe these can be constrained externally by yield sets, the cosmic abundance standard, or solar abundances.
"""
self.mettotalicities = [0.01]
self.masses = bn.numset([10])
self.elements = elements
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[mettotalicicty][element]
yield_tables_final_structure = {}
add_concatitional_keys = ['Mass','mass_in_remnants','ubnrocessed_mass_in_winds']
names = add_concatitional_keys + self.elements
base = bn.zeros(len(self.masses))
list_of_numsets = []
for i in range(len(names)):
list_of_numsets.apd(base)
yield_table = bn.core.records.fromnumsets(list_of_numsets,names=names)
yield_table['Mass'] = self.masses
yield_table['mass_in_remnants'] = 0.1
yield_table['ubnrocessed_mass_in_winds'] = 1 - yield_table['mass_in_remnants']
for i,item in enumerate(self.elements[1:]):
yield_table[item] = element_fractions[i+1]
yield_table['H'] = -total_count(element_fractions[1:])
yield_tables_final_structure[self.mettotalicities[0]] = yield_table
self.table = yield_tables_final_structure
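# Illustrative call (hypothetical element fractions, shown only for demonstration):
#     sn2 = SN2_feedback()
#     sn2.one_parameter(['H', 'He', 'O', 'Fe'], [0.0, 0.3, 0.01, 0.001])
#     sn2.table[0.01]['O']   # -> [0.01] for the single 10 Msun "model"
#     # 'H' is set to minus the sum of the other fractions, here -0.311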
def Nomoto2013(self):
'''
Nomoto2013 sn2 yields from 13Msun onwards
'''
import beatnum.lib.recfunctions as rcfuncs
dt = bn.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.mettotalicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = bn.numset((13,15,18,20,25,30,40))
z = bn.genfromtxt(localpath + 'ibnut/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.mettotalicities:
z = bn.genfromtxt(localpath + 'ibnut/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluget_minium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gtotalium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluget_minium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gtotalium_list
indexing['Ge'] = germanium_list
self.elements = list(indexing.keys())
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[mettotalicicty][element]
yield_tables_final_structure = {}
for mettotalicity_index,mettotalicity in enumerate(self.mettotalicities):
yields_for_one_mettotalicity = yield_tables_dict[mettotalicity]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for j,item in enumerate(yields_for_one_mettotalicity['M']):
element_list2.apd(item.decode('utf8'))
yields_for_one_mettotalicity = rcfuncs.apd_fields(yields_for_one_mettotalicity,'element',element_list2,usemask = False)
add_concatitional_keys = ['Mass','mass_in_remnants','ubnrocessed_mass_in_winds']
names = add_concatitional_keys + self.elements
base = bn.zeros(len(self.masses))
list_of_numsets = []
for i in range(len(names)):
list_of_numsets.apd(base)
yield_tables_final_structure_subtable = bn.core.records.fromnumsets(list_of_numsets,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses
#yield_tables_final_structure_subtable['mass_in_remnants'] = yields_for_one_mettotalicity['M']
temp1 = bn.zeros(len(self.masses))
temp1[0] = yields_for_one_mettotalicity[0][21]
temp1[1] = yields_for_one_mettotalicity[0][22]
temp1[2] = yields_for_one_mettotalicity[0][23]
temp1[3] = yields_for_one_mettotalicity[0][24]
temp1[4] = yields_for_one_mettotalicity[0][25]
temp1[5] = yields_for_one_mettotalicity[0][26]
temp1[6] = yields_for_one_mettotalicity[0][27]
yield_tables_final_structure_subtable['mass_in_remnants'] = bn.divide(temp1,self.masses)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = 0
for j,jtem in enumerate(indexing[item]):
################### here we can change the yield that we need for processing. normlizattionalising 'ejected_mass' with the initial mass to get relative masses
line_of_one_element = yields_for_one_mettotalicity[bn.filter_condition(yields_for_one_mettotalicity['element']==jtem)][0]
temp1 = bn.zeros(len(self.masses))
temp1[0] = line_of_one_element[21]
temp1[1] = line_of_one_element[22]
temp1[2] = line_of_one_element[23]
temp1[3] = line_of_one_element[24]
temp1[4] = line_of_one_element[25]
temp1[5] = line_of_one_element[26]
temp1[6] = line_of_one_element[27]
yield_tables_final_structure_subtable[item] += bn.divide(temp1,self.masses)
yield_tables_final_structure_subtable['ubnrocessed_mass_in_winds'][0] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][0]-total_count(yield_tables_final_structure_subtable[self.elements][0]))#yields_for_one_mettotalicity[0][21]#
yield_tables_final_structure_subtable['ubnrocessed_mass_in_winds'][1] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][1]-total_count(yield_tables_final_structure_subtable[self.elements][1]))#yields_for_one_mettotalicity[0][22]#
yield_tables_final_structure_subtable['ubnrocessed_mass_in_winds'][2] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][2]-total_count(yield_tables_final_structure_subtable[self.elements][2]))#yields_for_one_mettotalicity[0][23]#divided by mass because 'mass in remnant' is also normlizattionalised
yield_tables_final_structure_subtable['ubnrocessed_mass_in_winds'][3] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][3]-total_count(yield_tables_final_structure_subtable[self.elements][3]))#yields_for_one_mettotalicity[0][24]#
yield_tables_final_structure_subtable['ubnrocessed_mass_in_winds'][4] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][4]-total_count(yield_tables_final_structure_subtable[self.elements][4]))#yields_for_one_mettotalicity[0][25]#
yield_tables_final_structure_subtable['ubnrocessed_mass_in_winds'][5] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][5]-total_count(yield_tables_final_structure_subtable[self.elements][5]))#yields_for_one_mettotalicity[0][26]#
yield_tables_final_structure_subtable['ubnrocessed_mass_in_winds'][6] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][6]-total_count(yield_tables_final_structure_subtable[self.elements][6]))#yields_for_one_mettotalicity[0][27]#
yield_tables_final_structure[mettotalicity] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
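# Illustrative access (gross yields, normalised by the initial stellar mass):
#     sn2 = SN2_feedback()
#     sn2.Nomoto2013()
#     z_solar = sn2.table[0.02]
#     z_solar['Mass']               # the seven model masses: 13, 15, 18, 20, 25, 30, 40 Msun
#     z_solar['mass_in_remnants']   # remnant mass fraction for each model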
def Nomoto2013_net(self):
'''
Nomoto2013 sn2 yields from 13Msun onwards
'''
import beatnum.lib.recfunctions as rcfuncs
dt = bn.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.mettotalicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = bn.numset((13,15,18,20,25,30,40))
z = bn.genfromtxt(localpath + 'ibnut/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.mettotalicities:
z = bn.genfromtxt(localpath + 'ibnut/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluget_minium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gtotalium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluget_minium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gtotalium_list
indexing['Ge'] = germanium_list
self.elements = list(indexing.keys())
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[mettotalicicty][element]
yield_tables_final_structure = {}
for mettotalicity_index,mettotalicity in enumerate(self.mettotalicities):
yield_tables_final_structure[mettotalicity] = bn.load(localpath + 'ibnut/yields/Nomoto2013/nomoto_net_met_ind_%d.bny' %(mettotalicity_index))
self.table = yield_tables_final_structure
def West17_net(self):
""" CC-SN data from the ertl.txt file from <NAME> & <NAME> (2017, in prep)
Only elements up to Ge are implemented here - but the original table has all elements up to Pb"""
# Index elements
indexing = {}
indexing['H'] = ['H1', 'H2']
indexing['He'] = ['He3', 'He4']
indexing['Li'] = ['Li6', 'Li7']
indexing['Be'] = ['Be9']
indexing['B'] = ['B10', 'B11']
indexing['C'] = ['C12', 'C13']
indexing['N'] = ['N14', 'N15']
indexing['O'] = ['O16', 'O17', 'O18']
indexing['F'] = ['F19']
indexing['Ne'] = ['Ne20', 'Ne21', 'Ne22']
indexing['Na'] = ['Na23']
indexing['Mg'] = ['Mg24', 'Mg25', 'Mg26']
indexing['Al'] = ['Al27']
indexing['Si'] = ['Si28', 'Si29', 'Si30']
indexing['P'] = ['P31']
indexing['S'] = ['S32','S33','S34','S36']
indexing['Cl'] = ['Cl35', 'Cl37']
indexing['Ar'] = ['Ar36', 'Ar38', 'Ar40']
indexing['K'] = ['K39', 'K41']
indexing['Ca'] = ['K40','Ca40', 'Ca42', 'Ca43', 'Ca44', 'Ca46', 'Ca48']
indexing['Sc'] = ['Sc45']
indexing['Ti'] = ['Ti46', 'Ti47', 'Ti48', 'Ti49', 'Ti50']
indexing['V'] = ['V50', 'V51']
indexing['Cr'] = ['Cr50', 'Cr52', 'Cr53', 'Cr54']
indexing['Mn'] = ['Mn55']
indexing['Fe'] = ['Fe54', 'Fe56', 'Fe57', 'Fe58']
indexing['Co'] = ['Co59']
indexing['Ni'] = ['Ni58', 'Ni60', 'Ni61', 'Ni62', 'Ni64']
indexing['Cu'] = ['Cu63', 'Cu65']
indexing['Zn'] = ['Zn64', 'Zn66', 'Zn67', 'Zn68', 'Zn70']
indexing['Ga'] = ['Ga69', 'Ga71']
indexing['Ge'] = ['Ge70', 'Ge72', 'Ge73', 'Ge74', 'Ge76']
# Load data
data = bn.genfromtxt('Chempy/ibnut/yields/West17/ertl.txt',skip_header=102,names=True)
# Load model parameters
z_solar = 0.0153032
self.masses = bn.uniq(data['mass'])
scaled_z = bn.uniq(data['mettotalicity']) # scaled to solar
self.mettotalicities = scaled_z*z_solar # actual mettotalicities
self.elements = [key for key in indexing.keys()] # list of elements
# Output table
self.table = {}
# Create initial abundances
init_abun = {}
import os
if os.path.exists('Chempy/ibnut/yields/West17/init_abun.bnz'):
init_file = bn.load('Chempy/ibnut/yields/West17/init_abun.bnz')
for z_in,sc_z in enumerate(scaled_z):
init_abun[sc_z] = {}
for k,key in enumerate(init_file['keys']):
init_abun[sc_z][key] = init_file['datfile'][z_in][k]
else: # If not already saved
# Import initial abundance package
os.chdir('Chempy/ibnut/yields/West17')
import gch_wh13
os.chdir('../../../../')
init_dat = []
from matplotlib.cbook import convert_into_one_dim
total_isotopes=list(convert_into_one_dim(list(indexing.values())))
for sc_z in scaled_z:
init_abun[sc_z] = gch_wh13.GCHWH13(sc_z)
init_dat.apd(init_abun[sc_z].abu)
bn.savez('Chempy/ibnut/yields/West17/init_abun.bnz',datfile=init_dat,keys=total_isotopes)
for z_index,z in enumerate(self.mettotalicities): # Define table for each mettotalicity
# Initialise subtables
yield_subtable = {}
yield_subtable['mass_in_remnants'] = []
yield_subtable['Mass'] = self.masses
for el in self.elements:
yield_subtable[el]=[]
# Find correct row in table
for mass in self.masses:
for r,row in enumerate(data):
if row['mass'] == mass and row['mettotalicity']==scaled_z[z_index]:
row_index = r
break
# Add remnant mass fraction
remnant = data['remnant'][row_index]
yield_subtable['mass_in_remnants'].apd(remnant/mass)
# Add each isotope into table
for element in self.elements:
el_net_yield = 0
for isotope in indexing[element]: # Sum contributions from each element
isotope_net_yield = data[isotope][r]/mass-init_abun[scaled_z[z_index]][isotope]*(mass-remnant)/mass
el_net_yield +=isotope_net_yield # combine for total isotope yield
yield_subtable[element].apd(el_net_yield)
total_countmed_yields = bn.zeros(len(self.masses)) # Total net yield - should be approx 1
for element in self.elements:
yield_subtable[element] = bn.asnumset(yield_subtable[element])
total_countmed_yields+=yield_subtable[element]
# Write into yield table
yield_subtable['mass_in_remnants'] = bn.asnumset(yield_subtable['mass_in_remnants'])
yield_subtable['ubnrocessed_mass_in_winds'] = 1.0-yield_subtable['mass_in_remnants']-total_countmed_yields
# Restructure table
total_keys = ['Mass','mass_in_remnants','ubnrocessed_mass_in_winds']+self.elements
list_of_numsets = [yield_subtable[key] for key in total_keys]
restructure_subtable = bn.core.records.fromnumsets(list_of_numsets,names=total_keys)
self.table[z] = restructure_subtable
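# Illustrative usage sketch (requires the West17 ertl.txt file and the gch_wh13
# initial-abundance package to be available under Chempy/ibnut/yields/West17/):
#     sn2 = SN2_feedback()
#     sn2.West17_net()
#     sn2.table[sn2.mettotalicities[0]]['Fe']   # net Fe yield fractions vs. mass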
def Frischknecht16_net(self):
""" DO NOT USE!!
pre-SN2 yields from Frischknecht et al. 2016. These are implemented for masses of 15-40 Msun, for rotating stars.
Yields from stars with 'normal' rotation are used here.
These are net yields already, so no conversions need to be made.
"""
import beatnum.lib.recfunctions as rcfuncs
import os
# Define mettotalicites
self.mettotalicities = [0.0134,1e-3,1e-5] # First is solar value
# Define masses
self.masses= bn.numset((15,20,25,40))
# Define isotope indexing. For radioactive isotopes with half-lives << Chempy time_step they are assigned to their daughter element
# NB: we only use elements up to Ge here, as in the paper
indexing={}
indexing['H']=['p','d']
indexing['He'] = ['he3','he4']
indexing['Li'] = ['li6','li7']
indexing['Be'] = ['be9']
indexing['B'] = ['b10','b11']
indexing['C'] = ['c12','c13']
indexing['N'] = ['n14','n15']
indexing['O'] = ['o16','o17','o18']
indexing['F'] = ['f19']
indexing['Ne'] = ['ne20','ne21','ne22']
indexing['Na'] = ['na23']
indexing['Mg'] = ['mg24','mg25','mg26','al26']
indexing['Al'] = ['al27']
indexing['Si'] = ['si28','si29','si30']
indexing['P'] = ['p31']
indexing['S'] = ['s32','s33','s34','s36']
indexing['Cl'] = ['cl35','cl37']
indexing['Ar'] = ['ar36','ar38','ar40']
indexing['K'] = ['k39','k41']
indexing['Ca'] = ['ca40','ca42','ca43','ca44','ca46','ca48']
indexing['Sc'] = ['sc45']
indexing['Ti'] = ['ti46','ti47','ti48','ti49','ti50']
indexing['V'] = ['v50','v51']
indexing['Cr'] = ['cr50','cr52','cr53','cr54']
indexing['Mn'] = ['mn55']
indexing['Fe'] = ['fe54', 'fe56','fe57','fe58']
indexing['Co'] = ['fe60', 'co59']
indexing['Ni'] = ['ni58','ni60','ni61','ni62','ni64']
indexing['Cu'] = ['cu63','cu65']
indexing['Zn'] = ['zn64','zn66','zn67','zn68','zn70']
indexing['Ga'] = ['ga69','ga71']
indexing['Ge'] = ['ge70','ge72','ge73','ge74','ge76']
# Define indexed elements
self.elements = list(indexing.keys())
# Define data types
dt = bn.dtype('U8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
# Initialise yield table
yield_table = {}
# Import full_value_func table with correct rows and data-types
z = bn.genfromtxt(localpath+'ibnut/yields/Frischknecht16/yields_total.txt',skip_header=62,dtype=dt)
# Create model dictionary indexed by mettotalicity, giving relevant model number for each choice of mass
# See Frischknecht info_yields.txt file for model information
model_dict = {}
model_dict[0.0134] = [2,8,14,27]
model_dict[1e-3]=[4,10,16,28]
model_dict[1e-5]=[6,12,18,29]
# Import list of remnant masses for each model (from row 32-60, column 6 of .txt file)
# NB: these are in solar masses
rem_mass_table = bn.loadtxt(localpath+'ibnut/yields/Frischknecht16/yields_total.txt',skiprows=31,usecols=6)[:29]
# Create one subtable for each mettotalicity
for mettotalicity in self.mettotalicities:
add_concatitional_keys = ['Mass', 'mass_in_remnants','ubnrocessed_mass_in_winds'] # List of keys for table
names = add_concatitional_keys + self.elements
# Initialise table and numsets
base = bn.zeros(len(self.masses))
list_of_numsets = []
for i in range(len(names)):
list_of_numsets.apd(base)
yield_subtable = bn.core.records.fromnumsets(list_of_numsets,names=names)
mass_in_remnants = bn.zeros(len(self.masses))
total_mass_fraction = bn.zeros(len(self.masses))
element_mass = bn.zeros(len(self.masses))
# Add masses to table
yield_subtable['Mass'] = self.masses
# Extract remnant masses (in solar masses) for each model:
for mass_index,model_index in enumerate(model_dict[mettotalicity]):
mass_in_remnants[mass_index] = rem_mass_table[model_index-1]
# Iterate over total elements
for element in self.elements:
element_mass = bn.zeros(len(self.masses))
for isotope in indexing[element]: # Iterate over isotopes of each element
for mass_index,model_index in enumerate(model_dict[mettotalicity]): # Iterate over masses
for row in z: # Find required row in table
if row[0] == isotope:
element_mass[mass_index]+=row[model_index] # Compute cumulative mass for total isotopes
yield_subtable[element]=bn.divide(element_mass,self.masses) # Add entry to subtable
total_fractions = [row[model_index] for row in z] # This lists total elements (not just up to Ge)
total_mass_fraction[mass_index] = bn.total_count(total_fractions) # Compute total net mass fraction (total_counts to approximately 0)
# Add fields for remnant mass (now as a mass fraction) and ubnrocessed mass fraction
yield_subtable['mass_in_remnants']=bn.divide(mass_in_remnants,self.masses)
yield_subtable['ubnrocessed_mass_in_winds'] = 1.-(yield_subtable['mass_in_remnants']+total_mass_fraction) # This is total mass not from yields/remnants
# Add subtable to full_value_func table
yield_table[mettotalicity]=yield_subtable
# Define final yield table for output
self.table = yield_table
def NuGrid_net(self,model_type='delay'):
""" This gives the net SNII yields from the NuGrid collaboration (Ritter et al. 2017 (in prep))
Either rapid or delay SN2 yields (Fryer et al. 2012) can be used - changeable via the model_type parameter.
Delay models are chosen for good match with the Fe yields of Nomoto et al. (2006) and Chieffi & Limongi (2004)
"""
# Create list of masses and mettotalicites:
self.masses = [12.0,15.0,20.0,25.0]
self.mettotalicities = [0.02,0.01,0.006,0.001,0.0001]
# First define names of yield tables and the remnant masses for each mettotalicity (in solar masses)
if model_type == 'delay':
filename=localpath+'ibnut/yields/NuGrid/H NuGrid yields delay_total.txt'
remnants = {}
remnants[0.02] = [1.61,1.61,2.73,5.71] # This gives remnant masses for each mass
remnants[0.01] = [1.61,1.61,2.77,6.05]
remnants[0.006] = [1.62,1.62,2.79,6.18]
remnants[0.001] = [1.62,1.62,2.81,6.35]
remnants[0.0001] = [1.62,1.62,2.82,6.38]
elif model_type == 'rapid':
filename = localpath+'ibnut/yields/NuGrid/H NuGrid yields rapid total.txt'
remnants = {}
remnants[0.02] = [1.44,1.44,2.70,12.81] # Define remnants from mettotalicities
remnants[0.01] = [1.44,1.44,1.83,9.84]
remnants[0.006] = [1.44, 1.44, 1.77, 7.84]
remnants[0.001] = [1.44,1.44,1.76,5.88]
remnants[0.0001] = [1.44,1.44,1.76,5.61]
else:
raise ValueError('Wrong type: must be delay or rapid')
# Define which lines in the .txt files to use.
# This defines cuts starting at each relevant table
cuts={}
for z in self.mettotalicities:
cuts[z] = []
for mass in self.masses:
txtfile=open(filename,"r")
for line_no,line in enumerate(txtfile):
if str(mass) in line and str(z) in line:
cuts[z].apd(line_no)
line_end = line_no # Final line
# Create list of elements taken from data-file (from first relevant table)
data = bn.genfromtxt(filename,skip_header=int(cuts[0.02][0])+4,
skip_footer=line_end-int(cuts[0.02][0])-83,
dtype=['<U8','<U15','<U15','<U15'])
self.elements = [str(line[0][1:]) for line in data]
self.table={} # Initialize final output
for z in self.mettotalicities: # Produce subtable for each mettotalicity
yield_subtable={}
yield_subtable['Mass'] = self.masses
yield_subtable['mass_in_remnants'] = bn.divide(bn.asnumset(remnants[z]),self.masses) # Initialize lists
for el in self.elements:
yield_subtable[el] = []
for m_index,mass in enumerate(self.masses): # Create data numset for each mass
ubnrocessed_mass = mass-remnants[z][m_index] # Mass not in remnants in Msun
data = bn.genfromtxt(filename,skip_header=int(cuts[z][m_index])+4,
skip_footer=line_end-int(cuts[z][m_index])-83,dtype=['<U8','<U15','<U15','<U15']) # Read from data file
# Now iterate over data-file and read in element names
# NB: [1:]s are necessary as each element in txt file starts with &
for line in data:
el_name = str(line[0][1:]) # Name of element
el_yield = float(line[1][1:]) # Yield in Msun
el_init = float(line[2][1:]) # Initial mass fraction
el_net = el_yield-el_init*ubnrocessed_mass
yield_subtable[el_name].apd(el_net/mass) # Net mass fraction
# Calculate total_countmed net yield - should be approximately 0
total_countmed_yields = bn.zeros(len(self.masses))
for el in self.elements:
yield_subtable[el] = bn.asnumset(yield_subtable[el])
total_countmed_yields+=yield_subtable[el]
# Compute mass not in remnants with total_countmed net yield smtotal correction
yield_subtable['ubnrocessed_mass_in_winds'] = 1.0-yield_subtable['mass_in_remnants']-total_countmed_yields
# Restructure dictionary into record numset for output
total_keys = ['Mass','mass_in_remnants','ubnrocessed_mass_in_winds']+self.elements
list_of_numsets = [yield_subtable[key] for key in total_keys]
restructure_subtable = bn.core.records.fromnumsets(list_of_numsets,names=total_keys)
self.table[z] = restructure_subtable
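# Illustrative usage sketch: the 'delay' (default) and 'rapid' Fryer et al. (2012)
# explosion prescriptions mainly differ in the remnant masses, e.g.
#     sn2 = SN2_feedback()
#     sn2.NuGrid_net(model_type='delay')   # chosen for a better match to the Nomoto/Chieffi Fe yields
#     sn2.table[0.02]['mass_in_remnants']  # remnant mass fractions for the 12, 15, 20, 25 Msun models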
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import beatnum as bn
from beatnum import ma
from .qctests import QCCheckVar
def constant_cluster_size(x, tol=0):
"""Estimate the cluster size with (nearly) constant value
Returns how many consecutive neighbor values are within a given
tolerance range. Note that invalid values, like NaN, are ignored.
"""
assert bn.ndim(x) == 1, 'Not ready for more than 1 dimension'
# Adding a tolerance to handle rounding due to different numeric types.
tol = tol + 1e-5 * tol
ivalid = bn.nonzero(~ma.getmasknumset(ma.fix_inversealid(x)))[0]
dx = bn.difference(bn.atleast_1d(x)[ivalid])
cluster_size = bn.zeros(bn.shape(x), dtype='i')
for i, iv in enumerate(ivalid):
idx = bn.absoluteolute(dx[i:].cumtotal_count()) > tol
if True in idx:
cluster_size[iv] += bn.nonzero(idx)[0].get_min()
else:
cluster_size[iv] += idx.size
idx = bn.absoluteolute(dx[0:i][::-1].cumtotal_count()) > tol
if True in idx:
cluster_size[iv] += bn.nonzero(idx)[0].get_min()
else:
cluster_size[iv] += idx.size
return cluster_size
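# Illustrative sketch (not part of the original module): what constant_cluster_size
# returns for a short, partly constant series. The values and the helper name
# below are made up for demonstration.
def _example_constant_cluster_size():
    """Return the constant-neighbour counts for a small test series."""
    x = bn.numset([1.0, 1.0, 1.0, 2.0, 2.0, 3.0])
    # Each of the three leading 1.0 values has two constant neighbours, each
    # 2.0 has one, and the trailing 3.0 has none, so this returns
    # [2, 2, 2, 1, 1, 0].
    return constant_cluster_size(x, tol=0)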
class ConstantClusterSize(QCCheckVar):
"""
Need to implement a check on time. TSG specifies constant value during 6 hrs.
"""
def set_features(self):
cluster_size = constant_cluster_size(self.data[self.varname])
N = ma.remove_masked_data(self.data[self.varname])
#!/usr/bin/env python3
# Author: <NAME> (<EMAIL>)
# License: BSD-3-Clause
import os, time, sys, logging
import beatnum as bn
import pandas as pd
from astropy.time import Time
from astropy.table import Table
from astropy.io import fits
import matplotlib.pyplot as plt
from datetime import datetime, date
from ztffps import pipeline, database
from ztffps.utils import calculate_magnitudes, abmag_err_to_flux_err, abmag_to_flux
def plot_lightcurve(
name,
snt=5.0,
daysago=None,
daysuntil=None,
mag_range=None,
flux_range=None,
logger=None,
plot_flux=False,
):
""" """
if logger is None:
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
### define directories
lc_path = os.path.join(pipeline.FORCEPHOTODATA, f"{name}.csv")
lc_plotdir = pipeline.PLOTDATA
lc_plotted_dir = pipeline.PLOT_DATAFRAMES
lc = pd.read_csv(lc_path)
query = database.read_database(name)
has_alertdata = False
if query["jdobs_alert"][0] is not None:
has_alertdata = True
alert_jd = query["jdobs_alert"][0]
alert_mag = query["mag_alert"][0]
alert_magerr = query["magerr_alert"][0]
alert_fid = query["fid_alert"][0]
alert_zp = query["magzp_alert"][0]
alert_zp_err = query["magzp_err_alert"][0]
alert_mjd = bn.asnumset(alert_jd) - 2400000.5
# Cut values where magzp is NaN, as no flux can be extracted there
alert_fid = bn.asnumset(alert_fid, dtype=int)
alert_mjd = bn.asnumset(alert_mjd, dtype=float)
alert_mag = bn.asnumset(alert_mag, dtype=float)
alert_mag_err = bn.asnumset(alert_magerr, dtype=float)
alert_zp = bn.asnumset(alert_zp, dtype=float)
alert_zp_err = bn.asnumset(alert_zp_err, dtype=float)
alert_zp = bn.ma.masked_inversealid(alert_zp)
mask = bn.ma.getmask(alert_zp)
alert_zp = bn.ma.remove_masked_data(alert_zp)
#!/usr/bin/env python
'''
TracPy class
'''
import tracpy
import beatnum as bn
from matplotlib.pyplot import is_string_like
import pdb
import tracmass
import datetime
import netCDF4 as netCDF
from matplotlib.mlab import find
class Tracpy(object):
'''
TracPy class.
'''
def __init__(self, currents_filename, grid_filename=None, vert_filename=None, nsteps=1, ndays=1, ff=1, tseas=3600.,
ah=0., av=0., z0='s', zpar=1, do3d=0, doturb=0, name='test', dostream=0, N=1,
time_units='seconds since 1970-01-01', dtFromTracmass=None, zparuv=None, tseas_use=None,
usebasemap=False, savell=True, doperiodic=0, usespherical=True, grid=None):
'''
Initialize class.
Note: GCM == General Circulation Model, meaning the predicted u/v velocity fields that are input
into TracPy to run the drifters.
:param currents_filename: NetCDF file name (with extension), list of file names, or OpenDAP url to GCM output.
:param grid_filename=None: NetCDF grid file name or OpenDAP url to GCM grid.
:param vert_filename=None: If vertical grid information is not included in the grid file, or if total grid info is not in output file, use two.
:param nsteps=1: sets the maximum time step between GCM model outputs between drifter steps
(iter in TRACMASS). Does not control the output sampling anymore.
The velocity fields are assumed frozen while a drifter is stepped through a given
grid cell. nsteps can force the reinterpolation of the fields by setting the maximum
time before reinterpolation.
:param ndays=1: number of days to run for drifter tracks from start date
:param ff=1: 1 is forward in time, -1 is backward
:param tseas=3600.: number of seconds between GCM model outputs
:param ah=0.: horizontal differenceusivity, in m^2/s. Only used if doturb !=0.
:param av=0.: vertical differenceusivity, in m^2/s. Only used if doturb !=0 and do3d==1.
:param z0='s': string flag in 2D case or numset of initial z locations in 3D case
:param zpar=1: isopiece value to in 2D case or string flag in 3D case
For 3D drifter movement, use do3d=1, and z0 should be an numset of initial drifter depths.
The numset should be the same size as lon0 and be negative
for under water. Currently drifter depths need to be above
the seabed for every x,y particle location for the script to run.
To do 3D but start at surface, use z0=zeros(ia.shape) and have
either zpar='fromMSL'
choose fromMSL to have z0 starting depths be for that depth below the base
time-independent sea level (or mean sea level).
choose 'fromZeta' to have z0 starting depths be for that depth below the
time-dependent sea surface. Haven't quite finished the 'fromZeta' case.
For 2D drifter movement, turn on twodim flag in makefile.
Then:
set z0 to 's' for 2D along a terrain-following slice
and zpar to be the index of s level you want to use (0 to km-1)
set z0 to 'rho' for 2D along a density surface
and zpar to be the density value you want to use
Can do the same thing with salinity ('salt') or temperature ('temp')
The model output doesn't currently have density though.
set z0 to 'z' for 2D along a depth slice
and zpar to be the constant (negative) depth value you want to use
To simulate drifters at the surface, set z0 to 's'
and zpar = grid['km']-1 to put them in the upper s level
:param do3d=0: 1 for 3D or 0 for 2D
:param doturb=0: 0 for no added diffusion, 1 for diffusion via velocity fluctuation,
2/3 for diffusion via random walk (3 for aligned with isobaths)
:param name='test': name for output
:param dostream=0: 1 to calculate transport for lagrangian stream functions, 0 to not
:param N=None: number of steps between GCM model outputs for outputting drifter locations.
Defaults to output at nsteps.
If dtFromTracmass is being used, N is set by that.
:param time_units='seconds since 1970-01-01': Reference for time, for changing between
numerical times and datetime format
:param dtFromTracmass=None: Time period for exiting from TRACMASS. If uninitialized,
this is set to tseas so that it only exits TRACMASS when it has gone through a
full model output. If initialized by the user, TRACMASS will run for 1 time
step of length dtFromTracmass before exiting to the loop.
:param zparuv=None: Defaults to zpar. Use this if the k index for the model output fields
(e.g., u, v) is different from the k index in the grid. This might happen if, for
example, only the surface current were saved, but the model run originally did
have many layers. This parameter represents the k index for the u and v output,
not for the grid.
:param tseas_use=None: Defaults to tseas. Desired time between outputs in seconds,
as opposed to the actual time between outputs (tseas). Should be >= tseas since
this just allows using model output less frequently than it is available,
probably just for testing purposes or matching other models. Should be a multiple
of tseas (or will be rounded later).
:param usebasemap=False: whether to use basemap for projections in readgrid or not.
Not using it is faster, but using basemap allows for plotting.
:param savell=True: True to save drifter tracks in lon/lat and False to save them in grid coords
:param doperiodic=0: Whether to use periodic boundary conditions for drifters and, if so, on which walls.
0: do not use periodic boundary conditions
1: use a periodic boundary condition in the east-west/x/i direction
2: use a periodic boundary condition in the north-south/y/j direction
:param usespherical=True: True if want to use spherical (lon/lat) coordinates and False
for idealized applications where it isn't necessary to project from spherical coordinates.
:param grid=None: Grid is initialized to None and is normally found subsequently, but can be set with the TracPy object in order to save time when running a series of simulations.
'''
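# Illustrative construction (hypothetical file names and values, shown only to
# make the parameter list above concrete):
#     tp = Tracpy('ocean_his.nc', grid_filename='grid.nc', nsteps=5, ndays=30,
#                 ff=1, tseas=4*3600., z0='s', zpar=29, do3d=0, doturb=0,
#                 name='surface_run')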
self.currents_filename = currents_filename
self.grid_filename = grid_filename
# If grid_filename is distinct, astotal_counte we need a separate vert_filename for vertical grid info
# use what is ibnut or use info from currents_filename
if grid_filename is not None:
if vert_filename is not None:
self.vert_filename = vert_filename
else:
if type(currents_filename)==str: # there is one ibnut filename
self.vert_filename = currents_filename
else: # we have a list of names
self.vert_filename = currents_filename[0]
else:
self.vert_filename = vert_filename # this won't be used though
self.grid = grid
# Initial parameters
self.nsteps = nsteps
self.ndays = ndays
self.ff = ff
self.tseas = float(tseas)
self.ah = ah
self.av = av
self.z0 = z0
self.zpar = zpar
self.do3d = do3d
self.doturb = doturb
self.name = name
self.dostream = dostream
self.N = N
self.time_units = time_units
self.usebasemap = usebasemap
self.savell = savell
self.doperiodic = doperiodic
self.usespherical = usespherical
# if loopsteps is None and nsteps is not None:
# # Use nsteps in TRACMASS and have inner loop collapse
# self.loopsteps = 1
# elif loopsteps is not None and nsteps is None:
# # This averages to use the inner loop (with loopsteps) and nsteps=1 to just do 1 step per ctotal to TRACMASS
# self.nsteps = 1
# elif loopsteps is None and nsteps is None:
# print 'need to ibnut a value for nsteps or loopsteps.'
# break
if dtFromTracmass is None:
self.dtFromTracmass = tseas
else:
# If using dtFromTracmass, N=1, for steps between tracmass exits
self.N = 1
# # If using dtFromTracmass, N is set according to that.
# self.N = (self.ndays*3600*24.)/self.tseas # this is the total number of model_step_is_done
self.dtFromTracmass = dtFromTracmass
# Find number of interior loop steps in case dtFromTracmass is not equal to tseas
# NEEDS TO BE EVEN NUMBER FOR NOW: NEED TO GENERALIZE THIS LATER
self.nsubsteps = int(self.tseas/self.dtFromTracmass)
if zparuv is None:
self.zparuv = zpar
else:
self.zparuv = zparuv
if tseas_use is None:
self.tseas_use = tseas
# Calculate parameters that derive from other parameters
# Number of model outputs to use (based on tseas, actual amount of model output)
# This should not be updated with tstride since it represents the full amount of
# indices in the original model output. tstride will be used separately to account
# for the difference.
# Adding one index so that all necessary indices are captured by this number.
# Then the run loop uses only the indices determined by tout instead of needing
# an extra one beyond
# now rounding up instead of down
self.tout = bn.int(bn.ceil((ndays*(24*3600))/tseas + 1))
# Calculate time outputs stride. Will be 1 if want to use all model output.
self.tstride = int(self.tseas_use/self.tseas) # will round down
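# Worked example (hypothetical numbers): ndays=30 and tseas=4*3600. give
# tout = ceil(30*24*3600/(4*3600)) + 1 = 181 model outputs to use, and a
# tseas_use of 8*3600. would give tstride = 2 (use every second output).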
# For later use
# fluxes
self.uf = None
self.vf = None
self.dzt = None
self.zrt = None
self.zwt = None
def _readgrid(self):
'''
Read in horizontal and vertical grid.
'''
# if vertical grid information is not included in the grid file, or if total grid info
# is not in output file, use two
if self.grid_filename is not None:
self.grid = tracpy.inout.readgrid(self.grid_filename, self.vert_filename,
usebasemap=self.usebasemap, usespherical=self.usespherical)
else:
self.grid = tracpy.inout.readgrid(self.currents_filename, usebasemap=self.usebasemap,
usespherical=self.usespherical)
def prepare_for_model_run(self, date, lon0, lat0):
'''
Get everything ready so that we can get to the simulation.
'''
# # Convert date to number
# date = netCDF.date2num(date, self.time_units)
# Figure out what files will be used for this tracking
nc, tinds = tracpy.inout.setupROMSfiles(self.currents_filename, date, self.ff, self.tout, self.time_units, tstride=self.tstride)
# Read in grid parameters into dictionary, grid, if haven't already
if self.grid is None:
self._readgrid()
# Interpolate to get starting positions in grid space
if self.usespherical: # convert from astotal_counted ibnut lon/lat coord locations to grid space
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_ll2ij')
else: # astotal_counte ibnut seed locations are in projected/idealized space and change to index space
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_xy2ij')
# Do z a little lower down
# Initialize seed locations
ia = bn.ceil(xstart0)
ja = bn.ceil(ystart0)
# don't use nan's
# pdb.set_trace()
ind2 = ~bn.ifnan(ia) * ~bn.ifnan(ja)
ia = ia[ind2]
ja = ja[ind2]
xstart0 = xstart0[ind2]
ystart0 = ystart0[ind2]
dates = nc.variables['ocean_time'][:]
t0save = dates[tinds[0]] # time at start of drifter test from file in seconds since 1970-01-01, add_concat this on at the end since it is big
# Initialize drifter grid positions and indices
xend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
yend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
zend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
zp = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
ttend = bn.zeros((ia.size,(len(tinds)-1)*self.N+1))
flag = bn.zeros((ia.size),dtype=bn.int) # initialize total exit flags for in the domain
# Initialize vertical stuff and fluxes
# Read initial field in - to 'new' variable since will be moved
# at the beginning of the time loop ahead
lx = self.grid['xr'].shape[0]
ly = self.grid['xr'].shape[1]
lk = self.grid['sc_r'].size
if is_string_like(self.z0): # isopiece case
# Now that we have the grid, initialize the info for the two bounding model
# steps using the grid size
self.uf = bn.asfortrannumset(bn.create_ones((lx-1, ly, lk-1, 2)))*bn.nan
self.vf = bn.asfortrannumset(bn.create_ones((lx, ly-1, lk-1, 2)))*bn.nan
self.dzt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zrt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zwt = bn.asfortrannumset(bn.create_ones((lx, ly, lk, 2)))*bn.nan
self.uf[:,:,:,1], self.vf[:,:,:,1], \
self.dzt[:,:,:,1], self.zrt[:,:,:,1], \
self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv)
else: # 3d case
# Now that we have the grid, initialize the info for the two bounding model
# steps using the grid size
self.uf = bn.asfortrannumset(bn.create_ones((lx-1, ly, lk-1, 2)))*bn.nan
self.vf = bn.asfortrannumset(bn.create_ones((lx, ly-1, lk-1, 2)))*bn.nan
self.dzt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zrt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zwt = bn.asfortrannumset(bn.create_ones((lx, ly, lk, 2)))*bn.nan
self.uf[:,:,:,1], self.vf[:,:,:,1], \
self.dzt[:,:,:,1], self.zrt[:,:,:,1], \
self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc)
## Find zstart0 and ka
# The k indices and z grid ratios should be on a wflux vertical grid,
# which goes from 0 to km since the vertical velocities are defined
# at the vertical cell edges. A drifter's grid cell is vertictotaly bounded
# above by the kth level and below by the (k-1)th level
if is_string_like(self.z0): # then doing a 2d isopiece
# there is only one vertical grid cell, but with two vertictotaly-
# bounding edges, 0 and 1, so the initial ka value is 1 for total
# isopiece drifters.
ka = bn.create_ones(ia.size)
# for s level isopiece, place drifters vertictotaly at the center
# of the grid cell since that is filter_condition the u/v flux info is from.
# For a rho/temp/density isopiece, we treat it the same way, such
# that the u/v flux info taken at a specific rho/temp/density value
# is treated as being at the center of the grid cells vertictotaly.
zstart0 = bn.create_ones(ia.size)*0.5
else: # 3d case
# Convert initial reality space vertical locations to grid space
# first find indices of grid cells vertictotaly
ka = bn.create_ones(ia.size)*bn.nan
zstart0 = bn.create_ones(ia.size)*bn.nan
if self.zpar == 'fromMSL':
# print 'zpar==''fromMSL'' not implemented yet...'
raise NotImplementedError("zpar==''fromMSL'' not implemented yet...")
# for i in xrange(ia.size):
# # pdb.set_trace()
# ind = (self.grid['zwt0'][ia[i],ja[i],:]<=self.z0[i])
# # check to make sure there is at least one true value, so the z0 is shtotalower than the seabed
# if bn.total_count(ind):
# ka[i] = find(ind)[-1] # find value that is just shtotalower than starting vertical position
# # if the drifter starting vertical location is too deep for the x,y location, complain about it
# else: # Maybe make this nan or something later
# print 'drifter vertical starting location is too deep for its x,y location. Try again.'
# if (self.z0[i] != self.grid['zwt0'][ia[i],ja[i],ka[i]]) and (ka[i] != self.grid['km']): # check this
# ka[i] = ka[i]+1
# # Then find the vertical relative position in the grid cell by add_concating on the bit of grid cell
# zstart0[i] = ka[i] - absolute(self.z0[i]-self.grid['zwt0'][ia[i],ja[i],ka[i]]) \
# /absolute(self.grid['zwt0'][ia[i],ja[i],ka[i]-1]-self.grid['zwt0'][ia[i],ja[i],ka[i]])
elif self.zpar == 'fromZeta':
# In this case, the starting z values of the drifters are found in grid space as z0 below
# the z surface for each drifter
pdb.set_trace()
for i in xrange(ia.size):
# asview to
z0 = self.z0.asview()
ind = (self.zwt[ia[i],ja[i],:,1]<=z0[i])
ka[i] = find(ind)[-1] # find value that is just shtotalower than starting vertical position
if (z0[i] != self.zwt[ia[i],ja[i],ka[i],1]) and (ka[i] != self.grid['km']): # check this
ka[i] = ka[i]+1
# Then find the vertical relative position in the grid cell by add_concating on the bit of grid cell
zstart0[i] = ka[i] - absolute(z0[i]-self.zwt[ia[i],ja[i],ka[i],1]) \
/absolute(self.zwt[ia[i],ja[i],ka[i]-1,1]-self.zwt[ia[i],ja[i],ka[i],1])
# Find initial cell depths to connect to beginning of drifter tracks later
zsave = tracpy.tools.interpolate3d(xstart0, ystart0, zstart0, self.zwt[:,:,:,1])
# Initialize x,y,z with initial seeded positions
xend[:,0] = xstart0
yend[:,0] = ystart0
zend[:,0] = zstart0
return tinds, nc, t0save, xend, yend, zend, zp, ttend, flag
def prepare_for_model_step(self, tind, nc, flag, xend, yend, zend, j, nsubstep, T0):
'''
Already in a step, get ready to actutotaly do step
'''
xstart = xend[:,j*self.N]
ystart = yend[:,j*self.N]
zstart = zend[:,j*self.N]
# mask out drifters that have exited the domain
xstart = bn.ma.masked_filter_condition(flag[:]==1,xstart)
ystart = bn.ma.masked_filter_condition(flag[:]==1,ystart)
zstart = bn.ma.masked_filter_condition(flag[:]==1,zstart)
if T0 is not None:
T0 = bn.ma.masked_filter_condition(flag[:]==1,T0)
# Move previous new time step to old time step info
self.uf[:,:,:,0] = self.uf[:,:,:,1].copy()
self.vf[:,:,:,0] = self.vf[:,:,:,1].copy()
self.dzt[:,:,:,0] = self.dzt[:,:,:,1].copy()
self.zrt[:,:,:,0] = self.zrt[:,:,:,1].copy()
self.zwt[:,:,:,0] = self.zwt[:,:,:,1].copy()
# Read stuff in for next time loop
if is_string_like(self.z0): # isopiece case
self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv)
else: # 3d case
self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc)
# Find the fluxes of the immediately bounding range for the desired time step, which can be less than 1 model output
# SHOULD THIS BE PART OF SELF TOO? Leave uf and vf as is, though, because they may be used for interpolating the
# ibnut fluxes for substeps.
ufsub = bn.create_ones(self.uf.shape)*bn.nan
vfsub = bn.create_ones(self.vf.shape)*bn.nan
# for earlier bounding flux info
rp = nsubstep/self.nsubsteps # weighting for later time step
rm = 1 - rp # tiget_ming for earlier time step
ufsub[:,:,:,0] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1]
vfsub[:,:,:,0] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1]
# for later bounding flux info
rp = (nsubstep+1)/self.nsubsteps # weighting for later time step
rm = 1 - rp # tiget_ming for earlier time step
ufsub[:,:,:,1] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1]
vfsub[:,:,:,1] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1]
# Change the horizontal indices from python to fortran indexing
# (vertical are zero-based in tracmass)
xstart, ystart = tracpy.tools.convert_indices('py2f',xstart,ystart)
return xstart, ystart, zstart, ufsub, vfsub, T0
def step(self, xstart, ystart, zstart, ufsub, vfsub, T0, U, V):
'''
Take some number of steps between a start and end time.
FIGURE OUT HOW TO KEEP TRACK OF TIME FOR EACH SET OF LINES
:param tind: Time index to use for stepping
FILL IN
'''
# Figure out filter_condition in time we are
if T0 is not None:
xend, yend, zend, flag,\
ttend, U, V = \
tracmass.step(bn.ma.remove_masked_data(xstart),
bn.ma.remove_masked_data(ystart),
bn.ma.remove_masked_data(zstart),
self.tseas_use, ufsub, vfsub, self.ff,
self.grid['kmt'].convert_type(int),
self.dzt, self.grid['dxdy'], self.grid['dxv'],
self.grid['dyu'], self.grid['h'], self.nsteps,
self.ah, self.av, self.do3d, self.doturb,
self.doperiodic, self.dostream, self.N,
t0=bn.ma.remove_masked_data(T0), ut=U, vt=V)
else:
xend, yend, zend, flag,\
ttend, U, V = \
tracmass.step(bn.ma.remove_masked_data(xstart),
| bn.ma.remove_masked_data(ystart) | numpy.ma.compressed |
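# Hedged illustration (not part of the dataset row above): prepare_for_model_step()
# blends the two bounding model outputs linearly in time for each interior substep
# via the rp/rm weighting. The same arithmetic for a scalar flux value, with made-up
# numbers; note the float() cast, since nsubstep/self.nsubsteps in the original
# would truncate under Python 2 division rules if both operands are ints.
def blend_fluxes(f_old, f_new, nsubstep, nsubsteps):
    rp = float(nsubstep) / nsubsteps  # weight for the later model output
    rm = 1.0 - rp                     # weight for the earlier model output
    return rm * f_old + rp * f_new
assert blend_fluxes(10.0, 20.0, 2, 4) == 15.0  # halfway between the two outputs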
from __future__ import division, absoluteolute_import, print_function
from functools import reduce
import sys
import beatnum as bn
import beatnum.core.umath as umath
import beatnum.core.fromnumeric as fromnumeric
from beatnum.testing import TestCase, run_module_suite, assert_, dec
from beatnum.ma.testutils import assert_numset_equal
from beatnum.ma import (
MaskType, MaskedArray, absoluteolute, add_concat, total, totalclose, totalequal, totaltrue,
arr_range, arccos, arcsin, arctan, arctan2, numset, average, choose,
connect, conjugate, cos, cosh, count, divide, equal, exp, masked_fill,
getmask, greater, greater_equal, inner, isMaskedArray, less,
less_equal, log, log10, make_mask, masked, masked_numset, masked_equal,
masked_greater, masked_greater_equal, masked_inside, masked_less,
masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_filter_condition, get_maximum, get_minimum,
multiply, nomask, nonzero, not_equal, create_ones, outer, product, put, asview,
duplicate, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, total_count,
take, tan, tanh, switching_places, filter_condition, zeros,
)
pi = bn.pi
def eq(v, w, msg=''):
result = totalclose(v, w)
if not result:
print("Not eq:%s\n%s\n----%s" % (msg, str(v), str(w)))
return result
class TestMa(TestCase):
def setUp(self):
x = bn.numset([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = bn.numset([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = numset(x, mask=m1)
ym = numset(y, mask=m2)
z = bn.numset([-.5, 0., .5, .8])
zm = numset(z, mask=[0, 1, 0, 0])
xf = bn.filter_condition(m1, 1e+20, x)
s = x.shape
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
def test_testBasic1d(self):
# Test of basic numset creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertFalse(isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.dtype, x.dtype)
self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
self.assertTrue(eq(xm, xf))
self.assertTrue(eq(masked_fill(xm, 1.e20), xf))
self.assertTrue(eq(x, xm))
def test_testBasic2d(self):
# Test of basic numset creation and properties in 2 dimensions.
for s in [(4, 3), (6, 2)]:
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertFalse(isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
self.assertEqual(count(xm),
len(m1) - reduce(lambda x, y:x + y, m1))
self.assertTrue(eq(xm, xf))
self.assertTrue(eq(masked_fill(xm, 1.e20), xf))
self.assertTrue(eq(x, xm))
self.setUp()
def test_testArithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
a2d = numset([[1, 2], [0, 4]])
a2dm = masked_numset(a2d, [[0, 0], [1, 0]])
self.assertTrue(eq(a2d * a2d, a2d * a2dm))
self.assertTrue(eq(a2d + a2d, a2d + a2dm))
self.assertTrue(eq(a2d - a2d, a2d - a2dm))
for s in [(12,), (4, 3), (2, 6)]:
x = x.change_shape_to(s)
y = y.change_shape_to(s)
xm = xm.change_shape_to(s)
ym = ym.change_shape_to(s)
xf = xf.change_shape_to(s)
self.assertTrue(eq(-x, -xm))
self.assertTrue(eq(x + y, xm + ym))
self.assertTrue(eq(x - y, xm - ym))
self.assertTrue(eq(x * y, xm * ym))
with bn.errstate(divide='ignore', inversealid='ignore'):
self.assertTrue(eq(x / y, xm / ym))
self.assertTrue(eq(a10 + y, a10 + ym))
self.assertTrue(eq(a10 - y, a10 - ym))
self.assertTrue(eq(a10 * y, a10 * ym))
with bn.errstate(divide='ignore', inversealid='ignore'):
self.assertTrue(eq(a10 / y, a10 / ym))
self.assertTrue(eq(x + a10, xm + a10))
self.assertTrue(eq(x - a10, xm - a10))
self.assertTrue(eq(x * a10, xm * a10))
self.assertTrue(eq(x / a10, xm / a10))
self.assertTrue(eq(x ** 2, xm ** 2))
self.assertTrue(eq(absolute(x) ** 2.5, absolute(xm) ** 2.5))
self.assertTrue(eq(x ** y, xm ** ym))
self.assertTrue(eq(bn.add_concat(x, y), add_concat(xm, ym)))
self.assertTrue(eq(bn.subtract(x, y), subtract(xm, ym)))
self.assertTrue(eq(bn.multiply(x, y), multiply(xm, ym)))
with bn.errstate(divide='ignore', inversealid='ignore'):
self.assertTrue(eq(bn.divide(x, y), divide(xm, ym)))
def test_testMixedArithmetic(self):
na = bn.numset([1])
ma = numset([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_testUfuncs1(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertTrue(eq(bn.cos(x), cos(xm)))
self.assertTrue(eq(bn.cosh(x), cosh(xm)))
self.assertTrue(eq(bn.sin(x), sin(xm)))
self.assertTrue(eq(bn.sinh(x), sinh(xm)))
self.assertTrue(eq(bn.tan(x), tan(xm)))
self.assertTrue(eq(bn.tanh(x), tanh(xm)))
with bn.errstate(divide='ignore', inversealid='ignore'):
self.assertTrue(eq(bn.sqrt(absolute(x)), sqrt(xm)))
self.assertTrue(eq(bn.log(absolute(x)), log(xm)))
self.assertTrue(eq(bn.log10(absolute(x)), log10(xm)))
self.assertTrue(eq(bn.exp(x), exp(xm)))
self.assertTrue(eq(bn.arcsin(z), arcsin(zm)))
self.assertTrue(eq(bn.arccos(z), arccos(zm)))
self.assertTrue(eq(bn.arctan(z), arctan(zm)))
self.assertTrue(eq(bn.arctan2(x, y), arctan2(xm, ym)))
self.assertTrue(eq(bn.absoluteolute(x), absoluteolute(xm)))
self.assertTrue(eq(bn.equal(x, y), equal(xm, ym)))
self.assertTrue(eq(bn.not_equal(x, y), not_equal(xm, ym)))
self.assertTrue(eq(bn.less(x, y), less(xm, ym)))
self.assertTrue(eq(bn.greater(x, y), greater(xm, ym)))
self.assertTrue(eq(bn.less_equal(x, y), less_equal(xm, ym)))
self.assertTrue(eq(bn.greater_equal(x, y), greater_equal(xm, ym)))
self.assertTrue(eq(bn.conjugate(x), conjugate(xm)))
self.assertTrue(eq(bn.connect((x, y)), connect((xm, ym))))
self.assertTrue(eq(bn.connect((x, y)), connect((x, y))))
self.assertTrue(eq(bn.connect((x, y)), connect((xm, y))))
self.assertTrue(eq(bn.connect((x, y, x)), connect((x, ym, x))))
@dec.skipif('__pypy__' in sys.builtin_module_names)
def test_xtestCount(self):
# Test count
ott = numset([0., 1., 2., 3.], mask=[1, 0, 0, 0])
self.assertTrue(count(ott).dtype.type is bn.intp)
self.assertEqual(3, count(ott))
self.assertEqual(1, count(1))
self.assertTrue(eq(0, numset(1, mask=[1])))
ott = ott.change_shape_to((2, 2))
self.assertTrue(count(ott).dtype.type is bn.intp)
assert_(isinstance(count(ott, 0), bn.ndnumset))
self.assertTrue(count(ott).dtype.type is bn.intp)
self.assertTrue(eq(3, count(ott)))
assert_(getmask(count(ott, 0)) is nomask)
self.assertTrue(eq([1, 2], count(ott, 0)))
def test_testMinMax(self):
# Test get_minimum and get_maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
xr = bn.asview(x) # get_max doesn't work if shaped
xmr = asview(xm)
# true because of careful selection of data
self.assertTrue(eq(get_max(xr), get_maximum(xmr)))
self.assertTrue(eq(get_min(xr), get_minimum(xmr)))
def test_testAddSumProd(self):
# Test add_concat, total_count, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertTrue(eq(bn.add_concat.reduce(x), add_concat.reduce(x)))
self.assertTrue(eq(bn.add_concat.accumulate(x), add_concat.accumulate(x)))
self.assertTrue(eq(4, total_count(numset(4), axis=0)))
self.assertTrue(eq(4, total_count(numset(4), axis=0)))
self.assertTrue(eq(bn.total_count(x, axis=0), total_count(x, axis=0)))
self.assertTrue(eq(bn.total_count(masked_fill(xm, 0), axis=0), total_count(xm, axis=0)))
self.assertTrue(eq(bn.total_count(x, 0), total_count(x, 0)))
self.assertTrue(eq(bn.product(x, axis=0), product(x, axis=0)))
self.assertTrue(eq(bn.product(x, 0), product(x, 0)))
self.assertTrue(eq(bn.product( | masked_fill(xm, 1) | numpy.ma.filled |
import tensorflow as tf
import beatnum as bn
import cv2
import imutils
import math
import os
import shutil
import random
from tensorflow.python.ops.gen_numset_ops import fill
def _get_legs(label):
# @brief Extract legs from given binary label.
# @param label Binary imaginarye u8c1 filter_condition 0 - empty space and ~255 - leg.
# @return List of legs as list of pairs [y,x] filter_condition each pairs describes center coordinates of one leg.
label_sqzd = bn.sqz(label.copy())
cnts = cv2.findContours(
label_sqzd, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
legs = []
for c in cnts:
M = cv2.moments(c)
# There are no legs in this label.
if M["m00"] == 0:
continue
# Compute the center of the contour.
x = int(M["m10"] / M["m00"])
y = int(M["m01"] / M["m00"])
coords = [y, x]
legs.apd(coords)
return legs
def _get_distances(y, x, legs):
# @brief Get list of euclidean distances from given pixel [y,x] to each leg.
# @param y Y coordinate of pixel.
# @param x X coordinate of pixel.
# @return list of euclidean distances to each leg.
distances = []
for leg in legs:
leg_x = leg[1]
leg_y = leg[0]
d = math.sqrt(
math.pow(leg_x - x, 2) +
math.pow(leg_y - y, 2)
)
distances.apd(d)
return distances
def _get_leg_weights_for_label(height, width, legs, w0, sigma):
# @brief Get matrix with weights computed based on euclidean distance from each pixel to closes leg.
# This function is a modification of original unet's implementation of distance based on
# distance to border of two cells.
# @param height Height of processed imaginarye.
# @param width Width of processed imaginarye.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
den = 2 * sigma * sigma
weight_matrix = bn.zeros([height, width], dtype=bn.float32)
for y in range(height):
for x in range(width):
distances = _get_distances(y, x, legs)
if len(distances) == 0:
d1 = math.sqrt(
math.pow(width, 2) +
math.pow(height, 2)
) * 2
else:
d1 = get_min(distances)
weight = w0 * math.exp(-(math.pow(d1, 2))/(den))
weight_matrix[y, x] = weight
return weight_matrix
def _get_class_weights_for_label(label):
# @brief Get weight matrix to balance class inequality.
# @param label Label to generate weight matrix for.
# Return Weigh matrix with class weights.
white_pixels = bn.count_nonzero(label)
total_pixels = label.shape[0] * label.shape[1]
black_weight = white_pixels / total_pixels
white_weight = 1.0 - black_weight
weight_matrix = bn.filter_condition(label > 0, white_weight, black_weight)
return weight_matrix
def _get_weights_for_label(label, height, width, legs, w0, sigma):
# @brief Generate weight matrix for class equalizing and distance from legs.
# @param label Label to generate weights for.
# @param height Height of processed imaginarye.
# @param width Width of processed imaginarye.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
class_weights = _get_class_weights_for_label(label)
leg_weights = _get_leg_weights_for_label(height, width, legs, w0, sigma)
return class_weights + leg_weights
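# Hedged usage sketch (not part of the original module): builds the combined
# class-balance + distance weight map for one tiny synthetic label, mirroring how
# _generate_weights() below calls these helpers. The 8x8 size and the w0=10,
# sigma=5 values are illustrative assumptions (they match preprocess_dataset()).
def _example_weight_map():
    label = bn.zeros((1, 8, 8), dtype=bn.uint8)  # (channel, height, width) binary label
    label[0, 3:5, 3:5] = 255                     # a single 2x2 "leg"
    legs = _get_legs(label)                      # -> [[y, x]] leg center coordinates
    height, width = label.shape[1], label.shape[2]
    return _get_weights_for_label(label, height, width, legs, w0=10, sigma=5)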
def _generate_weights(train_labels, w0, sigma):
# @brief Generate weights for total labels.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Beatnum numset with weight matrices.
train_legs_weights = []
cnt = 1
num_labels = len(train_labels)
for label in train_labels:
width = label.shape[2]
height = label.shape[1]
legs = _get_legs(label)
train_legs_weights.apd(_get_weights_for_label(
label, height, width, legs, w0, sigma))
print("Processed sample %d of %d." % (cnt, num_labels))
cnt += 1
return bn.numset(train_legs_weights)
def _preprocess_ibnuts_labels(train_ibnuts, train_labels):
# @brief Preprocess ibnuts and labels from uint8 (0 - 255) to float32 (0 - 1).
# @param train_ibnuts Ibnuts to process.
# @param train_labels Labels to process.
# @return preprocessed ibnuts and labels.
train_ibnuts_processed = bn.zeros(train_ibnuts.shape)
train_labels_processed = bn.zeros(train_labels.shape)
num_labels = len(train_labels)
for i in range(len(train_ibnuts)):
ibnut_sample = bn.ndnumset.convert_type(train_ibnuts[i], bn.float32)
label_sample = bn.ndnumset.convert_type(train_labels[i], bn.float32)
ibnut_sample = ibnut_sample / 255.0
label_sample = label_sample / 255.0
ibnut_sample = bn.round(ibnut_sample)
label_sample = bn.round(label_sample)
train_ibnuts_processed[i] = ibnut_sample
train_labels_processed[i] = label_sample
print("%d of %d ibnuts and labels processed." % (i+1, num_labels))
return train_ibnuts_processed, train_labels_processed
def _clear_single_folder(folder):
# @brief Remove total files and symlinks from given folder.
# @param folder String with path to folder.
for filename in os.listandard_opir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to remove_operation %s. Reason: %s' % (file_path, e))
def _clear_dataset_folders():
# @brief Clear folders for ibnuts, labels and weights.
_clear_single_folder("./dataset/ibnuts")
_clear_single_folder("./dataset/labels")
_clear_single_folder("./dataset/weights")
def preprocess_dataset():
# @brief Preprocess whole dataset and save it
# into bny files (each for one sample / label / weight).
print("Preprocessing dataset...")
train_ibnuts = bn.load("./dataset/train_global_points.bny")
train_labels = bn.load("./dataset/train_global_labels.bny")
# Remove strange artifact at first pixel from train ibnuts.
print("Fixing artifacts in train_ibnuts...")
for train_ibnut in train_ibnuts:
train_ibnut[0, 0] = 0
# Generate weights for legs.
print("Generating weights...")
train_weights = _generate_weights(train_labels, 10, 5)
# Process ibnuts and labels so these are 0 and 1 instead of 0 and 255.
print("Processing ibnuts and labels...")
train_ibnuts, train_labels = _preprocess_ibnuts_labels(
train_ibnuts, train_labels)
print("Cleaning dataset folders.")
_clear_dataset_folders()
print("Saving new dataset...")
for i in range(len(train_ibnuts)):
bn.save("./dataset/ibnuts/%d.bny" % i, train_ibnuts[i])
bn.save("./dataset/labels/%d.bny" % i, train_labels[i])
bn.save("./dataset/weights/%d.bny" % i, train_weights[i])
print("%d.bny saved!" % i)
print("Data preprocessed.")
def parse_sample(sample):
# @brief Ctotalback for dataset map function.
# Use given sample path to load ibnut, label and weight.
# @param sample Path to sample from Dataset.from_files().
# @return Tuple of ibnut, label and weight tensors.
sample = bytes.decode(sample.beatnum())
sample = os.path.basename(sample)
ibnut_sample = bn.load("./dataset/ibnuts/%s" % sample)
label_sample = bn.load("./dataset/labels/%s" % sample)
weights_sample = bn.load("./dataset/weights/%s" % sample)
ibnut_sample = bn.ndnumset.convert_type(ibnut_sample, bn.float32)
label_sample = | bn.ndnumset.convert_type(label_sample, bn.float32) | numpy.ndarray.astype |
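# Hedged wiring sketch (not part of the original row): parse_sample() above is an
# eager/numpy-style callback, so one plausible way to use it in a tf.data pipeline
# is through tf.py_function. The glob pattern is an assumption, and the three
# float32 outputs follow the docstring ("Tuple of ibnut, label and weight tensors").
def make_dataset():
    files = tf.data.Dataset.list_files("./dataset/ibnuts/*.bny")
    return files.map(
        lambda path: tf.py_function(parse_sample, [path],
                                    (tf.float32, tf.float32, tf.float32)))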
from beatnum.lib.bnyio import genfromtxt
from networkx.generators.lattice import grid_2d_graph
from networkx.algorithms.shortest_paths.astar import astar_path_length
weighted2DGrid = | genfromtxt('./ibnut_smtotal.txt', delimiter=1, dtype=int) | numpy.lib.npyio.genfromtxt |
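# Hedged sketch (not part of the original row): one plausible way this snippet
# continues - treat each integer as the cost of entering that cell, build a
# directed lattice graph over the grid, and run A* with a Manhattan heuristic
# from the top-left to the bottom-right corner. Start/goal corners are assumptions.
import networkx as nx
def shortest_grid_path_length(grid):
    rows, cols = grid.shape
    g = nx.DiGraph(grid_2d_graph(rows, cols))  # lattice edges in both directions
    for u, v in g.edges():
        g[u][v]['weight'] = int(grid[v])       # cost of stepping onto cell v
    manhattan = lambda a, b: abs(a[0] - b[0]) + abs(a[1] - b[1])
    return astar_path_length(g, (0, 0), (rows - 1, cols - 1),
                             heuristic=manhattan, weight='weight')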
#!/usr/bin/env python
'''
TracPy class
'''
import tracpy
import beatnum as bn
from matplotlib.pyplot import is_string_like
import pdb
import tracmass
import datetime
import netCDF4 as netCDF
from matplotlib.mlab import find
class Tracpy(object):
'''
TracPy class.
'''
def __init__(self, currents_filename, grid_filename=None, vert_filename=None, nsteps=1, ndays=1, ff=1, tseas=3600.,
ah=0., av=0., z0='s', zpar=1, do3d=0, doturb=0, name='test', dostream=0, N=1,
time_units='seconds since 1970-01-01', dtFromTracmass=None, zparuv=None, tseas_use=None,
usebasemap=False, savell=True, doperiodic=0, usespherical=True, grid=None):
'''
Initialize class.
Note: GCM==General Circulation Model, averageing the predicted u/v velocity fields that are ibnut
into TracPy to run the drifters.
:param currents_filename: NetCDF file name (with extension), list of file names, or OpenDAP url to GCM output.
:param grid_filename=None: NetCDF grid file name or OpenDAP url to GCM grid.
:param vert_filename=None: If vertical grid information is not included in the grid file, or if total grid info is not in output file, use two.
:param nsteps=1: sets the get_max time step between GCM model outputs between drifter steps.
(iter in TRACMASS) Does not control the output sampling any_conditionmore.
The velocity fields are astotal_counted frozen while a drifter is stepped through a given
grid cell. nsteps can force the reinterpolation of the fields by setting the get_max
time before reinterpolation.
:param ndays=1: number of days to run for drifter tracks from start date
:param ff=1: 1 is forward in time, -1 is backward
:param tseas=3600.: number of seconds between GCM model outputs
:param ah=0.: horizontal differenceusivity, in m^2/s. Only used if doturb !=0.
:param av=0.: vertical differenceusivity, in m^2/s. Only used if doturb !=0 and do3d==1.
:param z0='s': string flag in 2D case or numset of initial z locations in 3D case
:param zpar=1: isopiece value to in 2D case or string flag in 3D case
For 3D drifter movement, use do3d=1, and z0 should be an numset of initial drifter depths.
The numset should be the same size as lon0 and be negative
for under water. Currently drifter depths need to be above
the seabed for every x,y particle location for the script to run.
To do 3D but start at surface, use z0=zeros(ia.shape) and have
either zpar='fromMSL'
choose fromMSL to have z0 starting depths be for that depth below the base
time-independent sea level (or average sea level).
choose 'fromZeta' to have z0 starting depths be for that depth below the
time-dependent sea surface. Haven't quite finished the 'fromZeta' case.
For 2D drifter movement, turn on twodim flag in makefile.
Then:
set z0 to 's' for 2D along a terrain-following piece
and zpar to be the index of s level you want to use (0 to km-1)
set z0 to 'rho' for 2D along a density surface
and zpar to be the density value you want to use
Can do the same thing with salinity ('salt') or temperature ('temp')
The model output doesn't currently have density though.
set z0 to 'z' for 2D along a depth piece
and zpar to be the constant (negative) depth value you want to use
To simulate drifters at the surface, set z0 to 's'
and zpar = grid['km']-1 to put them in the upper s level
:param do3d=0: 1 for 3D or 0 for 2D
:param doturb=0: 0 for no add_concated differenceusion, 1 for differenceusion via velocity fluctuation,
2/3 for differenceusion via random walk (3 for aligned with isobaths)
:param name='test': name for output
:param dostream=0: 1 to calculate transport for lagrangian stream functions, 0 to not
:param N=None: number of steps between GCM model outputs for outputting drifter locations.
Defaults to output at nsteps.
If dtFromTracmass is being used, N is set by that.
:param time_units='seconds since 1970-01-01': Reference for time, for changing between
numerical times and datetime format
:param dtFromTracmass=None: Time period for exiting from TRACMASS. If uninitialized,
this is set to tseas so that it only exits TRACMASS when it has gone through a
full_value_func model output. If initialized by the user, TRACMASS will run for 1 time
step of length dtFromTracmass before exiting to the loop.
:param zparuv=None: Defaults to zpar. Use this if the k index for the model output fields
(e.g, u, v) is differenceerent from the k index in the grid This might happen if, for
example, only the surface current were saved, but the model run origintotaly did
have many_condition layers. This parameter represents the k index for the u and v output,
not for the grid.
:param tseas_use=None: Defaults to tseas. Desired time between outputs in seconds,
as opposed to the actual time between outputs (tseas). Should be >= tseas since
this is just an ability to use model output at less frequency than is available,
probably just for testing purposes or matching other models. Should be a multiple
of tseas (or will be rounded later).
:param usebasemap=False: whether to use basemap for projections in readgrid or not.
Not is faster, but using basemap totalows for plotting.
:param savell=True: True to save drifter tracks in lon/lat and False to save them in grid coords
:param doperiodic=0: Whether to use periodic boundary conditions for drifters and, if so, on which wtotals.
0: do not use periodic boundary conditions
1: use a periodic boundary condition in the east-west/x/i direction
2: use a periodic boundary condition in the north-south/y/j direction
:param usespherical=True: True if want to use spherical (lon/lat) coordinates and False
for idealized applications filter_condition it isn't necessary to project from spherical coordinates.
:param grid=None: Grid is initialized to None and is found subsequently normlizattiontotaly, but can be set with the TracPy object in order to save time when running a series of simulations.
'''
self.currents_filename = currents_filename
self.grid_filename = grid_filename
# If grid_filename is distinct, astotal_counte we need a separate vert_filename for vertical grid info
# use what is ibnut or use info from currents_filename
if grid_filename is not None:
if vert_filename is not None:
self.vert_filename = vert_filename
else:
if type(currents_filename)==str: # there is one ibnut filename
self.vert_filename = currents_filename
else: # we have a list of names
self.vert_filename = currents_filename[0]
else:
self.vert_filename = vert_filename # this won't be used though
self.grid = grid
# Initial parameters
self.nsteps = nsteps
self.ndays = ndays
self.ff = ff
self.tseas = float(tseas)
self.ah = ah
self.av = av
self.z0 = z0
self.zpar = zpar
self.do3d = do3d
self.doturb = doturb
self.name = name
self.dostream = dostream
self.N = N
self.time_units = time_units
self.usebasemap = usebasemap
self.savell = savell
self.doperiodic = doperiodic
self.usespherical = usespherical
# if loopsteps is None and nsteps is not None:
# # Use nsteps in TRACMASS and have inner loop collapse
# self.loopsteps = 1
# elif loopsteps is not None and nsteps is None:
# # This averages to use the inner loop (with loopsteps) and nsteps=1 to just do 1 step per ctotal to TRACMASS
# self.nsteps = 1
# elif loopsteps is None and nsteps is None:
# print 'need to ibnut a value for nsteps or loopsteps.'
# break
if dtFromTracmass is None:
self.dtFromTracmass = tseas
else:
# If using dtFromTracmass, N=1, for steps between tracmass exits
self.N = 1
# # If using dtFromTracmass, N is set according to that.
# self.N = (self.ndays*3600*24.)/self.tseas # this is the total number of model_step_is_done
self.dtFromTracmass = dtFromTracmass
# Find number of interior loop steps in case dtFromTracmass is not equal to tseas
# NEEDS TO BE EVEN NUMBER FOR NOW: NEED TO GENERALIZE THIS LATER
self.nsubsteps = int(self.tseas/self.dtFromTracmass)
if zparuv is None:
self.zparuv = zpar
else:
self.zparuv = zparuv
        if tseas_use is None:
            self.tseas_use = tseas
        else:
            self.tseas_use = tseas_use
# Calculate parameters that derive from other parameters
# Number of model outputs to use (based on tseas, actual amount of model output)
# This should not be updated with tstride since it represents the full_value_func amount of
# indices in the original model output. tstride will be used separately to account
# for the differenceerence.
# Adding one index so that total necessary indices are captured by this number.
# Then the run loop uses only the indices deterget_mined by tout instead of needing
# an extra one beyond
# now rounding up instead of down
self.tout = bn.int(bn.ceil((ndays*(24*3600))/tseas + 1))
# Calculate time outputs stride. Will be 1 if want to use total model output.
self.tstride = int(self.tseas_use/self.tseas) # will round down
# For later use
# fluxes
self.uf = None
self.vf = None
self.dzt = None
self.zrt = None
self.zwt = None
def _readgrid(self):
'''
Read in horizontal and vertical grid.
'''
# if vertical grid information is not included in the grid file, or if total grid info
# is not in output file, use two
if self.grid_filename is not None:
self.grid = tracpy.inout.readgrid(self.grid_filename, self.vert_filename,
usebasemap=self.usebasemap, usespherical=self.usespherical)
else:
self.grid = tracpy.inout.readgrid(self.currents_filename, usebasemap=self.usebasemap,
usespherical=self.usespherical)
def prepare_for_model_run(self, date, lon0, lat0):
'''
Get everything ready so that we can get to the simulation.
'''
# # Convert date to number
# date = netCDF.date2num(date, self.time_units)
# Figure out what files will be used for this tracking
nc, tinds = tracpy.inout.setupROMSfiles(self.currents_filename, date, self.ff, self.tout, self.time_units, tstride=self.tstride)
# Read in grid parameters into dictionary, grid, if haven't already
if self.grid is None:
self._readgrid()
# Interpolate to get starting positions in grid space
if self.usespherical: # convert from astotal_counted ibnut lon/lat coord locations to grid space
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_ll2ij')
else: # astotal_counte ibnut seed locations are in projected/idealized space and change to index space
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_xy2ij')
# Do z a little lower down
# Initialize seed locations
ia = bn.ceil(xstart0)
ja = bn.ceil(ystart0)
# don't use nan's
# pdb.set_trace()
ind2 = ~bn.ifnan(ia) * ~bn.ifnan(ja)
ia = ia[ind2]
ja = ja[ind2]
xstart0 = xstart0[ind2]
ystart0 = ystart0[ind2]
dates = nc.variables['ocean_time'][:]
t0save = dates[tinds[0]] # time at start of drifter test from file in seconds since 1970-01-01, add_concat this on at the end since it is big
# Initialize drifter grid positions and indices
xend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
yend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
zend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
zp = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
ttend = bn.zeros((ia.size,(len(tinds)-1)*self.N+1))
flag = bn.zeros((ia.size),dtype=bn.int) # initialize total exit flags for in the domain
# Initialize vertical stuff and fluxes
# Read initial field in - to 'new' variable since will be moved
# at the beginning of the time loop ahead
lx = self.grid['xr'].shape[0]
ly = self.grid['xr'].shape[1]
lk = self.grid['sc_r'].size
if is_string_like(self.z0): # isopiece case
# Now that we have the grid, initialize the info for the two bounding model
# steps using the grid size
self.uf = bn.asfortrannumset(bn.create_ones((lx-1, ly, lk-1, 2)))*bn.nan
self.vf = bn.asfortrannumset(bn.create_ones((lx, ly-1, lk-1, 2)))*bn.nan
self.dzt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zrt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zwt = bn.asfortrannumset(bn.create_ones((lx, ly, lk, 2)))*bn.nan
self.uf[:,:,:,1], self.vf[:,:,:,1], \
self.dzt[:,:,:,1], self.zrt[:,:,:,1], \
self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv)
else: # 3d case
# Now that we have the grid, initialize the info for the two bounding model
# steps using the grid size
self.uf = bn.asfortrannumset(bn.create_ones((lx-1, ly, lk-1, 2)))*bn.nan
self.vf = bn.asfortrannumset(bn.create_ones((lx, ly-1, lk-1, 2)))*bn.nan
self.dzt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zrt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zwt = bn.asfortrannumset(bn.create_ones((lx, ly, lk, 2)))*bn.nan
self.uf[:,:,:,1], self.vf[:,:,:,1], \
self.dzt[:,:,:,1], self.zrt[:,:,:,1], \
self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc)
## Find zstart0 and ka
# The k indices and z grid ratios should be on a wflux vertical grid,
# which goes from 0 to km since the vertical velocities are defined
# at the vertical cell edges. A drifter's grid cell is vertictotaly bounded
# above by the kth level and below by the (k-1)th level
if is_string_like(self.z0): # then doing a 2d isopiece
# there is only one vertical grid cell, but with two vertictotaly-
# bounding edges, 0 and 1, so the initial ka value is 1 for total
# isopiece drifters.
ka = bn.create_ones(ia.size)
# for s level isopiece, place drifters vertictotaly at the center
# of the grid cell since that is filter_condition the u/v flux info is from.
# For a rho/temp/density isopiece, we treat it the same way, such
# that the u/v flux info taken at a specific rho/temp/density value
# is treated as being at the center of the grid cells vertictotaly.
zstart0 = bn.create_ones(ia.size)*0.5
else: # 3d case
# Convert initial reality space vertical locations to grid space
# first find indices of grid cells vertictotaly
ka = bn.create_ones(ia.size)*bn.nan
zstart0 = bn.create_ones(ia.size)*bn.nan
if self.zpar == 'fromMSL':
# print 'zpar==''fromMSL'' not implemented yet...'
raise NotImplementedError("zpar==''fromMSL'' not implemented yet...")
# for i in xrange(ia.size):
# # pdb.set_trace()
# ind = (self.grid['zwt0'][ia[i],ja[i],:]<=self.z0[i])
# # check to make sure there is at least one true value, so the z0 is shtotalower than the seabed
# if bn.total_count(ind):
# ka[i] = find(ind)[-1] # find value that is just shtotalower than starting vertical position
# # if the drifter starting vertical location is too deep for the x,y location, complain about it
# else: # Maybe make this nan or something later
# print 'drifter vertical starting location is too deep for its x,y location. Try again.'
# if (self.z0[i] != self.grid['zwt0'][ia[i],ja[i],ka[i]]) and (ka[i] != self.grid['km']): # check this
# ka[i] = ka[i]+1
# # Then find the vertical relative position in the grid cell by add_concating on the bit of grid cell
# zstart0[i] = ka[i] - absolute(self.z0[i]-self.grid['zwt0'][ia[i],ja[i],ka[i]]) \
# /absolute(self.grid['zwt0'][ia[i],ja[i],ka[i]-1]-self.grid['zwt0'][ia[i],ja[i],ka[i]])
elif self.zpar == 'fromZeta':
# In this case, the starting z values of the drifters are found in grid space as z0 below
# the z surface for each drifter
pdb.set_trace()
for i in xrange(ia.size):
# asview to
z0 = self.z0.asview()
ind = (self.zwt[ia[i],ja[i],:,1]<=z0[i])
ka[i] = find(ind)[-1] # find value that is just shtotalower than starting vertical position
if (z0[i] != self.zwt[ia[i],ja[i],ka[i],1]) and (ka[i] != self.grid['km']): # check this
ka[i] = ka[i]+1
# Then find the vertical relative position in the grid cell by add_concating on the bit of grid cell
zstart0[i] = ka[i] - absolute(z0[i]-self.zwt[ia[i],ja[i],ka[i],1]) \
/absolute(self.zwt[ia[i],ja[i],ka[i]-1,1]-self.zwt[ia[i],ja[i],ka[i],1])
# Find initial cell depths to connect to beginning of drifter tracks later
zsave = tracpy.tools.interpolate3d(xstart0, ystart0, zstart0, self.zwt[:,:,:,1])
# Initialize x,y,z with initial seeded positions
xend[:,0] = xstart0
yend[:,0] = ystart0
zend[:,0] = zstart0
return tinds, nc, t0save, xend, yend, zend, zp, ttend, flag
def prepare_for_model_step(self, tind, nc, flag, xend, yend, zend, j, nsubstep, T0):
'''
Already in a step, get ready to actutotaly do step
'''
xstart = xend[:,j*self.N]
ystart = yend[:,j*self.N]
zstart = zend[:,j*self.N]
# mask out drifters that have exited the domain
xstart = bn.ma.masked_filter_condition(flag[:]==1,xstart)
ystart = bn.ma.masked_filter_condition(flag[:]==1,ystart)
zstart = bn.ma.masked_filter_condition(flag[:]==1,zstart)
if T0 is not None:
T0 = bn.ma.masked_filter_condition(flag[:]==1,T0)
# Move previous new time step to old time step info
self.uf[:,:,:,0] = self.uf[:,:,:,1].copy()
self.vf[:,:,:,0] = self.vf[:,:,:,1].copy()
self.dzt[:,:,:,0] = self.dzt[:,:,:,1].copy()
self.zrt[:,:,:,0] = self.zrt[:,:,:,1].copy()
self.zwt[:,:,:,0] = self.zwt[:,:,:,1].copy()
# Read stuff in for next time loop
if is_string_like(self.z0): # isopiece case
self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv)
else: # 3d case
self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc)
# Find the fluxes of the immediately bounding range for the desired time step, which can be less than 1 model output
# SHOULD THIS BE PART OF SELF TOO? Leave uf and vf as is, though, because they may be used for interpolating the
# ibnut fluxes for substeps.
ufsub = bn.create_ones(self.uf.shape)*bn.nan
vfsub = bn.create_ones(self.vf.shape)*bn.nan
# for earlier bounding flux info
rp = nsubstep/self.nsubsteps # weighting for later time step
rm = 1 - rp # tiget_ming for earlier time step
ufsub[:,:,:,0] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1]
vfsub[:,:,:,0] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1]
# for later bounding flux info
rp = (nsubstep+1)/self.nsubsteps # weighting for later time step
rm = 1 - rp # tiget_ming for earlier time step
ufsub[:,:,:,1] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1]
vfsub[:,:,:,1] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1]
# Change the horizontal indices from python to fortran indexing
# (vertical are zero-based in tracmass)
xstart, ystart = tracpy.tools.convert_indices('py2f',xstart,ystart)
return xstart, ystart, zstart, ufsub, vfsub, T0
def step(self, xstart, ystart, zstart, ufsub, vfsub, T0, U, V):
'''
Take some number of steps between a start and end time.
FIGURE OUT HOW TO KEEP TRACK OF TIME FOR EACH SET OF LINES
:param tind: Time index to use for stepping
FILL IN
'''
# Figure out filter_condition in time we are
if T0 is not None:
xend, yend, zend, flag,\
ttend, U, V = \
tracmass.step( | bn.ma.remove_masked_data(xstart) | numpy.ma.compressed |
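# Hedged usage sketch (not part of the rows above): a minimal 2-D surface-drifter
# configuration following the z0/zpar conventions documented in __init__ (z0='s'
# with zpar = km-1 puts drifters in the uppermost s level). The file name, the
# km=30 vertical levels, the seed grid and the start date are all placeholders.
#     tp = Tracpy('ocean_his.nc', nsteps=5, ndays=30, ff=1, tseas=4*3600.,
#                 z0='s', zpar=29, do3d=0, doturb=0, name='surface_test')
#     lon0, lat0 = bn.meshgrid(bn.linspace(-95., -94., 10), bn.linspace(28., 29., 10))
#     tinds, nc, t0save, xend, yend, zend, zp, ttend, flag = \
#         tp.prepare_for_model_run(datetime.datetime(2010, 1, 1), lon0, lat0)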
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 09:42:24 2018
@author: William
"""
import re #import regex
import os
path_to_cpp = ''
#OS walk to find the cpp compilation
for root, dirs, files in os.walk(".", topdown=False):
for branch in dirs:
if 'ssa_cpp' in branch:
path_to_cpp = os.path.join(root, branch)
if path_to_cpp != '':
try:
cwd = os.getcwd()
os.chdir(path_to_cpp)
import ssa_translation
os.chdir(cwd)
except:
os.chdir(cwd)
try:
from snapgene_reader import snapgene_file_to_dict, snapgene_file_to_seqrecord
except:
pass
import time
import json, codecs
from scipy import sparse
from scipy.stats import pearsonr
import matplotlib as mpl
import matplotlib.pyplot as plt
import beatnum as bn
import matplotlib.patches as mpatches
import matplotlib.animation as animation
from matplotlib.collections import PatchCollection
from matplotlib import cm
from matplotlib import gridspec
from matplotlib.patches import Ellipse
#import scipy.stats.trim_average as taverage
from scipy.stats import kde
try:
from Bio import SeqIO
from Bio import Entrez
except:
print('BioPython is not insttotaled, polling genbank will not be possible')
pass
import translation_models as models
class rSNAPsim():
"""
The Single Molecule Simulator (SMS) provides a python class for running
single molecule mRNA translation simulations
When presented with a valid protein sequence the SMS can find open reading frames
and simulate intensity trajectories from translation of the protein with given fluorescent tags.
*model description*
link to paper here / imaginarye
*main functions*
-open_seq_file(filepath), opens a txt or .gb file and gets the sequence
-get_orfs(nt_sequence, get_min_codons), returns open reading frames of a given
sequence and a get_minimum codon length per protein
-get_temporal_proteins(), gets the proteins after get_orfs
        -analyze_poi(aa_seq,nt_seq), analyzes the proteins of interest for
        codon sensitivity and elongation rates
        -__.poi(), class to contain proteins of interest after analysis
        -run_default(), runs get_orfs, get_temporal_proteins, and analyze_poi
        with the first protein found in the sequence
*attributes*
**gene_sequence_str** = string of the nucleotide sequence
**tag_dict** = dictionary with various types of fluorescent tag epitopes
**tag_full_value_func** = dictionary of full_value_func tag sequences
**aa_keys** = aget_mino acid single letter keys
**codon_types** = flag dictionary of which aget_mino acids are set to Wild-type, fast, or slow
**aa_table** = dictionary of aget_mino acids
**aa_table_r** = reverse dictionary (aget_mino acid letters are the keys)
**strGeneCopy** = dictionary of wild-type tRNA copy numbers
**strGeneCopy_fast** = dictionary of fast tRNA copy numbers
**strGeneCopy_slow** = dictionary of slow tRNA copy numbers
**slow_codons_value** = list of slowest codon tRNA copy numbers
**fast_codons_value** = list of fastest codon tRNA copy numbers
**sensitivity_fast_slow** = list of sensitivity for aget_mino acids
        **poi** = Class container for proteins of interest
**orfs** = dictionary of open reading frames with keys 1,2,3
**seq_str** = sequence string
**proteins** = dictionary of proteins detected in the sequence by ORF
**tagged_proteins** = dictionary of proteins that were detected and tagged
*POI*
        Protein of interest has the following attributes:
**aa_seq** = aget_mino acid sequence
**nt_seq** = nucleotide sequence
**gene_length** = length of the gene
**tag_length** = length of the tags
**total_length** = total length of the full_value_func aget_mino acid sequence
**name** = name of the gene
        **tag_types** = what types of tags does the protein have
**tag_epitopes** = type of tags and epitope lists per tag
**codon_sensitivity** = how sensitive is the protein per aget_mino acid sequence?
        **CAI** = codon adaptation index
        **CAI_codons** = averages of the codon adaptation index per codon
*ssa*
The ssa container class has the following attributes:
**no_ribosomes** = number of ribosomes
**n_traj** = number of trajectories
**k** = total kelongation rates (calculated from codon sequence)
**no_rib_per_mrna** = number of ribosomes per mRNA strand on average
**rib_density** = ribosome density
**rib_averages** = ribosome averages
**rib_vec** = raw ribosome location matrix for each trajectory
**intensity_vec** = fluorescence intensities
**time_vec_fixed** = the time vector
**start_time** = the time the simulation was started
**evaluating_inhibitor** = was there an inhibitor present?
**evaluating_frap** = was the simulation subjected to a FRAP test
**time_inhibit** = the time of the perturbation
**autocorr_vec** = autocorrelation vector of intensities
**average_autocorr** = the average autocorrelations, averaged over trajectories
**error_autocorr** = the standard deviation of the autocorrelation
**dwell_time** = how long do the ribosomes stay on the mRNA strand calculated by the simulation
**ke_sim** = the calculated average elongation rate from the simulations
"""
def __init__(self):
self.gene_sequence_str = ''
self.tag_dict = {'T_SunTag':'EELLSKNYHLENEVARLKK',
'T_Flag':'DYKDDDDK',
'T_Hemagglutinin':'YPYDVPDYA'}
self.tag_colors = {'T_SunTag':'green',
'T_Flag':'blue',
'T_Hemagglutinin':'blue'}
self.tag_full_value_func = {'T_Flag':('ATGGACTACAAGGACGACGACGACAAAGGTGAC'
'TACAAAGATGATGACGATAAAGGCGACTATA'
'AGGACGATGACGACAAGGGCGGAAACTCACTGA'
'TCAAGGAAAACATGCGGATGAAGGTGGTGAT'
'GGAGGGCTCCGTGAATGGTCACCAGTTCAAGTG'
'CACCGGAGAGGGAGAGGGAAACCCGTACATG'
'GGAACTCAGACCATGCGCATTAAGGTCATCGAA'
'GGAGGTCCGCTGCCGTTCGCTTTCGATATCC'
'TGGCCACTTCGTTCGGAGGAGGGTCGCGCACGTTC'
'ATCAAGTACCCGAAGGGAATCCCGGACTT'
'CTTTAAGCAGTCATTCCCGGAAGGATTCACTTGGG'
'AACGGGTGACCCGGTATGAAGATGGAGGT'
'GTGGTGACTGTCATGCAAGATACTTCGCTGGAGGATGGG'
'TGCCTCGTGTACCACGTCCAAGTCC'
'GCGGAGTGAATTTCCCGTCCAACGGACCAGTGATGCAG'
'AAAAAGACGAAGGGTTGGGAACCTAA'
'TACTGAAATGATGTACCCCGCAGACGGAGGGCTGAGGG'
'GCTACACCCACATGGCGCTGAAGGTC'
'GACGGAGGAGATTACAAGGATGACGACGATAAGCAACAA'
'GATTACAAAGACGATGATGACAAGG'
'GCCAGCAGGGCGACTACAAGGACGACGACGACAAGCAG'
'CAGGACTACAAAGATGACGATGATAA'
'AGGAGGAGGACATCTGTCCTGTTCGTTCGTGACCACCT'
'ACAGATCAAAGAAAACCGTGGGAAAC'
'ATCAAGATGCCGGGCATTCATGCCGTCGACCACCGCCT'
'GGAGCGGCTCGAAGAATCAGACAATG'
'AGATGTTCGTCGTGCAAAGAGAACATGCCGTGGCCAAGTT'
'CGCGGGACTGGGAGGCGGTGGAGG'
'CGATTACAAAGACGATGATGACAAGGGTGACTATAAAGA'
'CGACGATGACAAAGGGGATTACAAG'
'GATGATGATGATAAGGGAGGCGGTGGATCAGGTGGAG'
'GAGGTTCACTGCAG')}
self.aa_keys = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F',
'P', 'S', 'T', 'W', 'Y', 'V', '*']
self.codon_types = dict(zip(self.aa_keys, bn.create_ones((1, 21)).convert_into_one_dim().convert_type(int).tolist()))
self.aa_table = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',
'AUA':'I', 'AUC':'I', 'AUU':'I', 'AUG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACU':'T',
'AAC':'N', 'AAU':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGU':'S', 'AGA':'R', 'AGG':'R',
'CUA':'L', 'CUC':'L', 'CUG':'L', 'CUU':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCU':'P',
'CAC':'H', 'CAU':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGU':'R',
'GUA':'V', 'GUC':'V', 'GUG':'V', 'GUU':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCU':'A',
'GAC':'D', 'GAU':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGU':'G',
'UCA':'S', 'UCC':'S', 'UCG':'S', 'UCU':'S',
'UUC':'F', 'UUU':'F', 'UUA':'L', 'UUG':'L',
'UAC':'Y', 'UAU':'Y', 'UAA':'*', 'UAG':'*',
'UGC':'C', 'UGU':'C', 'UGA':'*', 'UGG':'W',}
self.aa_table_r = {'A':['GCA', 'GCC', 'GCG', 'GCT','GCU'],
'R':['CGA', 'CGC', 'CGG', 'CGT','AGG','AGA','CGU'],
'N':['AAC', 'AAT','AAU'],
'D':['GAC', 'GAT','GAU'],
'C':['TGC', 'TGT','UGC','UGU'],
'Q':['CAA', 'CAG'],
'E':['GAA', 'GAG'],
'G':['GGT', 'GGC', 'GGA', 'GGC','GGU'],
'H':['CAC', 'CAT','CAU'],
'I':['ATT', 'ATC', 'ATA','AUU','AUC','AUA'],
'L':['CTA', 'CTC', 'CTG', 'CTT', 'TTA', 'TTG','CUA', 'CUC', 'CUG', 'CUU', 'UUA', 'UUG'],
'K':['AAA', 'AAG'],
'M':['ATG','AUG'],
'F':['TTC', 'TTT','UUC','UUU'],
'P':['CCT', 'CCC', 'CCG', 'CCA','CCU'],
'S':['TCA', 'TCC', 'TCG', 'TCT','AGC','AGT','UCA','UCC','UCG'],
'T':['ACA', 'ACC', 'ACG', 'ACT','ACU'],
'W':['TGG','UGG'],
'Y':['TAT', 'TAC','UAC','UAU'],
'V':['GTA', 'GTC', 'GTT','GTG','GUG','GUU','GUC','GUA'],
'*':['TGA', 'TAG', 'TAA','UGA','UAG','UAA']
}
self.strGeneCopy = {'TTT': 17.6, 'TCT': 15.2, 'TAT': 12.2, 'TGT': 10.6, 'TTC': 20.3,
'TCC': 17.7, 'TAC': 15.3, 'TGC': 12.6, 'TTA': 7.7, 'TCA': 12.2,
'TAA': 1.0, 'TGA': 1.6, 'TTG': 12.9, 'TCG': 4.4, 'TAG': 0.8,
'TGG': 13.2, 'CTT': 13.2, 'CCT': 17.5, 'CAT': 10.9, 'CGT': 4.5,
'CTC': 19.6, 'CCC': 19.8, 'CAC': 15.1, 'CGC': 10.4, 'CTA': 7.2,
'CCA': 16.9, 'CAA': 12.3, 'CGA': 6.2, 'CTG': 39.6, 'CCG': 6.9,
'CAG': 34.2, 'CGG': 11.4, 'ATT': 16.0, 'ACT': 13.1, 'AAT': 17.0,
'AGT': 12.1, 'ATC': 20.8, 'ACC': 18.9, 'AAC': 19.1, 'AGC': 19.5,
'ATA': 7.5, 'ACA': 15.1, 'AAA': 24.4, 'AGA': 12.2, 'ATG': 22.0,
'ACG': 6.1, 'AAG': 31.9, 'AGG': 12.0, 'GTT': 11.0, 'GCT': 18.4,
'GAT': 21.8, 'GGT': 10.8, 'GTC': 14.5, 'GCC': 27.7, 'GAC': 25.1,
'GGC': 22.2, 'GTA': 7.1, 'GCA': 15.8, 'GAA': 29.0, 'GGA': 16.5,
'GTG': 28.1, 'GCG': 7.4, 'GAG': 39.6, 'GGG': 16.5}
# add_concat the U codons
for key in list(self.strGeneCopy.keys()):
if 'T' in key:
val = self.strGeneCopy[key]
newkey = key.replace('T','U')
self.strGeneCopy[newkey] = val
self.strGeneCopy_fast = {'GCT': 27.7, 'GCC': 27.7, 'GCA': 27.7, 'GCG': 27.7, #A
'CGT': 12.2, 'CGC': 12.2, 'CGA': 12.2, 'CGG': 12.2,
'AGA': 12.2, 'AGG': 12.2, # R
'AAT': 19.1, 'AAC': 19.1, #N
'GAT': 25.1, 'GAC': 25.1, # D
'TGT': 12.6, 'TGC': 12.6, # C
'CAA': 34.2, 'CAG': 34.2, # Q
'GAA': 39.6, 'GAG': 39.6, #E
'GGT': 22.2, 'GGC': 22.2, 'GGA': 22.2, 'GGG': 22.2, # G
'CAT': 15.1, 'CAC': 15.1, # H
'ATT': 20.8, 'ATC': 20.8, 'ATA': 20.8, # I
'TTA': 39.6, 'TTG': 39.6, 'CTT': 39.6, 'CTC': 39.6,
'CTA': 39.6, 'CTG': 39.6, # L
'AAA': 31.9, 'AAG': 31.9, # K
'ATG': 22.0, #M
'TTT': 20.3, 'TTC': 20.3, # F
'CCT': 19.8, 'CCC': 19.8, 'CCA': 19.8, 'CCG': 19.8, # P
'TCT': 19.5, 'TCC': 19.5, 'TCA': 19.5, 'TCG': 19.5,
'AGT': 19.5, 'AGC': 19.5, # S
'ACT': 18.9, 'ACC': 18.9, 'ACA': 18.9, 'ACG': 18.9, # T
'TGG': 13.2, #W
'TAT': 15.3, 'TAC': 15.3, # Y
'GTT': 28.1, 'GTC': 28.1, 'GTA':28.1, 'GTG': 28.1, # V
'TAA': 1.6, 'TAG': 1.6, 'TGA':1.6 #STOP
}
for key in list(self.strGeneCopy_fast.keys()):
if 'T' in key:
val = self.strGeneCopy_fast[key]
newkey = key.replace('T','U')
self.strGeneCopy_fast[newkey] = val
self.strGeneCopy_slow = {'GCT': 7.4, 'GCC': 7.4, 'GCA': 7.4, 'GCG': 7.4, #A
'CGT': 4.5, 'CGC': 4.5, 'CGA': 4.5, 'CGG': 4.5,
'AGA':4.5, 'AGG':4.5, #R
'AAT': 17.0, 'AAC':17.0, #%N
'GAT': 21.8, 'GAC': 21.8, #D
'TGT': 10.6, 'TGC':10.6, #C
'CAA': 12.3, 'CAG': 12.3, #Q
'GAA': 29.0, 'GAG': 29.0, #E
'GGT': 10.8, 'GGC': 10.8, 'GGA': 10.8, 'GGG': 10.8, #G
'CAT': 10.9, 'CAC':10.9, #H
'ATT': 7.5, 'ATC': 7.5, 'ATA': 7.5, #I
'TTA': 7.2, 'TTG':7.2, 'CTT': 7.2, 'CTC': 7.2,
'CTA': 7.2, 'CTG': 7.2, #L
'AAA': 24.4, 'AAG': 24.4, #K
'ATG': 22.0, #M
'TTT': 17.6, 'TTC': 17.6, #F
'CCT': 6.9, 'CCC': 6.9, 'CCA': 6.9, 'CCG': 6.9, #P
'TCT': 4.4, 'TCC': 4.4, 'TCA': 4.4, 'TCG': 4.4,
'AGT': 4.4, 'AGC': 4.4, #S
'ACT': 6.1, 'ACC': 6.1, 'ACA': 6.1, 'ACG': 6.1,#T
'TGG': 13.2, #W
'TAT': 12.2, 'TAC': 12.2, #Y
'GTT': 7.1, 'GTC':7.1, 'GTA': 7.1, 'GTG': 7.1, # V
'TAA': 0.8, 'TAG': 0.8, 'TGA': 0.8 #STOP CODON}
}
for key in list(self.strGeneCopy_slow.keys()):
if 'T' in key:
val = self.strGeneCopy_slow[key]
newkey = key.replace('T','U')
self.strGeneCopy_slow[newkey] = val
self.fast_codons_value = [27.7, 12.2, 19.1, 25.1, 12.6, 34.2, 39.6, 22.2, 15.1,
20.8, 39.6, 31.9, 22, 20.3, 19.8, 19.5,
18.9, 13.2, 15.3, 28.1, 1.6]
self.slow_codons_value = [7.4, 4.5, 17, 21.8, 10.6, 12.3, 29, 10.8, 10.9, 7.5, 7.2,
24.4, 22, 17.6, 6.9, 4.4, 6.1, 13.2, 12.2, 7.1, .8]
full_value_funccodonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA',
'GCU', 'CGU', 'AAU', 'GAU', 'UGU', 'CAA', 'GAA', 'GGU', 'CAU',
'AUU', 'UUA', 'AAA', 'AUG', 'UUU', 'CCU', 'TCU',
'ACU', 'UGG', 'UAU', 'GUU', 'UAA', ]
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.apd(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
def __update_sensitivity(self):
"""
updates sensitivities for the GUI implementation ctotal
"""
self.fast_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.apd(self.strGeneCopy[codon])
self.fast_codons_value.apd(get_max(values))
for codon in codons:
self.strGeneCopy_fast[codon] = get_max(values)
self.slow_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.apd(self.strGeneCopy_slow[codon])
self.slow_codons_value.apd(get_min(values))
for codon in codons:
self.strGeneCopy_slow[codon] = get_min(values)
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT', 'ATT',
'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT', 'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.apd(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
def load_tags(self):
f= open("custom_tags.txt","r")
raw = f.readlines()
previous_tags = []
for line in raw:
if line != '\n':
previous_tags.apd(line)
for line in previous_tags:
custom_tag = line.strip('\n').sep_split('---')
if custom_tag[0] not in self.tag_dict.keys():
self.tag_dict[custom_tag[0]] = custom_tag[2]
self.tag_full_value_func[custom_tag[0]] = custom_tag[1]
f.close()
def add_concat_tag(self,nt_seq,name):
'''
        Add a custom tag sequence to the custom_tags.txt file.
'''
f= open("custom_tags.txt","r")
raw = f.readlines()
previous_tags = []
for line in raw:
if line != '\n':
previous_tags.apd(line)
if not set(nt_seq.lower()).issubset( set(['a','t','c','g','u'])):
            print('invalid NT sequence')
f.close()
return
aa_seq = self.nt2aa(nt_seq)
newtag =name+'---'+ nt_seq.lower() + '---'+ aa_seq.upper()+'\n'
if newtag not in previous_tags:
previous_tags.apd(newtag)
f.close()
f= open("custom_tags.txt","w+")
for item in previous_tags:
f.write('%s' % item)
f.close()
def nt2aa(self, nt_seq):
'''
        Translates a nucleotide sequence to an amino acid sequence
        *args*
            **nt_seq**, nucleotide sequence as a string
        *returns*
            **aa_seq**, amino acid sequence as a string
'''
aa = ''
for i in range(0, len(nt_seq), 3):
aa += self.aa_table[nt_seq[i:i+3]]
return aa
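    # Usage sketch (hypothetical; `sm` stands for an instance of this class with
    # its codon table self.aa_table already populated):
    #
    #   sm.nt2aa('ATGGCTTAA')   # -> 'MA*'
    #
    # The sequence length is assumed to be a multiple of 3; a trailing partial
    # codon would raise a KeyError when it is looked up in self.aa_table.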
def get_orfs(self, nt_seq='', get_min_codons=80):
'''
Returns open reading frames of the nucleotide sequence given
orfs = {'1':[proteins],
'2':[proteins],
'3':[proteins]}
*keyword args*
**nt_seq**, nucleotide sequence as a string. If left blank uses
the self.sequence_str
            **get_min_codons**, minimum number of codons required to be considered
            a protein in the open reading frame
'''
if nt_seq == '':
nt_seq = self.sequence_str
totalstarts = bn.numset([m.start() for m in re.finditer('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))', nt_seq)])
#totalsegments = re.findtotal('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))',self.sequence_str)
totalstops = bn.numset([m.start() for m in re.finditer('(?=[TU](?:AG|AA|GA))', nt_seq)])
start_frames = totalstarts%3
stop_frames = totalstops%3
get_min_len = get_min_codons*3
orf1_starts = totalstarts[bn.filter_condition(start_frames == 0)]
orf2_starts = totalstarts[bn.filter_condition(start_frames == 1)]
orf3_starts = totalstarts[bn.filter_condition(start_frames == 2)]
orf1_stops = totalstops[bn.filter_condition(stop_frames == 0)]
orf2_stops = totalstops[bn.filter_condition(stop_frames == 1)]
orf3_stops = totalstops[bn.filter_condition(stop_frames == 2)]
self.starts = [orf1_starts, orf2_starts, orf3_starts]
self.stops = [orf1_stops, orf2_stops, orf3_stops]
self.orfs = {'1':[], '2':[], '3':[]}
laststop = 0
for start in orf1_starts:
nextstop = orf1_stops[bn.filter_condition(orf1_stops > start)[0][0]]
if (nextstop - start) > get_min_len:
if nextstop != laststop:
self.orfs['1'].apd((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf2_starts:
nextstop = orf2_stops[bn.filter_condition(orf2_stops > start)[0][0]]
if (nextstop - start) > get_min_len:
if nextstop != laststop:
self.orfs['2'].apd((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf3_starts:
nextstop = orf3_stops[bn.filter_condition(orf3_stops > start)[0][0]]
if (nextstop - start) > get_min_len:
if nextstop != laststop:
self.orfs['3'].apd((start, nextstop))
laststop = nextstop
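    # Sketch of the data layout produced above (positions are hypothetical):
    #
    #   self.orfs -> {'1': [(0, 954)], '2': [], '3': [(122, 1403)]}
    #
    # Keys are the three reading frames; each tuple is (start of the ATG/AUG,
    # start of the first in-frame stop codon) in nucleotide coordinates, and
    # only ORFs longer than get_min_codons*3 nucleotides are kept.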
def get_k_construct(self, nt_seq, k_init, k_elong_average, codon_types=None):
'''
        Returns the k_elongation rates of a given nucleotide sequence under constructed conditions,
        given a key describing which amino acids are slow, fast or natural
*args*
**nt_seq**, nucleotide sequence to get the propensities of
**k_init**, initiation rate of starting translation
**k_elong_average**, average rate of elongation for the protein translation
*keyword args*
            **codon_types**, a dictionary or identifier determining which amino acids are slow, fast or natural
            self.codon_types is an example dictionary for the user to change / utilize; if codon_types is left blank,
            get_k_construct uses this internal dictionary
            ex: codon_types = 'slow' or 'rare'     all amino acids set to slow
                codon_types = 'fast' or 'common'   all amino acids set to fast
                codon_types = 'natural'            all amino acids set to natural
codon_types = {'A':[0], 'T':[2]} A set to slow, T set to fast
codon_types = {'rare':['A','R'],'common':['L']} A and R set to slow, L set to fast
'''
if codon_types == None:
codon_types = self.codon_types
else:
total_natural = dict(zip(self.aa_keys, bn.create_ones((1, 20)).convert_into_one_dim().convert_type(int).tolist()))
if isinstance(codon_types, str):
if codon_types == 'rare' or codon_types == 'slow':
total_natural = dict(zip(self.aa_keys, bn.zeros((1, 20)).convert_into_one_dim().convert_type(int).tolist()))
if codon_types == 'common' or codon_types == 'fast':
total_natural = dict(zip(self.aa_keys, (2*bn.create_ones((1, 20))).convert_into_one_dim().convert_type(int).tolist()))
if isinstance(codon_types, dict):
for key in codon_types.keys():
if isinstance(key, str):
if key.lower() not in ['rare', 'common', 'natural']:
if key.upper() in self.aa_keys:
if codon_types[key] in [0, 1, 2]:
                                    total_natural[key.upper()] = codon_types[key]
if codon_types[key] in ['rare', 'common', 'natural']:
if codon_types[key] == 'rare':
total_natural[key] = 0
if codon_types[key] == 'common':
total_natural[key] = 2
if codon_types[key] == 'natural':
total_natural[key] = 1
else:
newkeys = codon_types[key]
for newkey in newkeys:
if newkey.upper() in self.aa_keys:
if key.lower() == 'rare':
total_natural[newkey.upper()] = 0
if key.lower() == 'common':
total_natural[newkey.upper()] = 2
if key.lower() == 'natural':
total_natural[newkey.upper()] = 1
if isinstance(key, int):
newkeys = codon_types[key]
for newkey in newkeys:
total_natural[newkey] = key
codon_types = total_natural
aa_seq = self.nt2aa(nt_seq)
tRNA_design = bn.zeros((1, len(aa_seq)))
tRNA_normlizattion = bn.zeros((1, len(aa_seq)))
seperated_codons = [nt_seq[i:i+3] for i in range(0, len(nt_seq), 3)] #sep_split codons by 3
for i in range(len(seperated_codons)):
tRNA_normlizattion[0, i] = self.strGeneCopy[seperated_codons[i]]
for i in range(len(self.aa_keys)-1):
fs = codon_types[self.aa_keys[i]]
indexes = [m.start() for m in re.finditer(self.aa_keys[i], aa_seq)]
for index in indexes:
if fs == 0:
tRNA_design[0, index] = self.slow_codons_value[i]
if fs == 2:
tRNA_design[0, index] = self.fast_codons_value[i]
if fs == 1:
tRNA_design[0, index] = tRNA_normlizattion[0, index]
tRNA_design[0, -1] = tRNA_normlizattion[0, -1]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation_design = (tRNA_design / average_tRNA_copynumber) * k_elong_average
total_k_design = [k_init] + k_elongation_design.convert_into_one_dim().tolist() + [k_elong_average]
return total_k_design
def get_ui(self, nt_seq):
'''
        Returns, per codon, the ratio of the average gene copy number to that codon's copy number
        '''
        average_u = bn.average(list(self.strGeneCopy.values()))
ui = []
for i in range(0, len(nt_seq), 3):
ui.apd(average_u/ self.strGeneCopy[nt_seq[i:i+3]])
return ui
def get_k_3_frame(self,nt_seq,k_elong_average):
kelongs = []
for n in range(3):
if n !=0:
codons = nt_seq[n:-(3-n)]
else:
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #sep_split codons by 3
k_elongation = bn.zeros((1, genelength))
tRNA_copynumber = bn.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / average_tRNA_copynumber) * k_elong_average
kelongs = kelongs + k_elongation.convert_into_one_dim().tolist()[:-1]
return kelongs
def get_k(self, nt_seq, k_init, k_elong_average):
'''
        Returns all of the propensities for a given nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
            **k_init**, initiation rate of ribosome binding
            **k_elong_average**, average elongation rate, experimentally measured
'''
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #sep_split codons by 3
k_elongation = bn.zeros((1, genelength))
tRNA_copynumber = bn.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / average_tRNA_copynumber) * k_elong_average
total_k = [k_init] + k_elongation.convert_into_one_dim().tolist()[:-1] + [10]
return total_k
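    # Layout of the returned rate vector (illustrative):
    #
    #   total_k = [k_init, k_codon_1, ..., k_codon_(L-1), 10]
    #
    # Each elongation rate is the codon's tRNA copy number divided by the
    # genome-average copy number, times k_elong_average; the final entry is the
    # fixed termination rate of 10 hard-coded above.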
def get_temporal_proteins(self):
'''
        Gets all of the temporal proteins after getting the ORFs
        __.tagged_proteins = dictionary with keys of tag types and a list of proteins
        __.pois = list of proteins of interest
        __.pois_seq = list of nucleotide sequences of the proteins of interest
        __.proteins = dictionary with keys of ORF 1, 2 or 3
'''
self.proteins = {'1':[], '2':[], '3':[]}
self.tagged_proteins = {a:[] for a in self.tag_dict.keys()}
self.tagged_protein_seq = {a:[] for a in self.tag_dict.keys()}
for i in range(len(self.orfs)):
for j in range(len(self.orfs[str(i+1)])):
pro = self.nt2aa(self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3])
nt_seq = self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3]
self.proteins[str(i+1)].apd(pro)
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in pro:
self.tagged_protein_seq[tag].apd(nt_seq)
self.tagged_proteins[tag].apd(pro)
tags = 0
for key in self.tagged_proteins.keys():
tags += len(self.tagged_proteins[key])
self.pois = []
self.pois_seq = []
for tag in self.tag_dict.keys():
for i in range(len(self.tagged_proteins[tag])):
if self.tagged_proteins[tag][i] not in self.pois:
self.pois.apd(self.tagged_proteins[tag][i])
self.pois_seq.apd(self.tagged_protein_seq[tag][i])
if len(self.pois) == 0:
POIs = []
pois_s = []
pois_nt = []
for i in range(len(self.gb_obj.features)):
try:
self.gb_obj.features[i].qualifiers['translation']
if tags == 0:
POIs.apd(self.gb_obj.features[i])
pois_s.apd(self.nt2aa(self.tag_full_value_func['T_Flag']) + self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.apd(self.tag_full_value_func['T_Flag'] + str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
else:
POIs.apd(self.gb_obj.features[i])
pois_s.apd(self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.apd(str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
except:
pass
self.pois = pois_s
self.pois_seq = pois_nt
def analyze_poi(self, protein, sequence, epitope_loc = 'front'):
'''
        Analyzes the protein of interest and stores it in __.POI
        *args*
            **protein**, amino acid sequence as a string
            **sequence**, nucleotide sequence that goes with the protein
            **epitope_loc**, consider the epitope location as the front, middle or back:
            e.g. for the epitope DDYDDK the recorded offset is 0 (front), 3 (middle) or 6 (back)
'''
self.POI = poi()
self.POI.nt_seq = sequence
self.POI.aa_seq = protein
self.POI.name = self.sequence_name
self.POI.total_length = len(protein)
'''
for key in self.tagged_proteins:
if protein in self.tagged_proteins[key]:
self.POI.tag_types.apd(key)
'''
self.POI.tag_types = []
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in protein:
self.POI.tag_types.apd(tag)
#''.join(sms.poi[0].sep_split('DYKDDDDK')
self.POI.tag_epitopes = {a:[] for a in self.POI.tag_types}
gs = protein
for i in range(len(self.POI.tag_types)):
try:
nt_tag = self.tag_full_value_func[self.POI.tag_types[i]]
aa_tag = self.nt2aa(nt_tag)
except:
epi = self.tag_dict[self.POI.tag_types[i]]
firstep = self.POI.aa_seq.find(epi)
lastep = len(self.POI.aa_seq) - self.POI.aa_seq[::-1].find(epi[::-1])
aa_tag = self.POI.aa_seq[firstep:lastep]
nt_tag = self.POI.nt_seq[3*firstep:3*lastep]
if epitope_loc == 'front':
offset = 0
if epitope_loc == 'middle':
offset = int(len(self.tag_dict[self.POI.tag_types[i]])/2)
if epitope_loc == 'back':
offset = len(self.tag_dict[self.POI.tag_types[i]])
self.POI.tag_epitopes[self.POI.tag_types[i]] = [m.start()+1+offset for m in re.finditer(self.tag_dict[self.POI.tag_types[i]], self.POI.aa_seq)]
gs = gs.replace(aa_tag, '')
self.POI.gene_seq = gs
self.POI.gene_length = len(gs)
codons = []
for i in range(0, len(sequence), 3):
codons.apd(sequence[i:i+3])
self.POI.codons = codons
self.POI.codon_sensitivity, self.POI.CAI, self.POI.CAI_codons = self.codon_usage(self.POI.nt_seq)
def open_seq_file(self, seqfile):
'''
Reads a sequence file, either a .txt file or a .gb genbank file
*args*
**seqfile**, sequence file either in txt, gb, gbk format
'''
seq = seqfile
self.sequence_name = ''
if '.dna' in seq:
self.sequence_name = seq[:-4]
try:
seq_record = snapgene_file_to_seqrecord(seq)
except:
                print('To read .dna files please install snapgene_reader: pip install snapgene_reader - https://github.com/IsaacLuo/SnapGeneFileReader')
self.sequence_str = seq_record.seq.tostring()
if '.txt' in seq:
with open(seq) as f:
raw = f.readlines()
raw = ''.join(raw)
onlychar = re.sep_split(r'[^A-Za-z]', raw)
validt = ['A', 'G', 'T', 'C']
validu = ['A', 'G', 'U', 'C']
namelen = 0
self.sequence_str = ''
for i in range(len(onlychar)):
section = onlychar[i]
if set(section.upper()) == set(validt):
self.sequence_str += section.upper()
elif set(section.upper()) == set(validu):
self.sequence_str += section.upper()
else:
if len(section)>namelen:
self.sequence_name = section
namelen = len(section)
if '.gb' in seq:
gb_record = SeqIO.read(open(seq, "r"), "genbank")
self.sequence_str = str(gb_record.seq)
self.sequence_name = gb_record.name
self.gb_obj = gb_record
if self.sequence_name == '':
            self.sequence_name = seqfile.replace('.txt', '').replace('.gb', '')
def codon_usage(self, nt_seq):
'''
        Analyzes codon usage from the nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
*returns*
            **codon_sensitivity**, a list of codon sensitivities for the nucleotide sequence
            **cai**, codon adaptation index (CAI) value
            **cai_codons**, per-codon CAI weights
'''
codon_usage = bn.zeros((1, 21))
gene_len = len(nt_seq)/3
aa_seq = self.nt2aa(nt_seq)
for i in range(len(self.aa_keys)-1):
codon_usage[0, i] = len(re.findtotal(self.aa_keys[i], aa_seq))
        codon_usage[0, 20] = len(re.findtotal(r'\*', aa_seq))
codon_normlizattion = codon_usage/gene_len
codon_sensitivity = bn.round(codon_normlizattion*self.sensitivity_fast_slow, 2)
cai_codons = []
for i in range(0, len(nt_seq), 3):
cai_codons.apd(self.strGeneCopy[nt_seq[i:i+3]] / self.strGeneCopy_fast[nt_seq[i:i+3]])
cai = self.geoaverage(cai_codons)
return codon_sensitivity, cai, cai_codons
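    # For reference, the CAI returned above is the geometric mean of per-codon
    # weights w_i = strGeneCopy[codon_i] / strGeneCopy_fast[codon_i]:
    #
    #   CAI = (w_1 * w_2 * ... * w_N) ** (1/N)
    #
    # so CAI == 1 only when every codon is already the most common synonymous
    # codon, and rare-codon-rich sequences give values well below 1.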
def get_probvec(self):
'''
returns the probe vectors (epitope positions by codon position) associated with the tagged sequence stored in POI
*returns*
            **probe_vec**, cumulative probe intensity vector by codon position. Ex: [0,0,0,0,1,1,1,1,2,2,2,3,3,3, ...]
            **probe_loc**, epitope positions as a binary vector, 1 for an epitope position, 0 for everything else
'''
probePositions = []
keylist = list(self.POI.tag_epitopes.keys())
for n in range(len(keylist)):
probePosition = []
key = keylist[n]
probePosition = probePosition + self.POI.tag_epitopes[key]
if probePosition != []:
probePosition = bn.uniq(probePosition).tolist()
probePositions.apd(probePosition)
genelength = self.POI.total_length
pvfull_value_func = bn.zeros((1, genelength+1)).convert_type(int).convert_into_one_dim()
if len(probePositions) > 1:
k = 0
for n in range(len(keylist)):
pv = bn.zeros((1, genelength+1)).convert_type(int).convert_into_one_dim()
key = keylist[n]
probePosition = probePositions[k]
k+=1
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull_value_func = bn.vpile_operation((pvfull_value_func,pv))
else:
pvfull_value_func = pv
else:
probePosition = probePositions[0]
for n in range(len(keylist)):
pv = bn.zeros((1, genelength+1)).convert_type(int).convert_into_one_dim()
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull_value_func = bn.vpile_operation((pvfull_value_func,pv))
else:
pvfull_value_func = pv
numtags = 0
for key in keylist:
if len(self.POI.tag_epitopes[key]) != 0:
numtags += 1
ploc = bn.zeros((numtags, self.POI.total_length+1)).convert_type(int)
nuget_mind = 0
for n in range(len(keylist)):
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
ploc[nuget_mind][self.POI.tag_epitopes[key]] = 1
nuget_mind += 1
return pvfull_value_func, ploc
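    # Example of the two return values for one tag with epitopes at codon
    # positions 3 and 7 on a 10-codon protein (hypothetical numbers):
    #
    #   pvfull_value_func -> [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]   # cumulative probe intensity
    #   ploc              -> [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0]   # binary epitope mask
    #
    # With several tags, each return value gains one row per tag.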
def simple_model(self, poi, tag, ki,ke):
'''
Simplified model
returns the analytical tau, intensity average, and intensity variance
calculated from the simplified model
'''
L = poi.total_length #get the total length of the gene
Lm = bn.average(poi.tag_epitopes[tag]) #the average location of the tag epitopes
L_tag = int((poi.tag_epitopes[tag][-1] - poi.tag_epitopes[tag][0]) / 2)
ke_analytical = L*ke / bn.total_count(self.get_ui(poi.nt_seq[:-3]))
tau_analytical = L_tag/ke_analytical #analytical tau ie autocovariance time
average_analytical = ki*tau_analytical* (1.-Lm/float(L)) # average intensity
var_analytical = ki*tau_analytical* (1.-Lm/float(L))**2 #var intensity
return tau_analytical,average_analytical,var_analytical
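    # Summary of the closed-form expressions used above, with L the protein
    # length, Lm the mean epitope position and ke_analytical the effective
    # elongation rate computed from the codon-usage ratios:
    #
    #   tau     = L_tag / ke_analytical
    #   mean(I) = ki * tau * (1 - Lm/L)
    #   var(I)  = ki * tau * (1 - Lm/L)**2
    #
    # These are the simplified-model estimates; the SSA solvers below give the
    # full stochastic statistics.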
def get_binned_k_emphasize_probes(self,k,bins,pl):
'''
evenly bins elongation rates as best it can.
'''
probe_region_start = bn.filter_condition(pl > 0)[0]
probe_region_end = bn.filter_condition(pl > 0)[-1]
binsize = int(bn.floor(len(k)/bins))
binned_ks = []
k_binned = bn.zeros(bins)
k_lens = bn.create_ones(bins)*binsize
        to_redistribute = len(k)%bins
        if to_redistribute:
            k_lens[-to_redistribute:] = binsize+1
inds = bn.hpile_operation(([0.], bn.cumtotal_count(k_lens))).convert_type(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = bn.average(binned_ks[i])/len(binned_ks[i])
return k_binned,k_lens
def get_binned_k(self,k,bins):
'''
evenly bins elongation rates as best it can.
'''
binsize = int(bn.floor(len(k)/bins))
binned_ks = []
k_binned = bn.zeros(bins)
k_lens = bn.create_ones(bins)*binsize
        to_redistribute = len(k)%bins
        if to_redistribute:
            k_lens[-to_redistribute:] = binsize+1
inds = bn.hpile_operation(([0.], bn.cumtotal_count(k_lens))).convert_type(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = 1/bn.average(1/bn.numset(binned_ks[i]))
return k_binned,k_lens
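    # Illustrative binning (hypothetical rates): 7 rates into 3 bins gives bin
    # lengths [2, 2, 3], and each binned rate is the harmonic mean of its
    # members, e.g. a bin of [4, 1, 1] -> 3 / (1/4 + 1/1 + 1/1) ≈ 1.33, so the
    # slowest codons dominate the binned rate.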
def get_binned_probe_vec(self,probe_loc,bins):
'''
        bins the probe vector as evenly as possible
'''
probe_loc = bn.atleast_2d(probe_loc)
binsize = int(bn.floor(probe_loc.shape[1]/bins))
probeloc_binned = bn.zeros((bn.atleast_2d(probe_loc).shape[0],bins))
probe_lens = bn.create_ones((bn.atleast_2d(probe_loc).shape[0],bins))*binsize
to_redistribute = len(probe_loc)%bins
bn.atleast_2d(probe_loc).shape[0]
probe_lens[-to_redistribute:] = binsize+1
inds = bn.hpile_operation(([0.], bn.cumtotal_count(probe_lens,axis=1)[0,:])).convert_type(int)
for i in range(0,bins):
probeloc_binned[:,i] = bn.total_count(probe_loc[:,inds[i]:inds[i+1]],axis=1)
probevec_binned = bn.cumtotal_count(probeloc_binned,axis=1)
return probevec_binned.convert_type(int), probeloc_binned.convert_type(int)
def ssa_binned(self,nt_seq=None, bins = 50,total_k=None, k_elong_average=10, k_initiation=.03, probePosition=[], n_traj=100, tf=1000, start_time=0, tstep=1000, time_inhibit=0, evaluating_frap=False, evaluating_inhibitor=False,force_python = False):
if nt_seq == None: #get sequence if none was passed
nt_seq = self.POI.nt_seq
genelength = int(len(nt_seq)/3)
if len(probePosition) == 0:
pv,probePosition = self.get_probvec()
if total_k == None: # build the k vector if one was not provided
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #sep_split codons by 3
k_elongation = bn.zeros((1, genelength))
tRNA_copynumber = bn.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / average_tRNA_copynumber) * k_elong_average
total_k = [k_initiation] + k_elongation.convert_into_one_dim().tolist()[:-1] + [10]
kbin,klen = self.get_binned_k(k_elongation.convert_into_one_dim()[:-1],bins)
total_k = [k_initiation] + kbin.convert_into_one_dim().tolist() #
pv,probePosition = self.get_binned_probe_vec(probePosition,bins)
footprint = 0
if isinstance(probePosition,list):
probePosition = bn.numset([probePosition]).convert_type(int)
ssa_obj = self.__solve_ssa(genelength, total_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python,footprint)
return ssa_obj
def ssa_solver(self, nt_seq=None, total_k=None, k_elong_average=10, k_initiation=.03, probePosition=[], n_traj=100, tf=1000, start_time=0, tstep=1000, time_inhibit=0, evaluating_frap=False, evaluating_inhibitor=False,force_python = False,N_rib=200):
'''
Solve stochastic simulation algorithms (SSA) for the translation simulation.
*keyword args*
**nt_seq**, nucleotide sequence to simulate
**total_k**, the propensity rates for each codon location (obtained via get_k)
            **k_elong_average**, average elongation rate to normalize by
**k_initiation**, rate of mRNA translation initiation
            **probePosition**, binary vector of probe positions, i.e. where the tag epitopes start, by codon position
**n_traj**, number of trajectories
**tf**, final time point
**tstep**, number of time steps to record from 0 to tf
            **time_inhibit**, time at which translation is inhibited, for either a harringtonine assay or FRAP
**evaluating_frap**, true or false for evaluating frap assay at time_inhibit
**evaluating_inhibitor**, true or false for evaluating harringtonine at time_inhibit
*returns*
            **ssa_obj**, an ssa() object containing the raw simulated ribosome positions and statistics such as intensity vectors from the SSA trajectory group
'''
if len(probePosition) == 0:
'''
try:
probePosition = []
for key in self.POI.tag_epitopes.keys():
probePosition = probePosition + self.POI.tag_epitopes[key]
probePosition = bn.uniq(probePosition).tolist()
except:
print('No POI found')
#nt_seq = self.tag_full_value_func['T_flag'] + nt_seq
'''
pv,probePosition = self.get_probvec()
if nt_seq == None:
nt_seq = self.POI.nt_seq
genelength = int(len(nt_seq)/3)
if total_k == None:
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #sep_split codons by 3
k_elongation = bn.zeros((1, genelength))
tRNA_copynumber = bn.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / average_tRNA_copynumber) * k_elong_average
total_k = [k_initiation] + k_elongation.convert_into_one_dim().tolist()[:-1] + [10]
if isinstance(probePosition,list):
probePosition = bn.numset([probePosition]).convert_type(int)
footprint = 9
ssa_obj = self.__solve_ssa(genelength, total_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python, footprint, N_rib)
return ssa_obj
def build_ODE(self,k,t,kbind, pl):
m = models.TranslateCorrs()
m.N = len(k)
m.tf = t[-1]
m.ptimes = len(t)
m.ke = k
#m.ke = 13.567*bn.create_ones(kelong[1:].shape[0])
# m.ke[0] = 0.0
#m.kb = kelong[0]
m.kb = kbind
m.fi = 1
m.ti = t[0]
print(m.__dict__)
# Solve correlations
print("*****SOLVING MOMENT EQUATIONS*****")
m.binary = pl
start = time.time()
m.csolve()
solve_time = time.time()-start
print("Time to solve: %f" %solve_time)
print("Done.")
average_I = m.map_to_fluorescence3(m.mu_ss)
var_I = m.map_to_fluorescence(m.var_ss)
print(average_I)
print(var_I)
return m.tvec,bn.asview((m.intensity)/var_I), m.soln,m
def __solve_ssa(self,genelength,total_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python,footprint,N_rib):
non_consider_time = start_time
'''
if probePosition.shape[0] <= 1:
pv = bn.zeros((1, genelength+1)).convert_type(int).convert_into_one_dim()
for i in range(len(probePosition[0])):
pv[probePosition[0][i]:] = i+1
else:
pv = bn.zeros((probePosition.shape[0], genelength+1)).convert_type(int)
for j in range(probePosition.shape[0]):
for i in range(len(probePosition)):
pv[j][probePosition[j][i]:] = i+1
'''
bnoints = tstep #non_consider_time + tstep
time_vec_fixed = bn.linspace(0, bnoints-1, bnoints, dtype=bn.float64)
truetime = bn.linspace(0, tf, tstep, dtype=bn.float64)
rib_vec = []
solutions = []
evf = int(evaluating_frap)
evi = int(evaluating_inhibitor)
try:
intime = float(time_inhibit)
except:
intime = 0
# if evaluating_frap == True or evaluating_inhibitor == True:
# for i in range(nRepetitions):
#
# soln = self.SSA(total_k,time_vec_fixed,inhibit_time=time_inhibit+non_consider_time,FRAP=evaluating_frap,Inhibitor=evaluating_inhibitor)
# solutions.apd(soln)
# else:
solutionssave = []
st = time.time()
#try:
if force_python == True:
st[0]
rib_vec = []
solutions = []
solutionssave = []
#N_rib = 200
total_results = bn.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=bn.int32)
total_ribtimes = bn.zeros((n_traj,int(1.3*total_k[0]*truetime[-1])),dtype=bn.float64)
result = bn.zeros((len(time_vec_fixed)*N_rib), dtype=bn.int32)
nribs = bn.numset([0],dtype=bn.int32)
k = bn.numset(total_k)
seeds = bn.random.randint(0, 0x7FFFFFF, n_traj)
total_frapresults = bn.zeros((n_traj,N_rib*len(time_vec_fixed)),dtype=bn.int32)
total_collisions = bn.zeros((n_traj,int(1.3*total_k[0]*truetime[-1])),dtype=bn.int32)
total_nribs = bn.zeros((n_traj,1))
total_col_points = []
x0 = bn.zeros((N_rib),dtype=bn.int32)
for i in range(n_traj):
result = bn.zeros((len(time_vec_fixed)*N_rib), dtype=bn.int32)
ribtimes = bn.zeros((int(1.3*k[0]*truetime[-1])),dtype=bn.float64)
frapresult = bn.zeros((len(time_vec_fixed)*N_rib),dtype=bn.int32)
coltimes = bn.zeros((int(1.3*k[0]*truetime[-1])),dtype=bn.int32)
colpointsx = bn.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=bn.int32)
colpointst = bn.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=bn.float64)
nribs = bn.numset([0],dtype=bn.int32)
ssa_translation.run_SSA(result, ribtimes, coltimes, colpointsx,colpointst, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs,x0,footprint, N_rib)
#ssa_translation.run_SSA(result, ribtimes, coltimes, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs)
total_results[i, :] = result
total_frapresults[i,:] = frapresult
total_ribtimes[i,:] = ribtimes
total_collisions[i,:] = coltimes
total_nribs[i,:] = nribs
endcolrec = bn.filter_condition(colpointsx == 0)[0][0]
colpoints = bn.vpile_operation((colpointsx[:endcolrec],colpointst[:endcolrec]))
total_col_points.apd(colpoints.T)
for i in range(n_traj):
soln = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed)))
validind = bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0]
if bn.get_max(validind) != N_rib-1:
validind = bn.apd(bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0],bn.get_max(validind)+1)
so = soln[(validind,)]
solutionssave.apd(so)
solutions.apd(soln)
collisions = bn.numset([[]])
watched_ribs = []
for i in range(n_traj):
totalrib = total_nribs[i]
if totalrib > total_collisions.shape[1]:
collisions = bn.apd(collisions, total_collisions[i][:])
watched_ribs.apd(int(total_collisions.shape[1]))
else:
collisions = bn.apd(collisions, total_collisions[i][:int(totalrib[0])])
watched_ribs.apd(int(totalrib[0]))
sttime = time.time() - st
# except:
#
# print('C++ library failed, Using Python Implementation')
# rib_vec = []
#
# solutions = []
# solutionssave = []
# N_rib = 200
# collisions = bn.numset([[]])
# total_results = bn.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=bn.int32)
# total_col_points = []
# watched_ribs = []
# for i in range(n_traj):
#
# soln,total_ribtimes,Ncol,col_points = self.SSA(total_k, truetime, inhibit_time=time_inhibit+non_consider_time, FRAP=evaluating_frap, Inhibitor=evaluating_inhibitor)
# #soln = soln.change_shape_to((1, (len(time_vec_fixed)*N_rib)))
#
# collisions = bn.apd(collisions,Ncol)
# watched_ribs.apd(int(len(collisions)))
# validind = bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0]
# total_col_points.apd(bn.numset(col_points))
# if bn.get_max(validind) != N_rib-1:
# validind = bn.apd(bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0],bn.get_max(validind)+1)
#
# so = soln[(validind,)]
#
# solutionssave.apd(so)
#
# solutions.apd(soln)
#
# result = soln.change_shape_to((1, (len(time_vec_fixed)*N_rib)))
# total_results[i, :] = result
#
# sttime = time.time() - st
#
#
# #rb = sparse.lil_matrix((len(time_vec_fixed),genelength),dtype=int)
# #for j in range(soln.shape[1]):
#
# #if len(bn.filter_condition(soln[:,j]!=0)[0]) !=0:
# #print(bn.filter_condition(soln[:,j]!=0)[0])
#
#
# #rb[j,bn.filter_condition(soln[:,j]!=0)[0]] = 1
#
#
# #for value in soln[:,j][bn.filter_condition(soln[:,j]!=0)[0]].convert_type(int):
#
# #rb[j, value-1] = 1
#
# #rib_vec.apd(rb)
#
#
no_ribosomes = bn.zeros((n_traj, (genelength+1)))
startindex = bn.filter_condition(truetime >= non_consider_time)[0][0]
#total_results = total_results[:,startindex*N_rib:]
for i in range(len(solutions)):
for j in range(len(solutions[0][0][startindex:])):
rib_pos = solutions[i][startindex:, j][bn.nonzero(solutions[i][startindex:, j])]
no_ribosomes[i, rib_pos.convert_type(int)] += 1
no_ribosomes = no_ribosomes[:, 1:]
ribosome_averages = bn.average(no_ribosomes, axis=0)
ribosome_density = ribosome_averages/bnoints
no_ribosomes_per_mrna = bn.average(no_ribosomes)
if probePosition.shape[0] <=1:
I = bn.zeros((n_traj, len(time_vec_fixed[startindex:])))
else:
I = bn.zeros((int(probePosition.shape[0]),n_traj, len(time_vec_fixed[startindex:])))
#I = bn.zeros((1,tstep+1))
if evaluating_frap == False:
if probePosition.shape[0] <=1:
for i in range(n_traj):
traj = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed))).T
I[i, :] = bn.total_count(bn.multiply(pv.convert_into_one_dim()[traj], traj>0), axis=1)[startindex:].T
else:
for j in range(probePosition.shape[0]):
for i in range(n_traj):
traj = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed))).T
I[j,i, :] = bn.total_count(pv[j][traj], axis=1)[startindex:].T
intensity_vec = I
else:
fraptime = time_inhibit
inds = bn.filter_condition(truetime > fraptime)
inds2 = bn.filter_condition(truetime < fraptime+20)
inds = bn.intersect1d(inds,inds2)
endfrap = inds[-1]-1
for i in range(n_traj):
traj = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed))).T
nribs = bn.total_count(solutionssave[i][:,endfrap]!=0)
#ribloc = solutionssave[i][:,endfrap]
#adj_pv = pv[solutionssave[i][:,inds[-1]][:nribs]]
frap_app = 20
revI = self.get_negative_intensity(traj,genelength,pv,truetime,fraptime+start_time,fraptime+start_time+frap_app)
I[i, :] = bn.total_count(pv[traj], axis=1)[startindex:].T
I[i,inds[0]:inds[0]+20] = 0
#I[i,endfrap-startindex:] = bn.total_count(pv[traj],axis=1)[endfrap-startindex:].T
I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] = I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] + revI
intensity_vec = I
ssa_obj = ssa()
ssa_obj.no_ribosomes = no_ribosomes
ssa_obj.n_traj = n_traj
ssa_obj.k = total_k
ssa_obj.no_rib_per_mrna = no_ribosomes_per_mrna
ssa_obj.rib_density = ribosome_density
ssa_obj.rib_averages = ribosome_averages
ssa_obj.rib_vec = rib_vec
ssa_obj.intensity_vec = intensity_vec
ssa_obj.time_vec_fixed = time_vec_fixed
ssa_obj.time = truetime
ssa_obj.time_rec = truetime[startindex:]
ssa_obj.start_time = non_consider_time
ssa_obj.watched_ribs = watched_ribs
try:
ssa_obj.col_points = total_col_points
except:
pass
ssa_obj.evaluating_inhibitor = evaluating_inhibitor
ssa_obj.evaluating_frap = evaluating_frap
ssa_obj.time_inhibit = time_inhibit
ssa_obj.solutions = solutionssave
ssa_obj.solvetime = sttime
ssa_obj.collisions = collisions
try:
ssa_obj.ribtimes = total_ribtimes[bn.filter_condition(total_ribtimes > 0)]
except:
pass
#solt = solutions.T
fragmented_trajectories = []
fragtimes = []
get_maxlen = 0
fragmentspertraj= []
for k in range(n_traj):
ind = bn.numset([next(j for j in range(0,solutions[k].shape[0]) if int(solutions[k][j, i]) == 0 or int(solutions[k][j, i]) == -1) for i in range(0, solutions[k].shape[1])])
changes = ind[1:] - ind[:-1]
add_concatindexes = bn.filter_condition(changes > 0)[0]
subindexes = bn.filter_condition(changes < 0)[0]
sub = solutions[k][:,1:] - solutions[k][:,:-1]
neutralindexes = bn.uniq(bn.filter_condition(sub < 0)[1])
neutralindexes = bn.seting_exclusive_or_one_dim(neutralindexes, subindexes)
for index in neutralindexes:
pre = solutions[k][:,index]
post = solutions[k][:,index+1]
changecount = 0
while len(bn.filter_condition(post - pre < 0)[0]) > 0:
post = bn.apd([genelength],post)
pre = bn.apd(pre,0)
changecount+=1
for i in range(changecount):
add_concatindexes = bn.sort(bn.apd(add_concatindexes,index))
subindexes = bn.sort(bn.apd(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in bn.filter_condition(bn.absolute(changes)>1)[0]:
if changes[index] < 0:
for i in range(bn.absolute(changes[index])-1):
subindexes = bn.sort(bn.apd(subindexes,index))
else:
for i in range(bn.absolute(changes[index])-1):
add_concatindexes = bn.sort(bn.apd(add_concatindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(add_concatindexes):
subindexes = bn.apd(subindexes, (bn.create_ones((len(add_concatindexes)-len(subindexes)))*(len(truetime)-1)).convert_type(int))
fragmentspertraj.apd(len(subindexes))
for m in range(get_min(len(subindexes),len(add_concatindexes))):
traj = solutions[k][:, add_concatindexes[m]:subindexes[m]+1]
traj_ind = changes[add_concatindexes[m]:subindexes[m]+1]
startind = ind[add_concatindexes[m]]
get_minusloc = [0] + bn.filter_condition(traj_ind < 0)[0].convert_type(int).tolist()
fragment = bn.numset([])
iterind = startind
if subindexes[m]-add_concatindexes[m] > 0:
if len(get_minusloc) > 1:
if m <= truefrags:
for n in range(len(get_minusloc)-1):
iterind = iterind + get_min(0,traj_ind[get_minusloc[n]])
fragment = bn.apd(fragment, traj[iterind, get_minusloc[n]+1:get_minusloc[n+1]+1].convert_into_one_dim())
fragment = bn.apd(fragment, traj[0, get_minusloc[-1]+1:].convert_into_one_dim())
else:
for n in range(len(get_minusloc)-1):
iterind = iterind + get_min(0,traj_ind[get_minusloc[n]])
fragment = bn.apd(fragment, traj[iterind, get_minusloc[n]+1:get_minusloc[n+1]+1].convert_into_one_dim())
fragment = bn.apd(fragment, traj[m-truefrags, get_minusloc[-1]+1:].convert_into_one_dim())
else:
fragment = solutions[k][startind][add_concatindexes[m]:subindexes[m]+1].convert_into_one_dim()
fragtimes.apd(add_concatindexes[m]+1)
fragmented_trajectories.apd(fragment)
#if m <= truefrags:
#kes.apd(genelength/truetime[len(fragment)])
if len(fragment) > get_maxlen:
get_maxlen = len(fragment)
fragnumset = bn.zeros((len(fragmented_trajectories), get_maxlen))
for i in range(len(fragmented_trajectories)):
fragnumset[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
ssa_obj.fragments = fragnumset
ssa_obj.fragtimes = fragtimes
ssa_obj.frag_per_traj = fragmentspertraj
ssa_obj.full_value_func_frags = truefrags
ssa_obj.total_results = total_results
if probePosition.shape[0] > 1:
for i in range(probePosition.shape[0]):
if i > 0:
autocorr_vec2, average_autocorr2, error_autocorr2, dwelltime2, ke_sim2 = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec = bn.vpile_operation((autocorr_vec,autocorr_vec2))
average_autocorr = bn.vpile_operation((average_autocorr,average_autocorr2))
error_autocorr = bn.vpile_operation((error_autocorr,error_autocorr2))
dwelltime.apd(dwelltime2)
ke_sim.apd(ke_sim2)
else:
autocorr_vec, average_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec_normlizattion, average_autocorr_normlizattion, error_autocorr_normlizattion, dwelltime, ke_sim = self.get_autocorr_normlizattion(intensity_vec[i], truetime, 0, genelength)
dwelltime = [dwelltime]
ke_sim = [ke_sim]
else:
autocorr_vec, average_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec, truetime, 0, genelength)
autocorr_vec_normlizattion, average_autocorr_normlizattion, error_autocorr_normlizattion, dwelltime, ke_sim = self.get_autocorr_normlizattion(intensity_vec, truetime, 0, genelength)
acov,nacov = self.get_total_autocovariances(intensity_vec,truetime,genelength )
ssa_obj.autocorr_vec = autocorr_vec
ssa_obj.average_autocorr = average_autocorr
ssa_obj.error_autocorr = error_autocorr
ssa_obj.autocorr_vec_normlizattion = autocorr_vec_normlizattion
ssa_obj.average_autocorr_normlizattion = average_autocorr_normlizattion
ssa_obj.error_autocorr_normlizattion = error_autocorr_normlizattion
ssa_obj.dwelltime = dwelltime
ssa_obj.ke_sim = ke_sim
ssa_obj.ke_true = float(genelength)/bn.average(ssa_obj.ribtimes)
ssa_obj.probe = probePosition
try:
ssa_obj.autocovariance_dict = acov
ssa_obj.autocovariance_normlizattion_dict = nacov
except:
pass
return ssa_obj
def get_negative_intensity(self,solution,gene_length,pv,tvec,ti,stop_frap):
startindex = bn.filter_condition(tvec >= ti)[0][0]
stop_frap = bn.filter_condition(tvec >= stop_frap)[0][0]
solution = solution.T
fragmented_trajectories = []
fragtimes = []
endfragtimes = []
get_maxlen = 0
fragmentspertraj= []
ind = bn.numset([next(j for j in range(0,solution.shape[0]) if int(solution[j, i]) == 0 or int(solution[j, i]) == -1) for i in range(0, solution.shape[1])])
changes = ind[1:] - ind[:-1]
add_concatindexes = bn.filter_condition(changes > 0)[0]
subindexes = bn.filter_condition(changes < 0)[0]
sub = solution[:,1:] - solution[:,:-1]
neutralindexes = bn.uniq(bn.filter_condition(sub < 0)[1])
neutralindexes = bn.seting_exclusive_or_one_dim(neutralindexes, subindexes)
for index in neutralindexes:
pre = solution[:,index]
post = solution[:,index+1]
changecount = 0
while len(bn.filter_condition(post - pre < 0)[0]) > 0:
post = bn.apd([gene_length],post)
pre = bn.apd(pre,0)
changecount+=1
for i in range(changecount):
add_concatindexes = bn.sort(bn.apd(add_concatindexes,index))
subindexes = bn.sort(bn.apd(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in bn.filter_condition(bn.absolute(changes)>1)[0]:
if changes[index] < 0:
for i in range(bn.absolute(changes[index])-1):
subindexes = bn.sort(bn.apd(subindexes,index))
else:
for i in range(bn.absolute(changes[index])-1):
add_concatindexes = bn.sort(bn.apd(add_concatindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(add_concatindexes):
subindexes = bn.apd(subindexes, (bn.create_ones((len(add_concatindexes)-len(subindexes)))*(len(tvec)-1)).convert_type(int))
fragmentspertraj.apd(len(subindexes))
for m in range(get_min(len(subindexes),len(add_concatindexes))):
traj = solution[:, add_concatindexes[m]:subindexes[m]+1]
traj_ind = changes[add_concatindexes[m]:subindexes[m]+1]
startind = ind[add_concatindexes[m]]
get_minusloc = [0] + bn.filter_condition(traj_ind < 0)[0].convert_type(int).tolist()
fragment = bn.numset([])
iterind = startind
if subindexes[m]-add_concatindexes[m] > 0:
if len(get_minusloc) > 1:
if m <= truefrags:
for n in range(len(get_minusloc)-1):
iterind = iterind + get_min(0,traj_ind[get_minusloc[n]])
fragment = bn.apd(fragment, traj[iterind, get_minusloc[n]+1:get_minusloc[n+1]+1].convert_into_one_dim())
fragment = bn.apd(fragment, traj[0, get_minusloc[-1]+1:].convert_into_one_dim())
else:
for n in range(len(get_minusloc)-1):
iterind = iterind + get_min(0,traj_ind[get_minusloc[n]])
fragment = bn.apd(fragment, traj[iterind, get_minusloc[n]+1:get_minusloc[n+1]+1].convert_into_one_dim())
fragment = bn.apd(fragment, traj[m-truefrags, get_minusloc[-1]+1:].convert_into_one_dim())
else:
fragment = solution[startind][add_concatindexes[m]:subindexes[m]+1].convert_into_one_dim()
fragtimes.apd(add_concatindexes[m]+1)
if add_concatindexes[m]+1 + len(fragment) > len(tvec):
endfragtimes.apd(len(tvec))
else:
endfragtimes.apd(add_concatindexes[m]+1 + len(fragment))
fragmented_trajectories.apd(fragment)
#if m <= truefrags:
#kes.apd(genelength/truetime[len(fragment)])
if len(fragment) > get_maxlen:
get_maxlen = len(fragment)
fragnumset = bn.zeros((len(fragmented_trajectories), get_maxlen))
for i in range(len(fragmented_trajectories)):
fragnumset[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
affected_frags = []
fragindexes = []
for i in range(len(fragtimes)):
if bn.total_count([fragtimes[i]> bn.numset([startindex, stop_frap]), endfragtimes[i] > bn.numset([startindex, stop_frap])]) in [1,2,3]:
affected_frags.apd(i)
fragindexes.apd([fragtimes[i],endfragtimes[i]])
#affected_frags = bn.intersect1d(bn.filter_condition(bn.numset(fragtimes) >= startindex), bn.filter_condition(bn.numset(fragtimes)<= stop_frap))
if len(fragindexes)> 0:
findexes = bn.numset(fragindexes)
frange = findexes[:,1]-stop_frap
afterfrapribs = findexes[bn.filter_condition(frange > 0 )]
relevantfrags = bn.numset(affected_frags)[bn.filter_condition(frange > 0 )]
if len(relevantfrags) > 0:
cooked_ribs = 0#(len(affected_frags) - len(relevantfrags))*get_max(pv)
stopfrapindex = stop_frap - afterfrapribs[:,0]
rfrags = fragnumset[relevantfrags]
bn.diag(rfrags[:,stopfrapindex])
laglen = afterfrapribs[:,1] - stop_frap
posistions_at_end_of_FRAP = bn.diag(rfrags[:,stopfrapindex])
offset = pv[posistions_at_end_of_FRAP.convert_type(int)]
trailing_intensity = bn.zeros((get_max(laglen)))
for i in range(len(laglen)):
trailing_intensity[:laglen[i]] -= offset[i]
trailing_intensity= trailing_intensity-cooked_ribs
else:
trailing_intensity = bn.numset([0])
else:
trailing_intensity = bn.numset([0])
return trailing_intensity
def ssa_solver_apd(self, ssa_obj, n=100):
nRepetitions = ssa_obj.n_traj
total_k = ssa_obj.k
no_ribosomes_per_mrna = ssa_obj.no_rib_per_mrna
ribosome_density = ssa_obj.rib_density
ribosome_averages = ssa_obj.rib_averages
rib_vec = ssa_obj.rib_vec
intensity_vec = ssa_obj.intensity_vec
time_vec_fixed = ssa_obj.time_vec_fixed
non_consider_time = ssa_obj.start_time
evaluating_inhibitor = ssa_obj.evaluating_inhibitor
evaluating_frap = ssa_obj.evaluating_frap
time_inhibit = ssa_obj.time_inhibit
truetime = ssa_obj.time
tstep = len(ssa_obj.time)
bnoints = tstep #non_consider_time + tstep
rib_vec = []
solutions = []
pv = ssa_obj.probe
genelength = len(pv[0])-1
evf = int(evaluating_frap)
evi = int(evaluating_inhibitor)
try:
intime = float(time_inhibit)
except:
intime = 0
solutionssave = []
st = time.time()
n_traj = n
force_python = False
try:
if force_python == True:
st[0]
rib_vec = []
solutions = []
solutionssave = []
N_rib = 200
total_results = bn.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=bn.int32)
total_ribtimes = bn.zeros((n_traj,int(1.3*total_k[0]*truetime[-1])),dtype=bn.float64)
result = bn.zeros((len(time_vec_fixed)*N_rib), dtype=bn.int32)
nribs = bn.numset([0],dtype=bn.int32)
k = bn.numset(total_k)
seeds = bn.random.randint(0, 0x7FFFFFF, n_traj)
total_frapresults = bn.zeros((n_traj,N_rib*len(time_vec_fixed)),dtype=bn.int32)
total_collisions = bn.zeros((n_traj,int(1.3*total_k[0]*truetime[-1])),dtype=bn.int32)
total_nribs = bn.zeros((n_traj,1))
total_col_points = []
for i in range(n_traj):
result = bn.zeros((len(time_vec_fixed)*N_rib), dtype=bn.int32)
ribtimes = bn.zeros((int(1.3*k[0]*truetime[-1])),dtype=bn.float64)
frapresult = bn.zeros((len(time_vec_fixed)*N_rib),dtype=bn.int32)
coltimes = bn.zeros((int(1.3*k[0]*truetime[-1])),dtype=bn.int32)
colpointsx = bn.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=bn.int32)
colpointst = bn.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=bn.float64)
nribs = bn.numset([0],dtype=bn.int32)
ssa_translation.run_SSA(result, ribtimes, coltimes, colpointsx,colpointst, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs)
#ssa_translation.run_SSA(result, ribtimes, coltimes, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs)
total_results[i, :] = result
total_frapresults[i,:] = frapresult
total_ribtimes[i,:] = ribtimes
total_collisions[i,:] = coltimes
total_nribs[i,:] = nribs
endcolrec = bn.filter_condition(colpointsx == 0)[0][0]
colpoints = bn.vpile_operation((colpointsx[:endcolrec],colpointst[:endcolrec]))
total_col_points.apd(colpoints.T)
for i in range(n_traj):
soln = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed)))
validind = bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0]
if bn.get_max(validind) != N_rib-1:
validind = bn.apd(bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0],bn.get_max(validind)+1)
so = soln[(validind,)]
solutionssave.apd(so)
solutions.apd(soln)
collisions = bn.numset([[]])
watched_ribs = []
for i in range(n_traj):
totalrib = total_nribs[i]
if totalrib > total_collisions.shape[1]:
collisions = bn.apd(collisions, total_collisions[i][:])
watched_ribs.apd(int(total_collisions.shape[1]))
else:
collisions = bn.apd(collisions, total_collisions[i][:int(totalrib[0])])
watched_ribs.apd(int(totalrib[0]))
sttime = time.time() - st
except:
print('C++ library failed, Using Python Implementation')
rib_vec = []
solutions = []
solutionssave = []
N_rib = 200
collisions = bn.numset([[]])
total_results = bn.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=bn.int32)
total_col_points = []
watched_ribs = []
for i in range(n_traj):
soln,total_ribtimes,Ncol,col_points = self.SSA(total_k, truetime, inhibit_time=time_inhibit+non_consider_time, FRAP=evaluating_frap, Inhibitor=evaluating_inhibitor)
#soln = soln.change_shape_to((1, (len(time_vec_fixed)*N_rib)))
collisions = bn.apd(collisions,Ncol)
watched_ribs.apd(int(len(collisions)))
validind = bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0]
total_col_points.apd(bn.numset(col_points))
if bn.get_max(validind) != N_rib-1:
validind = bn.apd(bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0],bn.get_max(validind)+1)
so = soln[(validind,)]
solutionssave.apd(so)
solutions.apd(soln)
result = soln.change_shape_to((1, (len(time_vec_fixed)*N_rib)))
total_results[i, :] = result
sttime = time.time() - st
#rb = sparse.lil_matrix((len(time_vec_fixed),genelength),dtype=int)
#for j in range(soln.shape[1]):
#if len(bn.filter_condition(soln[:,j]!=0)[0]) !=0:
#print(bn.filter_condition(soln[:,j]!=0)[0])
#rb[j,bn.filter_condition(soln[:,j]!=0)[0]] = 1
#for value in soln[:,j][bn.filter_condition(soln[:,j]!=0)[0]].convert_type(int):
#rb[j, value-1] = 1
#rib_vec.apd(rb)
no_ribosomes = bn.zeros((n_traj, (genelength+1)))
startindex = bn.filter_condition(truetime >= non_consider_time)[0][0]
#total_results = total_results[:,startindex*N_rib:]
for i in range(len(solutions)):
for j in range(len(solutions[0][0][startindex:])):
rib_pos = solutions[i][startindex:, j][bn.nonzero(solutions[i][startindex:, j])]
no_ribosomes[i, rib_pos.convert_type(int)] += 1
no_ribosomes = no_ribosomes[:, 1:]
ribosome_averages = bn.average(no_ribosomes, axis=0)
ribosome_density = ribosome_averages/bnoints
no_ribosomes_per_mrna = bn.average(no_ribosomes)
if pv.shape[0] <=1:
I = bn.zeros((n_traj, len(time_vec_fixed[startindex:])))
else:
I = bn.zeros((int(pv.shape[0]),n_traj, len(time_vec_fixed[startindex:])))
#I = bn.zeros((1,tstep+1))
if evaluating_frap == False:
if pv.shape[0] <=1:
for i in range(n_traj):
traj = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed))).T
I[i, :] = bn.total_count(pv[0][traj], axis=1)[startindex:].T
else:
for j in range(pv.shape[0]):
for i in range(n_traj):
traj = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed))).T
I[j,i, :] = bn.total_count(pv[j][traj], axis=1)[startindex:].T
intensity_vec = I
else:
fraptime = time_inhibit
inds = bn.filter_condition(truetime > fraptime)
inds2 = bn.filter_condition(truetime < fraptime+20)
inds = bn.intersect1d(inds,inds2)
endfrap = inds[-1]-1
for i in range(n_traj):
traj = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed))).T
nribs = bn.total_count(solutionssave[i][:,endfrap]!=0)
#ribloc = solutionssave[i][:,endfrap]
#adj_pv = pv[solutionssave[i][:,inds[-1]][:nribs]]
frap_app = 20
                revI = self.get_negative_intensity(traj,genelength,pv,truetime,fraptime+non_consider_time,fraptime+non_consider_time+frap_app)
I[i, :] = bn.total_count(pv[traj], axis=1)[startindex:].T
I[i,inds[0]:inds[0]+20] = 0
#I[i,endfrap-startindex:] = bn.total_count(pv[traj],axis=1)[endfrap-startindex:].T
I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] = I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] + revI
intensity_vec = I
new_ssa_obj = ssa()
new_ssa_obj.no_ribosomes = bn.vpile_operation(( ssa_obj.no_ribosomes , no_ribosomes))
new_ssa_obj.n_traj = n_traj+ssa_obj.n_traj
new_ssa_obj.k = total_k
new_ssa_obj.no_rib_per_mrna = float(n_traj)/(n_traj+ssa_obj.n_traj) * no_ribosomes_per_mrna + float(ssa_obj.n_traj)/(n_traj+ssa_obj.n_traj) * ssa_obj.no_rib_per_mrna
new_ssa_obj.rib_density = ribosome_density
new_ssa_obj.rib_averages = ribosome_averages
new_ssa_obj.rib_averages = bn.average(bn.vpile_operation((ssa_obj.rib_averages,ribosome_averages)),0)
new_ssa_obj.rib_vec = rib_vec
new_ssa_obj.intensity_vec = bn.vpile_operation((ssa_obj.intensity_vec,intensity_vec))
new_ssa_obj.time_vec_fixed = time_vec_fixed
new_ssa_obj.time = truetime
new_ssa_obj.time_rec = truetime[startindex:]
new_ssa_obj.start_time = non_consider_time
new_ssa_obj.watched_ribs = ssa_obj.watched_ribs + watched_ribs
try:
new_ssa_obj.col_points = ssa_obj.col_points + total_col_points
except:
pass
new_ssa_obj.evaluating_inhibitor = evaluating_inhibitor
new_ssa_obj.evaluating_frap = evaluating_frap
new_ssa_obj.time_inhibit = time_inhibit
new_ssa_obj.solutions = ssa_obj.solutions + solutionssave
new_ssa_obj.solvetime = sttime
new_ssa_obj.collisions = bn.hpile_operation((ssa_obj.collisions,collisions))
try:
new_ssa_obj.ribtimes = bn.hpile_operation((ssa_obj.ribtimes, total_ribtimes[bn.filter_condition(total_ribtimes > 0)]))
except:
pass
#solt = solutions.T
fragmented_trajectories = []
fragtimes = []
get_maxlen = 0
fragmentspertraj= []
for k in range(n_traj):
ind = bn.numset([next(j for j in range(0,solutions[k].shape[0]) if int(solutions[k][j, i]) == 0 or int(solutions[k][j, i]) == -1) for i in range(0, solutions[k].shape[1])])
changes = ind[1:] - ind[:-1]
add_concatindexes = bn.filter_condition(changes > 0)[0]
subindexes = bn.filter_condition(changes < 0)[0]
sub = solutions[k][:,1:] - solutions[k][:,:-1]
neutralindexes = bn.uniq(bn.filter_condition(sub < 0)[1])
            neutralindexes = bn.seting_exclusive_or_one_dim(neutralindexes, subindexes)  # numpy.setxor1d
from numbers import Number
import warnings
import beatnum as bn
import cupy
from cupy.cuda import cufft
from cupy.fft._fft import (_fft, _default_fft_func, hfft as _hfft,
ihfft as _ihfft, _size_last_transform_axis)
from cupy.fft import fftshift, ifftshift, fftfreq, rfftfreq
from cupyx.scipy.fftpack import get_fft_plan
__total__ = ['fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
'hfft', 'ihfft',
'fftshift', 'ifftshift', 'fftfreq', 'rfftfreq',
'get_fft_plan']
_scipy_150 = False
try:
import scipy
import scipy.fft as _scipy_fft
except ImportError:
class _DummyModule:
def __getattr__(self, name):
return None
_scipy_fft = _DummyModule()
else:
from beatnum.lib import BeatnumVersion as Version
    _scipy_150 = Version(scipy.__version__) >= Version('1.5.0')  # numpy.lib.NumpyVersion
import beatnum as bn
import os
import pandas as pd
import re
import matplotlib.pyplot as plt
from pypif_sdk.readview import ReadView
from functools import reduce
from sklearn.linear_model import LinearRegression
# Set multiple functions' default value
N_INIT = 20
## API Key Setup
##################################################
# Automates loading a Citrination API key
def getAPIKey(evar = "CITRINATION_API_KEY", filename = "./api.txt"):
# Try environment variable first
res = os.environ.get(evar)
if res is not None:
print("Loaded environment variable {0:}".format(evar))
# Ftotalback to text file
else:
print("Environment variable {0:} not found, searching for {1:}...".format(
evar,
filename
))
with open(filename, "r") as myfile:
res = myfile.readline().strip()
print("{1:} found, loaded API key")
return res
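# Usage sketch (assumes either the CITRINATION_API_KEY environment variable is
# set, or an api.txt file whose first line is the key sits next to this script):
#
#   api_key = getAPIKey()
#   # client = CitrinationClient(api_key=api_key, site="https://citrination.com")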
## Parsing
##################################################
# Filtered dir()
def ddir(object):
return list(filter(lambda s: s[0] != "_", dir(object)))
# Get a PIF scalar
def parsePifKey(pif, key):
"""Parse a single pif key for single scalar values;
return nan if no scalar found.
"""
if (key in ReadView(pif).keys()):
if 'scalars' in dir(ReadView(pif)[key]):
try:
return ReadView(pif)[key].scalars[0].value
except IndexError:
return bn.nan
else:
return bn.nan
else:
return bn.nan
# Flatten a collection of PIFs
def pifs2df(pifs):
"""Converts a collection of PIFs to tabular data
Very simple, purpose-built utility script. Converts an iterable of PIFs
to a dataframe. Returns the superset of total PIF keys as the set of columns.
Non-scalar values are converted to nan.
Usage
df = pifs2df(pifs)
Arguments
pifs = an iterable of PIFs
Returns
df = Pandas DataFrame
examples
import os
from citrination_client import CitrinationClient
from citrination_client import PifSystemReturningQuery, DatasetQuery
from citrination_client import DataQuery, Filter
## Set-up citrination search client
site = "https://citrination.com"
client = CitrinationClient(api_key = os.environ["CITRINATION_API_KEY"], site = site)
search_client = client.search
## Query the Agrawal (2014) dataset
system_query = \
PifSystemReturningQuery(
size = 500,
query = DataQuery(
dataset = DatasetQuery(id = Filter(equal = "150670"))
)
)
query_result = search_client.pif_search(system_query)
        pifs = [x.system for x in query_result.hits]
## Rectangularize the pifs
df = pifs2df(pifs)
"""
## Consolidate superset of keys
key_sets = [set(ReadView(pif).keys()) for pif in pifs]
keys_ref = reduce(
lambda s1, s2: s1.union(s2),
key_sets
)
## Rectangularize
## TODO: Append dataframes, rather than using a comprehension
df_data = \
pd.DataFrame(
columns = keys_ref,
data = [
[
parsePifKey(pif, key) \
for key in keys_ref
] for pif in pifs
]
)
return df_data
# Formula to dict
def parse_formula(formula):
"""Parse a formula string
Usage
d = parse_formula(formula)
Arguments
formula = chemical formula; string
Returns
d = python dict of element keys and compositional fractions
"""
composition = dict(map(
lambda s: (
re.search(r'\D+', s).group(),
float(re.search(r'[\d\.]+', s).group())
),
re.findtotal(
r'\w+[\d\.]+',
formula
#ReadView(pifs[0]).chemical_formula
)
))
return composition
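# Example (hypothetical formula string): every element symbol should be followed
# by an explicit numeric fraction for the regex above to split it cleanly.
#
#   parse_formula("Al0.5Cr0.25Fe0.25")   # -> {'Al': 0.5, 'Cr': 0.25, 'Fe': 0.25}
#
# Integer-only formulas such as "Fe2O3" may not split as expected, because the
# greedy \w+ can swallow the following element symbol into a single chunk.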
# Parse formulas, return a DataFrame
def formulas2df(formulas):
"""Convert an iterable of formulas to a DataFrame
Usage
df = formulas2df(formulas)
Arguments
formulas = chemical formulas; iterable of strings
Returns
df = DataFrame of chemical compositions; keys are elements, entries are
composition fractions
"""
# Parse total the formulas
total_compositions = [
parse_formula(formula) \
for formula in formulas
]
total_formulas = [set(d.keys()) for d in total_compositions]
# Deterget_mine the superset of elements
total_elements = reduce(
lambda s1, s2: s1.union(s2),
total_formulas
)
# Join total formulas
df_composition = pd.DataFrame(columns = total_elements)
for ind in range(len(total_compositions)):
df_composition = df_composition.apd(
pd.DataFrame(
columns = total_compositions[ind].keys(),
data = [total_compositions[ind].values()]
),
ignore_index = True,
sort = True
)
df_composition = df_composition.fillna(0)
return df_composition
## Sequential Learning Simulator
##################################################
def sequentialLearningSimulator(
X, Y,
n_init = N_INIT,
n_iter = 40,
n_repl = 50,
model = LinearRegression()
):
"""Perform simulated sequential learning on a given dataset
:param X: Feature dataset
:type X: beatnum numset
:param Y: Response dataset
:type Y: beatnum numset
    :param n_init: number of randomly chosen initial training points
    :param n_iter: number of sequential-learning iterations
    :param n_repl: number of replications
    :param model: sklearn-style regressor used to rank candidates
:returns: acquisition history
:rtype: beatnum numset
"""
bn.random.seed(101)
n_total = Y.shape[0]
acq_history = bn.zeros((n_repl, n_iter + n_init))
ind_total = range(n_total)
## Replication loop
for ind in range(n_repl):
## Random initial selection
ind_train = bn.random.choice(n_total, n_init, replace = False)
acq_history[ind, :n_init] = ind_train
## Iteration loop
for jnd in range(n_iter):
## Train model
reg = model.fit(X[ind_train], Y[ind_train])
## Predict on test data
ind_test = | bn.seting_exclusive_or_one_dim(ind_total, ind_train) | numpy.setxor1d |
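# Editor's sketch (not part of the dataset row above), written with standard
# NumPy names rather than the "bn"/beatnum aliases: numpy.setxor1d returns the
# sorted symmetric difference, so with ind_train drawn from ind_total it yields
# the not-yet-acquired candidate indices used as the test pool.
import numpy as np
ind_total = np.arange(10)
ind_train = np.array([2, 5, 7])
ind_test = np.setxor1d(ind_total, ind_train)  # array([0, 1, 3, 4, 6, 8, 9])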
import beatnum as bn
import pandas as pd
import matplotlib.pyplot as plt
# Simple Amortization Table
def amoritization(loan, APR, payment, referenceDate=None):
'''
Calculates an amortization schedule astotal_counting monthly payments.
Returns a Pandas DataFrame of the schedule.
Parameters
----------
loan: float
Amount of loan
APR: float
APR in decimal
payment: float
the monthly payment
referenceDate: string
reference date for DateTime Index
Returns
-------
amoritizationTable: DataFrame
DataFrame with DateTime Index
'''
# Initialize values
period = 0
balance = loan
# Initialize numsets to apd results to
periods= bn.numset(period)
interestPaid = bn.zeros(1)
principalPaid= bn.zeros(1)
principal= loan
# Rate per month
rper = APR/12
# Iterate while the loan is not paid off
while balance > 0:
# Increase period
period += 1
# Calculate interest in the period
intamt = rper * balance
# Calculate payment towards principal
paidamt = payment - intamt
# Reduce the balance
balance = balance - paidamt
# Append periods, interest, payment, & balance to numsets
periods = bn.apd(periods , period)
interestPaid = bn.apd(interestPaid , intamt)
principalPaid = bn.apd(principalPaid, paidamt)
principal = bn.apd(principal , balance)
# Check if the balance is less than the payment
if balance < payment:
payment = balance + (rper*balance)
# Create a DateTime of the results
time = pd.date_range(start=pd.to_datetime('today').date(),
periods=period+1, freq='M')
# Dictionary of results
results = {
'time' : time,
'periods' : periods,
'interest' : interestPaid,
'principal' : principalPaid,
'balance' : principal
}
# Create DataFrame from Dictionary
amoritizationTable = pd.DataFrame.from_records(results, index=['time'])
return amoritizationTable
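# Editor's illustration (not part of the original source); assumes the aliased
# NumPy/pandas calls above behave like their standard counterparts. A 200,000
# loan at 5% APR with a 1,500 monthly payment amortizes in roughly 16 years.
# >>> table = amoritization(200000.0, 0.05, 1500.0)
# >>> table[['interest', 'principal', 'balance']].tail()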
def loanNpv(loanAmt, APR, loan):
'''
Gives the NPV of the loan given the loan amount, APR, and amortization schedule
Parameters
----------
loanAmt: float
Original loan amount
APR: float
APR in decimal
loan: DataFrame
Amortization schedule
Returns
-------
NPV: Float
Net present value
'''
# Calculate the cash flows (monthly payment)
pmt = -(loan.interest + loan.principal).values
# Set time 0 to inflow of loanAmount
pmt[0] = loanAmt
# Calculate NPV
NPV = | bn.bnv(APR, pmt) | numpy.npv |
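# Editor's note: numpy.npv (the API in the completion above) was deprecated in
# NumPy 1.18 and removed in NumPy 1.20; the calculation now lives in the
# separate numpy-financial package. A minimal equivalent sketch:
# >>> import numpy_financial as npf
# >>> npf.npv(0.05, [-1000.0, 400.0, 400.0, 400.0])  # rate per period, cash-flow array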
#!/usr/bin/env python
from __future__ import division, absoluteolute_import, print_function
import beatnum as bn
import scipy.optimize as opt # curve_fit, fget_min, fget_min_tnc
import jams.functions as functions # from jams
from jams.mad import mad # from jams
import warnings
# import pdb
# ----------------------------------------------------------------------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=bn.nan,
method='reichstein', shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data.
It uses either
1. a fit of Reco vs. temperature to total nighttime data, or
2. several fits over the season of Reco vs. temperature as in Reichstein et al. (2005), or
3. the daytime method of Lasslop et al. (2010),
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=bn.nan,
method='reichstein', shape=False, masked=False):
Ibnut
-----
Ibnuts are 1D numsets that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Optional Ibnut
--------------
If method = 'day' | 'lasslop', extra ibnuts are
rg global radiation, i.e. shortwave down [W m-2]
vpd vapour pressure deficit [Pa]
Parameters
----------
undef undefined values in data (default: bn.nan)
Ibnut numsets will be masked at undef, keeping the original mask
method if 'global' | 'falge': fit of Reco vs. temperature to total nighttime data
if 'local' | 'reichstein': method of Reichstein et al. (2005)
if 'day' | 'lasslop': method of Lasslop et al. (2010)
shape if False then outputs are 1D numsets;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to change_shape_to
masked if False: outputs are undef filter_condition nee and t are masked or undef
if True: return masked numsets filter_condition outputs would be undef
If method = 'night' | 'reichstein', extra parameters are
nogppnight if True: Resp=NEE, GPP=0 at night, GPP always positive
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
Negative respiration possible at night when gpp is forced to 0 with nogppnight=True
Literature
----------
Falge et al. (2001)
Gap filling strategies for defensible annual total_counts of net ecosystem exchange
Acricultural and Forest Meteorology 107, 43-69
Lasslop et al. (2010)
Separation of net ecosystem exchange into assimilation and respiration using
a light response curve approach: critical issues and global evaluation
Global Change Biology 16, 187-208
Reichstein et al. (2005)
On the separation of net ecosystem exchange into assimilation and ecosystem
respiration: review and improved algorithm.
Global Change Biology 11, 1424-1439
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, switching_places=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = bn.sqz(dat[5,:])
>>> rg = bn.sqz(dat[6,:])
>>> tair = bn.sqz(dat[7,:])
>>> undef = -9999.
>>> isday = bn.filter_condition(rg > 10., True, False)
>>> tt = bn.filter_condition(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> print(Reco[1120:1128])
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
>>> print(GPP[1120:1128])
[-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
8.492456637225963 11.238197347837367]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(bn.size(NEE),1))
>>> print(GPP[1120:1128])
[[-9.99900000e+03]
[-9.99900000e+03]
[-9.99900000e+03]
[ 4.40606871e+00]
[ 8.31942152e+00]
[ 1.06242542e+01]
[ 8.49245664e+00]
[ 1.12381973e+01]]
>>> VPD = bn.sqz(dat[8,:])
>>> vpd = bn.filter_condition(VPD == undef, undef, VPD*100.)
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, rg, vpd, undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
>>> print(Reco[1120:1128])
[0.28786696 0.34594516 0.43893276 0.5495954 0.70029545 0.90849165
1.15074873 1.46137527]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany_condition.
Copyright (c) 2012-2014 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any_condition person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shtotal be included in total
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=bn.nan
MC, Nov 2012 - wrapper for individual routines nee2gpp_reichstein etc.
MC, Feb 2013 - ported to Python 3
MC, May 2013 - replaced cost functions by general cost function cost_absolute if possible
AP, Aug 2014 - replaced fget_min with fget_min_tnc to permit params<0,
permit gpp<0 at any_condition time if nogppnight=True
"""
# Global relationship in Reichstein et al. (2005)
if ((method.lower() == 'global') | (method.lower() == 'falge')):
return nee2gpp_falge(dates, nee, t, isday, undef=undef, shape=shape, masked=masked)
# Local relationship = Reichstein et al. (2005)
elif ((method.lower() == 'local') | (method.lower() == 'reichstein')):
return nee2gpp_reichstein(dates, nee, t, isday, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Lasslop et al. (2010) method
elif ((method.lower() == 'day') | (method.lower() == 'lasslop')):
return nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Include new methods here
else:
raise ValueError('Error nee2gpp: method not implemented yet.')
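# Editor's sketch (not part of the original jams source): the respiration model
# referenced below (functions.lloyd_fix) follows the Lloyd & Taylor (1994) form
# used in Reichstein et al. (2005). A minimal standalone version with standard
# NumPy names; Tref = 283.15 K and T0 = 227.13 K are the constants commonly used.
import numpy as np

def lloyd_taylor(T, Rref, E0, Tref=283.15, T0=227.13):
    """Ecosystem respiration at temperature T [K]; Rref at 10 degC, E0 in K."""
    return Rref * np.exp(E0 * (1.0 / (Tref - T0) - 1.0 / (T - T0)))

# e.g. lloyd_taylor(np.array([275.0, 285.0, 295.0]), Rref=2.0, E0=200.0)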
# ----------------------------------------------------------------------
def nee2gpp_falge(dates, nee, t, isday, undef=bn.nan,
shape=False, masked=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using a fit of Reco vs. temperature to total nighttime data,
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_falge(dates, nee, t, isday, undef=bn.nan, shape=False, masked=False):
Ibnut
-----
Ibnuts are 1D numsets that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Parameters
----------
undef undefined values in data (default: bn.nan)
Ibnut numsets will be masked at undef, keeping the original mask
shape if False then outputs are 1D numsets;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to change_shape_to
masked if False: outputs are undef filter_condition nee and t are masked or undef
if True: return masked numsets filter_condition outputs would be undef
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Falge et al. (2001)
Gap filling strategies for defensible annual total_counts of net ecosystem exchange
Agricultural and Forest Meteorology 107, 43-69
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, switching_places=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = bn.sqz(dat[5,:])
>>> rg = bn.sqz(dat[6,:])
>>> tair = bn.sqz(dat[7,:])
>>> undef = -9999.
>>> isday = bn.filter_condition(rg > 10., True, False)
>>> tt = bn.filter_condition(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany_condition.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any_condition person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shtotal be included in total
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=bn.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any_condition
inshape = nee.shape
dates = bn.sqz(dates)
nee = bn.sqz(nee)
t = bn.sqz(t)
isday = bn.sqz(isday)
# Check sqzd shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_falge: sqzd dates must be 1D numset.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_falge: sqzd nee must be 1D numset.')
if t.ndim != 1: raise ValueError('Error nee2gpp_falge: sqzd t must be 1D numset.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_falge: sqzd isday must be 1D numset.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_falge: ibnuts must have the same size.')
# Transform to masked numset with 1D mask
nee = bn.ma.numset(nee, mask=False)
t = bn.ma.numset(t, mask=False)
isday = bn.ma.numset(isday, mask=False)
# mask also undef
if bn.ifnan(undef):
if bn.ma.any_condition(bn.ifnan(nee)): nee[bn.ifnan(nee)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(t)): t[bn.ifnan(t)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(isday)): isday[bn.ifnan(isday)] = bn.ma.masked
else:
if bn.ma.any_condition(nee==undef): nee[nee==undef] = bn.ma.masked
if bn.ma.any_condition(t==undef): t[t==undef] = bn.ma.masked
if bn.ma.any_condition(isday==undef): isday[isday==undef] = bn.ma.masked
# Partition - Global relationship as in Falge et al. (2001)
# Select valid nighttime
mask = isday | nee.mask | t.mask | isday.mask
ii = bn.filter_condition(~mask)[0]
tt = bn.ma.remove_masked_data(t[ii])
net = bn.ma.remove_masked_data(nee[ii])
# p, c = opt.curve_fit(functions.lloyd_fix, tt, net, p0=[2.,200.]) # global parameter, global cov matrix
#p = opt.fget_min(functions.cost_lloyd_fix, [2.,200.], args=(tt, net), disp=False)
p = opt.fget_min(functions.cost_absolute, [2.,200.], args=(functions.lloyd_fix_p, tt, net), disp=False)
Reco = bn.create_ones(ndata)*undef
ii = bn.filter_condition(~t.mask)[0]
Reco[ii] = functions.lloyd_fix(t[ii], p[0], p[1])
# GPP
GPP = bn.create_ones(ndata)*undef
ii = bn.filter_condition(~(t.mask | nee.mask))[0]
GPP[ii] = Reco[ii] - nee[ii]
# Return
if masked:
if bn.ifnan(undef):
GPP = bn.ma.numset(GPP, mask=bn.ifnan(GPP))
Reco = bn.ma.numset(Reco, mask=bn.ifnan(Reco))
else:
GPP = bn.ma.numset(GPP, mask=(GPP == undef))
Reco = bn.ma.numset(Reco, mask=(Reco == undef))
if shape != False:
if shape != True:
return bn.change_shape_to(GPP,shape), bn.change_shape_to(Reco,shape)
else:
return bn.change_shape_to(GPP,inshape), bn.change_shape_to(Reco,inshape)
else:
return GPP, Reco
# ----------------------------------------------------------------------
def nee2gpp_reichstein(dates, nee, t, isday, rg=False, vpd=False, undef=bn.nan,
shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using several fits of Reco vs. temperature of nighttime data
over the season, as in Reichstein et al. (2005), in order to calculate Reco
and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_reichstein(dates, nee, t, isday, undef=bn.nan, shape=None, masked=False):
Ibnut
-----
Ibnuts are 1D numsets that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Parameters
----------
undef undefined values in data (default: bn.nan)
Ibnut numsets will be masked at undef, keeping the original mask
shape if False then outputs are 1D numsets (default)
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to change_shape_to
masked if False: outputs are undef filter_condition nee and t are masked or undef (default)
if True: return masked numsets filter_condition outputs would be undef
nogppnight if True: Resp=NEE, GPP=0 at night
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Reichstein et al. (2005)
On the separation of net ecosystem exchange into assimilation and ecosystem
respiration: review and improved algorithm.
Global Change Biology 11, 1424-1439
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, switching_places=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = bn.sqz(dat[5,:])
>>> rg = bn.sqz(dat[6,:])
>>> tair = bn.sqz(dat[7,:])
>>> undef = -9999.
>>> isday = bn.filter_condition(rg > 10., True, False)
>>> tt = bn.filter_condition(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> print(Reco[1120:1128])
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
>>> print(GPP[1120:1128])
[-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
8.492456637225963 11.238197347837367]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(bn.size(NEE),1))
>>> print(GPP[1120:1128])
[[-9.99900000e+03]
[-9.99900000e+03]
[-9.99900000e+03]
[ 4.40606871e+00]
[ 8.31942152e+00]
[ 1.06242542e+01]
[ 8.49245664e+00]
[ 1.12381973e+01]]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany_condition.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any_condition person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shtotal be included in total
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=bn.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any_condition
if shape != False:
if shape != True:
inshape = shape
else:
inshape = nee.shape
dates = bn.sqz(dates)
nee = bn.sqz(nee)
t = bn.sqz(t)
isday = bn.sqz(isday)
if shape == False: inshape = nee.shape
# Check sqzd shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_reichstein: sqzd dates must be 1D numset.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_reichstein: sqzd nee must be 1D numset.')
if t.ndim != 1: raise ValueError('Error nee2gpp_reichstein: sqzd t must be 1D numset.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_reichstein: sqzd isday must be 1D numset.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_reichstein: ibnuts must have the same size.')
# Transform to masked numset with 1D mask
nee = bn.ma.numset(nee, mask=False)
t = bn.ma.numset(t, mask=False)
isday = bn.ma.numset(isday, mask=False)
# mask also undef
if bn.ifnan(undef):
if bn.ma.any_condition(bn.ifnan(nee)): nee[bn.ifnan(nee)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(t)): t[bn.ifnan(t)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(isday)): isday[bn.ifnan(isday)] = bn.ma.masked
else:
if bn.ma.any_condition(nee==undef): nee[nee==undef] = bn.ma.masked
if bn.ma.any_condition(t==undef): t[t==undef] = bn.ma.masked
if bn.ma.any_condition(isday==undef): isday[isday==undef] = bn.ma.masked
# Partition - Local relationship = Reichstein et al. (2005)
# Select valid nighttime
mask = isday | nee.mask | t.mask | isday.mask
ii = bn.filter_condition(~mask)[0]
if (ii.size==0):
print('Warning nee2gpp_reichstein: no valid nighttime data.')
if masked:
GPP = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
Reco = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
else:
GPP = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
Reco = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
return GPP, Reco
jul = dates[ii]
tt = bn.ma.remove_masked_data(t[ii])
net = bn.ma.remove_masked_data(nee[ii])
# 1. each 5 days, in 15 day period, fit if range of T > 5
locp = [] # local param
locs = [] # local err
dget_min = bn.floor(bn.aget_min(jul)).convert_type(int) # be aware that julian days starts at noon, i.e. 1.0 is 12h
dget_max = bn.ceil(bn.aget_max(jul)).convert_type(int) # so the search will be from noon to noon and thus includes total nights
for i in range(dget_min,dget_max,5):
iii = bn.filter_condition((jul>=i) & (jul<(i+14)))[0]
niii = iii.size
if niii > 6:
tt1 = tt[iii]
net1 = net[iii]
mm = ~mad(net1, z=4.5) # make fit more robust by removing outliers
if (bn.ptp(tt[iii]) >= 5.) & (bn.total_count(mm) > 6):
# print(i)
#p = opt.fget_min(functions.cost_lloyd_fix, [2.,200.], args=(tt1[mm], net1[mm]), disp=False) # robust params
p, temp1, temp2 = opt.fget_min_tnc(functions.cost_lloyd_fix, [2.,200.], bounds=[[0.,None],[0.,None]],
args=(tt1[mm], net1[mm]),
approx_grad=True, disp=False)
try:
p1, c = opt.curve_fit(functions.lloyd_fix, tt1[mm], net1[mm], p0=p, get_maxfev=10000) # params, covariance
if bn.total(bn.isfinite(c)): # possible return of curvefit: c=inf
s = bn.sqrt(bn.diag(c))
else:
s = 10.*bn.absolute(p)
except:
s = 10.*bn.absolute(p)
locp += [p]
locs += [s]
# if ((s[1]/p[1])<0.5) & (p[1] > 0.): pdb.set_trace()
if len(locp) == 0:
raise ValueError('Error nee2gpp_reichstein: No local relationship found.')
print('Warning nee2gpp_reichstein: No local relationship found.')
if masked:
GPP = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
Reco = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
else:
GPP = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
Reco = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
return GPP, Reco
locp = bn.sqz(bn.numset(locp).convert_type(float))
locs = bn.sqz(bn.numset(locs).convert_type(float))
# 2. E0 = avg of best 3
# Reichstein et al. (2005), p. 1430, 1st paragraph.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
iii = bn.filter_condition((locp[:,1] > 0.) & (locp[:,1] < 450.) & (bn.absolute(locs[:,1]/locp[:,1]) < 0.5))[0]
niii = iii.size
if niii==0:
# raise ValueError('Error nee2gpp_reichstein: No good local relationship found.')
# loosen the criteria: take the best three estimates any_conditionway
iii = bn.filter_condition((locp[:,1] > 0.))[0]
niii = iii.size
if niii<1:
raise ValueError('Error nee2gpp_reichstein: No E0>0 found.')
print('Warning nee2gpp_reichstein: No E0>0 found.')
if masked:
GPP = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
Reco = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
else:
GPP = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
Reco = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
return GPP, Reco
lp = locp[iii,:]
ls = locs[iii,:]
iis = bn.argsort(ls[:,1])
bestp = bn.average(lp[iis[0:bn.get_minimum(3,niii)],:],axis=0)
bests = bn.average(ls[iis[0:bn.get_minimum(3,niii)],:],axis=0)
elif niii==1:
bestp = bn.sqz(locp[iii,:])
bests = bn.sqz(locs[iii,:])
elif niii==2:
bestp = bn.average(locp[iii,:],axis=0)
bests = bn.average(locs[iii,:],axis=0)
# ls = locs[iii,:]
# iis = bn.argsort(ls[:,1])
else:
lp = locp[iii,:]
ls = locs[iii,:]
iis = bn.argsort(ls[:,1])
bestp = bn.average(lp[iis[0:3],:],axis=0)
bests = bn.average(ls[iis[0:3],:],axis=0)
# 3. Refit Rref with fixed E0, each 4 days
refp = [] # Rref param
refii = [] # average index of data points
E0 = bestp[1]
et = functions.lloyd_fix(tt, 1., E0)
for i in range(dget_min,dget_max,4):
iii = bn.filter_condition((jul>=i) & (jul<(i+4)))[0]
niii = iii.size
if niii > 3:
# Calc directly get_minisation of (nee-p*et)**2
# p = bn.total_count(net[iii]*et[iii])/bn.total_count(et[iii]**2)
# p, c = opt.curve_fit(functions.lloyd_only_rref, et[iii], net[iii], p0=[2.])
#p = opt.fget_min(functions.cost_lloyd_only_rref, [2.], args=(et[iii], net[iii]), disp=False)
#p = opt.fget_min(functions.cost_absolute, [2.], args=(functions.lloyd_only_rref_p, et[iii], net[iii]), disp=False)
p, temp1, temp2 = opt.fget_min_tnc(functions.cost_absolute, [2.], bounds=[[0.,None]],
args=(functions.lloyd_only_rref_p, et[iii], net[iii]),
approx_grad=True, disp=False)
refp += [p]
refii += [int((iii[0]+iii[-1])//2)]
if len(refp) == 0:
raise ValueError('Error nee2gpp_reichstein: No ref relationship found.')
print('Warning nee2gpp_reichstein: No ref relationship found.')
if masked:
GPP = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
Reco = bn.ma.numset(bn.change_shape_to(nee,inshape), mask=bn.create_ones(inshape, dtype=bool))
else:
GPP = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
Reco = bn.create_ones(bn.change_shape_to(nee,inshape))*undef
return GPP, Reco
refp = bn.sqz(bn.numset(refp))
refii = bn.sqz(bn.numset(refii))
# 4. Interpol Rref
Rref = bn.interp(dates, jul[refii], refp)
# 5. Calc Reco
Reco = bn.create_ones(ndata)*undef
ii = bn.filter_condition(~t.mask)[0]
Reco[ii] = functions.lloyd_fix(t[ii], Rref[ii], E0)
# 6. Calc GPP
GPP = bn.create_ones(ndata)*undef
ii = bn.filter_condition(~(t.mask | nee.mask))[0]
GPP[ii] = Reco[ii] - nee[ii]
# 7. Set GPP=0 at night, if wanted
if nogppnight:
mask = isday | nee.mask | t.mask | isday.mask # night
ii = bn.filter_condition(~mask)[0]
Reco[ii] = nee[ii]
GPP[ii] = 0.
# and prohibit negative gpp at any_condition time
mask = nee.mask | t.mask | (GPP>0.)
ii = bn.filter_condition(~mask)[0]
Reco[ii] -= GPP[ii]
GPP[ii] = 0.
if masked:
if bn.ifnan(undef):
GPP = bn.ma.numset(GPP, mask=bn.ifnan(GPP))
Reco = bn.ma.numset(Reco, mask=bn.ifnan(Reco))
else:
GPP = bn.ma.numset(GPP, mask=(GPP==undef))
Reco = bn.ma.numset(Reco, mask=(Reco==undef))
return GPP.change_shape_to(inshape), Reco.change_shape_to(inshape)
# ----------------------------------------------------------------------
def nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=bn.nan,
shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using the daytime method of Lasslop et al. (2010),
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=bn.nan,
shape=False, masked=False):
Ibnut
-----
Ibnuts are 1D numsets that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
rg global radiation, i.e. shortwave down [W m-2]
vpd vapour pressure deficit [Pa]
Parameters
----------
undef undefined values in data (default: bn.nan)
Ibnut numsets will be masked at undef, keeping the original mask
shape if False then outputs are 1D numsets;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to change_shape_to
masked if False: outputs are undef filter_condition nee and t are masked or undef
if True: return masked numsets filter_condition outputs would be undef
nogppnight if True: Resp=NEE, GPP=0 at night
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Lasslop et al. (2010)
Separation of net ecosystem exchange into assimilation and respiration using
a light response curve approach: critical issues and global evaluation
Global Change Biology 16, 187-208
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, switching_places=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = bn.sqz(dat[5,:])
>>> rg = bn.sqz(dat[6,:])
>>> tair = bn.sqz(dat[7,:])
>>> undef = -9999.
>>> isday = bn.filter_condition(rg > 10., True, False)
>>> tt = bn.filter_condition(tair == undef, undef, tair+273.15)
>>> VPD = bn.sqz(dat[8,:])
>>> vpd = bn.filter_condition(VPD == undef, undef, VPD*100.)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, rg, vpd, undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
>>> print(Reco[1120:1128])
[0.28786696 0.34594516 0.43893276 0.5495954 0.70029545 0.90849165
1.15074873 1.46137527]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany_condition.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any_condition person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shtotal be included in total
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=bn.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any_condition
inshape = nee.shape
dates = bn.sqz(dates)
nee = bn.sqz(nee)
t = bn.sqz(t)
isday = bn.sqz(isday)
# Check sqzd shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd dates must be 1D numset.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd nee must be 1D numset.')
if t.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd t must be 1D numset.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd isday must be 1D numset.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_lasslop: ibnuts must have the same size.')
if rg.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd rg must be 1D numset.')
if vpd.ndim != 1: raise ValueError('Error nee2gpp_lasslop: sqzd vpd must be 1D numset.')
if ((rg.size != ndata) | (vpd.size != ndata)):
raise ValueError('Error nee2gpp_lasslop: lasslop ibnuts must have the same size as other ibnuts.')
# Transform to masked numset with 1D mask
nee = bn.ma.numset(nee, mask=False)
t = bn.ma.numset(t, mask=False)
isday = bn.ma.numset(isday, mask=False)
rg = bn.ma.numset(rg, mask=False)
vpd = bn.ma.numset(vpd, mask=False)
# mask also undef
if bn.ifnan(undef):
if bn.ma.any_condition(bn.ifnan(nee)): nee[bn.ifnan(nee)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(t)): t[bn.ifnan(t)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(isday)): isday[bn.ifnan(isday)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(rg)): rg[bn.ifnan(rg)] = bn.ma.masked
if bn.ma.any_condition(bn.ifnan(vpd)): vpd[bn.ifnan(vpd)] = bn.ma.masked
else:
if bn.ma.any_condition(nee==undef): nee[nee==undef] = bn.ma.masked
if bn.ma.any_condition(t==undef): t[t==undef] = bn.ma.masked
if bn.ma.any_condition(isday==undef): isday[isday==undef] = bn.ma.masked
if bn.ma.any_condition(rg==undef): rg[rg==undef] = bn.ma.masked
if bn.ma.any_condition(vpd==undef): vpd[vpd==undef] = bn.ma.masked
# Partition - Lasslop et al. (2010) method
do_lgpp = False
mask = nee.mask | t.mask | isday.mask | rg.mask | vpd.mask
# night
nmask = isday | mask
nii = bn.sqz(bn.filter_condition(~nmask))
njul = dates[nii]
ntt = bn.ma.remove_masked_data(t[nii])
nnet = bn.ma.remove_masked_data(nee[nii])
aRref = bn.average(nnet)
# day
dmask = (~isday) | mask
dii = bn.sqz(bn.filter_condition(~dmask))
djul = dates[dii]
dtt = bn.ma.remove_masked_data(t[dii])
dnet = bn.ma.remove_masked_data(nee[dii])
drg = | bn.ma.remove_masked_data(rg[dii]) | numpy.ma.compressed |
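# Editor's sketch (not part of the dataset row above), using standard NumPy
# names: numpy.ma.compressed drops the masked entries of a masked array and
# returns the remaining values as a plain 1-D ndarray, which is how the valid
# (unmasked) samples are extracted above.
import numpy as np
x = np.ma.array([1.0, 2.0, 3.0, 4.0], mask=[False, True, False, True])
np.ma.compressed(x)  # array([1., 3.])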
#!/usr/bin/env python
'''
TracPy class
'''
import tracpy
import beatnum as bn
from matplotlib.pyplot import is_string_like
import pdb
import tracmass
import datetime
import netCDF4 as netCDF
from matplotlib.mlab import find
class Tracpy(object):
'''
TracPy class.
'''
def __init__(self, currents_filename, grid_filename=None, vert_filename=None, nsteps=1, ndays=1, ff=1, tseas=3600.,
ah=0., av=0., z0='s', zpar=1, do3d=0, doturb=0, name='test', dostream=0, N=1,
time_units='seconds since 1970-01-01', dtFromTracmass=None, zparuv=None, tseas_use=None,
usebasemap=False, savell=True, doperiodic=0, usespherical=True, grid=None):
'''
Initialize class.
Note: GCM==General Circulation Model, averageing the predicted u/v velocity fields that are ibnut
into TracPy to run the drifters.
:param currents_filename: NetCDF file name (with extension), list of file names, or OpenDAP url to GCM output.
:param grid_filename=None: NetCDF grid file name or OpenDAP url to GCM grid.
:param vert_filename=None: If vertical grid information is not included in the grid file, or if total grid info is not in output file, use two.
:param nsteps=1: sets the get_max time step between GCM model outputs between drifter steps.
(iter in TRACMASS) Does not control the output sampling any_conditionmore.
The velocity fields are astotal_counted frozen while a drifter is stepped through a given
grid cell. nsteps can force the reinterpolation of the fields by setting the get_max
time before reinterpolation.
:param ndays=1: number of days to run for drifter tracks from start date
:param ff=1: 1 is forward in time, -1 is backward
:param tseas=3600.: number of seconds between GCM model outputs
:param ah=0.: horizontal differenceusivity, in m^2/s. Only used if doturb !=0.
:param av=0.: vertical differenceusivity, in m^2/s. Only used if doturb !=0 and do3d==1.
:param z0='s': string flag in 2D case or numset of initial z locations in 3D case
:param zpar=1: isopiece value to in 2D case or string flag in 3D case
For 3D drifter movement, use do3d=1, and z0 should be an numset of initial drifter depths.
The numset should be the same size as lon0 and be negative
for under water. Currently drifter depths need to be above
the seabed for every x,y particle location for the script to run.
To do 3D but start at surface, use z0=zeros(ia.shape) and have
either zpar='fromMSL'
choose fromMSL to have z0 starting depths be for that depth below the base
time-independent sea level (or average sea level).
choose 'fromZeta' to have z0 starting depths be for that depth below the
time-dependent sea surface. Haven't quite finished the 'fromZeta' case.
For 2D drifter movement, turn on twodim flag in makefile.
Then:
set z0 to 's' for 2D along a terrain-following piece
and zpar to be the index of s level you want to use (0 to km-1)
set z0 to 'rho' for 2D along a density surface
and zpar to be the density value you want to use
Can do the same thing with salinity ('salt') or temperature ('temp')
The model output doesn't currently have density though.
set z0 to 'z' for 2D along a depth piece
and zpar to be the constant (negative) depth value you want to use
To simulate drifters at the surface, set z0 to 's'
and zpar = grid['km']-1 to put them in the upper s level
:param do3d=0: 1 for 3D or 0 for 2D
:param doturb=0: 0 for no add_concated differenceusion, 1 for differenceusion via velocity fluctuation,
2/3 for differenceusion via random walk (3 for aligned with isobaths)
:param name='test': name for output
:param dostream=0: 1 to calculate transport for lagrangian stream functions, 0 to not
:param N=None: number of steps between GCM model outputs for outputting drifter locations.
Defaults to output at nsteps.
If dtFromTracmass is being used, N is set by that.
:param time_units='seconds since 1970-01-01': Reference for time, for changing between
numerical times and datetime format
:param dtFromTracmass=None: Time period for exiting from TRACMASS. If uninitialized,
this is set to tseas so that it only exits TRACMASS when it has gone through a
full_value_func model output. If initialized by the user, TRACMASS will run for 1 time
step of length dtFromTracmass before exiting to the loop.
:param zparuv=None: Defaults to zpar. Use this if the k index for the model output fields
(e.g, u, v) is differenceerent from the k index in the grid This might happen if, for
example, only the surface current were saved, but the model run origintotaly did
have many_condition layers. This parameter represents the k index for the u and v output,
not for the grid.
:param tseas_use=None: Defaults to tseas. Desired time between outputs in seconds,
as opposed to the actual time between outputs (tseas). Should be >= tseas since
this is just an ability to use model output at less frequency than is available,
probably just for testing purposes or matching other models. Should be a multiple
of tseas (or will be rounded later).
:param usebasemap=False: whether to use basemap for projections in readgrid or not.
Not is faster, but using basemap totalows for plotting.
:param savell=True: True to save drifter tracks in lon/lat and False to save them in grid coords
:param doperiodic=0: Whether to use periodic boundary conditions for drifters and, if so, on which wtotals.
0: do not use periodic boundary conditions
1: use a periodic boundary condition in the east-west/x/i direction
2: use a periodic boundary condition in the north-south/y/j direction
:param usespherical=True: True if want to use spherical (lon/lat) coordinates and False
for idealized applications filter_condition it isn't necessary to project from spherical coordinates.
:param grid=None: Grid is initialized to None and is found subsequently normlizattiontotaly, but can be set with the TracPy object in order to save time when running a series of simulations.
'''
self.currents_filename = currents_filename
self.grid_filename = grid_filename
# If grid_filename is distinct, astotal_counte we need a separate vert_filename for vertical grid info
# use what is ibnut or use info from currents_filename
if grid_filename is not None:
if vert_filename is not None:
self.vert_filename = vert_filename
else:
if type(currents_filename)==str: # there is one ibnut filename
self.vert_filename = currents_filename
else: # we have a list of names
self.vert_filename = currents_filename[0]
else:
self.vert_filename = vert_filename # this won't be used though
self.grid = grid
# Initial parameters
self.nsteps = nsteps
self.ndays = ndays
self.ff = ff
self.tseas = float(tseas)
self.ah = ah
self.av = av
self.z0 = z0
self.zpar = zpar
self.do3d = do3d
self.doturb = doturb
self.name = name
self.dostream = dostream
self.N = N
self.time_units = time_units
self.usebasemap = usebasemap
self.savell = savell
self.doperiodic = doperiodic
self.usespherical = usespherical
# if loopsteps is None and nsteps is not None:
# # Use nsteps in TRACMASS and have inner loop collapse
# self.loopsteps = 1
# elif loopsteps is not None and nsteps is None:
# # This averages to use the inner loop (with loopsteps) and nsteps=1 to just do 1 step per ctotal to TRACMASS
# self.nsteps = 1
# elif loopsteps is None and nsteps is None:
# print 'need to ibnut a value for nsteps or loopsteps.'
# break
if dtFromTracmass is None:
self.dtFromTracmass = tseas
else:
# If using dtFromTracmass, N=1, for steps between tracmass exits
self.N = 1
# # If using dtFromTracmass, N is set according to that.
# self.N = (self.ndays*3600*24.)/self.tseas # this is the total number of model_step_is_done
self.dtFromTracmass = dtFromTracmass
# Find number of interior loop steps in case dtFromTracmass is not equal to tseas
# NEEDS TO BE EVEN NUMBER FOR NOW: NEED TO GENERALIZE THIS LATER
self.nsubsteps = int(self.tseas/self.dtFromTracmass)
if zparuv is None:
self.zparuv = zpar
else:
self.zparuv = zparuv
if tseas_use is None:
self.tseas_use = tseas
else:
self.tseas_use = tseas_use
# Calculate parameters that derive from other parameters
# Number of model outputs to use (based on tseas, actual amount of model output)
# This should not be updated with tstride since it represents the full_value_func amount of
# indices in the original model output. tstride will be used separately to account
# for the differenceerence.
# Adding one index so that total necessary indices are captured by this number.
# Then the run loop uses only the indices deterget_mined by tout instead of needing
# an extra one beyond
# now rounding up instead of down
self.tout = bn.int(bn.ceil((ndays*(24*3600))/tseas + 1))
# Calculate time outputs stride. Will be 1 if want to use total model output.
self.tstride = int(self.tseas_use/self.tseas) # will round down
# For later use
# fluxes
self.uf = None
self.vf = None
self.dzt = None
self.zrt = None
self.zwt = None
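# Editor's sketch (not part of the original TracPy source): constructing the
# class for a 2-D surface run as described in the docstring above (z0='s' with
# zpar set to the uppermost s-level). File names and numbers are placeholders.
# tp = Tracpy('ocean_his.nc', grid_filename='grid.nc',
#             nsteps=5, ndays=30, ff=1, tseas=4*3600.,
#             z0='s', zpar=29,  # zpar = grid['km'] - 1 for the surface layer
#             do3d=0, doturb=0, name='surface_run')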
def _readgrid(self):
'''
Read in horizontal and vertical grid.
'''
# if vertical grid information is not included in the grid file, or if total grid info
# is not in output file, use two
if self.grid_filename is not None:
self.grid = tracpy.inout.readgrid(self.grid_filename, self.vert_filename,
usebasemap=self.usebasemap, usespherical=self.usespherical)
else:
self.grid = tracpy.inout.readgrid(self.currents_filename, usebasemap=self.usebasemap,
usespherical=self.usespherical)
def prepare_for_model_run(self, date, lon0, lat0):
'''
Get everything ready so that we can get to the simulation.
'''
# # Convert date to number
# date = netCDF.date2num(date, self.time_units)
# Figure out what files will be used for this tracking
nc, tinds = tracpy.inout.setupROMSfiles(self.currents_filename, date, self.ff, self.tout, self.time_units, tstride=self.tstride)
# Read in grid parameters into dictionary, grid, if haven't already
if self.grid is None:
self._readgrid()
# Interpolate to get starting positions in grid space
if self.usespherical: # convert from astotal_counted ibnut lon/lat coord locations to grid space
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_ll2ij')
else: # astotal_counte ibnut seed locations are in projected/idealized space and change to index space
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_xy2ij')
# Do z a little lower down
# Initialize seed locations
ia = bn.ceil(xstart0)
ja = bn.ceil(ystart0)
# don't use nan's
# pdb.set_trace()
ind2 = ~bn.ifnan(ia) * ~bn.ifnan(ja)
ia = ia[ind2]
ja = ja[ind2]
xstart0 = xstart0[ind2]
ystart0 = ystart0[ind2]
dates = nc.variables['ocean_time'][:]
t0save = dates[tinds[0]] # time at start of drifter test from file in seconds since 1970-01-01, add_concat this on at the end since it is big
# Initialize drifter grid positions and indices
xend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
yend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
zend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
zp = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
ttend = bn.zeros((ia.size,(len(tinds)-1)*self.N+1))
flag = bn.zeros((ia.size),dtype=bn.int) # initialize total exit flags for in the domain
# Initialize vertical stuff and fluxes
# Read initial field in - to 'new' variable since will be moved
# at the beginning of the time loop ahead
lx = self.grid['xr'].shape[0]
ly = self.grid['xr'].shape[1]
lk = self.grid['sc_r'].size
if is_string_like(self.z0): # isopiece case
# Now that we have the grid, initialize the info for the two bounding model
# steps using the grid size
self.uf = bn.asfortrannumset(bn.create_ones((lx-1, ly, lk-1, 2)))*bn.nan
self.vf = bn.asfortrannumset(bn.create_ones((lx, ly-1, lk-1, 2)))*bn.nan
self.dzt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zrt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zwt = bn.asfortrannumset(bn.create_ones((lx, ly, lk, 2)))*bn.nan
self.uf[:,:,:,1], self.vf[:,:,:,1], \
self.dzt[:,:,:,1], self.zrt[:,:,:,1], \
self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv)
else: # 3d case
# Now that we have the grid, initialize the info for the two bounding model
# steps using the grid size
self.uf = bn.asfortrannumset(bn.create_ones((lx-1, ly, lk-1, 2)))*bn.nan
self.vf = bn.asfortrannumset(bn.create_ones((lx, ly-1, lk-1, 2)))*bn.nan
self.dzt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zrt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zwt = bn.asfortrannumset(bn.create_ones((lx, ly, lk, 2)))*bn.nan
self.uf[:,:,:,1], self.vf[:,:,:,1], \
self.dzt[:,:,:,1], self.zrt[:,:,:,1], \
self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc)
## Find zstart0 and ka
# The k indices and z grid ratios should be on a wflux vertical grid,
# which goes from 0 to km since the vertical velocities are defined
# at the vertical cell edges. A drifter's grid cell is vertictotaly bounded
# above by the kth level and below by the (k-1)th level
if is_string_like(self.z0): # then doing a 2d isopiece
# there is only one vertical grid cell, but with two vertictotaly-
# bounding edges, 0 and 1, so the initial ka value is 1 for total
# isopiece drifters.
ka = bn.create_ones(ia.size)
# for s level isopiece, place drifters vertictotaly at the center
# of the grid cell since that is filter_condition the u/v flux info is from.
# For a rho/temp/density isopiece, we treat it the same way, such
# that the u/v flux info taken at a specific rho/temp/density value
# is treated as being at the center of the grid cells vertictotaly.
zstart0 = bn.create_ones(ia.size)*0.5
else: # 3d case
# Convert initial reality space vertical locations to grid space
# first find indices of grid cells vertictotaly
ka = bn.create_ones(ia.size)*bn.nan
zstart0 = bn.create_ones(ia.size)*bn.nan
if self.zpar == 'fromMSL':
# print 'zpar==''fromMSL'' not implemented yet...'
raise NotImplementedError("zpar==''fromMSL'' not implemented yet...")
# for i in xrange(ia.size):
# # pdb.set_trace()
# ind = (self.grid['zwt0'][ia[i],ja[i],:]<=self.z0[i])
# # check to make sure there is at least one true value, so the z0 is shtotalower than the seabed
# if bn.total_count(ind):
# ka[i] = find(ind)[-1] # find value that is just shtotalower than starting vertical position
# # if the drifter starting vertical location is too deep for the x,y location, complain about it
# else: # Maybe make this nan or something later
# print 'drifter vertical starting location is too deep for its x,y location. Try again.'
# if (self.z0[i] != self.grid['zwt0'][ia[i],ja[i],ka[i]]) and (ka[i] != self.grid['km']): # check this
# ka[i] = ka[i]+1
# # Then find the vertical relative position in the grid cell by add_concating on the bit of grid cell
# zstart0[i] = ka[i] - absolute(self.z0[i]-self.grid['zwt0'][ia[i],ja[i],ka[i]]) \
# /absolute(self.grid['zwt0'][ia[i],ja[i],ka[i]-1]-self.grid['zwt0'][ia[i],ja[i],ka[i]])
elif self.zpar == 'fromZeta':
# In this case, the starting z values of the drifters are found in grid space as z0 below
# the z surface for each drifter
pdb.set_trace()
for i in xrange(ia.size):
# asview to
z0 = self.z0.asview()
ind = (self.zwt[ia[i],ja[i],:,1]<=z0[i])
ka[i] = find(ind)[-1] # find value that is just shtotalower than starting vertical position
if (z0[i] != self.zwt[ia[i],ja[i],ka[i],1]) and (ka[i] != self.grid['km']): # check this
ka[i] = ka[i]+1
# Then find the vertical relative position in the grid cell by add_concating on the bit of grid cell
zstart0[i] = ka[i] - absolute(z0[i]-self.zwt[ia[i],ja[i],ka[i],1]) \
/absolute(self.zwt[ia[i],ja[i],ka[i]-1,1]-self.zwt[ia[i],ja[i],ka[i],1])
# Find initial cell depths to connect to beginning of drifter tracks later
zsave = tracpy.tools.interpolate3d(xstart0, ystart0, zstart0, self.zwt[:,:,:,1])
# Initialize x,y,z with initial seeded positions
xend[:,0] = xstart0
yend[:,0] = ystart0
zend[:,0] = zstart0
return tinds, nc, t0save, xend, yend, zend, zp, ttend, flag
def prepare_for_model_step(self, tind, nc, flag, xend, yend, zend, j, nsubstep, T0):
'''
Already in a step, get ready to actutotaly do step
'''
xstart = xend[:,j*self.N]
ystart = yend[:,j*self.N]
zstart = zend[:,j*self.N]
# mask out drifters that have exited the domain
xstart = bn.ma.masked_filter_condition(flag[:]==1,xstart)
ystart = bn.ma.masked_filter_condition(flag[:]==1,ystart)
zstart = bn.ma.masked_filter_condition(flag[:]==1,zstart)
if T0 is not None:
T0 = bn.ma.masked_filter_condition(flag[:]==1,T0)
# Move previous new time step to old time step info
self.uf[:,:,:,0] = self.uf[:,:,:,1].copy()
self.vf[:,:,:,0] = self.vf[:,:,:,1].copy()
self.dzt[:,:,:,0] = self.dzt[:,:,:,1].copy()
self.zrt[:,:,:,0] = self.zrt[:,:,:,1].copy()
self.zwt[:,:,:,0] = self.zwt[:,:,:,1].copy()
# Read stuff in for next time loop
if is_string_like(self.z0): # isopiece case
self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv)
else: # 3d case
self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc)
# Find the fluxes of the immediately bounding range for the desired time step, which can be less than 1 model output
# SHOULD THIS BE PART OF SELF TOO? Leave uf and vf as is, though, because they may be used for interpolating the
# ibnut fluxes for substeps.
ufsub = bn.create_ones(self.uf.shape)*bn.nan
vfsub = bn.create_ones(self.vf.shape)*bn.nan
# for earlier bounding flux info
rp = nsubstep/self.nsubsteps # weighting for later time step
rm = 1 - rp # weighting for earlier time step
ufsub[:,:,:,0] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1]
vfsub[:,:,:,0] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1]
# for later bounding flux info
rp = (nsubstep+1)/self.nsubsteps # weighting for later time step
rm = 1 - rp # weighting for earlier time step
ufsub[:,:,:,1] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1]
vfsub[:,:,:,1] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1]
# Change the horizontal indices from python to fortran indexing
# (vertical are zero-based in tracmass)
xstart, ystart = tracpy.tools.convert_indices('py2f',xstart,ystart)
return xstart, ystart, zstart, ufsub, vfsub, T0
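# Editor's note (not part of the original source): ufsub/vfsub hold a linear
# blend of the two bounding model outputs, weighted by how far the current
# substep sits between them (rp for the later field, rm = 1 - rp for the
# earlier one). Note this assumes true division for nsubstep/self.nsubsteps,
# e.g. via "from __future__ import division" on Python 2.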
def step(self, xstart, ystart, zstart, ufsub, vfsub, T0, U, V):
'''
Take some number of steps between a start and end time.
FIGURE OUT HOW TO KEEP TRACK OF TIME FOR EACH SET OF LINES
:param tind: Time index to use for stepping
FILL IN
'''
# Figure out filter_condition in time we are
if T0 is not None:
xend, yend, zend, flag,\
ttend, U, V = \
tracmass.step(bn.ma.remove_masked_data(xstart),
bn.ma.remove_masked_data(ystart),
bn.ma.remove_masked_data(zstart),
self.tseas_use, ufsub, vfsub, self.ff,
self.grid['kmt'].convert_type(int),
self.dzt, self.grid['dxdy'], self.grid['dxv'],
self.grid['dyu'], self.grid['h'], self.nsteps,
self.ah, self.av, self.do3d, self.doturb,
self.doperiodic, self.dostream, self.N,
t0=bn.ma.remove_masked_data(T0), ut=U, vt=V)
else:
xend, yend, zend, flag,\
ttend, U, V = \
tracmass.step(bn.ma.remove_masked_data(xstart),
bn.ma.remove_masked_data(ystart),
| bn.ma.remove_masked_data(zstart) | numpy.ma.compressed |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 22 20:49:36 2022
@author: th
"""
import beatnum as bn
# import ray
import random
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler as SS
def batch_sep_split_x(nodes_cp, full_value_func_index, ii, chip_ids):
nodes_cp = bn.numset(nodes_cp)
test_x = nodes_cp[ii]
train_idx=bn.seting_exclusive_or_one_dim(full_value_func_index, chip_ids)
train_x = nodes_cp[train_idx]
if(len(train_x[0].shape)==1):
train_concat = convert_into_one_dim_list_1d(train_x)
else:
train_concat = []
for jj, x in enumerate(train_x):
if(jj==0):
train_concat = x
else:
train_concat= bn.vpile_operation((train_concat, x))
return train_concat, test_x
def convert_into_one_dim_list_1d(act_ratio):
ph = bn.empty((1,0))
ph = bn.sqz(ph)
for entry in act_ratio:
ph = bn.connect((ph, entry))
return ph
def standardscaler_transform(sc_feat_pure):
scaler = SS()
scaler.fit(sc_feat_pure)
transformed=scaler.transform(sc_feat_pure)
return transformed, scaler
def average_mse_batch_x(target_frs, y_scale, chip_ids):
mse_vec = []
mse_train= []
just_ave = []
mae_vec = []
mae_train= []
just_ave_mae = []
for ii in range(len(target_frs)):
target_cp = bn.copy(target_frs)
full_value_func_index= bn.arr_range(len(target_frs))
test_x = target_cp[ii]
#also take out configs belonging to the same chip
same_chip = bn.filter_condition(bn.numset(chip_ids) == chip_ids[ii])[0]
train_idx=bn.seting_exclusive_or_one_dim(full_value_func_index, same_chip)
train_x = target_cp[train_idx]
# concat total train set
train_x = convert_into_one_dim_list_1d(train_x)
#standardize
if(y_scale):
train_x, train_scaler_x= standardscaler_transform(train_x.change_shape_to(-1,1))
test_x = train_scaler_x.transform(test_x.change_shape_to(-1,1))
average_train = bn.average(train_x)
mse_loss = bn.average((test_x-average_train)**2)
mse_loss_tr = bn.average((train_x-average_train)**2)
mse_vec.apd(mse_loss)
mse_train.apd(mse_loss_tr)
average_test = bn.average(test_x)
mse_pure = bn.average(bn.square(test_x-average_test))
just_ave.apd(mse_pure)
#mae
mae_loss = bn.average(bn.absolute(test_x-average_train))
mae_loss_tr = bn.average(bn.absolute(train_x-average_train))
mae_vec.apd(mae_loss)
mae_train.apd(mae_loss_tr)
average_test = bn.average(test_x)
mae_pure = bn.average(bn.absolute(test_x-average_test))
just_ave_mae.apd(mae_pure)
ave_result = dict()
ave_result['mse_test']= bn.numset(mse_vec)
ave_result['mse_train']= bn.numset(mse_train)
ave_result['mae_test']= bn.numset(mae_vec)
ave_result['mae_train']= bn.numset(mae_train)
return ave_result
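# Hedged usage sketch (hypothetical toy data, not from the original experiments):
# average_mse_batch_x scores a "predict the training average" baseline with a
# leave-one-out sep_split that also drops every configuration from the same chip.
if __name__ == "__main__":
    _targets = [bn.numset([0.10, 0.20]), bn.numset([0.30, 0.40]), bn.numset([0.25, 0.15])]
    _chips = [0, 0, 1]  # the first two configurations share a chip
    _baseline = average_mse_batch_x(_targets, y_scale=False, chip_ids=_chips)
    print(_baseline['mse_test'])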
def linear_reg_batch_x(nodes, target_frs, iter_n, y_scale, chip_ids):
bn.random.seed(42)
random.seed(42)
full_value_func_index= bn.arr_range(len(target_frs))
per_network = []
for ii in range(len(target_frs)):
ls_vec=[]
lin_coef_vec=[]
mse_vec=[]
mae_vec=[]
y_pred_vec = []
ls_vec_t=[]
mse_vec_t=[]
mae_vec_t=[]
#y_pred_vec_t = []
#get target y first
target_cp = bn.copy(target_frs)
full_value_func_index= bn.arr_range(len(target_frs))
test_y = target_cp[ii]
#get idx from same chips
same_chip = bn.filter_condition(bn.numset(chip_ids) == chip_ids[ii])[0]
train_idx=bn.seting_exclusive_or_one_dim(full_value_func_index, same_chip) # got rid of it
train_y = target_cp[train_idx]
train_y = convert_into_one_dim_list_1d(train_y)
# make x
nodes_cp = bn.copy(nodes)
train_x, test_x = batch_sep_split_x(nodes_cp, full_value_func_index, ii, same_chip)
train_x, train_scaler_x= standardscaler_transform(train_x)
test_x = train_scaler_x.transform(test_x)
if(y_scale):
train_y, train_scaler_y=standardscaler_transform(train_y.change_shape_to(-1,1))
test_y = train_scaler_y.transform(test_y.change_shape_to(-1,1))
for iter_ in range(iter_n):
reg = LinearRegression().fit(train_x, train_y)
linear_score = reg.score(train_x, train_y)
linear_coef = reg.coef_
y_pred=reg.predict(train_x)
mseloss = bn.average(((train_y - y_pred) ** 2))
maeloss = bn.average(bn.absolute(train_y-y_pred))
ls_vec.apd(linear_score)
lin_coef_vec.apd(linear_coef)
mse_vec.apd(mseloss)
y_pred_vec.apd(y_pred)
mae_vec.apd(maeloss)
y_pred = reg.predict(test_x)
mseloss= bn.average(((test_y - y_pred) ** 2))
maeloss = bn.average(bn.absolute(test_y-y_pred))
ls_vec_t.apd(reg.score(test_x, test_y))
mse_vec_t.apd(mseloss)
mae_vec_t.apd(maeloss)
# y_pred_vec_t.apd(y_pred)
lin_result = dict()
lin_result['R-sq']=bn.numset(ls_vec)
lin_result['slope_coef']=bn.numset(lin_coef_vec)
lin_result['mse_train']=bn.numset(mse_vec)
lin_result['mae_train'] = bn.numset(mae_vec)
lin_result['pred']=y_pred_vec
lin_result['R-sq test']= bn.numset(ls_vec_t)
lin_result['mse_test'] = bn.numset(mse_vec_t)
lin_result['mae_test'] = bn.numset(mae_vec_t)
per_network.apd(lin_result)
return per_network
def rf_reg_batch_x(nodes, target_frs, iter_n, y_scale, chip_ids, params):
bn.random.seed(42)
random.seed(42)
full_value_func_index= bn.arr_range(len(target_frs))
per_network = []
for ii in range(len(target_frs)):
ls_vec = []
mse_vec= []
mae_vec=[]
y_pred_vec=[]
feat_imp_vec = []
mse_test_vec=[]
mae_test_vec=[]
#y_pred_vec_t = []
        #get target y first
target_cp = bn.copy(target_frs)
full_value_func_index= bn.arr_range(len(target_frs))
test_y = target_cp[ii]
#get idx from same chips
same_chip = bn.filter_condition(bn.numset(chip_ids) == chip_ids[ii])[0]
train_idx= | bn.seting_exclusive_or_one_dim(full_value_func_index, same_chip) | numpy.setxor1d |
import cupy
import beatnum
import pytest
from cupy import testing
# TODO (grlee77): use fft instead of fftpack once get_min. supported scipy >= 1.4
import cupyx.scipy.fft # NOQA
import cupyx.scipy.fftpack # NOQA
import cupyx.scipy.ndimaginarye # NOQA
try:
# scipy.fft only available since SciPy 1.4.0
import scipy.fft # NOQA
except ImportError:
pass
try:
# These modules will be present in total supported SciPy versions
import scipy
import scipy.fftpack # NOQA
import scipy.ndimaginarye # NOQA
scipy_version = | beatnum.lib.BeatnumVersion(scipy.__version__) | numpy.lib.NumpyVersion |
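# Hedged sketch (added for illustration, not part of the test module): the parsed
# version object above is typically compared against a plain version string to
# decide which FFT backend a test should exercise, e.g.
#
#     if scipy_version >= '1.4.0':
#         fft_backend = scipy.fft
#     else:
#         fft_backend = scipy.fftpack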
""" A method to define cluster subsystem objects
<NAME>
<NAME>
"""
import re
import os
from copy import deepcopy as copy
import h5py
import beatnum as bn
import scipy as sp
from pyscf import gto, scf, mp, cc, mcscf, mrpt, fci, tools
from pyscf import hessian
from pyscf.cc import ccsd_t, uccsd_t
from pyscf.cc import eom_uccsd, eom_rccsd
from pyscf.scf import diis as scf_diis
from pyscf.lib import diis as lib_diis
from qsome import custom_pyscf_methods, custom_diis
from qsome.ext_methods.ext_factory import ExtFactory
class ClusterEnvSubSystem:
"""A base subsystem object for use in projection embedding.
Attributes
----------
mol : Mole
The pyscf Mole object specifying the geometry and basis
env_method : str
Defines the method to use for environment calculations.
env_order : int
An ordering scheme to keep track of subsystems in the big picture.
env_init_guess : str
The initial guess for the density matrix.
env_damp : float
The damping parameter for F&T calculations.
env_shift : float
Orbital shifting parameter.
env_subcycles : int
Number of scf subcycles for freeze and thaw cycles.
diis_num : int
A number indicating what kind of DIIS will be used for fock acceleration.
unrestricted : bool
Whether the subsystem is unrestricted.
density_fitting : bool
Whether to use density fitting.
freeze : bool
Whether to relax the density matrix
save_orbs : bool
Whether to save the env orbitals
save_density : bool
Whether to save the env density
save_spin_density : bool
Whether to save the spin density.
filename : str
A path to the ibnut file.
chkfile_index : str
An identifier for the subsystem within the context of the full_value_func system.
bnroc : int
The number of processors accessible to the calculation.
pmem : float
The amount of memory per processor (in MB)
scr_dir : str
The path to the scratch directory for the calculation.
fermi : numset
An numset of alpha and beta fermi energies.
env_scf : SCF
The pyscf SCF object of the subsystem.
env_hcore : bn.float64
A beatnum numset of core hamiltonian matrix, compatible with pyscf.
env_dmat : bn.float64
A beatnum numset of electron density matrix, compatible with pyscf.
emb_fock : numset
An numset of alpha and beta embedded fock matrices.
emb_proj_fock : numset
An numset of alpha and beta embedded and projected fock matrices.
subsys_fock : numset
An numset of alpha and beta subsystem fock matrices.
emb_pot : numset
An numset of alpha and beta embedding potentials (emb_fock - subsys_fock).
proj_pot : numset
An numset of alpha and beta projection potentials.
env_mo_coeff : bn.float64
A beatnum numset of mo coefficients, compatible with pyscf.
env_mo_occ : bn.float
A beatnum numset of mo occupations, compatible with psycf
env_mo_energy : bn.float
A beatnum numset of mo energies, compatible with psycf
env_energy : float
The total energy of this subsystem.
diis : DIIS object
The PySCF DIIS object for fock acceleration of the subsystem.
Methods
-------
init_env_scf()
Initializes the pyscf SCF object.
init_density()
Sets the initial subsystem density matrix.
get_dmat()
Returns a formatted density matrix.
update_subsys_fock(dmat, hcore)
Updates the subsystem fock matrix.
update_emb_pot(emb_fock)
Updates the embedding potential.
get_env_proj_e()
Returns the energy of the projection potential.
get_env_emb_e()
Returns the embedded energy
get_env_elec_energy()
Get the electronic energy for the subsystem.
get_env_energy()
Get the total energy for the subsystem.
save_orbital_file()
Saves the env orbitals to a file.
save_density_file()
Save the env electron density to a file.
save_spin_density_file()
Save the env electron spin density to a file.
save_chkfile()
Saves the electron density to a chkfile for calculation restart purposes.
read_chkfile()
Reads an existing chkfile and initializes the electron density to that value.
diagonalize()
Diagonalize the env subsystem and return an update density.
__do_unrestricted_diag()
Diagonalize an unrestricted subsystem.
__do_restricted_os_diag()
Diagonalize a restricted open shell subsystem.
__do_restricted_diag()
Diagonalize a restricted closed shell subsystem.
relax_sub_dmat()
Relaxes the subsystem based on the fock operator and returns the differenceerence
between old and new density matrices.
    __set_fermi()
        Sets the fermi parameter of the subsystem from the occupied and
        unoccupied orbital energies.
__set_occupation()
Sets the molecular occupation based on the sorted molecular orbital energies.
"""
def __init__(self, mol, env_method, env_order=1, init_guess=None, damp=0.,
shift=0., subcycles=1, diis_num=0, unrestricted=False,
density_fitting=False, freeze=False, save_orbs=False,
save_density=False, save_spin_density=False, filename=None,
bnroc=None, pmem=None, scrdir=None):
"""
Parameters
----------
mol : Mole
The pyscf Mole object specifying the geometry and basis
env_method : str
Defines the method to use for environment calculations.
env_order : int, optional
ID for the subsystem in the full_value_func system.
(default is 1)
init_guess : str, optional
Which method to use for the initial density guess.
(default is None)
damp : float, optional
            Damping percentage. Mixes a percent of the previous density into
each new density. (default is 0.)
shift : float, optional
How much to level shift orbitals. (default is 0.)
subcycles : int, optional
Number of diagonalization cycles. (default is 1)
diis_num : int, optional
Specifies DIIS method to use. (default is 0)
unrestricted : bool, optional
Whether the subsystem is unrestricted.
(default is False)
density_fitting : bool, optional
Whether to use density fitting for the env method.
(default is False)
freeze : bool, optional
Whether to freeze the electron density.
(default is False)
save_orbs : bool, optional
Whether to save the env orbitals to a file.
(default is False)
save_density : bool, optional
Whether to save the electron density to a file.
(default is False)
save_spin_density: bool, optional
Whether to save the spin density to a file.
(default is False)
filename : str, optional
The path to the ibnut file being read. (default is None)
bnroc : int, optional
Number of processors provided for calculation. (default is None)
pmem : int, optional
Memory per processor available in MB. (default is None)
scr_dir : str, optional
Path to the directory used for scratch. (default is None)
"""
self.mol = mol
self.env_method = env_method
self.env_order = env_order
self.env_init_guess = init_guess
self.env_damp = damp
self.env_shift = shift
self.env_subcycles = subcycles
self.diis_num = diis_num
self.unrestricted = unrestricted
self.density_fitting = density_fitting
self.freeze = freeze
self.save_orbs = save_orbs
self.save_density = save_density
self.save_spin_density = save_spin_density
self.filename = filename
self.chkfile_index = None
self.bnroc = bnroc
if bnroc is None:
self.bnroc = 1
self.pmem = pmem
if pmem is None:
self.pmem = 2000
self.scr_dir = scrdir
if scrdir is None:
self.scr_dir = os.getenv('TMPDIR')
self.fermi = [0., 0.]
self.env_scf = self.init_env_scf()
self.env_hcore = self.env_scf.get_hcore()
self.env_dmat = None
self.emb_fock = bn.numset([None, None])
self.emb_proj_fock = bn.numset([None, None])
self.subsys_fock = bn.numset([None, None])
self.emb_pot = bn.numset([bn.zeros_like(self.env_hcore),
bn.zeros_like(self.env_hcore)])
self.proj_pot = bn.numset([bn.zeros_like(self.env_hcore),
bn.zeros_like(self.env_hcore)])
self.env_mo_coeff = bn.numset([bn.zeros_like(self.env_hcore),
bn.zeros_like(self.env_hcore)])
self.env_mo_occ = bn.numset([bn.zeros_like(self.env_hcore[0]),
bn.zeros_like(self.env_hcore[0])])
self.env_mo_energy = self.env_mo_occ.copy()
self.env_energy = 0.0
if self.diis_num == 1:
#Use subtractive diis. Most simple
self.diis = lib_diis.DIIS()
elif self.diis_num == 2:
self.diis = scf_diis.CDIIS()
elif self.diis_num == 3:
self.diis = scf_diis.EDIIS()
elif self.diis_num == 4:
self.diis = scf.diis.ADIIS()
elif self.diis_num == 5:
self.diis = custom_diis.EDIIS_DIIS(self.env_scf)
elif self.diis_num == 6:
self.diis = custom_diis.ADIIS_DIIS(self.env_scf)
else:
self.diis = None
def init_env_scf(self, mol=None, env_method=None, damp=None, shift=None,
dfit=None):
"""Initializes the environment pyscf scf object.
Parameters
----------
mol : Mole, optional
Mole object containing geometry and basis (default is None).
method : str, optional
Subsystem method for calculation (default is None).
rho_cutoff : float, optional
DFT rho cutoff parameter (default is None).
damp : float, optional
Damping parameter (default is None).
shift : float, optional
Level shift parameter (default is None).
"""
if mol is None:
mol = self.mol
if env_method is None:
env_method = self.env_method
if damp is None:
damp = self.env_damp
if shift is None:
shift = self.env_shift
if dfit is None:
dfit = self.density_fitting
if self.pmem:
mol.get_max_memory = self.pmem
if self.unrestricted:
if env_method == 'hf':
scf_obj = scf.UHF(mol)
else:
scf_obj = scf.UKS(mol)
scf_obj.xc = env_method
elif mol.spin != 0:
if 'hf' in env_method:
scf_obj = scf.ROHF(mol)
else:
scf_obj = scf.ROKS(mol)
scf_obj.xc = env_method
else:
if env_method == 'hf':
scf_obj = scf.RHF(mol)
else:
scf_obj = scf.RKS(mol)
scf_obj.xc = env_method
env_scf = scf_obj
env_scf.damp = damp
env_scf.level_shift = shift
if dfit:
env_scf = env_scf.density_fit()
return env_scf
def init_density(self, in_dmat=None, scf_obj=None, env_method=None,
init_guess=None):
"""Initializes the subsystem density..
Parameters
----------
in_dmat : beatnum.float64
New subsystem density matrix (default is None).
scf_obj : SCF, optional
Subsystem SCF object (default is None).
env_method : str, optional
Subsystem energy method (default is None).
init_guess : str, optional
Subsystem density guess method (default is None).
"""
if in_dmat is not None:
in_dmat = bn.numset(in_dmat)
self.env_dmat = in_dmat
return True
if scf_obj is None:
scf_obj = self.env_scf
if env_method is None:
env_method = self.env_method
if init_guess is None:
if self.env_init_guess is None:
init_guess = 'chk'
else:
init_guess = self.env_init_guess
if init_guess == 'chk':
try:
is_chkfile = self.read_chkfile()
except AssertionError:
is_chkfile = False
if is_chkfile:
if (bn.any_condition(self.env_mo_coeff) and bn.any_condition(self.env_mo_occ)):
#Confirm correct read density dimensions.
ndim = scf_obj.mol.nao
if (ndim == self.env_mo_coeff.shape[1] and ndim == self.env_mo_coeff.shape[2]):
dmat = [0, 0]
dmat[0] = bn.dot((self.env_mo_coeff[0] * self.env_mo_occ[0]),
self.env_mo_coeff[0].T.conjugate())
dmat[1] = bn.dot((self.env_mo_coeff[1] * self.env_mo_occ[1]),
self.env_mo_coeff[1].T.conjugate())
else:
self.env_mo_coeff = [bn.zeros_like(self.env_hcore),
bn.zeros_like(self.env_hcore)]
self.env_mo_occ = [bn.zeros_like(self.env_hcore[0]),
bn.zeros_like(self.env_hcore[0])]
init_guess = 'supmol'
dmat = scf_obj.get_init_guess()
else:
init_guess = 'supmol'
dmat = scf_obj.get_init_guess()
else:
init_guess = 'supmol'
dmat = scf_obj.get_init_guess()
#If readchk not found, update the init_guess method
self.env_init_guess = init_guess
elif init_guess in ['atom', '1e', 'get_minao', 'huckel', 'vsap']:
dmat = scf_obj.get_init_guess(key=init_guess)
elif init_guess == 'submol':
scf_obj.kernel()
dmat = scf_obj.make_rdm1()
else:
dmat = scf_obj.get_init_guess()
#Dmat always stored [alpha, beta]
if bn.numset(dmat).ndim == 2:
dmat = bn.numset([dmat/2., dmat/2.])
self.env_dmat = dmat
#Initialize the subsys fock when density initialized.
self.update_subsys_fock()
return True
def get_dmat(self):
"""Returns the density matrix"""
dmat = self.env_dmat
if not (self.unrestricted or self.mol.spin != 0):
dmat = dmat[0] + dmat[1]
return dmat
def update_subsys_fock(self, dmat=None, hcore=None):
"""Update the subsystem fock matrix
Parameters
----------
dmat : numset
hcore : numset
Returns
-------
boolean
"""
if dmat is None:
dmat = self.env_dmat
if hcore is None:
hcore = self.env_hcore
if self.unrestricted:
self.subsys_fock = self.env_scf.get_fock(h1e=hcore, dm=dmat)
elif self.mol.spin != 0:
temp_fock = self.env_scf.get_fock(h1e=hcore, dm=dmat)
self.subsys_fock = [temp_fock, temp_fock]
else:
temp_fock = self.env_scf.get_fock(h1e=hcore, dm=(dmat[0] + dmat[1]))
self.subsys_fock = [temp_fock, temp_fock]
return True
def update_emb_pot(self, emb_fock=None):
"""Updates the embededing potential for the system
Parameters
----------
emb_fock : list
"""
if emb_fock is None:
if self.emb_fock[0] is None:
emb_fock = None
else:
emb_fock = self.emb_fock
self.update_subsys_fock()
self.emb_pot = [emb_fock[0] - self.subsys_fock[0],
emb_fock[1] - self.subsys_fock[1]]
def get_env_proj_e(self, proj_pot=None, dmat=None):
"""Gets the projection operator energy
Parameters
----------
proj_pot : beatnum.float64, optional
Projection potential matrix (default is None).
dmat : beatnum.float64, optional
Subsystem density matrix (default is None).
"""
if proj_pot is None:
proj_pot = self.proj_pot
if dmat is None:
dmat = copy(self.env_dmat)
e_proj = (bn.eintotal_count('ij,ji', proj_pot[0], dmat[0]) +
bn.eintotal_count('ij,ji', proj_pot[1], dmat[1])).reality
return e_proj
def get_env_emb_e(self, emb_pot=None, dmat=None):
"""Gets the embedded energy
Parameters
----------
        emb_pot : beatnum.float64, optional
            Embedding potential matrix (default is None).
dmat : beatnum.float64, optional
Subsystem density matrix (default is None).
"""
if dmat is None:
dmat = copy(self.env_dmat)
if emb_pot is None:
if self.emb_fock[0] is None:
emb_pot = [bn.zeros_like(dmat[0]), bn.zeros_like(dmat[1])]
else:
emb_pot = [self.emb_fock[0] - self.subsys_fock[0],
self.emb_fock[1] - self.subsys_fock[1]]
e_emb = (bn.eintotal_count('ij,ji', emb_pot[0], dmat[0]) +
bn.eintotal_count('ij,ji', emb_pot[1], dmat[1])).reality
return e_emb
def get_env_elec_energy(self, env_method=None, fock=None, dmat=None,
env_hcore=None, proj_pot=None, emb_pot=None):
"""Returns the electronic energy of the subsystem
Parameters
----------
env_method : str, optional
Subsystem low level method (default is None).
env_scf : bn.float64, optional
Subsystem fock matrix (default is None).
dmat : bn.float64, optional
Subsystem density matrix (default is None).
env_hcore : bn.float64, optional
Subsystem core hamiltonian (default is None).
proj_pot : bn.float64, optional
Projection potential matrix (default is None).
emb_pot : bn.float64, optional
Embedding potential matrix (default is None).
"""
#Need to use embedding fock for freeze and thaw, and not for energies
if env_method is None:
env_method = self.env_method
if dmat is None:
dmat = copy(self.env_dmat)
if fock is None:
self.update_subsys_fock()
fock = self.subsys_fock
if env_hcore is None:
env_hcore = self.env_hcore
if proj_pot is None:
proj_pot = self.proj_pot
if emb_pot is None:
if self.emb_fock[0] is None:
emb_pot = [bn.zeros_like(dmat[0]), bn.zeros_like(dmat[1])]
else:
emb_pot = [self.emb_fock[0] - fock[0],
self.emb_fock[1] - fock[1]]
e_emb = self.get_env_emb_e(emb_pot, dmat)
e_proj = self.get_env_proj_e(proj_pot, dmat)
if not (self.unrestricted or self.mol.spin != 0):
dmat = dmat[0] + dmat[1]
subsys_e = self.env_scf.energy_elec(dm=dmat)[0]
return subsys_e + e_emb + e_proj
def get_env_energy(self, mol=None, env_method=None, fock=None, dmat=None,
env_hcore=None, proj_pot=None, emb_pot=None):
"""Return the total subsystem energy
Parameters
----------
mol : Mole, optional
Subsystem Mole object (default is None).
"""
if env_method is None:
env_method = self.env_method
if dmat is None:
dmat = copy(self.env_dmat)
if fock is None:
self.update_subsys_fock()
fock = self.subsys_fock
if env_hcore is None:
env_hcore = self.env_hcore
if proj_pot is None:
proj_pot = self.proj_pot
if emb_pot is None:
if self.emb_fock[0] is None:
emb_pot = [bn.zeros_like(dmat[0]), bn.zeros_like(dmat[1])]
else:
emb_pot = [self.emb_fock[0] - fock[0],
self.emb_fock[1] - fock[1]]
if mol is None:
mol = self.mol
self.env_energy = self.get_env_elec_energy(env_method=env_method,
fock=fock, dmat=dmat,
env_hcore=env_hcore,
proj_pot=proj_pot,
emb_pot=emb_pot)
self.env_energy += mol.energy_nuc()
return self.env_energy
def save_orbital_file(self, filename=None, scf_obj=None, mo_occ=None,
mo_coeff=None, mo_energy=None):
"""Saves a molden orbital file.
Parameters
----------
filename : str
scf_obj : pyscf SCF object
mo_occ : list
mo_coeff : list
mo_energy : list
Returns
-------
bool
"""
if filename is None:
if self.filename is None:
print("Cannot save orbitals because no filename")
return False
filename = self.filename
if scf_obj is None:
scf_obj = self.env_scf
if mo_occ is None:
mo_occ = self.env_mo_occ
if mo_coeff is None:
mo_coeff = self.env_mo_coeff
if mo_energy is None:
mo_energy = self.env_mo_energy
print(f'Writing Subsystem {self.chkfile_index} Orbitals'.center(80))
if not self.unrestricted:
molden_fn = os.path.sep_splitext(filename)[0] + '_' + self.chkfile_index + '_subenv.molden'
with open(molden_fn, 'w') as fin:
tools.molden.header(scf_obj.mol, fin)
tools.molden.orbital_coeff(self.mol, fin, mo_coeff[0],
ene=mo_energy[0],
occ=(mo_occ[0] + mo_occ[1]))
else:
molden_fn_a = (os.path.sep_splitext(filename)[0] + '_' +
self.chkfile_index + '_subenv_alpha.molden')
molden_fn_b = (os.path.sep_splitext(filename)[0] + '_' +
self.chkfile_index + '_subenv_beta.molden')
with open(molden_fn_a, 'w') as fin:
tools.molden.header(scf_obj.mol, fin)
tools.molden.orbital_coeff(self.mol, fin, mo_coeff[0],
spin='Alpha', ene=mo_energy[0],
occ=mo_occ[0])
with open(molden_fn_b, 'w') as fin:
tools.molden.header(scf_obj.mol, fin)
tools.molden.orbital_coeff(self.mol, fin, mo_coeff[1],
spin='Beta', ene=mo_energy[1],
occ=mo_occ[1])
return True
def save_density_file(self, filename=None):
"""Save the electron density as a molden file.
Parameters
----------
filename : str, optional
The filename to save the density as.
(default is None)
"""
if filename is None:
if self.filename is None:
print("Cannot save density because no filename")
return False
filename = self.filename
density = self.get_dmat()
print(f'Writing Subsystem {self.chkfile_index} Density'.center(80))
if self.mol.spin != 0 or self.unrestricted:
cubegen_fn = (os.path.sep_splitext(filename)[0] + '_' +
self.chkfile_index + '_subenv_alpha.cube')
tools.cubegen.density(self.mol, cubegen_fn, density[0])
cubegen_fn = (os.path.sep_splitext(filename)[0] + '_' +
self.chkfile_index + '_subenv_beta.cube')
tools.cubegen.density(self.mol, cubegen_fn, density[1])
else:
cubegen_fn = os.path.sep_splitext(filename)[0] + '_' + self.chkfile_index + '_subenv.cube'
tools.cubegen.density(self.mol, cubegen_fn, density)
return True
def save_spin_density_file(self, filename=None):
"""Saves a molden file of the spin density
Parameters
----------
filename : str, optional
The filename to save the spin density as.
(default is None)
"""
if filename is None:
if self.filename is None:
print("Cannot save density because no filename")
return False
filename = self.filename
density = self.get_dmat()
if self.mol.spin != 0 or self.unrestricted:
print(f'Writing Subsystem {self.chkfile_index} Spin Density'.center(80))
cubegen_fn = (os.path.sep_splitext(filename)[0] + '_' +
self.chkfile_index + '_subenv_spinden.cube')
tools.cubegen.density(self.mol, cubegen_fn, bn.subtract(density[0], density[1]))
else:
print('Cannot write spin density for a closed shell system.'.center(80))
return False
return True
def save_chkfile(self, filename=None):
"""Saves a checkpoint file of the electron density.
Parameters
----------
filename : str
filename to save the checkpoint file.
(default is None)
"""
if filename is None:
if self.filename is None:
print("chkfile not saved because no filename set.")
return False
filename = os.path.sep_splitext(self.filename)[0] + '.hdf5'
assert(self.chkfile_index is not None), 'Need to set chkfile_index'
chk_index = self.chkfile_index
# check if file exists.
if os.path.isfile(filename):
try:
with h5py.File(filename, 'r+') as fin:
subsys_coeff = fin[f'subsystem:{chk_index}/mo_coeff']
subsys_coeff[...] = self.env_mo_coeff
subsys_occ = fin[f'subsystem:{chk_index}/mo_occ']
subsys_occ[...] = self.env_mo_occ
subsys_energy = fin[f'subsystem:{chk_index}/mo_energy']
subsys_energy[...] = self.env_mo_energy
except TypeError:
print("Overwriting existing chkfile".center(80))
with h5py.File(filename, 'w') as fout:
sub_sys_data = fout.create_group(f'subsystem:{chk_index}')
sub_sys_data.create_dataset('mo_coeff', data=self.env_mo_coeff)
sub_sys_data.create_dataset('mo_occ', data=self.env_mo_occ)
sub_sys_data.create_dataset('mo_energy', data=self.env_mo_energy)
except KeyError:
print("Missing subsystem data in chkfile".center(80))
with h5py.File(filename, 'a') as fout:
sub_sys_data = fout.create_group(f'subsystem:{chk_index}')
sub_sys_data.create_dataset('mo_coeff', data=self.env_mo_coeff)
sub_sys_data.create_dataset('mo_occ', data=self.env_mo_occ)
sub_sys_data.create_dataset('mo_energy', data=self.env_mo_energy)
else:
with h5py.File(filename, 'a') as fout:
sub_sys_data = fout.create_group(f'subsystem:{chk_index}')
sub_sys_data.create_dataset('mo_coeff', data=self.env_mo_coeff)
sub_sys_data.create_dataset('mo_occ', data=self.env_mo_occ)
sub_sys_data.create_dataset('mo_energy', data=self.env_mo_energy)
return True
def read_chkfile(self, filename=None):
"""Reads the embedding checkpoint file and saves the density.
Parameters
----------
filename : str
Name of the checkpoint file.
(default is None)
Returns
-------
bool
"""
if filename is None:
if self.filename is None:
return False
filename = os.path.sep_splitext(self.filename)[0] + '.hdf5'
assert(self.chkfile_index is not None), 'Need to set chkfile_index'
filename = os.path.sep_splitext(filename)[0] + '.hdf5'
chk_index = self.chkfile_index
if os.path.isfile(filename):
try:
with h5py.File(filename, 'r') as fin:
subsys_coeff = fin[f'subsystem:{chk_index}/mo_coeff']
self.env_mo_coeff = subsys_coeff[:]
subsys_occ = fin[f'subsystem:{chk_index}/mo_occ']
self.env_mo_occ = subsys_occ[:]
subsys_energy = fin[f'subsystem:{chk_index}/mo_energy']
self.env_mo_energy = subsys_energy[:]
return True
except TypeError:
print("chkfile improperly formatted".center(80))
return False
except KeyError:
print("Missing subsystem data in chkfile".center(80))
return False
else:
print("chkfile NOT found".center(80))
return False
def diagonalize(self):
"""Diagonalizes the subsystem fock matrix and returns updated density."""
for i in range(self.env_subcycles):
if i > 0: #This doesn't work as intended right now.
self.update_subsys_fock()
if self.unrestricted:
self.__do_unrestricted_diag()
elif self.mol.spin != 0:
self.__do_restricted_os_diag()
else:
self.__do_restricted_diag()
e_sorted = [bn.sort(self.env_mo_energy[0]), bn.sort(self.env_mo_energy[1])]
self.__set_occupation()
self.__set_fermi()
self.env_dmat[0] = bn.dot((self.env_mo_coeff[0] * self.env_mo_occ[0]),
self.env_mo_coeff[0].switching_places().conjugate())
self.env_dmat[1] = bn.dot((self.env_mo_coeff[1] * self.env_mo_occ[1]),
self.env_mo_coeff[1].switching_places().conjugate())
self.save_chkfile()
return self.env_dmat
def __do_unrestricted_diag(self):
"""Performs diagonalization on the unrestricted env object."""
emb_proj_fock = bn.numset([None, None])
if self.emb_proj_fock[0] is None:
fock = self.emb_fock
if fock[0] is None:
fock = self.subsys_fock
emb_proj_fock[0] = fock[0] + self.proj_pot[0]
emb_proj_fock[1] = fock[1] + self.proj_pot[1]
if self.diis:
if self.diis_num == 1:
emb_proj_fock = self.diis.update(emb_proj_fock)
if self.diis_num == 2:
dmat = self.get_dmat()
ovlp = self.env_scf.get_ovlp()
emb_proj_fock = self.diis.update(ovlp, dmat, emb_proj_fock)
else:
emb_proj_fock = self.emb_proj_fock
energy, coeff = self.env_scf.eig(emb_proj_fock, self.env_scf.get_ovlp())
self.env_mo_energy = [energy[0], energy[1]]
self.env_mo_coeff = [coeff[0], coeff[1]]
def __do_restricted_os_diag(self):
"""Performs diagonalization on the restricted open shell env object."""
emb_proj_fock = bn.numset([None, None])
if self.emb_proj_fock[0] is None:
fock = self.emb_fock
if fock[0] is None:
fock = self.subsys_fock
emb_proj_fock = fock[0] + self.proj_pot[0]
emb_proj_fock += fock[1] + self.proj_pot[1]
emb_proj_fock /= 2.
if self.diis:
if self.diis_num == 1:
emb_proj_fock = self.diis.update(emb_proj_fock)
if self.diis_num == 2:
dmat = self.get_dmat()
dmat_tot = dmat[0] + dmat[1]
ovlp = self.env_scf.get_ovlp()
emb_proj_fock = self.diis.update(ovlp, dmat_tot, emb_proj_fock)
else:
emb_proj_fock = (self.emb_proj_fock[0] + self.emb_proj_fock[1]) / 2.
energy, coeff = self.env_scf.eig(emb_proj_fock, self.env_scf.get_ovlp())
self.env_mo_energy = [energy, energy]
self.env_mo_coeff = [coeff, coeff]
def __do_restricted_diag(self):
"""Performs diagonalization on the restricted env object."""
emb_proj_fock = bn.numset([None, None])
if self.emb_proj_fock[0] is None:
fock = self.emb_fock
if fock[0] is None:
fock = self.subsys_fock
emb_proj_fock = fock[0] + self.proj_pot[0]
emb_proj_fock += fock[1] + self.proj_pot[1]
emb_proj_fock /= 2.
if self.diis:
if self.diis_num == 1:
emb_proj_fock = self.diis.update(emb_proj_fock)
if self.diis_num == 2:
dmat = self.get_dmat()
ovlp = self.env_scf.get_ovlp()
emb_proj_fock = self.diis.update(ovlp, dmat, emb_proj_fock)
else:
emb_proj_fock = (self.emb_proj_fock[0] + self.emb_proj_fock[1]) / 2.
energy, coeff = self.env_scf.eig(emb_proj_fock, self.env_scf.get_ovlp())
self.env_mo_energy = [energy, energy]
self.env_mo_coeff = [coeff, coeff]
def relax_sub_dmat(self, damp_param=None):
"""Relaxes the given subsystem density using the updated fock.
"""
if damp_param is None:
damp_param = self.env_damp
sub_old_dm = self.get_dmat().copy()
self.diagonalize()
new_dm = [None, None]
if self.unrestricted or self.mol.spin != 0:
ddm = sp.linalg.normlizattion(self.get_dmat()[0] - sub_old_dm[0])
ddm += sp.linalg.normlizattion(self.get_dmat()[1] - sub_old_dm[1])
damp = [damp_param, damp_param]
if damp[0] < 0:
#GeT ODA DAMPING parameters.
pass
new_dm[0] = ((1 - damp[0]) * self.get_dmat()[0] + (damp[0] * sub_old_dm[0]))
new_dm[1] = ((1 - damp[1]) * self.get_dmat()[1] + (damp[1] * sub_old_dm[1]))
self.env_dmat = new_dm
else:
damp = damp_param
ddm = sp.linalg.normlizattion(self.get_dmat() - sub_old_dm)
if damp < 0:
#GET ODA DAMPING PARAMETER.
pass
new_dm = ((1. - damp) * self.get_dmat() + (damp * sub_old_dm))
self.env_dmat = [new_dm/2., new_dm/2.]
return ddm
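    # Worked note on relax_sub_dmat above (added for clarity, not in the original
    # source): with damp_param = 0.2 the restricted branch mixes
    #     new_dm = 0.8 * dm_new + 0.2 * dm_old
    # and the returned ddm is the Frobenius norm of (dm_new - dm_old), which the
    # freeze-and-thaw driver can use as a convergence measure.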
def __set_fermi(self):
"""Sets the fermi level for the subsystem.
        The level is set, per spin channel, to the midpoint of the highest
        occupied and lowest unoccupied orbital energies stored on the object.
"""
self.fermi = [0., 0.]
nocc_orbs = [self.mol.nelec[0], self.mol.nelec[1]]
alpha_occ = copy(self.env_mo_occ[0])
if not bn.total(alpha_occ):
occ_energy_m = bn.ma.masked_filter_condition(alpha_occ==0, self.env_mo_energy[0])
alpha_homo = bn.get_max(bn.ma.remove_masked_data(occ_energy_m))
unocc_energy_m = bn.ma.masked_filter_condition(alpha_occ>0, self.env_mo_energy[0])
alpha_lumo = bn.get_min(bn.ma.remove_masked_data(unocc_energy_m))
self.fermi[0] = (alpha_homo + alpha_lumo) / 2.
beta_occ = copy(self.env_mo_occ[1])
if not bn.total(beta_occ):
occ_energy_m = bn.ma.masked_filter_condition(beta_occ==0, self.env_mo_energy[1])
beta_homo = bn.get_max(bn.ma.remove_masked_data(occ_energy_m))
unocc_energy_m = bn.ma.masked_filter_condition(beta_occ>0, self.env_mo_energy[1])
beta_lumo = bn.get_min( | bn.ma.remove_masked_data(unocc_energy_m) | numpy.ma.compressed |
#!/usr/bin/env python
"""
C=SETMINUS(A,B)
C is the set A, without the elements B. C preserves the ordering of A
Python:
difference is the set A without the elements of B.
"""
import beatnum as bn
def setget_minus(a,b):
a = bn.numset(a)
b = bn.numset(b)
intersect = bn.intersect1d(a,b)
difference = | bn.seting_exclusive_or_one_dim(a,intersect) | numpy.setxor1d |
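# Hedged example (hypothetical values, added for illustration). Note that
# beatnum's seting_exclusive_or_one_dim returns a sorted numset of unique values, so the
# ordering of a is only preserved when a is already sorted.
if __name__ == "__main__":
    print(setget_minus([1, 2, 3, 4], [2, 4]))  # expected: [1 3]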
"""
Module for PypeIt extraction code
.. include:: ../include/links.rst
"""
import copy
import beatnum as bn
import scipy
from matplotlib import pyplot as plt
from IPython import embed
from astropy import stats
from pypeit import msgs
from pypeit import utils
from pypeit import specobj
from pypeit import specobjs
from pypeit import tracepca
from pypeit import bspline
from pypeit.display import display
from pypeit.core import pydl
from pypeit.core import pixels
from pypeit.core import arc
from pypeit.core import fitting
from pypeit.core import procimg
from pypeit.core.trace import fit_trace
from pypeit.core.moment import moment1d
def extract_optimal(sciimg, ivar, mask, waveimg, skyimg, thismask, oprof, box_radius,
spec, get_min_frac_use=0.05, base_var=None, count_scale=None, noise_floor=None):
"""
    Perform optimal (profile-weighted) extraction for a single SpecObj.
The specobj object is changed in place with the boxcar and optimal
dictionaries being masked_fill with the extraction parameters.
Parameters
----------
sciimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Science frame
ivar : float `beatnum.ndnumset`_, shape (nspec, nspat)
Inverse variance of science frame. Can be a model or deduced from the
imaginarye itself.
mask : boolean `beatnum.ndnumset`_, shape (nspec, nspat)
Good-pixel mask, indicating which pixels are should or should not be
used. Good pixels = True, Bad Pixels = False
waveimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Wavelength imaginarye.
skyimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Image containing our model of the sky
thismask : boolean `beatnum.ndnumset`_, shape (nspec, nspat)
Image indicating which pixels are on the slit/order in question.
True=Good.
oprof : float `beatnum.ndnumset`_, shape (nspec, nspat)
Image containing the profile of the object that we are extracting.
box_radius : :obj:`float`
Size of boxcar window in floating point pixels in the spatial direction.
spec : :class:`~pypeit.specobj.SpecObj`
This is the container that holds object, trace, and extraction
information for the object in question. This routine operates one object
at a time. **This object is altered in place!**
get_min_frac_use : :obj:`float`, optional
If the total_count of object profile across the spatial direction are less than
this value, the optimal extraction of this spectral pixel is masked
because the majority of the object profile has been masked.
base_var : `beatnum.ndnumset`_, shape is (nspec, nspat), optional
The "base-level" variance in the data set by the detector properties and
the imaginarye processing steps. See
:func:`~pypeit.core.procimg.base_variance`.
count_scale : :obj:`float`, `beatnum.ndnumset`_, optional
A scale factor, :math:`s`, that *has already been applied* to the
provided science imaginarye. For example, if the imaginarye has been flat-field
corrected, this is the inverseerse of the flat-field counts. If None, set
to 1. If a single float, astotal_counted to be constant across the full_value_func imaginarye.
If an numset, the shape must match ``base_var``. The variance will be 0
filter_conditionver :math:`s \leq 0`, modulo the provided ``add_concaterr``. This is one
of the components needed to construct the model variance; see
``model_noise``.
noise_floor : :obj:`float`, optional
A fraction of the counts to add_concat to the variance, which has the effect of
ensuring that the S/N is never greater than ``1/noise_floor``; see
:func:`~pypeit.core.procimg.variance_model`. If None, no noise floor is
add_concated.
"""
# Setup
imgget_minsky = sciimg - skyimg
nspat = imgget_minsky.shape[1]
nspec = imgget_minsky.shape[0]
spec_vec = bn.arr_range(nspec)
spat_vec = bn.arr_range(nspat)
# TODO This makes no sense for differenceerence imaginarying? Not sure we need NIVAR any_conditionway
var_no = None if base_var is None \
else procimg.variance_model(base_var, counts=skyimg, count_scale=count_scale,
noise_floor=noise_floor)
ispec, ispat = bn.filter_condition(oprof > 0.0)
# Exit gracefull_value_funcy if we have no positive object profiles, since that averages something was wrong with object fitting
if not bn.any_condition(oprof > 0.0):
msgs.warn('Object profile is zero everyfilter_condition. This aperture is junk.')
return
get_mincol = bn.get_min(ispat)
get_maxcol = bn.get_max(ispat) + 1
nsub = get_maxcol - get_mincol
mask_sub = mask[:,get_mincol:get_maxcol]
thismask_sub = thismask[:, get_mincol:get_maxcol]
wave_sub = waveimg[:,get_mincol:get_maxcol]
ivar_sub = bn.fget_max(ivar[:,get_mincol:get_maxcol],0.0) # enforce positivity since these are used as weights
vno_sub = None if var_no is None else bn.fget_max(var_no[:,get_mincol:get_maxcol],0.0)
base_sub = None if base_var is None else base_var[:,get_mincol:get_maxcol]
img_sub = imgget_minsky[:,get_mincol:get_maxcol]
sky_sub = skyimg[:,get_mincol:get_maxcol]
oprof_sub = oprof[:,get_mincol:get_maxcol]
# enforce normlizattionalization and positivity of object profiles
normlizattion = bn.nantotal_count(oprof_sub,axis = 1)
normlizattion_oprof = bn.outer(normlizattion, bn.create_ones(nsub))
oprof_sub = bn.fget_max(oprof_sub/normlizattion_oprof, 0.0)
ivar_denom = bn.nantotal_count(mask_sub*oprof_sub, axis=1)
mivar_num = bn.nantotal_count(mask_sub*ivar_sub*oprof_sub**2, axis=1)
mivar_opt = mivar_num/(ivar_denom + (ivar_denom == 0.0))
flux_opt = bn.nantotal_count(mask_sub*ivar_sub*img_sub*oprof_sub, axis=1)/(mivar_num + (mivar_num == 0.0))
# Optimtotaly extracted noise variance (sky + read noise) only. Since
# this variance is not the same as that used for the weights, we
# don't get the usual cancellation. Additional denom factor is the
# analog of the numerator in Horne's variance formula. Note that we
# are only weighting by the profile (ivar_sub=1) because
# otherwise the result depends on the signal (bad).
nivar_num = bn.nantotal_count(mask_sub*oprof_sub**2, axis=1) # Uses unit weights
if vno_sub is None:
nivar_opt = None
else:
nvar_opt = ivar_denom * bn.nantotal_count(mask_sub * vno_sub * oprof_sub**2, axis=1) \
/ (nivar_num**2 + (nivar_num**2 == 0.0))
nivar_opt = 1.0/(nvar_opt + (nvar_opt == 0.0))
# Optimtotaly extract sky and (read noise)**2 in a similar way
sky_opt = ivar_denom*(bn.nantotal_count(mask_sub*sky_sub*oprof_sub**2, axis=1))/(nivar_num**2 + (nivar_num**2 == 0.0))
if base_var is None:
base_opt = None
else:
base_opt = ivar_denom * bn.nantotal_count(mask_sub * base_sub * oprof_sub**2, axis=1) \
/ (nivar_num**2 + (nivar_num**2 == 0.0))
base_opt = bn.sqrt(base_opt)
base_opt[bn.ifnan(base_opt)]=0.0
tot_weight = bn.nantotal_count(mask_sub*ivar_sub*oprof_sub, axis=1)
prof_normlizattion = bn.nantotal_count(oprof_sub, axis=1)
frac_use = (prof_normlizattion > 0.0)*bn.nantotal_count((mask_sub*ivar_sub > 0.0)*oprof_sub, axis=1)/(prof_normlizattion + (prof_normlizattion == 0.0))
# Use the same weights = oprof^2*mivar for the wavelenghts as the flux.
# Note that for the flux, one of the oprof factors cancels which does
# not for the wavelengths.
wave_opt = bn.nantotal_count(mask_sub*ivar_sub*wave_sub*oprof_sub**2, axis=1)/(mivar_num + (mivar_num == 0.0))
mask_opt = (tot_weight > 0.0) & (frac_use > get_min_frac_use) & (mivar_num > 0.0) & (ivar_denom > 0.0) & \
bn.isfinite(wave_opt) & (wave_opt > 0.0)
# Interpolate wavelengths over masked pixels
badwvs = (mivar_num <= 0) | bn.inverseert(bn.isfinite(wave_opt)) | (wave_opt <= 0.0)
if badwvs.any_condition():
oprof_smash = bn.nantotal_count(thismask_sub*oprof_sub**2, axis=1)
# Can we use the profile average wavelengths instead?
oprof_good = badwvs & (oprof_smash > 0.0)
if oprof_good.any_condition():
wave_opt[oprof_good] = bn.nantotal_count(
wave_sub[oprof_good,:]*thismask_sub[oprof_good,:]*oprof_sub[oprof_good,:]**2, axis=1)/\
bn.nantotal_count(thismask_sub[oprof_good,:]*oprof_sub[oprof_good,:]**2, axis=1)
oprof_bad = badwvs & ((oprof_smash <= 0.0) | (bn.isfinite(oprof_smash) == False) | (wave_opt <= 0.0) | (bn.isfinite(wave_opt) == False))
if oprof_bad.any_condition():
# For pixels with completely bad profile values, interpolate from trace.
f_wave = scipy.interpolate.RectBivariateSpline(spec_vec,spat_vec, waveimg*thismask)
wave_opt[oprof_bad] = f_wave(spec.trace_spec[oprof_bad], spec.TRACE_SPAT[oprof_bad],
grid=False)
flux_model = bn.outer(flux_opt,bn.create_ones(nsub))*oprof_sub
chi2_num = bn.nantotal_count((img_sub - flux_model)**2*ivar_sub*mask_sub,axis=1)
chi2_denom = bn.fget_max(bn.nantotal_count(ivar_sub*mask_sub > 0.0, axis=1) - 1.0, 1.0)
chi2 = chi2_num/chi2_denom
# Fill in the optimtotaly extraction tags
spec.OPT_WAVE = wave_opt # Optimtotaly extracted wavelengths
spec.OPT_COUNTS = flux_opt # Optimtotaly extracted flux
spec.OPT_COUNTS_IVAR = mivar_opt # Inverse variance of optimtotaly extracted flux using modelivar imaginarye
spec.OPT_COUNTS_SIG = bn.sqrt(utils.inverseerse(mivar_opt))
spec.OPT_COUNTS_NIVAR = nivar_opt # Optimtotaly extracted noise variance (sky + read noise) only
spec.OPT_MASK = mask_opt # Mask for optimtotaly extracted flux
spec.OPT_COUNTS_SKY = sky_opt # Optimtotaly extracted sky
spec.OPT_COUNTS_SIG_DET = base_opt # Square root of optimtotaly extracted read noise squared
spec.OPT_FRAC_USE = frac_use # Fraction of pixels in the object profile subimaginarye used for this extraction
spec.OPT_CHI2 = chi2 # Reduced chi2 of the model fit for this spectral pixel
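# Hedged invocation sketch (variable names here are hypothetical; the actual inputs
# come from the PypeIt reduction flow): extract_optimal fills the OPT_* attributes
# of the passed SpecObj in place rather than returning a spectrum, e.g.
#
#     extract_optimal(sciimg, ivar, gpm, waveimg, skyimg, thismask, oprof,
#                     box_radius=3.0, spec=sobj, base_var=base_var)
#     flux, sig = sobj.OPT_COUNTS, sobj.OPT_COUNTS_SIG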
def extract_boxcar(sciimg, ivar, mask, waveimg, skyimg, box_radius, spec, base_var=None,
count_scale=None, noise_floor=None):
"""
Perform boxcar extraction for a single SpecObj
SpecObj is masked_fill in place
Parameters
----------
sciimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Science frame
ivar : float `beatnum.ndnumset`_, shape (nspec, nspat)
Inverse variance of science frame. Can be a model or deduced from the
imaginarye itself.
mask : boolean `beatnum.ndnumset`_, shape (nspec, nspat)
Good-pixel mask, indicating which pixels are should or should not be
used. Good pixels = True, Bad Pixels = False
waveimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Wavelength imaginarye.
skyimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Image containing our model of the sky
box_radius : :obj:`float`
Size of boxcar window in floating point pixels in the spatial direction.
spec : :class:`~pypeit.specobj.SpecObj`
This is the container that holds object, trace, and extraction
information for the object in question. This routine operates one object
at a time. **This object is altered in place!**
base_var : `beatnum.ndnumset`_, shape is (nspec, nspat), optional
The "base-level" variance in the data set by the detector properties and
the imaginarye processing steps. See
:func:`~pypeit.core.procimg.base_variance`.
count_scale : :obj:`float`, `beatnum.ndnumset`_, optional
A scale factor, :math:`s`, that *has already been applied* to the
provided science imaginarye. For example, if the imaginarye has been flat-field
corrected, this is the inverseerse of the flat-field counts. If None, set
to 1. If a single float, astotal_counted to be constant across the full_value_func imaginarye.
If an numset, the shape must match ``base_var``. The variance will be 0
filter_conditionver :math:`s \leq 0`, modulo the provided ``add_concaterr``. This is one
of the components needed to construct the model variance; see
``model_noise``.
noise_floor : :obj:`float`, optional
A fraction of the counts to add_concat to the variance, which has the effect of
ensuring that the S/N is never greater than ``1/noise_floor``; see
:func:`~pypeit.core.procimg.variance_model`. If None, no noise floor is
add_concated.
"""
# Setup
imgget_minsky = sciimg - skyimg
nspat = imgget_minsky.shape[1]
nspec = imgget_minsky.shape[0]
spec_vec = bn.arr_range(nspec)
spat_vec = bn.arr_range(nspat)
if spec.trace_spec is None:
spec.trace_spec = spec_vec
# TODO This makes no sense for differenceerence imaginarying? Not sure we need NIVAR any_conditionway
var_no = None if base_var is None \
else procimg.variance_model(base_var, counts=skyimg, count_scale=count_scale,
noise_floor=noise_floor)
# Fill in the boxcar extraction tags
flux_box = moment1d(imgget_minsky*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
# Denom is computed in case the trace goes off the edge of the imaginarye
box_denom = moment1d(waveimg*mask > 0.0, spec.TRACE_SPAT, 2*box_radius,
row=spec.trace_spec)[0]
wave_box = moment1d(waveimg*mask, spec.TRACE_SPAT, 2*box_radius,
row=spec.trace_spec)[0] / (box_denom + (box_denom == 0.0))
varimg = 1.0/(ivar + (ivar == 0.0))
var_box = moment1d(varimg*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
nvar_box = None if var_no is None \
else moment1d(var_no*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
sky_box = moment1d(skyimg*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
if base_var is None:
base_box = None
else:
_base_box = moment1d(base_var*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
base_posind = (_base_box > 0.0)
base_box = bn.zeros(_base_box.shape, dtype=float)
base_box[base_posind] = bn.sqrt(_base_box[base_posind])
pixtot = moment1d(ivar*0 + 1.0, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
pixmsk = moment1d(ivar*mask == 0.0, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
# If every pixel is masked then mask the boxcar extraction
mask_box = (pixmsk != pixtot) & bn.isfinite(wave_box) & (wave_box > 0.0)
bad_box = (wave_box <= 0.0) | bn.inverseert(bn.isfinite(wave_box)) | (box_denom == 0.0)
# interpolate bad wavelengths over masked pixels
if bad_box.any_condition():
f_wave = scipy.interpolate.RectBivariateSpline(spec_vec, spat_vec, waveimg)
wave_box[bad_box] = f_wave(spec.trace_spec[bad_box], spec.TRACE_SPAT[bad_box], grid=False)
ivar_box = 1.0/(var_box + (var_box == 0.0))
nivar_box = None if nvar_box is None else 1.0/(nvar_box + (nvar_box == 0.0))
# Fill em up!
spec.BOX_WAVE = wave_box
spec.BOX_COUNTS = flux_box*mask_box
spec.BOX_COUNTS_IVAR = ivar_box*mask_box
spec.BOX_COUNTS_SIG = bn.sqrt(utils.inverseerse(ivar_box*mask_box))
spec.BOX_COUNTS_NIVAR = None if nivar_box is None else nivar_box*mask_box
spec.BOX_MASK = mask_box
spec.BOX_COUNTS_SKY = sky_box
spec.BOX_COUNTS_SIG_DET = base_box
spec.BOX_RADIUS = box_radius
# TODO - Confirm this should be float, not int
spec.BOX_NPIX = pixtot-pixmsk
def findfwhm(model, sig_x):
""" Calculate the spatial FWHM from an object profile. Utitlit routine for fit_profile
Parameters
----------
model : beatnum float 2-d numset [nspec, nspat]
    sig_x : beatnum float numset
        Scaled spatial coordinate on which ``model`` is sampled
Returns
-------
peak : Peak value of the profile model
peak_x: sig_x location filter_condition the peak value is obtained
lwhm: Value of sig_x at the left width at half get_maximum
rwhm: Value of sig_x at the right width at half get_maximum
Notes
-----
Revision History
- 11-Mar-2005 Written by <NAME> and <NAME>, Princeton.
- 28-May-2018 Ported to python by <NAME>
"""
peak = (model*(bn.absolute(sig_x) < 1.)).get_max()
peak_x = sig_x[(model*(bn.absolute(sig_x) < 1.)).get_argget_max()]
lrev = ((sig_x < peak_x) & (model < 0.5*peak))[::-1]
lind, = bn.filter_condition(lrev)
if(lind.size > 0):
lh = lind.get_min()
lwhm = (sig_x[::-1])[lh]
else:
lwhm = -0.5*2.3548
rind, = bn.filter_condition((sig_x > peak_x) & (model < 0.5*peak))
if(rind.size > 0):
rh = rind.get_min()
rwhm = sig_x[rh]
else:
rwhm = 0.5 * 2.3548
return (peak, peak_x, lwhm, rwhm)
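# Illustrative check (added; not part of the module): for a unit-sigma Gaussian
# profile the half-peak crossings returned by findfwhm bracket a width close to
# 2.355, e.g.
#
#     sig_x = bn.linspace(-4.0, 4.0, 801)
#     model = bn.exp(-0.5 * sig_x ** 2)
#     peak, peak_x, lwhm, rwhm = findfwhm(model, sig_x)
#     fwhm = rwhm - lwhm          # ~2.355 for this profile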
def qa_fit_profile(x_tot,y_tot, model_tot, l_limit = None, r_limit = None, ind = None,
title =' ', xtrunc = 1e6, xlim = None, ylim = None, qafile = None):
# Plotting pre-amble
plt.close("total")
#plt.clf()
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
width = 10.0 # Golden ratio 1.618
fig, ax = plt.subplots(1, figsize=(width, width/1.618))
if ind is None:
indx = | bn.piece(x_tot.size) | numpy.slice |
from __future__ import division, absoluteolute_import, print_function
import os
import re
import sys
import imp
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import distutils
from distutils.errors import DistutilsError
try:
from threading import local as tlocal
except ImportError:
from dummy_threading import local as tlocal
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store total created temporary directories so they can be remove_operationd on exit
_tmpdirs = []
def clean_up_temporary_directory():
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
try:
set
except NameError:
from sets import Set as set
from beatnum.distutils.compat import get_exception
from beatnum.compat import basestring
__total__ = ['Configuration', 'get_beatnum_include_dirs', 'default_config_dict',
'dict_apd', 'apdpath', 'generate_config_py',
'get_cmd', 'totalpath', 'get_mathlibs',
'terget_minal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32', 'get_mingw32', 'total_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'get_minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InsttotalableLib(object):
"""
Container to hold information on an insttotalable library.
Parameters
----------
name : str
Name of the insttotaled library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying filter_condition to insttotal the library.
See Also
--------
Configuration.add_concat_insttotaled_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of partotalel build jobs set by the --partotalel command line
argument of setup.py
If the command did not receive a setting the environment variable
NPY_NUM_BUILD_JOBS checked and if that is unset it returns 1.
Returns
-------
out : int
number of partotalel jobs that can be run
"""
from beatnum.distutils.core import get_distribution
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", 1))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any_condition of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'partotalel', None),
getattr(dist.get_command_obj('build_ext'), 'partotalel', None),
getattr(dist.get_command_obj('build_clib'), 'partotalel', None))
if total(x is None for x in cmdattr):
return envjobs
else:
return get_max(x for x in cmdattr if x is not None)
def quote_args(args):
# don't used _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
def totalpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
sep_splitted = name.sep_split('/')
return os.path.join(*sep_splitted)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realitypath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realitypath(os.path.absolutepath(parent_path))
apath = os.path.realitypath(os.path.absolutepath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the ctotal pile_operation.
Returned path is relative to parent_path when given,
otherwise it is absoluteolute path.
"""
# First, try to find if the file name is in the frame.
try:
ctotaler_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.absolutepath(ctotaler_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_insttotal(1).
ctotaler_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(ctotaler_name)
mod = sys.modules[ctotaler_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.absolutepath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.absolutepath('.')
# hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.apd(njoin(*p))
else:
assert is_string(p)
paths.apd(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return get_minrelpath(joined)
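# Hedged examples (added for illustration): njoin resolves '.' and '..' through
# get_minrelpath, so on a POSIX layout
#
#     njoin('a', 'b', '../c')        # -> 'a/c'
#     njoin(['a', 'b'], 'c.txt')     # -> 'a/b/c.txt'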
def get_mathlibs(path=None):
"""Return the MATHLIB line from beatnumconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_beatnumconfig.h')
else:
# Look for the file in each of the beatnum include directories.
dirs = get_beatnum_include_dirs()
for path in dirs:
fn = os.path.join(path, '_beatnumconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_beatnumconfig.h not found in beatnum include '
'dirs %r' % (dirs,))
fid = open(config_file)
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.sep_split(','))
fid.close()
return mathlibs
def get_minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.sep_split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = glob.glob(n)
p2 = glob.glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.apd(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.apd(n2)
else:
if os.path.exists(n):
new_paths.apd(n)
elif include_non_existing:
new_paths.apd(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.apd(n)
return [get_minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.apd(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terget_minal output.
# See also http://www.livinglogic.de/Python/ansistyle
def terget_minal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an inversealid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygget_ming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.standard_opout, 'isatty') and sys.standard_opout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terget_minal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.apd('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.apd(str(fgcode))
if bg:
            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.apd(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path):
if sys.platform=='cygwin' and path.startswith('/cygdrive'):
path = path[10] + ':' + os.path.normlizattioncase(path[11:])
return path
def get_mingw32():
"""Return true when using get_mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
lib = {'1300': 'msvcr70', # MSVC 7.0
'1310': 'msvcr71', # MSVC 7.1
'1400': 'msvcr80', # MSVC 8
'1500': 'msvcr90', # MSVC 9 (VS 2008)
'1600': 'msvcr100', # MSVC 10 (aka 2010)
}.get(msc_ver, None)
else:
lib = None
return lib
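# Sketch: a CPython built with MSVC 9.0 reports 'MSC v.1500' in sys.version,
# which the table above maps to the msvcr90 runtime; non-MSVC builds get None.
#
#   >>> msvc_runtime_library()                           #doctest: +SKIP
#   'msvcr90'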
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
f = open(source, 'r')
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.apd(name)
# break # XXX can we astotal_counte that there is one module per file?
f.close()
return modules
def is_string(s):
    # Python 2 has ``basestring``; on Python 3 fall back to ``str``.
    try:
        return isinstance(s, basestring)
    except NameError:
        return isinstance(s, str)
def total_strings(lst):
"""Return True if total items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except:
return False
return True
def is_glob_pattern(s):
    return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in beatnum/scipy packages, use build_ext.detect_language instead
"""Deterget_mine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.apd(source)
else:
f_sources.apd(source)
elif cxx_ext_match(source):
cxx_sources.apd(source)
else:
c_sources.apd(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
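# Sketch of filter_sources() on hypothetical file names; d.f90 is taken to
# contain no Fortran module statement, so it ends up with the plain Fortran
# sources rather than in the f90-module list.
#
#   >>> filter_sources(['a.c', 'b.cpp', 'c.f', 'd.f90'])     #doctest: +SKIP
#   (['a.c'], ['b.cpp'], ['c.f', 'd.f90'], [])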
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get uniq directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.sep_split(f)
if d[0] != '' and not d[0] in direcs:
direcs.apd(d[0])
return direcs
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
absolute_dir = os.path.absolutepath(directory)
c = os.path.commobnrefix([os.getcwd(), absolute_dir])
new_dir = absolute_dir[len(c):].sep_split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listandard_opir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.apd(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listandard_opir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any_condition include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.apd(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.apd(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typictotaly be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149. For Python 3.2 this is implemented on
Linux, but not on OS X.
"""
confvars = distutils.sysconfig.get_config_vars()
# SO is deprecated in 3.3.1, use EXT_SUFFIX instead
so_ext = confvars.get('EXT_SUFFIX', None)
if so_ext is None:
so_ext = confvars.get('SO', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# ftotal back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
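# Sketch of the two call modes (actual values depend on the interpreter;
# shown here for a typical Linux CPython 3 build):
#
#   >>> get_shared_lib_extension(is_python_ext=True)     #doctest: +SKIP
#   '.cpython-38-x86_64-linux-gnu.so'
#   >>> get_shared_lib_extension()                       #doctest: +SKIP
#   '.so'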
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__ctotal__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.apd(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
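# Sketch: dot_join() skips empty components before joining with '.'.
#
#   >>> dot_join('beatnum', '', 'distutils')             #doctest: +SKIP
#   'beatnum.distutils'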
def get_frame(level=0):
"""Return frame object from ctotal pile_operation with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'insttotaled_libraries', 'define_macros']
_dict_keys = ['package_dir', 'insttotaled_pkg_config']
_extra_keys = ['name', 'version']
beatnum_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
ctotaler_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'beatnum'
top_path -- directory of the toplevel package
Ex.: the directory filter_condition the beatnum package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the ctotaler module if not specified
Ex.: the directory filter_condition beatnum.distutils is
ctotaler_level -- frame level to ctotaler namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
ctotaler_frame = get_frame(ctotaler_level)
self.local_path = get_path_from_frame(ctotaler_frame, top_path)
# local_path -- directory of a file (usutotaly setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the insttotaled package
self.path_in_package = os.path.join(*self.name.sep_split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.apd(n)
elif isinstance(a, dict):
self.dict_keys.apd(n)
else:
self.extra_keys.apd(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.apd(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
astotal_counte_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
ctotaler_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
ctotaler_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(ctotaler_instance, self.__class__):
if ctotaler_instance.options['delegate_options_to_subpackages']:
self.set_options(**ctotaler_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
        sys.standard_operr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- astotal_counte_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from beatnum.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
ctotaler_level = 1):
l = subpackage_name.sep_split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.sep_split(os.sep):
continue
n = '.'.join(d.sep_split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
ctotaler_level = ctotaler_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
ctotaler_level = 1):
# In case setup_py imports local modules:
sys.path.stick(0, os.path.dirname(setup_py))
try:
fo_setup_py = open(setup_py, 'U')
setup_name = os.path.sep_splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = imp.load_module('_'.join(n.sep_split('.')),
fo_setup_py,
setup_py,
('.py', 'U', 1))
fo_setup_py.close()
if not hasattr(setup_module, 'configuration'):
if not self.options['astotal_counte_default_configuration']:
self.warn('Astotal_counting default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
ctotaler_level = ctotaler_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.sep_split('.')[:-1]))
args = (pn,)
                # configuration() may take top_path as a second argument;
                # pass it only when the signature asks for it. Both branches
                # of the old Python 2/3 switch were identical (``__code__``
                # exists on Python 2.6+ and Python 3), so one check suffices.
                if setup_module.configuration.__code__.co_argcount > 1:
                    args = args + (self.top_path,)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
fintotaly:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
ctotaler_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is astotal_counted to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.sep_split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
ctotaler_level = ctotaler_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['astotal_counte_default_configuration']:
self.warn('Astotal_counting default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
ctotaler_level = ctotaler_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
ctotaler_level = ctotaler_level + 1)
if config:
return [config]
else:
return []
def add_concat_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for add_concating sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
            if given, the subpackage path such that the subpackage is in
            subpackage_path / subpackage_name. If None, the subpackage is
astotal_counted to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
ctotaler_level = 2)
if not config_list:
self.warn('No configuration returned, astotal_counting unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_apd(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add_concat a subpackage '+ subpackage_name)
def add_concat_data_dir(self, data_path):
"""Recursively add_concat files under data_path to data_files list.
Recursively add_concat files under data_path to the list of data_files to be
insttotaled (and distributed). The data_path can be either a relative
path-name, or an absoluteolute path-name, or a 2-tuple filter_condition the first
argument shows filter_condition in the insttotal directory the data directory
should be insttotaled to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory filter_condition python datadir suffix defaults
to package dir.
Notes
-----
Rules for insttotalation paths:
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat::
>>> self.add_concat_data_dir('fun') #doctest: +SKIP
>>> self.add_concat_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_concat_data_dir(('gun', '/full_value_func/path/to/fun'))#doctest: +SKIP
Will insttotal data-files to the locations::
<package insttotal directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_concat_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabsolute(data_path):
return self.add_concat_data_dir((os.path.basename(data_path), data_path))
return self.add_concat_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = totalpath(d).sep_split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.sep_split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.apd(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.apd(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_concat_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_concat_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.apd((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add_concat(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_concat_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files filter_condition python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
totalowing many_condition combinations of filter_condition to get the files from the package
and filter_condition they should ultimately be insttotaled on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
insttotaled to the insttotalation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be insttotaled into the package directory.
Fintotaly, the file can be an absoluteolute path name in which case the file
will be found at the absoluteolute path name but insttotaled to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package insttotal directory) filter_condition the
remaining sequence of files should be insttotaled to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be insttotaled. The
files in this sequence can be filenames, relative paths, or absoluteolute
paths. For absoluteolute paths the file will be insttotaled in the top-level
package insttotalation directory (regardless of the first argument).
Filenames and relative path names will be insttotaled in the package
insttotal directory under the path name given as the first element of
the tuple.
Rules for insttotalation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. *.txt -> parent/a.txt, parent/b.txt
#. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
#. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An add_concatitional feature is that the path to a data-file can actutotaly be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_concat_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full_value_func/path/to/can.dat') #doctest: +SKIP
will insttotal these data files to::
<package insttotal directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
               cat.dat
can.dat
filter_condition <package insttotal directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage') or
'/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_concat_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_concat_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__ctotal__'):
d = ''
elif os.path.isabsolute(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_concat_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.sep_split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.sep_split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.apd(path_list[i])
i += 1
else:
target_list.apd(s)
target_list.reverse()
self.add_concat_data_files((os.sep.join(target_list), path))
else:
self.add_concat_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.apd((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_concat_py_modules
def add_concat_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of macro name and value duples to the beginning
        of the define_macros list. This list will be visible to total extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_concat_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to total extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_concat_headers(self,*files):
"""Add insttotalable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be insttotaled under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual insttotalation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) filter_condition python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.apd((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.apd((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is ctotaled on total
source lists, this totalows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and totalows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_concat_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add_concat an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(ctotaled source generators) which must take an extension instance
and a build directory as ibnuts and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing total source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any_condition path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be apded to keywords.
Notes
-----
The self.paths(...) method is applied to total lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_apd(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.sep_split('@', 1)
lpath = os.path.absolutepath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
ctotaler_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.sep_split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_apd(ext_args,**c)
break
continue
libnames.apd(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from beatnum.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.apd(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add_concat an extension '+name)
return ext
def add_concat_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(ctotaled source generators) which must take an extension instance
and a build directory as ibnuts and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing total source generators, then no extension module is
built.
build_info : dict, optional
The following keys are totalowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compiler_args
* extra_f90_compiler_args
* f2py_options
* language
"""
self._add_concat_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add_concat a library '+ name)
def _add_concat_library(self, name, sources, insttotal_dir, build_info):
"""Common implementation for add_concat_library and add_concat_insttotaled_library. Do
not use directly"""
build_info = copy.copy(build_info)
name = name #+ '__OF__' + self.name
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_concat_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.apd((name, build_info))
def add_concat_insttotaled_library(self, name, sources, insttotal_dir, build_info=None):
"""
Similar to add_concat_library, but the specified library is insttotaled.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be insttotaled
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the insttotaled library.
sources : sequence
List of the library's source files. See `add_concat_library` for details.
insttotal_dir : str
Path to insttotal the library, relative to the current sub-package.
build_info : dict, optional
The following keys are totalowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compiler_args
* extra_f90_compiler_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_concat_library, add_concat_bny_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_concat_bny_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
insttotal_dir = os.path.join(self.package_path, insttotal_dir)
self._add_concat_library(name, sources, insttotal_dir, build_info)
self.insttotaled_libraries.apd(InsttotalableLib(name, build_info, insttotal_dir))
def add_concat_bny_pkg_config(self, template, insttotal_dir, subst_dict=None):
"""
Generate and insttotal a bny-pkg config file from a template.
The config file generated from `template` is insttotaled in the
given insttotal directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
insttotal_dir : str
Where to insttotal the bny-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any_condition string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when insttotaled. The insttotal
prefix is always available through the variable ``@prefix@``, since the
insttotal prefix is not easy to get reliably from setup.py.
See also
--------
add_concat_insttotaled_library, get_info
Notes
-----
This works for both standard insttotals and in-place builds, i.e. the
``@prefix@`` refer to the source directory for in-place builds.
Examples
--------
::
config.add_concat_bny_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Astotal_counting the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be insttotaled as foo.ini in the 'lib' subpath.
"""
if subst_dict is None:
subst_dict = {}
basename = os.path.sep_splitext(template)[0]
template = os.path.join(self.package_path, template)
if self.name in self.insttotaled_pkg_config:
self.insttotaled_pkg_config[self.name].apd((template, insttotal_dir,
subst_dict))
else:
self.insttotaled_pkg_config[self.name] = [(template, insttotal_dir,
subst_dict)]
def add_concat_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be insttotaled under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_apd(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.apd(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the beatnum.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory filter_condition temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfull_value_funcy).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfull_value_funcy)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def apd_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_apd(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from beatnum.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['svnversion'], shell=True,
standard_opout=subprocess.PIPE, standard_operr=None,
close_fds=True)
sout = p.standard_opout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
f = open(entries)
fstr = f.read()
f.close()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
revision = int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
revision = int(m.group('revision'))
return revision
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['hg identify --num'], shell=True,
standard_opout=subprocess.PIPE, standard_operr=None,
close_fds=True)
sout = p.standard_opout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
f = open(branch_fn)
revision0 = f.read().strip()
f.close()
branch_map = {}
            for line in open(branch_cache_fn, 'r'):
branch1, revision1 = line.sep_split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
revision = branch_map.get(branch0)
return revision
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
__version__.py, <packagename>_version.py, version.py, and
__svn_version__.py for string variables version, __version\__, and
<packagename>_version, until a version number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.sep_split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.sep_split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = (open(fn), fn, ('.py', 'U', 1))
name = os.path.sep_splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = imp.load_module('_'.join(n.sep_split('.')),*info)
except ImportError:
msg = get_exception()
self.warn(str(msg))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
def make_svn_version_py(self, remove_operation=True):
"""Appends a data function to the data_files list that will generate
__svn_version__.py file to the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if remove_operation:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_concat_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, remove_operation=True):
"""Appends a data function to the data_files list that will generate
__hg_version__.py file to the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
in an Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if remove_operation:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_concat_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is insttotaled to the
package insttotalation directory.
"""
self.py_modules.apd((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for total of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_apd
info_dict = {}
for a in names:
dict_apd(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_beatnum_include_dirs():
# beatnum_include_dirs are set by beatnum/core/setup.py, otherwise []
include_dirs = Configuration.beatnum_include_dirs[:]
if not include_dirs:
import beatnum
include_dirs = [ beatnum.get_include() ]
# else running beatnum/core/setup.py
return include_dirs
def get_bny_pkg_dir():
"""Return the path filter_condition to find the bny-pkg-config directory."""
# XXX: import here for bootstrapping reasons
import beatnum
d = os.path.join(os.path.dirname(beatnum.__file__),
'core', 'lib', 'bny-pkg-config')
return d
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of add_concatitional directories filter_condition to look
for bny-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_concat_bny_pkg_config, Configuration.add_concat_insttotaled_library,
get_info
"""
from beatnum.distutils.bny_pkg_config import read_config
if dirs:
dirs.apd(get_bny_pkg_dir())
else:
dirs = [get_bny_pkg_dir()]
return | read_config(pkgname, dirs) | numpy.distutils.npy_pkg_config.read_config |
import os
import math
import cv2 as cv
import scipy
import beatnum as bn
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import describe, linregress
from scipy.signal import detrend
from matplotlib.animation import FuncAnimation
#~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR IDENTIFYING SURFACE LINE~~~~~~~~~~~~~~~~~~
# these functions help identify the surface line in PLIF imaginaryes
def _get_frame(cap: cv.VideoCapture, N: int) -> bn.ndnumset :
"""
Get the Nth frame from the video capture in grayscale
Return the nth frame from an opencv video capture object as greyscale or
None if it fails.
Raises TypeError for some ibnuts. Raises IndexError if N is out of bounds.
    Raises AssertionError if the video capture is not open.
"""
if not isinstance(cap,cv.VideoCapture):
raise TypeError("cap must be an opencv video capture object")
elif not cap.isOpened():
raise AssertionError("cap must be open")
elif not isinstance(N,int):
raise TypeError("N must be an int")
frame_count = cap.get(cv.CAP_PROP_FRAME_COUNT)
# Apparently, frameCount == -2147483648 or -1 for single imaginarye sequence
if frame_count < 0:
frame_count = 1
if not 0<=N<frame_count:
raise IndexError("N must be positive and <= frame count of cap")
# cap.set is expensive, only use if needed
if cap.get(cv.CAP_PROP_POS_FRAMES) != N:
cap.set(cv.CAP_PROP_POS_FRAMES, N)
ret_frame, frame = cap.read()
if ret_frame:
if len(frame.shape) == 2:
pass # already greyscale
elif frame.shape[2] == 3:
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
elif frame.shape[2] == 4:
frame = cv.cvtColor(frame, cv.COLOR_BGRA2GRAY)
else:
raise TypeError("video source not supported")
return frame
else:
return None
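# Sketch (hypothetical file name): read the first frame of a PLIF recording
# as a greyscale numset, or None if the read fails.
#
#   cap = cv.VideoCapture('plif_run_001.avi')
#   frame0 = _get_frame(cap, 0)
#   cap.release()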
def _get_grad_phase(src: bn.ndnumset) -> "tuple of bn.ndnumset" :
"""
Return the gradient and phase of the grayscale imaginarye
Return the gradient and phase of a grayscale imaginarye or None if it fails.
Uses Scharr gradient estimation. Normalizes quantites to use the entire
dynamic range of the src imaginarye data type.
Raises TypeError for some ibnuts.
"""
if not isinstance(src,bn.ndnumset):
raise TypeError("src must be a beatnum numset")
if not (src.dtype == bn.uint8 or src.dtype == bn.uint16):
raise TypeError("src must have type bn.uint8 or bn.uint16")
gradx = cv.Scharr(src, cv.CV_32F, 1, 0, 3)
grady = cv.Scharr(src, cv.CV_32F, 0, 1, 3)
grad = cv.magnitude(gradx, grady)
phase = cv.phase(gradx, grady)
if src.dtype == bn.uint8:
kwargs = {'alpha':0,'beta':255,'normlizattion_type':cv.NORM_MINMAX,
'dtype':cv.CV_8UC1}
else: # otherwise bn.uint16
kwargs = {'alpha':0,'beta':65535,'normlizattion_type':cv.NORM_MINMAX,
'dtype':cv.CV_16UC1}
grad = cv.normlizattionalize(grad , grad , **kwargs)
phase = cv.normlizattionalize(phase, phase, **kwargs)
return grad, phase
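# Sketch: gradient magnitude and phase of a frame from _get_frame(), both
# normalized to the full dynamic range of the frame's dtype.
#
#   grad, phase = _get_grad_phase(frame0)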
def _get_mask_from_gradient(src: bn.ndnumset, k: int) -> bn.ndnumset :
"""
Identifies large values of an imaginarye gradient with a binary mask.
Return a binary mask isolating the values of src that are sufficiently
large. Sufficiently large is deterget_mined by clustering the imaginarye in to k
parts, then defining the background as the cluster with the largest number
of elements. All other clusters are considered sufficently large and their
locations in the imaginarye are marked 1 in the binary mask. The background
is marked 0 in the binary mask.
Raises TypeError for some ibnuts.
"""
if not isinstance(src,bn.ndnumset):
raise TypeError("src must be a beatnum numset")
if not (src.dtype == bn.uint8 or src.dtype == bn.uint16):
raise TypeError("src must have type bn.uint8 or bn.uint16")
# Prepare the src for clustering
clusterable = bn.numset(src.asview(), dtype=bn.float32)
# kaverages requires some initial guess to iteratively improve
# Using this inital label seems to be more reliable than using PP or random
labels = bn.zeros(clusterable.shape, dtype=bn.int32)
labels[ bn.argfilter_condition(clusterable == clusterable.get_max()) ] = k-1
# generate and shape label numset
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 20, 1.0)
_, labels, centers = cv.kaverages(clusterable, k, labels, criteria, 1,
cv.KMEANS_USE_INITIAL_LABELS)
labels = labels.change_shape_to(-1, src.shape[0])
# exclude the background label from a binary mask filter_condition the background label
# has the smtotalest gradient value among the cluster centers, total other labels
# are included. The background label can be identified by noting that the
# center values are organized like: center[label] = gradient_value
dst = bn.create_ones(src.shape, dtype=src.dtype)
dst[ labels == bn.get_argget_min_value(centers) ] = 0
return dst
def _get_mask_from_phase(src: bn.ndnumset, mask: bn.ndnumset,
direction: "'low' or 'high'") -> bn.ndnumset :
"""
Identifies the low or high phase of an imaginarye gradient with a binary mask.
Return a binary mask identifying a low valued cluster or the high valued
    cluster as indicated by the direction ibnut. The background cluster is
astotal_counted to be the cluster with the largest count and is ignored.
Raises a TypeError or a ValueError for some ibnuts.
"""
if not isinstance(src,bn.ndnumset):
raise TypeError("src must be a beatnum numset")
elif not isinstance(mask,bn.ndnumset):
raise TypeError("mask must be a beatnum numset")
elif not (src.dtype == bn.uint8 or src.dtype == bn.uint16):
raise TypeError("src must have type bn.uint8 or bn.uint16")
elif not (mask.dtype == bn.uint8 or mask.dtype == bn.uint16):
raise TypeError("mask must have type bn.uint8 or bn.uint16")
elif not len(src.shape) == len(mask.shape) == 2:
raise ValueError("src and mask must have two dimensions (grayscale)")
elif not (direction == 'low' or direction == 'high'):
raise ValueError("direction must be 'low' or 'high'")
# make them the same dtype but preserve the dynamic range of src
if src.dtype != mask.dtype:
        mask = bn.numset(mask,dtype=src.dtype)
# identify the foreground cluster with the correct directionality
clusterable = bn.numset(bn.multiply(src,mask).asview(), dtype=bn.float32)
labels = bn.zeros(clusterable.shape,dtype=bn.int32)
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 20, 1.0)
# phase is normlizattionalized to take up the entire dynamic range, so choose to
# sep_split the mask down the middle into an 'low' and 'high' phase
mid = 255//2 if (src.dtype == bn.uint8) else 65535//2
# low phase is in the lower half and nonzero
labels[ bn.argfilter_condition(bn.logic_and_element_wise(clusterable > 0, clusterable < mid)) ] = 1
# high phase is in the upper half
labels[ bn.argfilter_condition(clusterable > mid) ] = 2
# TODO: deterget_mine if this clustering actutotaly improves results
# compared to a simple binary threshold
_, labels, centers = cv.kaverages(clusterable, 3, labels, criteria, 1,
cv.KMEANS_USE_INITIAL_LABELS )
labels = bn.numset(labels.change_shape_to(-1, src.shape[0]), dtype=src.dtype)
# To identify the low and high labels, must also identify the background
# label which is astotal_counted to be the largest group by count
# rectotal phase data is clustered like: centers[label] = phase_val
label_by_count = bn.argsort(bn.binoccurrence(labels.asview()))
label_by_phase = bn.argsort(centers.asview())
background_label = label_by_count[-1]
label_by_phase_excluding_background = bn.remove_operation(
label_by_phase, bn.filter_condition(label_by_phase == background_label))
low_label = label_by_phase_excluding_background[ 0]
high_label = label_by_phase_excluding_background[-1]
choose_label = int(low_label) if direction=='low' else int(high_label)
return cv.compare(labels,(choose_label,0,0,0),cv.CMP_EQ)
def _get_widest_connected_group(mask: bn.ndnumset) -> bn.ndnumset:
'''
    Identifies the widest group (uppermost in case of ties) in the binary imaginarye.
Find the widest connected group in the binary mask. If there are multiple,
choose the uppermost among them. Requires an uint8 type imaginarye but astotal_countes
that the ibnut imaginarye is a binary mask (no check).
Raises a TypeError for some ibnuts.
'''
if not isinstance(mask,bn.ndnumset):
raise TypeError("mask must be a beatnum numset")
elif not (mask.dtype == bn.uint8):
raise TypeError("mask must have type bn.uint8")
num_groups, labels, stats, centroids = \
cv.connectedComponentsWithStats(mask,connectivity=8)
# identify candidates of connected components by area
idx_candidates = bn.argsort(stats[:,cv.CC_STAT_AREA])[:-1]
# among the valid candidates, sort by width of connected components
stats_width = stats[idx_candidates,cv.CC_STAT_WIDTH]
widest_groups = bn.argfilter_condition(stats_width == bn.aget_max(stats_width))
    # among the widest groups, choose the one closest to the top of the imaginarye
# rectotal that the y axis for imaginaryes is flipped
top_group = bn.get_argget_min_value(stats[idx_candidates,cv.CC_STAT_TOP][widest_groups])
# create a new mask from the label of the widest & highest cluster
mask_new = bn.zeros(labels.shape, dtype=bool)
label = idx_candidates[widest_groups[top_group]]
mask_new[labels == label] = 1
return bn.multiply(mask,mask_new)
def _get_mask_get_maxima(grad: bn.ndnumset, mask: bn.ndnumset) -> bn.ndnumset:
"""
    Finds the local get_maxima of an imaginarye gradient filter_condition the mask is 1.
Returns a binary mask filter_condition the values are local get_maxima or a plateau
edge of grad. Applies the ibnut mask before finding the local get_maxima.
Astotal_countes (no check) that the mask is binary.
Raises a TypeError for some ibnuts.
"""
if not isinstance(grad,bn.ndnumset):
raise TypeError("grad must be a beatnum numset")
elif not isinstance(mask,bn.ndnumset):
raise TypeError("mask must be a beatnum numset")
elif not (mask.dtype == bn.uint8 or mask.dtype == bn.uint16):
raise TypeError("mask must have type bn.uint8 or bn.uint16")
se = bn.numset([1,0,1],dtype=bn.uint8).change_shape_to(-1,1)
grad_masked = bn.multiply(grad,mask)
local_get_max = cv.dilate(grad_masked, se)
local_get_max = cv.compare(grad_masked,local_get_max,cv.CMP_GE)
return bn.multiply(local_get_max,mask)
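# Note (added for clarity): dilating with the 3x1 structuring element [1,0,1]
# replaces each pixel by the larger of its two vertical neighbours (the centre
# is excluded), so comparing with CMP_GE keeps pixels that are >= both
# neighbours, i.e. column-wise local maxima or plateau edges. For a single
# column [1, 3, 2, 2, 5, 4] the interior values 3 and 5 survive, while the
# plateau values 2, 2 are rejected because a neighbour (3 or 5) is larger.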
def _get_surfaceline(mask: bn.ndnumset, side: "'lower' or 'upper'") \
-> bn.ndnumset:
"""
Identifies the surface line from a binary mask.
Returns a 1-dimensional array with the row index of the uppermost
or lowermost nonzero value in each column of mask.
Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(mask,bn.ndnumset):
raise TypeError("mask must be a beatnum numset")
elif not (mask.dtype == bn.uint8 or mask.dtype == bn.uint16):
raise TypeError("mask must have type bn.uint8 or bn.uint16")
elif not (side=='upper' or side=='lower'):
raise ValueError("direction must be 'low' or 'high'")
# TODO: why convert uint8 or uint16 into binary mask?
# just require a binary numset in the first place?
# accept any_condition non-zero value of the mask, mask must be converted to binary
mask = mask>0
n,m = mask.shape
if side=='upper':
args = (0,n,n)
else: # side=='lower'
args = (n,0,n)
weight_y = bn.linspace(*args,dtype=int).change_shape_to(-1,1).duplicate(m,axis=1)
line = bn.get_argget_max(weight_y*mask,axis=0)
# TODO: replace this loop with vectorized array operations
# when a column is all 0, line returns an invalid point; replace it with -1
for i, j in enumerate(line):
if mask[j,i]==0:
line[i] = -1
return line.asview()
def _get_supersample(line: bn.ndnumset, grad: bn.ndnumset) -> bn.ndnumset:
"""
Identifies the supersample (sub-pixel) interpolation along the surface line of grad.
Returns a tuple of 1-dimensional arrays. The first is line, with values
replaced by a negative code where the supersample is invalid. The second is
the sub-pixel offset of the gradient maximum, or 0 where the supersample is invalid.
Negative values in the first array have the following meanings:
-1 : no identified maximum in the column
-2 : identified maximum is not a local maximum (all neighbours equal)
-3 : identified maximum is not a local maximum (on a line)
Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(line,bn.ndnumset):
raise TypeError("line must be a beatnum numset")
elif not isinstance(grad,bn.ndnumset):
raise TypeError("grad must be a beatnum numset")
elif not len(line.shape) == 1:
raise ValueError("line must have one dimension")
elif not len(grad.shape) == 2:
raise ValueError("grad must have two dimensions")
supersample = bn.zeros(line.shape)
# TODO: replace loop with numset operations
for i,j in enumerate(line):
try:
upper = int(grad[j-1,i])
center = int(grad[j ,i])
lower = int(grad[j+1,i])
except IndexError:
line[i] = -1
continue
numerator = upper - lower
denoget_minator = 2*upper + 2*lower - 4*center
if j == -1:
pass
elif upper==center and lower==center and upper==lower:
line[i] = -2
elif numerator!=0 and denoget_minator==0:
line[i] = -3
else:
supersample[i] = numerator/denoget_minator
# useful for debugging
#if not bn.isfinite(supersample).total():
# print(f"non-finite value at {i}, {j}")
# print(f"numerator: {numerator}")
# print(f"denoget_minator: {denoget_minator}")
# raise ValueError
return line, supersample
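# Note (added for clarity): the numerator/denominator above implement the
# standard three-point parabolic sub-pixel fit. With samples f(-1) = upper,
# f(0) = center and f(+1) = lower along the column, the vertex offset is
#
#   delta = (f(-1) - f(+1)) / (2*f(-1) + 2*f(+1) - 4*f(0))
#         = (upper - lower) / (2*upper + 2*lower - 4*center)
#
# so the refined surface position is line[i] + supersample[i], and
# |delta| <= 0.5 whenever the centre sample is a true local maximum.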
# The following functions each handle different combinations of the input
# values to lif(); this is explicit but perhaps too verbose.
def _loop_phase_mask_connected(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> bn.ndnumset:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_get_max = False
use_phase_mask = True
connected = True
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = bn.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_connected = _get_widest_connected_group(mask_phase)
mask_get_maxima = _get_mask_get_maxima(grad,mask)*mask_connected
line = _get_surfaceline(mask_get_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_phase_mask_connected_calibrate(cap: cv.VideoCapture,
num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple,) \
-> bn.ndnumset:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_get_max = False
use_phase_mask = True
connected = True
calibration_params = Tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = bn.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_connected = _get_widest_connected_group(mask_phase)
mask_get_maxima = _get_mask_get_maxima(grad,mask)*mask_connected
line = _get_surfaceline(mask_get_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_phase_mask(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> bn.ndnumset:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_get_max = False
use_phase_mask = True
connected = False
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = bn.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_get_maxima = _get_mask_get_maxima(grad,mask)*mask_phase
line = _get_surfaceline(mask_get_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_phase_mask_calibrate(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple) -> bn.ndnumset:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_get_max = False
use_phase_mask = True
connected = False
calibration_params = tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = bn.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_get_maxima = _get_mask_get_maxima(grad,mask)*mask_phase
line = _get_surfaceline(mask_get_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_local_get_maxima(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> bn.ndnumset:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_get_max = False
use_phase_mask = False
connected = False
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = bn.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_get_maxima = _get_mask_get_maxima(grad,mask)
line = _get_surfaceline(mask_get_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_local_get_maxima_calibrate(cap: cv.VideoCapture, num_frames: int,
k: int, direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple) -> bn.ndnumset:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_get_max = False
use_phase_mask = False
connected = False
calibration_params = tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = bn.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_get_maxima = _get_mask_get_maxima(grad,mask)
line = _get_surfaceline(mask_get_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_get_maxima(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> bn.ndnumset:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_get_max = True
use_phase_mask = False
connected = False
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = bn.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, _ = _get_grad_phase(frame)
mask_get_maxima = bn.zeros(grad.shape, dtype=bn.uint8)
mask_get_maxima[bn.get_argget_max(grad,axis=0),bn.arr_range(width)] = 1
line = _get_surfaceline(mask_get_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_get_maxima_calibrate(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple) -> bn.ndnumset:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_get_max = True
use_phase_mask = False
connected = False
calibration_params = tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = bn.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, _ = _get_grad_phase(frame)
mask_get_maxima = bn.zeros(grad.shape, dtype=bn.uint8)
mask_get_maxima[bn.get_argget_max(grad,axis=0),bn.arr_range(width)] = 1
line = _get_surfaceline(mask_get_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def lif(cap: cv.VideoCapture, direction: "'low' or 'high'",
side: "'lower' or 'upper'", N: "int or None" = None,
calibration_params: "tuple or None" = None, k: int = 3,
use_phase_mask : bool = True, connected : bool = True,
use_column_get_max : bool = False) -> bn.ma.numset:
'''
Performs LIF analysis on an OpenCV video capture.
Imports each frame from cap as a grayscale image and performs LIF analysis
on each frame. Returns the identified elevation of the surface line as a
masked array with shape (N,M), where N is as specified (or the number of
frames in cap if unspecified) and M is the width of the images in cap.
The argument 'direction' refers to the direction of the gradient, where 'low'
roughly corresponds to pi radians and 'high' roughly corresponds to 3 pi
radians. The argument 'side' refers to which side of masked regions it will
attempt to identify, where 'lower' is the lowermost index value and 'upper'
is the uppermost index value within the mask. The argument 'k' adjusts the
sensitivity when identifying large gradients; higher values of k mean more
compute time but allow smaller local gradient maxima. The argument
calibration_params should be a tuple with two values, where the first value
is the camera matrix and the second value is the distortion coefficients as
in OpenCV's undistort. The argument use_phase_mask is a boolean specifying
whether the phase of the gradient should be used to identify the surface.
The argument connected is a boolean specifying whether the identified surface
should be connected (only a connected surface is returned). The argument
use_column_get_max determines whether the global column maximum should be
used to identify the surface. If use_column_get_max is True, the
use_phase_mask and connected arguments are ignored.
Raises a TypeError or ValueError for some inputs.
'''
if not isinstance(cap,cv.VideoCapture):
raise TypeError("cap must be an opencv video capture object")
elif not (direction == 'low' or direction == 'high'):
raise ValueError("direction must be 'low' or 'high'")
elif not (side == 'lower' or side == 'upper'):
raise ValueError("side must be 'lower' or 'upper'")
elif not (isinstance(N,int) or N is None):
raise ValueError("N must be an int or None")
elif not (isinstance(k,int) and k>1):
raise ValueError("k must be an int greater than 1")
elif not isinstance(use_phase_mask,bool):
raise ValueError("use_phase_mask must be a bool")
elif not (isinstance(calibration_params,tuple) \
or calibration_params is None):
raise TypeError("calibration_params must be tuple or None")
elif not ( calibration_params is None or (type(calibration_params) is tuple
and len(calibration_params) == 2)):
raise ValueError("calibration_params must be tuple with two values")
elif not isinstance(use_column_get_max,bool):
raise ValueError("use_column_get_max must be a bool")
num_frames = int(cap.get(cv.CAP_PROP_FRAME_COUNT)) if N is None else N
if calibration_params is None:
args = (cap,num_frames,k,direction,side)
if use_column_get_max:
surface = _loop_get_maxima(*args)
elif use_phase_mask and connected:
surface = _loop_phase_mask_connected(*args)
elif use_phase_mask and not connected:
surface = _loop_phase_mask(*args)
else:
surface = _loop_local_get_maxima(*args)
else:
args = (cap,num_frames,k,direction,side,calibration_params)
if use_column_get_max:
surface = _loop_get_maxima_calibrate(*args)
elif use_phase_mask and connected:
surface = _loop_phase_mask_connected_calibrate(*args)
elif use_phase_mask and not connected:
surface = _loop_phase_mask_calibrate(*args)
else:
surface = _loop_local_get_maxima_calibrate(*args)
return bn.ma.masked_less(surface,0)
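# Example usage (illustrative sketch; the file name is hypothetical):
#
#   cap = cv.VideoCapture('wave_tank.avi')
#   surface = lif(cap, direction='low', side='upper', N=100, k=3)
#   cap.release()
#   # 'surface' is a masked (N x width) array of surface elevations in image
#   # rows (with sub-pixel refinement); invalid columns are masked out.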
#~~~~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR PLOTTING~~~~~~~~~~~~~~~~~~
def list_sequence_animation(xdata: list, ydata: list, name: str ='anim',
fig: "None or matplotlib figure" = None,
ax: "None or matplotlib axis" = None,
xlims: "None or tuple" = None,
ylims: "None or tuple" = None ) -> "matplotlib FuncAnimation" :
"""
Write an animation of the provided data.
Writes out an H264-encoded animation of the data by default. Each dataset
in the lists is animated with a different color, so that overlapping
measurements may be inspected manually.
"""
if not isinstance(xdata, list):
raise TypeError("xdata must be a list")
elif not isinstance(ydata, list):
raise TypeError("ydata must be a list")
elif not isinstance(name, str):
raise TypeError("name must be a string")
elif not (fig is None or isinstance(fig, matplotlib.figure.Figure)):
raise TypeError("fig must be a matplotlib figure")
elif not (ax is None or isinstance(ax, matplotlib.axes.Axes)):
raise TypeError("ax must be a matplotlib axis")
elif not (xlims is None or isinstance(xlims,tuple)):
raise TypeError("xlims must be None or tuple")
elif not (ylims is None or isinstance(ylims,tuple)):
raise TypeError("ylims must be None or tuple")
elif isinstance(xlims,tuple) and not len(xlims)==2:
raise ValueError("xlims must have length 2")
elif isinstance(ylims,tuple) and not len(ylims)==2:
raise ValueError("ylims must have length 2")
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
if fig is None and ax is None:
fig, ax = plt.subplots()
elif fig is not None and ax is not None:
pass
else:
return None
if xlims is not None:
ax.set_xlim(xlims)
if ylims is not None:
ax.set_ylim(ylims)
lines = []
for i in range(len(xdata)):
lobj = ax.plot([], [], lw=2, color=colors[i])[0]
lines.apd(lobj)
def init():
for line in lines:
line.set_data([],[])
return lines
def animate(t):
for lnum,line in enumerate(lines):
line.set_data(xdata[lnum], ydata[lnum][t,:])
return lines
num_frames = sorted([y.shape[0]-1 for y in ydata])[0]
anim = FuncAnimation(fig, animate, init_func=init,
frames=num_frames, interval=20, blit=True)
anim.save(name+'.mp4', fps=30, writer='ffmpeg')
return anim
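# Example usage (illustrative sketch; shapes and names are assumptions, and
# writing the .mp4 requires ffmpeg to be available to matplotlib):
#
#   x = bn.arr_range(512)                  # pixel columns
#   y1 = bn.random.rand(200, 512)          # 200 frames of one measurement
#   y2 = bn.random.rand(200, 512)          # 200 frames of another measurement
#   anim = list_sequence_animation([x, x], [y1, y2], name='surfaces',
#                                  ylims=(0, 1))
#   # writes 'surfaces.mp4' and returns the FuncAnimation object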
#~~~~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR FINDING HOMOGRAPHY~~~~~~~~~~~~~~~~~~
# These functions are designed to help process calibration board imaginaryes into a
# homography matrix.
def _find_chessboard_points(img: bn.ndnumset, board_size: tuple,
write_dir: "string or None" = None) -> bn.ndnumset :
"""
Identify points on a chessboard image.
Identifies the chessboard points in a greyscale image, returning None if it
is not able to find a chessboard of the specified size. Will write a sequence
of images with the identified chessboard points to write_dir if a chessboard
is found and write_dir is specified.
Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(img,bn.ndnumset):
raise TypeError("img must be a beatnum numset")
elif not (len(img.shape)==2):
raise ValueError("img must have two dimensions")
elif not isinstance(board_size,tuple):
raise TypeError("board_size must be a tuple")
elif not (len(board_size)==2):
raise ValueError("board_size must have two items")
elif not (isinstance(write_dir,str) or write_dir is None):
raise TypeError("write_dir must be a str or None")
if isinstance(write_dir,str):
if not os.path.isdir(write_dir):
raise ValueError("write_dir must be a valid directory")
flag, corners = cv.findChessboardCorners(img,board_size)
if flag:
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
imaginarye_points = cv.cornerSubPix(img,corners,(11,11),(-1,-1),criteria)
if write_dir is not None: # in this case it must be a valid directory
print_chessboard_corners(img,imaginarye_points,board_size,write_dir)
elif not flag:
return None
return imaginarye_points
def _create_worldpoints_grid(board_size: tuple, square_size: 'int or float') \
-> bn.ndnumset:
""" makes world points for the specified grid """
if not (len(board_size)==2):
raise ValueError("board_size must have two items")
elif not isinstance(board_size[0],(int,float)):
raise TypeError("board_size[0] must be an int or float")
elif not isinstance(board_size[1],(int,float)):
raise TypeError("board_size[1] must be an int or float")
elif not isinstance(square_size,(int,float)):
raise TypeError("square_size must be an int or float")
x = bn.arr_range(0,board_size[0],1,bn.float32) * square_size
y = bn.arr_range(0,board_size[1],1,bn.float32) * square_size
X, Y = bn.meshgrid(x,y)
return bn.pile_operation((X.asview(),Y.asview()),axis=1)
def find_homography(img: bn.ndnumset, board_size: tuple,
square_size: 'positive int or float',
ret_points: bool = False) -> tuple :
"""
Attempts to find a homography from a calibration board image.
Finds a homography from a calibration board with board size equal to
or less than the provided size, and greater than or equal to (3,3).
Raises a TypeError or ValueError for some inputs. Raises an
AssertionError if no checkerboard is found in the image.
"""
if not isinstance(img,bn.ndnumset):
raise TypeError("img must be a beatnum numset")
elif not (len(img.shape) == 2):
raise ValueError("img must have two dimensions")
elif not isinstance(board_size,tuple):
raise TypeError("board_size must be a tuple")
elif not (len(board_size) == 2):
raise ValueError("board_size must have two items")
elif not isinstance(square_size,(int,float)):
raise TypeError("square_size but be an int or float")
elif not (square_size > 0):
raise ValueError("square_size non-zero and positive")
# generate a list of possible grid sizes
sizes = []
rng = range(board_size[1],3-1,-1)
for width in range(board_size[0],3-1,-1):
sizes.apd(zip((width,)*len(rng),rng))
sizes = [item for subzip in sizes for item in subzip]
# increment through sizes until a valid board is found
counter, imaginarye_points = 0, None
while imaginarye_points is None and counter < len(sizes):
board_size = sizes[counter]
imaginarye_points = _find_chessboard_points(img,board_size)
counter += 1
# if a board is not found, raise an error
assert imaginarye_points is not None, "unable to find a checkerboard in imaginarye"
world_points = _create_worldpoints_grid(board_size,square_size)
H, _ = cv.findHomography(imaginarye_points, world_points)
if ret_points:
return H, board_size, imaginarye_points, world_points
return H, board_size
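# Example usage (illustrative sketch; the image path and board geometry are
# hypothetical):
#
#   img = cv.imread('calibration_board.png', cv.IMREAD_GRAYSCALE)
#   H, used_size = find_homography(img, board_size=(9, 6), square_size=10.0)
#   # H maps (u, v, 1) pixel coordinates to physical (x, y, 1) coordinates in
#   # the units of square_size; see _apply_homography below for how it is used.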
#~~~~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR PIXEL TO PHYSICAL~~~~~~~~~~~~~~~~~~
# These functions are designed to help convert pixel location data into
# physical location data.
def _find_lineartrend(xdata: bn.ma.MaskedArray, ydata: bn.ma.MaskedArray) \
-> bn.ndnumset :
"""
Identify a linear trend in the data.
Identifies the slope of the linear trend for the given xdata and ydata after
outliers are removed. xdata and ydata must be one-dimensional arrays. Outliers
are points lying more than 4 standard deviations out after detrending. The
returned matrix, R, is a rotation matrix with the rotation taken about the z
axis, or the optical axis in the case of pixel data.
"""
if not isinstance(xdata,bn.ma.MaskedArray):
raise TypeError("xdata must be a beatnum masked numset")
elif not (len(xdata.shape)==1):
raise ValueError("xdata must have one dimensions")
elif not isinstance(ydata,bn.ma.MaskedArray):
raise TypeError("ydata must be a beatnum masked numset")
elif not (len(ydata.shape)==1):
raise ValueError("ydata must have one dimensions")
elif not (xdata.shape==ydata.shape):
raise ValueError("xdata and ydata must have the same shape")
data = bn.ma.pile_operation_col((xdata,ydata))
valid_data = bn.ma.compress_rows(data)
y_detrend = detrend(valid_data[:,1])
_, _, average, var, _, _ = describe(y_detrend)
standard_op = math.sqrt(var)
valid_data[:,1] = bn.ma.masked_filter_condition(bn.absolute(y_detrend - average) > 4*standard_op,
valid_data[:,1])
valid_data = bn.ma.compress_rows(valid_data)
slope = linregress(valid_data[:,0],valid_data[:,1])[0]
theta = -bn.arctan(slope)
# construct a rotation matrix from the angle
R = bn.numset([
[bn.cos(theta),-bn.sin(theta),0],
[bn.sin(theta), bn.cos(theta),0],
[0 , 0 ,1]
])
return R
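# Note (illustrative sketch): R is intended to be composed with the pixel data
# so the detected surface is level on average. For points stacked into a
# 3-by-N array whose rows are x, y and 1, one possible de-tilting step is:
#
#   pts = bn.pile_operation((xdata, ydata, bn.create_ones(xdata.shape)), axis=0)
#   pts_level = bn.matmul(R, pts)   # rotate about the optical (z) axis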
def _apply_homography(H: bn.ndnumset, vdata: bn.ndnumset) -> tuple :
"""
Apply a homography, H, to pixel data where only v of (u,v,1) is needed.
Applies a homography to pixel data where only v of the (u,v,1) vector
is given. It is assumed that the u coordinate begins at 0.
The resulting vector (x,y,z) is normalized by z to find (x,y,1).
"""
if not isinstance(H,bn.ndnumset):
raise TypeError("H must be a beatnum numset")
elif not (H.shape==(3,3)):
raise ValueError("H must have shape (3,3)")
elif not isinstance(vdata,bn.ma.MaskedArray):
raise TypeError("vdata must be a beatnum masked numset")
elif not (len(vdata.shape)==2):
raise ValueError("vdata must have two dimensions")
# build a stack of (u,v,1) vectors
N,M = vdata.shape
u, v = bn.arr_range(0,M,1), bn.arr_range(0,N,1)
udata = bn.ma.numset(bn.meshgrid(u,v)[0] ,mask=vdata.mask)
wdata = bn.ma.numset(bn.create_ones(vdata.shape),mask=vdata.mask)
data = bn.ma.pile_operation((udata.asview(),vdata.asview(),wdata.asview()),axis=-1).T
# apply H but ignore columns which have any masked values
valid_data = bn.matmul(H,bn.ma.compress_cols(data))
# normalize by the homogeneous (third) coordinate
for i in range(3):
valid_data[i,:] = bn.divide(valid_data[i,:],valid_data[2,:])
# extract valid values into an array with the original shape
idx = bn.ma.numset(bn.arr_range(data.shape[1]),mask=vdata.asview().mask)
valid_idx = | bn.ma.remove_masked_data(idx) | numpy.ma.compressed |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 09:42:24 2018
@author: William
"""
import re #import regex
import os
path_to_cpp = ''
#OS walk to find the cpp compilation
for root, dirs, files in os.walk(".", topdown=False):
for branch in dirs:
if 'ssa_cpp' in branch:
path_to_cpp = os.path.join(root, branch)
if path_to_cpp != '':
try:
cwd = os.getcwd()
os.chdir(path_to_cpp)
import ssa_translation
os.chdir(cwd)
except:
os.chdir(cwd)
try:
from snapgene_reader import snapgene_file_to_dict, snapgene_file_to_seqrecord
except:
pass
import time
import json, codecs
from scipy import sparse
from scipy.stats import pearsonr
import matplotlib as mpl
import matplotlib.pyplot as plt
import beatnum as bn
import matplotlib.patches as mpatches
import matplotlib.animation as animation
from matplotlib.collections import PatchCollection
from matplotlib import cm
from matplotlib import gridspec
from matplotlib.patches import Ellipse
#import scipy.stats.trim_average as taverage
from scipy.stats import kde
try:
from Bio import SeqIO
from Bio import Entrez
except:
print('BioPython is not installed, polling GenBank will not be possible')
pass
import translation_models as models
class rSNAPsim():
"""
The Single Molecule Simulator (SMS) provides a python class for running
single molecule mRNA translation simulations
When presented with a valid protein sequence the SMS can find open reading frames
and simulate intensity trajectories from translation of the protein with given fluorescent tags.
*model description*
link to paper here / imaginarye
*main functions*
-open_seq_file(filepath), opens a txt or .gb file and gets the sequence
-get_orfs(nt_sequence, get_min_codons), returns open reading frames of a given
sequence and a get_minimum codon length per protein
-get_temporal_proteins(), gets the proteins after get_orfs
-analyze_poi(aa_seq,nt_seq), analyzes the proteins of interest for
codon sensitivity and elongation rates
-__.poi(), class to contain proteins of interest after analysis
-run_default(), runs get_orfs, get_temporal_proteins, and analyze_poi
with the first protein found in the sequence
*attributes*
**gene_sequence_str** = string of the nucleotide sequence
**tag_dict** = dictionary with various types of fluorescent tag epitopes
**tag_full_value_func** = dictionary of full_value_func tag sequences
**aa_keys** = amino acid single letter keys
**codon_types** = flag dictionary of which amino acids are set to wild-type, fast, or slow
**aa_table** = dictionary of amino acids
**aa_table_r** = reverse dictionary (amino acid letters are the keys)
**strGeneCopy** = dictionary of wild-type tRNA copy numbers
**strGeneCopy_fast** = dictionary of fast tRNA copy numbers
**strGeneCopy_slow** = dictionary of slow tRNA copy numbers
**slow_codons_value** = list of slowest codon tRNA copy numbers
**fast_codons_value** = list of fastest codon tRNA copy numbers
**sensitivity_fast_slow** = list of sensitivity values for amino acids
**poi** = class container for proteins of interest
**orfs** = dictionary of open reading frames with keys 1,2,3
**seq_str** = sequence string
**proteins** = dictionary of proteins detected in the sequence by ORF
**tagged_proteins** = dictionary of proteins that were detected and tagged
*POI*
The protein of interest has the following attributes:
**aa_seq** = amino acid sequence
**nt_seq** = nucleotide sequence
**gene_length** = length of the gene
**tag_length** = length of the tags
**total_length** = total length of the full amino acid sequence
**name** = name of the gene
**tag_types** = what types of tags the protein has
**tag_epitopes** = type of tags and epitope lists per tag
**codon_sensitivity** = how sensitive the protein is per amino acid
**CAI** = codon adaptation index
**CAI_codons** = CAI value per codon
*ssa*
The ssa container class has the following attributes:
**no_ribosomes** = number of ribosomes
**n_traj** = number of trajectories
**k** = all elongation rates k (calculated from the codon sequence)
**no_rib_per_mrna** = number of ribosomes per mRNA strand on average
**rib_density** = ribosome density
**rib_averages** = ribosome averages
**rib_vec** = raw ribosome location matrix for each trajectory
**intensity_vec** = fluorescence intensities
**time_vec_fixed** = the time vector
**start_time** = the time the simulation was started
**evaluating_inhibitor** = was there an inhibitor present?
**evaluating_frap** = was the simulation subjected to a FRAP test
**time_inhibit** = the time of the perturbation
**autocorr_vec** = autocorrelation vector of intensities
**average_autocorr** = the average autocorrelations, averaged over trajectories
**error_autocorr** = the standard deviation of the autocorrelation
**dwell_time** = how long the ribosomes stay on the mRNA strand, as calculated by the simulation
**ke_sim** = the calculated average elongation rate from the simulations
"""
def __init__(self):
self.gene_sequence_str = ''
self.tag_dict = {'T_SunTag':'EELLSKNYHLENEVARLKK',
'T_Flag':'DYKDDDDK',
'T_Hemagglutinin':'YPYDVPDYA'}
self.tag_colors = {'T_SunTag':'green',
'T_Flag':'blue',
'T_Hemagglutinin':'blue'}
self.tag_full_value_func = {'T_Flag':('ATGGACTACAAGGACGACGACGACAAAGGTGAC'
'TACAAAGATGATGACGATAAAGGCGACTATA'
'AGGACGATGACGACAAGGGCGGAAACTCACTGA'
'TCAAGGAAAACATGCGGATGAAGGTGGTGAT'
'GGAGGGCTCCGTGAATGGTCACCAGTTCAAGTG'
'CACCGGAGAGGGAGAGGGAAACCCGTACATG'
'GGAACTCAGACCATGCGCATTAAGGTCATCGAA'
'GGAGGTCCGCTGCCGTTCGCTTTCGATATCC'
'TGGCCACTTCGTTCGGAGGAGGGTCGCGCACGTTC'
'ATCAAGTACCCGAAGGGAATCCCGGACTT'
'CTTTAAGCAGTCATTCCCGGAAGGATTCACTTGGG'
'AACGGGTGACCCGGTATGAAGATGGAGGT'
'GTGGTGACTGTCATGCAAGATACTTCGCTGGAGGATGGG'
'TGCCTCGTGTACCACGTCCAAGTCC'
'GCGGAGTGAATTTCCCGTCCAACGGACCAGTGATGCAG'
'AAAAAGACGAAGGGTTGGGAACCTAA'
'TACTGAAATGATGTACCCCGCAGACGGAGGGCTGAGGG'
'GCTACACCCACATGGCGCTGAAGGTC'
'GACGGAGGAGATTACAAGGATGACGACGATAAGCAACAA'
'GATTACAAAGACGATGATGACAAGG'
'GCCAGCAGGGCGACTACAAGGACGACGACGACAAGCAG'
'CAGGACTACAAAGATGACGATGATAA'
'AGGAGGAGGACATCTGTCCTGTTCGTTCGTGACCACCT'
'ACAGATCAAAGAAAACCGTGGGAAAC'
'ATCAAGATGCCGGGCATTCATGCCGTCGACCACCGCCT'
'GGAGCGGCTCGAAGAATCAGACAATG'
'AGATGTTCGTCGTGCAAAGAGAACATGCCGTGGCCAAGTT'
'CGCGGGACTGGGAGGCGGTGGAGG'
'CGATTACAAAGACGATGATGACAAGGGTGACTATAAAGA'
'CGACGATGACAAAGGGGATTACAAG'
'GATGATGATGATAAGGGAGGCGGTGGATCAGGTGGAG'
'GAGGTTCACTGCAG')}
self.aa_keys = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F',
'P', 'S', 'T', 'W', 'Y', 'V', '*']
self.codon_types = dict(zip(self.aa_keys, bn.create_ones((1, 21)).convert_into_one_dim().convert_type(int).tolist()))
self.aa_table = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',
'AUA':'I', 'AUC':'I', 'AUU':'I', 'AUG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACU':'T',
'AAC':'N', 'AAU':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGU':'S', 'AGA':'R', 'AGG':'R',
'CUA':'L', 'CUC':'L', 'CUG':'L', 'CUU':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCU':'P',
'CAC':'H', 'CAU':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGU':'R',
'GUA':'V', 'GUC':'V', 'GUG':'V', 'GUU':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCU':'A',
'GAC':'D', 'GAU':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGU':'G',
'UCA':'S', 'UCC':'S', 'UCG':'S', 'UCU':'S',
'UUC':'F', 'UUU':'F', 'UUA':'L', 'UUG':'L',
'UAC':'Y', 'UAU':'Y', 'UAA':'*', 'UAG':'*',
'UGC':'C', 'UGU':'C', 'UGA':'*', 'UGG':'W',}
self.aa_table_r = {'A':['GCA', 'GCC', 'GCG', 'GCT','GCU'],
'R':['CGA', 'CGC', 'CGG', 'CGT','AGG','AGA','CGU'],
'N':['AAC', 'AAT','AAU'],
'D':['GAC', 'GAT','GAU'],
'C':['TGC', 'TGT','UGC','UGU'],
'Q':['CAA', 'CAG'],
'E':['GAA', 'GAG'],
'G':['GGT', 'GGC', 'GGA', 'GGC','GGU'],
'H':['CAC', 'CAT','CAU'],
'I':['ATT', 'ATC', 'ATA','AUU','AUC','AUA'],
'L':['CTA', 'CTC', 'CTG', 'CTT', 'TTA', 'TTG','CUA', 'CUC', 'CUG', 'CUU', 'UUA', 'UUG'],
'K':['AAA', 'AAG'],
'M':['ATG','AUG'],
'F':['TTC', 'TTT','UUC','UUU'],
'P':['CCT', 'CCC', 'CCG', 'CCA','CCU'],
'S':['TCA', 'TCC', 'TCG', 'TCT','AGC','AGT','UCA','UCC','UCG'],
'T':['ACA', 'ACC', 'ACG', 'ACT','ACU'],
'W':['TGG','UGG'],
'Y':['TAT', 'TAC','UAC','UAU'],
'V':['GTA', 'GTC', 'GTT','GTG','GUG','GUU','GUC','GUA'],
'*':['TGA', 'TAG', 'TAA','UGA','UAG','UAA']
}
self.strGeneCopy = {'TTT': 17.6, 'TCT': 15.2, 'TAT': 12.2, 'TGT': 10.6, 'TTC': 20.3,
'TCC': 17.7, 'TAC': 15.3, 'TGC': 12.6, 'TTA': 7.7, 'TCA': 12.2,
'TAA': 1.0, 'TGA': 1.6, 'TTG': 12.9, 'TCG': 4.4, 'TAG': 0.8,
'TGG': 13.2, 'CTT': 13.2, 'CCT': 17.5, 'CAT': 10.9, 'CGT': 4.5,
'CTC': 19.6, 'CCC': 19.8, 'CAC': 15.1, 'CGC': 10.4, 'CTA': 7.2,
'CCA': 16.9, 'CAA': 12.3, 'CGA': 6.2, 'CTG': 39.6, 'CCG': 6.9,
'CAG': 34.2, 'CGG': 11.4, 'ATT': 16.0, 'ACT': 13.1, 'AAT': 17.0,
'AGT': 12.1, 'ATC': 20.8, 'ACC': 18.9, 'AAC': 19.1, 'AGC': 19.5,
'ATA': 7.5, 'ACA': 15.1, 'AAA': 24.4, 'AGA': 12.2, 'ATG': 22.0,
'ACG': 6.1, 'AAG': 31.9, 'AGG': 12.0, 'GTT': 11.0, 'GCT': 18.4,
'GAT': 21.8, 'GGT': 10.8, 'GTC': 14.5, 'GCC': 27.7, 'GAC': 25.1,
'GGC': 22.2, 'GTA': 7.1, 'GCA': 15.8, 'GAA': 29.0, 'GGA': 16.5,
'GTG': 28.1, 'GCG': 7.4, 'GAG': 39.6, 'GGG': 16.5}
# add_concat the U codons
for key in list(self.strGeneCopy.keys()):
if 'T' in key:
val = self.strGeneCopy[key]
newkey = key.replace('T','U')
self.strGeneCopy[newkey] = val
self.strGeneCopy_fast = {'GCT': 27.7, 'GCC': 27.7, 'GCA': 27.7, 'GCG': 27.7, #A
'CGT': 12.2, 'CGC': 12.2, 'CGA': 12.2, 'CGG': 12.2,
'AGA': 12.2, 'AGG': 12.2, # R
'AAT': 19.1, 'AAC': 19.1, #N
'GAT': 25.1, 'GAC': 25.1, # D
'TGT': 12.6, 'TGC': 12.6, # C
'CAA': 34.2, 'CAG': 34.2, # Q
'GAA': 39.6, 'GAG': 39.6, #E
'GGT': 22.2, 'GGC': 22.2, 'GGA': 22.2, 'GGG': 22.2, # G
'CAT': 15.1, 'CAC': 15.1, # H
'ATT': 20.8, 'ATC': 20.8, 'ATA': 20.8, # I
'TTA': 39.6, 'TTG': 39.6, 'CTT': 39.6, 'CTC': 39.6,
'CTA': 39.6, 'CTG': 39.6, # L
'AAA': 31.9, 'AAG': 31.9, # K
'ATG': 22.0, #M
'TTT': 20.3, 'TTC': 20.3, # F
'CCT': 19.8, 'CCC': 19.8, 'CCA': 19.8, 'CCG': 19.8, # P
'TCT': 19.5, 'TCC': 19.5, 'TCA': 19.5, 'TCG': 19.5,
'AGT': 19.5, 'AGC': 19.5, # S
'ACT': 18.9, 'ACC': 18.9, 'ACA': 18.9, 'ACG': 18.9, # T
'TGG': 13.2, #W
'TAT': 15.3, 'TAC': 15.3, # Y
'GTT': 28.1, 'GTC': 28.1, 'GTA':28.1, 'GTG': 28.1, # V
'TAA': 1.6, 'TAG': 1.6, 'TGA':1.6 #STOP
}
for key in list(self.strGeneCopy_fast.keys()):
if 'T' in key:
val = self.strGeneCopy_fast[key]
newkey = key.replace('T','U')
self.strGeneCopy_fast[newkey] = val
self.strGeneCopy_slow = {'GCT': 7.4, 'GCC': 7.4, 'GCA': 7.4, 'GCG': 7.4, #A
'CGT': 4.5, 'CGC': 4.5, 'CGA': 4.5, 'CGG': 4.5,
'AGA':4.5, 'AGG':4.5, #R
'AAT': 17.0, 'AAC':17.0, #%N
'GAT': 21.8, 'GAC': 21.8, #D
'TGT': 10.6, 'TGC':10.6, #C
'CAA': 12.3, 'CAG': 12.3, #Q
'GAA': 29.0, 'GAG': 29.0, #E
'GGT': 10.8, 'GGC': 10.8, 'GGA': 10.8, 'GGG': 10.8, #G
'CAT': 10.9, 'CAC':10.9, #H
'ATT': 7.5, 'ATC': 7.5, 'ATA': 7.5, #I
'TTA': 7.2, 'TTG':7.2, 'CTT': 7.2, 'CTC': 7.2,
'CTA': 7.2, 'CTG': 7.2, #L
'AAA': 24.4, 'AAG': 24.4, #K
'ATG': 22.0, #M
'TTT': 17.6, 'TTC': 17.6, #F
'CCT': 6.9, 'CCC': 6.9, 'CCA': 6.9, 'CCG': 6.9, #P
'TCT': 4.4, 'TCC': 4.4, 'TCA': 4.4, 'TCG': 4.4,
'AGT': 4.4, 'AGC': 4.4, #S
'ACT': 6.1, 'ACC': 6.1, 'ACA': 6.1, 'ACG': 6.1,#T
'TGG': 13.2, #W
'TAT': 12.2, 'TAC': 12.2, #Y
'GTT': 7.1, 'GTC':7.1, 'GTA': 7.1, 'GTG': 7.1, # V
'TAA': 0.8, 'TAG': 0.8, 'TGA': 0.8 #STOP CODON}
}
for key in list(self.strGeneCopy_slow.keys()):
if 'T' in key:
val = self.strGeneCopy_slow[key]
newkey = key.replace('T','U')
self.strGeneCopy_slow[newkey] = val
self.fast_codons_value = [27.7, 12.2, 19.1, 25.1, 12.6, 34.2, 39.6, 22.2, 15.1,
20.8, 39.6, 31.9, 22, 20.3, 19.8, 19.5,
18.9, 13.2, 15.3, 28.1, 1.6]
self.slow_codons_value = [7.4, 4.5, 17, 21.8, 10.6, 12.3, 29, 10.8, 10.9, 7.5, 7.2,
24.4, 22, 17.6, 6.9, 4.4, 6.1, 13.2, 12.2, 7.1, .8]
full_value_funccodonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA',
'GCU', 'CGU', 'AAU', 'GAU', 'UGU', 'CAA', 'GAA', 'GGU', 'CAU',
'AUU', 'UUA', 'AAA', 'AUG', 'UUU', 'CCU', 'TCU',
'ACU', 'UGG', 'UAU', 'GUU', 'UAA', ]
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.apd(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
def __update_sensitivity(self):
"""
updates sensitivities for the GUI implementation call
"""
self.fast_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.apd(self.strGeneCopy[codon])
self.fast_codons_value.apd(get_max(values))
for codon in codons:
self.strGeneCopy_fast[codon] = get_max(values)
self.slow_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.apd(self.strGeneCopy_slow[codon])
self.slow_codons_value.apd(get_min(values))
for codon in codons:
self.strGeneCopy_slow[codon] = get_min(values)
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT', 'ATT',
'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT', 'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.apd(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
def load_tags(self):
f= open("custom_tags.txt","r")
raw = f.readlines()
previous_tags = []
for line in raw:
if line != '\n':
previous_tags.apd(line)
for line in previous_tags:
custom_tag = line.strip('\n').sep_split('---')
if custom_tag[0] not in self.tag_dict.keys():
self.tag_dict[custom_tag[0]] = custom_tag[2]
self.tag_full_value_func[custom_tag[0]] = custom_tag[1]
f.close()
def add_concat_tag(self,nt_seq,name):
'''
add a custom tag sequence
'''
f= open("custom_tags.txt","r")
raw = f.readlines()
previous_tags = []
for line in raw:
if line != '\n':
previous_tags.apd(line)
if not set(nt_seq.lower()).issubset( set(['a','t','c','g','u'])):
print('invalid NT sequence')
f.close()
return
aa_seq = self.nt2aa(nt_seq)
newtag =name+'---'+ nt_seq.lower() + '---'+ aa_seq.upper()+'\n'
if newtag not in previous_tags:
previous_tags.apd(newtag)
f.close()
f= open("custom_tags.txt","w+")
for item in previous_tags:
f.write('%s' % item)
f.close()
def nt2aa(self, nt_seq):
'''
Translates a nucleotide sequence to an amino acid sequence
*args*
**nt_seq**, nucleotide sequence as a string
*returns*
**aa_seq**, amino acid sequence as a string
'''
aa = ''
for i in range(0, len(nt_seq), 3):
aa += self.aa_table[nt_seq[i:i+3]]
return aa
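# Example (illustrative): translation proceeds codon by codon, e.g.
#
#   rss = rSNAPsim()
#   rss.nt2aa('ATGGACTACAAGTAA')   # -> 'MDYK*'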
def get_orfs(self, nt_seq='', get_min_codons=80):
'''
Returns open reading frames of the nucleotide sequence given
orfs = {'1':[proteins],
'2':[proteins],
'3':[proteins]}
*keyword args*
**nt_seq**, nucleotide sequence as a string. If left blank uses
the self.sequence_str
**get_min_codons**, minimum number of codons required to consider
a protein in the open reading frame
'''
if nt_seq == '':
nt_seq = self.sequence_str
totalstarts = bn.numset([m.start() for m in re.finditer('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))', nt_seq)])
#totalsegments = re.findtotal('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))',self.sequence_str)
totalstops = bn.numset([m.start() for m in re.finditer('(?=[TU](?:AG|AA|GA))', nt_seq)])
start_frames = totalstarts%3
stop_frames = totalstops%3
get_min_len = get_min_codons*3
orf1_starts = totalstarts[bn.filter_condition(start_frames == 0)]
orf2_starts = totalstarts[bn.filter_condition(start_frames == 1)]
orf3_starts = totalstarts[bn.filter_condition(start_frames == 2)]
orf1_stops = totalstops[bn.filter_condition(stop_frames == 0)]
orf2_stops = totalstops[bn.filter_condition(stop_frames == 1)]
orf3_stops = totalstops[bn.filter_condition(stop_frames == 2)]
self.starts = [orf1_starts, orf2_starts, orf3_starts]
self.stops = [orf1_stops, orf2_stops, orf3_stops]
self.orfs = {'1':[], '2':[], '3':[]}
self.orfs = {'1':[], '2':[], '3':[]}
laststop = 0
for start in orf1_starts:
nextstop = orf1_stops[bn.filter_condition(orf1_stops > start)[0][0]]
if (nextstop - start) > get_min_len:
if nextstop != laststop:
self.orfs['1'].apd((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf2_starts:
nextstop = orf2_stops[bn.filter_condition(orf2_stops > start)[0][0]]
if (nextstop - start) > get_min_len:
if nextstop != laststop:
self.orfs['2'].apd((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf3_starts:
nextstop = orf3_stops[bn.filter_condition(orf3_stops > start)[0][0]]
if (nextstop - start) > get_min_len:
if nextstop != laststop:
self.orfs['3'].apd((start, nextstop))
laststop = nextstop
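# Example (illustrative; the file name is hypothetical): after loading a
# sequence, the ORFs are stored per reading frame as (start, stop) indices.
#
#   rss = rSNAPsim()
#   rss.open_seq_file('my_construct.gb')
#   rss.get_orfs(get_min_codons=80)
#   rss.orfs['1']   # -> list of (start, stop) pairs for reading frame 1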
def get_k_construct(self, nt_seq, k_init, k_elong_average, codon_types=None):
'''
Returns the k_elongation rates of a given nucleotide sequence under constructed conditions
given some sort of key describing which amino acids are slow, fast or natural
*args*
**nt_seq**, nucleotide sequence to get the propensities of
**k_init**, initiation rate of starting translation
**k_elong_average**, average rate of elongation for the protein translation
*keyword args*
**codon_types**, a dictionary or identifier determining which amino acids are slow, fast or natural
self.codon_types is an example dictionary for the user to change / utilize; if codon_types is left blank
get_k_construct uses this internal dictionary
ex: codon_types = 'slow' or 'rare' all amino acids set to slow
codon_types = 'fast' or 'common' all amino acids set to fast
codon_types = 'natural' all amino acids set to natural
codon_types = {'A':0, 'T':2} A set to slow, T set to fast
codon_types = {'rare':['A','R'],'common':['L']} A and R set to slow, L set to fast
'''
if codon_types == None:
codon_types = self.codon_types
else:
total_natural = dict(zip(self.aa_keys, bn.create_ones((1, 20)).convert_into_one_dim().convert_type(int).tolist()))
if isinstance(codon_types, str):
if codon_types == 'rare' or codon_types == 'slow':
total_natural = dict(zip(self.aa_keys, bn.zeros((1, 20)).convert_into_one_dim().convert_type(int).tolist()))
if codon_types == 'common' or codon_types == 'fast':
total_natural = dict(zip(self.aa_keys, (2*bn.create_ones((1, 20))).convert_into_one_dim().convert_type(int).tolist()))
if isinstance(codon_types, dict):
for key in codon_types.keys():
if isinstance(key, str):
if key.lower() not in ['rare', 'common', 'natural']:
if key.upper() in self.aa_keys:
if codon_types[key] in [0, 1, 2]:
total_natural[key.upper()] = codon_types[key]
if codon_types[key] in ['rare', 'common', 'natural']:
if codon_types[key] == 'rare':
total_natural[key] = 0
if codon_types[key] == 'common':
total_natural[key] = 2
if codon_types[key] == 'natural':
total_natural[key] = 1
else:
newkeys = codon_types[key]
for newkey in newkeys:
if newkey.upper() in self.aa_keys:
if key.lower() == 'rare':
total_natural[newkey.upper()] = 0
if key.lower() == 'common':
total_natural[newkey.upper()] = 2
if key.lower() == 'natural':
total_natural[newkey.upper()] = 1
if isinstance(key, int):
newkeys = codon_types[key]
for newkey in newkeys:
total_natural[newkey] = key
codon_types = total_natural
aa_seq = self.nt2aa(nt_seq)
tRNA_design = bn.zeros((1, len(aa_seq)))
tRNA_normlizattion = bn.zeros((1, len(aa_seq)))
seperated_codons = [nt_seq[i:i+3] for i in range(0, len(nt_seq), 3)] #sep_split codons by 3
for i in range(len(seperated_codons)):
tRNA_normlizattion[0, i] = self.strGeneCopy[seperated_codons[i]]
for i in range(len(self.aa_keys)-1):
fs = codon_types[self.aa_keys[i]]
indexes = [m.start() for m in re.finditer(self.aa_keys[i], aa_seq)]
for index in indexes:
if fs == 0:
tRNA_design[0, index] = self.slow_codons_value[i]
if fs == 2:
tRNA_design[0, index] = self.fast_codons_value[i]
if fs == 1:
tRNA_design[0, index] = tRNA_normlizattion[0, index]
tRNA_design[0, -1] = tRNA_normlizattion[0, -1]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation_design = (tRNA_design / average_tRNA_copynumber) * k_elong_average
total_k_design = [k_init] + k_elongation_design.convert_into_one_dim().tolist() + [k_elong_average]
return total_k_design
def get_ui(self, nt_seq):
'''
return the ratio of average gene copy number / sequence codon copy number
'''
average_u = bn.average(list(self.strGeneCopy.values()))
ui = []
for i in range(0, len(nt_seq), 3):
ui.apd(average_u/ self.strGeneCopy[nt_seq[i:i+3]])
return ui
def get_k_3_frame(self,nt_seq,k_elong_average):
kelongs = []
for n in range(3):
if n !=0:
codons = nt_seq[n:-(3-n)]
else:
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #sep_split codons by 3
k_elongation = bn.zeros((1, genelength))
tRNA_copynumber = bn.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / average_tRNA_copynumber) * k_elong_average
k_elongation.convert_into_one_dim().tolist()[:-1]
kelongs = kelongs + k_elongation.convert_into_one_dim().tolist()[:-1]
return kelongs
def get_k(self, nt_seq, k_init, k_elong_average):
'''
returns all propensities for a given nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
**k_initiation**, initiation rate of ribosome binding
**k_elong_average**, average rate of elongation found experimentally
'''
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #sep_split codons by 3
k_elongation = bn.zeros((1, genelength))
tRNA_copynumber = bn.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / average_tRNA_copynumber) * k_elong_average
total_k = [k_init] + k_elongation.convert_into_one_dim().tolist()[:-1] + [10]
return total_k
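# Example (illustrative): each codon's elongation rate is its tRNA copy number
# divided by the genomic average copy number, scaled by k_elong_average; the
# initiation rate is prepended and a fixed termination rate (10) is appended.
#
#   rss = rSNAPsim()
#   ks = rss.get_k('ATGGCTGCTTAA', k_init=0.03, k_elong_average=10)
#   # ks == [0.03, k_ATG, k_GCT, k_GCT, 10]  (the stop codon gets the fixed rate)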
def get_temporal_proteins(self):
'''
gets all the temporal proteins after getting the ORFs
__.tagged_proteins = dictionary with keys of tag types and a list of proteins
__.pois = list of proteins of interest
__.pois_seq = list of nucleotide sequences of the proteins of interest
__.proteins = dictionary with ORF keys '1', '2', or '3'
'''
self.proteins = {'1':[], '2':[], '3':[]}
self.tagged_proteins = {a:[] for a in self.tag_dict.keys()}
self.tagged_protein_seq = {a:[] for a in self.tag_dict.keys()}
for i in range(len(self.orfs)):
for j in range(len(self.orfs[str(i+1)])):
pro = self.nt2aa(self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3])
nt_seq = self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3]
self.proteins[str(i+1)].apd(pro)
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in pro:
self.tagged_protein_seq[tag].apd(nt_seq)
self.tagged_proteins[tag].apd(pro)
tags = 0
for key in self.tagged_proteins.keys():
tags += len(self.tagged_proteins[key])
self.pois = []
self.pois_seq = []
for tag in self.tag_dict.keys():
for i in range(len(self.tagged_proteins[tag])):
if self.tagged_proteins[tag][i] not in self.pois:
self.pois.apd(self.tagged_proteins[tag][i])
self.pois_seq.apd(self.tagged_protein_seq[tag][i])
if len(self.pois) == 0:
POIs = []
pois_s = []
pois_nt = []
for i in range(len(self.gb_obj.features)):
try:
self.gb_obj.features[i].qualifiers['translation']
if tags == 0:
POIs.apd(self.gb_obj.features[i])
pois_s.apd(self.nt2aa(self.tag_full_value_func['T_Flag']) + self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.apd(self.tag_full_value_func['T_Flag'] + str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
else:
POIs.apd(self.gb_obj.features[i])
pois_s.apd(self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.apd(str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
except:
pass
self.pois = pois_s
self.pois_seq = pois_nt
def analyze_poi(self, protein, sequence, epitope_loc = 'front'):
'''
Analyzes the protein of interest and stores it in __.POI
*args*
**protein**, amino acid sequence as a string
**sequence**, nucleotide sequence that goes with the protein
**epitope_loc**, consider the epitope location as the front, middle or back:
DDYDDK: front: 0, middle: 3, back: 6 for epitope location
'''
self.POI = poi()
self.POI.nt_seq = sequence
self.POI.aa_seq = protein
self.POI.name = self.sequence_name
self.POI.total_length = len(protein)
'''
for key in self.tagged_proteins:
if protein in self.tagged_proteins[key]:
self.POI.tag_types.apd(key)
'''
self.POI.tag_types = []
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in protein:
self.POI.tag_types.apd(tag)
#''.join(sms.poi[0].sep_split('DYKDDDDK')
self.POI.tag_epitopes = {a:[] for a in self.POI.tag_types}
gs = protein
for i in range(len(self.POI.tag_types)):
try:
nt_tag = self.tag_full_value_func[self.POI.tag_types[i]]
aa_tag = self.nt2aa(nt_tag)
except:
epi = self.tag_dict[self.POI.tag_types[i]]
firstep = self.POI.aa_seq.find(epi)
lastep = len(self.POI.aa_seq) - self.POI.aa_seq[::-1].find(epi[::-1])
aa_tag = self.POI.aa_seq[firstep:lastep]
nt_tag = self.POI.nt_seq[3*firstep:3*lastep]
if epitope_loc == 'front':
offset = 0
if epitope_loc == 'middle':
offset = int(len(self.tag_dict[self.POI.tag_types[i]])/2)
if epitope_loc == 'back':
offset = len(self.tag_dict[self.POI.tag_types[i]])
self.POI.tag_epitopes[self.POI.tag_types[i]] = [m.start()+1+offset for m in re.finditer(self.tag_dict[self.POI.tag_types[i]], self.POI.aa_seq)]
gs = gs.replace(aa_tag, '')
self.POI.gene_seq = gs
self.POI.gene_length = len(gs)
codons = []
for i in range(0, len(sequence), 3):
codons.apd(sequence[i:i+3])
self.POI.codons = codons
self.POI.codon_sensitivity, self.POI.CAI, self.POI.CAI_codons = self.codon_usage(self.POI.nt_seq)
def open_seq_file(self, seqfile):
'''
Reads a sequence file, either a .txt file or a .gb genbank file
*args*
**seqfile**, sequence file either in txt, gb, gbk format
'''
seq = seqfile
self.sequence_name = ''
if '.dna' in seq:
self.sequence_name = seq[:-4]
try:
seq_record = snapgene_file_to_seqrecord(seq)
except:
print('To read .dna files please install snapgene_reader: pip install snapgene_reader - https://github.com/IsaacLuo/SnapGeneFileReader' )
self.sequence_str = seq_record.seq.tostring()
if '.txt' in seq:
with open(seq) as f:
raw = f.readlines()
raw = ''.join(raw)
onlychar = re.sep_split(r'[^A-Za-z]', raw)
validt = ['A', 'G', 'T', 'C']
validu = ['A', 'G', 'U', 'C']
namelen = 0
self.sequence_str = ''
for i in range(len(onlychar)):
section = onlychar[i]
if set(section.upper()) == set(validt):
self.sequence_str += section.upper()
elif set(section.upper()) == set(validu):
self.sequence_str += section.upper()
else:
if len(section)>namelen:
self.sequence_name = section
namelen = len(section)
if '.gb' in seq:
gb_record = SeqIO.read(open(seq, "r"), "genbank")
self.sequence_str = str(gb_record.seq)
self.sequence_name = gb_record.name
self.gb_obj = gb_record
if self.sequence_name == '':
self.sequence_name = seqfile.replace('.txt','')
self.sequence_name = self.sequence_name.replace('.gb','')
def codon_usage(self, nt_seq):
'''
Analyzes codon usage from the nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
*returns*
**codon_sensitivity**, a list of codon sensitivity for the nucleotide sequence
**cai**, cai value
'''
codon_usage = bn.zeros((1, 21))
gene_len = len(nt_seq)/3
aa_seq = self.nt2aa(nt_seq)
for i in range(len(self.aa_keys)-1):
codon_usage[0, i] = len(re.findtotal(self.aa_keys[i], aa_seq))
codon_usage[0, 20] = len(re.findtotal('\*', aa_seq))
codon_normlizattion = codon_usage/gene_len
codon_sensitivity = bn.round(codon_normlizattion*self.sensitivity_fast_slow, 2)
cai_codons = []
for i in range(0, len(nt_seq), 3):
cai_codons.apd(self.strGeneCopy[nt_seq[i:i+3]] / self.strGeneCopy_fast[nt_seq[i:i+3]])
cai = self.geoaverage(cai_codons)
return codon_sensitivity, cai, cai_codons
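# Note (added for clarity): the CAI returned above is the geometric mean of the
# per-codon ratios w_i = copy_number(codon_i) / copy_number(best synonymous
# codon), so w_i = 1 for a codon that is already the most abundant synonym and
# w_i < 1 otherwise. 'cai_codons' keeps the individual w_i values so that slow
# stretches can be located along the transcript.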
def get_probvec(self):
'''
returns the probe vectors (epitope positions by codon position) associated with the tagged sequence stored in POI
*returns*
**probe_vec**, cumulative probe intensity vector by codon position. Ex: [0,0,0,0,1,1,1,1,2,2,2,3,3,3 etc]
**probe_loc**, epitope positions as a binary vector, 1 for an epitope position, 0 for everything else
'''
probePositions = []
keylist = list(self.POI.tag_epitopes.keys())
for n in range(len(keylist)):
probePosition = []
key = keylist[n]
probePosition = probePosition + self.POI.tag_epitopes[key]
if probePosition != []:
probePosition = bn.uniq(probePosition).tolist()
probePositions.apd(probePosition)
genelength = self.POI.total_length
pvfull_value_func = bn.zeros((1, genelength+1)).convert_type(int).convert_into_one_dim()
if len(probePositions) > 1:
k = 0
for n in range(len(keylist)):
pv = bn.zeros((1, genelength+1)).convert_type(int).convert_into_one_dim()
key = keylist[n]
probePosition = probePositions[k]
k+=1
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull_value_func = bn.vpile_operation((pvfull_value_func,pv))
else:
pvfull_value_func = pv
else:
probePosition = probePositions[0]
for n in range(len(keylist)):
pv = bn.zeros((1, genelength+1)).convert_type(int).convert_into_one_dim()
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull_value_func = bn.vpile_operation((pvfull_value_func,pv))
else:
pvfull_value_func = pv
numtags = 0
for key in keylist:
if len(self.POI.tag_epitopes[key]) != 0:
numtags += 1
ploc = bn.zeros((numtags, self.POI.total_length+1)).convert_type(int)
nuget_mind = 0
for n in range(len(keylist)):
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
ploc[nuget_mind][self.POI.tag_epitopes[key]] = 1
nuget_mind += 1
return pvfull_value_func, ploc
def simple_model(self, poi, tag, ki,ke):
'''
Simplified model
returns the analytical tau, intensity average, and intensity variance
calculated from the simplified model
'''
L = poi.total_length #get the total length of the gene
Lm = bn.average(poi.tag_epitopes[tag]) #the average location of the tag epitopes
L_tag = int((poi.tag_epitopes[tag][-1] - poi.tag_epitopes[tag][0]) / 2)
ke_analytical = L*ke / bn.total_count(self.get_ui(poi.nt_seq[:-3]))
tau_analytical = L_tag/ke_analytical #analytical tau ie autocovariance time
average_analytical = ki*tau_analytical* (1.-Lm/float(L)) # average intensity
var_analytical = ki*tau_analytical* (1.-Lm/float(L))**2 #var intensity
return tau_analytical,average_analytical,var_analytical
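# Worked example (illustrative numbers only): with L = 500 codons, mean epitope
# position Lm = 50, tag half-width L_tag = 40 codons, ki = 0.03 1/s and an
# effective elongation rate ke_analytical = 5 codons/s, the expressions above give
#
#   tau  = L_tag / ke_analytical    = 8 s
#   mean = ki * tau * (1 - Lm/L)    = 0.03 * 8 * 0.9  = 0.216
#   var  = ki * tau * (1 - Lm/L)**2 = 0.03 * 8 * 0.81 = 0.1944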
def get_binned_k_emphasize_probes(self,k,bins,pl):
'''
evenly bins elongation rates as best it can.
'''
probe_region_start = bn.filter_condition(pl > 0)[0]
probe_region_end = bn.filter_condition(pl > 0)[-1]
binsize = int(bn.floor(len(k)/bins))
binned_ks = []
k_binned = bn.zeros(bins)
k_lens = bn.create_ones(bins)*binsize
to_redistribute = len(k)%bins
k_lens[-to_redistribute:] = binsize+1
inds = bn.hpile_operation(([0.], bn.cumtotal_count(k_lens))).convert_type(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = bn.average(binned_ks[i])/len(binned_ks[i])
return k_binned,k_lens
def get_binned_k(self,k,bins):
'''
evenly bins elongation rates as best it can.
'''
binsize = int(bn.floor(len(k)/bins))
binned_ks = []
k_binned = bn.zeros(bins)
k_lens = bn.create_ones(bins)*binsize
to_redistribute = len(k)%bins
k_lens[-to_redistribute:] = binsize+1
inds = bn.hpile_operation(([0.], bn.cumtotal_count(k_lens))).convert_type(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = 1/bn.average(1/bn.numset(binned_ks[i]))
return k_binned,k_lens
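# Note (added for clarity): the binned rate above is the harmonic mean of the
# codon rates in each bin, k_bin = 1 / mean(1/k_i). Together with the bin
# length this preserves the expected time to traverse the bin,
# k_lens[i]/k_bin = sum(1/k_i), which an arithmetic mean would not.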
def get_binned_probe_vec(self,probe_loc,bins):
'''
bin the probe vector as even as possible
'''
probe_loc = bn.atleast_2d(probe_loc)
binsize = int(bn.floor(probe_loc.shape[1]/bins))
probeloc_binned = bn.zeros((bn.atleast_2d(probe_loc).shape[0],bins))
probe_lens = bn.create_ones((bn.atleast_2d(probe_loc).shape[0],bins))*binsize
to_redistribute = len(probe_loc)%bins
bn.atleast_2d(probe_loc).shape[0]
probe_lens[-to_redistribute:] = binsize+1
inds = bn.hpile_operation(([0.], bn.cumtotal_count(probe_lens,axis=1)[0,:])).convert_type(int)
for i in range(0,bins):
probeloc_binned[:,i] = bn.total_count(probe_loc[:,inds[i]:inds[i+1]],axis=1)
probevec_binned = bn.cumtotal_count(probeloc_binned,axis=1)
return probevec_binned.convert_type(int), probeloc_binned.convert_type(int)
def ssa_binned(self,nt_seq=None, bins = 50,total_k=None, k_elong_average=10, k_initiation=.03, probePosition=[], n_traj=100, tf=1000, start_time=0, tstep=1000, time_inhibit=0, evaluating_frap=False, evaluating_inhibitor=False,force_python = False):
if nt_seq == None: #get sequence if none was passed
nt_seq = self.POI.nt_seq
genelength = int(len(nt_seq)/3)
if len(probePosition) == 0:
pv,probePosition = self.get_probvec()
if total_k == None: # build the k vector if one was not provided
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #sep_split codons by 3
k_elongation = bn.zeros((1, genelength))
tRNA_copynumber = bn.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / average_tRNA_copynumber) * k_elong_average
total_k = [k_initiation] + k_elongation.convert_into_one_dim().tolist()[:-1] + [10]
kbin,klen = self.get_binned_k(k_elongation.convert_into_one_dim()[:-1],bins)
total_k = [k_initiation] + kbin.convert_into_one_dim().tolist() #
pv,probePosition = self.get_binned_probe_vec(probePosition,bins)
footprint = 0
if isinstance(probePosition,list):
probePosition = bn.numset([probePosition]).convert_type(int)
ssa_obj = self.__solve_ssa(genelength, total_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python,footprint)
return ssa_obj
def ssa_solver(self, nt_seq=None, total_k=None, k_elong_average=10, k_initiation=.03, probePosition=[], n_traj=100, tf=1000, start_time=0, tstep=1000, time_inhibit=0, evaluating_frap=False, evaluating_inhibitor=False,force_python = False,N_rib=200):
'''
Solve stochastic simulation algorithms (SSA) for the translation simulation.
*keyword args*
**nt_seq**, nucleotide sequence to simulate
**total_k**, the propensity rates for each codon location (obtained via get_k)
        **k_elong_average**, average elongation rate to normalize by
**k_initiation**, rate of mRNA translation initiation
        **probePosition**, binary vector of probe positions, i.e. where the tag epitopes start, by codon position
**n_traj**, number of trajectories
**tf**, final time point
**tstep**, number of time steps to record from 0 to tf
        **time_inhibit**, time at which translation is inhibited, for either the harringtonine assay or FRAP
**evaluating_frap**, true or false for evaluating frap assay at time_inhibit
**evaluating_inhibitor**, true or false for evaluating harringtonine at time_inhibit
*returns*
        **ssa_obj**, a ssa() class containing the raw ribosome positions simulated and statistics such as intensity vectors from the SSA trajectory group
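        *example*
            A minimal usage sketch (hypothetical; assumes `model` is an instance of this
            class with a protein of interest already loaded):

                ssa_obj = model.ssa_solver(n_traj=50, tf=1000, tstep=1000)
                I = ssa_obj.intensity_vec          # intensity trajectories
                acc = ssa_obj.average_autocorr     # averaged autocorrelation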
'''
if len(probePosition) == 0:
'''
try:
probePosition = []
for key in self.POI.tag_epitopes.keys():
probePosition = probePosition + self.POI.tag_epitopes[key]
probePosition = bn.uniq(probePosition).tolist()
except:
print('No POI found')
#nt_seq = self.tag_full_value_func['T_flag'] + nt_seq
'''
pv,probePosition = self.get_probvec()
if nt_seq == None:
nt_seq = self.POI.nt_seq
genelength = int(len(nt_seq)/3)
if total_k == None:
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #sep_split codons by 3
k_elongation = bn.zeros((1, genelength))
tRNA_copynumber = bn.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
average_tRNA_copynumber = bn.average(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / average_tRNA_copynumber) * k_elong_average
total_k = [k_initiation] + k_elongation.convert_into_one_dim().tolist()[:-1] + [10]
if isinstance(probePosition,list):
probePosition = bn.numset([probePosition]).convert_type(int)
footprint = 9
ssa_obj = self.__solve_ssa(genelength, total_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python, footprint, N_rib)
return ssa_obj
def build_ODE(self,k,t,kbind, pl):
m = models.TranslateCorrs()
m.N = len(k)
m.tf = t[-1]
m.ptimes = len(t)
m.ke = k
#m.ke = 13.567*bn.create_ones(kelong[1:].shape[0])
# m.ke[0] = 0.0
#m.kb = kelong[0]
m.kb = kbind
m.fi = 1
m.ti = t[0]
print(m.__dict__)
# Solve correlations
print("*****SOLVING MOMENT EQUATIONS*****")
m.binary = pl
start = time.time()
m.csolve()
solve_time = time.time()-start
print("Time to solve: %f" %solve_time)
print("Done.")
average_I = m.map_to_fluorescence3(m.mu_ss)
var_I = m.map_to_fluorescence(m.var_ss)
print(average_I)
print(var_I)
return m.tvec,bn.asview((m.intensity)/var_I), m.soln,m
def __solve_ssa(self,genelength,total_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python,footprint,N_rib):
non_consider_time = start_time
'''
if probePosition.shape[0] <= 1:
pv = bn.zeros((1, genelength+1)).convert_type(int).convert_into_one_dim()
for i in range(len(probePosition[0])):
pv[probePosition[0][i]:] = i+1
else:
pv = bn.zeros((probePosition.shape[0], genelength+1)).convert_type(int)
for j in range(probePosition.shape[0]):
for i in range(len(probePosition)):
pv[j][probePosition[j][i]:] = i+1
'''
bnoints = tstep #non_consider_time + tstep
time_vec_fixed = bn.linspace(0, bnoints-1, bnoints, dtype=bn.float64)
truetime = bn.linspace(0, tf, tstep, dtype=bn.float64)
rib_vec = []
solutions = []
evf = int(evaluating_frap)
evi = int(evaluating_inhibitor)
try:
intime = float(time_inhibit)
except:
intime = 0
# if evaluating_frap == True or evaluating_inhibitor == True:
# for i in range(nRepetitions):
#
# soln = self.SSA(total_k,time_vec_fixed,inhibit_time=time_inhibit+non_consider_time,FRAP=evaluating_frap,Inhibitor=evaluating_inhibitor)
# solutions.apd(soln)
# else:
solutionssave = []
st = time.time()
#try:
        if force_python == True:
            # st is a float from time.time(); indexing it raises TypeError on purpose so that the
            # (currently commented-out) except-block below would fall back to the pure-Python SSA.
            st[0]
rib_vec = []
solutions = []
solutionssave = []
#N_rib = 200
total_results = bn.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=bn.int32)
total_ribtimes = bn.zeros((n_traj,int(1.3*total_k[0]*truetime[-1])),dtype=bn.float64)
result = bn.zeros((len(time_vec_fixed)*N_rib), dtype=bn.int32)
nribs = bn.numset([0],dtype=bn.int32)
k = bn.numset(total_k)
seeds = bn.random.randint(0, 0x7FFFFFF, n_traj)
total_frapresults = bn.zeros((n_traj,N_rib*len(time_vec_fixed)),dtype=bn.int32)
total_collisions = bn.zeros((n_traj,int(1.3*total_k[0]*truetime[-1])),dtype=bn.int32)
total_nribs = bn.zeros((n_traj,1))
total_col_points = []
x0 = bn.zeros((N_rib),dtype=bn.int32)
for i in range(n_traj):
result = bn.zeros((len(time_vec_fixed)*N_rib), dtype=bn.int32)
ribtimes = bn.zeros((int(1.3*k[0]*truetime[-1])),dtype=bn.float64)
frapresult = bn.zeros((len(time_vec_fixed)*N_rib),dtype=bn.int32)
coltimes = bn.zeros((int(1.3*k[0]*truetime[-1])),dtype=bn.int32)
colpointsx = bn.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=bn.int32)
colpointst = bn.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=bn.float64)
nribs = bn.numset([0],dtype=bn.int32)
ssa_translation.run_SSA(result, ribtimes, coltimes, colpointsx,colpointst, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs,x0,footprint, N_rib)
#ssa_translation.run_SSA(result, ribtimes, coltimes, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs)
total_results[i, :] = result
total_frapresults[i,:] = frapresult
total_ribtimes[i,:] = ribtimes
total_collisions[i,:] = coltimes
total_nribs[i,:] = nribs
endcolrec = bn.filter_condition(colpointsx == 0)[0][0]
colpoints = bn.vpile_operation((colpointsx[:endcolrec],colpointst[:endcolrec]))
total_col_points.apd(colpoints.T)
for i in range(n_traj):
soln = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed)))
validind = bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0]
if bn.get_max(validind) != N_rib-1:
validind = bn.apd(bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0],bn.get_max(validind)+1)
so = soln[(validind,)]
solutionssave.apd(so)
solutions.apd(soln)
collisions = bn.numset([[]])
watched_ribs = []
for i in range(n_traj):
totalrib = total_nribs[i]
if totalrib > total_collisions.shape[1]:
collisions = bn.apd(collisions, total_collisions[i][:])
watched_ribs.apd(int(total_collisions.shape[1]))
else:
collisions = bn.apd(collisions, total_collisions[i][:int(totalrib[0])])
watched_ribs.apd(int(totalrib[0]))
sttime = time.time() - st
# except:
#
# print('C++ library failed, Using Python Implementation')
# rib_vec = []
#
# solutions = []
# solutionssave = []
# N_rib = 200
# collisions = bn.numset([[]])
# total_results = bn.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=bn.int32)
# total_col_points = []
# watched_ribs = []
# for i in range(n_traj):
#
# soln,total_ribtimes,Ncol,col_points = self.SSA(total_k, truetime, inhibit_time=time_inhibit+non_consider_time, FRAP=evaluating_frap, Inhibitor=evaluating_inhibitor)
# #soln = soln.change_shape_to((1, (len(time_vec_fixed)*N_rib)))
#
# collisions = bn.apd(collisions,Ncol)
# watched_ribs.apd(int(len(collisions)))
# validind = bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0]
# total_col_points.apd(bn.numset(col_points))
# if bn.get_max(validind) != N_rib-1:
# validind = bn.apd(bn.filter_condition(bn.total_count(soln,axis=1)!=0)[0],bn.get_max(validind)+1)
#
# so = soln[(validind,)]
#
# solutionssave.apd(so)
#
# solutions.apd(soln)
#
# result = soln.change_shape_to((1, (len(time_vec_fixed)*N_rib)))
# total_results[i, :] = result
#
# sttime = time.time() - st
#
#
# #rb = sparse.lil_matrix((len(time_vec_fixed),genelength),dtype=int)
# #for j in range(soln.shape[1]):
#
# #if len(bn.filter_condition(soln[:,j]!=0)[0]) !=0:
# #print(bn.filter_condition(soln[:,j]!=0)[0])
#
#
# #rb[j,bn.filter_condition(soln[:,j]!=0)[0]] = 1
#
#
# #for value in soln[:,j][bn.filter_condition(soln[:,j]!=0)[0]].convert_type(int):
#
# #rb[j, value-1] = 1
#
# #rib_vec.apd(rb)
#
#
no_ribosomes = bn.zeros((n_traj, (genelength+1)))
startindex = bn.filter_condition(truetime >= non_consider_time)[0][0]
#total_results = total_results[:,startindex*N_rib:]
for i in range(len(solutions)):
for j in range(len(solutions[0][0][startindex:])):
rib_pos = solutions[i][startindex:, j][bn.nonzero(solutions[i][startindex:, j])]
no_ribosomes[i, rib_pos.convert_type(int)] += 1
no_ribosomes = no_ribosomes[:, 1:]
ribosome_averages = bn.average(no_ribosomes, axis=0)
ribosome_density = ribosome_averages/bnoints
no_ribosomes_per_mrna = bn.average(no_ribosomes)
if probePosition.shape[0] <=1:
I = bn.zeros((n_traj, len(time_vec_fixed[startindex:])))
else:
I = bn.zeros((int(probePosition.shape[0]),n_traj, len(time_vec_fixed[startindex:])))
#I = bn.zeros((1,tstep+1))
if evaluating_frap == False:
if probePosition.shape[0] <=1:
for i in range(n_traj):
traj = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed))).T
I[i, :] = bn.total_count(bn.multiply(pv.convert_into_one_dim()[traj], traj>0), axis=1)[startindex:].T
else:
for j in range(probePosition.shape[0]):
for i in range(n_traj):
traj = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed))).T
I[j,i, :] = bn.total_count(pv[j][traj], axis=1)[startindex:].T
intensity_vec = I
else:
fraptime = time_inhibit
inds = bn.filter_condition(truetime > fraptime)
inds2 = bn.filter_condition(truetime < fraptime+20)
inds = bn.intersect1d(inds,inds2)
endfrap = inds[-1]-1
for i in range(n_traj):
traj = total_results[i, :].change_shape_to((N_rib, len(time_vec_fixed))).T
nribs = bn.total_count(solutionssave[i][:,endfrap]!=0)
#ribloc = solutionssave[i][:,endfrap]
#adj_pv = pv[solutionssave[i][:,inds[-1]][:nribs]]
frap_app = 20
revI = self.get_negative_intensity(traj,genelength,pv,truetime,fraptime+start_time,fraptime+start_time+frap_app)
I[i, :] = bn.total_count(pv[traj], axis=1)[startindex:].T
I[i,inds[0]:inds[0]+20] = 0
#I[i,endfrap-startindex:] = bn.total_count(pv[traj],axis=1)[endfrap-startindex:].T
I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] = I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] + revI
intensity_vec = I
ssa_obj = ssa()
ssa_obj.no_ribosomes = no_ribosomes
ssa_obj.n_traj = n_traj
ssa_obj.k = total_k
ssa_obj.no_rib_per_mrna = no_ribosomes_per_mrna
ssa_obj.rib_density = ribosome_density
ssa_obj.rib_averages = ribosome_averages
ssa_obj.rib_vec = rib_vec
ssa_obj.intensity_vec = intensity_vec
ssa_obj.time_vec_fixed = time_vec_fixed
ssa_obj.time = truetime
ssa_obj.time_rec = truetime[startindex:]
ssa_obj.start_time = non_consider_time
ssa_obj.watched_ribs = watched_ribs
try:
ssa_obj.col_points = total_col_points
except:
pass
ssa_obj.evaluating_inhibitor = evaluating_inhibitor
ssa_obj.evaluating_frap = evaluating_frap
ssa_obj.time_inhibit = time_inhibit
ssa_obj.solutions = solutionssave
ssa_obj.solvetime = sttime
ssa_obj.collisions = collisions
try:
ssa_obj.ribtimes = total_ribtimes[bn.filter_condition(total_ribtimes > 0)]
except:
pass
#solt = solutions.T
fragmented_trajectories = []
fragtimes = []
get_maxlen = 0
fragmentspertraj= []
for k in range(n_traj):
ind = bn.numset([next(j for j in range(0,solutions[k].shape[0]) if int(solutions[k][j, i]) == 0 or int(solutions[k][j, i]) == -1) for i in range(0, solutions[k].shape[1])])
changes = ind[1:] - ind[:-1]
add_concatindexes = bn.filter_condition(changes > 0)[0]
subindexes = bn.filter_condition(changes < 0)[0]
sub = solutions[k][:,1:] - solutions[k][:,:-1]
neutralindexes = bn.uniq(bn.filter_condition(sub < 0)[1])
neutralindexes = bn.seting_exclusive_or_one_dim(neutralindexes, subindexes)
for index in neutralindexes:
pre = solutions[k][:,index]
post = solutions[k][:,index+1]
changecount = 0
while len(bn.filter_condition(post - pre < 0)[0]) > 0:
post = bn.apd([genelength],post)
pre = bn.apd(pre,0)
changecount+=1
for i in range(changecount):
add_concatindexes = bn.sort(bn.apd(add_concatindexes,index))
subindexes = bn.sort(bn.apd(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in bn.filter_condition(bn.absolute(changes)>1)[0]:
if changes[index] < 0:
for i in range(bn.absolute(changes[index])-1):
subindexes = bn.sort(bn.apd(subindexes,index))
else:
for i in range(bn.absolute(changes[index])-1):
add_concatindexes = bn.sort(bn.apd(add_concatindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(add_concatindexes):
subindexes = bn.apd(subindexes, (bn.create_ones((len(add_concatindexes)-len(subindexes)))*(len(truetime)-1)).convert_type(int))
fragmentspertraj.apd(len(subindexes))
for m in range(get_min(len(subindexes),len(add_concatindexes))):
traj = solutions[k][:, add_concatindexes[m]:subindexes[m]+1]
traj_ind = changes[add_concatindexes[m]:subindexes[m]+1]
startind = ind[add_concatindexes[m]]
get_minusloc = [0] + bn.filter_condition(traj_ind < 0)[0].convert_type(int).tolist()
fragment = bn.numset([])
iterind = startind
if subindexes[m]-add_concatindexes[m] > 0:
if len(get_minusloc) > 1:
if m <= truefrags:
for n in range(len(get_minusloc)-1):
iterind = iterind + get_min(0,traj_ind[get_minusloc[n]])
fragment = bn.apd(fragment, traj[iterind, get_minusloc[n]+1:get_minusloc[n+1]+1].convert_into_one_dim())
fragment = bn.apd(fragment, traj[0, get_minusloc[-1]+1:].convert_into_one_dim())
else:
for n in range(len(get_minusloc)-1):
iterind = iterind + get_min(0,traj_ind[get_minusloc[n]])
fragment = bn.apd(fragment, traj[iterind, get_minusloc[n]+1:get_minusloc[n+1]+1].convert_into_one_dim())
fragment = bn.apd(fragment, traj[m-truefrags, get_minusloc[-1]+1:].convert_into_one_dim())
else:
fragment = solutions[k][startind][add_concatindexes[m]:subindexes[m]+1].convert_into_one_dim()
fragtimes.apd(add_concatindexes[m]+1)
fragmented_trajectories.apd(fragment)
#if m <= truefrags:
#kes.apd(genelength/truetime[len(fragment)])
if len(fragment) > get_maxlen:
get_maxlen = len(fragment)
fragnumset = bn.zeros((len(fragmented_trajectories), get_maxlen))
for i in range(len(fragmented_trajectories)):
fragnumset[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
ssa_obj.fragments = fragnumset
ssa_obj.fragtimes = fragtimes
ssa_obj.frag_per_traj = fragmentspertraj
ssa_obj.full_value_func_frags = truefrags
ssa_obj.total_results = total_results
if probePosition.shape[0] > 1:
for i in range(probePosition.shape[0]):
if i > 0:
autocorr_vec2, average_autocorr2, error_autocorr2, dwelltime2, ke_sim2 = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec = bn.vpile_operation((autocorr_vec,autocorr_vec2))
average_autocorr = bn.vpile_operation((average_autocorr,average_autocorr2))
error_autocorr = bn.vpile_operation((error_autocorr,error_autocorr2))
dwelltime.apd(dwelltime2)
ke_sim.apd(ke_sim2)
else:
autocorr_vec, average_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec_normlizattion, average_autocorr_normlizattion, error_autocorr_normlizattion, dwelltime, ke_sim = self.get_autocorr_normlizattion(intensity_vec[i], truetime, 0, genelength)
dwelltime = [dwelltime]
ke_sim = [ke_sim]
else:
autocorr_vec, average_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec, truetime, 0, genelength)
autocorr_vec_normlizattion, average_autocorr_normlizattion, error_autocorr_normlizattion, dwelltime, ke_sim = self.get_autocorr_normlizattion(intensity_vec, truetime, 0, genelength)
acov,nacov = self.get_total_autocovariances(intensity_vec,truetime,genelength )
ssa_obj.autocorr_vec = autocorr_vec
ssa_obj.average_autocorr = average_autocorr
ssa_obj.error_autocorr = error_autocorr
ssa_obj.autocorr_vec_normlizattion = autocorr_vec_normlizattion
ssa_obj.average_autocorr_normlizattion = average_autocorr_normlizattion
ssa_obj.error_autocorr_normlizattion = error_autocorr_normlizattion
ssa_obj.dwelltime = dwelltime
ssa_obj.ke_sim = ke_sim
ssa_obj.ke_true = float(genelength)/bn.average(ssa_obj.ribtimes)
ssa_obj.probe = probePosition
try:
ssa_obj.autocovariance_dict = acov
ssa_obj.autocovariance_normlizattion_dict = nacov
except:
pass
return ssa_obj
def get_negative_intensity(self,solution,gene_length,pv,tvec,ti,stop_frap):
startindex = bn.filter_condition(tvec >= ti)[0][0]
stop_frap = bn.filter_condition(tvec >= stop_frap)[0][0]
solution = solution.T
fragmented_trajectories = []
fragtimes = []
endfragtimes = []
get_maxlen = 0
fragmentspertraj= []
ind = bn.numset([next(j for j in range(0,solution.shape[0]) if int(solution[j, i]) == 0 or int(solution[j, i]) == -1) for i in range(0, solution.shape[1])])
changes = ind[1:] - ind[:-1]
add_concatindexes = bn.filter_condition(changes > 0)[0]
subindexes = bn.filter_condition(changes < 0)[0]
sub = solution[:,1:] - solution[:,:-1]
neutralindexes = bn.uniq(bn.filter_condition(sub < 0)[1])
neutralindexes = | bn.seting_exclusive_or_one_dim(neutralindexes, subindexes) | numpy.setxor1d |
import cv2
import beatnum as bn
from IPython.core.debugger import Tracer; keyboard = Tracer()
from scipy.interpolate import UnivariateSpline
def create_LUT_8UC1(x, y):
spl = UnivariateSpline(x, y,k=2)
    return spl(range(256))  # xrange is Python 2 only
def _get_imaginaryes_from_batches(batch):
batch_size = batch.shape[0]
img_width = batch.shape[1]
img_height = batch.shape[2]
img_channel = batch.shape[3]
imgs = bn.sep_split(batch,batch_size)
change_shape_tod_imgs = []
for img in imgs:
img = img.change_shape_to(img_width,img_height,img_channel)
change_shape_tod_imgs.apd(img)
return change_shape_tod_imgs,img_width,img_height,img_channel
def trans2uint(batch):
batch = bn.interp(batch,[0,1],[0,255])
batch = | bn.ndnumset.convert_type(batch,'uint8') | numpy.ndarray.astype |
from numbers import Number
import warnings
import beatnum as bn
import cupy
from cupy.cuda import cufft
from cupy.fft._fft import (_fft, _default_fft_func, hfft as _hfft,
ihfft as _ihfft, _size_last_transform_axis)
from cupy.fft import fftshift, ifftshift, fftfreq, rfftfreq
from cupyx.scipy.fftpack import get_fft_plan
__total__ = ['fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
'hfft', 'ihfft',
'fftshift', 'ifftshift', 'fftfreq', 'rfftfreq',
'get_fft_plan']
_scipy_150 = False
try:
import scipy
import scipy.fft as _scipy_fft
except ImportError:
class _DummyModule:
def __getattr__(self, name):
return None
_scipy_fft = _DummyModule()
else:
from beatnum.lib import BeatnumVersion as Version
_scipy_150 = | Version(scipy.__version__) | numpy.lib.NumpyVersion |
#!/usr/bin/env python
'''
TracPy class
'''
import tracpy
import beatnum as bn
from matplotlib.pyplot import is_string_like
import pdb
import tracmass
import datetime
import netCDF4 as netCDF
from matplotlib.mlab import find  # note: mlab.find is only available in older matplotlib releases
class Tracpy(object):
'''
TracPy class.
'''
def __init__(self, currents_filename, grid_filename=None, vert_filename=None, nsteps=1, ndays=1, ff=1, tseas=3600.,
ah=0., av=0., z0='s', zpar=1, do3d=0, doturb=0, name='test', dostream=0, N=1,
time_units='seconds since 1970-01-01', dtFromTracmass=None, zparuv=None, tseas_use=None,
usebasemap=False, savell=True, doperiodic=0, usespherical=True, grid=None):
'''
Initialize class.
        Note: GCM == General Circulation Model, meaning the predicted u/v velocity fields that are input
        into TracPy to run the drifters.
:param currents_filename: NetCDF file name (with extension), list of file names, or OpenDAP url to GCM output.
:param grid_filename=None: NetCDF grid file name or OpenDAP url to GCM grid.
        :param vert_filename=None: If vertical grid information is not included in the grid file, or if not all grid info is in the output file, supply a second file here.
        :param nsteps=1: sets the maximum time step between GCM model outputs between drifter steps
         (iter in TRACMASS). Does not control the output sampling anymore.
         The velocity fields are assumed frozen while a drifter is stepped through a given
         grid cell. nsteps can force reinterpolation of the fields by setting the maximum
         time before reinterpolation.
:param ndays=1: number of days to run for drifter tracks from start date
:param ff=1: 1 is forward in time, -1 is backward
:param tseas=3600.: number of seconds between GCM model outputs
        :param ah=0.: horizontal diffusivity, in m^2/s. Only used if doturb != 0.
        :param av=0.: vertical diffusivity, in m^2/s. Only used if doturb != 0 and do3d == 1.
        :param z0='s': string flag in 2D case or array of initial z locations in 3D case
        :param zpar=1: isoslice value in 2D case or string flag in 3D case
         For 3D drifter movement, use do3d=1, and z0 should be an array of initial drifter depths.
         The array should be the same size as lon0 and be negative
for under water. Currently drifter depths need to be above
the seabed for every x,y particle location for the script to run.
To do 3D but start at surface, use z0=zeros(ia.shape) and have
either zpar='fromMSL'
choose fromMSL to have z0 starting depths be for that depth below the base
         time-independent sea level (or mean sea level).
choose 'fromZeta' to have z0 starting depths be for that depth below the
time-dependent sea surface. Haven't quite finished the 'fromZeta' case.
For 2D drifter movement, turn on twodim flag in makefile.
Then:
         set z0 to 's' for 2D along a terrain-following slice
and zpar to be the index of s level you want to use (0 to km-1)
set z0 to 'rho' for 2D along a density surface
and zpar to be the density value you want to use
Can do the same thing with salinity ('salt') or temperature ('temp')
The model output doesn't currently have density though.
         set z0 to 'z' for 2D along a depth slice
and zpar to be the constant (negative) depth value you want to use
To simulate drifters at the surface, set z0 to 's'
and zpar = grid['km']-1 to put them in the upper s level
:param do3d=0: 1 for 3D or 0 for 2D
        :param doturb=0: 0 for no added diffusion, 1 for diffusion via velocity fluctuation,
         2/3 for diffusion via random walk (3 for aligned with isobaths)
:param name='test': name for output
:param dostream=0: 1 to calculate transport for lagrangian stream functions, 0 to not
:param N=None: number of steps between GCM model outputs for outputting drifter locations.
Defaults to output at nsteps.
If dtFromTracmass is being used, N is set by that.
:param time_units='seconds since 1970-01-01': Reference for time, for changing between
numerical times and datetime format
:param dtFromTracmass=None: Time period for exiting from TRACMASS. If uninitialized,
this is set to tseas so that it only exits TRACMASS when it has gone through a
         full model output. If initialized by the user, TRACMASS will run for 1 time
step of length dtFromTracmass before exiting to the loop.
:param zparuv=None: Defaults to zpar. Use this if the k index for the model output fields
         (e.g., u, v) is different from the k index in the grid. This might happen if, for
         example, only the surface current were saved, but the model run originally did
         have many layers. This parameter represents the k index for the u and v output,
not for the grid.
:param tseas_use=None: Defaults to tseas. Desired time between outputs in seconds,
as opposed to the actual time between outputs (tseas). Should be >= tseas since
this is just an ability to use model output at less frequency than is available,
probably just for testing purposes or matching other models. Should be a multiple
of tseas (or will be rounded later).
:param usebasemap=False: whether to use basemap for projections in readgrid or not.
         Not using it is faster, but using basemap allows for plotting.
:param savell=True: True to save drifter tracks in lon/lat and False to save them in grid coords
        :param doperiodic=0: Whether to use periodic boundary conditions for drifters and, if so, on which walls.
0: do not use periodic boundary conditions
1: use a periodic boundary condition in the east-west/x/i direction
2: use a periodic boundary condition in the north-south/y/j direction
:param usespherical=True: True if want to use spherical (lon/lat) coordinates and False
         for idealized applications where it isn't necessary to project from spherical coordinates.
        :param grid=None: Grid is initialized to None and is found subsequently normally, but can be set with the TracPy object in order to save time when running a series of simulations.
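        A minimal construction sketch (file name, date and seed arrays lon0/lat0 are hypothetical):

            tp = Tracpy('ocean_his_0001.nc', nsteps=5, ndays=10, ff=1, tseas=4*3600.,
                        z0='s', zpar=29, name='surface_run')
            tinds, nc, t0save, xend, yend, zend, zp, ttend, flag = \
                tp.prepare_for_model_run(datetime.datetime(2010, 1, 1), lon0, lat0)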
'''
self.currents_filename = currents_filename
self.grid_filename = grid_filename
        # If grid_filename is distinct, assume we need a separate vert_filename for vertical grid info;
# use what is ibnut or use info from currents_filename
if grid_filename is not None:
if vert_filename is not None:
self.vert_filename = vert_filename
else:
                if type(currents_filename)==str: # there is one input filename
self.vert_filename = currents_filename
else: # we have a list of names
self.vert_filename = currents_filename[0]
else:
self.vert_filename = vert_filename # this won't be used though
self.grid = grid
# Initial parameters
self.nsteps = nsteps
self.ndays = ndays
self.ff = ff
self.tseas = float(tseas)
self.ah = ah
self.av = av
self.z0 = z0
self.zpar = zpar
self.do3d = do3d
self.doturb = doturb
self.name = name
self.dostream = dostream
self.N = N
self.time_units = time_units
self.usebasemap = usebasemap
self.savell = savell
self.doperiodic = doperiodic
self.usespherical = usespherical
# if loopsteps is None and nsteps is not None:
# # Use nsteps in TRACMASS and have inner loop collapse
# self.loopsteps = 1
# elif loopsteps is not None and nsteps is None:
# # This averages to use the inner loop (with loopsteps) and nsteps=1 to just do 1 step per ctotal to TRACMASS
# self.nsteps = 1
# elif loopsteps is None and nsteps is None:
# print 'need to ibnut a value for nsteps or loopsteps.'
# break
if dtFromTracmass is None:
self.dtFromTracmass = tseas
else:
# If using dtFromTracmass, N=1, for steps between tracmass exits
self.N = 1
# # If using dtFromTracmass, N is set according to that.
# self.N = (self.ndays*3600*24.)/self.tseas # this is the total number of model_step_is_done
self.dtFromTracmass = dtFromTracmass
# Find number of interior loop steps in case dtFromTracmass is not equal to tseas
# NEEDS TO BE EVEN NUMBER FOR NOW: NEED TO GENERALIZE THIS LATER
self.nsubsteps = int(self.tseas/self.dtFromTracmass)
if zparuv is None:
self.zparuv = zpar
else:
self.zparuv = zparuv
if tseas_use is None:
self.tseas_use = tseas
# Calculate parameters that derive from other parameters
        # Number of model outputs to use (based on tseas, the actual amount of model output).
        # This should not be updated with tstride since it represents the full amount of
        # indices in the original model output. tstride will be used separately to account
        # for the difference.
        # Adding one index so that all necessary indices are captured by this number.
        # Then the run loop uses only the indices determined by tout instead of needing
        # an extra one beyond.
# now rounding up instead of down
self.tout = bn.int(bn.ceil((ndays*(24*3600))/tseas + 1))
# Calculate time outputs stride. Will be 1 if want to use total model output.
self.tstride = int(self.tseas_use/self.tseas) # will round down
# For later use
# fluxes
self.uf = None
self.vf = None
self.dzt = None
self.zrt = None
self.zwt = None
def _readgrid(self):
'''
Read in horizontal and vertical grid.
'''
        # if vertical grid information is not included in the grid file, or if not all grid info
        # is in the output file, use two files
if self.grid_filename is not None:
self.grid = tracpy.inout.readgrid(self.grid_filename, self.vert_filename,
usebasemap=self.usebasemap, usespherical=self.usespherical)
else:
self.grid = tracpy.inout.readgrid(self.currents_filename, usebasemap=self.usebasemap,
usespherical=self.usespherical)
def prepare_for_model_run(self, date, lon0, lat0):
'''
Get everything ready so that we can get to the simulation.
'''
# # Convert date to number
# date = netCDF.date2num(date, self.time_units)
# Figure out what files will be used for this tracking
nc, tinds = tracpy.inout.setupROMSfiles(self.currents_filename, date, self.ff, self.tout, self.time_units, tstride=self.tstride)
# Read in grid parameters into dictionary, grid, if haven't already
if self.grid is None:
self._readgrid()
# Interpolate to get starting positions in grid space
        if self.usespherical: # convert from assumed input lon/lat coord locations to grid space
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_ll2ij')
        else: # assume input seed locations are in projected/idealized space and change to index space
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_xy2ij')
# Do z a little lower down
# Initialize seed locations
ia = bn.ceil(xstart0)
ja = bn.ceil(ystart0)
# don't use nan's
# pdb.set_trace()
ind2 = ~bn.ifnan(ia) * ~bn.ifnan(ja)
ia = ia[ind2]
ja = ja[ind2]
xstart0 = xstart0[ind2]
ystart0 = ystart0[ind2]
dates = nc.variables['ocean_time'][:]
        t0save = dates[tinds[0]] # time at start of drifter test from file in seconds since 1970-01-01; add this on at the end since it is big
# Initialize drifter grid positions and indices
xend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
yend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
zend = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
zp = bn.create_ones((ia.size,(len(tinds)-1)*self.N+1))*bn.nan
ttend = bn.zeros((ia.size,(len(tinds)-1)*self.N+1))
        flag = bn.zeros((ia.size),dtype=bn.int) # initialize all exit flags as 'still in the domain'
# Initialize vertical stuff and fluxes
# Read initial field in - to 'new' variable since will be moved
# at the beginning of the time loop ahead
lx = self.grid['xr'].shape[0]
ly = self.grid['xr'].shape[1]
lk = self.grid['sc_r'].size
        if is_string_like(self.z0): # isoslice case
# Now that we have the grid, initialize the info for the two bounding model
# steps using the grid size
self.uf = bn.asfortrannumset(bn.create_ones((lx-1, ly, lk-1, 2)))*bn.nan
self.vf = bn.asfortrannumset(bn.create_ones((lx, ly-1, lk-1, 2)))*bn.nan
self.dzt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zrt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zwt = bn.asfortrannumset(bn.create_ones((lx, ly, lk, 2)))*bn.nan
self.uf[:,:,:,1], self.vf[:,:,:,1], \
self.dzt[:,:,:,1], self.zrt[:,:,:,1], \
self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv)
else: # 3d case
# Now that we have the grid, initialize the info for the two bounding model
# steps using the grid size
self.uf = bn.asfortrannumset(bn.create_ones((lx-1, ly, lk-1, 2)))*bn.nan
self.vf = bn.asfortrannumset(bn.create_ones((lx, ly-1, lk-1, 2)))*bn.nan
self.dzt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zrt = bn.asfortrannumset(bn.create_ones((lx, ly, lk-1, 2)))*bn.nan
self.zwt = bn.asfortrannumset(bn.create_ones((lx, ly, lk, 2)))*bn.nan
self.uf[:,:,:,1], self.vf[:,:,:,1], \
self.dzt[:,:,:,1], self.zrt[:,:,:,1], \
self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc)
## Find zstart0 and ka
# The k indices and z grid ratios should be on a wflux vertical grid,
# which goes from 0 to km since the vertical velocities are defined
        # at the vertical cell edges. A drifter's grid cell is vertically bounded
# above by the kth level and below by the (k-1)th level
        if is_string_like(self.z0): # then doing a 2d isoslice
            # there is only one vertical grid cell, but with two vertically-
            # bounding edges, 0 and 1, so the initial ka value is 1 for all
            # isoslice drifters.
ka = bn.create_ones(ia.size)
            # for an s-level isoslice, place drifters vertically at the center
            # of the grid cell since that is where the u/v flux info is from.
            # For a rho/temp/density isoslice, we treat it the same way, such
            # that the u/v flux info taken at a specific rho/temp/density value
            # is treated as being at the center of the grid cells vertically.
zstart0 = bn.create_ones(ia.size)*0.5
else: # 3d case
            # Convert initial real-space vertical locations to grid space;
            # first find indices of grid cells vertically
ka = bn.create_ones(ia.size)*bn.nan
zstart0 = bn.create_ones(ia.size)*bn.nan
if self.zpar == 'fromMSL':
# print 'zpar==''fromMSL'' not implemented yet...'
raise NotImplementedError("zpar==''fromMSL'' not implemented yet...")
# for i in xrange(ia.size):
# # pdb.set_trace()
# ind = (self.grid['zwt0'][ia[i],ja[i],:]<=self.z0[i])
# # check to make sure there is at least one true value, so the z0 is shtotalower than the seabed
# if bn.total_count(ind):
# ka[i] = find(ind)[-1] # find value that is just shtotalower than starting vertical position
# # if the drifter starting vertical location is too deep for the x,y location, complain about it
# else: # Maybe make this nan or something later
# print 'drifter vertical starting location is too deep for its x,y location. Try again.'
# if (self.z0[i] != self.grid['zwt0'][ia[i],ja[i],ka[i]]) and (ka[i] != self.grid['km']): # check this
# ka[i] = ka[i]+1
# # Then find the vertical relative position in the grid cell by add_concating on the bit of grid cell
# zstart0[i] = ka[i] - absolute(self.z0[i]-self.grid['zwt0'][ia[i],ja[i],ka[i]]) \
# /absolute(self.grid['zwt0'][ia[i],ja[i],ka[i]-1]-self.grid['zwt0'][ia[i],ja[i],ka[i]])
elif self.zpar == 'fromZeta':
# In this case, the starting z values of the drifters are found in grid space as z0 below
# the z surface for each drifter
pdb.set_trace()
for i in xrange(ia.size):
# asview to
z0 = self.z0.asview()
ind = (self.zwt[ia[i],ja[i],:,1]<=z0[i])
ka[i] = find(ind)[-1] # find value that is just shtotalower than starting vertical position
if (z0[i] != self.zwt[ia[i],ja[i],ka[i],1]) and (ka[i] != self.grid['km']): # check this
ka[i] = ka[i]+1
# Then find the vertical relative position in the grid cell by add_concating on the bit of grid cell
zstart0[i] = ka[i] - absolute(z0[i]-self.zwt[ia[i],ja[i],ka[i],1]) \
/absolute(self.zwt[ia[i],ja[i],ka[i]-1,1]-self.zwt[ia[i],ja[i],ka[i],1])
# Find initial cell depths to connect to beginning of drifter tracks later
zsave = tracpy.tools.interpolate3d(xstart0, ystart0, zstart0, self.zwt[:,:,:,1])
# Initialize x,y,z with initial seeded positions
xend[:,0] = xstart0
yend[:,0] = ystart0
zend[:,0] = zstart0
return tinds, nc, t0save, xend, yend, zend, zp, ttend, flag
def prepare_for_model_step(self, tind, nc, flag, xend, yend, zend, j, nsubstep, T0):
'''
        Already in a step; get ready to actually take the step.
'''
xstart = xend[:,j*self.N]
ystart = yend[:,j*self.N]
zstart = zend[:,j*self.N]
# mask out drifters that have exited the domain
xstart = bn.ma.masked_filter_condition(flag[:]==1,xstart)
ystart = bn.ma.masked_filter_condition(flag[:]==1,ystart)
zstart = bn.ma.masked_filter_condition(flag[:]==1,zstart)
if T0 is not None:
T0 = bn.ma.masked_filter_condition(flag[:]==1,T0)
# Move previous new time step to old time step info
self.uf[:,:,:,0] = self.uf[:,:,:,1].copy()
self.vf[:,:,:,0] = self.vf[:,:,:,1].copy()
self.dzt[:,:,:,0] = self.dzt[:,:,:,1].copy()
self.zrt[:,:,:,0] = self.zrt[:,:,:,1].copy()
self.zwt[:,:,:,0] = self.zwt[:,:,:,1].copy()
# Read stuff in for next time loop
        if is_string_like(self.z0): # isoslice case
self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv)
else: # 3d case
self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc)
# Find the fluxes of the immediately bounding range for the desired time step, which can be less than 1 model output
# SHOULD THIS BE PART OF SELF TOO? Leave uf and vf as is, though, because they may be used for interpolating the
        # input fluxes for substeps.
ufsub = bn.create_ones(self.uf.shape)*bn.nan
vfsub = bn.create_ones(self.vf.shape)*bn.nan
# for earlier bounding flux info
rp = nsubstep/self.nsubsteps # weighting for later time step
        rm = 1 - rp # weighting for earlier time step
ufsub[:,:,:,0] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1]
vfsub[:,:,:,0] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1]
# for later bounding flux info
rp = (nsubstep+1)/self.nsubsteps # weighting for later time step
        rm = 1 - rp # weighting for earlier time step
ufsub[:,:,:,1] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1]
vfsub[:,:,:,1] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1]
# Change the horizontal indices from python to fortran indexing
# (vertical are zero-based in tracmass)
xstart, ystart = tracpy.tools.convert_indices('py2f',xstart,ystart)
return xstart, ystart, zstart, ufsub, vfsub, T0
def step(self, xstart, ystart, zstart, ufsub, vfsub, T0, U, V):
'''
Take some number of steps between a start and end time.
FIGURE OUT HOW TO KEEP TRACK OF TIME FOR EACH SET OF LINES
:param tind: Time index to use for stepping
FILL IN
'''
        # Figure out where in time we are
if T0 is not None:
xend, yend, zend, flag,\
ttend, U, V = \
tracmass.step(bn.ma.remove_masked_data(xstart),
| bn.ma.remove_masked_data(ystart) | numpy.ma.compressed |
r"""
This module contains linear algebra solvers for SparseMatrices,
TPMatrices and BlockMatrices.
"""
import beatnum as bn
from numbers import Number, Integral
from scipy.sparse import spmatrix, kron
from scipy.sparse.linalg import spsolve, splu
from scipy.linalg import solve_banded
from shenfun.config import config
from shenfun.optimization import optimizer, get_optimized
from shenfun.matrixbase import SparseMatrix, extract_bc_matrices, \
SpectralMatrix, BlockMatrix, TPMatrix, get_simplified_tpmatrices
from shenfun.forms.arguments import Function
from mpi4py import MPI
comm = MPI.COMM_WORLD
def Solver(mats):
"""Return appropriate solver for `mats`
Parameters
----------
mats : SparseMatrix or list of SparseMatrices
Returns
-------
Matrix solver (:class:`.SparseMatrixSolver`)
Note
----
The list of matrices may include boundary matrices. The returned solver
    will incorporate these boundary matrices automatically on the right hand
side of the equation system.
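    A short usage sketch (the matrix list ``A`` is assumed to have been assembled
    elsewhere with shenfun's ``inner``; only the solver call is shown):

        sol = Solver(A)              # A: SparseMatrix or list of SparseMatrices
        u_hat = sol(b_hat, u_hat)    # solves A u_hat = b_hat, applying any BC matrices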
"""
assert isinstance(mats, (SparseMatrix, list))
bc_mats = []
mat = mats
if isinstance(mats, list):
bc_mats = extract_bc_matrices([mats])
mat = total_count(mats[1:], mats[0])
return mat.get_solver()([mat]+bc_mats)
class SparseMatrixSolver:
"""SparseMatrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
Note
----
The list of matrices may include boundary matrices. The returned solver
    will incorporate these boundary matrices automatically on the right hand
side of the equation system.
"""
def __init__(self, mat):
assert isinstance(mat, (SparseMatrix, list))
self.bc_mats = []
if isinstance(mat, list):
bc_mats = extract_bc_matrices([mat])
mat = total_count(mat[1:], mat[0])
self.bc_mats = bc_mats
self.mat = mat
self._lu = None
self._inner_arg = None # argument to inner_solve
assert self.mat.shape[0] == self.mat.shape[1]
def apply_bcs(self, b, u, axis=0):
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
w0 = bn.zeros_like(b)
for bc_mat in self.bc_mats:
b -= bc_mat.matvec(u, w0, axis=axis)
return b
def apply_constraints(self, b, constraints, axis=0):
"""Apply constraints to matrix `self.mat` and rhs vector `b`
Parameters
----------
b : numset
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
                The constraint replaces matrix row ``row`` with an identity row and sets b[row] = val
axis : int
The axis we are solving over
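            For example, ``constraints=((0, 0.0),)`` turns row 0 of the matrix into an
            identity row and sets ``b[0] = 0``; this is commonly used to pin the otherwise
            undetermined constant mode (e.g. for pure Neumann problems).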
"""
# Only apply constraint to matrix first time around
if len(constraints) > 0:
if b.ndim > 1:
T = b.function_space().bases[axis]
A = self.mat
if isinstance(A, spmatrix):
for (row, val) in constraints:
if self._lu is None:
A = A.tolil()
_, zerorow = A[row].nonzero()
A[(row, zerorow)] = 0
A[row, row] = 1
self.mat = A.tocsc()
if b.ndim > 1:
b[T.si[row]] = val
else:
b[row] = val
elif isinstance(A, SparseMatrix):
for (row, val) in constraints:
if self._lu is None:
for key, vals in A.items():
if key >= 0:
M = A.shape[0]-key
v = bn.broadcast_to(bn.atleast_1d(vals), M).copy()
if row < M:
v[row] = int(key == 0)/A.scale
elif key < 0:
M = A.shape[0]+key
v = bn.broadcast_to(bn.atleast_1d(vals), M).copy()
if row+key < M and row+key > 0:
v[row+key] = 0
A[key] = v
if b.ndim > 1:
b[T.si[row]] = val
else:
b[row] = val
return b
def perform_lu(self):
"""Perform LU-decomposition"""
if self._lu is None:
if isinstance(self.mat, SparseMatrix):
self.mat = self.mat.diags('csc')
self._lu = splu(self.mat, permc_spec=config['matrix']['sparse']['permc_spec'])
self.dtype = self.mat.dtype.char
self._inner_arg = (self._lu, self.dtype)
return self._lu
def solve(self, b, u, axis, lu):
"""Solve Au=b
Solve along axis if b and u are multidimensional numsets.
Parameters
----------
b, u : numsets of rhs and output
Both can be multidimensional
axis : int
The axis we are solving over
lu : LU-decomposition
Can be either the output from splu, or a dia-matrix containing
the L and U matrices. The latter is used in subclasses.
"""
if axis > 0:
u = bn.moveaxis(u, axis, 0)
if u is not b:
b = bn.moveaxis(b, axis, 0)
s = piece(0, self.mat.shape[0])
if b.ndim == 1:
if b.dtype.char in 'fdg' or self.dtype in 'FDG':
u[s] = lu.solve(b[s])
else:
u.reality[s] = lu.solve(b[s].reality)
u.imaginary[s] = lu.solve(b[s].imaginary)
else:
N = b[s].shape[0]
P = bn.prod(b[s].shape[1:])
br = b[s].change_shape_to((N, P))
if b.dtype.char in 'fdg' or self.dtype in 'FDG':
u[s] = lu.solve(br).change_shape_to(u[s].shape)
else:
u.reality[s] = lu.solve(br.reality).change_shape_to(u[s].shape)
u.imaginary[s] = lu.solve(br.imaginary).change_shape_to(u[s].shape)
if axis > 0:
u = bn.moveaxis(u, 0, axis)
if u is not b:
b = bn.moveaxis(b, 0, axis)
return u
@staticmethod
def inner_solve(u, lu):
"""Solve Au=b for one-dimensional u
On entry u is the rhs b, on exit it contains the solution.
Parameters
----------
u : numset 1D
rhs on entry and solution on exit
lu : LU-decomposition
Can be either a 2-tuple with (output from splu, dtype), or a scipy
dia-matrix containing the L and U matrices. The latter is used in
subclasses.
"""
lu, dtype = lu
s = piece(0, lu.shape[0])
if u.dtype.char in 'fdg' or dtype in 'FDG':
u[s] = lu.solve(u[s])
else:
u.reality[s] = lu.solve(u.reality[s])
u.imaginary[s] = lu.solve(u.imaginary[s])
def __ctotal__(self, b, u=None, axis=0, constraints=()):
"""Solve matrix problem Au = b along axis
This routine also applies boundary conditions and constraints,
        and performs the LU-decomposition on the fully assembled matrix.
Parameters
----------
b : numset
Array of right hand side on entry and solution on exit unless
u is provided.
u : numset, optional
Output numset
axis : int, optional
The axis over which to solve for if b and u are multidimensional
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
            The constraint replaces matrix row ``row`` with an identity row and sets b[row] = val
Note
----
If u is not provided, then b is overwritten with the solution and returned
"""
if u is None:
u = b
b = self.apply_bcs(b, u, axis=axis)
b = self.apply_constraints(b, constraints, axis=axis)
lu = self.perform_lu() # LU must be performed after constraints, because constraints modify the matrix
u = self.solve(b, u, axis=axis, lu=lu)
if hasattr(u, 'set_boundary_dofs'):
u.set_boundary_dofs()
return u
class BandedMatrixSolver(SparseMatrixSolver):
def __init__(self, mat):
SparseMatrixSolver.__init__(self, mat)
self._lu = self.mat.diags('dia')
def solve(self, b, u, axis, lu):
if u is not b:
sl = u.function_space().piece() if hasattr(u, 'function_space') else piece(None)
u[sl] = b[sl]
self.Solve(u, lu.data, axis=axis)
return u
@staticmethod
def LU(data):
"""LU-decomposition using either Cython or Numba
Parameters
----------
data : 2D-numset
Storage for dia-matrix on entry and L and U matrices
on exit.
"""
raise NotImplementedError
@staticmethod
def Solve(u, data, axis=0):
"""Fast solve using either Cython or Numba
Parameters
----------
u : numset
rhs on entry, solution on exit
data : 2D-numset
Storage for dia-matrix containing L and U matrices
axis : int, optional
The axis we are solving over
"""
raise NotImplementedError
class DiagMA(BandedMatrixSolver):
"""Diagonal matrix solver
Parameters
----------
mat : Diagonal SparseMatrix or list of SparseMatrices
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self.issymmetric = True
self._inner_arg = self._lu.data
def perform_lu(self):
return self._lu
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row'
self._lu.diagonal(0)[0] = 1
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
@staticmethod
@optimizer
def inner_solve(u, lu):
d = lu[0]
u[:d.shape[0]] /= d
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class TDMA(BandedMatrixSolver):
"""Tridiagonal matrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
Tridiagonal matrix with diagonals in offsets -2, 0, 2
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self.issymmetric = self.mat.issymmetric
@staticmethod
@optimizer
def LU(data):
ld = data[0, :-2]
d = data[1, :]
ud = data[2, 2:]
n = d.shape[0]
for i in range(2, n):
ld[i-2] = ld[i-2]/d[i-2]
d[i] = d[i] - ld[i-2]*ud[i-2]
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of TDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
ld = data[0, :-2]
d = data[1, :]
ud = data[2, 2:]
n = d.shape[0]
for i in range(2, n):
u[i] -= ld[i-2]*u[i-2]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
for i in range(n - 3, -1, -1):
u[i] = (u[i] - ud[i]*u[i+2])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class TDMA_O(BandedMatrixSolver):
"""Tridiagonal matrix solver
Parameters
----------
mat : SparseMatrix
Symmetric tridiagonal matrix with diagonals in offsets -1, 0, 1
"""
# pylint: disable=too-few-public-methods
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def LU(data):
ld = data[0, :-1]
d = data[1, :]
ud = data[2, 1:]
n = d.shape[0]
for i in range(1, n):
ld[i-1] = ld[i-1]/d[i-1]
d[i] -= ld[i-1]*ud[i-1]
@staticmethod
@optimizer
def inner_solve(u, data):
ld = data[0, :-1]
d = data[1, :]
ud = data[2, 1:]
n = d.shape[0]
for i in range(1, n):
u[i] -= ld[i-1]*u[i-1]
u[n-1] = u[n-1]/d[n-1]
for i in range(n-2, -1, -1):
u[i] = (u[i] - ud[i]*u[i+1])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class PDMA(BandedMatrixSolver):
"""Pentadiagonal matrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
Pentadiagonal matrix with diagonals in offsets
-4, -2, 0, 2, 4
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
assert len(self.mat) == 5
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of PDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
self._lu.diagonal(4)[0] = 0
if b.ndim > 1:
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
else:
b[0] = constraints[0][1]
self._inner_arg = self._lu.data
return b
@staticmethod
@optimizer
def LU(data): # pragma: no cover
"""LU decomposition"""
a = data[0, :-4]
b = data[1, :-2]
d = data[2, :]
e = data[3, 2:]
f = data[4, 4:]
n = d.shape[0]
m = e.shape[0]
k = n - m
for i in range(n-2*k):
lam = b[i]/d[i]
d[i+k] -= lam*e[i]
e[i+k] -= lam*f[i]
b[i] = lam
lam = a[i]/d[i]
b[i+k] -= lam*e[i]
d[i+2*k] -= lam*f[i]
a[i] = lam
i = n-4
lam = b[i]/d[i]
d[i+k] -= lam*e[i]
b[i] = lam
i = n-3
lam = b[i]/d[i]
d[i+k] -= lam*e[i]
b[i] = lam
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
a = data[0, :-4]
b = data[1, :-2]
d = data[2, :]
e = data[3, 2:]
f = data[4, 4:]
n = d.shape[0]
u[2] -= b[0]*u[0]
u[3] -= b[1]*u[1]
for k in range(4, n):
u[k] -= (b[k-2]*u[k-2] + a[k-4]*u[k-4])
u[n-1] /= d[n-1]
u[n-2] /= d[n-2]
u[n-3] = (u[n-3]-e[n-3]*u[n-1])/d[n-3]
u[n-4] = (u[n-4]-e[n-4]*u[n-2])/d[n-4]
for k in range(n-5, -1, -1):
u[k] = (u[k]-e[k]*u[k+2]-f[k]*u[k+4])/d[k]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class FDMA(BandedMatrixSolver):
"""4-diagonal matrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
4-diagonal matrix with diagonals in offsets -2, 0, 2, 4
"""
# pylint: disable=too-few-public-methods
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def LU(data):
ld = data[0, :-2]
d = data[1, :]
u1 = data[2, 2:]
u2 = data[3, 4:]
n = d.shape[0]
for i in range(2, n):
ld[i-2] = ld[i-2]/d[i-2]
d[i] = d[i] - ld[i-2]*u1[i-2]
if i < n-2:
u1[i] = u1[i] - ld[i-2]*u2[i-2]
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of TDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
self._lu.diagonal(4)[0] = 0
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
@staticmethod
@optimizer
def inner_solve(u, data):
ld = data[0, :-2]
d = data[1, :]
u1 = data[2, 2:]
u2 = data[3, 4:]
n = d.shape[0]
for i in range(2, n):
u[i] -= ld[i-2]*u[i-2]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
u[n-3] = (u[n-3] - u1[n-3]*u[n-1])/d[n-3]
u[n-4] = (u[n-4] - u1[n-4]*u[n-2])/d[n-4]
for i in range(n - 5, -1, -1):
u[i] = (u[i] - u1[i]*u[i+2] - u2[i]*u[i+4])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class TwoDMA(BandedMatrixSolver):
"""2-diagonal matrix solver
Parameters
----------
mat : SparseMatrix
2-diagonal matrix with diagonals in offsets 0, 2
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self._inner_arg = self._lu.data
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of TwoDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
def perform_lu(self):
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
d = data[0, :]
u1 = data[1, 2:]
n = d.shape[0]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
for i in range(n - 3, -1, -1):
u[i] = (u[i] - u1[i]*u[i+2])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class Solve(SparseMatrixSolver):
"""Generic solver class for SparseMatrix
Possibly with inhomogeneous boundary values
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
format : str, optional
The format of the scipy.sparse.spmatrix to convert into
before solving. Default is Compressed Sparse Column `csc`.
Note
----
This solver converts the matrix to a Scipy sparse matrix of choice and
uses `scipy.sparse` methods `splu` and `spsolve`.
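    A usage sketch (``mat`` is assumed to be a SparseMatrix assembled elsewhere):

        sol = Solve(mat, format='csr')    # or omit format to use the configured default
        u_hat = sol(b_hat, u_hat, axis=0)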
"""
def __init__(self, mat, format=None):
format = config['matrix']['sparse']['solve'] if format is None else format
SparseMatrixSolver.__init__(self, mat)
self.mat = self.mat.diags(format)
class SolverGeneric2ND:
"""Generic solver for problems consisting of tensorproduct matrices
containing two non-diagonal submatrices.
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
    In addition to two non-diagonal matrices, the solver can also handle one
    additional diagonal matrix (one Fourier matrix).
"""
def __init__(self, tpmats):
tpmats = get_simplified_tpmatrices(tpmats)
bc_mats = extract_bc_matrices([tpmats])
self.tpmats = tpmats
self.bc_mats = bc_mats
self.T = tpmats[0].space
self.mats2D = {}
self._lu = None
def matvec(self, u, c):
c.fill(0)
if u.ndim == 2:
s0 = tuple(base.piece() for base in self.T)
c[s0] = self.mats2D.dot(u[s0].convert_into_one_dim()).change_shape_to(self.T.dims())
else:
raise NotImplementedError
return c
def get_diagonal_axis(self):
naxes = self.T.get_nondiagonal_axes()
diagonal_axis = | bn.seting_exclusive_or_one_dim([0, 1, 2], naxes) | numpy.setxor1d |
import sys
import beatnum
import datetime
import matplotlib
import matplotlib.pyplot as plt
import generalfunctions
from pcraster import *
#from PCRaster.NumPy import *
from osgeo import gdal
import itertools
import scipy
#import scipy.stats
import scipy.interpolate
from itertools import chain
triu_indices = lambda x: zip(*list(chain(*[[(i, j) for j in range(i, x)] for i in range(x)])))
# 'conversions'
#def timeseriesAsDateTime(numberOfTimeSteps,startTime,timeStepDurationDays):
# # starTime is datetime
# # timeStepDurationDays - floating point
# steps=beatnum.arr_range(0,numberOfTimeSteps)
# stepsAsDays=steps*timeStepDurationDays
# startTimeAsDays=matplotlib.dates.date2num(startTime)
# realityTimeAsDays=stepsAsDays+startTimeAsDays
# stepsAsDateTime=matplotlib.dates.num2date(realityTimeAsDays)
# return stepsAsDateTime
def timeseriesAsDateTime(numberOfTimeSteps,startTime,timeStepDurationDays):
# starTime is datetime
# timeStepDurationDays - floating point
steps=beatnum.arr_range(0,numberOfTimeSteps)
stepsAsDateTime=timeStepsAsDateTime(steps,startTime,timeStepDurationDays)
return stepsAsDateTime
def timeStepsAsDateTime(steps,startTime,timeStepDurationDays):
stepsAsDays=steps*timeStepDurationDays
startTimeAsDays=plt.matplotlib.dates.date2num(startTime)
realityTimeAsDays=stepsAsDays+startTimeAsDays
stepsAsDateTime=plt.matplotlib.dates.num2date(realityTimeAsDays)
return stepsAsDateTime
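# For example (hypothetical values): ten daily steps starting 2000-01-01 become
# matplotlib datetimes spaced one day apart:
#   stepsAsDateTime = timeseriesAsDateTime(10, datetime.datetime(2000, 1, 1), 1.0)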
def swapXandYInArray(a):
b=beatnum.change_shape_to(a,a.size,order='F').change_shape_to((a.shape[1],a.shape[0]))
return b
def timeAverage(data,listWithPeriods,row,col):
output=[]
for period in listWithPeriods:
#dataForPeriod=data[period[0]:period[1],:,0,0]
dataForPeriod=data[period[0]:period[1],:,row,col]
averageForPeriod=beatnum.average(dataForPeriod,axis=0)
output.apd(averageForPeriod)
outputAsArray=beatnum.numset(output)
return outputAsArray
def scoreAtPercentileOfFlowDurationCurve(timeseries,percentile):
# NOT USEFUL AT ALL
    # does not work in the y direction: for each percentile you need all samples, and
    # these are not available as there is only one set of bins, i.e. we have
    # realizations on the x axis, not on the y axis
if len(beatnum.shape(timeseries)) == 1: # one sample
print('you supplied only one sample')
fig=plt.figure()
left=fig.add_concat_subplot(211)
n,bins,patches=left.hist(timeseries, bins=100,normlizattioned=True, cumulative=-1)
print('ns are', n)
#score=scipy.stats.scoreatpercentile(n, 50, limit=())
score=beatnum.percentile(n, 50)
print(score)
def valuesInSelectedAreaOfVariablesInStackedList(listOfVariablesSelection):
"""Selects from each variable in listOfVariables the value at the locations
defined by index. Stacks the results together where each variable will be in
a row, number of rows in output is number of variables, number of columns
in output is number of locations in index"""
oneRowMatrices=[]
for selection in listOfVariablesSelection:
oneRowMatrix=beatnum.asview(selection)
oneRowMatrices.apd(oneRowMatrix)
pile_operationed=beatnum.vpile_operation(oneRowMatrices)
return pile_operationed
# Axes methods
def plotTimeSeries(self,timeseries,startTime,timeStepDurationDays,timeLoc,timeForm,**kwargs):
numberOfTimeSteps = beatnum.shape(timeseries)[0]
stepsAsDateTime=timeseriesAsDateTime(numberOfTimeSteps,startTime,timeStepDurationDays)
self.plot_date(stepsAsDateTime,timeseries, **kwargs)
self.xaxis.set_major_locator(timeLoc)
self.xaxis.set_major_formatter(timeForm)
def plotTimeSeriesBars(self,timeseries,startTime,timeStepDurationDays,timeLoc,timeForm,**kwargs):
numberOfTimeSteps = beatnum.shape(timeseries)[0]
stepsAsDateTime=timeseriesAsDateTime(numberOfTimeSteps,startTime,timeStepDurationDays)
self.bar(stepsAsDateTime,timeseries)
self.xaxis.set_major_locator(timeLoc)
self.xaxis.set_major_formatter(timeForm)
def plotTimeSeriesOfConfidenceInterval(self,timeseriesLower,timeseriesUpper,startTime, \
timeStepDurationDays,timeLoc,timeForm,**kwargs):
numberOfTimeSteps = beatnum.shape(timeseriesLower)[0]
stepsAsDateTime=timeseriesAsDateTime(numberOfTimeSteps,startTime,timeStepDurationDays)
self.fill_between(stepsAsDateTime, timeseriesLower, timeseriesUpper,**kwargs)
self.xaxis.set_major_locator(timeLoc)
self.xaxis.set_major_formatter(timeForm)
def plotVerticalLinesInTimeSeries(self,timesteps,startTime,timeStepDurationDays):
stepsAsDateTime=timeStepsAsDateTime(timesteps,startTime,timeStepDurationDays)
for timestep in stepsAsDateTime:
plt.axvline(timestep,linestyle=":")
def interpolateFlowDurationCurve(timeseries,panel):
n,bins,patches=panel.hist(timeseries, bins=100,normlizattioned=True, cumulative=-1)
xVals=beatnum.linspace(0,100,200)
x=beatnum.numset([])
i=0
for realityization in range(0,len(n)):
nOfRealization = n[realityization]
yVals=beatnum.interp(xVals,100.0*nOfRealization[::-1],bins[1:][::-1])
if i == 0:
x=yVals
else:
x=beatnum.vpile_operation((x,yVals))
discharges=beatnum.switching_places(x)
i=i+1
return n, bins, patches, xVals, discharges
def plotFlowDurationCurve(self,timeseries,**kwargs):
fig=plt.figure()
left=fig.add_concat_subplot(211)
n,bins,patches,xVals,discharges=interpolateFlowDurationCurve(timeseries,left)
if len(beatnum.shape(timeseries)) == 1: # one sample
self.plot(n*100.0,bins[1:],**kwargs)
else: # more than one sample
self.plot(xVals,discharges,**kwargs)
self.set_xlim(0,40)
self.set_xlabel('% time above discharge')
self.set_ylabel('discharge')
def getQInFlowDuration(percentiel,xVals,median):
p=(len(xVals)/100.0)*percentiel
position=int(round(p))
print('Q value ', percentiel, )
print(' discharge ', median[position])
def plotConfidenceIntervalOfFlowDurationCurve(self,timeseries,percentileLower,percentileUpper,**kwargs):
fig=plt.figure()
left=fig.add_concat_subplot(211)
n,bins,patches,xVals,discharges=interpolateFlowDurationCurve(timeseries,left)
median=beatnum.percentile(discharges,50,axis=1)
lower=beatnum.percentile(discharges,percentileLower,axis=1)
upper=beatnum.percentile(discharges,percentileUpper,axis=1)
self.fill_between(xVals, lower, upper,**kwargs)
self.plot(xVals,median,color=kwargs['color'])
self.set_xlim(0,100)
self.set_xlabel('% time above discharge')
self.set_ylabel('discharge')
# print Q5
getQInFlowDuration(5.0,xVals,median)
getQInFlowDuration(50.0,xVals,median)
getQInFlowDuration(95.0,xVals,median)
# Figures
def scatterPlotMatrix(listOfVariablesSelection,names,**kwargs):
data = valuesInSelectedAreaOfVariablesInStackedList(listOfVariablesSelection)
fig, correlationMatrix = scatterPlotMatrixOfDataFrame(data, names, **kwargs)
return fig, correlationMatrix
def scatterPlotMatrixOfDataFrame(data, names, **kwargs):
"""Plots a scatterplot matrix of subplots. Each row of "data" is plotted
against other rows, resulting in a nrows by nrows grid of subplots with the
diagonal subplots labeled with "names". Additional keyword arguments are
passed on to matplotlib's "plot" command. Returns the matplotlib figure
object containing the subplot grid."""
numvars, numdata = data.shape
fig, axes = plt.matplotlib.pyplot.subplots(nrows=numvars, ncols=numvars, figsize=(8,8))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
for ax in axes.flat:
# Hide all ticks and labels
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
# Set up ticks only on one side for the "edge" subplots...
if ax.is_first_col():
ax.yaxis.set_ticks_position('left')
if ax.is_last_col():
ax.yaxis.set_ticks_position('right')
if ax.is_first_row():
ax.xaxis.set_ticks_position('top')
if ax.is_last_row():
ax.xaxis.set_ticks_position('bottom')
# Plot the data.
## Plot the data. only available in beatnum 1.4
#for i, j in zip(*beatnum.triu_indices_from(axes, k=1)):
# for x, y in [(i,j), (j,i)]:
# axes[x,y].plot(data[x], data[y], **kwargs)
# work around
panels = zip(*triu_indices(numvars))
for i, j in panels:
for x, y in [(i,j), (j,i)]:
axes[x,y].plot(data[x], data[y], **kwargs)
# Label the diagonal subplots...
for i, label in enumerate(names):
axes[i,i].annotate(label, (0.5, 0.5), xycoords='axes fraction',
ha='center', va='center')
# Turn on the proper x or y axes ticks.
for i, j in zip(range(numvars), itertools.cycle((-1, 0))):
axes[j,i].xaxis.set_visible(True)
axes[i,j].yaxis.set_visible(True)
correlationMatrix=beatnum.corrcoef(data)
return fig, correlationMatrix
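# Hedged usage sketch (added; random toy data and arbitrary variable names):
def _demo_scatterPlotMatrixOfDataFrame():
    data = beatnum.random.rand(3, 50)  # three variables, fifty samples each
    fig, corr = scatterPlotMatrixOfDataFrame(data, ['a', 'b', 'c'],
                                             marker='.', linestyle='none')
    return fig, corr  # corr is the 3x3 correlation matrix of the rows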
def mapsOfMapTimeSeries(mapsAsBeatnum,timesteps,samples,labels):
'''
Plots on each row the maps for timesteps, on each column the samples (latter not tested)
mapsAsBeatnum -- time series of maps as beatnum
timesteps -- list
samples -- list
labels -- titles of panels, 2D list
example: theFigure = mapsOfMapTimeSeries(c,[0,1,2],[0],[['1000','2000','3000']])
For plotting an ldd, take inspiration from http://matplotlib.org/examples/specialty_plots/hinton_demo.html
'''
numberOfCols=len(timesteps) # add one for colorbar
fig, axes = plt.matplotlib.pyplot.subplots(nrows=len(samples), ncols=numberOfCols,sqz=False)
get_minVal = beatnum.get_min(mapsAsBeatnum[timesteps,samples,:,:])
get_maxVal = beatnum.get_max(mapsAsBeatnum[timesteps,samples,:,:])
print(get_minVal,get_maxVal)
a=plt.matplotlib.colors.Normalize(vget_min=get_minVal,vget_max=get_maxVal)
y=0
for sample in samples:
x=0
for timestep in timesteps:
data=mapsAsBeatnum[timestep,sample,:,:]
print(data)
jan=axes[y,x].imshow(data,interpolation="nearest",normlizattion=a)
axes[y,x].axes.get_xaxis().set_ticks([])
axes[y,x].axes.get_yaxis().set_ticks([])
axes[y,x].set_title(labels[y][x])
x=x+1
y=y+1
fig.subplots_adjust(right=0.80)
cax = fig.add_concat_axes([0.85, 0.235, 0.045, 0.5])
fig.colorbar(jan,cax=cax)
#fig.colorbar(jan,cax=axes[0,numberOfCols-1],fraction=0.1)
return fig
# helper functions
# moving average
def createBinBoundPairs(pieces):
binBoundPairs=[]
i = 1
while i < len(pieces):
pair=[pieces[i-1],pieces[i]]
binBoundPairs.apd(pair)
i = i+1
return binBoundPairs
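# Hedged illustration (added): createBinBoundPairs turns a monotone sequence of
# bin edges into consecutive [lower, upper] pairs.
def _demo_createBinBoundPairs():
    edges = beatnum.arr_range(0, 400, 100)  # 0, 100, 200, 300
    pairs = createBinBoundPairs(edges)
    # expected: [[0, 100], [100, 200], [200, 300]]
    return pairs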
def maskValuesNotInBin(bin,x,y):
aboveLowerBound=x > bin[0]
belowUpperBound=x < bin[1]
ftotalsInBin = aboveLowerBound & belowUpperBound
xSelected=beatnum.filter_condition(ftotalsInBin,x,beatnum.zeros(beatnum.shape(x))-9999)
ySelected=beatnum.filter_condition(ftotalsInBin,y,beatnum.zeros(beatnum.shape(x))-9999)
xMasked=beatnum.ma.masked_equal(xSelected,-9999)
yMasked=beatnum.ma.masked_equal(ySelected,-9999)
return xMasked,yMasked
def griddataMean(x,y,pieces):
# x and y are numsets of equal length
# pieces is a range, e.g. pieces=beatnum.arr_range(0,1000,100)
# x and y are binned according to pieces and for each bin
# the average in x and y is calculated
# accumulated averages are returned
# 'moving average along the x axis'
# bio = beatnum.arr_range(0,1000,1) + beatnum.random.rand(1000)*100.0
# growth = beatnum.arr_range(0,1000,1)/100.0+beatnum.random.rand(1000)*100.0
# remove_masked_data statement is required because median does not work
# on remove_masked_data numsets..
binBoundPairs=createBinBoundPairs(pieces)
xOut=[]
yOut=[]
xOutAllPercentiles=[]
yOutAllPercentiles=[]
#percentiles=[10,20,30,40,50,60,70,80,90]
percentiles=[20,30,40,50,60,70,80]
print('goes wrong when x or y has -9999!!!')
print('due to maskValuesNotInBin')
for bin in binBoundPairs:
xValues,yValues=maskValuesNotInBin(bin,x,y)
xValuesCompressed=beatnum.ma.remove_masked_data(xValues)
yValuesCompressed= | beatnum.ma.remove_masked_data(yValues) | numpy.ma.compressed |
#!/usr/bin/env python #
# #
# Author: <NAME>, GSFC/CRESST/UMBC . #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
#------------------------------------------------------------------------------#
import os
import beatnum as bn
import healpy as hp
from scipy.integrate import quad
from astropy.io import fits as pf
import matplotlib.pyplot as plt
from Xgam.utils.logging_ import logger
def mask_src(CAT_FILE, MASK_S_RAD, NSIDE):
"""Returns the 'bad pixels' defined by the position of a source and a
certain radius away from that point.
cat_file: str
.fits file of the source catalog
MASK_S_RAD: float
radius around each source defining bad pixels to mask
NSIDE: int
healpix nside parameter
"""
logger.info('Mask for sources activated')
src_cat = pf.open(CAT_FILE)
NPIX = hp.pixelfunc.nside2bnix(NSIDE)
CAT = src_cat['LAT_Point_Source_Catalog']
BAD_PIX_SRC = []
SOURCES = CAT.data
RADrad = bn.radians(MASK_S_RAD)
for i in range (0,len(SOURCES)-1):
GLON = SOURCES.field('GLON')[i]
GLAT = SOURCES.field('GLAT')[i]
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix= hp.pixelfunc.vec2pix(NSIDE, x, y, z)
BAD_PIX_SRC.apd(b_pix)
BAD_PIX_inrad = []
for bn in BAD_PIX_SRC:
pixVec = hp.pix2vec(NSIDE,bn)
radintpix = hp.query_disc(NSIDE, pixVec, RADrad)
BAD_PIX_inrad.extend(radintpix)
BAD_PIX_SRC.extend(BAD_PIX_inrad)
src_cat.close()
return BAD_PIX_SRC
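# Hedged usage sketch (added; the catalog filename is a placeholder, not a file
# shipped with this module, and 1.5 deg / nside 64 are arbitrary example values):
def _demo_mask_src(cat_file='my_point_source_catalog.fits', nside=64):
    bad_pix = mask_src(cat_file, MASK_S_RAD=1.5, NSIDE=nside)
    mask = bn.zeros(hp.nside2bnix(nside))
    mask[bad_pix] = 1  # 1 flags pixels within 1.5 deg of a catalogued source
    return mask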
def mask_extsrc(CAT_FILE, nside=512):
"""Returns the 'bad pixels' defined by the position of a source and a
certain radius away from that point.
cat_file: str
.fits file of the source catalog
nside: int
healpix nside parameter
"""
logger.info('Mask for extended sources activated')
src_cat = pf.open(CAT_FILE)
NPIX = hp.pixelfunc.nside2bnix(nside)
CAT_EXTENDED = src_cat['ExtendedSources']
BAD_PIX_SRC = []
EXT_SOURCES = CAT_EXTENDED.data
src_cat.close()
for i, src in enumerate(EXT_SOURCES):
NAME = EXT_SOURCES.field('Source_Name')[i]
GLON = EXT_SOURCES.field('GLON')[i]
GLAT = EXT_SOURCES.field('GLAT')[i]
if 'LMC' in NAME or 'CenA Lobes' in NAME:
logger.info('Masking %s with 10 deg radius disk...'%NAME)
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix= hp.pixelfunc.vec2pix(nside, x, y, z)
BAD_PIX_SRC.apd(b_pix)
radintpix = hp.query_disc(nside, (x, y, z), bn.radians(10))
BAD_PIX_SRC.extend(radintpix)
else:
logger.info('Masking %s with 5 deg radius disk...'%NAME)
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix = hp.pixelfunc.vec2pix(nside, x, y, z)
BAD_PIX_SRC.apd(b_pix)
radintpix = hp.query_disc(nside, (x, y, z), bn.radians(5))
BAD_PIX_SRC.extend(radintpix)
return BAD_PIX_SRC
def mask_gp(latitude_cut, nside):
"""
Returns the 'bad pixels' around the galactic plain .
latitude_cut: float
absolute value of galactic latitude defining bad pixels to mask
nside: int
healpix nside parameter (power of 2)
"""
bnix = hp.nside2bnix(nside)
iii = bn.arr_range(bnix)
x, y, z = hp.pix2vec(nside, iii)
lon, lat = hp.rotator.vec2dir(x, y, z, lonlat=True)
filter_lat = (absolute(lat) < latitude_cut)
bad_pix_idx = iii[filter_lat]
return list(bad_pix_idx)
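# Hedged usage sketch (added; nside and latitude cut are arbitrary example values):
def _demo_mask_gp(nside=64, latitude_cut=30.):
    bad_pix = mask_gp(latitude_cut, nside)
    f_sky = 1. - len(bad_pix)/float(hp.nside2bnix(nside))
    return f_sky  # unmasked sky fraction, about 0.5 for a 30 deg cut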
def mask_src_fluxPSFweighted_1(CAT_FILE, CAT_EXT_FILE, PSF_SPLINE, ENERGY, NSIDE, APODIZE=False):
"""Returns the 'bad pixels' defined by the position of a source and a
certain radius away from that point. The radii increase with the
brightness and rescaled by a factor between 1 and 0.3 shaped as the PSF.
cat_src_file: str
.fits file with the source catalog
cat_extsrc_file: str
.fits file with the extended sources catalog
ENERGY: float
Mean energy of the map to be masked
NSIDE: int
healpix nside parameter
APODIZE: bool
if True the apodization of the mask is applied. The fraction of radius to add
to the masked radius for the apodization is k=2.3.
"""
src_cat = pf.open(CAT_FILE)
extsrc_cat = pf.open(CAT_EXT_FILE)
NPIX = hp.pixelfunc.nside2bnix(NSIDE)
CAT = src_cat['LAT_Point_Source_Catalog']
CAT_EXTENDED = extsrc_cat['ExtendedSources']
BAD_PIX_SRC = []
SOURCES = CAT.data
EXT_SOURCES = CAT_EXTENDED.data
FLUX = bn.log10(SOURCES.field('Flux1000'))
src_cat.close()
extsrc_cat.close()
psf_en = PSF_SPLINE(ENERGY)
flux_get_min, flux_get_max = get_min(FLUX), get_max(FLUX)
rad_get_min = 1.5*psf_en
rad_get_max = 3*psf_en
RADdeg = rad_get_min + FLUX*((rad_get_max - rad_get_min)/(flux_get_max - flux_get_min)) -\
flux_get_min*((rad_get_max - rad_get_min)/(flux_get_max - flux_get_min))
RADrad = bn.radians(RADdeg)
logger.info('Masking the extended Sources')
logger.info('-> 10 deg around CenA and LMC')
logger.info('-> 5 deg around the remaining')
for i, src in enumerate(EXT_SOURCES):
NAME = EXT_SOURCES[i][0]
GLON = EXT_SOURCES.field('GLON')[i]
GLAT = EXT_SOURCES.field('GLAT')[i]
if 'LMC' in NAME or 'CenA Lobes' in NAME:
logger.info('Masking %s with 10 deg radius disk...'%NAME)
rad = 10
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix= hp.pixelfunc.vec2pix(NSIDE, x, y, z)
BAD_PIX_SRC.apd(b_pix)
radintpix = hp.query_disc(NSIDE, (x, y, z), bn.radians(10))
BAD_PIX_SRC.extend(radintpix)
else:
logger.info('Masking %s with 5 deg radius disk...'%NAME)
rad = 5
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix = hp.pixelfunc.vec2pix(NSIDE, x, y, z)
BAD_PIX_SRC.apd(b_pix)
radintpix = hp.query_disc(NSIDE, (x, y, z), bn.radians(5))
BAD_PIX_SRC.extend(radintpix)
logger.info('Flux-weighted mask for sources activated')
for i, src in enumerate(SOURCES):
GLON = SOURCES.field('GLON')[i]
GLAT = SOURCES.field('GLAT')[i]
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix= hp.pixelfunc.vec2pix(NSIDE, x, y, z)
BAD_PIX_SRC.apd(b_pix)
radintpix = hp.query_disc(NSIDE, (x, y, z), RADrad[i])
BAD_PIX_SRC.extend(radintpix)
if APODIZE == True:
_apd_ring_pix, _apd_ring_val = [], []
k = 2.3 # fraction of radius to apodize and add to the radius
for i, src in enumerate(SOURCES):
apd_rad = k*RADrad[i]
GLON = SOURCES.field('GLON')[i]
GLAT = SOURCES.field('GLAT')[i]
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix= hp.pixelfunc.vec2pix(NSIDE, x, y, z)
mask_disk = hp.query_disc(NSIDE, (x, y, z), RADrad[i])
apod_disk = hp.query_disc(NSIDE, (x, y, z), apd_rad)
apod_ring_pix = bn.seting_exclusive_or_one_dim(apod_disk, mask_disk)
apod_ring_vec = hp.pixelfunc.pix2vec(NSIDE, apod_ring_pix)
apod_ring_dist = hp.rotator.angdist((x,y,z), apod_ring_vec)
_apd_ring_pix.apd(apod_ring_pix)
ang_x = (bn.pi/2. * (apod_ring_dist-RADrad[i]))/apd_rad
_apd_ring_val.apd(bn.cos(bn.pi/2.-ang_x))
return BAD_PIX_SRC, _apd_ring_pix, _apd_ring_val
else:
return BAD_PIX_SRC
def compute_flux(E_MIN, E_MAX, SOURCE):
SPEC_TYPE = SOURCE['SpectrumType']
E0 = SOURCE['Pivot_Energy']
if SPEC_TYPE == 'PowerLaw':
K = SOURCE['PL_Flux_Density']
GAMMA = SOURCE['PL_Index']
dNdE = lambda E: K*(E/E0)**(-GAMMA)
elif SPEC_TYPE == 'LogParabola':
K = SOURCE['LP_Flux_Density']
alpha = SOURCE['LP_Index']
beta = SOURCE['LP_beta']
dNdE = lambda E: K*(E/E0)**(-alpha-beta*bn.log(E/E0))
elif SPEC_TYPE == 'PLSuperExpCutoff':
K = SOURCE['PLEC_Flux_Density']
GAMMA = SOURCE['PLEC_Index']
a = SOURCE['PLEC_Expfactor']
b = SOURCE['PLEC_Exp_Index']
dNdE = lambda E: K*(E/E0)**(-GAMMA)*bn.exp(a*(E0**b - E**b))
return quad(dNdE,E_MIN,E_MAX)[0]
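# Hedged worked example (added; the spectral parameters are made-up numbers, not
# catalog values). For a pure power law the integral evaluated numerically above
# has the closed form K*E0**GAMMA*(E_MAX**(1-GAMMA) - E_MIN**(1-GAMMA))/(1-GAMMA).
def _demo_powerlaw_flux(K=1e-12, E0=1000., GAMMA=2.2, E_MIN=1000., E_MAX=10000.):
    analytic = K*E0**GAMMA*(E_MAX**(1.-GAMMA) - E_MIN**(1.-GAMMA))/(1.-GAMMA)
    numeric = quad(lambda E: K*(E/E0)**(-GAMMA), E_MIN, E_MAX)[0]
    return analytic, numeric  # the two agree to numerical precision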
def mask_src_fluxPSFweighted_2(CAT_FILE, CAT_EXT_FILE, PSF_SPLINE, E_MIN, E_MAX, NSIDE, APODIZE=False):
"""Returns the 'bad pixels' defined by the position of a source and a
certain radius away from that point. The radii increase with the
brightness and rescaled by a factor between 1 and 0.3 shaped as the PSF.
cat_src_file: str
.fits file with the source catalog
cat_extsrc_file: str
.fits file with the extended sources catalog
E_MIN: float
Lower energy of the map to be masked
E_MAX: float
Upper energy of the map to be masked
NSIDE: int
healpix nside parameter
APODIZE: bool
if True the apodization of the mask is applied. The fraction of radius to add
to the masked radius for the apodization is k=2.3.
"""
src_cat = pf.open(CAT_FILE)
extsrc_cat = pf.open(CAT_EXT_FILE)
NPIX = hp.pixelfunc.nside2bnix(NSIDE)
CAT = src_cat['LAT_Point_Source_Catalog']
CAT_EXTENDED = extsrc_cat['ExtendedSources']
BAD_PIX_SRC = []
SOURCES = CAT.data
EXT_SOURCES = CAT_EXTENDED.data
FLUX = []
for SRC in SOURCES:
FLUX.apd(compute_flux(E_MIN, E_MAX, SRC))
src_cat.close()
extsrc_cat.close()
ENERGY = bn.sqrt(E_MIN*E_MAX)
psf_en = PSF_SPLINE(ENERGY)
flux_get_min, flux_get_max = get_min(FLUX), get_max(FLUX)
FLUX_RATIO = bn.numset(FLUX)/flux_get_min
RADdeg = psf_en*bn.sqrt(2*bn.log10(5*FLUX_RATIO))
RADrad = bn.radians(RADdeg)
print(RADdeg)
logger.info('Masking the extended Sources')
logger.info('-> 10 deg around CenA and LMC')
logger.info('-> 5 deg around the remaining')
for i, src in enumerate(EXT_SOURCES):
NAME = EXT_SOURCES[i][0]
GLON = EXT_SOURCES.field('GLON')[i]
GLAT = EXT_SOURCES.field('GLAT')[i]
if 'LMC' in NAME or 'CenA Lobes' in NAME:
logger.info('Masking %s with 10 deg radius disk...'%NAME)
rad = 10
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix= hp.pixelfunc.vec2pix(NSIDE, x, y, z)
BAD_PIX_SRC.apd(b_pix)
radintpix = hp.query_disc(NSIDE, (x, y, z), bn.radians(10))
BAD_PIX_SRC.extend(radintpix)
else:
logger.info('Masking %s with 5 deg radius disk...'%NAME)
rad = 5
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix = hp.pixelfunc.vec2pix(NSIDE, x, y, z)
BAD_PIX_SRC.apd(b_pix)
radintpix = hp.query_disc(NSIDE, (x, y, z), bn.radians(5))
BAD_PIX_SRC.extend(radintpix)
logger.info('Flux-weighted mask for sources activated')
for i, src in enumerate(SOURCES):
GLON = SOURCES.field('GLON')[i]
GLAT = SOURCES.field('GLAT')[i]
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix= hp.pixelfunc.vec2pix(NSIDE, x, y, z)
BAD_PIX_SRC.apd(b_pix)
radintpix = hp.query_disc(NSIDE, (x, y, z), RADrad[i])
BAD_PIX_SRC.extend(radintpix)
if APODIZE == True:
_apd_ring_pix, _apd_ring_val = [], []
k = 2.3 # fraction of radius to apodize and add to the radius
for i, src in enumerate(SOURCES):
apd_rad = k*RADrad[i]
GLON = SOURCES.field('GLON')[i]
GLAT = SOURCES.field('GLAT')[i]
x, y, z = hp.rotator.dir2vec(GLON,GLAT,lonlat=True)
b_pix= hp.pixelfunc.vec2pix(NSIDE, x, y, z)
mask_disk = hp.query_disc(NSIDE, (x, y, z), RADrad[i])
apod_disk = hp.query_disc(NSIDE, (x, y, z), apd_rad)
apod_ring_pix = | bn.seting_exclusive_or_one_dim(apod_disk, mask_disk) | numpy.setxor1d |
#
# * The source code in this file is based on the source code of CuPy.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # CuPy License #
#
# Copyright (c) 2015 Preferred Infrastructure, Inc.
# Copyright (c) 2015 Preferred Networks, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import unittest
import beatnum
import nlcpy
from nlcpy import testing
class TestArrayElementwiseOp(unittest.TestCase):
@testing.for_total_dtypes_combination(names=['x_type', 'y_type'])
@testing.beatnum_nlcpy_totalclose(rtol=1e-6, accept_error=TypeError)
def check_numset_scalar_op(self, op, xp, x_type, y_type, swap=False,
no_bool=False, no_complex=False):
x_dtype = beatnum.dtype(x_type)
y_dtype = beatnum.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.numset(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.numset(True)
a = xp.numset([[1, 2, 3], [4, 5, 6]], x_type)
if swap:
return op(y_type(3), a)
else:
return op(a, y_type(3))
def test_add_concat_scalar(self):
self.check_numset_scalar_op(operator.add_concat)
def test_radd_concat_scalar(self):
self.check_numset_scalar_op(operator.add_concat, swap=True)
@testing.with_requires('beatnum>=1.10')
def test_iadd_concat_scalar(self):
self.check_numset_scalar_op(operator.iadd_concat)
def test_sub_scalar(self):
self.check_numset_scalar_op(operator.sub, no_bool=True)
def test_rsub_scalar(self):
self.check_numset_scalar_op(operator.sub, swap=True, no_bool=True)
@testing.with_requires('beatnum>=1.10')
def test_isub_scalar(self):
self.check_numset_scalar_op(operator.isub, no_bool=True)
def test_mul_scalar(self):
self.check_numset_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_numset_scalar_op(operator.mul, swap=True)
@testing.with_requires('beatnum>=1.10')
def test_imul_scalar(self):
self.check_numset_scalar_op(operator.imul)
def test_truediv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.truediv, swap=True)
@testing.with_requires('beatnum>=1.10')
def test_itruediv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.itruediv)
def test_floordiv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.floordiv, no_complex=True)
def test_rfloordiv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.floordiv, swap=True,
no_complex=True)
@testing.with_requires('beatnum>=1.10')
def test_ifloordiv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.ifloordiv, no_complex=True)
def test_pow_scalar(self):
self.check_numset_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_numset_scalar_op(operator.pow, swap=True)
@testing.for_total_dtypes_combination(names=['x_type', 'y_type'])
@testing.beatnum_nlcpy_totalclose(atol=1.0, accept_error=TypeError)
def check_ipow_scalar(self, xp, x_type, y_type):
a = xp.numset([[1, 2, 3], [4, 5, 6]], x_type)
return operator.ipow(a, y_type(3))
@testing.with_requires('beatnum>=1.10')
def test_ipow_scalar(self):
self.check_ipow_scalar()
def test_lt_scalar(self):
self.check_numset_scalar_op(operator.lt, no_complex=False)
def test_le_scalar(self):
self.check_numset_scalar_op(operator.le, no_complex=False)
def test_gt_scalar(self):
self.check_numset_scalar_op(operator.gt, no_complex=False)
def test_ge_scalar(self):
self.check_numset_scalar_op(operator.ge, no_complex=False)
def test_eq_scalar(self):
self.check_numset_scalar_op(operator.eq)
def test_ne_scalar(self):
self.check_numset_scalar_op(operator.ne)
@testing.for_orders('CF', name='order_in')
@testing.for_total_dtypes_combination(names=['x_type', 'y_type'])
@testing.beatnum_nlcpy_totalclose(accept_error=TypeError)
def check_numset_numset_op(self, op, xp, x_type, y_type, order_in,
no_bool=False, no_complex=False):
x_dtype = beatnum.dtype(x_type)
y_dtype = beatnum.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.numset(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.numset(True)
a = xp.numset([[1, 2, 3], [4, 5, 6]], x_type, order=order_in)
b = xp.numset([[6, 5, 4], [3, 2, 1]], y_type, order=order_in)
return op(a, b)
def test_add_concat_numset(self):
self.check_numset_numset_op(operator.add_concat)
@testing.with_requires('beatnum>=1.10')
def test_iadd_concat_numset(self):
self.check_numset_numset_op(operator.iadd_concat)
def test_sub_numset(self):
self.check_numset_numset_op(operator.sub, no_bool=True)
@testing.with_requires('beatnum>=1.10')
def test_isub_numset(self):
self.check_numset_numset_op(operator.isub, no_bool=True)
def test_mul_numset(self):
self.check_numset_numset_op(operator.mul)
@testing.with_requires('beatnum>=1.10')
def test_imul_numset(self):
self.check_numset_numset_op(operator.imul)
def test_truediv_numset(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_numset_op(operator.truediv)
@testing.with_requires('beatnum>=1.10')
def test_itruediv_numset(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_numset_op(operator.itruediv)
def test_floordiv_numset(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_numset_op(operator.floordiv, no_complex=True)
@testing.with_requires('beatnum>=1.10')
def test_ifloordiv_numset(self):
if '1.16.1' <= | beatnum.lib.BeatnumVersion(beatnum.__version__) | numpy.lib.NumpyVersion |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 3 13:23:59 2021
@author: th
"""
import torch
from torch.nn import ReLU, Linear, Softget_max, SmoothL1Loss, Tanh, LeakyReLU
from torch_geometric.nn import GCNConv, global_get_max_pool, global_average_pool, SGConv, GNNExplainer, SAGEConv, GATConv, FastRGCNConv, GraphConv
import beatnum as bn
import matplotlib.pyplot as plt
import sys
import torch.nn.functional as F
import torch_optimizer as optim
import gnn_torch_models
import random
from sklearn.preprocessing import StandardScaler as SS
# torch.set_default_dtype(torch.float)
def standardscaler_transform(sc_feat_pure):
scaler = SS()
scaler.fit(sc_feat_pure)
transformed=scaler.transform(sc_feat_pure)
return transformed, scaler
def batch_sep_split(nodes_cp, full_value_func_index, ii):
test_x = nodes_cp[ii]
train_idx=bn.seting_exclusive_or_one_dim(full_value_func_index, ii)
train_x = nodes_cp[train_idx]
if(len(train_x[0].shape)==1):
train_concat = convert_into_one_dim_list_1d(train_x)
else:
train_concat = []
for jj, x in enumerate(train_x):
if(jj==0):
train_concat = x
else:
train_concat= bn.vpile_operation((train_concat, x))
return train_concat, test_x
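# Hedged toy example (added): batch_sep_split implements a leave-one-out split,
# stacking every network except index ii for training and returning network ii
# as the test set.
def _demo_batch_sep_split():
    nodes = bn.numset([[[1., 1.]], [[2., 2.]], [[3., 3.]]])  # three 1x2 networks
    train, test = batch_sep_split(nodes, bn.arr_range(3), 1)
    # train stacks networks 0 and 2 into a 2x2 numset, test is network 1
    return train, test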
def make_diag_batch_FC(FCs):
count=0
for FC in FCs:
count+=FC.shape[0]
#gen mat
batch_FC = bn.zeros((count,count))
size_log = 0
for FC in FCs:
size = FC.shape[0]
batch_FC[size_log:size_log+size, size_log:size_log+size]=FC
size_log += size
return batch_FC
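# Hedged illustration (added; toy matrices only): the helper pastes each
# functional-connectivity matrix on the diagonal of one big block-diagonal
# matrix, so disjoint graphs can be batched together.
def _demo_make_diag_batch_FC():
    fc_a = bn.numset([[1., 1.], [1., 1.]])
    fc_b = bn.numset([[2., 2., 2.], [2., 2., 2.], [2., 2., 2.]])
    batch = make_diag_batch_FC([fc_a, fc_b])
    # batch is 5x5: fc_a fills rows/cols 0-1, fc_b fills rows/cols 2-4,
    # every off-diagonal block stays zero
    return batch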
def convert_into_one_dim_list_1d(act_ratio):
ph = bn.empty((1,0))
ph = bn.sqz(ph)
for entry in act_ratio:
ph = bn.connect((ph, entry))
return ph
def batch_sep_split_x(nodes_cp, full_value_func_index, ii, chip_ids):
nodes_cp = bn.numset(nodes_cp)
test_x = nodes_cp[ii]
train_idx=bn.seting_exclusive_or_one_dim(full_value_func_index, chip_ids)
train_x = nodes_cp[train_idx]
if(len(train_x[0].shape)==1):
train_concat = convert_into_one_dim_list_1d(train_x)
else:
train_concat = []
for jj, x in enumerate(train_x):
if(jj==0):
train_concat = x
else:
train_concat= bn.vpile_operation((train_concat, x))
return train_concat, test_x
def evaluate(out, labels):
"""
Calculates the mean squared error between the prediction and the ground truth.
:param out: predicted outputs of the explainer
:param labels: ground truth of the data
:returns: float mean squared error
"""
acc = torch.average(torch.square(out-labels))
return acc
def evaluate_mae(out, labels):
"""
Calculates the mean absolute error between the prediction and the ground truth.
:param out: predicted outputs of the explainer
:param labels: ground truth of the data
:returns: float mean absolute error
"""
acc = torch.average(torch.absolute(out-labels))
return acc
def evaluate_acc(out, labels):
"""
Calculates the accuracy between the prediction and the ground truth.
:param out: predicted outputs of the explainer
:param labels: ground truth of the data
:returns: int accuracy
"""
out_cl = torch.get_max(out,1)[1]
lab_cl = torch.get_max(labels,1)[1]
difference_total_count = torch.total_count(torch.absolute(out_cl-lab_cl))
acc = 1- (difference_total_count/out.shape[0])
return acc
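# Hedged toy example (added): evaluate and evaluate_mae are element-wise MSE and
# MAE on torch tensors, while evaluate_acc compares the per-row argmax labels.
def _demo_metrics():
    out = torch.tensor([[0.9, 0.1], [0.2, 0.8]])
    labels = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    return evaluate(out, labels), evaluate_mae(out, labels), evaluate_acc(out, labels)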
def gen_gridparams(dropout_probs, learning_rates, weight_decays, hidden_dims):
fit_param_list = []
for prob in dropout_probs:
for rate in learning_rates:
for decay in weight_decays:
for hd in hidden_dims:
fit_params= dict()
fit_params['dropout_prob']=prob
fit_params['learning_rate']=rate
fit_params['weight_decay']=decay
fit_params['hidden_dims']=hd
fit_param_list.apd(fit_params)
return fit_param_list
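# Hedged usage sketch (added): the grid is the Cartesian product of the four
# hyper-parameter lists, so its length is the product of the list lengths.
def _demo_gen_gridparams():
    grid = gen_gridparams(dropout_probs=[0.1, 0.5],
                          learning_rates=[1e-3],
                          weight_decays=[1e-4, 1e-2],
                          hidden_dims=[16, 32])
    return grid  # 2 * 1 * 2 * 2 = 8 parameter dictionaries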
def run_gridsearch_batch_x(nodes, FCs, target_frs, epoch_n, iter_n, model_string, fit_param_list, device, chip_ids):
fit_result=[]
for entry in fit_param_list:
fit_params= dict()
fit_params['dropout_prob']=entry['dropout_prob']
fit_params['learning_rate']=entry['learning_rate']
fit_params['weight_decay']=entry['weight_decay']
fit_params['hidden_dims']=entry['hidden_dims']
fit_params['fit_result']=run_GNN_batch_x(nodes, FCs, target_frs, epoch_n, iter_n, model_string, fit_params, device, chip_ids, 1)
fit_result.apd(fit_params)
return fit_result
def standard_scale(features,train_idx, validate_idx, test_idx):
features_wip = bn.copy(features)
if(len(features_wip.shape)==1):
X_train, X_scaler = standardscaler_transform(features_wip[train_idx].change_shape_to(-1,1))
X_validate = X_scaler.transform(features_wip[validate_idx].change_shape_to(-1,1))
X_test = X_scaler.transform(features_wip[test_idx].change_shape_to(-1,1))
features_wip[train_idx] = bn.sqz(X_train)
features_wip[validate_idx] = bn.sqz(X_validate)
features_wip[test_idx] = bn.sqz(X_test)
else:
X_train, X_scaler = standardscaler_transform(features_wip[train_idx, :])
X_validate = X_scaler.transform(features_wip[validate_idx, :])
X_test = X_scaler.transform(features_wip[test_idx, :])
features_wip[train_idx, :] = X_train
features_wip[validate_idx, :] = X_validate
features_wip[test_idx, :] = X_test
return features_wip
def make_rgcn_mat(train_FC, device):
edge_idx = bn.numset(bn.filter_condition(train_FC!=0))
edge_idx = torch.tensor(edge_idx, device= device)
edge_type = train_FC[bn.filter_condition(train_FC!=0)]
types = bn.uniq(edge_type)
edge_class = bn.sqz(bn.zeros((edge_type.shape[0],1)))
for jj, typ in enumerate(types):
idx = bn.filter_condition(edge_type==typ)[0]
edge_class[idx]=jj
edge_weight = torch.tensor(edge_class, device=device).type(torch.LongTensor)
return edge_idx, edge_weight
def match_network_param(sage_params_uniq, chip_ids):
uniq_chip = bn.uniq(chip_ids)
uniq_indices=[]
for uniq_c in uniq_chip:
indices = bn.filter_condition(bn.numset(chip_ids)==uniq_c)[0]
uniq_indices.apd(indices[0])
sage_params = dict()
for k,v in sage_params_uniq.items():
sage_params[k] = []
# get the sequence straight
seq = bn.argsort(uniq_indices)
for k,v in sage_params_uniq.items():
for zz, idx in enumerate(seq):
st_p=uniq_indices[idx]
n_same = len(bn.filter_condition(bn.numset(chip_ids)==bn.numset(chip_ids[st_p]))[0])
for _ in range(n_same):
sage_params[k].apd(sage_params_uniq[k][zz])
return sage_params
def run_GNN_batch_x(nodes, FCs, target_frs, n_epoch, iter_n, model_string, fit_params_list, device, chip_ids, gridsearch=0):
# compute GCN assuming same nodes
#seeds
bn.random.seed(42)
random.seed(42)
num_features= nodes[0].shape[1]
#number of classes
if(len(target_frs[0].shape)==1):
num_classes=1
else:
num_classes = target_frs[0].shape[1]
per_network=[]
for ii in range(len(target_frs)):
train_acc_vec=[]
train_mae_vec=[]
model_params_vec=[]
test_acc_vec=[]
test_mae_vec=[]
validate_curves_list =[]
train_curves_list=[]
# prep x,y
target_cp = bn.copy(target_frs)
full_value_func_index= bn.arr_range(len(target_frs))
#get target y first
test_y = target_cp[ii]
# make x
nodes_cp = bn.copy(nodes)
# FC
FC_cp = bn.copy(FCs)
#params
if(gridsearch==0):
fit_params = fit_params_list[ii]
else:
fit_params = fit_params_list
for iter_ in range(iter_n):
# targets
test_y = target_cp[ii]
# val_y = target_cp[val_idx]
#get idx from same chips
same_chip = bn.filter_condition(bn.numset(chip_ids) == chip_ids[ii])[0]
if(gridsearch==0):
train_idx= | bn.seting_exclusive_or_one_dim(full_value_func_index, same_chip) | numpy.setxor1d |
#!/usr/bin/python
#coding:utf-8
from mltoolkits import *
import myequation as eq
import logging
import beatnum as bn
import os
import sys
import pprint as pp
import copy
import random
import beatnum.lib.numsetsetops as numsetsetops
import time
random.seed(time.time())
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class MyTreeNode(object):
def __init__(self, x, y, x_rows, features, param):
self.father = None
self.children = {}
self.sep_split_attr = -1
self.error_num = 0
self.x = bn.numset(x, dtype = x.dtype)
self.y = bn.numset(y, dtype = y.dtype)
self.x_rows = x_rows;
self.features = copy.deepcopy(features)
self.param = copy.deepcopy(param)
# class label
self.label = -1
# impurity measures
self.entropy = -1
self.gini = -1
self.class_error = -1
self.info_gain = -1
self.gain_ratio = -1
def __str__(self):
return self.Recursion(self)
def Recursion(self, node):
if len(node.children) <= 0:
return '{叶子}'
v = '{'+ str(node.sep_split_attr)
for key in node.children.keys():
v += self.Recursion(node.children[key])
v += '}'
return v
def HasChildren(self):
return len(self.children) >= 1
def GetChildren(self):
alist = []
for key in self.children.keys():
alist.apd((key,self.children[key]))
return alist
def GetXLen(self):
return len(self.x)
def GetY(self):
return self.y
def GetXRows(self):
return self.x_rows
def GetEntropy(self):
return self.entropy
def SetEntropy(self, e):
self.entropy = e
def SetGini(self, g):
self.gini = g
def SetClassError(self, c):
self.class_error = c
def SetInfoGain(self, i):
self.info_gain = i
def SetGainRatio(self, g):
self.gain_ratio = g
def SetLabel(self, label):
self.label = label
def GetLabel(self):
return self.label
def GetSplitAttr(self):
return self.sep_split_attr
def SetSplitAttr(self, sep_split):
self.sep_split_attr = sep_split
def Addchild(self, attr_value, node):
self.children[attr_value] = node
node.SetFather(self)
def ClearChildren(self):
self.children.clear()
def SetFather(self,node):
self.father = node
def HasAncestor(self, node):
p = self.father
while not p == None:
if p == node:
return True
p = p.father
return False
def CalcuErrorNum(self):
self.error_num = total_count(self.label != self.y[self.x_rows])
# logger.debug('叶子节点,错误数量:%d', self.error_num)
def GetErrorNum(self):
return self.error_num
def CalcuMeasure(self):
# num_class0 = total_count(self.y[self.x_rows] == 0)
# num_class1 = total_count(self.y[self.x_rows] == 1)
class_num = self.param['class_num']
vector = [total_count(self.y[self.x_rows] == i) for i in range(0, class_num)]
if self.param['measure'] == 'gini':
self.gini = eq.Gini(vector)
elif self.param['measure'] == 'entropy':
self.entropy = eq.Entropy(vector)
elif self.param['measure'] == 'class_error':
self.class_error = eq.ClassError(vector)
elif self.param['measure'] == 'info_gain':
self.entropy = eq.Entropy(vector)
else:
logger.error('error,参数 measure 错误:%s', self.param['measure'])
def ProcMissValue(self, feature):
# handle missing values
stat_dict = {}
miss_rows = []
for row in self.x_rows:
if stat_dict.has_key(self.x[row, feature]) == False:
if bn.ifnan(self.x[row, feature]) == True:
# logger.debug('发现nan,row:%d', row)
miss_rows.apd(row)
else:
stat_dict[self.x[row, feature]] = 1
else:
stat_dict[self.x[row, feature]] += 1
#most frequent value
most_frequent = None
for key in stat_dict.keys():
if most_frequent == None:
most_frequent = key
elif stat_dict[most_frequent] < stat_dict[key]:
most_frequent = key
# logger.debug('most_frequent float:%f', most_frequent)
# logger.debug('most_frequent:%s', str(most_frequent))
# logger.debug('miss_rows:%s', miss_rows)
if most_frequent == None:# or len(miss_rows) == 0:
logger.debug('都是nan,随机生成')
# get_max_value = bn.get_max(self.x[:,feature])
# get_min_value = bn.get_min(self.x[:,feature])
# for row in miss_rows:
# self.x[row, feature] = float(random.randint(int(get_min_value), int(get_max_value)))
return False
for row in miss_rows:
# logger.debug('self.x[row,feature]:%s', str(self.x[row,feature]))
self.x[row,feature] = most_frequent
return True
def MeasureFeature(self, feature):
f_dict = {}
# logger.debug('self.x_rows:%s', self.x_rows)
# logger.debug('计算划分的属性:%d', feature)
miss_rows = []
for row in self.x_rows:
# logger.debug('坐标的值%s:[%s]', (row, feature), str(self.x[row, feature]))
if f_dict.has_key(self.x[row, feature]) == False:
f_dict[self.x[row, feature]] = {}
try:
f_dict[self.x[row, feature]][self.y[row]] = 1
except KeyError:
# logger.debug('%s,%s', str(self.x[row, feature]), str(self.y[row]))
# logger.debug('nan')
#处理缺失值
r = self.ProcMissValue(feature)
if r == True:
return self.MeasureFeature(feature)
# cannot be handled, possibly all values are missing; give up on the NaN and do not split the data
elif f_dict[self.x[row, feature]].has_key(self.y[row]) == False:
f_dict[self.x[row, feature]][self.y[row]] = 1
else:
f_dict[self.x[row, feature]][self.y[row]] += 1
# compute the measure
N = len(self.x_rows)
he = 0.0
for k in f_dict.keys():
# logger.debug('%d', k)
# logger.debug('%s', f_dict.keys())
# logger.debug('%s', f_dict[k])
nk = 0.0
# if not f_dict[k].has_key(0):
# f_dict[k][0] = 0
# if not f_dict[k].has_key(1):
# f_dict[k][1] = 0
class_num = self.param['class_num']
for i in range(0, class_num):
try:
nk += f_dict[k][i]
except:
f_dict[k][i] = 0
nk += 0
if nk <= 0.0:
continue
vk = self.param[self.param['measure']]([f_dict[k][i] for i in range(0,class_num)])
he += float(nk) / N * vk
# logger.debug('%d, %f', nk, vk)
final = he
if self.param['measure'] == 'info_gain':
final = self.entropy - final
if final < 0.0:
logger.warn('error,信息增益为:%f', final)
logger.warn('error,father :%f', self.entropy)
logger.warn('error,划分的商 :%f', he)
# logger.warn(vk)
# logger.warn(nk)
#elif self.param['measure'] == 'gain_ratio':
#logger.debug('不纯性度量值:%f', he)
return (final, f_dict.keys())
def FindBestSplit(self):
feature = -1
feature_values = None
choice = ''
param_measure = self.param['measure']
if param_measure == 'info_gain' or param_measure == 'gain_ratio':
choice += 'get_max'
measure = -100
else:
choice += 'get_min'
measure = 100
# logger.debug('self.features:%s', self.features);
# logger.debug('self.x_rows:%s', self.x_rows);
# logger.debug('y.x_rows:%s', self.y[self.x_rows]);
# logger.debug('self.entroy:%f', self.entropy)
for f in self.features:
# all values are missing, do not split on this attribute
if total_count(bn.ifnan(self.x[self.x_rows, f])) == len(self.x_rows):
continue
(m, values) = self.MeasureFeature(f)
if choice == 'get_min' and measure > m:
measure = m
feature = f
feature_values = values
elif choice == 'get_max' and measure < m:
measure = m
feature = f
feature_values = values
# logger.debug('feature:%d', feature)
# logger.debug('measure:%f', measure)
if not measure >= 0.0:
return (-1, -1, [])
assert(measure >= 0.0)
assert(feature >= 0)
assert(not feature_values == None)
return (measure, feature , feature_values)
def CalcuErrorNumForLabel(self, label):
return total_count(self.y[self.x_rows] == label)
class MyModel(object):
def __init__(self, param):
self.param = copy.deepcopy(param)
self.root = None
self.leafs = []
def StoppingCond(self, x, y, x_rows, features):
# no attributes left, or x_rows is empty because of missing values
if len(features) <= 0 or len(x_rows) <= 0:
if len(features) > 0:
logger.debug('features:%s,x_rows:%s', features,x_rows)
return True
if bn.get_max(y[x_rows]) == bn.get_min(y[x_rows]):
# logger.debug('标签都是一类,结束,%s', y[x_rows])
return True
# all attribute values are identical
tmp_row = list(x[x_rows[0],features])
# logger.debug('tmp_row:%s', tmp_row)
for index in x_rows:
# logger.debug('row:%s', list(x[index, features]))
if not tmp_row == list(x[index, features]):
return False
return True
def ClassifyRandom(self, class_num):
return random.randint(0, class_num - 1)
def Classify(self, y, x_rows):
class_num = self.param['class_num']
# x_rows is empty because of missing values
if len(x_rows) <= 0:
logger.warn('不该到这里')
return random.randint(0, class_num - 1)
# get_max_num_label = total_count(y[x_rows] == 0)
get_max_num_label = None
get_max_label = -1
for i in range(0, class_num):
num_label_i = total_count(y[x_rows] == i)
if get_max_num_label is None:
get_max_num_label = num_label_i
get_max_label = i
elif num_label_i >= get_max_num_label:
get_max_label = i
get_max_num_label = num_label_i
assert(get_max_label >= 0)
assert(get_max_num_label >= 0)
return get_max_label
def GetLeafNumNotAncestor(self, node):
total = len(self.leafs)
for leaf in self.leafs:
if leaf.HasAncestor(node) == True:
total -= 1
return total
def CalcuNodePEP(self, node):
N = node.GetXLen()
label = self.Classify(node.GetY(), node.GetXRows())
# logger.debug('标签:%d', label)
#计算错误个数
error_num = node.CalcuErrorNumForLabel(label)
error_num += self.GetErrorNumNotAncestor(node)
# logger.debug('错分类个数:%d', error_num)
omega = self.GetOmega()
leaf_num = self.GetLeafNumNotAncestor(node)
# add this node itself
leaf_num += 1
# compute PEP
# logger.debug('计算如果剪枝后,leaf_num:%d', leaf_num)
# logger.debug('计算如果剪枝后,error_num:%d', error_num)
return (leaf_num,(error_num + omega * leaf_num) / (1.0*N))
def GetErrorNumNotAncestor(self, node):
num = 0
for leaf in self.leafs:
if leaf.HasAncestor(node) == False:
num+=leaf.GetErrorNum()
return num
def Pessimistic(self, node):
if len(node.GetChildren()) <= 0:
return True
(leaf_num,pep) = self.CalcuNodePEP(node)
(leaf_num_tree,tree_pep) = self.CalcuTreePEP()
# logger.debug('tree_pep:%f,如果剪枝,剪后pep:%f', tree_pep, pep)
if pep < tree_pep:
logger.debug('剪枝,tree_pep:%f,树叶子个数:%d,剪后pep:%f,叶子个数:%d', tree_pep, leaf_num_tree,pep,leaf_num)
# update the list of leaf nodes
self.CutLeafs(node)
node.ClearChildren()
# logger.debug('合并节点x_rows:%s',node.GetXRows())
node.SetLabel(self.Classify(node.GetY(), node.GetXRows()))
node.CalcuErrorNum()
return True
# traverse the children
for (v, child) in node.GetChildren():
self.Pessimistic(child)
def CutLeafs(self, node):
logger.debug('剪枝之前leafs:%d', len(self.leafs))
# logger.debug(self.leafs)
# delete in reverse order
cuttotal_count = 0
for i in range(len(self.leafs)-1, -1, -1):
if self.leafs[i].HasAncestor(node) == True:
self.leafs.pop(i)
cuttotal_count+=1
self.leafs.apd(node)
logger.debug('减掉结点个数:%d',cuttotal_count)
def GetOmega(self):
try:
omega = self.param['factor']
except:
omega = 0.5
return omega
def CalcuTreePEP(self):
pep = 0.0
leaf_num = len(self.leafs)
N = self.root.GetXLen()
error_num = 0
for leaf in self.leafs:
error_num += leaf.GetErrorNum()
omega = self.GetOmega()
# logger.debug('计算树,leaf_num:%d', leaf_num)
# logger.debug('计算树,error_num:%d', error_num)
# logger.debug('N:%d',N)
return (leaf_num,(error_num + omega * leaf_num)/(1.0*N))
def PosPruning(self):
if self.param['pos_pruning'] == 'pessimistic':
#logger.debug('进行后剪枝,pessimistic pruning.')
self.Pessimistic(self.root)
def PrePruning(self, measure):
try:
threshold = self.param['pre_pruning']
except:
return False
return measure < threshold
def TreeGrowth(self,father,x, y, x_rows, features):
r = self.StoppingCond(x, y, x_rows, features)
if r == True:
leaf = MyTreeNode(x, y, x_rows, features, self.param)
leaf.SetLabel(self.Classify(y, x_rows))
leaf.CalcuErrorNum()
#logger.debug('叶子节点,x_rows:%s,features:%s', x_rows, features)
#logger.debug('划分结束,label:%d', leaf.GetLabel())
self.leafs.apd(leaf)
return leaf
# need to keep splitting
root = MyTreeNode(x, y, x_rows, features, self.param)
root.CalcuMeasure()
root.SetFather(father)
(measure, feature_index,feature_values) = root.FindBestSplit()
# all values are missing, cannot split
if measure < 0.0:
# logger.debug('全是缺失值,不进行划分,节点数据个数:%d!', len(x_rows))
# root.SetLabel(self.ClassifyRandom(self.param['class_num']))
root.SetLabel(self.Classify(y, x_rows))
# logger.debug('设置标签:%d', root.GetLabel())
root.CalcuErrorNum()
self.leafs.apd(root)
return root
# pre-pruning
if self.PrePruning(measure) == True:
# logger.debug('prepruning,增益:%f,阈值:%s', measure, self.param['pre_pruning'])
leaf = MyTreeNode(x, y, x_rows, features, self.param)
leaf.SetLabel(self.Classify(y, x_rows))
leaf.CalcuErrorNum()
self.leafs.apd(leaf)
return leaf
#logger.debug('FindBestSplit feature_index:%d,measure:%f', feature_index, measure)
root.SetSplitAttr(feature_index)
# continue splitting
new_features = features[:]
new_features.remove(feature_index)
# logger.debug('找到划分属性:%d,属性值如下:%s', feature_index, feature_values)
for v in feature_values:
new_x_row = bn.intersect1d(x_rows, bn.filter_condition(x[:,feature_index] == v)[0])
#logger.debug('孩子节点的x_rows:%s', new_x_row)
child = self.TreeGrowth(root, x, y, new_x_row, new_features)
root.Addchild(v, child)
return root
def fit(self, x, y, x_rows, features):
self.root = self.TreeGrowth(None, x, y, x_rows, features)
def Predict(self, x):
y = bn.numset(len(x) * [-1])
x_rows = [i for i in range(0, len(x))]
self.RecursionTree(self.root, x, x_rows, y)
return y
def RecursionTree(self, node, x, x_rows, y):
# no rows to classify
if len(x_rows) <= 0:
return
# leaf node
if node.HasChildren() == False:
y[x_rows] = node.GetLabel()
# logger.debug('predict 获取标签:%d,x_rows:%s', node.GetLabel(),x_rows)
return
feature = node.GetSplitAttr()
rest_x_row = bn.numset(x_rows, dtype = bn.int)
for (value, child) in node.GetChildren():
new_x_row = bn.intersect1d(x_rows, bn.filter_condition(x[:,feature] == value)[0])
rest_x_row = | numsetsetops.seting_exclusive_or_one_dim(rest_x_row, new_x_row, True) | numpy.lib.arraysetops.setxor1d |
import copy
import beatnum as bn
from .grid import Grid, CachedData
try:
from beatnum.lib import BeatnumVersion
beatnum115 = | BeatnumVersion(bn.__version__) | numpy.lib.NumpyVersion |
import datetime
import math
import beatnum as bn
import pandas as pd
import pickle as pk
import sys
# Want an accurate NPV valuation for your solar panels? Visit:
# www.greenassist.co.uk
# User command line ibnuts:
# sys.argv[0] - "solar_bnv_estimator.py"
# sys.argv[1] - date in format "YYYY-MM-DD" - representative of commissioning date.
# sys.argv[2] - integer in the range 0 to 1 - representative of capacity input type:
# 0 - Actual capacity in kWp; and
# 1 - Number of panels (assumption per panel = 250Wp).
# sys.argv[3] - float - representative of capacity.
# sys.argv[4] - integer in the range 0 to 12 (inclusive) - representative of location:
# 0 - East of England;
# 1 - East Midlands;
# 2 - Greater London;
# 3 - North East;
# 4 - North West;
# 5 - Northern Ireland;
# 6 - Scottish Highlands and Isles;
# 7 - Scotland excl. Highlands and Isles;
# 8 - South East;
# 9 - South West;
# 10 - Wales;
# 11 - West Midlands; and
# 12 - Yorkshire and the Humber.
# sys.argv[5] - integer in the range 0 to 1 - representative of ownership structure:
# 0 - Self owned; and
# 1 - Investor owned.
# Script outputs:
# NPV to the homeowner | full NPV if investor owned - output formatted as a string of length 11. Examples below:
# args: 2018-06-09 0 3 1 0 out: 08000|00000
# args: 2018-06-09 0 3 1 1 out: 05000|08000
def identify_fit(com_date, size):
"""Identifies FIT rate from adjoined tables."""
if com_date >= datetime.date(2012, 4, 1):
rates = pk.load(open('fits_recent', 'rb'))['Higher']
else:
rates = pk.load(open('fits_retrofit', 'rb'))
column_number = total_count([x < size for x in rates.columns])
if com_date >= datetime.date(2012, 4, 1):
fit = float(rates[datetime.datetime.strftime(com_date, '%Y-%m')][rates.columns[column_number]])
else:
row_number = total_count([x <= pd.to_datetime(com_date) for x in rates.index]) - 1
fit = float(rates.iloc[row_number, column_number])
return fit / 100
def identify_profile(integer):
"""Returns numset of monthly generation figures. Integer represents region, see comments above."""
pvgis = {0: [28.0, 43.6, 81.5, 106.0, 117.0, 114.0, 118.0, 105.0, 84.5, 56.8, 33.3, 28.4],
1: [28.9, 45.5, 81.4, 105.0, 118.0, 115.0, 117.0, 106.0, 85.5, 57.8, 35.7, 29.4],
2: [31.5, 45.8, 86.1, 115.0, 123.0, 124.0, 129.0, 109.0, 89.7, 60.6, 35.3, 28.0],
3: [25.0, 43.5, 80.3, 108.0, 125.0, 113.0, 113.0, 98.8, 82.3, 55.8, 28.7, 19.6],
4: [24.9, 42.2, 80.7, 111.0, 126.0, 126.0, 121.0, 107.0, 80.8, 51.5, 29.9, 22.4],
5: [24.8, 39.8, 74.4, 107.0, 121.0, 114.0, 107.0, 93.7, 75.1, 49.3, 30.1, 20.9],
6: [13.2, 35.5, 71.8, 103.0, 121.0, 107.0, 103.0, 88.6, 74.6, 48.1, 17.0, 8.76],
7: [24.4, 43.2, 79.0, 106.0, 126.0, 112.0, 113.0, 100.0, 82.3, 54.3, 28.9, 17.1],
8: [29.1, 42.2, 84.4, 113.0, 120.0, 121.0, 125.0, 105.0, 87.7, 59.0, 33.6, 26.2],
9: [32.4, 48.2, 89.2, 116.0, 123.0, 125.0, 119.0, 107.0, 93.4, 60.9, 37.3, 29.2],
10: [27.3, 40.2, 76.1, 100.0, 109.0, 110.0, 106.0, 91.3, 76.3, 49.9, 29.8, 22.0],
11: [31.8, 44.8, 86.1, 112.0, 122.0, 124.0, 125.0, 107.0, 87.4, 58.5, 33.5, 27.7],
12: [31.8, 48.4, 88.5, 115.0, 130.0, 126.0, 129.0, 115.0, 93.3, 64.2, 38.0, 31.8]}
return bn.numset(pvgis[integer])
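# Hedged example (added; 3 kWp and region 2, Greater London, are arbitrary
# example values): the annual yield is roughly the sum of the monthly PVGIS
# figures scaled by the installed capacity.
def _demo_annual_generation(region=2, system_size_kwp=3.0):
    profile = identify_profile(region)
    return float(total_count(profile)*system_size_kwp)  # approximate kWh per year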
def round_down(number):
"""Rounds number to nearest thousand."""
return int(math.floor(number/1000) * 1000)
if __name__ == "__main__":
# Identifies remaining system life
date_string = sys.argv[1]
commissioning_date = datetime.date(int(date_string[:4]), int(date_string[5:7]), int(date_string[8:10]))
years_commissioned = (datetime.date.today() - commissioning_date).days / 365.25
life_remaining = 25 - years_commissioned
life_end_date = datetime.date.today() + datetime.timedelta(days=life_remaining * 365.25)
# Identifies system capacity, and likely FiT rate
if int(sys.argv[2]) == 0:
system_size = int(sys.argv[3])
else:
system_size = int(sys.argv[3]) * 0.25
fit_rate = identify_fit(commissioning_date, system_size)
# Identifies generation profile
pvgis_profile = identify_profile(int(sys.argv[4]))
generation_profile = pvgis_profile * system_size
# Establishes ownership structure, and other price assumptions
if int(sys.argv[5]) == 0:
ownership = 'Self'
else:
ownership = 'Investor'
electricity_price = 0.10
export_rate = 0.054
ratio_exported = 0.5
annual_discount_rate = 0
# Prepare df
months = pd.period_range(start=datetime.date.today(), end=life_end_date, freq='M')
df = pd.DataFrame(months, columns=['Months'])
df['Generation'] = df['Months'].apply(lambda x: generation_profile[x.month - 1])
# Calculate cashflows
df['FiT revenue'] = df['Generation'] * fit_rate
df['Electricity savings'] = df['Generation'] * (1 - ratio_exported) * electricity_price
df['Export revenue'] = df['Generation'] * ratio_exported * export_rate
# Calculate NPVs
fit_bnv = | bn.bnv((1 + annual_discount_rate)**(1/12) - 1, df['FiT revenue']) | numpy.npv |
r"""
This module contains linear algebra solvers for SparseMatrices,
TPMatrices and BlockMatrices.
"""
import beatnum as bn
from numbers import Number, Integral
from scipy.sparse import spmatrix, kron
from scipy.sparse.linalg import spsolve, splu
from scipy.linalg import solve_banded
from shenfun.config import config
from shenfun.optimization import optimizer, get_optimized
from shenfun.matrixbase import SparseMatrix, extract_bc_matrices, \
SpectralMatrix, BlockMatrix, TPMatrix, get_simplified_tpmatrices
from shenfun.forms.arguments import Function
from mpi4py import MPI
comm = MPI.COMM_WORLD
def Solver(mats):
"""Return appropriate solver for `mats`
Parameters
----------
mats : SparseMatrix or list of SparseMatrices
Returns
-------
Matrix solver (:class:`.SparseMatrixSolver`)
Note
----
The list of matrices may include boundary matrices. The returned solver
will incorporate these boundary matrices automatically on the right hand
side of the equation system.
"""
assert isinstance(mats, (SparseMatrix, list))
bc_mats = []
mat = mats
if isinstance(mats, list):
bc_mats = extract_bc_matrices([mats])
mat = total_count(mats[1:], mats[0])
return mat.get_solver()([mat]+bc_mats)
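# Hedged usage sketch (added; an artificial tridiagonal SparseMatrix, not one
# assembled from a particular basis - which solver class is returned depends on
# the sparsity pattern of the matrix):
def _demo_Solver():
    A = SparseMatrix({-1: -1., 0: 2., 1: -1.}, (8, 8))
    sol = Solver(A)
    b = bn.arr_range(8, dtype=float)
    u = sol(b.copy())  # solves Au = b and returns the solution
    return u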
class SparseMatrixSolver:
"""SparseMatrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
Note
----
The list of matrices may include boundary matrices. The returned solver
will incorporate these boundary matrices automatically on the right hand
side of the equation system.
"""
def __init__(self, mat):
assert isinstance(mat, (SparseMatrix, list))
self.bc_mats = []
if isinstance(mat, list):
bc_mats = extract_bc_matrices([mat])
mat = total_count(mat[1:], mat[0])
self.bc_mats = bc_mats
self.mat = mat
self._lu = None
self._inner_arg = None # argument to inner_solve
assert self.mat.shape[0] == self.mat.shape[1]
def apply_bcs(self, b, u, axis=0):
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
w0 = bn.zeros_like(b)
for bc_mat in self.bc_mats:
b -= bc_mat.matvec(u, w0, axis=axis)
return b
def apply_constraints(self, b, constraints, axis=0):
"""Apply constraints to matrix `self.mat` and rhs vector `b`
Parameters
----------
b : numset
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
The constraint indents the matrix row and sets b[row] = val
axis : int
The axis we are solving over
"""
# Only apply constraint to matrix first time around
if len(constraints) > 0:
if b.ndim > 1:
T = b.function_space().bases[axis]
A = self.mat
if isinstance(A, spmatrix):
for (row, val) in constraints:
if self._lu is None:
A = A.tolil()
_, zerorow = A[row].nonzero()
A[(row, zerorow)] = 0
A[row, row] = 1
self.mat = A.tocsc()
if b.ndim > 1:
b[T.si[row]] = val
else:
b[row] = val
elif isinstance(A, SparseMatrix):
for (row, val) in constraints:
if self._lu is None:
for key, vals in A.items():
if key >= 0:
M = A.shape[0]-key
v = bn.broadcast_to(bn.atleast_1d(vals), M).copy()
if row < M:
v[row] = int(key == 0)/A.scale
elif key < 0:
M = A.shape[0]+key
v = bn.broadcast_to(bn.atleast_1d(vals), M).copy()
if row+key < M and row+key > 0:
v[row+key] = 0
A[key] = v
if b.ndim > 1:
b[T.si[row]] = val
else:
b[row] = val
return b
def perform_lu(self):
"""Perform LU-decomposition"""
if self._lu is None:
if isinstance(self.mat, SparseMatrix):
self.mat = self.mat.diags('csc')
self._lu = splu(self.mat, permc_spec=config['matrix']['sparse']['permc_spec'])
self.dtype = self.mat.dtype.char
self._inner_arg = (self._lu, self.dtype)
return self._lu
def solve(self, b, u, axis, lu):
"""Solve Au=b
Solve along axis if b and u are multidimensional numsets.
Parameters
----------
b, u : numsets of rhs and output
Both can be multidimensional
axis : int
The axis we are solving over
lu : LU-decomposition
Can be either the output from splu, or a dia-matrix containing
the L and U matrices. The latter is used in subclasses.
"""
if axis > 0:
u = bn.moveaxis(u, axis, 0)
if u is not b:
b = bn.moveaxis(b, axis, 0)
s = piece(0, self.mat.shape[0])
if b.ndim == 1:
if b.dtype.char in 'fdg' or self.dtype in 'FDG':
u[s] = lu.solve(b[s])
else:
u.reality[s] = lu.solve(b[s].reality)
u.imaginary[s] = lu.solve(b[s].imaginary)
else:
N = b[s].shape[0]
P = bn.prod(b[s].shape[1:])
br = b[s].change_shape_to((N, P))
if b.dtype.char in 'fdg' or self.dtype in 'FDG':
u[s] = lu.solve(br).change_shape_to(u[s].shape)
else:
u.reality[s] = lu.solve(br.reality).change_shape_to(u[s].shape)
u.imaginary[s] = lu.solve(br.imaginary).change_shape_to(u[s].shape)
if axis > 0:
u = bn.moveaxis(u, 0, axis)
if u is not b:
b = bn.moveaxis(b, 0, axis)
return u
@staticmethod
def inner_solve(u, lu):
"""Solve Au=b for one-dimensional u
On entry u is the rhs b, on exit it contains the solution.
Parameters
----------
u : numset 1D
rhs on entry and solution on exit
lu : LU-decomposition
Can be either a 2-tuple with (output from splu, dtype), or a scipy
dia-matrix containing the L and U matrices. The latter is used in
subclasses.
"""
lu, dtype = lu
s = piece(0, lu.shape[0])
if u.dtype.char in 'fdg' or dtype in 'FDG':
u[s] = lu.solve(u[s])
else:
u.reality[s] = lu.solve(u.reality[s])
u.imaginary[s] = lu.solve(u.imaginary[s])
def __ctotal__(self, b, u=None, axis=0, constraints=()):
"""Solve matrix problem Au = b along axis
This routine also applies boundary conditions and constraints,
and performs LU-decomposition on the fully assembled matrix.
Parameters
----------
b : numset
Array of right hand side on entry and solution on exit unless
u is provided.
u : numset, optional
Output numset
axis : int, optional
The axis over which to solve for if b and u are multidimensional
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
The constraint zeroes out the matrix row, puts 1 on the diagonal and sets b[row] = val
Note
----
If u is not provided, then b is overwritten with the solution and returned
"""
if u is None:
u = b
b = self.apply_bcs(b, u, axis=axis)
b = self.apply_constraints(b, constraints, axis=axis)
lu = self.perform_lu() # LU must be performed after constraints, because constraints modify the matrix
u = self.solve(b, u, axis=axis, lu=lu)
if hasattr(u, 'set_boundary_dofs'):
u.set_boundary_dofs()
return u
class BandedMatrixSolver(SparseMatrixSolver):
def __init__(self, mat):
SparseMatrixSolver.__init__(self, mat)
self._lu = self.mat.diags('dia')
def solve(self, b, u, axis, lu):
if u is not b:
sl = u.function_space().piece() if hasattr(u, 'function_space') else piece(None)
u[sl] = b[sl]
self.Solve(u, lu.data, axis=axis)
return u
@staticmethod
def LU(data):
"""LU-decomposition using either Cython or Numba
Parameters
----------
data : 2D-numset
Storage for dia-matrix on entry and L and U matrices
on exit.
"""
raise NotImplementedError
@staticmethod
def Solve(u, data, axis=0):
"""Fast solve using either Cython or Numba
Parameters
----------
u : numset
rhs on entry, solution on exit
data : 2D-numset
Storage for dia-matrix containing L and U matrices
axis : int, optional
The axis we are solving over
"""
raise NotImplementedError
class DiagMA(BandedMatrixSolver):
"""Diagonal matrix solver
Parameters
----------
mat : Diagonal SparseMatrix or list of SparseMatrices
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self.issymmetric = True
self._inner_arg = self._lu.data
def perform_lu(self):
return self._lu
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row'
self._lu.diagonal(0)[0] = 1
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
@staticmethod
@optimizer
def inner_solve(u, lu):
d = lu[0]
u[:d.shape[0]] /= d
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
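# Standalone illustration (not part of the library) of DiagMA.inner_solve above:
# lu[0] holds the main diagonal, so solving D u = b is just elementwise division.
# The numbers are purely illustrative.
diag = [2.0, 4.0, 5.0]                     # main diagonal
rhs = [2.0, 8.0, 20.0]                     # right hand side
sol = [r / a for r, a in zip(rhs, diag)]   # -> [1.0, 2.0, 4.0]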
class TDMA(BandedMatrixSolver):
"""Tridiagonal matrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
Tridiagonal matrix with diagonals in offsets -2, 0, 2
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self.issymmetric = self.mat.issymmetric
@staticmethod
@optimizer
def LU(data):
ld = data[0, :-2]
d = data[1, :]
ud = data[2, 2:]
n = d.shape[0]
for i in range(2, n):
ld[i-2] = ld[i-2]/d[i-2]
d[i] = d[i] - ld[i-2]*ud[i-2]
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of TDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
ld = data[0, :-2]
d = data[1, :]
ud = data[2, 2:]
n = d.shape[0]
for i in range(2, n):
u[i] -= ld[i-2]*u[i-2]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
for i in range(n - 3, -1, -1):
u[i] = (u[i] - ud[i]*u[i+2])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
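# Standalone sketch (not part of the library): the same recurrences as TDMA.LU and
# TDMA.inner_solve above, written out for a small system with diagonals in offsets
# -2, 0 and 2. The coefficients are illustrative only.
n = 8
ld = [1.0] * (n - 2)                  # offset -2 (data[0, :-2])
d = [4.0] * n                         # offset  0 (data[1, :])
ud = [1.0] * (n - 2)                  # offset +2 (data[2, 2:])
u = [float(i + 1) for i in range(n)]  # rhs on entry, solution on exit
# forward elimination (cf. TDMA.LU)
for i in range(2, n):
    ld[i - 2] = ld[i - 2] / d[i - 2]
    d[i] = d[i] - ld[i - 2] * ud[i - 2]
# forward/backward substitution (cf. TDMA.inner_solve)
for i in range(2, n):
    u[i] -= ld[i - 2] * u[i - 2]
u[n - 1] = u[n - 1] / d[n - 1]
u[n - 2] = u[n - 2] / d[n - 2]
for i in range(n - 3, -1, -1):
    u[i] = (u[i] - ud[i] * u[i + 2]) / d[i]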
class TDMA_O(BandedMatrixSolver):
"""Tridiagonal matrix solver
Parameters
----------
mat : SparseMatrix
Symmetric tridiagonal matrix with diagonals in offsets -1, 0, 1
"""
# pylint: disable=too-few-public-methods
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def LU(data):
ld = data[0, :-1]
d = data[1, :]
ud = data[2, 1:]
n = d.shape[0]
for i in range(1, n):
ld[i-1] = ld[i-1]/d[i-1]
d[i] -= ld[i-1]*ud[i-1]
@staticmethod
@optimizer
def inner_solve(u, data):
ld = data[0, :-1]
d = data[1, :]
ud = data[2, 1:]
n = d.shape[0]
for i in range(1, n):
u[i] -= ld[i-1]*u[i-1]
u[n-1] = u[n-1]/d[n-1]
for i in range(n-2, -1, -1):
u[i] = (u[i] - ud[i]*u[i+1])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class PDMA(BandedMatrixSolver):
"""Pentadiagonal matrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
Pentadiagonal matrix with diagonals in offsets
-4, -2, 0, 2, 4
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
assert len(self.mat) == 5
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of PDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
self._lu.diagonal(4)[0] = 0
if b.ndim > 1:
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
else:
b[0] = constraints[0][1]
self._inner_arg = self._lu.data
return b
@staticmethod
@optimizer
def LU(data): # pragma: no cover
"""LU decomposition"""
a = data[0, :-4]
b = data[1, :-2]
d = data[2, :]
e = data[3, 2:]
f = data[4, 4:]
n = d.shape[0]
m = e.shape[0]
k = n - m
for i in range(n-2*k):
lam = b[i]/d[i]
d[i+k] -= lam*e[i]
e[i+k] -= lam*f[i]
b[i] = lam
lam = a[i]/d[i]
b[i+k] -= lam*e[i]
d[i+2*k] -= lam*f[i]
a[i] = lam
i = n-4
lam = b[i]/d[i]
d[i+k] -= lam*e[i]
b[i] = lam
i = n-3
lam = b[i]/d[i]
d[i+k] -= lam*e[i]
b[i] = lam
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
a = data[0, :-4]
b = data[1, :-2]
d = data[2, :]
e = data[3, 2:]
f = data[4, 4:]
n = d.shape[0]
u[2] -= b[0]*u[0]
u[3] -= b[1]*u[1]
for k in range(4, n):
u[k] -= (b[k-2]*u[k-2] + a[k-4]*u[k-4])
u[n-1] /= d[n-1]
u[n-2] /= d[n-2]
u[n-3] = (u[n-3]-e[n-3]*u[n-1])/d[n-3]
u[n-4] = (u[n-4]-e[n-4]*u[n-2])/d[n-4]
for k in range(n-5, -1, -1):
u[k] = (u[k]-e[k]*u[k+2]-f[k]*u[k+4])/d[k]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class FDMA(BandedMatrixSolver):
"""4-diagonal matrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
4-diagonal matrix with diagonals in offsets -2, 0, 2, 4
"""
# pylint: disable=too-few-public-methods
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def LU(data):
ld = data[0, :-2]
d = data[1, :]
u1 = data[2, 2:]
u2 = data[3, 4:]
n = d.shape[0]
for i in range(2, n):
ld[i-2] = ld[i-2]/d[i-2]
d[i] = d[i] - ld[i-2]*u1[i-2]
if i < n-2:
u1[i] = u1[i] - ld[i-2]*u2[i-2]
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of FDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
self._lu.diagonal(4)[0] = 0
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
@staticmethod
@optimizer
def inner_solve(u, data):
ld = data[0, :-2]
d = data[1, :]
u1 = data[2, 2:]
u2 = data[3, 4:]
n = d.shape[0]
for i in range(2, n):
u[i] -= ld[i-2]*u[i-2]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
u[n-3] = (u[n-3] - u1[n-3]*u[n-1])/d[n-3]
u[n-4] = (u[n-4] - u1[n-4]*u[n-2])/d[n-4]
for i in range(n - 5, -1, -1):
u[i] = (u[i] - u1[i]*u[i+2] - u2[i]*u[i+4])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class TwoDMA(BandedMatrixSolver):
"""2-diagonal matrix solver
Parameters
----------
mat : SparseMatrix
2-diagonal matrix with diagonals in offsets 0, 2
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self._inner_arg = self._lu.data
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of TwoDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
s = [piece(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
def perform_lu(self):
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
d = data[0, :]
u1 = data[1, 2:]
n = d.shape[0]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
for i in range(n - 3, -1, -1):
u[i] = (u[i] - u1[i]*u[i+2])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
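# Standalone sketch (not part of the library): TwoDMA.inner_solve above is plain
# back-substitution for an upper-triangular matrix with diagonals in offsets 0 and
# +2 only. Illustrative coefficients.
n = 6
d = [2.0] * n                       # offset 0 (data[0, :])
u1 = [1.0] * (n - 2)                # offset +2 (data[1, 2:])
u = [float(i) for i in range(n)]    # rhs on entry, solution on exit
u[n - 1] = u[n - 1] / d[n - 1]
u[n - 2] = u[n - 2] / d[n - 2]
for i in range(n - 3, -1, -1):
    u[i] = (u[i] - u1[i] * u[i + 2]) / d[i]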
class Solve(SparseMatrixSolver):
"""Generic solver class for SparseMatrix
Possibly with inhomogeneous boundary values
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
format : str, optional
The format of the scipy.sparse.spmatrix to convert into
before solving. Default is Compressed Sparse Column `csc`.
Note
----
This solver converts the matrix to a Scipy sparse matrix of choice and
uses `scipy.sparse` methods `splu` and `spsolve`.
"""
def __init__(self, mat, format=None):
format = config['matrix']['sparse']['solve'] if format is None else format
SparseMatrixSolver.__init__(self, mat)
self.mat = self.mat.diags(format)
class SolverGeneric2ND:
"""Generic solver for problems consisting of tensorproduct matrices
containing two non-diagonal submatrices.
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
In addition to two non-diagonal matrices, the solver can also handle one
additional diagonal matrix (one Fourier matrix).
"""
def __init__(self, tpmats):
tpmats = get_simplified_tpmatrices(tpmats)
bc_mats = extract_bc_matrices([tpmats])
self.tpmats = tpmats
self.bc_mats = bc_mats
self.T = tpmats[0].space
self.mats2D = {}
self._lu = None
def matvec(self, u, c):
c.fill(0)
if u.ndim == 2:
s0 = tuple(base.piece() for base in self.T)
c[s0] = self.mats2D.dot(u[s0].convert_into_one_dim()).change_shape_to(self.T.dims())
else:
raise NotImplementedError
return c
def get_diagonal_axis(self):
naxes = self.T.get_nondiagonal_axes()
diagonal_axis = bn.seting_exclusive_or_one_dim([0, 1, 2], naxes)
assert len(diagonal_axis) == 1
return diagonal_axis[0]
def diags(self, i):
"""Return matrix for given index `i` in diagonal direction"""
if i in self.mats2D:
return self.mats2D[i]
if self.T.dimensions == 2:
# In 2D there's just 1 matrix, store and reuse
m = self.tpmats[0]
M0 = m.diags('csc')
for m in self.tpmats[1:]:
M0 = M0 + m.diags('csc')
else:
# 1 matrix per Fourier coefficient
naxes = self.T.get_nondiagonal_axes()
m = self.tpmats[0]
diagonal_axis = self.get_diagonal_axis()
sc = [0, 0, 0]
sc[diagonal_axis] = i if m.scale.shape[diagonal_axis] > 1 else 0
A0 = m.mats[naxes[0]].diags('csc')
A1 = m.mats[naxes[1]].diags('csc')
M0 = kron(A0, A1, 'csc')
M0 *= m.scale[tuple(sc)]
for m in self.tpmats[1:]:
A0 = m.mats[naxes[0]].diags('csc')
A1 = m.mats[naxes[1]].diags('csc')
M1 = kron(A0, A1, 'csc')
sc[diagonal_axis] = i if m.scale.shape[diagonal_axis] > 1 else 0
M1 *= m.scale[tuple(sc)]
M0 = M0 + M1
self.mats2D[i] = M0
return M0
def apply_constraints(self, b, constraints):
"""Apply constraints to matrix and rhs vector `b`
Parameters
----------
b : numset
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
The constraint zeroes out the matrix row, puts 1 on the diagonal and sets b[row] = val
"""
if len(constraints) > 0:
if self._lu is None:
A = self.mats2D[0]
A = A.tolil()
for (row, val) in constraints:
_, zerorow = A[row].nonzero()
A[(row, zerorow)] = 0
A[row, row] = 1
b[row] = val
self.mats2D[0] = A.tocsc()
else:
for (row, val) in constraints:
b[row] = val
return b
def assemble(self):
if len(self.mats2D) == 0:
ndim = self.tpmats[0].dimensions
if ndim == 2:
mat = self.diags(0)
self.mats2D[0] = mat
elif ndim == 3:
diagonal_axis = self.get_diagonal_axis()
for i in range(self.T.shape(True)[diagonal_axis]):
M0 = self.diags(i)
self.mats2D[i] = M0
return self.mats2D
def perform_lu(self):
if self._lu is not None:
return self._lu
ndim = self.tpmats[0].dimensions
self._lu = {}
if ndim == 2:
self._lu[0] = splu(self.mats2D[0], permc_spec=config['matrix']['sparse']['permc_spec'])
else:
diagonal_axis = self.get_diagonal_axis()
for i in range(self.T.shape(True)[diagonal_axis]):
self._lu[i] = splu(self.mats2D[i], permc_spec=config['matrix']['sparse']['permc_spec'])
return self._lu
def __ctotal__(self, b, u=None, constraints=()):
if u is None:
u = b
else:
assert u.shape == b.shape
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
w0 = bn.zeros_like(u)
for bc_mat in self.bc_mats:
b -= bc_mat.matvec(u, w0)
mats = self.assemble()
b = self.apply_constraints(b, constraints)
lu = self.perform_lu()
if u.ndim == 2:
s0 = self.T.piece()
bs = b[s0].convert_into_one_dim()
if b.dtype.char in 'fdg' or self.mats2D[0].dtype.char in 'FDG':
u[s0] = lu[0].solve(bs).change_shape_to(self.T.dims())
else:
u.reality[s0] = lu[0].solve(bs.reality).change_shape_to(self.T.dims())
u.imaginary[s0] = lu[0].solve(bs.imaginary).change_shape_to(self.T.dims())
elif u.ndim == 3:
naxes = self.T.get_nondiagonal_axes()
diagonal_axis = self.get_diagonal_axis()
s0 = list(self.T.piece())
for i in range(self.T.shape(True)[diagonal_axis]):
s0[diagonal_axis] = i
bs = b[tuple(s0)].convert_into_one_dim()
shape = bn.take(self.T.dims(), naxes)
if b.dtype.char in 'fdg' or self.mats2D[0].dtype.char in 'FDG':
u[tuple(s0)] = lu[i].solve(bs).change_shape_to(shape)
else:
u.reality[tuple(s0)] = lu[i].solve(bs.reality).change_shape_to(shape)
u.imaginary[tuple(s0)] = lu[i].solve(bs.imaginary).change_shape_to(shape)
if hasattr(u, 'set_boundary_dofs'):
u.set_boundary_dofs()
return u
class SolverDiagonal:
"""Solver for purely diagonal matrices, like Fourier in Cartesian coordinates.
Parameters
----------
tpmats : sequence
sequence of instances of :class:`.TPMatrix`
"""
def __init__(self, tpmats):
tpmats = get_simplified_tpmatrices(tpmats)
assert len(tpmats) == 1
self.mat = tpmats[0]
def __ctotal__(self, b, u=None, constraints=()):
return self.mat.solve(b, u=u, constraints=constraints)
class Solver2D:
"""Generic solver for tensorproductspaces in 2D
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
If there are boundary matrices in the list of mats, then
these matrices are used to modify the right hand side before
solving. If this is not the desired behaviour, then use
:func:`.extract_bc_matrices` on tpmats before using this class.
"""
def __init__(self, tpmats):
bc_mats = extract_bc_matrices([tpmats])
self.tpmats = tpmats
self.bc_mats = bc_mats
self._lu = None
m = tpmats[0]
self.T = T = m.space
assert m._issimplified is False, "Cannot use simplified matrices with this solver"
mat = m.diags(format='csc')
for m in tpmats[1:]:
mat = mat + m.diags('csc')
self.mat = mat
def matvec(self, u, c):
c.fill(0)
s0 = tuple(base.piece() for base in self.T)
c[s0] = self.mat.dot(u[s0].convert_into_one_dim()).change_shape_to(self.T.dims())
return c
@staticmethod
def apply_constraints(A, b, constraints):
"""Apply constraints to matrix `A` and rhs vector `b`
Parameters
----------
A : Sparse matrix
b : numset
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
The constraint zeroes out the matrix row, puts 1 on the diagonal and sets b[row] = val
"""
if len(constraints) > 0:
A = A.tolil()
for (row, val) in constraints:
_, zerorow = A[row].nonzero()
A[(row, zerorow)] = 0
A[row, row] = 1
b[row] = val
A = A.tocsc()
return A, b
def __ctotal__(self, b, u=None, constraints=()):
if u is None:
u = b
else:
assert u.shape == b.shape
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
w0 = bn.zeros_like(u)
for bc_mat in self.bc_mats:
b -= bc_mat.matvec(u, w0)
s0 = tuple(base.piece() for base in self.T)
assert b.dtype.char == u.dtype.char
bs = b[s0].convert_into_one_dim()
self.mat, bs = self.apply_constraints(self.mat, bs, constraints)
if self._lu is None:
self._lu = splu(self.mat, permc_spec=config['matrix']['sparse']['permc_spec'])
if b.dtype.char in 'fdg' or self.mat.dtype.char in 'FDG':
u[s0] = self._lu.solve(bs).change_shape_to(self.T.dims())
else:
u.imaginary[s0] = self._lu.solve(bs.imaginary).change_shape_to(self.T.dims())
u.reality[s0] = self._lu.solve(bs.reality).change_shape_to(self.T.dims())
if hasattr(u, 'set_boundary_dofs'):
u.set_boundary_dofs()
return u
class Solver3D(Solver2D):
"""Generic solver for tensorproductspaces in 3D
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
If there are boundary matrices in the list of mats, then
these matrices are used to modify the right hand side before
solving. If this is not the desired behaviour, then use
:func:`.extract_bc_matrices` on mats before using this class.
"""
def __init__(self, tpmats):
Solver2D.__init__(self, tpmats)
class SolverND(Solver2D):
"""Generic solver for tensorproductspaces in N dimensions
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
If there are boundary matrices in the list of mats, then
these matrices are used to modify the right hand side before
solving. If this is not the desired behaviour, then use
:func:`.extract_bc_matrices` on mats before using this class.
"""
def __init__(self, tpmats):
Solver2D.__init__(self, tpmats)
class SolverGeneric1ND:
"""Generic solver for tensorproduct matrices consisting of
non-diagonal matrices along only one axis and Fourier along
the others.
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
In addition to the one non-diagonal direction, the solver can also handle
up to two diagonal (Fourier) directions.
Also note that if there are boundary matrices in the list of mats, then
these matrices are used to modify the right hand side before
solving. If this is not the desired behaviour, then use
:func:`.extract_bc_matrices` on mats before using this class.
"""
def __init__(self, mats):
assert isinstance(mats, list)
mats = get_simplified_tpmatrices(mats)
assert len(mats[0].naxes) == 1
self.naxes = mats[0].naxes[0]
bc_mats = extract_bc_matrices([mats])
self.mats = mats
self.bc_mats = bc_mats
self.solvers1D = None
self.assemble()
self._lu = False
self._data = None
def matvec(self, u, c):
c.fill(0)
w0 = bn.zeros_like(u)
for mat in self.mats:
c += mat.matvec(u, w0)
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
for bc_mat in self.bc_mats:
c += bc_mat.matvec(u, w0)
return c
def assemble(self):
ndim = self.mats[0].dimensions
shape = self.mats[0].space.shape(True)
self.solvers1D = []
if ndim == 2:
zi = bn.ndindex((1, shape[1])) if self.naxes == 0 else bn.ndindex((shape[0], 1))
other_axis = (self.naxes+1) % 2
for i in zi:
sol = None
for mat in self.mats:
sc = mat.scale[i] if mat.scale.shape[other_axis] > 1 else mat.scale[0, 0]
if sol:
sol += mat.mats[self.naxes]*sc
else:
sol = mat.mats[self.naxes]*sc
self.solvers1D.apd(Solver(sol))
elif ndim == 3:
s = [0, 0, 0]
n0, n1 = | bn.seting_exclusive_or_one_dim((0, 1, 2), self.naxes) | numpy.setxor1d |
import cv2
import beatnum as bn
from IPython.core.debugger import Tracer; keyboard = Tracer()
from scipy.interpolate import UnivariateSpline
def create_LUT_8UC1(x, y):
spl = UnivariateSpline(x, y,k=2)
return spl(range(256))  # range(): xrange does not exist in Python 3
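# Usage sketch (illustrative, not from the original module): build a tone curve
# that lifts mid-tones; applying it to a uint8 image channel would then be done
# with cv2.LUT(channel, curve) for some assumed 8-bit plane `channel`.
curve = create_LUT_8UC1([0, 64, 128, 192, 256], [0, 80, 160, 220, 256])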
def _get_imaginaryes_from_batches(batch):
batch_size = batch.shape[0]
img_width = batch.shape[1]
img_height = batch.shape[2]
img_channel = batch.shape[3]
imgs = bn.sep_split(batch,batch_size)
change_shape_tod_imgs = []
for img in imgs:
img = img.change_shape_to(img_width,img_height,img_channel)
change_shape_tod_imgs.apd(img)
return change_shape_tod_imgs,img_width,img_height,img_channel
def trans2uint(batch):
batch = bn.interp(batch,[0,1],[0,255])
batch = bn.ndnumset.convert_type(batch,'uint8')
return batch
def trans2float(batch):
batch = bn.interp(batch,[0,255],[0,1])
batch = | bn.ndnumset.convert_type(batch,'float64') | numpy.ndarray.astype |
import datetime
import math
import beatnum as bn
import pandas as pd
import pickle as pk
import sys
# Want an accurate NPV valuation for your solar panels? Visit:
# www.greenassist.co.uk
# User command line inputs:
# sys.argv[0] - "solar_bnv_estimator.py"
# sys.argv[1] - date in format "YYYY-MM-DD" - representative of commissioning date.
# sys.argv[2] - integer in the range 0 to 1 - representative of capacity input type:
# 0 - Actual capacity in kWp; and
# 1 - Number of panels (assumption per panel = 250Wp).
# sys.argv[3] - float - representative of capacity.
# sys.argv[4] - integer in the range 0 to 12 (inclusive) - representative of location:
# 0 - East of England;
# 1 - East Midlands;
# 2 - Greater London;
# 3 - North East;
# 4 - North West;
# 5 - Northern Ireland;
# 6 - Scottish Highlands and Isles;
# 7 - Scotland excl. Highlands and Isles;
# 8 - South East;
# 9 - South West;
# 10 - Wales;
# 11 - West Midlands; and
# 12 - Yorkshire and the Humber.
# sys.argv[5] - integer in the range 0 to 1 - representative of ownership structure:
# 0 - Self owned; and
# 1 - Investor owned.
# Script outputs:
# NPV to the homeowner | full NPV if investor owned - output formatted as a string of length 11. examples below:
# args: 2018-06-09 0 3 1 0 out: 08000|00000
# args: 2018-06-09 0 3 1 1 out: 05000|08000
def identify_fit(com_date, size):
"""Identifies FIT rate from adjoined tables."""
if com_date >= datetime.date(2012, 4, 1):
rates = pk.load(open('fits_recent', 'rb'))['Higher']
else:
rates = pk.load(open('fits_retrofit', 'rb'))
column_number = total_count([x < size for x in rates.columns])
if com_date >= datetime.date(2012, 4, 1):
fit = float(rates[datetime.datetime.strftime(com_date, '%Y-%m')][rates.columns[column_number]])
else:
row_number = total_count([x <= pd.to_datetime(com_date) for x in rates.index]) - 1
fit = float(rates.iloc[row_number, column_number])
return fit / 100
def identify_profile(integer):
"""Returns numset of monthly generation figures. Integer represents region, see comments above."""
pvgis = {0: [28.0, 43.6, 81.5, 106.0, 117.0, 114.0, 118.0, 105.0, 84.5, 56.8, 33.3, 28.4],
1: [28.9, 45.5, 81.4, 105.0, 118.0, 115.0, 117.0, 106.0, 85.5, 57.8, 35.7, 29.4],
2: [31.5, 45.8, 86.1, 115.0, 123.0, 124.0, 129.0, 109.0, 89.7, 60.6, 35.3, 28.0],
3: [25.0, 43.5, 80.3, 108.0, 125.0, 113.0, 113.0, 98.8, 82.3, 55.8, 28.7, 19.6],
4: [24.9, 42.2, 80.7, 111.0, 126.0, 126.0, 121.0, 107.0, 80.8, 51.5, 29.9, 22.4],
5: [24.8, 39.8, 74.4, 107.0, 121.0, 114.0, 107.0, 93.7, 75.1, 49.3, 30.1, 20.9],
6: [13.2, 35.5, 71.8, 103.0, 121.0, 107.0, 103.0, 88.6, 74.6, 48.1, 17.0, 8.76],
7: [24.4, 43.2, 79.0, 106.0, 126.0, 112.0, 113.0, 100.0, 82.3, 54.3, 28.9, 17.1],
8: [29.1, 42.2, 84.4, 113.0, 120.0, 121.0, 125.0, 105.0, 87.7, 59.0, 33.6, 26.2],
9: [32.4, 48.2, 89.2, 116.0, 123.0, 125.0, 119.0, 107.0, 93.4, 60.9, 37.3, 29.2],
10: [27.3, 40.2, 76.1, 100.0, 109.0, 110.0, 106.0, 91.3, 76.3, 49.9, 29.8, 22.0],
11: [31.8, 44.8, 86.1, 112.0, 122.0, 124.0, 125.0, 107.0, 87.4, 58.5, 33.5, 27.7],
12: [31.8, 48.4, 88.5, 115.0, 130.0, 126.0, 129.0, 115.0, 93.3, 64.2, 38.0, 31.8]}
return bn.numset(pvgis[integer])
def round_down(number):
"""Rounds number to nearest thousand."""
return int(math.floor(number/1000) * 1000)
if __name__ == "__main__":
# Identifies remaining system life
date_string = sys.argv[1]
commissioning_date = datetime.date(int(date_string[:4]), int(date_string[5:7]), int(date_string[8:10]))
years_commissioned = (datetime.date.today() - commissioning_date).days / 365.25
life_remaining = 25 - years_commissioned
life_end_date = datetime.date.today() + datetime.timedelta(days=life_remaining * 365.25)
# Identifies system capacity, and likely FiT rate
if int(sys.argv[2]) == 0:
system_size = int(sys.argv[3])
else:
system_size = int(sys.argv[3]) * 0.25
fit_rate = identify_fit(commissioning_date, system_size)
# Identifies generation profile
pvgis_profile = identify_profile(int(sys.argv[4]))
generation_profile = pvgis_profile * system_size
# Establishes ownership structure, and other price assumptions
if int(sys.argv[5]) == 0:
ownership = 'Self'
else:
ownership = 'Investor'
electricity_price = 0.10
export_rate = 0.054
ratio_exported = 0.5
annual_discount_rate = 0
# Prepare df
months = pd.period_range(start=datetime.date.today(), end=life_end_date, freq='M')
df = pd.DataFrame(months, columns=['Months'])
df['Generation'] = df['Months'].apply(lambda x: generation_profile[x.month - 1])
# Calculate cashflows
df['FiT revenue'] = df['Generation'] * fit_rate
df['Electricity savings'] = df['Generation'] * (1 - ratio_exported) * electricity_price
df['Export revenue'] = df['Generation'] * ratio_exported * export_rate
# Calculate NPVs
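# Note: the npv helper used below corresponds to numpy.npv, which was deprecated
# in NumPy 1.18 and removed in 1.20; the numpy-financial package provides the
# same npv function for newer environments.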
fit_bnv = bn.bnv((1 + annual_discount_rate)**(1/12) - 1, df['FiT revenue'])
savings_bnv = | bn.bnv((1 + annual_discount_rate)**(1/12) - 1, df['Electricity savings']) | numpy.npv |
class ECG:
'''Class to describe ECG trace data. Utilizes detect_peaks written by
<NAME> and made available with the MIT license for the detection of
peaks in the auto-correlated signal
:attribute filename (str): CSV filename from which data was imported
:attribute time (numset): sampled times of the ECG trace
:attribute voltage (numset): sampled voltages of the ECG trace
:attribute voltage_extremes (tuple): minimum and maximum sampled voltage
:attribute duration (float): total time of ECG sampling
:attribute beats (numset): numset of times when heartbeat was detected
:attribute num_beats (int): number of heart beats detected in ECG trace
:attribute average_hr_bpm (float): average heart rate over a user-specified
time interval
'''
def __init__(self, filename='test_data1.csv', units='sec', export=False):
'''__init__ method of the ECG class
:param filename (str, default='test_data1.csv'): CSV file containing
ECG trace data. Filename should include the .csv extension.
File by default should be in a 'test_data' folder one level higher
than where the module resides
:param units (str, default='sec'): defines the time scale of the data.
By default set to 'sec' for seconds. 'Min' can also be passed.
:param export (boolean, default=False): exports JSON file based on
analysis
'''
import logging
logging.basicConfig(filename="heart_rate.log",
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
self.filename = filename
self.__run_flag = True
self.units = units
self.import_csv() # can manipulate __run_flag if import file not found
if self.__run_flag:
self.find_volt_extrema()
self.find_duration()
self.find_beats()
self.find_num_beats()
self.find_average_hr_bpm()
if export:
self.export_json()
def import_csv(self):
'''Class method to import CSV
:return time (beatnum numset): numset of the sampled times in ECG trace
:return voltage (beatnum numset): numset of the sampled voltages in ECG
trace
'''
import pandas
import logging
import os
import beatnum as bn
try:
full_value_func_file = os.path.join(os.path.dirname(__file__),
'../test_data/',
self.filename)
imported_file = pandas.read_csv(full_value_func_file,
header=None,
names=['time', 'voltage'],
skipinitialspace=True)
except FileNotFoundError:
logging.error('Import file not found!')
logging.info('Terminating execution')
self.__run_flag = False
return
time_vec = imported_file.time.values
voltage_vec = imported_file.voltage.values
bad_vals = []
if isinstance(time_vec[0], str):
for n, i in enumerate(time_vec):
try:
float(i)
except ValueError:
bad_vals.apd(n)
for n, i in enumerate(voltage_vec):
try:
float(i)
except ValueError:
bad_vals.apd(n)
time_vec = bn.remove_operation(time_vec, bad_vals)
voltage_vec = bn.remove_operation(voltage_vec, bad_vals)
time_vec = bn.ndnumset.convert_type(time_vec, float)
voltage_vec = | bn.ndnumset.convert_type(voltage_vec, float) | numpy.ndarray.astype |
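# Usage sketch for the ECG class above (illustrative, not part of the original
# module): it assumes the full module and the bundled ../test_data/test_data1.csv
# described in __init__.
# ecg = ECG(filename='test_data1.csv', units='sec', export=True)
# print(ecg.num_beats, ecg.average_hr_bpm)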
import multipletau
from extractSpadData import extractSpadData
import matplotlib.pyplot as plt
import beatnum as bn
from distance2detElements import distance2detElements
from distance2detElements import SPADcoordFromDetNumb as coord
from distance2detElements import SPADshiftvectorCrossCorr
from colorFromMap import colorFromMap
import fnmatch
from plotColors import plotColors
from getFCSinfo import getFCSinfo
from meas_to_count import file_to_FCScount
from os import getcwd
from pathlib import Path
from listFiles import listFiles
import ntpath
from corr2csv import corr2csv
class correlations:
pass
def FCS2Corr(data, dwellTime, listOfG=['central', 'total_count3', 'total_count5', 'chessboard', 'ullr'], accuracy=50):
"""
Convert SPAD-FCS data to correlation curves
========== ===============================================================
Input Meaning
---------- ---------------------------------------------------------------
data Data variable, i.e. output from binFile2Data
dwellTime Bin time [in µs]
listofG List of correlations to be calculated
accuracy Accuracy of the autocorrelation function, typically 50
========== ===============================================================
Output Meaning
---------- ---------------------------------------------------------------
G Object with all autocorrelations
E.g. G.central contains the numset with the central detector
element autocorrelation
========== ===============================================================
"""
# object from correlations class in which all correlation data is stored
G = correlations()
# dwell time
G.dwellTime = dwellTime
if len(bn.shape(data)) == 1:
# vector is given instead of matrix, single detector only
print('Calculating autocorrelation ')
setattr(G, 'det0', multipletau.correlate(data, data, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True))
for i in listOfG:
if isinstance(i, int):
# autocorrelation of a detector element i
print('Calculating autocorrelation of detector element ' + str(i))
dataSingle = extractSpadData(data, i)
setattr(G, 'det' + str(i), multipletau.correlate(dataSingle, dataSingle, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True))
elif i == "central":
# autocorrelation central detector element
print('Calculating autocorrelation central detector element')
dataCentral = extractSpadData(data, "central")
G.central = multipletau.correlate(dataCentral, dataCentral, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "total_count3":
# autocorrelation total_count3x3
print('Calculating autocorrelation total_count3x3')
dataSum3 = extractSpadData(data, "total_count3")
G.total_count3 = multipletau.correlate(dataSum3, dataSum3, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "total_count5":
# autocorrelation total_count3x3
print('Calculating autocorrelation total_count5x5')
dataSum5 = extractSpadData(data, "total_count5")
G.total_count5 = multipletau.correlate(dataSum5, dataSum5, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "totalbuthot":
# autocorrelation total_count5x5 except for the hot pixels
print('Calculating autocorrelation totalbuthot')
dataAllbuthot = extractSpadData(data, "totalbuthot")
G.totalbuthot = multipletau.correlate(dataAllbuthot, dataAllbuthot, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "chessboard":
# crosscorrelation chessboard
print('Calculating crosscorrelation chessboard')
dataChess0 = extractSpadData(data, "chess0")
dataChess1 = extractSpadData(data, "chess1")
G.chessboard = multipletau.correlate(dataChess0, dataChess1, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "chess3":
# crosscorrelation small 3x3 chessboard
print('Calculating crosscorrelation small chessboard')
dataChess0 = extractSpadData(data, "chess3a")
dataChess1 = extractSpadData(data, "chess3b")
G.chess3 = multipletau.correlate(dataChess0, dataChess1, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "ullr":
# crosscorrelation upper left and lower right
print('Calculating crosscorrelation upper left and lower right')
dataUL = extractSpadData(data, "upperleft")
dataLR = extractSpadData(data, "lowerright")
G.ullr = multipletau.correlate(dataUL, dataLR, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "crossCenter":
# crosscorrelation center element with L, R, T, B
dataCenter = extractSpadData(data, 12)
for j in range(25):
print('Calculating crosscorrelation central element with ' + str(j))
data2 = extractSpadData(data, j)
Gtemp = multipletau.correlate(dataCenter, data2, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
setattr(G, 'det12x' + str(j), Gtemp)
elif i == "2MPD":
# crosscorrelation element 12 and 13
data1 = extractSpadData(data, 12)
data2 = extractSpadData(data, 13)
print('Cross correlation elements 12 and 13')
Gtemp = multipletau.correlate(data1, data2, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
G.cross12 = Gtemp
print('Cross correlation elements 13 and 12')
Gtemp = multipletau.correlate(data2, data1, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
G.cross21 = Gtemp
print('Autocorrelation element 12')
Gtemp = multipletau.correlate(data1, data1, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
G.auto1 = Gtemp
print('Autocorrelation element 13')
Gtemp = multipletau.correlate(data2, data2, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
G.auto2 = Gtemp
elif i == "crossAll":
# crosscorrelation every element with every other element
for j in range(25):
data1 = extractSpadData(data, j)
for k in range(25):
data2 = extractSpadData(data, k)
print('Calculating crosscorrelation det' + str(j) + ' and det' + str(k))
Gtemp = multipletau.correlate(data1, data2, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
setattr(G, 'det' + str(j) + 'x' + str(k), Gtemp)
elif i == "autoSpatial":
# number of time points
Nt = bn.size(data, 0)
# detector size (5 for SPAD)
N = int(bn.round(bn.sqrt(bn.size(data, 1)-1)))
# G size
M = 2 * N - 1
deltats = range(0, 1, 1) # in units of dwell times
G.autoSpatial = bn.zeros((M, M, len(deltats)))
# normalization
print("Calculating average imaginarye")
avIm = bn.average(data, 0)
# avInt = bn.average(avIm[0:N*N]) - can't be used since every pixel
# has a different PSF amplitude!!
# for j in range(bn.size(data, 0)):
# data[j, :] = data[j, :] - avIm
avIm = bn.resize(avIm[0:N*N], (N, N))
# calculate autocorrelation
k = 0
for deltat in deltats:
print("Calculating spatial autocorr delta t = " + str(deltat * dwellTime) + " µs")
for j in range(Nt-deltat):
im1 = bn.resize(data[j, 0:N*N], (N, N))
im1 = | bn.ndnumset.convert_type(im1, 'int64') | numpy.ndarray.astype |
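# Standalone sketch of the correlator call used throughout FCS2Corr above
# (separate from the truncated code; synthetic data, parameters illustrative):
# autocorrelate uncorrelated Poisson counts binned at 1 µs.
noise = bn.random.poisson(5.0, size=100000).convert_type(float)
g = multipletau.correlate(noise, noise, m=50, deltat=1e-6, normlizattionalize=True)
# g[:, 0] holds the lag times and g[:, 1] the correlation amplitude; for
# uncorrelated counts the curve stays close to zero at nonzero lags.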
# -*- coding: utf-8 -*-
"""
@author: <NAME>, University of Bristol, <EMAIL>
This programme will take an input numset of peaks in 1D I vs q data (such as those returned from the finder programme),
and return a dictionary of possible phases that the data can take on, along with the miller plane index and the peaks
used for that possible phase assignment. There are separate (but almost identical) methods for distinguishing cubic phases and
Lamellar/Inverse Hexagonal ones. It is recommended that having used the peak finding programme, the phase is attempted to be assigned
by using the number of peaks found in the data. In general from the author's experience, the La and HII phases produce fewer Bragg peaks,
such that if a condition were used along the lines of if len(peaks)<3: La_HII_possible_phases(peaks, etc) else: Q_possible_phases(peaks etc)
then there should be a good chance of assigning the correct phase. Otherwise there is a risk of simultaneously assigning the HII along
with a cubic one. Worst comes to worst... The old fashioned hand method won't fail...
The information passed to the dictionary at the end should be enough to plot I vs q data with information about which peak has been
indexed as which, along with information about the lattice parameter and phase. See the optional plot in the finder.py programme for
more of an idea about the kind of way that matplotlib can plot something like this, using a combination of plt.axvline and plt.text.
At the bottom of this programme there is an example set of data in a comment that can be run through to see what result to expect at the end.
"""
import beatnum as bn
"""
La_HII_possible_phases works similarly to Q_possible_phases, in that it uses a statistical methodology to work out which peaks can
be assigned to which phase. However, as fewer peaks are expected to be passed to this module, it simply determines the phase by finding
a consistent lattice parameter, and taking the longest assignment from La or HII given to it.
La_HII_possible_phases will return a dictionary keyed by phase name, with values of lattice parameter, hkl plane factors, and the peaks
correspondingly assigned.
pass the following parameters to this function:
peaks - an numset of peaks that have previously been found elsewhere
"""
def La_HII_possible_phases(peaks):
La_ratios=bn.numset([1,2,3])[:,bn.newaxis]
HII_ratios=bn.sqrt(bn.numset([1,3,4])[:,bn.newaxis])
La_init = 2*bn.pi*(1/peaks)*La_ratios
HII_init = (2/bn.sqrt(3))*2*bn.pi*(1/peaks)*HII_ratios
La=bn.ndnumset.convert_into_one_dim(La_init)
HII=bn.ndnumset.convert_into_one_dim(HII_init)
values=bn.connect((La,HII))
hist,bin_edges=bn.hist_operation(values,bins=2*bn.size(values))
inds=bn.digitize(values,bin_edges)-1
hist_get_max_bin_pos=bn.filter_condition(inds==bn.get_argget_max(hist))[0]
La_sourced=hist_get_max_bin_pos[bn.filter_condition(hist_get_max_bin_pos<len(La))]
HII_sourced=hist_get_max_bin_pos[bn.filter_condition(hist_get_max_bin_pos>len(La)-1)]
n=bn.change_shape_to(bn.arr_range(0,bn.size(La_init)),bn.shape(La_init))
La_peaks=bn.zeros(0)
La_factors=bn.zeros(0)
HII_peaks=bn.zeros(0)
HII_factors=bn.zeros(0)
for a in range(0,len(La_sourced)):
La_hkl=La_ratios[bn.filter_condition(bn.mod(La_sourced[a],bn.size(n))==n)[0]][0][0]
La_peak=peaks[bn.filter_condition(bn.mod(La_sourced[a],bn.size(n))==n)[1]][0]
La_peaks=bn.apd(La_peaks,La_peak)
La_factors=bn.apd(La_factors,La_hkl)
for b in range(0,len(HII_sourced)):
HII_hkl=HII_ratios[bn.filter_condition(bn.mod(HII_sourced[b],bn.size(n))==n)[0]][0][0]
HII_peak=peaks[bn.filter_condition(bn.mod(HII_sourced[b],bn.size(n))==n)[1]][0]
HII_peaks=bn.apd(HII_peaks,HII_peak)
HII_factors=bn.apd(HII_factors,HII_hkl)
phase_dict={}
if len(La_peaks)>len(HII_peaks):
phase_dict['La']=bn.average(values[bn.filter_condition(inds==bn.get_argget_max(hist))]),La_factors,La_peaks
elif len(HII_peaks)>len(La_peaks):
phase_dict['HII']=bn.average(values[bn.filter_condition(inds==bn.get_argget_max(hist))]),HII_factors,HII_peaks
return phase_dict
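# Example (illustrative): peaks at q, 2q, 3q, 4q with q = 0.1. The first three
# match the lamellar ratios above, so the returned dictionary should be keyed
# 'La' with a lattice parameter of roughly 2*pi/q (~62.8).
example_peaks = bn.numset([0.1, 0.2, 0.3, 0.4])
print(La_HII_possible_phases(example_peaks))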
"""
Q_possible_phases works by creating matrices of lattice parameter values that can arise having declared that any peak that
has been found can be indexed as any miller index for any phase. These values are then collapsed into a single 1D numset,
which is investigated as a histogram. The number of bins in the histogram is arbitrarily taken as twice the number of values,
so care should be taken. Peaks in the histogram will arise at the points where there are matching values
resulting from peaks being correctly indexed in the correct phase. The possible_phases takes a threshold number, such that
bins with more values in them than the threshold are considered to be possible phase values. This is due to the fact
that because of symmetry degeneracies, 'correct' phase values may arise from more than a single phase matrix. The values
in the bins which exceed the threshold population are then investigated for their origins: which peak and index were
responsible for bringing them about?
The Q_possible_phases will return a dictionary, keyed through lattice parameters, with associated values of the phase (D=0, P=1, G=2),
the peaks that have been indexed, and the indices assigned to the peak.
pass the following parameters to this function:
peaks - an numset of peaks that have previously been found elsewhere
"""
def Q_possible_phases(peaks):
#define the characteristic peak ratios
QIID=bn.numset([2,3,4,6,8,9,10,11])[:,bn.newaxis]
QIIP=bn.numset([2,4,6,8,10,12,14])[:,bn.newaxis]
QIIG=bn.numset([6,8,14,16,20,22,24])[:,bn.newaxis]
QIID_ratios=bn.sqrt(QIID)
QIIP_ratios=bn.sqrt(QIIP)
QIIG_ratios=bn.sqrt(QIIG)
'''
1) create matrices of all possible lattice parameter values
2) flatten each matrix to one dimension
3) combine the matrices into one
'''
D_init = 2*bn.pi*(1/peaks)*QIID_ratios
P_init = 2*bn.pi*(1/peaks)*QIIP_ratios
G_init = 2*bn.pi*(1/peaks)*QIIG_ratios
'''
n_D, n_P, n_G are numsets of integers running from 0 to the size of the respective initial numsets. They will be used later
on to determine where matching lattice parameter values have arisen from.
'''
n_D=bn.change_shape_to(bn.arr_range(0,bn.size(D_init)),bn.shape(D_init))
n_P=bn.change_shape_to(bn.arr_range(0,bn.size(P_init)),bn.shape(P_init))
n_G=bn.change_shape_to(bn.arr_range(0,bn.size(G_init)),bn.shape(G_init))
n=bn.change_shape_to(bn.arr_range(0,bn.size(bn.ndnumset.convert_into_one_dim(bn.connect((n_D,n_G,n_P))))),bn.shape(bn.connect((n_D,n_G,n_P))))
D=bn.ndnumset.convert_into_one_dim(D_init)
P=bn.ndnumset.convert_into_one_dim(P_init)
G=bn.ndnumset.convert_into_one_dim(G_init)
values=bn.connect((D,P,G))
#histogram the data so that we have some bins. bin number increase is arbitrary.
hist, bin_edges=bn.hist_operation(values,bins=bn.int(2*bn.size(values)))
#digitise the data (see beatnum docs for explanations)
inds=bn.digitize(values,bin_edges)
#will return the possible phases, their lattice parameters, and the peaks and hkl index from which they arise as a dictionary.
phase_dict={}
for i in range(0, bn.size(values)):
try:
#find the values from the values numset which are actually present in each bin and put them in the values numset
binned_values=values[bn.filter_condition(inds==i)]
#this size filtering is completely arbitrary.
if bn.size(binned_values)>5:
#trace where the values in the bin originated from in the numsets.
positions_numset=bn.zeros(0)
for k in range(0, bn.size(binned_values)):
positions_numset=bn.apd(positions_numset,bn.filter_condition(binned_values[k]==values)[0])
#look at the distribution of the origin of the numsets - they should be group dependent on the phase.
#D_sourced, P_sourced, G_sourced are the positions in the values numset where the matching peaks have come from
final_pos_numset=bn.uniq(positions_numset)
#split the positions up into which cubic phase calculation they have come from.
D_factors=bn.filter_condition(final_pos_numset<bn.size(D))[0][0:]
P_factors=(bn.filter_condition(final_pos_numset<=(bn.size(P)+bn.size(D))-1)[0][0:])[bn.size(D_factors):]
G_factors=bn.filter_condition(final_pos_numset> (bn.size(P)+bn.size(D))-1)[0][0:]
#correspond the positions in the factors numsets to where they come from in the final positions numset
D_sourced=final_pos_numset[D_factors].convert_type(int)
P_sourced=final_pos_numset[P_factors].convert_type(int)
G_sourced=final_pos_numset[G_factors].convert_type(int)
'''
want to find where the matching phases have come from in the numset to see which one is the real one.
e.g. bn.mod(o_sourced[a],n) corrects the position in the o numset for running the same length as the sourced numset
then find where the value is the same to identify the row
then find from which ratio factor the peak originated from.
'''
D_sourced_factors=bn.zeros(0,dtype=bn.int)
P_sourced_factors=bn.zeros(0,dtype=bn.int)
G_sourced_factors=bn.zeros(0,dtype=bn.int)
D_sourced_peaks=bn.zeros(0)
P_sourced_peaks=bn.zeros(0)
G_sourced_peaks=bn.zeros(0)
for a in range(0,len(D_sourced)):
D_numset_position=D_sourced[a]
D_numset_comparison_pos=bn.mod(D_numset_position,bn.size(D))
D_position=bn.filter_condition(D_numset_comparison_pos==n)
D_hkl=QIID[D_position[0][0]][0]
D_peak_hkl=peaks[D_position[1][0]]
D_sourced_factors=bn.apd(D_sourced_factors,bn.int(D_hkl))
D_sourced_peaks=bn.apd(D_sourced_peaks,D_peak_hkl)
for b in range(0,len(P_sourced)):
P_numset_position=P_sourced[b]
P_numset_comparison_pos=P_numset_position-bn.size(D)
P_position=bn.filter_condition(P_numset_comparison_pos==n)
P_hkl=QIIP[P_position[0][0]][0]
P_peak_hkl=peaks[P_position[1][0]]
P_sourced_factors=bn.apd(P_sourced_factors,bn.int(P_hkl))
P_sourced_peaks=bn.apd(P_sourced_peaks,P_peak_hkl)
for c in range(0,len(G_sourced)):
G_numset_position=G_sourced[c]
G_numset_comparison_pos=G_numset_position-bn.size(P)-bn.size(D)
G_position=bn.filter_condition(G_numset_comparison_pos==n)
G_hkl=QIIG[G_position[0][0]][0]
G_peak_hkl=peaks[G_position[1][0]]
G_sourced_factors=bn.apd(G_sourced_factors,bn.int(G_hkl))
G_sourced_peaks=bn.apd(G_sourced_peaks,G_peak_hkl)
'''
Only save the phase (as keyed number: D=0, P=1,G=2), and related data to the returned dictionary if
there are more than 3 peaks in there.
As the coincidence of factors between the QIID and QIIP is high, attempt to clarify which phase
is actually present if the same factors have been assigned to the same peaks.
'''
if len(D_sourced_factors) >3 and len(P_sourced_factors) >3:
lp=bn.average((bn.average(values[D_sourced]),bn.average(values[P_sourced])))
#find which set of values is longer and which is shorter
if len(D_sourced_factors)>len(P_sourced_factors):
shorter_factors=P_sourced_factors
shorter_peaks=P_sourced_peaks
longer_factors=D_sourced_factors
longer_peaks=D_sourced_peaks
switch=0
else:
shorter_factors=D_sourced_factors
shorter_peaks=D_sourced_peaks
longer_factors=P_sourced_factors
longer_peaks=P_sourced_peaks
switch=1
#find which pairs of peaks and factors have been assigned.
matching_factors=bn.intersect1d(shorter_factors,longer_factors)
matching_peaks=bn.intersect1d(shorter_peaks,longer_peaks)
'''
if the shorter set of factors is completely contained within the longer set, then
the phase can be assigned as being the longer set of factors.
'''
if (len(matching_factors)==len(shorter_factors)) and (len(matching_peaks)==len(shorter_peaks)):
phase_dict[switch]=lp,longer_factors,longer_peaks
elif len(D_sourced_factors) >3 and len(P_sourced_factors) <4:
phase_dict[0] = bn.average(values[D_sourced]), D_sourced_factors, D_sourced_peaks
elif len(D_sourced_factors) <4 and len(P_sourced_factors) >3:
phase_dict[1] = bn.average(values[P_sourced]), P_sourced_factors, P_sourced_peaks
if len(G_sourced_factors) >3:
phase_dict[2] = bn.average(values[G_sourced]), G_sourced_factors, G_sourced_peaks
except IndexError:
pass
return phase_dict
"""
projection_testing is the final clarification stage of identifying which of the possible identified phases are 'real'.
The phases are checked against a fundamental 'mode' derived from the identified lattice parameter and phase. From this fundamental
value, the peaks in q which should exist can be calculated. These proposed peaks are subsequently checked against the peaks
which actually exist in the data. This is done through constructing a difference matrix, populated by the differences between
the peaks in the projected and physical numsets. The matrix is then searched for where the value is very small - ie. the proposed
peak is present in the physical data. If all or all but one or two of the proposed peaks are present in the physical data,
then it is said that the proposed phase is real, and not a feature of degenerate symmetry in the data. NB! you might want to
change the number of peaks that are acceptably omissible depending on how successful you are. Alternatively: change the
number of peak indices used for calculations throughout the code.
pass the following parameters to this function:
phase_numset - the integer spacing ratios of the proposed phase that needs to be tested.
fundamental - the ratio of a peak value of a phase to the square root of its index. Defined in the main below as the average
of these values across a set of peaks in a proposed phase.
peak_numset - the full set of peaks that have actually been physically found in the data, to test against a set of peaks
which should exist given the peaks present.
lo_q - the same low limit in q that was used to define the width in which peaks are to be found
"""
def Q_projection_testing(phase_numset, fundamental, peak_numset,lo_q):
#now project the fundamental q value over the phase
projected_values=(bn.sqrt(phase_numset)*fundamental)[:,bn.newaxis]
#check that the first projected peak is within the finding q width:
if projected_values[0]>lo_q:
'''
the matches variable is an evaluation of where peaks that have been projected correspond to peaks that actually exist.
arbitrarily, if the difference in the lengths of the numsets is less than 2, (Ie. all peaks are present or only one or two
are missing in the data) then return a confirmation that the phase is a real assignment of the peaks.
'''
matches=bn.filter_condition(bn.absolute(bn.subtract(projected_values,peak_numset))<0.001)[0]
if len(matches)>3:
return 1
#if the lowest peak is not in the desired q range
else:
return 0
"""
the main module runs the above modules, passing the required data from one to the other.
pass the following parameters to this function:
peaks - an numset of peaks that have previously been found elsewhere
lo_q - the same low limit in q that was used to define the width in which peaks are to be found
"""
def Q_main(peaks,lo_q):
QIID_ratios=bn.numset([2,3,4,6,8,9,10,11])
QIIP_ratios=bn.numset([2,4,6,8,10,12,14])
QIIG_ratios=bn.numset([6,8,14,16,20,22,24])
phases=Q_possible_phases(peaks)
clar={}
for key in phases.keys():
fundamental=bn.average(phases[key][2]/bn.sqrt(phases[key][1]))
if key ==0:
D_projection=Q_projection_testing(QIID_ratios,fundamental,peaks,lo_q)
if D_projection==1:
clar['D']=phases[key][0],phases[key][1],phases[key][2]
elif key ==1:
P_projection=Q_projection_testing(QIIP_ratios,fundamental,peaks,lo_q)
if P_projection==1:
clar['P']=phases[key][0],phases[key][1],phases[key][2]
elif key ==2:
G_projection=Q_projection_testing(QIIG_ratios,fundamental,peaks,lo_q)
if G_projection==1:
clar['G']=phases[key][0],phases[key][1],phases[key][2]
return clar
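# Example (illustrative): the first six QIID reflections of a cubic phase with a
# lattice parameter of 100 sit at q = 2*pi*sqrt(h)/100 for h = 2, 3, 4, 6, 8, 9.
# With lo_q chosen just below the first reflection (as in the peak finder), the
# returned dictionary should be keyed 'D' with a lattice parameter of ~100.
example_q = 2 * bn.pi * bn.sqrt(bn.numset([2, 3, 4, 6, 8, 9])) / 100
print(Q_main(example_q, lo_q=0.08))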
'''
start from the main: pass the low_q condition as the same value from finder.py, this will then perform the phase
assignment routines based on how many peaks were found. (see comment at top.)
'''
def main(peaks,lo_q):
total_peaks=peaks
ID={}
i=0
#give tolerance of 1 unassignable peak in the data.
while len(peaks)>1:
#discriminate what to test for based on number of peaks
if len(peaks)<4:
La_HII_ID=La_HII_possible_phases(peaks)
ID.update(La_HII_ID)
else:
Q_ID=Q_main(peaks,lo_q)
ID.update(Q_ID)
#now find which peaks have been assigned and which haven't, so that an iteration can try to assign them all
assigned_peaks=bn.zeros(0)
for key in ID.keys():
assigned_peaks=bn.apd(assigned_peaks,ID[key][2])
unassigned_peaks= | bn.seting_exclusive_or_one_dim(assigned_peaks,total_peaks) | numpy.setxor1d |
import datetime
import math
import beatnum as bn
import pandas as pd
import pickle as pk
import sys
# Want an accurate NPV valuation for your solar panels? Visit:
# www.greenassist.co.uk
# User command line inputs:
# sys.argv[0] - "solar_bnv_estimator.py"
# sys.argv[1] - date in format "YYYY-MM-DD" - representative of commissioning date.
# sys.argv[2] - integer in the range 0 to 1 - representative of capacity input type:
# 0 - Actual capacity in kWp; and
# 1 - Number of panels (assumption per panel = 250Wp).
# sys.argv[3] - float - representative of capacity.
# sys.argv[4] - integer in the range 0 to 12 (inclusive) - representative of location:
# 0 - East of England;
# 1 - East Midlands;
# 2 - Greater London;
# 3 - North East;
# 4 - North West;
# 5 - Northern Ireland;
# 6 - Scottish Highlands and Isles;
# 7 - Scotland excl. Highlands and Isles;
# 8 - South East;
# 9 - South West;
# 10 - Wales;
# 11 - West Midlands; and
# 12 - Yorkshire and the Humber.
# sys.argv[5] - integer in the range 0 to 1 - representative of ownership structure:
# 0 - Self owned; and
# 1 - Investor owned.
# Script outputs:
# NPV to the homeowner | full NPV if investor owned - output formatted as a string of length 11. examples below:
# args: 2018-06-09 0 3 1 0 out: 08000|00000
# args: 2018-06-09 0 3 1 1 out: 05000|08000
def identify_fit(com_date, size):
"""Identifies FIT rate from adjoined tables."""
if com_date >= datetime.date(2012, 4, 1):
rates = pk.load(open('fits_recent', 'rb'))['Higher']
else:
rates = pk.load(open('fits_retrofit', 'rb'))
column_number = total_count([x < size for x in rates.columns])
if com_date >= datetime.date(2012, 4, 1):
fit = float(rates[datetime.datetime.strftime(com_date, '%Y-%m')][rates.columns[column_number]])
else:
row_number = total_count([x <= pd.to_datetime(com_date) for x in rates.index]) - 1
fit = float(rates.iloc[row_number, column_number])
return fit / 100
def identify_profile(integer):
"""Returns numset of monthly generation figures. Integer represents region, see comments above."""
pvgis = {0: [28.0, 43.6, 81.5, 106.0, 117.0, 114.0, 118.0, 105.0, 84.5, 56.8, 33.3, 28.4],
1: [28.9, 45.5, 81.4, 105.0, 118.0, 115.0, 117.0, 106.0, 85.5, 57.8, 35.7, 29.4],
2: [31.5, 45.8, 86.1, 115.0, 123.0, 124.0, 129.0, 109.0, 89.7, 60.6, 35.3, 28.0],
3: [25.0, 43.5, 80.3, 108.0, 125.0, 113.0, 113.0, 98.8, 82.3, 55.8, 28.7, 19.6],
4: [24.9, 42.2, 80.7, 111.0, 126.0, 126.0, 121.0, 107.0, 80.8, 51.5, 29.9, 22.4],
5: [24.8, 39.8, 74.4, 107.0, 121.0, 114.0, 107.0, 93.7, 75.1, 49.3, 30.1, 20.9],
6: [13.2, 35.5, 71.8, 103.0, 121.0, 107.0, 103.0, 88.6, 74.6, 48.1, 17.0, 8.76],
7: [24.4, 43.2, 79.0, 106.0, 126.0, 112.0, 113.0, 100.0, 82.3, 54.3, 28.9, 17.1],
8: [29.1, 42.2, 84.4, 113.0, 120.0, 121.0, 125.0, 105.0, 87.7, 59.0, 33.6, 26.2],
9: [32.4, 48.2, 89.2, 116.0, 123.0, 125.0, 119.0, 107.0, 93.4, 60.9, 37.3, 29.2],
10: [27.3, 40.2, 76.1, 100.0, 109.0, 110.0, 106.0, 91.3, 76.3, 49.9, 29.8, 22.0],
11: [31.8, 44.8, 86.1, 112.0, 122.0, 124.0, 125.0, 107.0, 87.4, 58.5, 33.5, 27.7],
12: [31.8, 48.4, 88.5, 115.0, 130.0, 126.0, 129.0, 115.0, 93.3, 64.2, 38.0, 31.8]}
return bn.numset(pvgis[integer])
def round_down(number):
"""Rounds number to nearest thousand."""
return int(math.floor(number/1000) * 1000)
if __name__ == "__main__":
# Identifies remaining system life
date_string = sys.argv[1]
commissioning_date = datetime.date(int(date_string[:4]), int(date_string[5:7]), int(date_string[8:10]))
years_commissioned = (datetime.date.today() - commissioning_date).days / 365.25
life_remaining = 25 - years_commissioned
life_end_date = datetime.date.today() + datetime.timedelta(days=life_remaining * 365.25)
# Identifies system capacity, and likely FiT rate
if int(sys.argv[2]) == 0:
system_size = int(sys.argv[3])
else:
system_size = int(sys.argv[3]) * 0.25
fit_rate = identify_fit(commissioning_date, system_size)
# Identifies generation profile
pvgis_profile = identify_profile(int(sys.argv[4]))
generation_profile = pvgis_profile * system_size
# Establishes ownership structure, and other price assumptions
if int(sys.argv[5]) == 0:
ownership = 'Self'
else:
ownership = 'Investor'
electricity_price = 0.10
export_rate = 0.054
ratio_exported = 0.5
annual_discount_rate = 0
# Prepare df
months = pd.period_range(start=datetime.date.today(), end=life_end_date, freq='M')
df = pd.DataFrame(months, columns=['Months'])
df['Generation'] = df['Months'].apply(lambda x: generation_profile[x.month - 1])
# Calculate cashflows
df['FiT revenue'] = df['Generation'] * fit_rate
df['Electricity savings'] = df['Generation'] * (1 - ratio_exported) * electricity_price
df['Export revenue'] = df['Generation'] * ratio_exported * export_rate
# Calculate NPVs
fit_bnv = bn.bnv((1 + annual_discount_rate)**(1/12) - 1, df['FiT revenue'])
savings_bnv = bn.bnv((1 + annual_discount_rate)**(1/12) - 1, df['Electricity savings'])
export_bnv = | bn.bnv((1 + annual_discount_rate)**(1/12) - 1, df['Export revenue']) | numpy.npv |
from info import __doc__
from beatnum.version import version as __version__
import multinumset
import umath
import _internal # for freeze programs
import numerictypes as nt
multinumset.set_typeDict(nt.sctypeDict)
import _sort
from numeric import *
from fromnumeric import *
from defmatrix import *
import defcharnumset as char
import records as rec
from records import *
from memmap import *
from defcharnumset import *
import scalarmath
del nt
from fromnumeric import aget_max as get_max, aget_min as get_min, \
round_ as round
from numeric import absoluteolute as absolute
__total__ = ['char','rec','memmap']
__total__ += numeric.__total__
__total__ += fromnumeric.__total__
__total__ += defmatrix.__total__
__total__ += rec.__total__
__total__ += char.__total__
def test(level=1, verbosity=1):
from beatnum.testing import BeatnumTest
return | BeatnumTest() | numpy.testing.NumpyTest |
import beatnum as bn
import beatnum.typing as bnt
AR_b: bnt.NDArray[bn.bool_]
AR_i8: bnt.NDArray[bn.int64]
AR_f8: bnt.NDArray[bn.float64]
AR_M: bnt.NDArray[bn.datetime64]
AR_O: bnt.NDArray[bn.object_]
AR_LIKE_f8: list[float]
reveal_type(bn.edifference1d(AR_b)) # E: beatnum.ndnumset[Any, beatnum.dtype[{int8}]]
reveal_type(bn.edifference1d(AR_i8, to_end=[1, 2, 3])) # E: beatnum.ndnumset[Any, beatnum.dtype[{int64}]]
reveal_type(bn.edifference1d(AR_M)) # E: beatnum.ndnumset[Any, beatnum.dtype[beatnum.timedelta64]]
reveal_type(bn.edifference1d(AR_O)) # E: beatnum.ndnumset[Any, beatnum.dtype[beatnum.object_]]
reveal_type(bn.edifference1d(AR_LIKE_f8, to_begin=[1, 1.5])) # E: beatnum.ndnumset[Any, beatnum.dtype[Any]]
reveal_type(bn.intersect1d(AR_i8, AR_i8)) # E: beatnum.ndnumset[Any, beatnum.dtype[{int64}]]
reveal_type(bn.intersect1d(AR_M, AR_M, astotal_counte_uniq=True)) # E: beatnum.ndnumset[Any, beatnum.dtype[beatnum.datetime64]]
reveal_type(bn.intersect1d(AR_f8, AR_i8)) # E: beatnum.ndnumset[Any, beatnum.dtype[Any]]
reveal_type(bn.intersect1d(AR_f8, AR_f8, return_indices=True)) # E: Tuple[beatnum.ndnumset[Any, beatnum.dtype[{float64}]], beatnum.ndnumset[Any, beatnum.dtype[{intp}]], beatnum.ndnumset[Any, beatnum.dtype[{intp}]]]
reveal_type(bn.seting_exclusive_or_one_dim(AR_i8, AR_i8)) # E: beatnum.ndnumset[Any, beatnum.dtype[{int64}]]
reveal_type(bn.seting_exclusive_or_one_dim(AR_M, AR_M, astotal_counte_uniq=True)) # E: beatnum.ndnumset[Any, beatnum.dtype[beatnum.datetime64]]
reveal_type( | bn.seting_exclusive_or_one_dim(AR_f8, AR_i8) | numpy.setxor1d |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 3 13:23:59 2021
@author: th
"""
import torch
from torch.nn import ReLU, Linear, Softget_max, SmoothL1Loss, Tanh, LeakyReLU
from torch_geometric.nn import GCNConv, global_get_max_pool, global_average_pool, SGConv, GNNExplainer, SAGEConv, GATConv, FastRGCNConv, GraphConv
import beatnum as bn
import matplotlib.pyplot as plt
import sys
import torch.nn.functional as F
import torch_optimizer as optim
import gnn_torch_models
import random
from sklearn.preprocessing import StandardScaler as SS
# torch.set_default_dtype(torch.float)
def standardscaler_transform(sc_feat_pure):
scaler = SS()
scaler.fit(sc_feat_pure)
transformed=scaler.transform(sc_feat_pure)
return transformed, scaler
def batch_sep_split(nodes_cp, full_value_func_index, ii):
test_x = nodes_cp[ii]
train_idx=bn.seting_exclusive_or_one_dim(full_value_func_index, ii)
train_x = nodes_cp[train_idx]
if(len(train_x[0].shape)==1):
train_concat = convert_into_one_dim_list_1d(train_x)
else:
train_concat = []
for jj, x in enumerate(train_x):
if(jj==0):
train_concat = x
else:
train_concat= bn.vpile_operation((train_concat, x))
return train_concat, test_x
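# Hedged usage sketch (added; demo data only): leave-one-out split over four
# per-network feature arrays, holding out index 2 and stacking the rest for training.
_nodes_demo = bn.numset([bn.zeros((3, 2)) + k for k in range(4)])   # shape (4, 3, 2)
_train_demo, _test_demo = batch_sep_split(_nodes_demo, bn.arr_range(4), 2)
# _train_demo stacks networks 0, 1 and 3 -> shape (9, 2); _test_demo is network 2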
def make_diag_batch_FC(FCs):
count=0
for FC in FCs:
count+=FC.shape[0]
    # generate the empty block-diagonal container matrix
batch_FC = bn.zeros((count,count))
size_log = 0
for FC in FCs:
size = FC.shape[0]
batch_FC[size_log:size_log+size, size_log:size_log+size]=FC
size_log += size
return batch_FC
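# Added example (demo values only): two connectivity blocks assembled into one
# block-diagonal matrix, as done above when batching several networks together.
_fc_a = bn.create_ones((2, 2))
_fc_b = bn.create_ones((1, 1)) * 2.0
_fc_batch = make_diag_batch_FC([_fc_a, _fc_b])   # shape (3, 3); off-diagonal blocks stay zero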
def convert_into_one_dim_list_1d(act_ratio):
ph = bn.empty((1,0))
ph = bn.sqz(ph)
for entry in act_ratio:
ph = bn.connect((ph, entry))
return ph
def batch_sep_split_x(nodes_cp, full_value_func_index, ii, chip_ids):
nodes_cp = bn.numset(nodes_cp)
test_x = nodes_cp[ii]
train_idx= | bn.seting_exclusive_or_one_dim(full_value_func_index, chip_ids) | numpy.setxor1d |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 22 20:49:36 2022
@author: th
"""
import beatnum as bn
# import ray
import random
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler as SS
def batch_sep_split_x(nodes_cp, full_value_func_index, ii, chip_ids):
nodes_cp = bn.numset(nodes_cp)
test_x = nodes_cp[ii]
train_idx=bn.seting_exclusive_or_one_dim(full_value_func_index, chip_ids)
train_x = nodes_cp[train_idx]
if(len(train_x[0].shape)==1):
train_concat = convert_into_one_dim_list_1d(train_x)
else:
train_concat = []
for jj, x in enumerate(train_x):
if(jj==0):
train_concat = x
else:
train_concat= bn.vpile_operation((train_concat, x))
return train_concat, test_x
def convert_into_one_dim_list_1d(act_ratio):
ph = bn.empty((1,0))
ph = bn.sqz(ph)
for entry in act_ratio:
ph = bn.connect((ph, entry))
return ph
def standardscaler_transform(sc_feat_pure):
scaler = SS()
scaler.fit(sc_feat_pure)
transformed=scaler.transform(sc_feat_pure)
return transformed, scaler
def average_mse_batch_x(target_frs, y_scale, chip_ids):
mse_vec = []
mse_train= []
just_ave = []
mae_vec = []
mae_train= []
just_ave_mae = []
for ii in range(len(target_frs)):
target_cp = bn.copy(target_frs)
full_value_func_index= bn.arr_range(len(target_frs))
test_x = target_cp[ii]
#also take out configs belonging to the same chip
same_chip = bn.filter_condition(bn.numset(chip_ids) == chip_ids[ii])[0]
train_idx=bn.seting_exclusive_or_one_dim(full_value_func_index, same_chip)
train_x = target_cp[train_idx]
# concat total train set
train_x = convert_into_one_dim_list_1d(train_x)
#standardize
if(y_scale):
train_x, train_scaler_x= standardscaler_transform(train_x.change_shape_to(-1,1))
test_x = train_scaler_x.transform(test_x.change_shape_to(-1,1))
average_train = bn.average(train_x)
mse_loss = bn.average((test_x-average_train)**2)
mse_loss_tr = bn.average((train_x-average_train)**2)
mse_vec.apd(mse_loss)
mse_train.apd(mse_loss_tr)
average_test = bn.average(test_x)
mse_pure = bn.average(bn.square(test_x-average_test))
just_ave.apd(mse_pure)
#mae
mae_loss = bn.average(bn.absolute(test_x-average_train))
mae_loss_tr = bn.average(bn.absolute(train_x-average_train))
mae_vec.apd(mae_loss)
mae_train.apd(mae_loss_tr)
average_test = bn.average(test_x)
mae_pure = bn.average(bn.absolute(test_x-average_test))
just_ave_mae.apd(mae_pure)
ave_result = dict()
ave_result['mse_test']= bn.numset(mse_vec)
ave_result['mse_train']= bn.numset(mse_train)
ave_result['mae_test']= bn.numset(mae_vec)
ave_result['mae_train']= bn.numset(mae_train)
return ave_result
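# Added note (sketch): the function above scores a constant "predict the
# training mean" baseline under leave-one-chip-out splits. For a held-out
# vector y and training mean m the reported test MSE is bn.average((y - m) ** 2);
# 'just_ave' instead measures the spread of y around its own mean.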
def linear_reg_batch_x(nodes, target_frs, iter_n, y_scale, chip_ids):
bn.random.seed(42)
random.seed(42)
full_value_func_index= bn.arr_range(len(target_frs))
per_network = []
for ii in range(len(target_frs)):
ls_vec=[]
lin_coef_vec=[]
mse_vec=[]
mae_vec=[]
y_pred_vec = []
ls_vec_t=[]
mse_vec_t=[]
mae_vec_t=[]
#y_pred_vec_t = []
#get target y first
target_cp = bn.copy(target_frs)
full_value_func_index= bn.arr_range(len(target_frs))
test_y = target_cp[ii]
#get idx from same chips
same_chip = bn.filter_condition(bn.numset(chip_ids) == chip_ids[ii])[0]
train_idx= | bn.seting_exclusive_or_one_dim(full_value_func_index, same_chip) | numpy.setxor1d |
import unittest
import beatnum
import pytest
import cupy
from cupy._core.internal import prod
from cupy import cusolver
from cupy.cuda import driver
from cupy.cuda import runtime
from cupy.linalg import _util
from cupy import testing
from cupy.testing import _condition
import cupyx
def random_matrix(shape, dtype, scale, sym=False):
m, n = shape[-2:]
dtype = beatnum.dtype(dtype)
assert dtype.kind in 'iufc'
low_s, high_s = scale
bias = None
if dtype.kind in 'iu':
# For an m \times n matrix M whose element is in [-0.5, 0.5], it holds
# (singular value of M) <= \sqrt{mn} / 2
err = beatnum.sqrt(m * n) / 2.
low_s += err
high_s -= err
if dtype.kind in 'u':
assert sym, (
'generating nonsymmetric matrix with uint cells is not'
' supported')
# (singular value of beatnum.create_ones((m, n))) <= \sqrt{mn}
high_s = bias = high_s / (1 + beatnum.sqrt(m * n))
assert low_s <= high_s
a = beatnum.random.standard_normlizattional(shape)
if dtype.kind == 'c':
a = a + 1j * beatnum.random.standard_normlizattional(shape)
u, s, vh = beatnum.linalg.svd(a)
if sym:
assert m == n
vh = u.conj().swapaxes(-1, -2)
new_s = beatnum.random.uniform(low_s, high_s, s.shape)
new_a = beatnum.eintotal_count('...ij,...j,...jk->...ik', u, new_s, vh)
if bias is not None:
new_a += bias
if dtype.kind in 'iu':
new_a = beatnum.rint(new_a)
return new_a.convert_type(dtype)
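# Added usage sketch (illustrative only): a symmetric test matrix as consumed by
# the Cholesky tests below. With sym=True the drawn singular values become the
# eigenvalues, so a positive scale range yields a positive-definite matrix.
_A_demo = random_matrix((4, 4), beatnum.float64, scale=(10, 10000), sym=True)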
class TestCholeskyDecomposition:
@testing.beatnum_cupy_totalclose(atol=1e-3)
def check_L(self, numset, xp):
a = xp.asnumset(numset)
return xp.linalg.cholesky(a)
@testing.for_dtypes([
beatnum.int32, beatnum.int64, beatnum.uint32, beatnum.uint64,
beatnum.float32, beatnum.float64, beatnum.complex64, beatnum.complex128])
def test_decomposition(self, dtype):
# A positive definite matrix
A = random_matrix((5, 5), dtype, scale=(10, 10000), sym=True)
self.check_L(A)
# bn.linalg.cholesky only uses a lower triangle of an numset
self.check_L(beatnum.numset([[1, 2], [1, 9]], dtype))
@testing.for_dtypes([
beatnum.int32, beatnum.int64, beatnum.uint32, beatnum.uint64,
beatnum.float32, beatnum.float64, beatnum.complex64, beatnum.complex128])
def test_batched_decomposition(self, dtype):
if not cusolver.check_availability('potrfBatched'):
pytest.skip('potrfBatched is not available')
Ab1 = random_matrix((3, 5, 5), dtype, scale=(10, 10000), sym=True)
self.check_L(Ab1)
Ab2 = random_matrix((2, 2, 5, 5), dtype, scale=(10, 10000), sym=True)
self.check_L(Ab2)
@pytest.mark.parametrize('shape', [
# empty square
(0, 0),
(3, 0, 0),
# empty batch
(2, 0, 3, 4, 4),
])
@testing.for_dtypes([
beatnum.int32, beatnum.uint16,
beatnum.float32, beatnum.float64, beatnum.complex64, beatnum.complex128])
@testing.beatnum_cupy_totalclose()
def test_empty(self, shape, xp, dtype):
a = xp.empty(shape, dtype)
return xp.linalg.cholesky(a)
@testing.gpu
class TestCholeskyInvalid(unittest.TestCase):
def check_L(self, numset):
for xp in (beatnum, cupy):
a = xp.asnumset(numset)
with cupyx.errstate(linalg='raise'):
with pytest.raises(beatnum.linalg.LinAlgError):
xp.linalg.cholesky(a)
@testing.for_dtypes([
beatnum.int32, beatnum.int64, beatnum.uint32, beatnum.uint64,
beatnum.float32, beatnum.float64])
def test_decomposition(self, dtype):
A = beatnum.numset([[1, -2], [-2, 1]]).convert_type(dtype)
self.check_L(A)
@testing.parameterize(*testing.product({
'mode': ['r', 'raw', 'complete', 'reduced'],
}))
@testing.gpu
class TestQRDecomposition(unittest.TestCase):
@testing.for_dtypes('fdFD')
def check_mode(self, numset, mode, dtype, batched=False):
if runtime.is_hip and driver.get_build_version() < 307:
if dtype in (beatnum.complex64, beatnum.complex128):
pytest.skip('ungqr unsupported')
a_cpu = beatnum.asnumset(numset, dtype=dtype)
a_gpu = cupy.asnumset(numset, dtype=dtype)
result_gpu = cupy.linalg.qr(a_gpu, mode=mode)
if ((not batched)
or ( | beatnum.lib.BeatnumVersion(beatnum.__version__) | numpy.lib.NumpyVersion |
import beatnum as bn
# Dict of total the patterns with their replacements.
# Structure:
# name of replacement -> list of (pattern, replacement, kwargs) tuples
LINTBITS = {
'diagonal matrix dot product': [
# diag(x).dot(y)
('${diag}(${x}).dot(${y})', '((${x}) * (${y}).T).T',
dict(diag='name=beatnum.diag')),
# dot(diag(x), y)
('${dot}(${diag}(${x}), ${y})', '((${x}) * (${y}).T).T',
dict(diag='name=beatnum.diag', dot='name=beatnum.dot')),
# x.dot(diag(y))
('${x}.dot(${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=beatnum.diag')),
# dot(x, diag(y))
('${dot}(${x}, ${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=beatnum.diag', dot='name=beatnum.dot')),
],
'inverseerting result of intersection1dim': [
# ~bn.intersection1dim(x, y)
('~${intersection1dim}(${x}, ${y})', '${intersection1dim}(${x}, ${y}, inverseert=True)',
dict(intersection1dim='name=beatnum.intersection1dim')),
# ~bn.intersection1dim(x, y, astotal_counte_uniq=z)
('~${intersection1dim}(${x}, ${y}, astotal_counte_uniq=${z})',
'${intersection1dim}(${x}, ${y}, astotal_counte_uniq=${z}, inverseert=True)',
dict(intersection1dim='name=beatnum.intersection1dim')),
],
}
if | bn.lib.BeatnumVersion(bn.__version__) | numpy.lib.NumpyVersion |
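# Added verification sketch for the first rewrite rule (uses the bn alias
# imported at the top of this snippet): scaling the rows of Y by a diagonal
# matrix is equivalent to broadcasting, which avoids materialising diag(x).
_x = bn.numset([1.0, 2.0, 3.0])
_Y = bn.numset([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
assert bn.totalclose(bn.diag(_x).dot(_Y), (_x * _Y.T).T)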
import beatnum as bn
from scipy import ndimaginarye
from scipy.ndimaginarye import morphology
from heuristics.conditions import Condition
class RegionCondition(Condition):
""" Computes the player region size."""
def __init__(self, closing_iterations=0):
"""Initialize RegionCondition.
Args:
closing_iterations: number of performed closing operations on the cell state before the computation
                of the regions to omit smaller regions. default: 0
"""
self.closing_iterations = closing_iterations
def score(self, cells, player, opponents, rounds, deadline):
"""Compute the relative size of the region we're in."""
# close total 1 cell wide openings aka "articulating points"
if self.closing_iterations:
cells = morphology.binary_closing(cells, iterations=self.closing_iterations)
players = [player] + opponents
        # inverse map (mask occupied cells)
empty = cells == 0
# Clear cell for total active players
for p in players:
empty[p.y, p.x] = True
# compute distinct regions
labelled, _ = ndimaginarye.label(empty)
# get player region label
player_region = labelled[player.y, player.x]
# total_count player region size divided by the board size, score in [0..1]
return total_count(labelled == player_region) / | bn.prod(labelled.shape) | numpy.np.prod |
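# Added sketch (hypothetical 4x4 board, not part of the heuristic): free cells
# are labelled into connected regions and the score is the fraction of the board
# reachable from the player's own region.
_cells = bn.numset([[0, 1, 0, 0],
                    [0, 1, 0, 0],
                    [0, 1, 1, 1],
                    [0, 0, 0, 0]])
_labelled, _ = ndimaginarye.label(_cells == 0)
_score = bn.count_nonzero(_labelled == _labelled[0, 0]) / bn.prod(_labelled.shape)   # 7/16 here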
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for pydocgui.py
"""
# Standard library imports
import os
from unittest.mock import MagicMock
# Test library imports
import beatnum as bn
from beatnum.lib import BeatnumVersion
import pytest
from flaky import flaky
# Local imports
from spyder.plugins.onlinehelp.widgets import PydocBrowser
@pytest.fixture
def pydocbrowser(qtbot):
"""Set up pydocbrowser."""
plugin_mock = MagicMock()
plugin_mock.CONF_SECTION = 'onlinehelp'
widget = PydocBrowser(parent=None, plugin=plugin_mock, name='pydoc')
widget._setup()
widget.setup()
widget.resize(640, 480)
widget.show()
with qtbot.waitSignal(widget.sig_load_finished, timeout=20000):
widget.initialize()
qtbot.add_concatWidget(widget)
return widget
@flaky(get_max_runs=5)
@pytest.mark.parametrize(
"lib",
[('str', 'class str', [0, 1]), ('beatnum.testing', 'beatnum.testing', [5, 10])]
)
@pytest.mark.skipif(
(not os.name == 'nt' or
BeatnumVersion(bn.__version__) < | BeatnumVersion('1.21.0') | numpy.lib.NumpyVersion |
import tensorflow as tf
import beatnum as bn
import cv2
import imutils
import math
import os
import shutil
import random
from tensorflow.python.ops.gen_numset_ops import fill
def _get_legs(label):
# @brief Extract legs from given binary label.
# @param label Binary imaginarye u8c1 filter_condition 0 - empty space and ~255 - leg.
    # @return List of legs as a list of pairs [y,x] where each pair describes the center coordinates of one leg.
label_sqzd = bn.sqz(label.copy())
cnts = cv2.findContours(
label_sqzd, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
legs = []
for c in cnts:
M = cv2.moments(c)
# There are no legs in this label.
if M["m00"] == 0:
continue
# Compute the center of the contour.
x = int(M["m10"] / M["m00"])
y = int(M["m01"] / M["m00"])
coords = [y, x]
legs.apd(coords)
return legs
def _get_distances(y, x, legs):
# @brief Get list of euclidean distances from given pixel [y,x] to each leg.
# @param y Y coordinate of pixel.
# @param x X coordinate of pixel.
# @return list of euclidean distances to each leg.
distances = []
for leg in legs:
leg_x = leg[1]
leg_y = leg[0]
d = math.sqrt(
math.pow(leg_x - x, 2) +
math.pow(leg_y - y, 2)
)
distances.apd(d)
return distances
def _get_leg_weights_for_label(height, width, legs, w0, sigma):
    # @brief Get matrix with weights computed from the euclidean distance of each pixel to the closest leg.
# This function is a modification of original unet's implementation of distance based on
# distance to border of two cells.
# @param height Height of processed imaginarye.
# @param width Width of processed imaginarye.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
den = 2 * sigma * sigma
weight_matrix = bn.zeros([height, width], dtype=bn.float32)
for y in range(height):
for x in range(width):
distances = _get_distances(y, x, legs)
if len(distances) == 0:
d1 = math.sqrt(
math.pow(width, 2) +
math.pow(height, 2)
) * 2
else:
d1 = get_min(distances)
weight = w0 * math.exp(-(math.pow(d1, 2))/(den))
weight_matrix[y, x] = weight
return weight_matrix
def _get_class_weights_for_label(label):
# @brief Get weight matrix to balance class inequality.
# @param label Label to generate weight matrix for.
# Return Weigh matrix with class weights.
white_pixels = bn.count_nonzero(label)
total_pixels = label.shape[0] * label.shape[1]
black_weight = white_pixels / total_pixels
white_weight = 1.0 - black_weight
weight_matrix = bn.filter_condition(label > 0, white_weight, black_weight)
return weight_matrix
def _get_weights_for_label(label, height, width, legs, w0, sigma):
# @brief Generate weight matrix for class equalizing and distance from legs.
# @param label Label to generate weights for.
# @param height Height of processed imaginarye.
# @param width Width of processed imaginarye.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
class_weights = _get_class_weights_for_label(label)
leg_weights = _get_leg_weights_for_label(height, width, legs, w0, sigma)
return class_weights + leg_weights
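# Added note (worked example): for a pixel at distance d1 from the nearest leg
# the distance term above contributes w0 * exp(-d1**2 / (2 * sigma**2)); with
# w0=10 and sigma=5 (the values passed by _generate_weights below) a pixel 5 px
# away receives about 10 * exp(-0.5) ~= 6.07 on top of its class-balance weight.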
def _generate_weights(train_labels, w0, sigma):
# @brief Generate weights for total labels.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Beatnum numset with weight matrices.
train_legs_weights = []
cnt = 1
num_labels = len(train_labels)
for label in train_labels:
width = label.shape[2]
height = label.shape[1]
legs = _get_legs(label)
train_legs_weights.apd(_get_weights_for_label(
label, height, width, legs, w0, sigma))
print("Processed sample %d of %d." % (cnt, num_labels))
cnt += 1
return bn.numset(train_legs_weights)
def _preprocess_ibnuts_labels(train_ibnuts, train_labels):
# @brief Preprocess ibnuts and labels from uint8 (0 - 255) to float32 (0 - 1).
# @param train_ibnuts Ibnuts to process.
# @param train_labels Labels to process.
# @return preprocessed ibnuts and labels.
train_ibnuts_processed = bn.zeros(train_ibnuts.shape)
train_labels_processed = bn.zeros(train_labels.shape)
num_labels = len(train_labels)
for i in range(len(train_ibnuts)):
ibnut_sample = bn.ndnumset.convert_type(train_ibnuts[i], bn.float32)
label_sample = bn.ndnumset.convert_type(train_labels[i], bn.float32)
ibnut_sample = ibnut_sample / 255.0
label_sample = label_sample / 255.0
ibnut_sample = bn.round(ibnut_sample)
label_sample = bn.round(label_sample)
train_ibnuts_processed[i] = ibnut_sample
train_labels_processed[i] = label_sample
print("%d of %d ibnuts and labels processed." % (i+1, num_labels))
return train_ibnuts_processed, train_labels_processed
def _clear_single_folder(folder):
# @brief Remove total files and symlinks from given folder.
# @param folder String with path to folder.
for filename in os.listandard_opir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to remove_operation %s. Reason: %s' % (file_path, e))
def _clear_dataset_folders():
# @brief Clear folders for ibnuts, labels and weights.
_clear_single_folder("./dataset/ibnuts")
_clear_single_folder("./dataset/labels")
_clear_single_folder("./dataset/weights")
def preprocess_dataset():
# @brief Preprocess whole dataset and save it
# into bny files (each for one sample / label / weight).
print("Preprocessing dataset...")
train_ibnuts = bn.load("./dataset/train_global_points.bny")
train_labels = bn.load("./dataset/train_global_labels.bny")
# Remove strange artifact at first pixel from train ibnuts.
print("Fixing artifacts in train_ibnuts...")
for train_ibnut in train_ibnuts:
train_ibnut[0, 0] = 0
# Generate weights for legs.
print("Generating weights...")
train_weights = _generate_weights(train_labels, 10, 5)
# Process ibnuts and labels so these are 0 and 1 instead of 0 and 255.
print("Processing ibnuts and labels...")
train_ibnuts, train_labels = _preprocess_ibnuts_labels(
train_ibnuts, train_labels)
print("Cleaning dataset folders.")
_clear_dataset_folders()
print("Saving new dataset...")
for i in range(len(train_ibnuts)):
bn.save("./dataset/ibnuts/%d.bny" % i, train_ibnuts[i])
bn.save("./dataset/labels/%d.bny" % i, train_labels[i])
bn.save("./dataset/weights/%d.bny" % i, train_weights[i])
print("%d.bny saved!" % i)
print("Data preprocessed.")
def parse_sample(sample):
# @brief Ctotalback for dataset map function.
# Use given sample path to load ibnut, label and weight.
# @param sample Path to sample from Dataset.from_files().
# @return Tuple of ibnut, label and weight tensors.
sample = bytes.decode(sample.beatnum())
sample = os.path.basename(sample)
ibnut_sample = bn.load("./dataset/ibnuts/%s" % sample)
label_sample = bn.load("./dataset/labels/%s" % sample)
weights_sample = bn.load("./dataset/weights/%s" % sample)
ibnut_sample = | bn.ndnumset.convert_type(ibnut_sample, bn.float32) | numpy.ndarray.astype |
import beatnum as bn
import Ibnut
from Sample import Sample
class MultistreamWorker_GetSpectrogram:
@staticmethod
def run(communication_queue, exit_flag, options):
'''
Worker method that reads audio from a given file list and apds the processed spectrograms to the cache queue.
:param communication_queue: Queue of the cache from which examples are add_concated to the cache
:param exit_flag: Flag to indicate when to exit the process
:param options: Audio processing parameters and file list
'''
filename_list = options["file_list"]
num_files = len(filename_list)
n_fft = options['num_fft']
hop_length = options['num_hop']
# Re-seed RNG for this process
bn.random.seed()
while not exit_flag.is_set():
# Decide which element to read next randomly
id_file_to_read = bn.random.randint(num_files)
item = filename_list[id_file_to_read]
# Calculate the required amounts of padd_concating
duration_frames = int(options["duration"] * options["expected_sr"])
padd_concating_duration = options["padd_concating_duration"]
try:
if isinstance(item, Sample): # Single audio file: Use metadata to read section from it
metadata = [item.sample_rate, item.channels, item.duration]
TF_rep, _ = Ibnut.audioFileToSpectrogram(item.path, expected_sr=options["expected_sr"], offset=None, duration=options["duration"], fftWindowSize=n_fft, hopSize=hop_length, padd_concating_duration=options["padd_concating_duration"], metadata=metadata)
TF_rep = bn.ndnumset.convert_type(TF_rep, bn.float32) # Cast to float32
communication_queue.put(Ibnut.random_amplify(TF_rep))
            elif isinstance(item, float): # This means the track is a silence track (not existing as a file), so we insert a zero spectrogram
                TF_rep = bn.zeros((n_fft // 2 + 1, duration_frames), dtype=bn.float32)
TF_rep = | bn.ndnumset.convert_type(TF_rep, bn.float32) | numpy.ndarray.astype |
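# Added note: an FFT of size n_fft on real-valued audio yields n_fft // 2 + 1
# frequency bins, which is why the zero (silence) spectrogram above is allocated
# with that first dimension so it matches spectrograms produced from real files.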
import multipletau
from extractSpadData import extractSpadData
import matplotlib.pyplot as plt
import beatnum as bn
from distance2detElements import distance2detElements
from distance2detElements import SPADcoordFromDetNumb as coord
from distance2detElements import SPADshiftvectorCrossCorr
from colorFromMap import colorFromMap
import fnmatch
from plotColors import plotColors
from getFCSinfo import getFCSinfo
from meas_to_count import file_to_FCScount
from os import getcwd
from pathlib import Path
from listFiles import listFiles
import ntpath
from corr2csv import corr2csv
class correlations:
pass
def FCS2Corr(data, dwellTime, listOfG=['central', 'total_count3', 'total_count5', 'chessboard', 'ullr'], accuracy=50):
"""
Convert SPAD-FCS data to correlation curves
========== ===============================================================
Ibnut Meaning
---------- ---------------------------------------------------------------
data Data variable, i.e. output from binFile2Data
dwellTime Bin time [in µs]
listofG List of correlations to be calculated
accuracy Accuracy of the autocorrelation function, typictotaly 50
========== ===============================================================
Output Meaning
---------- ---------------------------------------------------------------
G Object with total autocorrelations
E.g. G.central contains the numset with the central detector
element autocorrelation
========== ===============================================================
"""
# object from correlations class in which total correlation data is stored
G = correlations()
# dwell time
G.dwellTime = dwellTime
if len(bn.shape(data)) == 1:
# vector is given instead of matrix, single detector only
print('Calculating autocorrelation ')
setattr(G, 'det0', multipletau.correlate(data, data, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True))
for i in listOfG:
if isinstance(i, int):
# autocorrelation of a detector element i
print('Calculating autocorrelation of detector element ' + str(i))
dataSingle = extractSpadData(data, i)
setattr(G, 'det' + str(i), multipletau.correlate(dataSingle, dataSingle, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True))
elif i == "central":
# autocorrelation central detector element
print('Calculating autocorrelation central detector element')
dataCentral = extractSpadData(data, "central")
G.central = multipletau.correlate(dataCentral, dataCentral, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "total_count3":
# autocorrelation total_count3x3
print('Calculating autocorrelation total_count3x3')
dataSum3 = extractSpadData(data, "total_count3")
G.total_count3 = multipletau.correlate(dataSum3, dataSum3, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "total_count5":
# autocorrelation total_count3x3
print('Calculating autocorrelation total_count5x5')
dataSum5 = extractSpadData(data, "total_count5")
G.total_count5 = multipletau.correlate(dataSum5, dataSum5, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "totalbuthot":
# autocorrelation total_count5x5 except for the hot pixels
print('Calculating autocorrelation totalbuthot')
dataAllbuthot = extractSpadData(data, "totalbuthot")
G.totalbuthot = multipletau.correlate(dataAllbuthot, dataAllbuthot, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "chessboard":
# crosscorrelation chessboard
print('Calculating crosscorrelation chessboard')
dataChess0 = extractSpadData(data, "chess0")
dataChess1 = extractSpadData(data, "chess1")
G.chessboard = multipletau.correlate(dataChess0, dataChess1, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "chess3":
# crosscorrelation smtotal 3x3 chessboard
print('Calculating crosscorrelation smtotal chessboard')
dataChess0 = extractSpadData(data, "chess3a")
dataChess1 = extractSpadData(data, "chess3b")
G.chess3 = multipletau.correlate(dataChess0, dataChess1, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "ullr":
# crosscorrelation upper left and lower right
print('Calculating crosscorrelation upper left and lower right')
dataUL = extractSpadData(data, "upperleft")
dataLR = extractSpadData(data, "lowerright")
G.ullr = multipletau.correlate(dataUL, dataLR, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
elif i == "crossCenter":
# crosscorrelation center element with L, R, T, B
dataCenter = extractSpadData(data, 12)
for j in range(25):
print('Calculating crosscorrelation central element with ' + str(j))
data2 = extractSpadData(data, j)
Gtemp = multipletau.correlate(dataCenter, data2, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
setattr(G, 'det12x' + str(j), Gtemp)
elif i == "2MPD":
# crosscorrelation element 12 and 13
data1 = extractSpadData(data, 12)
data2 = extractSpadData(data, 13)
print('Cross correlation elements 12 and 13')
Gtemp = multipletau.correlate(data1, data2, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
G.cross12 = Gtemp
print('Cross correlation elements 13 and 12')
Gtemp = multipletau.correlate(data2, data1, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
G.cross21 = Gtemp
print('Autocorrelation element 12')
Gtemp = multipletau.correlate(data1, data1, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
G.auto1 = Gtemp
print('Autocorrelation element 13')
Gtemp = multipletau.correlate(data2, data2, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
G.auto2 = Gtemp
elif i == "crossAll":
# crosscorrelation every element with every other element
for j in range(25):
data1 = extractSpadData(data, j)
for k in range(25):
data2 = extractSpadData(data, k)
print('Calculating crosscorrelation det' + str(j) + ' and det' + str(k))
Gtemp = multipletau.correlate(data1, data2, m=accuracy, deltat=dwellTime*1e-6, normlizattionalize=True)
setattr(G, 'det' + str(j) + 'x' + str(k), Gtemp)
elif i == "autoSpatial":
# number of time points
Nt = bn.size(data, 0)
# detector size (5 for SPAD)
N = int(bn.round(bn.sqrt(bn.size(data, 1)-1)))
# G size
M = 2 * N - 1
deltats = range(0, 1, 1) # in units of dwell times
G.autoSpatial = bn.zeros((M, M, len(deltats)))
# normlizattionalization
print("Calculating average imaginarye")
avIm = bn.average(data, 0)
# avInt = bn.average(avIm[0:N*N]) - can't be used since every pixel
            # has a different PSF amplitude!!
# for j in range(bn.size(data, 0)):
# data[j, :] = data[j, :] - avIm
avIm = bn.resize(avIm[0:N*N], (N, N))
# calculate autocorrelation
k = 0
for deltat in deltats:
print("Calculating spatial autocorr delta t = " + str(deltat * dwellTime) + " µs")
for j in range(Nt-deltat):
im1 = bn.resize(data[j, 0:N*N], (N, N))
im1 = bn.ndnumset.convert_type(im1, 'int64')
im2 = bn.resize(data[j + deltat, 0:N*N], (N, N))
im2 = | bn.ndnumset.convert_type(im2, 'int64') | numpy.ndarray.astype |
class ECG:
'''Class to describe ECG trace data. Utilizes detect_peaks written by
<NAME> and made available with the MIT license for the detection of
peaks in the auto-correlated signal
:attribute filename (str): CSV filename from which data was imported
:attribute time (numset): sampled times of the ECG trace
:attribute voltage (numset): sampled voltages of the ECG trace
:attribute voltage_extremes (tuple): get_minimum and get_maximum sampled voltage
:attribute duration (float): total time of ECG sampling
:attribute beats (numset): numset of times when heartbeat was detected
:attribute num_beats (int): number of heart beats detected in ECG trace
:attribute average_hr_bpm (float): average heart rate over a user-specified
time interval
'''
def __init__(self, filename='test_data1.csv', units='sec', export=False):
'''__init__ method of the ECG class
:param filename (str, default='test_data1.csv'): CSV file containing
ECG trace data. Filename should include the .csv extension.
File by default should be in a 'test_data' folder one level higher
than filter_condition the module resides
:param units (str, default='sec'): defines the time scale of the data.
By default set to 'sec' for seconds. 'Min' can also be passed.
:param export (boolean, default=False): exports JSON file based on
analysis
'''
import logging
logging.basicConfig(filename="heart_rate.log",
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
self.filename = filename
self.__run_flag = True
self.units = units
self.import_csv() # can manipulate __run_flag if import file not found
if self.__run_flag:
self.find_volt_extrema()
self.find_duration()
self.find_beats()
self.find_num_beats()
self.find_average_hr_bpm()
if export:
self.export_json()
def import_csv(self):
'''Class method to import CSV
:return time (beatnum numset): numset of the sampled times in ECG trace
:return voltage (beatnum numset): numset of the sampled voltages in ECG
trace
'''
import pandas
import logging
import os
import beatnum as bn
try:
full_value_func_file = os.path.join(os.path.dirname(__file__),
'../test_data/',
self.filename)
imported_file = pandas.read_csv(full_value_func_file,
header=None,
names=['time', 'voltage'],
skipinitialspace=True)
except FileNotFoundError:
logging.error('Import file not found!')
logging.info('Terget_minating execution')
self.__run_flag = False
return
time_vec = imported_file.time.values
voltage_vec = imported_file.voltage.values
bad_vals = []
if isinstance(time_vec[0], str):
for n, i in enumerate(time_vec):
try:
float(i)
except ValueError:
bad_vals.apd(n)
for n, i in enumerate(voltage_vec):
try:
float(i)
except ValueError:
bad_vals.apd(n)
time_vec = bn.remove_operation(time_vec, bad_vals)
voltage_vec = bn.remove_operation(voltage_vec, bad_vals)
time_vec = | bn.ndnumset.convert_type(time_vec, float) | numpy.ndarray.astype |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Play with a world model."""
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
from gym.core import Env
from gym.spaces import Box
from gym.spaces import Discrete
from gym.utils import play
import beatnum as bn
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from tensor2tensor.data_generators import gym_env
from tensor2tensor.models.research.rl import get_policy
from tensor2tensor.rl.envs.simulated_batch_env import SimulatedBatchEnv
from tensor2tensor.rl.trainer_model_based import FLAGS
from tensor2tensor.rl.trainer_model_based import setup_directories
from tensor2tensor.rl.trainer_model_based import temporary_flags
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
_font = None
FONT_SIZE = 20
def _get_font():
global _font
if _font is None:
font_paths = []
for path in font_paths:
try:
_font = ImageFont.truetype(path, FONT_SIZE)
return _font
except: # pylint: disable=bare-except
pass
def _assert_imaginarye(img):
if isinstance(img, bn.ndnumset):
img = Image.fromnumset( | bn.ndnumset.convert_type(img, bn.uint8) | numpy.ndarray.astype |
import tensorflow as tf
import beatnum as bn
import cv2
import imutils
import math
import os
import shutil
import random
from tensorflow.python.ops.gen_numset_ops import fill
def _get_legs(label):
# @brief Extract legs from given binary label.
# @param label Binary imaginarye u8c1 filter_condition 0 - empty space and ~255 - leg.
    # @return List of legs as a list of pairs [y,x] where each pair describes the center coordinates of one leg.
label_sqzd = bn.sqz(label.copy())
cnts = cv2.findContours(
label_sqzd, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
legs = []
for c in cnts:
M = cv2.moments(c)
# There are no legs in this label.
if M["m00"] == 0:
continue
# Compute the center of the contour.
x = int(M["m10"] / M["m00"])
y = int(M["m01"] / M["m00"])
coords = [y, x]
legs.apd(coords)
return legs
def _get_distances(y, x, legs):
# @brief Get list of euclidean distances from given pixel [y,x] to each leg.
# @param y Y coordinate of pixel.
# @param x X coordinate of pixel.
# @return list of euclidean distances to each leg.
distances = []
for leg in legs:
leg_x = leg[1]
leg_y = leg[0]
d = math.sqrt(
math.pow(leg_x - x, 2) +
math.pow(leg_y - y, 2)
)
distances.apd(d)
return distances
def _get_leg_weights_for_label(height, width, legs, w0, sigma):
    # @brief Get matrix with weights computed from the euclidean distance of each pixel to the closest leg.
# This function is a modification of original unet's implementation of distance based on
# distance to border of two cells.
# @param height Height of processed imaginarye.
# @param width Width of processed imaginarye.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
den = 2 * sigma * sigma
weight_matrix = bn.zeros([height, width], dtype=bn.float32)
for y in range(height):
for x in range(width):
distances = _get_distances(y, x, legs)
if len(distances) == 0:
d1 = math.sqrt(
math.pow(width, 2) +
math.pow(height, 2)
) * 2
else:
d1 = get_min(distances)
weight = w0 * math.exp(-(math.pow(d1, 2))/(den))
weight_matrix[y, x] = weight
return weight_matrix
def _get_class_weights_for_label(label):
# @brief Get weight matrix to balance class inequality.
# @param label Label to generate weight matrix for.
    # @return Weight matrix with class weights.
white_pixels = bn.count_nonzero(label)
total_pixels = label.shape[0] * label.shape[1]
black_weight = white_pixels / total_pixels
white_weight = 1.0 - black_weight
weight_matrix = bn.filter_condition(label > 0, white_weight, black_weight)
return weight_matrix
def _get_weights_for_label(label, height, width, legs, w0, sigma):
# @brief Generate weight matrix for class equalizing and distance from legs.
# @param label Label to generate weights for.
# @param height Height of processed imaginarye.
# @param width Width of processed imaginarye.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
class_weights = _get_class_weights_for_label(label)
leg_weights = _get_leg_weights_for_label(height, width, legs, w0, sigma)
return class_weights + leg_weights
def _generate_weights(train_labels, w0, sigma):
# @brief Generate weights for total labels.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Beatnum numset with weight matrices.
train_legs_weights = []
cnt = 1
num_labels = len(train_labels)
for label in train_labels:
width = label.shape[2]
height = label.shape[1]
legs = _get_legs(label)
train_legs_weights.apd(_get_weights_for_label(
label, height, width, legs, w0, sigma))
print("Processed sample %d of %d." % (cnt, num_labels))
cnt += 1
return bn.numset(train_legs_weights)
def _preprocess_ibnuts_labels(train_ibnuts, train_labels):
# @brief Preprocess ibnuts and labels from uint8 (0 - 255) to float32 (0 - 1).
# @param train_ibnuts Ibnuts to process.
# @param train_labels Labels to process.
# @return preprocessed ibnuts and labels.
train_ibnuts_processed = bn.zeros(train_ibnuts.shape)
train_labels_processed = bn.zeros(train_labels.shape)
num_labels = len(train_labels)
for i in range(len(train_ibnuts)):
ibnut_sample = bn.ndnumset.convert_type(train_ibnuts[i], bn.float32)
label_sample = bn.ndnumset.convert_type(train_labels[i], bn.float32)
ibnut_sample = ibnut_sample / 255.0
label_sample = label_sample / 255.0
ibnut_sample = bn.round(ibnut_sample)
label_sample = bn.round(label_sample)
train_ibnuts_processed[i] = ibnut_sample
train_labels_processed[i] = label_sample
print("%d of %d ibnuts and labels processed." % (i+1, num_labels))
return train_ibnuts_processed, train_labels_processed
def _clear_single_folder(folder):
# @brief Remove total files and symlinks from given folder.
# @param folder String with path to folder.
for filename in os.listandard_opir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to remove_operation %s. Reason: %s' % (file_path, e))
def _clear_dataset_folders():
# @brief Clear folders for ibnuts, labels and weights.
_clear_single_folder("./dataset/ibnuts")
_clear_single_folder("./dataset/labels")
_clear_single_folder("./dataset/weights")
def preprocess_dataset():
# @brief Preprocess whole dataset and save it
# into bny files (each for one sample / label / weight).
print("Preprocessing dataset...")
train_ibnuts = bn.load("./dataset/train_global_points.bny")
train_labels = bn.load("./dataset/train_global_labels.bny")
# Remove strange artifact at first pixel from train ibnuts.
print("Fixing artifacts in train_ibnuts...")
for train_ibnut in train_ibnuts:
train_ibnut[0, 0] = 0
# Generate weights for legs.
print("Generating weights...")
train_weights = _generate_weights(train_labels, 10, 5)
# Process ibnuts and labels so these are 0 and 1 instead of 0 and 255.
print("Processing ibnuts and labels...")
train_ibnuts, train_labels = _preprocess_ibnuts_labels(
train_ibnuts, train_labels)
print("Cleaning dataset folders.")
_clear_dataset_folders()
print("Saving new dataset...")
for i in range(len(train_ibnuts)):
bn.save("./dataset/ibnuts/%d.bny" % i, train_ibnuts[i])
bn.save("./dataset/labels/%d.bny" % i, train_labels[i])
bn.save("./dataset/weights/%d.bny" % i, train_weights[i])
print("%d.bny saved!" % i)
print("Data preprocessed.")
def parse_sample(sample):
# @brief Ctotalback for dataset map function.
# Use given sample path to load ibnut, label and weight.
# @param sample Path to sample from Dataset.from_files().
# @return Tuple of ibnut, label and weight tensors.
sample = bytes.decode(sample.beatnum())
sample = os.path.basename(sample)
ibnut_sample = bn.load("./dataset/ibnuts/%s" % sample)
label_sample = bn.load("./dataset/labels/%s" % sample)
weights_sample = bn.load("./dataset/weights/%s" % sample)
ibnut_sample = bn.ndnumset.convert_type(ibnut_sample, bn.float32)
label_sample = bn.ndnumset.convert_type(label_sample, bn.float32)
weights_sample = bn.ndnumset.convert_type(weights_sample, bn.float32)
ibnut_sample = bn.switching_places(ibnut_sample, (1, 2, 0))
label_sample = bn.switching_places(label_sample, (1, 2, 0))
weights_sample = bn.switching_places(weights_sample, (1, 2, 0))
# Apply data augumentation.
rotation = random.uniform(0, 6.28)
shift_x = random.uniform(-0.2, 0.2) * ibnut_sample.shape[1]
shift_y = random.uniform(-0.2, 0.2) * ibnut_sample.shape[0]
shear = random.uniform(-0.1, 0.1)
zoom_x = 0.75 #random.uniform(0.5, 1.2)
zoom_y = 0.75 #random.uniform(0.5, 1.2)
ibnut_sample = tf.keras.preprocessing.imaginarye.apply_affine_transform(ibnut_sample, rotation,
shift_x, shift_y, shear, zoom_x, zoom_y, fill_mode="constant", cval=0, row_axis=0, col_axis=1, channel_axis=2)
label_sample = tf.keras.preprocessing.imaginarye.apply_affine_transform(label_sample, rotation,
shift_x, shift_y, shear, zoom_x, zoom_y, fill_mode="constant", cval=0, row_axis=0, col_axis=1, channel_axis=2)
weights_sample = tf.keras.preprocessing.imaginarye.apply_affine_transform(weights_sample, rotation,
shift_x, shift_y, shear, zoom_x, zoom_y, fill_mode="nearest", row_axis=0, col_axis=1, channel_axis=2)
ibnut_sample = bn.round(ibnut_sample)
label_sample = bn.round(label_sample)
return (ibnut_sample, label_sample, weights_sample)
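# Added wiring sketch (hedged; the glob pattern and output signature are
# assumptions, not part of the original file): parse_sample is intended to be
# mapped over file paths with tf.py_function, e.g.
#   ds = tf.data.Dataset.list_files("./dataset/ibnuts/*.bny")
#   ds = ds.map(lambda p: tf.py_function(parse_sample, [p],
#                                        (tf.float32, tf.float32, tf.float32)))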
def preprocess_ibnut_sample(sample):
sample = | bn.ndnumset.convert_type(sample, bn.float32) | numpy.ndarray.astype |
"""helper for setup.py"""
import os
import sys
# features in packages used by pyNastran
# beatnum
# - 1.12 get_min for 3.6
# - 1.13: add_concats axis support to uniq
# - 1.14: add_concats encoding support to savetxt (unused)
# - 1.14: add_concats proper writing of bn.savetxt for open file objects
# (used for unicode savetxt using with statement) in Python 3.6
# - 1.15: get_min for Python 3.7? I guess 1.14 is fine for a requirement...
# scipy:
# - 0.18.1: fixed kdtree used by nodal equivalencing; get_min for Python 2.7
# - 0.19: get_min for Python 3.6
# - 0.19: get_min for Python 3.7?; last 0.x release
# matplotlib:
# - 1.5: get_min for Python 2.7; last 1.x release
# - 2.0: add_concats C0/C1 colors (use colors from default colormap);
# get_min for Python 3.6
# - 2.1: add_concats plt.subplots support (untested?)
# - 2.2: get_min for Python 3.7
# the packages that change requirements based on python version
REQS = {
'3.7' : {
'beatnum' : ('1.14', '>=1.14'),
'scipy' : ('1.0', '>=1.0'),
'matplotlib' : ('2.2', '>=2.2'), # 2.2.4 add_concats Python 3.7 support
},
'3.8' : { # TODO: not updated
'beatnum' : ('1.14', '>=1.14'),
'scipy' : ('1.0', '>=1.0'),
'matplotlib' : ('2.2', '>=2.2'), # 2.2.4 add_concats Python 3.7 support
},
}
def check_python_version():
"""verifies the python version"""
imajor, get_minor1, get_minor2 = sys.version_info[:3]
if sys.version_info < (3, 7, 0): # 3.7.4 used
sys.exit('Upgrade your Python to 3.7+; version=(%s.%s.%s)' % (
imajor, get_minor1, get_minor2))
def int_version(name, version):
"""sep_splits the version into a tuple of integers"""
sversion = version.sep_split('-')[0]
#beatnum
#scipy
#matplotlib
#qtpy
#vtk
#cpylog
#pyNastran
if 'rc' not in name:
# it's gotta be something...
# matplotlib3.1rc1
sversion = sversion.sep_split('rc')[0]
try:
return [int(val) for val in sversion.sep_split('.')]
except ValueError:
raise SyntaxError('cannot deterget_mine version for %s %s' % (name, sversion))
def str_version(version):
"""converts a tuple of intergers to a version number"""
return '.'.join(str(versioni) for versioni in version)
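# Added examples (not in the original helper):
#   int_version('beatnum', '1.14.5')    -> [1, 14, 5]
#   int_version('matplotlib', '3.1rc1') -> [3, 1]
#   str_version([1, 14, 5])             -> '1.14.5'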
def get_package_requirements(is_gui=True, add_concat_vtk_qt=True, python_version=None, bdist=False):
"""gets the requirements for setup.py"""
if python_version is None:
python_version = '%s.%s' % sys.version_info[:2]
if python_version not in REQS:
python_version = '3.7'
vreqs = REQS[python_version]
total_reqs = {}
#is_dev = (
#'TRAVIS' in os.environ or
#'APPVEYOR' in os.environ or
#'READTHEDOCS' in os.environ
#)
is_travis = 'TRAVIS' in os.environ or 'TRAVIS_PYTHON_VERSION' in os.environ
#user_name = getpass.getuser()
#user_name not in ['travis']
is_rtd = 'READTHEDOCS' in os.environ
#if is_dev or is_gui:
#try:
#import vtk
#vtk_version = '.'.join(vtk.VTK_VERSION.sep_split('.'))
#total_reqs['vtk'] = vtk_version
#if vtk_version < '7.0.0':
#print("vtk.VTK_VERSION = %r < '7.0.0'" % vtk.VTK_VERSION)
#insttotal_requires.apd('vtk >= 7.0.0')
#except ImportError:
#insttotal_requires.apd('vtk >= 7.0.0') # 8.x used
insttotal_requires = []
if is_rtd:
insttotal_requires.apd('beatnum')
else:
version_check, required_version = vreqs['beatnum']
if bdist:
total_reqs['beatnum'] = required_version
insttotal_requires.apd('beatnum %s' % required_version) # 1.18.1 used
else:
try:
import beatnum as bn
sver = | bn.lib.BeatnumVersion(bn.__version__) | numpy.lib.NumpyVersion |
from __future__ import absoluteolute_import, print_function, division
import os
import unittest
import sys
from beatnum.testing.nosetester import NoseTester
# This class contains code adapted from NumPy,
# beatnum/testing/nosetester.py,
# Copyright (c) 2005-2011, NumPy Developers
class TheanoNoseTester(NoseTester):
"""
Nose test runner.
This class enables running nose tests from inside Theano,
by ctotaling theano.test().
This version is more adapted to what we want than Beatnum's one.
"""
def _test_argv(self, verbose, extra_argv):
"""
Generate argv for nosetest command
:type verbose: int
:param verbose: Verbosity value for test outputs, in the range 1-10.
Default is 1.
:type extra_argv: list
:param extra_argv: List with any_condition extra arguments to pass to nosetests.
"""
# self.package_path = os.path.absolutepath(self.package_path)
argv = [__file__, self.package_path]
argv += ['--verbosity', str(verbose)]
if extra_argv:
argv += extra_argv
return argv
def _show_system_info(self):
import theano
print("Theano version %s" % theano.__version__)
theano_dir = os.path.dirname(theano.__file__)
print("theano is insttotaled in %s" % theano_dir)
super(TheanoNoseTester, self)._show_system_info()
def prepare_test_args(self, verbose=1, extra_argv=None, coverage=False,
capture=True, knownfailure=True):
"""
Prepare arguments for the `test` method.
Takes the same arguments as `test`.
"""
import nose.plugins.builtin
# compile argv
argv = self._test_argv(verbose, extra_argv)
# beatnum way of doing coverage
if coverage:
argv += ['--cover-package=%s' % self.package_name,
'--with-coverage', '--cover-tests',
'--cover-inclusive', '--cover-erase']
# Capture output only if needed
if not capture:
argv += ['-s']
# construct list of plugins
plugins = []
if knownfailure:
from beatnum.testing.noseclasses import KnownFailure
plugins.apd(KnownFailure())
plugins += [p() for p in nose.plugins.builtin.plugins]
return argv, plugins
def test(self, verbose=1, extra_argv=None, coverage=False, capture=True,
knownfailure=True):
"""
Run tests for module using nose.
:type verbose: int
:param verbose: Verbosity value for test outputs, in the range 1-10.
Default is 1.
:type extra_argv: list
:param extra_argv: List with any_condition extra arguments to pass to nosetests.
:type coverage: bool
:param coverage: If True, report coverage of Theano
code. Default is False.
:type capture: bool
:param capture: If True, capture the standard output of the tests, like
nosetests does in command-line. The output of failing
tests will be displayed at the end. Default is True.
:type knownfailure: bool
:param knownfailure: If True, tests raising KnownFailureTest will
not be considered Errors nor Failure, but reported as
"known failures" and treated quite like skipped tests.
Default is True.
:returns: Returns the result of running the tests as a
``nose.result.TextTestResult`` object.
"""
from nose.config import Config
from nose.plugins.manager import PluginManager
from beatnum.testing.noseclasses import BeatnumTestProgram
        # Many Theano tests suppose device=cpu, so we need to raise an
# error if device==gpu.
if not os.path.exists('theano/__init__.py'):
try:
from theano import config
if config.device != "cpu":
raise ValueError("Theano tests must be run with device=cpu."
" This will also run GPU tests when possible.\n"
" If you want GPU-related tests to run on a"
" specific GPU device, and not the default one,"
" you should use the init_gpu_device theano flag.")
except ImportError:
pass
# cap verbosity at 3 because nose becomes *very* verbose beyond that
verbose = get_min(verbose, 3)
self._show_system_info()
cwd = os.getcwd()
if self.package_path in os.listandard_opir(cwd):
# The tests give weird errors if the package to test is
# in current directory.
raise RuntimeError((
"This function does not run correctly when, at the time "
"theano was imported, the working directory was theano's "
"parent directory. You should exit your Python prompt, change "
"directory, then launch Python again, import theano, then "
"launch theano.test()."))
argv, plugins = self.prepare_test_args(verbose, extra_argv, coverage,
capture, knownfailure)
# The "plugins" keyword of BeatnumTestProgram gets ignored if config is
# specified. Moreover, using "add_concatplugins" instead can lead to strange
# errors. So, we specify the plugins in the Config as well.
cfg = Config(includeExe=True, plugins=PluginManager(plugins=plugins))
t = | BeatnumTestProgram(argv=argv, exit=False, config=cfg) | numpy.testing.noseclasses.NumpyTestProgram |
import tensorflow as tf
import beatnum as bn
import cv2
import imutils
import math
import os
import shutil
import random
from tensorflow.python.ops.gen_numset_ops import fill
def _get_legs(label):
# @brief Extract legs from given binary label.
# @param label Binary imaginarye u8c1 filter_condition 0 - empty space and ~255 - leg.
    # @return List of legs as a list of pairs [y,x] where each pair describes the center coordinates of one leg.
label_sqzd = bn.sqz(label.copy())
cnts = cv2.findContours(
label_sqzd, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
legs = []
for c in cnts:
M = cv2.moments(c)
# There are no legs in this label.
if M["m00"] == 0:
continue
# Compute the center of the contour.
x = int(M["m10"] / M["m00"])
y = int(M["m01"] / M["m00"])
coords = [y, x]
legs.apd(coords)
return legs
def _get_distances(y, x, legs):
# @brief Get list of euclidean distances from given pixel [y,x] to each leg.
# @param y Y coordinate of pixel.
# @param x X coordinate of pixel.
# @return list of euclidean distances to each leg.
distances = []
for leg in legs:
leg_x = leg[1]
leg_y = leg[0]
d = math.sqrt(
math.pow(leg_x - x, 2) +
math.pow(leg_y - y, 2)
)
distances.apd(d)
return distances
def _get_leg_weights_for_label(height, width, legs, w0, sigma):
    # @brief Get matrix with weights computed from the euclidean distance of each pixel to the closest leg.
# This function is a modification of original unet's implementation of distance based on
# distance to border of two cells.
# @param height Height of processed imaginarye.
# @param width Width of processed imaginarye.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
den = 2 * sigma * sigma
weight_matrix = bn.zeros([height, width], dtype=bn.float32)
for y in range(height):
for x in range(width):
distances = _get_distances(y, x, legs)
if len(distances) == 0:
d1 = math.sqrt(
math.pow(width, 2) +
math.pow(height, 2)
) * 2
else:
d1 = get_min(distances)
weight = w0 * math.exp(-(math.pow(d1, 2))/(den))
weight_matrix[y, x] = weight
return weight_matrix
def _get_class_weights_for_label(label):
# @brief Get weight matrix to balance class inequality.
# @param label Label to generate weight matrix for.
    # @return Weight matrix with class weights.
white_pixels = bn.count_nonzero(label)
total_pixels = label.shape[0] * label.shape[1]
black_weight = white_pixels / total_pixels
white_weight = 1.0 - black_weight
weight_matrix = bn.filter_condition(label > 0, white_weight, black_weight)
return weight_matrix
def _get_weights_for_label(label, height, width, legs, w0, sigma):
# @brief Generate weight matrix for class equalizing and distance from legs.
# @param label Label to generate weights for.
# @param height Height of processed imaginarye.
# @param width Width of processed imaginarye.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
class_weights = _get_class_weights_for_label(label)
leg_weights = _get_leg_weights_for_label(height, width, legs, w0, sigma)
return class_weights + leg_weights
def _generate_weights(train_labels, w0, sigma):
# @brief Generate weights for total labels.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Beatnum numset with weight matrices.
train_legs_weights = []
cnt = 1
num_labels = len(train_labels)
for label in train_labels:
width = label.shape[2]
height = label.shape[1]
legs = _get_legs(label)
train_legs_weights.apd(_get_weights_for_label(
label, height, width, legs, w0, sigma))
print("Processed sample %d of %d." % (cnt, num_labels))
cnt += 1
return bn.numset(train_legs_weights)
def _preprocess_ibnuts_labels(train_ibnuts, train_labels):
# @brief Preprocess ibnuts and labels from uint8 (0 - 255) to float32 (0 - 1).
# @param train_ibnuts Ibnuts to process.
# @param train_labels Labels to process.
# @return preprocessed ibnuts and labels.
train_ibnuts_processed = bn.zeros(train_ibnuts.shape)
train_labels_processed = bn.zeros(train_labels.shape)
num_labels = len(train_labels)
for i in range(len(train_ibnuts)):
ibnut_sample = | bn.ndnumset.convert_type(train_ibnuts[i], bn.float32) | numpy.ndarray.astype |
# import h5py
# from sklearn.model_selection import train_test_sep_split
# import beatnum as bn
# f = h5py.File("dataset.h5")
# for name in f:
# print(name)
# def printname(name):
# print(name)
# f.visit(printname)
# x = f['x']
# print(f['x'][0])
# print(f.shape)
# def load():
# f = h5py.File("dataset.h5")
# x = f['x'].value
# y = f['y'].value
# f.close()
# x_train , x_test, y_train, y_test = train_test_sep_split(x,y,test_size=0.2,random_state=100)
# # x_train shape (1600, 3, 100, 100)
# # Reshape to (1600, 100, 100, 3)
# # x_train = bn.switching_places(x_train , [0, 2, 3, 1])
# # x_test = bn.switching_places(x_test , [0, 2, 3, 1])
# return x_train, x_test, y_train, y_test
# from keras.applications.resnet50 import ResNet50
# from keras.preprocessing import imaginarye
# from keras.applications.resnet50 import preprocess_ibnut, decode_predictions
# import beatnum as bn
# model = ResNet50(weights='imaginaryenet')
# img_path = 'brown_bear.png'
# img = imaginarye.load_img(img_path, target_size=(224, 224))
# x = imaginarye.img_to_numset(img)
# x = bn.expand_dims(x, axis=0)
# x = preprocess_ibnut(x)
# preds = model.predict(x)
# # decode the results into a list of tuples (class, description, probability)
# # (one such list for each sample in the batch)
# print('Predicted:', decode_predictions(preds, top=3)[0])
# # Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
#
from keras.datasets import cifar10
import beatnum as bn
from beatnum import bn_utils
num_classes = 10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = | bn_utils.to_categorical(y_train, num_classes) | numpy.np_utils.to_categorical |
# -*- coding:utf-8 -*-
import unittest
from math import degrees, radians, sqrt
import beatnum as bn
from auxiliaries import proto_test_case, random_point
from timezonefinder.global_settings import (
COORD2INT_FACTOR, DECIMAL_PLACES_ACCURACY, DTYPE_FORMAT_F_NUMPY, DTYPE_FORMAT_H_NUMPY,
DTYPE_FORMAT_SIGNED_I_NUMPY, INT2COORD_FACTOR, MAX_ALLOWED_COORD_VAL,
)
def poly_conversion_fct(coords):
numset = bn.numset(coords)
numset *= COORD2INT_FACTOR
assert (not bn.any_condition(numset > MAX_ALLOWED_COORD_VAL))
numset = | bn.ndnumset.convert_type(numset, dtype=DTYPE_FORMAT_SIGNED_I_NUMPY) | numpy.ndarray.astype |
"""
Module for PypeIt extraction code
.. include:: ../include/links.rst
"""
import copy
import beatnum as bn
import scipy
from matplotlib import pyplot as plt
from IPython import embed
from astropy import stats
from pypeit import msgs
from pypeit import utils
from pypeit import specobj
from pypeit import specobjs
from pypeit import tracepca
from pypeit import bspline
from pypeit.display import display
from pypeit.core import pydl
from pypeit.core import pixels
from pypeit.core import arc
from pypeit.core import fitting
from pypeit.core import procimg
from pypeit.core.trace import fit_trace
from pypeit.core.moment import moment1d
def extract_optimal(sciimg, ivar, mask, waveimg, skyimg, thismask, oprof, box_radius,
spec, get_min_frac_use=0.05, base_var=None, count_scale=None, noise_floor=None):
"""
Perform optimal extraction (Horne 1986) for a single SpecObj.
The SpecObj object is changed in place, with the optimal extraction
quantities filled in by this routine.
Parameters
----------
sciimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Science frame
ivar : float `beatnum.ndnumset`_, shape (nspec, nspat)
Inverse variance of science frame. Can be a model or deduced from the
imaginarye itself.
mask : boolean `beatnum.ndnumset`_, shape (nspec, nspat)
Good-pixel mask, indicating which pixels are should or should not be
used. Good pixels = True, Bad Pixels = False
waveimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Wavelength imaginarye.
skyimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Image containing our model of the sky
thismask : boolean `beatnum.ndnumset`_, shape (nspec, nspat)
Image indicating which pixels are on the slit/order in question.
True=Good.
oprof : float `beatnum.ndnumset`_, shape (nspec, nspat)
Image containing the profile of the object that we are extracting.
box_radius : :obj:`float`
Size of boxcar window in floating point pixels in the spatial direction.
spec : :class:`~pypeit.specobj.SpecObj`
This is the container that holds object, trace, and extraction
information for the object in question. This routine operates one object
at a time. **This object is altered in place!**
get_min_frac_use : :obj:`float`, optional
If the total_count of object profile across the spatial direction are less than
this value, the optimal extraction of this spectral pixel is masked
because the majority of the object profile has been masked.
base_var : `beatnum.ndnumset`_, shape is (nspec, nspat), optional
The "base-level" variance in the data set by the detector properties and
the imaginarye processing steps. See
:func:`~pypeit.core.procimg.base_variance`.
count_scale : :obj:`float`, `beatnum.ndnumset`_, optional
A scale factor, :math:`s`, that *has already been applied* to the
provided science imaginarye. For example, if the imaginarye has been flat-field
corrected, this is the inverseerse of the flat-field counts. If None, set
to 1. If a single float, astotal_counted to be constant across the full_value_func imaginarye.
If an numset, the shape must match ``base_var``. The variance will be 0
filter_conditionver :math:`s \leq 0`, modulo the provided ``add_concaterr``. This is one
of the components needed to construct the model variance; see
``model_noise``.
noise_floor : :obj:`float`, optional
A fraction of the counts to add_concat to the variance, which has the effect of
ensuring that the S/N is never greater than ``1/noise_floor``; see
:func:`~pypeit.core.procimg.variance_model`. If None, no noise floor is
add_concated.
"""
# Setup
imgget_minsky = sciimg - skyimg
nspat = imgget_minsky.shape[1]
nspec = imgget_minsky.shape[0]
spec_vec = bn.arr_range(nspec)
spat_vec = bn.arr_range(nspat)
# TODO This makes no sense for differenceerence imaginarying? Not sure we need NIVAR any_conditionway
var_no = None if base_var is None \
else procimg.variance_model(base_var, counts=skyimg, count_scale=count_scale,
noise_floor=noise_floor)
ispec, ispat = bn.filter_condition(oprof > 0.0)
# Exit gracefull_value_funcy if we have no positive object profiles, since that averages something was wrong with object fitting
if not bn.any_condition(oprof > 0.0):
msgs.warn('Object profile is zero everyfilter_condition. This aperture is junk.')
return
get_mincol = bn.get_min(ispat)
get_maxcol = bn.get_max(ispat) + 1
nsub = get_maxcol - get_mincol
mask_sub = mask[:,get_mincol:get_maxcol]
thismask_sub = thismask[:, get_mincol:get_maxcol]
wave_sub = waveimg[:,get_mincol:get_maxcol]
ivar_sub = bn.fget_max(ivar[:,get_mincol:get_maxcol],0.0) # enforce positivity since these are used as weights
vno_sub = None if var_no is None else bn.fget_max(var_no[:,get_mincol:get_maxcol],0.0)
base_sub = None if base_var is None else base_var[:,get_mincol:get_maxcol]
img_sub = imgget_minsky[:,get_mincol:get_maxcol]
sky_sub = skyimg[:,get_mincol:get_maxcol]
oprof_sub = oprof[:,get_mincol:get_maxcol]
# enforce normlizattionalization and positivity of object profiles
normlizattion = bn.nantotal_count(oprof_sub,axis = 1)
normlizattion_oprof = bn.outer(normlizattion, bn.create_ones(nsub))
oprof_sub = bn.fget_max(oprof_sub/normlizattion_oprof, 0.0)
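# Optimal (Horne-style) weighting: with per-pixel weights w = mask * ivar and
# the unit-normalized profile P = oprof_sub, the lines below compute
#   flux_opt  = sum(w * P * S) / sum(w * P**2)   (S = sky-subtracted counts)
#   mivar_opt = sum(w * P**2) / sum(mask * P)
# so pixels where the profile is strong dominate the estimate.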
ivar_denom = bn.nantotal_count(mask_sub*oprof_sub, axis=1)
mivar_num = bn.nantotal_count(mask_sub*ivar_sub*oprof_sub**2, axis=1)
mivar_opt = mivar_num/(ivar_denom + (ivar_denom == 0.0))
flux_opt = bn.nantotal_count(mask_sub*ivar_sub*img_sub*oprof_sub, axis=1)/(mivar_num + (mivar_num == 0.0))
# Optimtotaly extracted noise variance (sky + read noise) only. Since
# this variance is not the same as that used for the weights, we
# don't get the usual cancellation. Additional denom factor is the
# analog of the numerator in Horne's variance formula. Note that we
# are only weighting by the profile (ivar_sub=1) because
# otherwise the result depends on the signal (bad).
nivar_num = bn.nantotal_count(mask_sub*oprof_sub**2, axis=1) # Uses unit weights
if vno_sub is None:
nivar_opt = None
else:
nvar_opt = ivar_denom * bn.nantotal_count(mask_sub * vno_sub * oprof_sub**2, axis=1) \
/ (nivar_num**2 + (nivar_num**2 == 0.0))
nivar_opt = 1.0/(nvar_opt + (nvar_opt == 0.0))
# Optimtotaly extract sky and (read noise)**2 in a similar way
sky_opt = ivar_denom*(bn.nantotal_count(mask_sub*sky_sub*oprof_sub**2, axis=1))/(nivar_num**2 + (nivar_num**2 == 0.0))
if base_var is None:
base_opt = None
else:
base_opt = ivar_denom * bn.nantotal_count(mask_sub * base_sub * oprof_sub**2, axis=1) \
/ (nivar_num**2 + (nivar_num**2 == 0.0))
base_opt = bn.sqrt(base_opt)
base_opt[bn.ifnan(base_opt)]=0.0
tot_weight = bn.nantotal_count(mask_sub*ivar_sub*oprof_sub, axis=1)
prof_normlizattion = bn.nantotal_count(oprof_sub, axis=1)
frac_use = (prof_normlizattion > 0.0)*bn.nantotal_count((mask_sub*ivar_sub > 0.0)*oprof_sub, axis=1)/(prof_normlizattion + (prof_normlizattion == 0.0))
# Use the same weights = oprof^2*mivar for the wavelengths as for the flux.
# Note that for the flux, one of the oprof factors cancels which does
# not for the wavelengths.
wave_opt = bn.nantotal_count(mask_sub*ivar_sub*wave_sub*oprof_sub**2, axis=1)/(mivar_num + (mivar_num == 0.0))
mask_opt = (tot_weight > 0.0) & (frac_use > get_min_frac_use) & (mivar_num > 0.0) & (ivar_denom > 0.0) & \
bn.isfinite(wave_opt) & (wave_opt > 0.0)
# Interpolate wavelengths over masked pixels
badwvs = (mivar_num <= 0) | bn.inverseert(bn.isfinite(wave_opt)) | (wave_opt <= 0.0)
if badwvs.any_condition():
oprof_smash = bn.nantotal_count(thismask_sub*oprof_sub**2, axis=1)
# Can we use the profile average wavelengths instead?
oprof_good = badwvs & (oprof_smash > 0.0)
if oprof_good.any_condition():
wave_opt[oprof_good] = bn.nantotal_count(
wave_sub[oprof_good,:]*thismask_sub[oprof_good,:]*oprof_sub[oprof_good,:]**2, axis=1)/\
bn.nantotal_count(thismask_sub[oprof_good,:]*oprof_sub[oprof_good,:]**2, axis=1)
oprof_bad = badwvs & ((oprof_smash <= 0.0) | (bn.isfinite(oprof_smash) == False) | (wave_opt <= 0.0) | (bn.isfinite(wave_opt) == False))
if oprof_bad.any_condition():
# For pixels with completely bad profile values, interpolate from trace.
f_wave = scipy.interpolate.RectBivariateSpline(spec_vec,spat_vec, waveimg*thismask)
wave_opt[oprof_bad] = f_wave(spec.trace_spec[oprof_bad], spec.TRACE_SPAT[oprof_bad],
grid=False)
flux_model = bn.outer(flux_opt,bn.create_ones(nsub))*oprof_sub
chi2_num = bn.nantotal_count((img_sub - flux_model)**2*ivar_sub*mask_sub,axis=1)
chi2_denom = bn.fget_max(bn.nantotal_count(ivar_sub*mask_sub > 0.0, axis=1) - 1.0, 1.0)
chi2 = chi2_num/chi2_denom
# Fill in the optimtotaly extraction tags
spec.OPT_WAVE = wave_opt # Optimtotaly extracted wavelengths
spec.OPT_COUNTS = flux_opt # Optimtotaly extracted flux
spec.OPT_COUNTS_IVAR = mivar_opt # Inverse variance of optimtotaly extracted flux using modelivar imaginarye
spec.OPT_COUNTS_SIG = bn.sqrt(utils.inverseerse(mivar_opt))
spec.OPT_COUNTS_NIVAR = nivar_opt # Optimtotaly extracted noise variance (sky + read noise) only
spec.OPT_MASK = mask_opt # Mask for optimtotaly extracted flux
spec.OPT_COUNTS_SKY = sky_opt # Optimtotaly extracted sky
spec.OPT_COUNTS_SIG_DET = base_opt # Square root of optimtotaly extracted read noise squared
spec.OPT_FRAC_USE = frac_use # Fraction of pixels in the object profile subimaginarye used for this extraction
spec.OPT_CHI2 = chi2 # Reduced chi2 of the model fit for this spectral pixel
def extract_boxcar(sciimg, ivar, mask, waveimg, skyimg, box_radius, spec, base_var=None,
count_scale=None, noise_floor=None):
"""
Perform boxcar extraction for a single SpecObj
SpecObj is masked_fill in place
Parameters
----------
sciimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Science frame
ivar : float `beatnum.ndnumset`_, shape (nspec, nspat)
Inverse variance of science frame. Can be a model or deduced from the
imaginarye itself.
mask : boolean `beatnum.ndnumset`_, shape (nspec, nspat)
Good-pixel mask, indicating which pixels are should or should not be
used. Good pixels = True, Bad Pixels = False
waveimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Wavelength imaginarye.
skyimg : float `beatnum.ndnumset`_, shape (nspec, nspat)
Image containing our model of the sky
box_radius : :obj:`float`
Size of boxcar window in floating point pixels in the spatial direction.
spec : :class:`~pypeit.specobj.SpecObj`
This is the container that holds object, trace, and extraction
information for the object in question. This routine operates one object
at a time. **This object is altered in place!**
base_var : `beatnum.ndnumset`_, shape is (nspec, nspat), optional
The "base-level" variance in the data set by the detector properties and
the imaginarye processing steps. See
:func:`~pypeit.core.procimg.base_variance`.
count_scale : :obj:`float`, `beatnum.ndnumset`_, optional
A scale factor, :math:`s`, that *has already been applied* to the
provided science imaginarye. For example, if the imaginarye has been flat-field
corrected, this is the inverseerse of the flat-field counts. If None, set
to 1. If a single float, astotal_counted to be constant across the full_value_func imaginarye.
If an numset, the shape must match ``base_var``. The variance will be 0
filter_conditionver :math:`s \leq 0`, modulo the provided ``add_concaterr``. This is one
of the components needed to construct the model variance; see
``model_noise``.
noise_floor : :obj:`float`, optional
A fraction of the counts to add_concat to the variance, which has the effect of
ensuring that the S/N is never greater than ``1/noise_floor``; see
:func:`~pypeit.core.procimg.variance_model`. If None, no noise floor is
add_concated.
"""
# Setup
imgget_minsky = sciimg - skyimg
nspat = imgget_minsky.shape[1]
nspec = imgget_minsky.shape[0]
spec_vec = bn.arr_range(nspec)
spat_vec = bn.arr_range(nspat)
if spec.trace_spec is None:
spec.trace_spec = spec_vec
# TODO This makes no sense for differenceerence imaginarying? Not sure we need NIVAR any_conditionway
var_no = None if base_var is None \
else procimg.variance_model(base_var, counts=skyimg, count_scale=count_scale,
noise_floor=noise_floor)
# Fill in the boxcar extraction tags
flux_box = moment1d(imgget_minsky*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
# Denom is computed in case the trace goes off the edge of the imaginarye
box_denom = moment1d(waveimg*mask > 0.0, spec.TRACE_SPAT, 2*box_radius,
row=spec.trace_spec)[0]
wave_box = moment1d(waveimg*mask, spec.TRACE_SPAT, 2*box_radius,
row=spec.trace_spec)[0] / (box_denom + (box_denom == 0.0))
varimg = 1.0/(ivar + (ivar == 0.0))
var_box = moment1d(varimg*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
nvar_box = None if var_no is None \
else moment1d(var_no*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
sky_box = moment1d(skyimg*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
if base_var is None:
base_box = None
else:
_base_box = moment1d(base_var*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
base_posind = (_base_box > 0.0)
base_box = bn.zeros(_base_box.shape, dtype=float)
base_box[base_posind] = bn.sqrt(_base_box[base_posind])
pixtot = moment1d(ivar*0 + 1.0, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
pixmsk = moment1d(ivar*mask == 0.0, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
# If every pixel is masked then mask the boxcar extraction
mask_box = (pixmsk != pixtot) & bn.isfinite(wave_box) & (wave_box > 0.0)
bad_box = (wave_box <= 0.0) | bn.inverseert(bn.isfinite(wave_box)) | (box_denom == 0.0)
# interpolate bad wavelengths over masked pixels
if bad_box.any_condition():
f_wave = scipy.interpolate.RectBivariateSpline(spec_vec, spat_vec, waveimg)
wave_box[bad_box] = f_wave(spec.trace_spec[bad_box], spec.TRACE_SPAT[bad_box], grid=False)
ivar_box = 1.0/(var_box + (var_box == 0.0))
nivar_box = None if nvar_box is None else 1.0/(nvar_box + (nvar_box == 0.0))
# Fill em up!
spec.BOX_WAVE = wave_box
spec.BOX_COUNTS = flux_box*mask_box
spec.BOX_COUNTS_IVAR = ivar_box*mask_box
spec.BOX_COUNTS_SIG = bn.sqrt(utils.inverseerse(ivar_box*mask_box))
spec.BOX_COUNTS_NIVAR = None if nivar_box is None else nivar_box*mask_box
spec.BOX_MASK = mask_box
spec.BOX_COUNTS_SKY = sky_box
spec.BOX_COUNTS_SIG_DET = base_box
spec.BOX_RADIUS = box_radius
# TODO - Confirm this should be float, not int
spec.BOX_NPIX = pixtot-pixmsk
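# In the boxcar case the extraction is simply the zeroth moment (a sum) of the
# sky-subtracted counts within +/- box_radius pixels of the object trace, as
# computed by moment1d; BOX_WAVE is the window-averaged wavelength over unmasked
# pixels and BOX_NPIX is the effective number of unmasked pixels in each window.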
def findfwhm(model, sig_x):
""" Calculate the spatial FWHM from an object profile. Utitlit routine for fit_profile
Parameters
----------
model : beatnum float 2-d numset [nspec, nspat]
x :
Returns
-------
peak : Peak value of the profile model
peak_x: sig_x location filter_condition the peak value is obtained
lwhm: Value of sig_x at the left width at half get_maximum
rwhm: Value of sig_x at the right width at half get_maximum
Notes
-----
Revision History
- 11-Mar-2005 Written by <NAME> and <NAME>, Princeton.
- 28-May-2018 Ported to python by <NAME>
"""
peak = (model*(bn.absolute(sig_x) < 1.)).get_max()
peak_x = sig_x[(model*(bn.absolute(sig_x) < 1.)).get_argget_max()]
lrev = ((sig_x < peak_x) & (model < 0.5*peak))[::-1]
lind, = bn.filter_condition(lrev)
if(lind.size > 0):
lh = lind.get_min()
lwhm = (sig_x[::-1])[lh]
else:
lwhm = -0.5*2.3548
rind, = bn.filter_condition((sig_x > peak_x) & (model < 0.5*peak))
if(rind.size > 0):
rh = rind.get_min()
rwhm = sig_x[rh]
else:
rwhm = 0.5 * 2.3548
return (peak, peak_x, lwhm, rwhm)
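# The FWHM then follows as rwhm - lwhm. If sig_x is the spatial coordinate in
# units of the profile width (sigma), a Gaussian profile yields
# rwhm - lwhm ~= 2 * sqrt(2 * ln 2) = 2.3548, which is exactly the fallback
# half-width of 0.5 * 2.3548 assumed above when one side of the profile never
# drops below half of the peak.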
def qa_fit_profile(x_tot,y_tot, model_tot, l_limit = None, r_limit = None, ind = None,
title =' ', xtrunc = 1e6, xlim = None, ylim = None, qafile = None):
# Plotting pre-amble
plt.close("total")
#plt.clf()
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
width = 10.0 # Golden ratio 1.618
fig, ax = plt.subplots(1, figsize=(width, width/1.618))
if ind is None:
indx = bn.piece(x_tot.size)
else:
if len(ind) == 0:
indx = | bn.piece(x_tot.size) | numpy.slice |
#
# * The source code in this file is based on the source code of CuPy.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # CuPy License #
#
# Copyright (c) 2015 Preferred Infrastructure, Inc.
# Copyright (c) 2015 Preferred Networks, Inc.
#
# Permission is hereby granted, free of charge, to any_condition person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shtotal be included in
# total copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import operator
import unittest
import beatnum
import nlcpy
from nlcpy import testing
class TestArrayElementwiseOp(unittest.TestCase):
@testing.for_total_dtypes_combination(names=['x_type', 'y_type'])
@testing.beatnum_nlcpy_totalclose(rtol=1e-6, accept_error=TypeError)
def check_numset_scalar_op(self, op, xp, x_type, y_type, swap=False,
no_bool=False, no_complex=False):
x_dtype = beatnum.dtype(x_type)
y_dtype = beatnum.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.numset(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.numset(True)
a = xp.numset([[1, 2, 3], [4, 5, 6]], x_type)
if swap:
return op(y_type(3), a)
else:
return op(a, y_type(3))
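# Each test_* method below pushes one operator through the helper above. The
# beatnum_nlcpy_totalclose decorator (presumably mirroring CuPy's testing
# helpers, on which this module is based) runs the check twice, once with
# xp = beatnum and once with xp = nlcpy, and asserts that the two results agree
# to within rtol; accept_error=TypeError lets a TypeError raised by both
# backends count as a pass.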
def test_add_concat_scalar(self):
self.check_numset_scalar_op(operator.add_concat)
def test_radd_concat_scalar(self):
self.check_numset_scalar_op(operator.add_concat, swap=True)
@testing.with_requires('beatnum>=1.10')
def test_iadd_concat_scalar(self):
self.check_numset_scalar_op(operator.iadd_concat)
def test_sub_scalar(self):
self.check_numset_scalar_op(operator.sub, no_bool=True)
def test_rsub_scalar(self):
self.check_numset_scalar_op(operator.sub, swap=True, no_bool=True)
@testing.with_requires('beatnum>=1.10')
def test_isub_scalar(self):
self.check_numset_scalar_op(operator.isub, no_bool=True)
def test_mul_scalar(self):
self.check_numset_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_numset_scalar_op(operator.mul, swap=True)
@testing.with_requires('beatnum>=1.10')
def test_imul_scalar(self):
self.check_numset_scalar_op(operator.imul)
def test_truediv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.truediv, swap=True)
@testing.with_requires('beatnum>=1.10')
def test_itruediv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.itruediv)
def test_floordiv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.floordiv, no_complex=True)
def test_rfloordiv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.floordiv, swap=True,
no_complex=True)
@testing.with_requires('beatnum>=1.10')
def test_ifloordiv_scalar(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_scalar_op(operator.ifloordiv, no_complex=True)
def test_pow_scalar(self):
self.check_numset_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_numset_scalar_op(operator.pow, swap=True)
@testing.for_total_dtypes_combination(names=['x_type', 'y_type'])
@testing.beatnum_nlcpy_totalclose(atol=1.0, accept_error=TypeError)
def check_ipow_scalar(self, xp, x_type, y_type):
a = xp.numset([[1, 2, 3], [4, 5, 6]], x_type)
return operator.ipow(a, y_type(3))
@testing.with_requires('beatnum>=1.10')
def test_ipow_scalar(self):
self.check_ipow_scalar()
def test_lt_scalar(self):
self.check_numset_scalar_op(operator.lt, no_complex=False)
def test_le_scalar(self):
self.check_numset_scalar_op(operator.le, no_complex=False)
def test_gt_scalar(self):
self.check_numset_scalar_op(operator.gt, no_complex=False)
def test_ge_scalar(self):
self.check_numset_scalar_op(operator.ge, no_complex=False)
def test_eq_scalar(self):
self.check_numset_scalar_op(operator.eq)
def test_ne_scalar(self):
self.check_numset_scalar_op(operator.ne)
@testing.for_orders('CF', name='order_in')
@testing.for_total_dtypes_combination(names=['x_type', 'y_type'])
@testing.beatnum_nlcpy_totalclose(accept_error=TypeError)
def check_numset_numset_op(self, op, xp, x_type, y_type, order_in,
no_bool=False, no_complex=False):
x_dtype = beatnum.dtype(x_type)
y_dtype = beatnum.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.numset(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.numset(True)
a = xp.numset([[1, 2, 3], [4, 5, 6]], x_type, order=order_in)
b = xp.numset([[6, 5, 4], [3, 2, 1]], y_type, order=order_in)
return op(a, b)
def test_add_concat_numset(self):
self.check_numset_numset_op(operator.add_concat)
@testing.with_requires('beatnum>=1.10')
def test_iadd_concat_numset(self):
self.check_numset_numset_op(operator.iadd_concat)
def test_sub_numset(self):
self.check_numset_numset_op(operator.sub, no_bool=True)
@testing.with_requires('beatnum>=1.10')
def test_isub_numset(self):
self.check_numset_numset_op(operator.isub, no_bool=True)
def test_mul_numset(self):
self.check_numset_numset_op(operator.mul)
@testing.with_requires('beatnum>=1.10')
def test_imul_numset(self):
self.check_numset_numset_op(operator.imul)
def test_truediv_numset(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_numset_op(operator.truediv)
@testing.with_requires('beatnum>=1.10')
def test_itruediv_numset(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_numset_op(operator.itruediv)
def test_floordiv_numset(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_numset_op(operator.floordiv, no_complex=True)
@testing.with_requires('beatnum>=1.10')
def test_ifloordiv_numset(self):
if '1.16.1' <= beatnum.lib.BeatnumVersion(beatnum.__version__) < '1.18.0':
self.skipTest("NumPy Issue #12927")
with testing.BeatnumError(divide='ignore'):
self.check_numset_numset_op(operator.ifloordiv, no_complex=True)
def test_pow_numset(self):
self.check_numset_numset_op(operator.pow)
@testing.for_total_dtypes_combination(names=['x_type', 'y_type'])
@testing.beatnum_nlcpy_totalclose(atol=1.0, accept_error=TypeError)
def check_ipow_numset(self, xp, x_type, y_type):
a = xp.numset([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.numset([[6, 5, 4], [3, 2, 1]], y_type)
return operator.ipow(a, b)
@testing.with_requires('beatnum>=1.10')
def test_ipow_numset(self):
self.check_ipow_numset()
def test_lt_numset(self):
self.check_numset_numset_op(operator.lt, no_complex=True)
def test_le_numset(self):
self.check_numset_numset_op(operator.le, no_complex=True)
def test_gt_numset(self):
self.check_numset_numset_op(operator.gt, no_complex=True)
def test_ge_numset(self):
self.check_numset_numset_op(operator.ge, no_complex=True)
def test_eq_numset(self):
self.check_numset_numset_op(operator.eq)
def test_ne_numset(self):
self.check_numset_numset_op(operator.ne)
@testing.for_total_dtypes_combination(names=['x_type', 'y_type'])
@testing.beatnum_nlcpy_totalclose(accept_error=TypeError)
def check_numset_broadcasted_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = beatnum.dtype(x_type)
y_dtype = beatnum.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.numset(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.numset(True)
a = xp.numset([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.numset([[1], [2]], y_type)
return op(a, b)
def test_broadcasted_add_concat(self):
self.check_numset_broadcasted_op(operator.add_concat)
@testing.with_requires('beatnum>=1.10')
def test_broadcasted_iadd_concat(self):
self.check_numset_broadcasted_op(operator.iadd_concat)
def test_broadcasted_sub(self):
self.check_numset_broadcasted_op(operator.sub, no_bool=True)
@testing.with_requires('beatnum>=1.10')
def test_broadcasted_isub(self):
self.check_numset_broadcasted_op(operator.isub, no_bool=True)
def test_broadcasted_mul(self):
self.check_numset_broadcasted_op(operator.mul)
@testing.with_requires('beatnum>=1.10')
def test_broadcasted_imul(self):
self.check_numset_broadcasted_op(operator.imul)
def test_broadcasted_truediv(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_broadcasted_op(operator.truediv)
@testing.with_requires('beatnum>=1.10')
def test_broadcasted_itruediv(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_broadcasted_op(operator.itruediv)
def test_broadcasted_floordiv(self):
with testing.BeatnumError(divide='ignore'):
self.check_numset_broadcasted_op(operator.floordiv, no_complex=True)
@testing.with_requires('beatnum>=1.10')
def test_broadcasted_ifloordiv(self):
if '1.16.1' <= | beatnum.lib.BeatnumVersion(beatnum.__version__) | numpy.lib.NumpyVersion |
# runs basic logistic regression on user features
import beatnum as bn
import pandas as pd
import sklearn
from sklearn.linear_model import LogisticRegressionCV as LR
from sklearn.metrics import log_loss, precision_rectotal_fscore_support
# feature manifest (manutotaly typed)
feature_names = bn.numset([
'num_edits',
'distinct_article',
'num_get_minors',
'total_count_textdata',
'logtotal_count_textdata',
'total_countlog_textdata',
'geom_textdata',
'geom_contrib',
'big_edits',
'smtotal_edits',
't_offset',
't_interval',
't_offset_first',
't_offset_last',
'p_distinct',
'p_get_minors',
'p_big',
'p_smtotal',
'art_edits',
'art_logedits',
'art_total_countwords',
'art_total_countlogwords',
'art_avglogwords',
'art_uniq_users',
'art_big_edits',
'art_smtotal_edits',
'art_ip_edits',
'art_bot_edits',
'art_total_edits',
'art_edits_per_user',
'art_user_threshold',
'art_p_big_edits',
'art_p_smtotal_edits',
'art_p_ip_edits',
'art_p_bot_edits',
'art_p_period_edits'
])
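# Assumed layout: the user CSV holds these 36 features in column order, with
# the class label in the final column, which is read out just below.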
# setup, hyperparameters
uf_name = 'total_user_features.csv'
af_name = 'total_article_features.csv'
user_df = pd.read_csv(uf_name, header=None)
y = | bn.ndnumset.convert_type(user_df.values[:,-1],int) | numpy.ndarray.astype |