| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
# coding=utf-8
# <NAME>
# <EMAIL>
# 2022-03-15
# 100 Days of Code: The Complete Python Pro Bootcamp for 2022
# Day 31 - Flash Card App
# Constants
BACKGROUND_COLOR = "#B1DDC6"
LANGUAGE_A = "English"
LANGUAGE_B = "French"
FONT_SMALL = ("Arial", 40, "italic")
FONT_LARGE = ("Arial", 60, "bold")
DATAFILE = "data/french_words.csv"
DATAFILE_TRAINED = "data/words_to_learn.csv"
SLEEP_TIMER = 3
from tkinter import *
import pandas
import random
import time
# Import data
try:
df = pandas.read_csv(DATAFILE_TRAINED)
# Load initial file if no trained file is found or if all words have been learned
except (FileNotFoundError, pandas.errors.EmptyDataError) as e:
df = | pandas.read_csv(DATAFILE) | pandas.read_csv |
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
    c2 = r.split("\n")[0].startswith(r"&lt;class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
        if not l.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
| tm.reset_display_options() | pandas._testing.reset_display_options |
"""Collect commit data from the user's diffs."""
# pyright: reportMissingImports=false
# pylint: disable=E0401
import pandas as pd
from reporover import get_commit_data
def short_stat(decoded_diff):
"""Get the commit data from git shortstat."""
added = None
deleted = None
changes = decoded_diff.split(",")
for i in changes:
if "+" in i:
added = [int(s) for s in i.split() if s.isdigit()][0]
if "-" in i:
deleted = [int(s) for s in i.split() if s.isdigit()][0]
if not added:
added = 0
if not deleted:
deleted = 0
return (added, deleted)
def staged_stats(stats, file_names, commit_subject):
"""Convert data that will be used for predicting the label."""
decoded_diff = stats.decode("utf-8")
added, deleted = short_stat(decoded_diff)
decoded_files = file_names.decode("utf-8")
files_list = decoded_files.split("\n")
file_extensions = get_commit_data.get_file_extensions(files_list)
test_files_count = get_commit_data.test_files(files_list)
staged_changes_stats = {
"commit_subject": commit_subject,
"num_files": len(files_list),
"test_files": test_files_count,
"test_files_ratio": get_commit_data.get_ratio(
test_files_count, len(files_list)
),
"unique_file_extensions": file_extensions,
"num_unique_file_extensions": len(file_extensions),
"num_lines_added": added,
"num_lines_removed": deleted,
"num_lines_total": added + deleted,
}
return | pd.DataFrame([staged_changes_stats]) | pandas.DataFrame |
'''
This script contains examples of functions that can be used from the Pandas
module.
'''
# Series ---------------------------------------------------------------------
import pandas as pd
import numpy as np
# Creating series
pd.Series(data=[1,2,3,4]) # list
pd.Series(data=[1,2,3,4], index=['a','b','c','d']) # custom index
pd.Series(data={'a':1, 'b':2, 'c':3, 'd':4}) # dictionary
# Indexing series
ser_1 = pd.Series(data=[1,2,3,4], index=['a','b','c','d']); ser_1
ser_1['b']
ser_1[2]
# Joining Series
ser_1 = pd.Series(data=[1,2,3,4], index=['a','b','c','d']); ser_1
ser_2 = pd.Series(data=[1,2,5,4], index=['a','b','e','d']); ser_2
ser_1 + ser_2
# NOTE: Pandas joins series by INDEX. This is why there are 2 NaN values.
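# As an added illustration (ser_sum_filled is a new name, not part of the original
# script): Series.add with fill_value treats the missing index labels as 0, which
# avoids the NaN values produced by the plain '+' above.
ser_sum_filled = ser_1.add(ser_2, fill_value=0); ser_sum_filled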
# DataFrames - Basics --------------------------------------------------------
import pandas as pd
import numpy as np
df_a = pd.DataFrame({'A': list(range(43))[-6:],
'B': [pd.Timestamp('20180725')] * 5 + [None],
'C': ['cat', 'dog', 'fish', None, 'bird', 'snail'],
'D': [2/3, 1/2, None, 8/3, 1/9, 6/2]})
df_b = pd.DataFrame(data=np.random.randn(6, 4),
index=pd.date_range(start = '20180621', periods = 6),
columns=['col{}'.format(num) for num in list('1234')])
df_a
df_b
# Column types
df_a.dtypes
df_b.dtypes
df_a.head()
df_a.tail()
df_a.index
df_b.index
df_b.reset_index()
df_a.columns
df_b.columns
df_a.values
df_b.values
df_b.shift(periods = 1)
df_b.sub(100)
df_b.add(100)
df_a.info()
df_a.describe() # Summary Metrics
df_a.T # Transpose
df_a.transpose() # Same thing as T
df_b.sort_index(axis = 1, ascending = False) # Sort column or row order
df_b.sort_values(by = 'col2', ascending = True) # Sort rows
# DataFrames - Selecting -----------------------------------------------------
import pandas as pd
import numpy as np
# Select Columns
df_b.col1 # NOTE: Don't use this way b/c it will be confused with methods.
df_b['col1']
df_b[['col1', 'col3']]
# Select Rows
df_b[:3]
df_b['20180623':'20180625']
# .loc - Select by INDEX (Label)
df_a
df_b
df_a.loc[2] # row @ index 2
df_b.loc['20180623'] # row @ index '20180623'
df_b.loc[:, ['col1', 'col3']]
df_b.loc['20180623':'20180625', ['col1', 'col3']]
df_a.loc[3, 'C']
df_a.at[3, 'C'] # .at is faster than .loc for single values
df_a.loc[df_a['D'].idxmax()]
df_a.loc[df_a['D'].idxmin()]
# .iloc - Select by POSITION
df_a.iloc[3] # Slice of 3rd row
df_a.iloc[3:5, :2]
df_a.iloc[[1, 4], [0, 2]]
df_a.iloc[1:3, :]
df_a.iloc[2, 2]
df_a.iat[2, 2] # .iat is faster than .iloc for single values
# Boolean Indexing & Filtering
df_b
df_b[df_b>0]
df_b[df_b['col1']<0]
df_b[(df_b['col1']>0) & (df_b['col2']<0)] # Filtering by 2 columns (and)
df_b[(df_b['col1']>0) | (df_b['col2']<0)] # Filtering by 2 columns (or)
df_a[df_a['C'].isin(['fish', 'bird'])]
# String Functions
series_a = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
series_a.str.lower()
series_a.str.upper()
# DataFrames - Sorting -------------------------------------------------------
import pandas as pd
import numpy as np
df_a
df_a.sort_values(by='C')
df_a.sort_values(by=['B','D'], axis=0, ascending=[True,False])
# DataFrames - Creating & Modifying Columns & Rows ---------------------------
# Creating Columns
df_a['E'] = df_a['A']; df_a
# Rename Columns
df_a.rename(columns = {'A':'col_a', 'B':'col_b', 'C':'col_c', 'D':'col_d'})
# Reset & Set Index
df_c = df_b.copy(); df_c
df_c.reset_index()
df_c # Reset did not set in place. Use 'inplace=True' for that.
df_c.reset_index(inplace=True); df_c
df_c.loc[:, 'States'] = pd.Series('CA NY WY OR CO TX'.split()); df_c
df_c.set_index('States')
# Dropping Columns
df_a['E'] = df_a['A']; df_a
df_a.drop(labels='E', axis=1) # Doesn't affect original table
df_a
df_a.drop(labels='E', axis=1, inplace=True); df_a # Affects original table
# Dropping Rows
df_a.drop(labels=2, axis=0) # Doesn't affect original table
df_a
df_a.drop(labels=2, axis=0, inplace=True); df_a # Affects original table
# Replace column values
df_a['C'].replace(['cat', 'dog', 'fish'], ['kittie', 'doggie', 'fishie'])
# DataFrames - Missing Values ------------------------------------------------
import pandas as pd
import numpy as np
# Create dataframe with missing values (NaN)
df_miss = pd.DataFrame({'A':[1,2,np.nan], 'B':[5,np.nan,np.nan], 'C':[1,2,3]}); df_miss
# Find missing values
df_miss.isna()
df_miss.isnull() # same as .isna()
# Drop missing values
df_miss.dropna(axis=0)
df_miss.dropna(axis=1)
df_miss.dropna(axis=0, thresh=2)
df_miss.dropna(axis=0, how = 'any')
df_miss.dropna(axis=0, how = 'all')
# Fill missing values
df_miss.fillna(value='FILLED')
df_miss['A'].fillna(value=df_miss['A'].mean()) # Fill w/ mean to avoid skewing data
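# As an added variation (same idea as the line above, applied to every column):
# passing the per-column means as a Series fills each column with its own mean.
df_miss.fillna(value=df_miss.mean())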
# DataFrames - Multi-Level Indexes -------------------------------------------
import pandas as pd
import numpy as np
# Create multi-level index
multi_index = list(zip(((['G1']*3) + (['G2']*3)), [1,2,3,1,2,3])); multi_index
multi_index = pd.MultiIndex.from_tuples(multi_index); multi_index
# Create multi-level dataframe
df_multi = pd.DataFrame(data=np.random.rand(6,2), index = multi_index, columns=['A','B']); df_multi
# Select data from multi-level dataframe
df_multi.loc['G1']
df_multi.loc['G1'].loc[:, ['A']]
df_multi.loc['G2'].loc[1,'B']
# Get & Set index names
df_multi.index.names
df_multi.index.names = ['lvl1','lvl2']; df_multi
# Get cross sections of multi-level index
df_multi.xs(key='G1', level='lvl1')
df_multi.xs(key=1, level='lvl2') # can select data from any level, which is better than loc for multi-level DFs.
# DataFrames - Math, Statistics, & Operations --------------------------------
import pandas as pd
import numpy as np
# Stats
df_b['col1'].count() # Row count
df_b.mean() # Mean on y-axis
df_b.mean(axis=1) # Mean on x-axis
df_b.shift(periods=1)
df_b
# Unique Values
df_c = pd.DataFrame(data=np.random.randint(low=11, high=14, size=(20,4)), columns=list('abcd')); df_c
df_c['b'].unique() # unique values
df_c['b'].nunique() # # of unique values
df_c['b'].value_counts() # count of unique values
# Apply
df_apply = pd.DataFrame({'A':[1,2,3,4,5,6], 'B':[2,4,6,8,10,12], 'C':np.random.rand(6)}); df_apply
df_apply.apply(np.sqrt) # apply built-in function across all elements
df_apply.apply(lambda x: x**x) # apply custom function across elements
df_apply.apply(lambda x: x.max() - x.min(), axis=0) # apply aggregate function across index
df_apply.apply(lambda x: x.max() - x.min(), axis=1) # apply aggregate function across columns
df_apply['NewCol'] = df_apply['B'].apply(lambda x: 'I yam '+str(x)+' years old.')
# Map
df_map = pd.DataFrame({'DayNum':np.random.randint(0,7,20)}); df_map
dict_numToDay = {0:'Mon', 1:'Tue', 2:'Wed', 3:'Thu', 4:'Fri', 5:'Sat', 6:'Sun'}
df_map['Day'] = df_map['DayNum'].map(dict_numToDay); df_map
# Cumulative Sum
df_apply[['A','B','C']].cumsum()
# Correlation between variables
df_apply[['A','C']].corr()
# DataFrames - Group Functions -----------------------------------------------
import pandas as pd
import numpy as np
# Grouping
df_c = pd.DataFrame({'col1': list('AAAAAABBBBBBCCCCCC'),
'col2': list('IIJJKKIIJJKKIIJJKK'),
'col3': np.random.randn(18),
'col4': np.random.randn(18)})
df_c
# Groupby Functions
df_c.groupby('col2').std()
df_c.groupby('col1').sum()
df_c.groupby('col1').max()
df_c.groupby('col1').min()
df_c.groupby('col1', as_index=False).sum()
df_c.groupby('col2', as_index=False).mean()
df_c.groupby(['col1','col2'], as_index=False).sum()
df_c.groupby(['col1','col2'], as_index=False)['col4'].sum()
df_c['col5'] = df_c.groupby(['col1','col2'], as_index=False)['col4'].transform('sum')
# NOTE: Transform doesn't 'squish' rows by group.
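# As an added comparison (no new data, just shapes): the aggregated sum collapses
# to one row per (col1, col2) group, while transform('sum') keeps all 18 original
# rows, which is why it can be assigned straight into 'col5' above.
df_c.groupby(['col1','col2'], as_index=False)['col4'].sum().shape # (9, 3)
df_c.groupby(['col1','col2'])['col4'].transform('sum').shape # (18,)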
# Summary statistics
df_c.groupby('col1').describe()
df_c.groupby('col1').describe().transpose()
# DataFrames - Merging -------------------------------------------------------
import pandas as pd
import numpy as np
# Make example dataframes
df_a = pd.DataFrame({'A':[1,2,3,4,5,6],
'B':'G1 G1 G1 G2 G2 G2'.split(),
'C':list(np.random.randint(low=10, size=6)),
'D':list(np.random.randint(low=10, size=6))}); df_a
df_b = pd.DataFrame({'A':[0,2,4,6,8,10],
'B':'G1 G1 G1 G2 G2 G2'.split(),
'E':list(np.random.randint(low=10, size=6)),
'F':list(np.random.randint(low=10, size=6))}); df_b
# Merging Vertically
pd.concat([df_a, df_b])
df_a.append(other=df_b)
# NOTE: There are no major differences between pd.concat() vs df.append()
# Merge Horizontally with Merge
df_a
df_b
pd.merge(left=df_a, right=df_b, how='outer', on='A')
pd.merge(left=df_a, right=df_b, how='left', on='A')
pd.merge(left=df_a, right=df_b, how='right', on='A')
pd.merge(left=df_a, right=df_b, how='inner', on='A')
pd.merge(left=df_a, right=df_b, how='outer', left_on=['A','B'], right_on=['A','B'])
# Merge Horizontally with Join
df_a.join(other=df_b.loc[:,['E','F']]) # Joins on index
# DataFrames - Pivoting ------------------------------------------------------
import pandas as pd
import numpy as np
# Pivot Table
df_a = pd.DataFrame({'A':['foo','foo','foo','bar','bar','bar'],'B':['one','one','two','two','one','one'],'C':list('xyxyxy'),'D':list(range(6))}); df_a
df_a.pivot_table(values='D', index=['A','B'], columns='C')
# Unstack
index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ('two', 'a'), ('two', 'b')])
df_b = pd.Series(np.arange(1.0, 5.0), index=index); df_b
df_b.unstack(level=-1)
df_b.unstack(level=0)
df_b.unstack(level=0).unstack() # Reverts it
# T
df_c = pd.DataFrame({'A':list(range(5)), 'B':np.random.randn(5), 'C':list('abcde')}); df_c
df_c.T
df_c.transpose() # Same thing
# DataFrames - Reading & Writing Files ---------------------------------------
import pandas as pd
import numpy as np
str_inDir = 'C:/Users/robbi/Dropbox/Work & Learning/Language - Python/Udemy - Python for Data Science and Machine Learning/Refactored_Py_DS_ML_Bootcamp-master/03-Python-for-Data-Analysis-Pandas/'
# CSV
df_csv = | pd.read_csv(str_inDir+'example') | pandas.read_csv |
import numpy as np
import pandas as pd
import random as random
import pickle
def formatRank_german(df):
tmp = pd.DataFrame()
tmp['y']=df.sort_values('y_pred',ascending=False).index
tmp['y_pred']=tmp.index
tmp['g']=df.sort_values('y_pred',ascending=False).reset_index()['g']
return tmp
def formatRank_compas(df):
tmp = pd.DataFrame()
tmp['y']=df.sort_values('y_pred').index
tmp['y_pred']=tmp.index
tmp['g']=df.sort_values('y_pred')['g']
return tmp
def readFA_IRData(inpath, filename, funct):
return funct(pd.read_pickle(inpath+filename))
def getAllFA_IRData(inpath, funct):
d ={}
d['cb'] = readFA_IRData(inpath, 'ColorblindRanking.pickle', funct)
d['base'] = d['cb'].copy()
d['base']['y_pred']=d['base']['y']
d['feld'] = readFA_IRData(inpath, 'FeldmanRanking.pickle', funct)
d['feld']['y'] = d['cb']['y']
d['fair1'] = readFA_IRData(inpath, 'FairRanking01PercentProtected.pickle', funct)
d['fair2'] = readFA_IRData(inpath, 'FairRanking02PercentProtected.pickle', funct)
d['fair3'] = readFA_IRData(inpath, 'FairRanking03PercentProtected.pickle', funct)
d['fair4'] = readFA_IRData(inpath, 'FairRanking04PercentProtected.pickle', funct)
d['fair5'] = readFA_IRData(inpath, 'FairRanking05PercentProtected.pickle', funct)
d['fair6'] = readFA_IRData(inpath, 'FairRanking06PercentProtected.pickle', funct)
d['fair7'] = readFA_IRData(inpath, 'FairRanking07PercentProtected.pickle', funct)
d['fair8'] = readFA_IRData(inpath, 'FairRanking08PercentProtected.pickle', funct)
d['fair9'] = readFA_IRData(inpath, 'FairRanking09PercentProtected.pickle', funct)
return d
def plainFA_IRData(inpath):
d ={}
d['cb'] = pd.read_pickle(inpath+'ColorblindRanking.pickle')
d['base'] = d['cb'].copy()
d['base']['y_pred']=d['base']['y']
d['feld'] = pd.read_pickle(inpath+'FeldmanRanking.pickle')
d['feld']['y'] = d['cb']['y']
d['fair1'] = pd.read_pickle(inpath+'FairRanking01PercentProtected.pickle')
d['fair2'] = pd.read_pickle(inpath+'FairRanking02PercentProtected.pickle')
d['fair3'] = pd.read_pickle(inpath+'FairRanking03PercentProtected.pickle')
d['fair4'] = | pd.read_pickle(inpath+'FairRanking04PercentProtected.pickle') | pandas.read_pickle |
import pandas as pd
#import sys
import requests
import numpy as np
import utils
from sodapy import Socrata
import re
'''
MIT License
Copyright (c) 2021 <NAME> - dLab - Fundación Ciencia y Vida
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
class hospitalData:
def __init__(self):
client = Socrata("healthdata.gov", None)
self.results = client.get("anag-cw7u", limit=7000)
def retrieveLastFileData(self):
print('reading file...')
df = pd.DataFrame.from_records(self.results)
print('file read')
self.cnt_data = pd.read_csv('../input/utilities/population_NY_2019_versionJH.csv')
self.hosp_list = pd.read_csv('../input/utilities/list_hospital_NY.csv')
df2 = utils.hospitalData(df)
temp_ny = df2.loc[df['state'] == 'NY'].copy()
temp_ny.sort_values('fips', inplace=True)
temp_ny['county'] = temp_ny['fips'].copy()
temp_ny['fips'] = temp_ny['fips'].astype(np.int64)
temp_ny['hospital_pk'] = temp_ny['hospital_pk'].astype(np.int64)
temp_ny['date'] = pd.to_datetime(temp_ny['date'], infer_datetime_format=True)
temp_ny['date'] = pd.to_datetime(temp_ny['date']).dt.strftime('%Y-%m-%d')
temp_ny.reset_index(drop = True, inplace = True)
i = 0
for row in self.cnt_data['fips']:
idx = temp_ny['fips'].loc[temp_ny['fips'] == row].index
temp_ny.loc[idx, 'county'] = self.cnt_data.loc[i, 'county']
i += 1
ny_h = temp_ny.sort_values(by=['date', 'fips'])
for row in self.hosp_list['fips']:
idx = temp_ny.loc[temp_ny['fips'] == row].index
if len(idx) == 0:
                print('There is a new Hospital in the state. Please complete the file ../input/utilities/list_hospital_NY.csv')
ny_h['fips'] = ny_h['fips'].astype(int)
ny_h.reset_index(drop=True,inplace=True)
temp = list(ny_h.columns)
cols = temp[0:2] + [temp[-1]] + temp[2:-1]
ny_h = ny_h[cols]
file1 = pd.read_csv('../output/Hospital-Data/raw_hospitalData_Hospital_NY.csv')
temp_hosp = pd.concat([file1,ny_h], axis=0)
temp_hosp.sort_values(by=['date','fips','hospital_pk'], inplace=True)
identifiers = ['date', 'fips', 'county', 'hospital_pk']
variables = [x for x in temp_hosp.columns if x not in identifiers]
self.ny_hosp = temp_hosp.copy()
self.ny_hosp[variables] = temp_hosp[variables].astype(float)
self.ny_hosp.drop_duplicates(subset=['date','hospital_pk'], inplace=True)
self.ny_hosp.sort_values(by=['date','fips','hospital_pk'], inplace=True)
def groupCounty(self):
ny_hospital = utils.dataDrop(self.ny_hosp)
identifiers = ['date', 'fips', 'county', 'hospital_pk']
variables = [x for x in ny_hospital.columns if x not in identifiers]
self.listDates = ny_hospital['date'].unique()
self.lim = [1.0, 4.0]
for n in range(2):
self.ny_hosp2 = ny_hospital.replace(to_replace='-999999.0', value=self.lim[n],
inplace=False, method=None)
hosp_county_sum = pd.DataFrame()
i = 0
for row in self.listDates:
aux1 = self.ny_hosp2.loc[self.ny_hosp2['date'] == row].copy()
for code in self.cnt_data['fips']:
temp = aux1[variables].loc[aux1['fips'] == code].copy()
idx1 = temp.astype(float).sum(axis=0)
idx1 = idx1.round(2)
idx3 = aux1[identifiers].loc[aux1['fips'] == code].copy()
if idx3.size == 0:
aux6 = self.cnt_data['county'].loc[self.cnt_data['fips'] == code].copy()
cnt = aux6.item()
date = self.ny_hosp2['date'].loc[self.ny_hosp2['date'] == row].copy()
day = date.unique().item()
lista = pd.DataFrame([day, code, cnt, 'NA']).T
idx3[identifiers] = lista
idx3['boundary'] = self.lim[n]
idx3.reset_index(drop=True,inplace=True)
if idx1.size > 0:
idx1_t = idx1.to_frame().T
idaux = idx3.loc[0:0,:]
aux2 = | pd.concat([idaux, idx1_t], axis=1) | pandas.concat |
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment
import json
import re
from datetime import datetime
import numpy as np
comm = re.compile("<!--|-->")
class Team: #change team player object
def __init__(self, team, year, player=None):
self.year = year
self.team = team
self.team_stat = requests.get(
"https://www.basketball-reference.com/teams/{}/{}.html".format(self.team, self.year)).text
self.soup = BeautifulSoup(re.sub("<!--|-->","",self.team_stat),"html.parser")
def team_sum(self, four_factor = False):
summary_container = self.soup.find("table",id="team_misc")
summary_table = summary_container.find("tbody")
team_sum_row = summary_table.find_all("tr")
dict_league_rank = {row['data-stat']:row.get_text() for row in team_sum_row[1]}
dict_team_sum = {row['data-stat']:row.get_text() for row in team_sum_row[0]}
del dict_team_sum['player'], dict_league_rank['player']
df_team = pd.DataFrame(data = [dict_team_sum, dict_league_rank],index = ['TEAM','LEAGUE']).T
for column in df_team.columns:
try:
df_team[column] = pd.to_numeric(df_team[column])
except:
pass
if four_factor:
off_stats = df_team.loc[['tov_pct',
'pace', 'orb_pct', 'efg_pct', 'ft_rate']]
off_stats.columns = ['Team','OFF']
# off_stats['Team'] = off_stats['Team'].apply(lambda x: float(x))
def_stats = df_team.loc[['opp_tov_pct',
'pace', 'drb_pct', 'opp_efg_pct', 'opp_ft_rate']]
def_stats.columns = ['Team','DEF']
# def_stats['Team'] = def_stats['Team'].apply(lambda x: float(x))
return off_stats, def_stats
return df_team
def roster(self, player = None):
roster_containter = self.soup.find("tbody")
roster_vals = roster_containter.find_all('tr')
data_list = []
for row in range(len(roster_vals)):
table_data = roster_vals[row].find_all("td")
data_list.append({table_data[data_row]['data-stat']
:table_data[data_row].get_text() for data_row in range(len(table_data))})
df_roster = pd.DataFrame(data=data_list)
if player:
return df_roster[df_roster['player'].str.contains(player)].T
return df_roster
def injury_report(self,roster_update=False):
injury_table = self.soup.find("table",id="injury")
inj_body = injury_table.find("tbody")
inj_data = inj_body.find_all("tr")
df_injury = pd.DataFrame({
"player": [inj_data[data].find("th").get_text()
for data in range(len(inj_data))],
"team": [inj_data[data].find_all("td")[0].get_text() for data in range(len(inj_data))],
"date": [inj_data[data].find_all("td")[1].get_text() for data in range(len(inj_data))],
"description": [inj_data[data].find_all("td")[2].get_text() for data in range(len(inj_data))]
})
if roster_update == True:
updated = df_injury['description'].apply(lambda x: 0 if 'OUT' in x.upper().split(' ') else 1)
df_injury.description = updated
return df_injury
return df_injury
def per_game(self,player = None):
per_game_table = self.soup.find("table", id="per_game")
table_body = per_game_table.find("tbody")
table_row = table_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']
:table_data[data_row].get_text() for data_row in range(len(table_data))})
df_per_game = pd.DataFrame(data=data_row)
for column in df_per_game.columns:
try:
df_per_game[column] = pd.to_numeric(df_per_game[column])
except:
pass
if player:
return df_per_game[df_per_game['player'].str.contains(player)].T
return df_per_game
def totals(self, player = None):
totals_table = self.soup.find("table", id="totals")
totals_body = totals_table.find("tbody")
table_row = totals_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_totals = pd.DataFrame(data=data_row)
for column in df_totals.columns:
try:
df_totals[column] = pd.to_numeric(df_totals[column])
except:
pass
if player:
return df_totals[df_totals['player'].str.contains(player)].T
return df_totals
def per_minute(self, player = None):
six_table = self.soup.find("table", id="per_minute")
six_body = six_table.find("tbody")
table_row = six_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_minutes = pd.DataFrame(data=data_row)
for column in df_minutes.columns:
try:
df_minutes[column] = pd.to_numeric(df_minutes[column])
except:
pass
if player:
return df_minutes[df_minutes['player'].str.contains(player)].T
return df_minutes
def per_poss(self, player = None):
poss_table = self.soup.find("table", id="per_poss")
poss_body = poss_table.find("tbody")
table_row = poss_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_poss = pd.DataFrame(data=data_row)
for column in df_poss.columns:
try:
df_poss[column] = pd.to_numeric(df_poss[column])
except:
pass
if player:
return df_poss[df_poss['player'].str.contains(player)].T
return df_poss
def advanced(self, player = None):
poss_table = self.soup.find("table", id="advanced")
poss_body = poss_table.find("tbody")
table_row = poss_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_poss = pd.DataFrame(data=data_row)
for column in df_poss.columns:
try:
df_poss[column] = pd.to_numeric(df_poss[column])
except:
pass
if player:
return df_poss[df_poss['player'].str.contains(player)].T
return df_poss
def shooting(self, player = None):
shooting_table = self.soup.find("table", id="shooting")
shooting_body = shooting_table.find("tbody")
table_row = shooting_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_shooting = pd.DataFrame(data=data_row)
for column in df_shooting.columns:
try:
df_shooting[column] = pd.to_numeric(df_shooting[column])
except:
pass
if player:
return df_shooting[df_shooting['player'].str.contains(player)].T
return df_shooting
def pbp(self, player = None):
pbp_table = self.soup.find("table", id="pbp")
pbp_body = pbp_table.find("tbody")
table_row = pbp_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_pbp = pd.DataFrame(data=data_row)
for column in df_pbp.columns:
try:
df_pbp[column] = pd.to_numeric(df_pbp[column])
except:
pass
if player:
return df_pbp[df_pbp['player'].str.contains(player)].T
return df_pbp
    def salaries(self, player = None):
salaries_table = self.soup.find("table", id="salaries2")
salaries_body = salaries_table.find_all("tr")
sal_dict = {salaries_body[row].find("td",class_='left').get_text():salaries_body[row].find("td",class_='right').get_text()
for row in range(1,len(salaries_body))}
df_sal = pd.DataFrame(sal_dict, index=[0]).T
for column in df_sal.columns:
try:
df_sal[column] = | pd.to_numeric(df_sal[column]) | pandas.to_numeric |
#!/usr/bin/env python
from pandas.io.formats.format import SeriesFormatter
from Bio.SeqUtils import seq1
from Bio import SeqIO
import pandas as pd
import argparse
from pathlib import Path
import numpy as np
from summarise_snpeff import parse_vcf, write_vcf
import csv
import re
from functools import reduce
from bindingcalculator import BindingCalculator
from itertools import takewhile
def get_contextual_bindingcalc_values(residues_list,binding_calculator, option, bindingcalc_data = None):
if option == "res_ret_esc":
residues_df = residues_list.copy()
res_ret_esc_df = binding_calculator.escape_per_site(residues_df.loc[(residues_df["Gene_Name"] == "S") & (residues_df["respos"] >= 331) & (residues_df["respos"] <= 531) & (residues_df["respos"].isin(bindingcalc_data["site"].unique())), "respos"])
res_ret_esc_df["Gene_Name"] = "S"
res_ret_esc_df.rename(columns = {"retained_escape" : "BEC_RES"}, inplace = True)
residues_df = residues_df.merge(res_ret_esc_df[["site", "BEC_RES", "Gene_Name"]], left_on = ["Gene_Name", "respos"], right_on = ["Gene_Name", "site"],how = "left")
residues_df.drop(axis = 1 , columns = ["site"], inplace = True)
return(residues_df)
else:
ab_escape_fraction = 1 - binding_calculator.binding_retained(residues_list)
return(ab_escape_fraction)
def summarise_score(summary_df, metric):
#assumes grouping by sample_id and summarising for each sample
summary_df_info = summary_df.groupby("sample_id").agg({metric: ['sum', 'min', 'max']})
summary_df_info.columns = summary_df_info.columns.droplevel(0)
summary_df_info = summary_df_info.reset_index()
summary_df_info = summary_df_info.rename_axis(None, axis=1)
summary_df_mins = pd.merge(left = summary_df, right = summary_df_info[["sample_id", "min"]], left_on = ["sample_id", metric], right_on = ["sample_id", "min"])
summary_df_mins[metric + "_min"] = summary_df_mins["residues"] + ":" + summary_df_mins[metric].fillna("").astype(str)
summary_df_mins = summary_df_mins[["sample_id",metric + "_min"]].groupby("sample_id").agg({metric + "_min" : lambda x : list(x)})
summary_df_mins[metric + "_min"] = summary_df_mins[metric + "_min"].str.join(",")
summary_df_max = pd.merge(left = summary_df, right = summary_df_info[["sample_id", "max"]], left_on = ["sample_id", metric], right_on = ["sample_id", "max"])
summary_df_max[metric + "_max"] = summary_df_max["residues"] + ":" + summary_df_max[metric].fillna("").astype(str)
summary_df_max = summary_df_max[["sample_id",metric + "_max"]].groupby("sample_id").agg({metric + "_max" : lambda x : list(x)})
summary_df_max[metric + "_max"] = summary_df_max[metric + "_max"].str.join(",")
summary_df_sum = summary_df.groupby("sample_id").agg({metric: sum})
summary_df_sum.columns = [metric + "_sum"]
summary_df_final = summary_df_sum.merge(summary_df_max,on='sample_id').merge(summary_df_mins,on='sample_id')
return(summary_df_final)
def sample_header_format(item,sample,vcf,filtered,vcf_loc):
if vcf == True:
if item.startswith("##bcftools_mergeCommand=merge"):
if filtered:
item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
else:
item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
else:
if item.startswith("##reference="):
item = re.sub(r'(?<=muscle\/)[a-zA-Z0-9_\.\/]+(?=\.fasta)', f'{sample}', item)
if item.startswith("##source="):
item = re.sub(r'(?<=muscle\/)[a-zA-Z0-9_\.]+(?=\.fasta)', f'{sample}', item)
item = re.sub(r'(?<=fatovcf\/)[a-zA-Z0-9_\.]+(?=\.vcf)', f'{sample}', item)
if item.startswith("##bcftools_mergeCommand=merge"):
if filtered:
item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
else:
item = re.sub(r'(?<=merged\.vcf )[a-zA-Z0-9_\. \/]+(?=;)', vcf_loc, item)
return(item)
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('input_vcf', metavar='anno_concat.tsv', type=str,
help='Concatenated SPEAR anno file')
parser.add_argument('output_dir', metavar='spear_vcfs/', type=str,
help='Destination dir for summary tsv files')
parser.add_argument('data_dir', metavar='data/', type=str,
help='Data dir for binding calculator data files')
parser.add_argument('input_header', metavar='merged.vcf', type=str,
help='Merged VCF file for header retrieval')
parser.add_argument('sample_list', metavar='', nargs="+",
help='list of inputs to detect no variant samples ')
parser.add_argument('--is_vcf_input', default="False", type=str,
help = "Set input file type to VCF")
parser.add_argument('--is_filtered', default="False", type=str,
help = "Specify files come from filtered directory")
args = parser.parse_args()
Path(f'{args.output_dir}/per_sample_annotation').mkdir(parents=True, exist_ok=True)
    if args.is_vcf_input == "True":
        if args.is_filtered == "True":
infiles = f'{args.output_dir}/intermediate_output/masked/*.masked.vcf'
else:
infiles = f'{args.output_dir}/intermediate_output/indels/*.indels.vcf'
else:
infiles = f'{args.output_dir}/intermediate_output/indels/*.indels.vcf'
with open(args.input_header, 'r') as fobj:
headiter = takewhile(lambda s: s.startswith('#'), fobj)
merged_header = pd.Series(headiter)
merged_header = merged_header.str.replace('\n','')
merged_header = merged_header.str.replace('"','')
cols = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "sample"]
merged_header = merged_header[~merged_header.str.startswith('#CHROM')]
input_file = pd.read_csv(args.input_vcf, sep = "\t", names = ["sample_id", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "end"])
if len(input_file) > 0:
input_file[["AN", "AC", "problem_exc", "problem_filter", "ANN", "SUM", "SPEAR"]] = input_file["INFO"].str.split(';',expand=True)
original_cols = input_file.columns.tolist()
input_file[["AN", "AC", "problem_exc", "problem_filter", "ANN", "SUM", "SPEAR"]] = input_file[["AN", "AC", "problem_exc", "problem_filter", "ANN", "SUM", "SPEAR"]].replace("^[A-Z]+=", "", regex = True)
input_file = input_file.loc[input_file["ANN"] != "no_annotation"]
input_file["SUM"] = input_file["SUM"].str.split(",", expand = False)
input_file = input_file.explode("SUM") #explode on this list of SUM values
input_file[["Gene_Name", "HGVS.c", "Annotation", "variant", "product", "protein_id", "residues"]] = input_file["SUM"].str.split("|", expand = True)
total_variants = input_file.loc[input_file["Annotation"].isin(["intergenic", "non-synonymous"]) == False, ["sample_id", "POS"]].groupby("sample_id").nunique().reset_index()
total_variants.columns = ["sample_id", "total_variants"]
total_variants["total_variants"] = total_variants["total_variants"].astype("int")
consequence_type_counts = input_file[["sample_id", "Annotation"]].groupby(["sample_id", "Annotation"], as_index = False)["Annotation"].value_counts()
consequence_type_counts["consequence_count"] = consequence_type_counts["Annotation"] + ":" + consequence_type_counts["count"].astype(str)
type_string = consequence_type_counts[["sample_id" ,"consequence_count"]].groupby("sample_id").agg({"consequence_count" : lambda x : list(x)})
type_string["consequence_count"] = type_string["consequence_count"].str.join(",")
type_string.columns = ["consequence_type_variants"]
type_string.reset_index(drop = False)
input_file["SPEAR"] = input_file["SPEAR"].str.split(",", expand = False)
input_file = input_file.explode("SPEAR")
input_file[["spear-product", "residues","region", "domain", "feature", "contact_type", "NAb", "barns_class", "bloom_ACE2", "VDS", "serum_escape", "mAb_escape_all_classes", "cm_mAb_escape_all_classes","mAb_escape_class_1","mAb_escape_class_2","mAb_escape_class_3","mAb_escape_class_4", "BEC_RES", "BEC_EF"]] = input_file["SPEAR"].str.split("|", expand = True)
input_file = input_file.loc[input_file["product"] == input_file["spear-product"]]
pattern = re.compile(r"[a-zA-Z\*]+([0-9]+)") #matches any point mutations or deletions , not insertions.
input_file["respos"] = input_file["residues"].str.extract(pattern).fillna(-1).astype("int")
input_file["refres"] = input_file["residues"].str.extract(r"([a-zA-Z\*]+)[0-9]+[a-zA-Z\?\*]+")
input_file["altres"] = input_file["residues"].str.extract(r"[a-zA-Z\*]+[0-9]+([a-zA-Z\?\*]+)")
input_file.loc[input_file["Gene_Name"].str.contains('-'), "Gene_Name"] = "Intergenic_" + input_file.loc[input_file["Gene_Name"].str.contains('-'), "Gene_Name"]
input_file.loc[input_file["Annotation"] == "synonymous_variant", "variant"] = input_file.loc[input_file["Annotation"] == "synonymous_variant", "HGVS.c"]
bindingcalc = BindingCalculator(csv_or_url = f'{args.data_dir}/escape_calculator_data.csv')
bindingcalc_data = | pd.read_csv(f'{args.data_dir}/escape_calculator_data.csv') | pandas.read_csv |
"""
Prepares PUMS Data-Dict CSVs for use as pandas data frames.
todo: review function comments for accuracy
"""
# %%
import pandas as pd
import json
from _constants import recent_years
# dict.column.value
zero_prefix_rules = {
'DetailedAncestryRecode1': 3,
'DetailedAncestryRecode2': 3,
'DetailedHispanicOriginRecode': 2,
'DetailedRaceRecode2': 2,
'DetailedRaceRecode3': 3,
'EducationalAttainment': 2,
'GradeLevelAttending': 2,
'HouseholdType': 2,
'IndustryRecode': 4,
'MigrationPUMA': 5,
'MigrationStateOrCountryRecode': 4,
'OccupationRecode': 4,
'PlaceOfBirthRecode': 3,
'PlaceOfWorkPUMA': 5,
'PlaceOfWorkStateOrForeignRecode': 3,
'Relationship': 2,
'State': 2,
'TimeOfArrivalAtWork': 3,
'TimeOfDepartureForWork': 3,
'TransportationToWork': 2,
'UnitsInStructure': 2,
'VeteranPeriodOfService': 2,
'WhenStructureBuilt': 2,
'WorkExperienceOfHouseholderAndSpouse': 2,
'WorkStatusOfHouseholderOrSpouseInFamilyHome': 2,
'YearlyPropertyTaxes': 2
}
skip_map_columns = [
'MigrationPUMA',
'MigrationStateOrCountryRecode',
'PlaceOfWorkPUMA'
]
custom_transform_columns = {
'IncomeAdjustmentFactor': lambda x: x[:1]+'.'+x[1:],
'RecordType': lambda x: x
}
def get_dict_year(year):
if year > 2017:
return str(year)
if year > 2012:
return '2013-2017'
return None
def get_vals_dict_path(year):
if year > 2017:
return f'./compiled_data/dictionaries/{year}_values.csv'
if year > 2012:
return f'./compiled_data/dictionaries/2013-2017_values.csv'
return None
def get_values_dict(year):
"""
Reads a values csv file, and returns a dict of value mapping
"""
vals_file = get_vals_dict_path(year)
val_dict = {}
# Produce values JSON
values = | pd.read_csv(vals_file) | pandas.read_csv |
# bchhun, {2020-03-22}
import csv
import natsort
import numpy as np
import os
import xmltodict
from xml.parsers.expat import ExpatError
import xml.etree.ElementTree as ET
import pandas as pd
import math
import array_analyzer.extract.constants as constants
"""
functions like "create_<extension>_dict" parse files of <extension> and return:
fiduc: list of dict describing fiducial positions
spots: list of dict, other spot info
repl: list of dict describing 'replicates' AKA antigens
params: dict containing hardware and array parameters
functions like "populate_array_<type>" take <type> from above (like fiduc, spots, repl, param) and
populate np.ndarrays indices correspond to array positions
The values of the arrays depend on the function call
- populate_array_id : Cell id like "spot-6-2", "spot-5-5-" etc..
- populate_array_spots_type : Type like "Diagnostic", "Positive Control"
- populate_array_antigen : Antigen
*** NOTE ***
populating antigens is more complicated for .xml parsing than for .csv or .xlsx
.xml files have keys:"antigen", values: multiple "spot_ID"
.csv or .xlsx can map (col, row) directly to "antigen"
"""
def create_xml_dict(path_):
"""
receives an .xml file generated by the Scienion sciReader software
and returns dictionaries containing info.
:param str path_: Full path of xml file
:return dict fiduc: Fiducials and control info
:return dict spots: Spot info
:return dict repl: Replicate info
:return dict params: Additional parameters
"""
try:
with open(path_) as fd:
doc = xmltodict.parse(fd.read())
except ExpatError:
tree = ET.parse(path_)
xml_data = tree.getroot()
# here you can change the encoding type to be able to set it to the one you need
xmlstr = ET.tostring(xml_data, encoding='utf-8', method='xml')
doc = xmltodict.parse(xmlstr)
try:
# layout of array
layout = doc['configuration']['well_configurations']['configuration']['array']['layout']
# fiducials
fiduc = layout['marker']
# spot IDs
spots = doc['configuration']['well_configurations']['configuration']['array']['spots']['spot']
# replicates
repl = doc['configuration']['well_configurations']['configuration']['array']['spots']['multiplet']
array_params = dict()
array_params['rows'] = int(layout['@rows'])
array_params['columns'] = int(layout['@cols'])
array_params['v_pitch'] = float(layout['@vspace'])
array_params['h_pitch'] = float(layout['@hspace'])
array_params['spot_width'] = float(layout['@expected_diameter'])
array_params['bg_offset'] = float(layout['@background_offset'])
array_params['bg_thickness'] = float(layout['@background_thickness'])
array_params['max_diam'] = float(layout['@max_diameter'])
array_params['min_diam'] = float(layout['@min_diameter'])
except Exception as ex:
raise AttributeError(f"exception while parsing .xml : {ex}")
return fiduc, spots, repl, array_params
def create_csv_dict(path_):
"""
Looks for three .csv files:
"array_parameters.csv" contains array printing parameters as well as hardware parameters
"array_format_type.csv" contains fiducial and control names, locations
"array_format_antigen.csv" contains specific antigen names for only diagnostic spots
Then, parses the .csvs and creates the four dictionaries:
:param path_: list
of strings to the .csv paths
:return:
"""
# assign names to each of the types == params, spot types, antigens
fiduc = list()
csv_antigens = list()
array_params = dict()
for meta_csv in path_:
with open(meta_csv, newline='') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
# header row
if "Parameter" in str(row[0]) or '' in str(row[0]):
continue
# parse params
elif "array_format_parameters" in meta_csv:
array_params[row[0]] = row[1]
# parse fiducials
elif "array_format_type" in meta_csv:
for col, value in enumerate(row[1:]):
pos = {'@row': str(row[0]),
'@col': str(col - 1),
'@spot_type': str(value)}
fiduc.append(pos)
# parse antigens
elif "array_format_antigen" in meta_csv:
for col, value in enumerate(row[1:]):
pos = {'@row': str(row[0]),
'@col': str(col - 1),
'@antigen': str(value)}
csv_antigens.append(pos)
return fiduc, None, csv_antigens, array_params
def create_xlsx_dict(xlsx):
"""
extracts fiducial, antigen, and array parameter metadata from .xlsx sheets
then populates dictionaries or lists with appropriate information
The output dictionaries and lists conform the .xml-style parsing. This is for consistency
:param dict xlsx: Opened xlsx sheets
:return list fiduc: Fiducials and control info
:return list spots: None. spots IDs not needed for .xlsx
:return list repl: Replicate (antigen)
:return dict params: Additional parameters about hardware and array
"""
fiduc = list()
xlsx_antigens = list()
array_params = dict()
# populate array parameters
for idx, value in enumerate(xlsx['imaging_and_array_parameters']['Parameter']):
array_params[value] = xlsx['imaging_and_array_parameters']['Value'][idx]
# Unless specified, run analysis with fiducials only
# Otherwise run with all non-negative spots
fiducials_only = True
if 'fiducials_only' in array_params:
if array_params['fiducials_only'] != 1:
fiducials_only = False
# populate fiduc list
for col in xlsx['antigen_type'].keys()[1:]:
for row, value in enumerate(xlsx['antigen_type'][col]):
if type(value) is float:
if math.isnan(value):
continue
else:
if not fiducials_only and "Negative" not in value:
pos = {'@row': row,
'@col': col,
'@spot_type': "Fiducial"}
fiduc.append(pos)
elif "Fiducial" in value or "xkappa-biotin" in value or "Fiducial, Diagnostic" in value:
pos = {'@row': row,
'@col': col,
'@spot_type': "Fiducial"}
fiduc.append(pos)
# find and populate antigen list
for col in xlsx['antigen_array'].keys()[1:]:
for row, value in enumerate(xlsx['antigen_array'][col]):
if type(value) is float:
if math.isnan(value):
continue
else:
pos = {'@row': row,
'@col': col,
'@antigen': str(value)}
xlsx_antigens.append(pos)
return fiduc, xlsx_antigens, array_params
def create_xlsx_array(path_):
"""
(unfinished attempt to convert arrays in xlsx to np.ndarrays directly, using pandas)
:param path_:
:return:
"""
array_params = dict()
# populate array parameters
params = pd.read_excel(path_, sheet_name='imaging_and_array_parameters')
for idx, value in enumerate(params['Parameter']):
array_params[value] = params['Value'][idx]
# populate fiducials array
fiduc_df = | pd.read_excel(path_, sheet_name='antigen_type') | pandas.read_excel |
# coding: utf-8
__author__ = 'ersh'
__email__ = '<EMAIL>'
__version__ = '1.1113'
#There is a link to the group GitHub below, where you can find the manoelgadi12 library,
#all the files, and instructions
#https://github.com/ersh24/manoelgadi12
################
#L Automated data cleaning
####################
import pandas as pd
import numpy as np
import re
####################
#L Automated data cleaning
####################
def Faa1():
import pandas as pd
import numpy as np
import re
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
np.seterr(invalid='ignore')
print("Original Data Frame\n", data)
#==============================================================================
# GOAL: Clean files trying to get numerical columns:
# - Usually NaN are 0 as there is no value.
# - Whitespaces which can appear when copying data are noisy as they convert
# numbers into strings that are not operable.
# - Outliers usually are errors which can modify average values. Then it is
# better to sustitute them for more reasonable values.
#==============================================================================
# Replace all NaN with 0
data.fillna(0, inplace=True)
# If all the values in the column are float and whitespaces (one or several),
# replaces the latter with 0.
# Removes whitespaces before or after the numbers.
for column in data.columns:
if data[column].dtypes in ["object"]:
change = True
# The column is going to change if all the values (without whitespaces)
# match numbers. Numbers need to have int side, though it could be easily
# changed to accept numbers like .35 as 0.35
for i in range (0,len(data)):
if (re.match(r"[-+]?\d+(\.\d+)?$", str(data[column][i]).strip()) is None):
if (not pd.isnull(data[column][i]) and data[column][i].strip() != ''):
change = False
if change:
# If the value is a set of whitespaces, they are replaced by 0, otherwise
# whitespaces are deleted and finally the column type is changed to numeric
data[column]= data[column].replace(r"^\s+$", '0', regex=True)
data[column]= data[column].replace(r"\s+", '', regex=True)
data[column] = pd.to_numeric(data[column], errors='coerce')
# Replace outliers for the border values
# For each column several values which define it, are created
# Values out of the upper and lower limits are replaced for the limit values
datadict = {}
for column in data.columns:
if (data[column].dtypes in ["int64", "float64"]):
max = np.max(data[column])
p75 = data[column].quantile(0.75)
p50 = data[column].quantile(0.5)
p25 = data[column].quantile(0.25)
min = np.min(data[column])
mean = data[column].mean()
iqr = p75 - p25
lower = p25-1.5*iqr
upper = p75 + 1.5*iqr
valueslist = [lower, min, p25, p50, mean, p75, max, upper]
tagslist = ["LOWER", "MIN", "P25", "P50", "Mean", "P75", "MAX", "UPPER"]
datadict.update({column : pd.Series([data[column].dtypes]+valueslist, index=["Type"]+tagslist)})
# If it is binary don't detect outliers
if (set(data[column]) == {0,1}):
continue
# Loops the values in a column looking for extreme values
            # When it finds extreme values substitutes them, offering several choices
for i in range (0,len(data)):
if (data[column][i] > upper):
data.set_value(i, column, upper)
if (data[column][i] < lower):
data.set_value(i, column, lower)
print ("\nInfo about the columns to transform:\n", pd.DataFrame(datadict),"\n")
print("Transformed Data Frame\n", data)
data.to_csv("transformed.csv", index=False)
####################
#L Human assisted data cleaning
####################
# Human assisted data cleaning
def HAdatacleaning():
####################
#L Human assisted data cleaning
####################
import pandas as pd
import numpy as np
import re
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
def open_file():
filename = filedialog.askopenfilename(
title = "Choose your file",
filetypes = (("csv files", "*.csv"), ("all files", "*.*")),
defaultextension = '.csv',
initialdir = '.')
return filename
def save_file():
filename = filedialog.asksaveasfilename(
title = "Save file",
filetypes = (("csv files", "*.csv"), ("all files", "*.*")),
defaultextension = '.csv',
initialdir = '.',
initialfile = 'transformed.csv')
if filename != "":
data.to_csv(filename, index=False)
def start():
global data
# fich = open_file()
# id_entry.set (fich)
# data = pd.read_csv (fich)
print("Original Data Frame:\n", data)
# Prepare interface to ask about NaN
nan = ttk.Label(win_root, text = "Convert 'NaN'?:")
nan.grid(row=1, column=0, sticky=tk.E)
nanrad1 = tk.Radiobutton(win_root, text="No", variable = nanrad, value=0)
nanrad2 = tk.Radiobutton(win_root, text="0", variable = nanrad, value=1)
nanrad3 = tk.Radiobutton(win_root, text="Most Freq", variable = nanrad, value=2)
nanrad1.grid(row=1, column = 1, sticky=tk.W)
nanrad2.grid(row=1, column = 1)
nanrad3.grid(row=1, column = 1, sticky=tk.E)
nanrad1.deselect()
nanrad2.select()
nanrad3.deselect()
button1.grid(row=1, column=2, sticky=tk.W)
state.config(text = "\nHow to proceed with NaN?" )
def cleannan():
global data
global c
# NaN are not replaced
if nanrad.get() == 0:
state.config(text = "NaN not converted. Select columns to remove whitespaces." )
# NaN are replaced by 0
if nanrad.get() == 1:
data.fillna(0, inplace=True)
state.config(text = "'NaN' -> 0. Select columns to remove whitespaces." )
# NaN are replaced by the most frequent vlaue: mode
if nanrad.get() == 2:
modes = data.mode()
for column in data.columns:
data[column].fillna(modes[column][0], inplace=True)
state.config(text = "NaN to Most Frequent. Select columns to remove whitespaces." )
button1.config(state="disabled")
# button0.config(state="disabled")
button2.focus()
# Prepare intereface to remove whitespaces from columns if all the values can be numeric
c=0
first = True
for column in data.columns:
if data[column].dtypes in ["object"]:
change = True
for i in range (0,len(data)):
if (re.match(r"[-+]?\d+(\.\d+)?$", str(data[column][i]).strip()) is None):
if (not pd.isnull(data[column][i]) and data[column][i].strip() != ''):
change = False
if change:
if first:
a = tk.Label(win_root, text="Do you want to remove whitespaces from numeric columns?")
a.grid(row=4, column=0, sticky=tk.W, columnspan=2)
first=False
a = tk.Label(win_root, text=column)
a.grid(row=5+c, column=0, sticky=tk.E)
enval = tk.StringVar()
en1 = tk.Radiobutton(win_root, text="Yes", variable = enval, value=column)
en2 = tk.Radiobutton(win_root, text="No", variable = enval, value="_NO_")
en1.grid(row=5+c, column = 1, sticky=tk.W)
en2.grid(row=5+c, column = 1)
en1.deselect()
en2.select()
entriesval.append(enval)
c += 1
button2.grid(row=4+c, column=2, sticky=tk.W)
def cleanspaces():
global data
global c2
button2.config(state="disabled")
button3.focus()
mess = "Whitespaces removed from: "
for entry in entriesval:
e = entry.get()
if (e != "_NO_"):
# If the value is a set of whitespaces, they are replaced by 0, otherwise
# whitespaces are deleted and finally the column type is changed to numeric
data[e].replace(r"^\s+$", '0', regex=True, inplace=True)
data[e].replace(r"\s+", '', regex=True, inplace=True)
data[e] = pd.to_numeric(data[e], errors='coerce')
mess += str(entry.get() + ", ")
mess = mess[:-2] + ". What about outliers?"
state.config(text = mess )
# Prepares interface to process outliers. Calculates possible values to sustitute outliers
datadict = {}
c2=0
first = True
for column in data.columns:
if data[column].dtypes in ["int64", "float64"]:
max = np.max(data[column])
p75 = data[column].quantile(0.75)
p50 = data[column].quantile(0.5)
p25 = data[column].quantile(0.25)
min = np.min(data[column])
mean = data[column].mean()
iqr = p75 - p25
valueslist = [p25-1.5*iqr, min, p25, p50, mean, p75, max, p75 + 1.5*iqr]
tagslist = ["LOWER", "MIN", "P25", "P50", "Mean", "P75", "MAX", "UPPER"]
datadict.update({column : pd.Series([data[column].dtypes]+valueslist, index=["Type"]+tagslist)})
# If it is binary don't detect outliers
if (set(data[column]) == {0,1}):
continue
# Loops the values in a column looking for extreme values
                # When it finds extreme values prepares the interface to substitute them, offering several choices
for i in range (0,len(data)):
if (data[column][i] > (p75 + 1.5*iqr)) or (data[column][i] < (p25 - 1.5*iqr)):
if first:
a = tk.Label(win_root, text="How do you want to process outliers?")
a.grid(row=5+c, column=0, columnspan=2, sticky=tk.W)
first=False
a = tk.Label(win_root, text=column + ": " + str(data[column][i]))
a.grid(row=6+c+c2, column=0, sticky=tk.E)
choice = tk.StringVar()
chosen = ttk.Combobox(win_root, width=12, textvariable=choice, value=column, state="readonly")
# There is a choice "ITSELF" if this outlier is not desired to be changed
chosenlist = ["ITSELF: " + str(data[column][i])]
for j in range (0,len(tagslist)):
chosenlist.append(tagslist[j] + ": " + str(valueslist[j]))
chosen['values']= tuple(chosenlist)
chosen.grid(row=6+c+c2, column=1)
c2 += 1
chosen.current(0)
choices.append([column, i, choice])
button3.grid(row=7+c+c2, column=2, sticky=tk.W)
def processoutliers():
global data
mess = "\nOutliers replaced:\n"
# Changes outliers for the selected values
for choice in choices:
col = choice[0]
i = choice[1]
ch = choice[2].get().split()[1]
data.set_value(i, col, ch)
mess += "- " + col + "[" + str(i) + "] -> " +str(ch) + "\n"
mess = mess + "New changes can be proposed.\n"
mess = mess + "Click 'Save Results' after.\n"
mess = mess + "Thank you for using this program!!!"
state.config(text = mess )
print("Transformed Data Frame\n", data)
# print(data.dtypes)
        button4=tk.Button(win_root,text="Save Results",command=lambda: save_file())
button4.grid(row=8+c+c2, column=1, sticky=tk.W)
button5=tk.Button(win_root,text="Exit",command=lambda: root.destroy())
button5.grid(row=8+c+c2, column=1, sticky=tk.E)
button4.focus()
def onFrameConfigure(canvas):
'''Reset the scroll region to encompass the inner frame'''
canvas.configure(scrollregion=canvas.bbox("all"))
# START MAIN
np.seterr(invalid='ignore')
entriesval = []
choices = []
# data = pd.DataFrame
# Creates main window
root = tk.Tk()
root.title("Data Cleaning")
root.geometry("600x800")
root.resizable(width=True, height=True)
canvas = tk.Canvas(root, borderwidth=0) #, background="#ffffff")
win_root = tk.Frame(canvas) #, background="#ffffff")
vsb = tk.Scrollbar(root, orient="vertical", command=canvas.yview)
vsb.pack(side="right", fill="y")
canvas.configure(yscrollcommand=vsb.set)
canvas.pack(side="left", fill="both", expand=True)
canvas.create_window((4,4), window=win_root, anchor="nw")
win_root.bind("<Configure>", lambda event, canvas=canvas: onFrameConfigure(canvas))
label1 = ttk.Label(win_root, text="- Human Assisted Cleaner -\n",font=("Helvetica", 12), foreground="black")
label1.grid(row=0,column=1)
#
# id_entry = tk.StringVar()
# id_entered = ttk.Entry(win_root, width = 30, textvariable = id_entry)
# id_entered.grid(row = 0, column = 1, sticky = tk.E)
# button0 = tk.Button(win_root, text = "Browse computer")
# button0.bind ("<Button-1>", start)
# button0.grid(row=0, column=2, sticky=tk.W)
# button0.focus()
button1 = tk.Button(win_root,text = "Go", command=lambda: cleannan())
button2 = tk.Button(win_root,text = "Go", command=lambda: cleanspaces())
button3 = tk.Button(win_root,text = "Go", command=lambda: processoutliers())
nanrad = tk.IntVar()
state = ttk.Label(win_root, text="\nPlease, press \"Browse computer\" to select a file to clean.")
state.grid(row=1000, column=0, columnspan=3, sticky=tk.W)
start()
root.mainloop()
########################################
#L H6 Human assisted feature selection
########################################
# data = pd.read_csv("Example.csv")
def HAfeatureselection ():
import pandas as pd
import numpy as np
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from sklearn import linear_model
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
def open_file():
filename = filedialog.askopenfilename(
title = "Choose your file",
filetypes = (("csv files", "*.csv"), ("all files", "*.*")),
defaultextension = '.csv',
initialdir = '.')
return filename
def save_file():
filename = filedialog.asksaveasfilename(
title = "Save file",
filetypes = (("csv files", "*.csv"), ("all files", "*.*")),
defaultextension = '.csv',
initialdir = '.',
initialfile = 'transformed.csv')
if filename != "":
data.to_csv(filename, index=False)
def start():
# global data
global c
# fich = open_file()
# id_entry.set (fich)
# data = pd.read_csv (fich)
# state.config(text = "" )
# a = tk.Label(win_root, text="Select the features you want to include:")
# a.grid(row=1, column=0, sticky=tk.W, columnspan=2)
c=0
# Usually the target variable is the last column, so it is not offered as a choice
for column in data.columns[:-1]:
# a = tk.Label(win_root, text=column)
# a.grid(row=2+c, column=0, sticky=tk.E)
enval = tk.StringVar()
en = tk.Checkbutton(win_root, text=column, variable = enval, textvariable = column)
en.grid(row=2+c, column = 0, sticky=tk.W)
en.deselect()
entriesval.append(enval)
c += 1
button1.grid(row=2+c, column=0, sticky=tk.W)
button1.focus()
def checkselected():
global data
global c
checked_fields = []
count = 0
cols = list(data.columns)
for entry in entriesval:
e = entry.get()
if e=="1":
checked_fields.append(cols[count])
count += 1
button5=tk.Button(win_root,text="Exit",command=lambda: root.destroy())
button5.grid(row=2+c, column=1, sticky=tk.W)
# button4.focus()
x = data[checked_fields]
y = data['ob_target']
Fin_model = linear_model.LinearRegression()
Fin_model.fit(x, y)
sw_fin = Fin_model.score(x,y)
a = tk.Label(win_root, text="Using the variables you selected, the model score is "+str(sw_fin))
a.grid(row=4+c, column=0, columnspan = 3, sticky=tk.W)
state.config(text = "Select new variables to optimise your model, press Go to re-score" )
def onFrameConfigure(canvas):
'''Reset the scroll region to encompass the inner frame'''
canvas.configure(scrollregion=canvas.bbox("all"))
# START MAIN
np.seterr(invalid='ignore')
entriesval = []
# choices = []
data = pd.DataFrame
c = 0
# Creates main window
root = tk.Tk()
root.title("Feature Selection")
root.geometry("600x800")
root.resizable(width=True, height=True)
canvas = tk.Canvas(root, borderwidth=0) #, background="#ffffff")
win_root = tk.Frame(canvas) #, background="#ffffff")
vsb = tk.Scrollbar(root, orient="vertical", command=canvas.yview)
vsb.pack(side="right", fill="y")
canvas.configure(yscrollcommand=vsb.set)
canvas.pack(side="left", fill="both", expand=True)
canvas.create_window((4,4), window=win_root, anchor="nw")
win_root.bind("<Configure>", lambda event, canvas=canvas: onFrameConfigure(canvas))
label1 = ttk.Label(win_root, text="Choose features to test:",font=("Helvetica", 12), foreground="black")
label1.grid(row=0,column=0)
# id_entry = tk.StringVar()
# id_entered = ttk.Entry(win_root, width = 30, textvariable = id_entry)
# id_entered.grid(row = 0, column = 1, sticky = tk.E)
#button0 = tk.Button(win_root, text = "Browse computer", command=lambda: start())
#button0.bind ("<Button-1>")
#button0.grid(row=0, column=2, sticky=tk.W)
#button0.focus()
button1 = tk.Button(win_root,text = "Go", command=lambda: checkselected())
# state = ttk.Label(win_root, text="\nPlease, press \"Browse computer\" to select a file to clean.")
state = ttk.Label(win_root, text="")
state.grid(row=1000, column=0, columnspan=3, sticky=tk.W)
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
start()
#def human_variable_selection(data):
root.mainloop()
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
########################################
#A Genetic algorithm
########################################
def ga():
import pandas as pd
import numpy as np
import re
import deap
from deap import creator, base, tools, algorithms
import random
from sklearn import metrics, linear_model
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
#df = pd.read_csv("dev.csv") #DEV-SAMPLE
#dfo = pd.read_csv("oot0.csv")#OUT-OF-TIME SAMPLE
#df = pd.read_csv("/home/ab/Documents/MBD/financial_analytics/variable_creation/data/data.csv")
#len(df.columns)
in_model = []
list_ib = set() #input binary
list_icn = set() #input categorical nominal
list_ico = set() #input categorical ordinal
list_if = set() #input numerical continuous (input float)
list_inputs = set()
output_var = 'ob_target'
for var_name in data.columns:
if re.search('^ib_',var_name):
list_inputs.add(var_name)
list_ib.add(var_name)
elif re.search('^icn_',var_name):
list_inputs.add(var_name)
list_icn.add(var_name)
elif re.search('^ico_',var_name):
list_inputs.add(var_name)
list_ico.add(var_name)
elif re.search('^if_',var_name):
list_inputs.add(var_name)
list_if.add(var_name)
elif re.search('^ob_',var_name):
output_var = var_name
#####
#SETTING UP THE GENETIC ALGORITHM and CALCULATING THE STARTING POOL (STARTING CANDIDATE POPULATION)
#####
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=len(list_inputs))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def evalOneMax(individual):
return sum(individual),
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
NPOPSIZE = (len(data.columns) -2) #RANDOM STARTING POOL SIZE
population = toolbox.population(n=NPOPSIZE)
#####
#ASSESSING GINI ON THE STARTING POOL
#####
dic_gini={}
for i in range(np.shape(population)[0]):
# TRANSLATING DNA INTO A LIST OF VARIABLES (1-81)
var_model = []
for j in range(np.shape(population)[1]): # iterate over the genes (one per input variable)
if (population[i])[j]==1:
var_model.append(list(list_inputs)[j])
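# e.g. an individual such as [1, 0, 1, 0, ...] keeps only the 1st and 3rd names of list_inputs as model inputs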
# ASSESSING GINI INDEX FOR EACH INDIVIDUAL IN THE INITIAL POOL
X_train=data[var_model]
Y_train=data[output_var]
######
# CHANGE_HERE - START: YOU ARE VERY LIKELY USING A DIFFERENT TECHNIQUE BY NOW. SO CHANGE TO YOURS.
#####
'''
rf = RandomForestClassifier(n_estimators=100, random_state=50)
rf1 = rf.fit(X_train, Y_train)
Y_predict = rf1.predict_proba(X_train)
Y_predict = Y_predict[:,1]
'''
Fin_model = linear_model.LinearRegression()
Fin_model.fit(X_train, Y_train)
Y_predict = Fin_model.predict(X_train)
######
# CHANGE_HERE - END: YOU ARE VERY LIKELY USING A DIFFERENT TECHNIQUE BY NOW. SO CHANGE TO YOURS.
#####
######
# CHANGE_HERE - START: HERE IT USES THE DEVELOPMENT GINI TO SELECT VARIABLES, YOU SHOULD USE A DIFFERENT GINI, EITHER THE OOT GINI OR THE SQRT(DEV_GINI*OOT_GINI)
#####
fpr, tpr, thresholds = metrics.roc_curve(Y_train, Y_predict)
auc = metrics.auc(fpr, tpr)
gini_power = abs(2*auc-1)
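# The Gini coefficient equals 2*AUC - 1; abs() keeps the score positive even if the model ranks in reverse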
######
# CHANGE_HERE - END: HERE IT USES THE DEVELOPMENT GINI TO SELECT VARIABLES, YOU SHOULD USE A DIFFERENT GINI, EITHER THE OOT GINI OR THE SQRT(DEV_GINI*OOT_GINI)
#####
gini=str(gini_power)+";"+str(population[i]).replace('[','').replace(', ','').replace(']','')
dic_gini[gini]=population[i]
list_gini=sorted(dic_gini.keys(),reverse=True)
#####
#GENETIC ALGORITHM MAIN LOOP - START
# - ITERATING MANY TIMES UNTIL NO IMPROVEMENT HAPPENS, IN ORDER TO FIND THE OPTIMAL SET OF CHARACTERISTICS (VARIABLES)
#####
sum_current_gini=0.0
sum_current_gini_1=0.0
sum_current_gini_2=0.0
first=0
OK = 1
a=0
while OK: #REPEAT UNTIL THE GINI DOES NOT IMPROVE, AT LEAST A LITTLE, OVER 2 GENERATIONS
a=a+1
OK=0
####
# GENERATING OFFSPRING - START
####
offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.1) #CROSS-X PROBABILITY = 50%, MUTATION PROBABILITY=10%
fits = toolbox.map(toolbox.evaluate, offspring)
for fit, ind in zip(fits, offspring):
ind.fitness.values = fit
population =toolbox.select(offspring, k=len(population))
####
# GENERATING OFFSPRING - END
####
sum_current_gini_2=sum_current_gini_1
sum_current_gini_1=sum_current_gini
sum_current_gini=0.0
#####
#ASSESSING GINI ON THE OFFSPRING - START
#####
for j in range(np.shape(population)[0]):
if population[j] not in dic_gini.values():
var_model = []
for i in range(np.shape(population)[1]): # iterate over the genes (one per input variable)
if (population[j])[i]==1:
var_model.append(list(list_inputs)[i])
X_train=data[var_model]
Y_train=data[output_var]
######
# CHANGE_HERE - START: YOU ARE VERY LIKELY USING A DIFFERENT TECHNIQUE BY NOW. SO CHANGE TO YOURS.
#####
Fin_model = linear_model.LinearRegression()
Fin_model.fit(X_train, Y_train)
Y_predict = Fin_model.predict(X_train)
'''
rf = RandomForestClassifier(n_estimators=100, random_state=50)
rf1 = rf.fit(X_train, Y_train)
Y_predict = rf1.predict_proba(X_train)
Y_predict = Y_predict[:,1]
'''
######
# CHANGE_HERE - END: YOU ARE VERY LIKELY USING A DIFFERENT TECHNIQUE BY NOW. SO CHANGE TO YOURS.
#####
######
# CHANGE_HERE - START: HERE IT USES THE DEVELOPMENT GINI TO SELECT VARIABLES, YOU SHOULD USE A DIFFERENT GINI, EITHER THE OOT GINI OR THE SQRT(DEV_GINI*OOT_GINI)
#####
fpr, tpr, thresholds = metrics.roc_curve(Y_train, Y_predict)
auc = metrics.auc(fpr, tpr)
gini_power = abs(2*auc-1)
######
# CHANGE_HERE - END: HERE IT USES THE DEVELOPMENT GINI TO SELECT VARIABLES, YOU SHOULD USE A DIFFERENT GINI, EITHER THE OOT GINI OR THE SQRT(DEV_GINI*OOT_GINI)
#####
gini=str(gini_power)+";"+str(population[j]).replace('[','').replace(', ','').replace(']','')
dic_gini[gini]=population[j]
#####
#ASSESSING GINI ON THE OFFSPRING - END
#####
#####
#SELECTING THE BEST FITTED AMONG ALL EVER CREATED POPULATION AND CURRENT OFFSPRING - START
#####
list_gini=sorted(dic_gini.keys(),reverse=True)
population=[]
for i in list_gini[:NPOPSIZE]:
population.append(dic_gini[i])
gini=float(i.split(';')[0])
sum_current_gini+=gini
#####
#SELECTING THE BEST FITTED AMONG ALL EVER CREATED POPULATION AND CURRENT OFFSPRING - END
#####
#HAS THE GINI IMPROVED AT LEAST A LITTLE IN THE LAST 2 GENERATIONS?
#print ('sum_current_gini=', sum_current_gini, 'sum_current_gini_1=', sum_current_gini_1, 'sum_current_gini_2=', sum_current_gini_2)
if(sum_current_gini>sum_current_gini_1+0.0001 or sum_current_gini>sum_current_gini_2+0.0001):
OK=1
#####
#GENETIC ALGORITHM MAIN LOOP - END
#####
gini_max=list_gini[0]
gini=float(gini_max.split(';')[0])
features=gini_max.split(';')[1]
####
# PRINTING OUT THE LIST OF FEATURES
#####
use_these = []
f=0
for i in range(len(features)):
if features[i]=='1':
f+=1
use_these.append(list(list_inputs)[i])
X = data[use_these]
Y = data[output_var]
Fin_model = linear_model.LinearRegression()
Fin_model.fit(X, Y)
Y_predict = Fin_model.predict(X)
cv_score = Fin_model.score(X, Y)
'''
rf = RandomForestClassifier(n_estimators=100, random_state=50)
rf1 = rf.fit(X, Y)
Y_predict = rf1.predict_proba(X)
Y_predict = Y_predict[:,1]
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, Y, test_size=0.2, random_state=10)
rfcv = Fin_model.fit(X_train, y_train)
'''
print("Genetic Algorithm Score", cv_score)
print("Using", use_these)
return(cv_score)
# -*- coding: utf-8 -*-
def sw():
from sklearn import linear_model,metrics
import numpy as np
import pandas as pd
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
def xattrSelect(x, idxset):
""" Takes X matrix as a list of lists and returns the subset containing the columns in idxset """
xout = []
for row in x:
xout.append([row[i] for i in idxset])
return xout
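# e.g. (illustrative) xattrSelect([[1, 2, 3], [4, 5, 6]], [0, 2]) returns [[1, 3], [4, 6]]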
#Read the data in: choose the first FA dataset OR the second FA dataset
xLists = []
labels = []
names = pd.Series(data.columns)
firstline = True
for line in data.values:
row = list(line)
# Populate labels list
labels.append(row[-1])
# Remove the label (last column) so it is not used as a feature
row.pop()
# Ensure everything is a float
floatrow = [float(s) for s in row]
xLists.append(floatrow)
#Training and test data sets
indices = range(len(xLists))
xListtrain = [xLists[i] for i in indices if i % 3 != 0]
xListtest = [xLists[i] for i in indices if i % 3 == 0]
labeltest = [labels[i] for i in indices if i % 3 == 0]
labeltrain = [labels[i] for i in indices if i % 3 != 0]
#Stepwise Regression
attributeList = []
index = range(len(xLists[1]))
indexSet = set(index)
indexSeq = []
oosError = []
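# Greedy forward selection: each pass tries every remaining feature and keeps the one whose linear model scores best on the training Gini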
for i in index:
attSet = set(attributeList)
# attributes not currently included
attTryset = indexSet - attSet
# form into list
attTry = [o for o in attTryset]
errorList = []
attTemp = []
# experiment with each remaining feature
# select the feature whose model gives the best training Gini
for j in attTry:
attTemp = [] + attributeList
attTemp.append(j)
#Form training and testing sub matrixes lists of lists
xTraintemp = xattrSelect(xListtrain, attTemp)
xTesttemp = xattrSelect(xListtest, attTemp)
#Convert into arrays because that is what the scikit-learn regression accepts
xTrain = np.array(xTraintemp)
yTrain = np.array(labeltrain)
xTest = np.array(xTesttemp)
yTest = np.array(labeltest)
# use scikit-learn linear regression
Fin_model = linear_model.LinearRegression()
Fin_model.fit(xTrain, yTrain)
a = Fin_model.predict(xTrain)
fpr, tpr, thresholds = metrics.roc_curve(yTrain, a)
auc = metrics.auc(fpr, tpr)
gini_power = abs(2*auc-1)
errorList.append(gini_power)
# Use trained model to generate prediction and calculate rmsError
'''rmsError = np.linalg.norm((yTest - Fin_model.predict(xTest)), 2) / math.sqrt(len(yTest))
errorList.append(rmsError)
attTemp = []
'''
iBest = np.argmax(errorList)  # errorList holds Gini scores here, so pick the largest
attributeList.append(attTry[iBest])
oosError.append(errorList[iBest])
#If you want the sample error
#print("Out of sample error versus attribute set size" )
#print(oosError)
#If you want the indices of the most useful attributes
#print("\n" + "Best attribute indices")
#print(attributeList)
namesList = [names[i] for i in attributeList]
x = data[namesList]
y = data['ob_target']
Fin_model = linear_model.LinearRegression()
Fin_model.fit(x, y)
sw_fin = Fin_model.score(x,y)
print("Step Wise Score", sw_fin)
print("Using", namesList)
return(sw_fin)
def compare_stepwise_genetic():
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
ga()
sw()
####################
#V dummy creation
####################
def dummycreation ():
import pandas as pd
import numpy as np
import re
from deap import creator, base, tools, algorithms
import random
from sklearn import metrics, linear_model
data = pd.read_csv("https://dl.dropboxusercontent.com/u/28535341/dev.csv")
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
def main():
df = pd.read_csv('../../data/complete_df_7.csv')
if df.columns[0] == 'Unnamed: 0':
df.drop('Unnamed: 0', axis=1, inplace=True)
if 'stock_open' in df.columns:
df['stock_open'] = df['stock_open'].astype(float)
#aggregate to the product level across stores
aggregate = df.groupby(['sku_key', 'tran_date'])\
.agg({'sales':'sum', 'selling_price':'mean',
'avg_discount': 'mean', 'stock_open': 'mean'})
aggregate.reset_index(inplace=True)
#Get the categorical variables for each product
categorical = df[['sku_key', 'sku_department', 'sku_subdepartment',
'sku_category', 'sku_subcategory', 'sku_label']]
nw_df = pd.DataFrame([], columns=['sku_key','sku_department',
'sku_subdepartment','sku_category',
'sku_subcategory', 'sku_label'])
for i in categorical['sku_key'].unique():
nw_df = pd.concat([nw_df, pd.DataFrame(categorical[categorical['sku_key'] == i].iloc[0])])
from numpy import loadtxt
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt
n = 25
particle = ['NO2', 'O3', 'NO', 'CO', 'PM1', 'PM2.5', 'PM10']
def actual_vs_predictedpj():
select_model = st.sidebar.radio(
"Choose Model ?", ('Xgboost', 'Randomforest', 'KNN', 'Linear Regression', 'Lasso'))
select_particle = st.sidebar.radio(
"Choose Particle ?", ('NO2', 'O3', 'NO', 'CO', 'PM2.5', 'PM10'))
if select_particle == 'NO2':
loc = 0
if select_particle == 'O3':
loc = 1
if select_particle == 'NO':
loc = 2
if select_particle == 'CO':
loc = 3
# if select_particle == 'PM1':
# loc = 4
if select_particle == 'PM2.5':
loc = 4
if select_particle == 'PM10':
loc = 5
if select_model == 'Xgboost':
get_xgboost(loc)
if select_model == 'KNN':
get_knn(loc)
if select_model == 'Randomforest':
get_randomforest(loc)
if select_model == 'Linear Regression':
get_linear_regression(loc)
if select_model == 'Lasso':
get_lasso(loc)
def get_knn(loc):
knn_y_test = loadtxt('ModelsPJ/knn_y_test.csv', delimiter=',')
knn_y_test_pred = loadtxt('ModelsPJ/knn_y_test_pred.csv', delimiter=',')
l1 = list()
l1.append(['Y_Actual']*n)
l1.append(np.round(knn_y_test[:n, loc], 9))
l1.append(list(range(1, n+1)))
temp1 = np.array(l1).transpose()
x1 = list(range(1, n+1))
chart_data1 = pd.DataFrame(temp1, x1, columns=['Data', particle[loc], 'x'])
l2 = list()
l2.append(['Y_Predicted']*n)
l2.append(np.round(knn_y_test_pred[:n, loc], 9))
l2.append(list(range(1, n+1)))
temp2 = np.array(l2).transpose()
x2 = list(range(n+1, 2*n+1))
chart_data2 = pd.DataFrame(temp2, x2, columns=['Data', particle[loc], 'x'])
frames = [chart_data1, chart_data2]
results = pd.concat(frames)
chart = alt.Chart(results).mark_line().encode(
x='x',
y=particle[loc],
color='Data',
strokeDash='Data',
).properties(
title='Plot of Actual vs Predicted for KNN model for ' +
particle[loc]+' particle'
)
st.altair_chart(chart, use_container_width=True)
def get_xgboost(loc):
xgboost_y_test = loadtxt('ModelsPJ/xgboost_y_test.csv', delimiter=',')
xgboost_y_test_pred = loadtxt(
'ModelsPJ/xgboost_y_test_pred.csv', delimiter=',')
l1 = list()
l1.append(['Y_Actual']*n)
l1.append(np.round(xgboost_y_test[:n, loc], 9))
l1.append(list(range(1, n+1)))
temp1 = np.array(l1).transpose()
x1 = list(range(1, n+1))
chart_data1 = pd.DataFrame(temp1, x1, columns=['Data', particle[loc], 'X'])
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pandas_datareader as web
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
#loading data
company = 'FB'
start = dt.datetime(2012,1,1)
end = dt.datetime(2021,1,11)
data = web.DataReader(company, 'yahoo', start, end)
#preparing the data
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(data['Close'].values.reshape(-1,1))
prediction_days = 60
x_train = []
y_train = []
for x in range(prediction_days, len(scaled_data)):
x_train.append(scaled_data[x-prediction_days:x, 0])
y_train.append(scaled_data[x, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
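# x_train now has shape (samples, prediction_days, 1): each sample holds the previous 60 scaled closing prices, and y_train the following day's scaled close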
#Creating the Neural Network Model
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, epochs=25, batch_size=32)
test_start = dt.datetime(2021,1,11)
test_end = dt.datetime.now()
test_data = web.DataReader(company, 'yahoo', test_start, test_end)
actual_prices = test_data['Close'].values
total_dataset = pd.concat((data['Close'], test_data['Close']), axis=0)
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
"values, dtype",
[
([], "object"),
([1, 2, 3], "int64"),
([1.0, 2.0, 3.0], "float64"),
(["a", "b", "c"], "object"),
(["a", "b", "c"], "string"),
([1, 2, 3], "datetime64[ns]"),
([1, 2, 3], "datetime64[ns, CET]"),
([1, 2, 3], "timedelta64[ns]"),
(["2000", "2001", "2002"], "Period[D]"),
([1, 0, 3], "Sparse"),
([pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(3, 4)], "interval"),
],
)
@pytest.mark.parametrize(
"mask", [[True, False, False], [True, True, True], [False, False, False]]
)
@pytest.mark.parametrize("indexer_class", [list, pd.array, pd.Index, pd.Series])
@pytest.mark.parametrize("frame", [True, False])
def test_series_mask_boolean(values, dtype, mask, indexer_class, frame):
# In case len(values) < 3
index = ["a", "b", "c"][: len(values)]
mask = mask[: len(values)]
obj = pd.Series(values, dtype=dtype, index=index)
if frame:
if len(values) == 0:
# Otherwise obj is an empty DataFrame with shape (0, 1)
obj = pd.DataFrame(dtype=dtype)
else:
obj = obj.to_frame()
if indexer_class is pd.array:
mask = pd.array(mask, dtype="boolean")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run nonparametric ridge estimation.
"""
import os
from optparse import OptionParser
import networkx as nx
import numpy as np
import pandas as pd
import time
from networkx.algorithms.centrality import betweenness_centrality
from plotnine import *
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.io import mmwrite
from sklearn.decomposition import PCA
import quasildr
from quasildr import structdr
from quasildr.graphdr import *
from quasildr.utils import *
parser = OptionParser()
# main options
parser.add_option("--input", dest="input", type="str",
help="Input file")
parser.add_option("--anno_file", dest="anno_file", type="str",
help="Annotation file for plotting")
parser.add_option("--anno_column", dest="anno_column", type="str", default="group_id",
help="Name of the column to use in annotation file")
parser.add_option("--output", dest="output", type="str",
help="Output prefix")
parser.add_option("--suffix", dest="suffix", type="str", default="",
help="Output suffix")
parser.add_option("--niter", dest="niter", type="int", default=30,
help="Number of iterations. Default is 30.")
parser.add_option("--ndim", dest="ndim", type="int", default=15,
help="Number of input dimensions to use. Default is 15.")
parser.add_option("--bw", dest="bw", type="float", default=0.,
help="Gaussian KDE kernel bandwidth. "
"This is not needed if `automatic_bw` is specified. Default is 0.")
parser.add_option("--adaptive_bw", dest="adaptive_bw", type="int", default=10,
help="Set data point-specific minimum bandwidth to its distance to "
"`adaptive_bw`-th nearest neighbor. Default is 10.")
parser.add_option("--automatic_bw", dest="automatic_bw", type="float", default=2.0,
help="Use pca to select bw. Default is 2.0.")
parser.add_option("--relaxation", dest="relaxation", type="float", default=0,
help="Ridge dimensionality relaxation. Default is 0.")
parser.add_option("--stepsize", dest="stepsize", type="float", default=0.5,
help="Step size relative to standard SCMS. Default is 0.5.")
parser.add_option("--method", dest="method", type="str", default="LocInv",
help="Method. Default is LocInv")
parser.add_option("--batchsize", dest="batchsize", type="int", default=500,
help="Decreasing the batch size reduces memory consumption. Default is 500.")
parser.add_option("--ridge_dimensionality", dest="ridge_dimensionality", type="float", default=1, help="ridge dim")
parser.add_option("--bigdata", action="store_true", dest="bigdata",
help="Speed up computation for big datasets with multilevel data representation"
" (Experimental feature).")
parser.add_option("--n_jobs", dest="n_jobs", type="int", default=1, help="number of jobs")
# extract backbones options
parser.add_option("--extract_backbone", action="store_true", dest="extract_backbone")
parser.add_option("--k", dest="k", type="int", default=50, help="Number of NN to prescreen edges")
parser.add_option("--maxangle", dest="maxangle", type="float", default=90.,
help="Prefilter edges by angles")
np.random.seed(0)
(opt, args) = parser.parse_args()
docstr = "niter" + str(opt.niter) + "_ndim" + str(opt.ndim) + "_bw" + str(opt.bw) + \
"_adaptive_bw" + str(opt.adaptive_bw) + \
"_automatic_bw" + str(opt.automatic_bw) + "_relaxation" + str(opt.relaxation) + \
"_ridge" + str(opt.ridge_dimensionality) + \
"_method" + str(opt.method) + "_stepsize" + str(opt.stepsize) + "_maxangle" + str(opt.maxangle) + "_k" + str(
opt.k) + opt.suffix + ("_big" if opt.bigdata else "")
outdir = os.path.dirname(opt.output)
if not os.path.exists(outdir):
os.makedirs(outdir)
data = pd.read_csv(opt.input, delimiter='\t')
data = np.asarray(data)
data = data / data[:, 0].std(axis=0)
if opt.ndim > 0:
data = data[:, :opt.ndim]
if opt.automatic_bw != 0:
bw = PCA(np.minimum(20, data.shape[1])).fit_transform(data).std(axis=0)[-1] * np.sqrt(opt.ndim)
bw *= opt.automatic_bw
else:
bw = opt.bw
print("bw: "+str(bw))
if opt.anno_file != "":
anno = pd.read_csv(opt.anno_file, sep='\t')
if opt.bigdata and data.shape[0] > 5000:
datas = structdr.multilevel_compression(data)
s = structdr.Scms(datas[0], bw=bw, min_radius=opt.adaptive_bw)
else:
s = structdr.Scms(data, bw=bw, min_radius=opt.adaptive_bw)
T, ifilter = s.scms(data, method=opt.method, stepsize=opt.stepsize, n_iterations=opt.niter, threshold=0,
ridge_dimensionality=opt.ridge_dimensionality,
relaxation=opt.relaxation, n_jobs=opt.n_jobs)
np.savetxt(X=T, fname=opt.output + '.' + docstr + '.trajectory')
np.savetxt(X=ifilter, fname=opt.output + '.' + docstr + '.trajectory.ifilter')
if opt.anno_file != "":
df = pd.DataFrame({'x': T[:, 0], 'y': T[:, 1], 'c': anno[opt.anno_column].map(str)})
p = ggplot(df, aes('x', 'y', color='c')) + geom_point(size=0.5) + theme_minimal()
else:
df = pd.DataFrame({'x': T[:, 0], 'y': T[:, 1]})
p = ggplot(df, aes('x', 'y')) + geom_point(size=0.5) + theme_minimal()
p.save(opt.output + '.' + docstr + '.pdf')
if opt.extract_backbone:
g_simple, g_mst, ridge_dims = extract_structural_backbone(T, data, s, max_angle=opt.maxangle,
relaxation=opt.relaxation)
mmwrite(opt.output + '.' + docstr + '.g_simple.mm', g_simple)
mmwrite(opt.output + '.' + docstr + '.g_mst.mm', g_mst)
np.savetxt(opt.output + '.' + docstr + '.ridge_dims', ridge_dims, fmt='%d')
df = pd.DataFrame({'x': T[:, 0], 'y': T[:, 1], 'c': anno[opt.anno_column].map(str)})
df_e = pd.DataFrame({'xs': T[g_simple.nonzero()[0], 0], 'xe': T[g_simple.nonzero()[1], 0],
'ys': T[g_simple.nonzero()[0], 1], 'ye': T[g_simple.nonzero()[1], 1]})
p = ggplot(df) + \
geom_segment(mapping=aes(x='xs', xend='xe', y='ys', yend='ye'), data=df_e, size=0.5) + \
geom_point(mapping=aes('x', 'y', color='c'), size=0.5) + theme_minimal()
p.save(opt.output + '.' + docstr + '.g_simple.pdf')
df_e = pd.DataFrame({'xs': T[g_mst.nonzero()[0], 0], 'xe': T[g_mst.nonzero()[1], 0],
'ys': T[g_mst.nonzero()[0], 1], 'ye': T[g_mst.nonzero()[1], 1]})
p = ggplot(df) + \
geom_segment(mapping=aes(x='xs', xend='xe', y='ys', yend='ye'), data=df_e, size=0.5) + \
geom_point(mapping=aes('x', 'y', color='c'), size=0.5) + theme_minimal()
p.save(opt.output + '.' + docstr + '.g_mst.pdf')
G_simple = nx.from_scipy_sparse_matrix(g_simple)
nodes_bc = betweenness_centrality(G_simple, k=np.minimum(500, g_simple.shape[0]), normalized=False)
pd.DataFrame(pd.Series(nodes_bc))
import os
import numpy as np
import pandas as pd
import json
import lib.galaxy_utilities as gu
from astropy.io import fits
from tqdm import tqdm
aggregated_models = pd.read_pickle('lib/models.pickle')['tuned_aggregate']
def get_n_arms(gal):
keys = (
't11_arms_number_a31_1_debiased',
't11_arms_number_a32_2_debiased',
't11_arms_number_a33_3_debiased',
't11_arms_number_a34_4_debiased',
't11_arms_number_a36_more_than_4_debiased',
)
return sum((i + 1) * gal[k] for i, k in enumerate(keys))
def get_winding_score(gal):
keys = (
't10_arms_winding_a28_tight_debiased',
't10_arms_winding_a29_medium_debiased',
't10_arms_winding_a30_loose_debiased',
)
return sum((i + 1) * gal[k] for i, k in enumerate(keys))
def get_pitch_angle(gal):
m = get_n_arms(gal)
w = get_winding_score(gal)
return 6.37 * w + 1.3 * m + 4.34
def has_comp(annotation, comp=0):
try:
drawn_shapes = annotation[comp]['value'][0]['value']
return len(drawn_shapes) > 0
except (IndexError, KeyError):
return False
if __name__ == '__main__':
loc = os.path.abspath(os.path.dirname(__file__))
# open the GZ2 catalogue
NSA_GZ = fits.open(os.path.join(loc, '../source_files/NSA_GalaxyZoo.fits'))
sid_list_loc = os.path.join(loc, 'lib/subject-id-list.csv')
sid_list = pd.read_csv(sid_list_loc)
import pandas as pd
import numpy as np
import scipy.stats as stats
import copy
import sys
import os
from argotools.config import *
import time
''' Auxiliary functions '''
def preds2matrix(preds_dict):
# Receives preds in Predictor object format.
pred_arrays = []
for model, preds in preds_dict.items():
pred_arrays.append(np.vstack(preds))
return np.hstack(pred_arrays)
def convert_index(dataFrame, index_type):
'Function that transforms a PDs dataframe index before data proccessing'
if index_type == 'date':
dataFrame.index = pd.to_datetime(dataFrame.index)
return dataFrame
def np2dataframe(index, column_titles, data):
'''
Generates a dataframe based off Predictor object's data.
Input:
index (list or pandas timeseries): the index for each of the data samples
(N of index labels == N of rows in data)
column_titles (str or list of str) : Same N than N of columns in data
data: data to be converted.
'''
s = np.shape(data)
if isinstance(column_titles,str):
column_titles = [column_titles]*np.shape(data)[1]
for i, title in enumerate(column_titles):
if i > 0:
column_titles[i] += '_{0}'.format(i)
return pd.DataFrame(data=data, index=index, columns=column_titles)
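# e.g. (illustrative) np2dataframe(dates, 'pred', np.zeros((len(dates), 3))) yields columns ['pred', 'pred_1', 'pred_2']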
def gen_folder(folder_name, c = 0):
if c == 0:
if os.path.exists(folder_name):
new_folder_name = gen_folder(folder_name, c+1)
return new_folder_name
else:
os.makedirs(folder_name)
return folder_name
else:
if os.path.exists(folder_name+'_{0}'.format(c)):
new_folder_name = gen_folder(folder_name, c+1)
return new_folder_name
else:
os.makedirs(folder_name+'_{0}'.format(c))
return folder_name+'_{0}'.format(c)
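# e.g. gen_folder('results') creates and returns 'results', or 'results_1' (then 'results_2', ...) if it already exists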
'''
Loading functions for data class
A loading function must satisfy the following conditions:
Input:
fname : path to the file to load.
index_type : 'date' / 'other'.
index_label :
start_period :
end_period :
Output:
target = The gold standard for the experiment
features = The input to the prediction algorithms
benchmarks = if found, return other models to compare with during
analysis phase. If not found in the ordered file, they
can be loaded using the load_benchmark() function.
Standard structuring of ordered data is as follows:
- CSV file
- First column : index (e.g. dates for the timeseries in disease forecasting)
- Second column : gold standard (e.g. Flunet series, CDC health reports, etc)
- Next n columns : Features
If the ordered data file contains benchmarks or it doesnt follow the previously
mentioned structure,
'''
def read_standard_features(path_to_file, start_period, end_period):
try:
dataFrame = pd.read_csv(path_to_file, index_col=0)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import os
import json
# Feature selection strategies
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectFromModel
# Scale feature scores
from sklearn.preprocessing import MinMaxScaler
# SKLearn estimators list
from sklearn.utils import all_estimators
# MLRun utils
from mlrun.mlutils.plots import gcf_clear
from mlrun.utils.helpers import create_class
from mlrun.artifacts import PlotArtifact
def show_values_on_bars(axs, h_v="v", space=0.4):
def _show_on_single_plot(ax):
if h_v == "v":
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = int(p.get_height())
ax.text(_x, _y, value, ha="center")
elif h_v == "h":
for p in ax.patches:
_x = p.get_x() + p.get_width() + float(space)
_y = p.get_y() + p.get_height()
value = int(p.get_width())
ax.text(_x, _y, value, ha="left")
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
def plot_stat(context,
stat_name,
stat_df):
gcf_clear(plt)
# Add chart
ax = plt.axes()
stat_chart = sns.barplot(x=stat_name,
y='index',
data=stat_df.sort_values(stat_name, ascending=False).reset_index(),
ax=ax)
plt.tight_layout()
for p in stat_chart.patches:
width = p.get_width()
plt.text(5 + p.get_width(), p.get_y() + 0.55 * p.get_height(),
'{:1.2f}'.format(width),
ha='center', va='center')
context.log_artifact(PlotArtifact(f'{stat_name}', body=plt.gcf()),
local_path=os.path.join('plots', 'feature_selection', f'{stat_name}.html'))
gcf_clear(plt)
def feature_selection(context,
df_artifact,
k=2,
min_votes=0.5,
label_column: str = 'Y',
stat_filters=['f_classif', 'mutual_info_classif', 'chi2', 'f_regression'],
model_filters={'LinearSVC': 'LinearSVC',
'LogisticRegression': 'LogisticRegression',
'ExtraTreesClassifier': 'ExtraTreesClassifier'},
max_scaled_scores=True):
"""Applies selected feature selection statistical functions
or models on our 'df_artifact'.
Each statistical function or model will vote for it's best K selected features.
If a feature has >= 'min_votes' votes, it will be selected.
:param context: the function context
:param k: number of top features to select from each statistical
function or model
:param min_votes: minimal number of votes (from a model or by statistical
function) needed for a feature to be selected.
Can be specified by percentage of votes or absolute
number of votes
:param label_column: ground-truth (y) labels
:param stat_filters: statistical functions to apply to the features
(from sklearn.feature_selection)
:param model_filters: models to use for feature evaluation, can be specified by
model name (ex. LinearSVC), formalized json (contains 'CLASS',
'FIT', 'META') or a path to such json file.
:param max_scaled_scores: produce feature scores table scaled with max_scaler
"""
# Read input DF
df_path = str(df_artifact)
context.logger.info(f'input dataset {df_path}')
if df_path.endswith('csv'):
df = pd.read_csv(df_path)
elif df_path.endswith('parquet') or df_path.endswith('pq'):
df = pd.read_parquet(df_path)
# Set feature vector and labels
y = df.pop(label_column)
X = df
# Create selected statistical estimators
stat_functions_list = {stat_name: SelectKBest(create_class(f'sklearn.feature_selection.{stat_name}'), k)
for stat_name in stat_filters}
requires_abs = ['chi2']
# Run statistic filters
selected_features_agg = {}
stats_df = pd.DataFrame(index=X.columns)
for stat_name, stat_func in stat_functions_list.items():
try:
# Compute statistics
params = (abs(X), y) if stat_name in requires_abs else (X, y)  # chi2 requires non-negative feature values
stat = stat_func.fit(*params)
# Collect stat function results
stat_df = pd.DataFrame(index=X.columns,
columns=[stat_name],
data=stat.scores_)
plot_stat(context, stat_name, stat_df)
stats_df = stats_df.join(stat_df)
# Select K Best features
selected_features = X.columns[stat_func.get_support()]
selected_features_agg[stat_name] = selected_features
except Exception as e:
context.logger.info(f"Couldn't calculate {stat_name} because of: {e}")
# Create models from class name / json file / json params
all_sklearn_estimators = dict(all_estimators()) if len(model_filters) > 0 else {}
selected_models = {}
for model_name, model in model_filters.items():
if '.json' in model:
current_model = json.load(open(model, 'r'))
ClassifierClass = create_class(current_model["META"]["class"])
selected_models[model_name] = ClassifierClass(**current_model["CLASS"])
elif model in all_sklearn_estimators:
selected_models[model_name] = all_sklearn_estimators[model_name]()
else:
try:
current_model = json.loads(model) if isinstance(model, str) else model
ClassifierClass = create_class(current_model["META"]["class"])
selected_models[model_name] = ClassifierClass(**current_model["CLASS"])
except:
context.logger.info(f'unable to load {model}')
# Run model filters
models_df = pd.DataFrame(index=X.columns)
import os
import numpy as np
import pandas as pd
import pygrib
from tqdm import tqdm
import logging
import datetime
#########################
###### Definitions ######
#########################
abs_base_path = os.path.dirname(os.path.abspath(__file__))
'''/home/collin/visibility-China/time_series_analysis/src/data'''
permitted_fts = ["{0:0=3d}".format(ft) for ft in range(0, 22, 3)]
param_levels = ['Visibility_0', 'Wind speed (gust)_0',
'Temperature_1000', 'Relative humidity_1000', 'U component of wind_1000', 'V component of wind_1000',
'Surface pressure_0', 'Orography_0', 'Temperature_0',
'2 metre temperature_2', '2 metre dewpoint temperature_2', '2 metre relative humidity_2',
'10 metre U wind component_10', '10 metre V wind component_10',
'Precipitation rate_0', 'Pressure reduced to MSL_0']
fieldnames = ["VIS", "WG_Surf",
"T_1000", "RH_1000", "U_1000", "V_1000",
"P_Surf", "HGT", "T_Surf",
"T_2m", "DT_2m", "RH_2m",
"U_10m", "V_10m",
"PR", "MSLP"]
name_converter_dict = {param_level:fieldnames[pl] for pl, param_level in enumerate(param_levels)}
parameterNames = [param_level.split("_")[0] for param_level in param_levels]
#########################
####### Functions #######
#########################
def logger_setup():
ima = datetime.datetime.now()
logger_date = ima.strftime("%Y%m%d_%H:%M:%S")
# Gets or creates a logger
logger = logging.getLogger(__name__)
# set log level
logger.setLevel(logging.DEBUG)
# define file handler and set formatter
file_handler = logging.FileHandler(abs_base_path + "/../../data/processed_logs/{}.log".format(logger_date))
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
file_handler.setFormatter(formatter)
# add file handler to logger
logger.addHandler(file_handler)
return(logger)
### Set up logger
logger = logger_setup()
def get_filepaths(directory):
file_paths = []
for root, directories, files in os.walk(directory):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
return(sorted(file_paths))
def find_nearest(a, v):
i = (np.abs(a - v)).argmin()
return(a[i])
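# e.g. find_nearest(np.array([0.0, 0.25, 0.5]), 0.4) returns 0.5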
def get_grb_info_GFS(file, stn_latlon_dict):
info_dict = {}
stn_list = list(stn_latlon_dict.keys())
info_dict["airport"] = stn_list
grib_file = pygrib.open(file)
try:
grbs = grib_file.select(name = parameterNames,
level = [0, 2, 10, 1000])
except ValueError:
grbs = None
if grbs == None:
'''info_dict["tag"] = "No Tag"
dummy_array = np.arange(81*141).reshape(81, 141)
info_dict["values"], info_dict["lats"], info_dict["lons"] = dummy_array, dummy_array, dummy_array'''
info_df = pd.DataFrame()
else:
single_grb = grbs[0]
'''for key in single_grb.keys():
print("{0}: {1}".format(key, single_grb[key]))'''
ft = "{0:0=3d}".format(single_grb.forecastTime)
bt = "{0:0=2d}".format(single_grb.hour)
day = "{0:0=2d}".format(single_grb.day)
month = "{0:0=2d}".format(single_grb.month)
year = "{0:0=2d}".format(single_grb.year)
tag = year + month + day + "_" + bt + "_" + ft
logger.info("Now processing tag: {}".format(tag))
### Skip case if forecast too long
if ft not in permitted_fts:
info_df = pd.DataFrame()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pandas as pd
from trading_ig import IGService
from trading_ig.config import config
from datetime import timedelta
import requests_cache
import time
import os
import json
counter = -1
ig_service = None
list_of_instruments = []
def login():
expire_after = timedelta(hours=1)
session = requests_cache.CachedSession(
cache_name='cache', backend='sqlite', expire_after=expire_after
)
api_key = increment_api_key()
global ig_service
# no cache
ig_service = IGService(
config.username, config.password, api_key, config.acc_type
)
ig_service.create_session()
def main():
login()
map_dataframe = ig_service.fetch_top_level_navigation_nodes()
# list_of_stringNodes = (map_dataframe["nodes"])["id"].tolist()
template_recurse(map_dataframe, ig_service)
def template_recurse(map_dataframe, ig_service):
node_list = map_dataframe["nodes"]["id"].tolist()
# node_list = ["97601","195235"]
name_list = map_dataframe["nodes"]["name"].tolist()
# name_list = ["Indices","Forex"]
set_nodes = set()
recurse_overNodes(name_list, node_list, set_nodes, ig_service, [])
def recurse_overNodes(name_list, node_list, set_nodes, ig_service, current_name):
while len(node_list) != 0 :
# needs to be -1 as the last item gets popped
node = node_list[-1]
if node in set_nodes:
node_list.pop()
name_list.pop()
continue
current_name.append(name_list[-1])
set_nodes.add(node)
map_dataframe = get_node_to_node_data(ig_service, node)
if (map_dataframe["nodes"].size == 0):
# save the data
try:
epic_id = map_dataframe["markets"]["epic"][0]
if epic_id == "":
raise Exception("No id")
# save data
map_data = get_details_about_epic(ig_service, epic_id)
map_data["instrument"]["location"] = "_".join(current_name)
# data_string = json.dumps(map_data)
temp = map_data["instrument"].copy()
temp.update(map_data["dealingRules"])
temp.update(map_data["snapshot"])
global list_of_instruments
list_of_instruments.append(temp)
save_to_file(list_of_instruments)
except Exception as e:
print(e)
current_name.pop()
else:
temp_names = map_dataframe["nodes"]["name"].tolist()
temp_id = map_dataframe["nodes"]["id"].tolist()
recurse_overNodes(temp_names, temp_id, set_nodes, ig_service , current_name)
current_name.pop()
def get_node_to_node_data(ig_service,node):
while(True):
try:
map_dataframe = ig_service.fetch_sub_nodes_by_node(node=node)
return map_dataframe
except Exception as e:
print(e)
login()
def get_details_about_epic(ig_service, epic):
while(True):
try:
map_of_data = ig_service.fetch_market_by_epic(epic)
return map_of_data
except Exception as e:
print(e)
login()
# time.sleep(2)
def save_to_file(data):
try:
directory = r"D:/Stock_Analysis/ig-markets-api-python-library-master/Data/SpreadBetting/"
if not os.path.exists(directory):
os.mkdir(directory)
file = "instruments_new.csv"
filename= directory+file
df = pd.DataFrame(data)
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from scipy.special import logit
from sklearn.linear_model import LogisticRegression
from sklearn.isotonic import IsotonicRegression
from benchmark_tools.constants import METHOD
import benchmark_tools.classification as btc
import benchmark_tools.data_splitter as ds
LABEL = 'label'
class Calibrator(object):
def fit(self, y_pred, y_true):
raise NotImplementedError
def predict(self, y_pred):
raise NotImplementedError
@staticmethod
def validate(y_pred, y_true=None):
y_pred = np.asarray(y_pred)
assert y_pred.ndim == 1
assert y_pred.dtype.kind == 'f'
assert np.all(0 <= y_pred) and np.all(y_pred <= 1)
if y_true is not None:
y_true = np.asarray(y_true)
assert y_true.shape == y_pred.shape
assert y_true.dtype.kind == 'b'
return y_pred, y_true
class Identity(Calibrator):
def fit(self, y_pred, y_true):
assert y_true is not None
Calibrator.validate(y_pred, y_true)
def predict(self, y_pred):
Calibrator.validate(y_pred)
# Could make copy to be consistent with other methods, but prob does
# not matter.
return y_pred
class Linear(Calibrator):
def __init__(self):
self.clf = LogisticRegression()
def fit(self, y_pred, y_true):
assert y_true is not None
y_pred, y_true = Calibrator.validate(y_pred, y_true)
self.clf.fit(y_pred[:, None], y_true)
def predict(self, y_pred):
y_pred, _ = Calibrator.validate(y_pred)
y_calib = self.clf.predict_proba(y_pred[:, None])[:, 1]
return y_calib
class Isotonic(Calibrator):
def __init__(self):
self.clf = IsotonicRegression(y_min=0.0, y_max=1.0,
out_of_bounds='clip')
def fit(self, y_pred, y_true):
assert y_true is not None
y_pred, y_true = Calibrator.validate(y_pred, y_true)
self.clf.fit(y_pred, y_true)
def predict(self, y_pred):
y_pred, _ = Calibrator.validate(y_pred)
y_calib = self.clf.predict(y_pred)
return y_calib
class Beta1(Calibrator):
def __init__(self, epsilon=1e-12):
self.epsilon = epsilon
self.clf = LogisticRegression()
def fit(self, y_pred, y_true):
assert y_true is not None
y_pred, y_true = Calibrator.validate(y_pred, y_true)
y_pred = logit(np.clip(y_pred, self.epsilon, 1.0 - self.epsilon))
self.clf.fit(y_pred[:, None], y_true)
def predict(self, y_pred):
y_pred, _ = Calibrator.validate(y_pred)
y_pred = logit(np.clip(y_pred, self.epsilon, 1.0 - self.epsilon))
y_calib = self.clf.predict_proba(y_pred[:, None])[:, 1]
return y_calib
class Beta2(Calibrator):
def __init__(self, epsilon=1e-12):
self.epsilon = epsilon
self.clf = LogisticRegression()
def fit(self, y_pred, y_true):
assert y_true is not None
y_pred, y_true = Calibrator.validate(y_pred, y_true)
y_pred = np.clip(y_pred.astype(np.float_),
self.epsilon, 1.0 - self.epsilon)
y_pred = np.stack((np.log(y_pred), np.log(1.0 - y_pred)), axis=1)
self.clf.fit(y_pred, y_true)
def predict(self, y_pred):
y_pred, _ = Calibrator.validate(y_pred)
y_pred = np.clip(y_pred.astype(np.float_),
self.epsilon, 1.0 - self.epsilon)
y_pred = np.stack((np.log(y_pred), np.log(1.0 - y_pred)), axis=1)
y_calib = self.clf.predict_proba(y_pred)[:, 1]
return y_calib
CALIB_DICT = {'raw': Identity, 'iso': Isotonic}
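# Illustrative usage of a calibrator (hypothetical data, not executed here):
#   rng = np.random.RandomState(0)
#   y_raw = rng.rand(100)                 # uncalibrated scores in [0, 1]
#   y_true = rng.rand(100) < y_raw        # boolean labels
#   calib = Isotonic()
#   calib.fit(y_raw, y_true)
#   y_cal = calib.predict(y_raw)          # calibrated probabilities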
def flat(tup, delim='_'):
'''Join only invertible if delim not in elements.'''
assert not any(delim in x for x in tup)
flat_str = delim.join(tup)
return flat_str
def flat_cols(cols, delim='_', name=None):
assert isinstance(cols, pd.MultiIndex)
cols = pd.Index([flat(tup, delim=delim) for tup in cols.values], name=name)
return cols
def combine_class_df(neg_class_df, pos_class_df):
'''
neg_class_df : DataFrame, shape (n, n_features)
pos_class_df : DataFrame, shape (n, n_features)
Must have same keys as `neg_class_df`
df : DataFrame, shape (2 * n, n_features)
y_true : ndarray, shape (2 * n,)
'''
# Adding a new col won't change anything in original
neg_class_df = pd.DataFrame(neg_class_df, copy=True)
pos_class_df = pd.DataFrame(pos_class_df, copy=True)
#!/usr/local/bin/python2
import math
import operator
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly.graph_objs import Scatter, Figure, Layout
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
import numpy as np
Xmin=3
Xmax=9
for Abuse in ("Spam Domains", "Phishing Domains", "Malware Domains", "Botnet Domains"):
InitialDataPoint = '2017-may-tlds.csv'
DataPoints = ['2017-may-tlds.csv', '2017-june-tlds.csv', '2017-july-tlds.csv', '2017-aug-tlds.csv', '2017-sept-tlds.csv']
# read data files
datasets = pd.DataFrame()
import numpy as np
import pandas as pd
CHORUS_DT_DATA_PATH = "/data/cleaned/data_chorus_dt.csv"
class ChorusDtHandler:
""""
Class for loading chorus DT data and returning
"""
def __init__(self):
self.data_path = CHORUS_DT_DATA_PATH
self.prestation_dict = {"A": "Avion", "T": "Train", "TC": "Transport en commun"}
self.data = self.load_data()
self.preprocess_data()
self.prestation_options = self.get_prestation_options()
self.year_options = self.get_year_options()
def load_data(self):
col_types = {"distance": np.float64, "CO2e/trip": np.float64}
df = pd.read_csv(self.data_path, dtype=col_types)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .plotter import _Plotter
__all__ = [
"bar_plot",
"time_bar_plot",
"line_plot",
"time_line_plot"
]
def bar_plot(data, y, x=None, hue=None, norm=False,
ax=None, figsize=None, orient="v", aggfunc=np.mean,
logx=False, logy=False):
"""
Simple bar plot.
Parameters
----------
:param data: DataFrame, required.
Input data.
:param x: str, optional.
x-axis values column name. If there is None, x-axis values are index.
:param y: str, required.
y-axis values column name.
:param hue: str, optional.
Grouped column name.
:param norm: bool, optional.
If true, every bar will be normalized.
:param ax: matplotlib.axes.Axes, optional.
Axes to plot on, otherwise uses current axes.
:param figsize: tuple, optional.
Tuple (width, height) in inches.
:param orient: str, optional.
Orient of plot. Can equal "v" or "h".
:param aggfunc: function, list of functions, dict, default numpy.mean, optional.
If list of functions passed, the resulting pivot table will have hierarchical columns
whose top level are the function names (inferred from the function objects themselves).
If dict is passed, the key is column to aggregate and value is function or list of functions.
:param logx: bool, optional.
If true, x-axis will be logarithmic.
:param logy: bool, optional
If true, y-axis will be logarithmic.
Returns
----------
:return: matplotlib.axes.Axes, optional.
Axes to plot on.
"""
plotter = _Plotter(data, x, y, figsize)
if ax is None:
ax = plt.gca()
kind = "bar" if orient == "v" else "barh"
if hue is None:
plotter.plot(kind=kind, ax=ax, aggfunc=aggfunc, logx=logx, logy=logy)
else:
plotter.grouped_plot(kind=kind, stacked=True,
ax=ax, hue=hue, norm=norm, logx=logx, logy=logy)
return ax
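# Illustrative usage (assumes a hypothetical DataFrame `sales` with columns 'region', 'year' and 'revenue'):
#   ax = bar_plot(sales, y='revenue', x='region', hue='year', norm=True)
#   plt.show()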
def time_bar_plot(data, y, x=None, hue=None, period=None,
norm=False, ax=None, figsize=None, xlabelformat="%d-%m",
aggfunc=np.mean, logx=False, logy=False):
"""
Bar plot with time x-axis.
Parameters
----------
:param data: DataFrame, required.
Input data.
:param x: str, optional.
x-axis values column name. If there is None, x-axis values are index.
:param y: str, required.
y-axis values column name.
:param hue: str, optional.
Grouped column name.
:param period: str or pandas.Offset, required.
One of pandas’ offset strings or an Offset object.
See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
:param norm: bool, optional.
If true, every bar will be normalized.
:param ax: matplotlib.axes.Axes, optional.
Axes to plot on, otherwise uses current axes.
:param figsize: tuple, optional.
Tuple (width, height) in inches.
:param xlabelformat: str, optional.
Explicit format string.
See https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior.
:param aggfunc: function, list of functions, dict, default numpy.mean, optional.
If list of functions passed, the resulting pivot table will have hierarchical columns
whose top level are the function names (inferred from the function objects themselves).
If dict is passed, the key is column to aggregate and value is function or list of functions.
:param logx: bool, optional.
If true, x-axis will be logarithmic.
:param logy: bool, optional.
If true, y-axis will be logarithmic.
Returns
----------
:return: matplotlib.axes.Axes, optional.
Axes to plot on.
"""
data = data.copy()
if x is None:
x = "_index"
data[x] = pd.to_datetime(data.index).tz_localize(None) \
.to_period(period) \
.start_time
else:
data[x] = pd.to_datetime(data[x]).tz_localize(None) \
.to_period(period) \
.start_time
plotter = _Plotter(data, x, y, figsize)
if ax is None:
ax = plt.gca()
if hue is None:
plotter.plot(kind="bar",
ax=ax, aggfunc=aggfunc, logx=logx, logy=logy)
else:
plotter.grouped_plot(kind="bar", stacked=True,
ax=ax, hue=hue, norm=norm, logx=logx, logy=logy)
ticks, labels = plotter.calculate_pretty_ticks(plt.xticks(), plt.rcParams.get('figure.figsize')[0])
ax.set_xticks(ticks)
ax.set_xticklabels(map(lambda period: pd.Timestamp(period.get_text()).strftime(xlabelformat), labels),
rotation=0 if "y" not in xlabelformat.lower() else 45)
ax.set_xlabel("time")
return ax
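# Illustrative usage (assumes a hypothetical DataFrame `events` with a timezone-aware DatetimeIndex and a numeric 'count' column):
#   ax = time_bar_plot(events, y='count', period='W', aggfunc=np.sum)
#   plt.show()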
def line_plot(data, y, x=None, hue=None, norm=False,
stacked=False, ax=None, figsize=None, aggfunc=np.mean,
logx=False, logy=False):
"""
Simple line plot.
Parameters
----------
:param data: DataFrame, required.
Input data.
:param x: str, optional.
x-axis values column name. If there is None, x-axis values are index.
:param y: str, required.
y-axis values column name.
:param hue: str, optional.
Grouped column name.
:param norm: bool, optional.
If true, every bar will be normalized.
:param stacked: bool, optional.
If true, create stacked plot.
:param ax: matplotlib.axes.Axes, optional.
Axes to plot on, otherwise uses current axes.
:param figsize: tuple, optional.
Tuple (width, height) in inches.
:param aggfunc: function, list of functions, dict, default numpy.mean, optional.
If list of functions passed, the resulting pivot table will have hierarchical columns
whose top level are the function names (inferred from the function objects themselves).
If dict is passed, the key is column to aggregate and value is function or list of functions.
:param logx: bool, optional.
If true, x-axis will be logarithmic.
:param logy: bool, optional.
If true, y-axis will be logarithmic.
Returns
----------
:return: matplotlib.axes.Axes, optional.
Axes to plot on.
"""
plotter = _Plotter(data, x, y, figsize)
if ax is None:
ax = plt.gca()
if hue is None:
plotter.plot(kind="line", ax=ax, aggfunc=aggfunc, logx=logx, logy=logy)
else:
plotter.grouped_plot(kind="line", stacked=stacked,
ax=ax, hue=hue, norm=norm, logx=logx, logy=logy)
return ax
def time_line_plot(data, y, x=None, hue=None, period=None, stacked=False,
norm=False, ax=None, figsize=None, xlabelformat="%d-%m",
aggfunc=np.mean, logx=False, logy=False):
"""
Line plot with time x-axis.
Parameters
----------
:param data: DataFrame, required.
Input data.
:param x: str, optional.
x-axis values column name. If there is None, x-axis values are index.
:param y: str, required.
y-axis values column name.
:param hue: str, optional.
Grouped column name.
:param period: str or pandas.Offset, required.
One of pandas’ offset strings or an Offset object.
See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
:param stacked: bool, optional.
If true, create stacked plot.
:param norm: bool, optional.
If true, every bar will be normalized.
:param ax: matplotlib.axes.Axes, optional.
Axes to plot on, otherwise uses current axes.
:param figsize: tuple, optional.
Tuple (width, height) in inches.
:param xlabelformat: str, optional.
Explicit format string.
See https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior.
:param aggfunc: function, list of functions, dict, default numpy.mean, optional.
If list of functions passed, the resulting pivot table will have hierarchical columns
whose top level are the function names (inferred from the function objects themselves).
If dict is passed, the key is column to aggregate and value is function or list of functions.
:param logx: bool, optional.
If true, x-axis will be logarithmic.
:param logy: bool, optional.
If true, y-axis will be logarithmic.
Returns
----------
:return: matplotlib.axes.Axes, optional.
Axes to plot on.
"""
data = data.copy()
if x is None:
x = "_index"
data[x] = pd.to_datetime(data.index).tz_localize(None) \
.to_period(period) \
.start_time
else:
data[x] = pd.to_datetime(data[x]).tz_localize(None) \
.to_period(period) \
.start_time
from __future__ import print_function
import sklearn
#%%
import lime
import os
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
import sklearn
import sklearn.ensemble
import sklearn.metrics
import seaborn as sns
from scipy.special import softmax
import matplotlib.pyplot as plt
from sklearn.utils import resample
from transformers import BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import AlbertTokenizer, AlbertForSequenceClassification
import torch
import torch.nn as nn
import torch
#%%
import os
os.chdir('/home/ubuntu/transformers/Dataset/Restaurant Reviews')
# %%
# Set Directory as appropiate
df_RR = pd.read_csv('Dataset/Restaurant Reviews/original_data/Restaurant_Reviews.tsv',delimiter='\t',nrows = 10000)
sns.countplot(df_RR['Liked'])
plt.show()
#%%
zero = df_RR[df_RR.Liked==0]
one = df_RR[df_RR.Liked==1]
# upsample minority
one = resample(one,
replace=True, # sample with replacement
n_samples=len(zero), # match number with majority class
random_state=42) # reproducible results
# combine majority and upsampled minority
df_RR = | pd.concat([zero,one]) | pandas.concat |
import json
import pandas as pd
import os
import fiona
import geopandas as gpd
import numpy as np
from copy import deepcopy
from pathlib import Path
from flatten_dict import flatten
from poi_conflation_tool import POIConflationTool
# load config file
with open(Path(os.path.dirname(os.path.realpath(__file__)), '../config.json')) as f:
config = json.load(f)
class DataProcessor:
"""
Perform processing on the HVP dataset by combining verified stop information, data from
operational survey and vehicle information.
"""
def __init__(self):
"""
Initialises the class object with an empty dataframe to store the combined data for each batch.
"""
self.combined_trip_data = pd.DataFrame()
self.combined_stop_data = gpd.GeoDataFrame()
self.conflation_tool = POIConflationTool()
print('Loading vehicle type, place type, land use, and activity type mapping data...')
vehicletype_mapping = pd.read_excel(os.path.join(os.path.dirname(__file__), config['vehicletype_mapping']))
self.vehicletype_mapping = dict(zip(vehicletype_mapping['OriginalVehicleType'],
vehicletype_mapping['MappedVehicleType']))
placetype_mapping = pd.read_excel(os.path.join(os.path.dirname(__file__), config['placetype_mapping']))
self.placetype_mapping = dict(zip(placetype_mapping['OriginalPlaceType'],
placetype_mapping['NewPlaceType']))
landusetype_mapping = pd.read_excel(os.path.join(os.path.dirname(__file__), config['landusetype_mapping']))
self.landusetype_mapping = dict(zip(landusetype_mapping['OriginalLandUseType'],
landusetype_mapping['MappedLandUseType']))
activitytype_mapping = pd.read_excel(os.path.join(os.path.dirname(__file__), config['activitytype_mapping']))
self.activitytype_mapping = dict(zip(activitytype_mapping['OriginalActivityType'],
activitytype_mapping['MappedActivityType']))
print('Loading SLA land use data...')
self.landuse_data = self._load_landuse_data()
def load_batch_data(self, batch_num):
"""
Loads the batch stop data from local directory.
Return:
batch_data: geopandas.GeoDataFrame
Contains the processed stop data for a particular batch.
"""
batch_data = gpd.read_file(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] +
'batch_stop_data_{}.shp'.format(batch_num)),
encoding='utf-8')
self.combined_stop_data = pd.concat([self.combined_stop_data, batch_data], ignore_index=True)
return batch_data
def load_combined_data(self):
"""
Loads the combined stop data of all batches from local directory.
Return:
combined_stop_data: geopandas.GeoDataFrame
Contains the processed stop data for all batches.
"""
self.combined_stop_data = gpd.read_file(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] +
'combined_stop_data.shp'),
encoding='utf-8')
return self.combined_stop_data
def _vehicle_type_mapping(self, vehicle_type):
"""
Performs a vehicle type mapping to merge similar vehicle types together.
Parameters:
vehicle_type: str
Contains the original vehicle type.
Return:
self.vehicle_mapping[vehicle_type]: str
Contains the mapped vehicle type. Returns "Unknown" if vehicle_type is None.
"""
if (vehicle_type is None) or (vehicle_type == 'Nil') or (vehicle_type == ''):
return "Unknown"
if vehicle_type in self.vehicletype_mapping:
return self.vehicletype_mapping[vehicle_type]
else:
return "Unknown"
def _load_verified_trips(self, batch_num):
"""
Loads the verified trips data for a particular batch and removes the irrelevant columns.
Parameters:
batch_num: int
Contains the batch number.
Return:
verified_trips: pandas.DataFrame
Contains the verified trips information for a particular batch.
"""
with open(os.path.join(os.path.dirname(__file__),
config['verified_stop_directory'].format(batch_num=batch_num))) as f:
verified_trips = json.load(f)
verified_trips = pd.json_normalize(verified_trips)
# filter important features
retained_columns = ['DriverID', 'VehicleType', 'Stops', 'Travels', 'YMD', 'Timeline', 'DayOfWeekStr']
verified_trips = verified_trips[retained_columns]
# perform mapping for vehicle type information
verified_trips['VehicleType'] = verified_trips['VehicleType'].apply(self._vehicle_type_mapping)
return verified_trips
def _load_operation_survey(self, batch_num):
"""
Loads the operation survey for a particular batch and removes the irrelevant columns.
Parameters:
batch_num: int
Contains the batch number.
Return:
operation_data: pandas.DataFrame
Contains the operation survey data for a particular batch.
"""
# load operational survey
with open(os.path.join(os.path.dirname(__file__),
config['operation_survey_directory'].format(batch_num=batch_num))) as f:
operation_data = json.load(f)
operation_data = pd.json_normalize(operation_data)
# filter important features
important_features = ['Commodity', 'SpecialCargo', 'Company.Type', 'Industry',
'Driver.ID']
retained_columns = [column
for column in operation_data.columns
for feature in important_features
if feature in column]
retained_columns.remove('Commodity.OtherStr')
operation_data = operation_data[retained_columns]
return operation_data
def _generate_trip_id(self, verified_trips, batch_num):
"""
Assigns a unique ID to each trip that contains the batch number as well.
Parameters:
verified_trips: pandas.DataFrame
Contains the trip information for a particular batch.
batch_num: int
Contains the batch number.
Return:
verified_trips: pandas.DataFrame
Contains the trip information for a particular batch with unique ID for each trip.
"""
verified_trips = verified_trips.rename_axis('TripID').reset_index()
verified_trips['TripID'] = 'B{}_'.format(batch_num) + verified_trips['TripID'].astype(str)
return verified_trips
def _process_timeline(self, timeline):
"""
Process the timeline information of a particular trip to extract the stop information.
Parameters:
timeline: list of dictionaries
Contains the stops made during a particular trip.
Return:
stops_df: pandas.DataFrame
Contains the stops made during a particular trip, concatenated and formatted as a single Dataframe.
"""
timeline_list = []
for i in range(len(timeline)):
for j in range(len(timeline.loc[i, 'Timeline'])):
stop_dict = flatten(timeline.loc[i, 'Timeline'][j], reducer='dot')
stop_dict['TripID'] = timeline.loc[i, 'TripID']
timeline_list.append(stop_dict)
# filter out stops and travel
timeline_df = pd.DataFrame(timeline_list)
stops_df = timeline_df[timeline_df['Type'] == 'Stop'].reset_index(drop=True)
# drop redundant columns
stops_df.rename(columns={'ID': 'StopID'}, inplace=True)
interested_columns = ['Attribute.PlaceType.', 'Attribute.Address', 'Attribute.StopLon', 'Attribute.StopLat',
'Attribute.Activity.', 'StartTime', 'EndTime', 'Duration', 'StopID', 'TripID']
retained_columns = [column
for column in stops_df.columns
for interested_column in interested_columns
if interested_column in column]
retained_columns.remove('Attribute.PlaceType.Applicable')
retained_columns.remove('Attribute.Activity.OtherStr')
stops_df = stops_df[retained_columns]
# remove 'Attribute_' from column name
stops_df.columns = [col_name.replace('Attribute.', '') for col_name in stops_df.columns]
return stops_df
def _activity_type_mapping(self, verified_stops):
"""
Performs an activity type mapping to merge similar activity types together.
Parameters:
verified_stops: pd.DataFrame
Contains the verified stops information with original activity types.
Return:
verified_stops: pd.DataFrame
Contains the verified stops information with the newly mapped activity types.
"""
activity_types = ['DeliverCargo', 'PickupCargo', 'Other', 'Shift', 'ProvideService',
'OtherWork', 'Meal', 'DropoffTrailer', 'PickupTrailer', 'Fueling',
'Personal', 'Passenger', 'Resting', 'Queuing', 'DropoffContainer',
'PickupContainer', 'Fail', 'Maintenance']
for activity in activity_types:
if 'MappedActivity.{}'.format(self.activitytype_mapping[activity]) not in verified_stops.columns:
verified_stops['MappedActivity.{}'.format(self.activitytype_mapping[activity])] = deepcopy(
verified_stops['Activity.{}'.format(activity)]
)
else:
verified_stops['MappedActivity.{}'.format(self.activitytype_mapping[activity])] = \
verified_stops['MappedActivity.{}'.format(self.activitytype_mapping[activity])] + \
verified_stops['Activity.{}'.format(activity)]
idx = verified_stops[verified_stops['MappedActivity.{}'.format(
self.activitytype_mapping[activity])] > 0].index.tolist()
verified_stops.loc[idx, 'MappedActivity.{}'.format(self.activitytype_mapping[activity])] = 1
return verified_stops
def _extract_verified_stops(self, verified_trips, batch_num):
"""
Extracts the verified stop information based on the verified trips.
Parameters:
verified_trips: pandas.DataFrame
Contains the verified trip information for a particular batch.
batch_num: int
Contains the batch number.
Return:
verified_stops: pandas.DataFrame
Contains the verified stop information for a particular batch.
"""
# extract stop information and frequent places
verified_trips = self._generate_trip_id(verified_trips, batch_num)
timeline = verified_trips[['Timeline', 'TripID']]
other_trip_info = verified_trips.drop(columns=['Timeline'])
timeline_info = self._process_timeline(timeline)
# merge with other trip information
verified_stops = timeline_info.merge(other_trip_info, how='left', on='TripID')
# extract stop start time
verified_stops['StartHour'] = verified_stops['StartTime'].apply(lambda x: int(x.split(' ')[1].split('-')[0]))
# perform mapping of activity types
verified_stops = self._activity_type_mapping(verified_stops)
return verified_stops
def _remove_bus_data(self, trip_data):
"""
Removes all trip and stop data collected for buses.
Parameters:
trip_data: pandas.DataFrame
Contains the trip data for a particular batch.
Return:
filtered_trip_data: pandas.DataFrame
Contains the filtered trip data for a particular batch without any bus-related trips.
"""
filtered_trip_data = trip_data[trip_data['VehicleType'] != 'Bus']
return filtered_trip_data
def _landuse_type_mapping(self, landuse_type):
"""
Performs a land use type mapping to merge similar land use types together.
Parameters:
landuse_type: str
Contains the original landuse type from URA.
Return:
self.landuse_mapping[landuse_type]: str
Contains the mapped landuse type.
"""
if (landuse_type is None) or (landuse_type == 'Nil') or (landuse_type == '') or \
(landuse_type not in self.landusetype_mapping):
raise ValueError('Land use type {} is invalid'.format(landuse_type))
else:
return self.landusetype_mapping[landuse_type]
def _load_landuse_data(self):
""""
Loads the URA 2019 land use data.
Return:
landuse_data: pd.DataFrame
Contains the land use information from URA.
"""
fiona.drvsupport.supported_drivers['KML'] = 'rw'
landuse_data = gpd.read_file(os.path.join(os.path.dirname(__file__), config['ura_landuse']),
driver='KML')
landuse_data['LandUseType'] = landuse_data['Description'].apply(lambda x:
pd.read_html(x)[0].loc[0, 'Attributes.1'])
landuse_data['MappedLandUseType'] = landuse_data['LandUseType'].apply(lambda x: self._landuse_type_mapping(x))
landuse_data.drop(columns=['Name', 'Description'], inplace=True)
return landuse_data
def _place_type_mapping(self, place_types):
"""
Performs a place type mapping to merge similar place types together.
Parameters:
place_types: str
Contains the original place types.
Return:
mapped_placetypes: list
Contains the mapped place type information. Returns "Unknown" if there are no place type information
or if it is not following Google's taxonomy.
"""
if (place_types is None) or (place_types == 'Nil') or (place_types == ''):
return ["POI.Unknown"]
mapped_placetypes = ['POI.{}'.format(self.placetype_mapping[place_type])
for place_type in place_types.split('; ')
if place_type in self.placetype_mapping]
if mapped_placetypes:
return list(set(mapped_placetypes))
else:
return ["POI.Unknown"]
def _load_poi_data(self, stop_data, batch_num):
"""
Extracts the nearby POIs and calculates the number of different place types at each stop.
Return:
poi_data: pd.DataFrame
Contains the number of each POI types around each stop.
batch_num: int
Contains the batch number
"""
# extract neighbouring POIs using conflation tool
poi_data = stop_data['StopID'].to_frame(name='StopID')
poi_data['NumPOIs'] = 0
placetype_df = | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
from unidecode import unidecode
def evaluate_banner():
print('\n**************************************************')
print('************Evalulting Reconciliations************')
print('**************************************************')
def evaluate_and_clean_merge(df, rawpath):
df = df.reset_index().drop('index', axis=1)
print('\n** Evaluating the merged dataset and' +
' sequentially cleaning it!**')
print('** We have ' + str(len(df)) +
' total rows of payments data to begin with.')
print('** We have £' + str(round(df['amount'].sum() / 1000000000, 2)) +
'bn worth of data to begin with.')
print('** We have ' + str(len(df['supplier'].unique())) +
' unique suppliers to begin with.')
df['expensetype'] = df['expensetype'].astype(str)
df['expensetype'] = df['expensetype'].str.lower()
df['expensetype'] = df['expensetype'].str.strip()
df['expensearea'] = df['expensearea'].astype(str)
df['expensearea'] = df['expensearea'].str.lower()
df['expensearea'] = df['expensearea'].str.strip()
df['supplier'] = df['supplier'].astype(str)
df['supplier'] = df['supplier'].str.strip()
initial = len(df)
df['amount'] = pd.to_numeric(df['amount'], errors='coerce')
df = df[~pd.isnull(df['amount'])]
df = df[df['amount'] >= 25000]
print('Dropped ' + str(initial - len(df)) +
' null, non-numeric and payments below £25k in value.')
initial = len(df)
df = df[~ | pd.isnull(df['supplier']) | pandas.isnull |
import numpy as np
import os
import pandas as pd
import pickle
class Predictor(object):
"""
Class for predicting.
"""
def predict(self, data=None, linear=True, model_filename="linear-accelerometer.pcl", features="simple", filtering=None, **kwargs):
"""
:param data: data on which predict
:param linear: linear model will be used or not (if True than do scaling if models are saved)
:param model_filename: filename of the model (model have to be saved in the roor in "models" folder)
:param filtering: filtering method
:param kwargs: arguments for filtering
:return: prediction on the data
"""
if data is not None:
with open(os.path.join("models", model_filename), "rb") as file:
model = pickle.load(file)
if filtering is not None:
data = self.preprocess(data, linear=linear, filtering=filtering, **kwargs)
if features == "simple":
data['acceleration'] = np.sqrt(
data['x_accelerometer'] ** 2 + data['y_accelerometer'] ** 2 + data['z_accelerometer'] ** 2)
y_pred = model.predict(data).tolist()
return y_pred
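# Hedged usage sketch for predict(); "sensor_df" and the pickled model under ./models
# are placeholders and must exist for this to run:
#
#   predictor = Predictor()
#   labels = predictor.predict(sensor_df, linear=True,
#                              model_filename="linear-accelerometer.pcl",
#                              features="simple")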
@staticmethod
def preprocess_feature(feature=None, path_to_the_scaler=None, filtering=None, **kwargs):
"""
:param feature: feature vector which has to be preprocessed
:param path_to_the_scaler: path to the stored scaler file
:param filtering: filtering method
:param kwargs: arguments for filtering
:return: preprocessed feature
"""
if feature is not None:
feature = filtering(feature, **kwargs)
if path_to_the_scaler is not None:
with open(path_to_the_scaler, "rb") as file:
scaler = pickle.load(file)
feature = scaler.transform(feature.reshape(-1, 1))
return feature
def preprocess(self, data=None, linear=False, filtering=None, **kwargs):
"""
:param data: data for preprocessing
:param linear: whether a linear model will be used (if True, do scaling if scalers are saved)
:param filtering: filtering method
:param kwargs: arguments for filtering
:return:
"""
if data is not None:
data_new = | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
from src.commons.process_dataframe import keep_valid_columns, change_col_value_type, insert_new_col_from_two_cols, \
get_sub_df_according2col_value, get_mean, get_std
from src.commons.process_number import get_deviation, get_percent_change
from src.constants.ms2_uniform_prolific_1_constants import KEEP_COLS
from src.preprocess.sub.get_data2analysis import drop_df_rows_according2_one_col
if __name__ == '__main__':
write_to_excel = False
# read data
PATH_DATA = "../../data/ms2_uniform_prolific_1_data/raw/"
dir_list = os.listdir(PATH_DATA)
df_list_all = [pd.read_csv(PATH_DATA + file) for file in dir_list]
# preprocess
df_list = list()
for df in df_list_all:
# keep useful clos
df = keep_valid_columns(df = df, kept_columns_list = KEEP_COLS)
# drop practice trials
df = df.dropna(subset = ["trials.thisN"])
# remove spaces
if df["responseN"].dtypes == "object":
df["responseN"] = df["responseN"].str.strip()
# remove non numeric responses
df["is_num"] = df["responseN"].str.isnumeric()
drop_index = df[df["is_num"] == False].index
df.drop(drop_index, inplace = True)
# change responseN to float
change_col_value_type(df, "responseN", float)
df_list.append(df)
# drop participants more than 5% of invalid trials
# remove pp 12 data: only 311 valid trials out of 330 trials
df_list.pop(2)
# add deviation score col
for df in df_list:
insert_new_col_from_two_cols(df, "responseN", "numerosity", "deviation_score", get_deviation)
insert_new_col_from_two_cols(df, "deviation_score", "numerosity", "percent_change", get_percent_change)
# check subitizing results
# take subitizing trials out
subitizing_df_list = list()
for df in df_list:
sub_df = df.loc[df["numerosity"] <= 4]
subitizing_df_list.append(sub_df)
# 30 subitizing trials (only keep participants with 28, 29 or 30 correct)
correct_trial_list = list()
for sub_df in subitizing_df_list:
correct_trial_list.append((sub_df["deviation_score"] == 0).sum())
# removed index
index_list = list()
for i, n_correct in enumerate(correct_trial_list):
if n_correct < 28:
index_list.append(i)
# remove participants whose performance was not above 90% (fewer than 28/30 correct)
df_list = [df for i, df in enumerate(df_list) if i not in index_list]
# remove subitizing trials
df_list_t1 = list()
for df in df_list:
df_list_t1.append(df.loc[df["numerosity"] > 4])
# drop obviously wrong responses:
min_res = 10
max_res = 128
df_list_prepro = list()
for df in df_list_t1:
df_list_prepro.append(drop_df_rows_according2_one_col(df, "responseN", min_res, max_res))
# concat all participant
df_data = | pd.concat(df_list_prepro) | pandas.concat |
import ast
import os
import glob
from io import BytesIO
import base64
import json
from IPython import display as ipd
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.misc import imresize
import pretty_midi
import librosa
import PIL.Image
import soundfile as sf
from flask import render_template, flash, url_for, request, redirect, session, Markup, current_app
from . import refiner
from .. import common
def get_paths():
return {
'data_AUDIO': os.path.join(current_app.config['dir_mtd'], '03_MTD-medium', 'data_AUDIO'),
'data_ALIGNMENT': os.path.join(current_app.config['dir_mtd'], '03_MTD-medium', 'data_ALIGNMENT'),
'data_EDM-corr_MID': os.path.join(current_app.config['dir_mtd'], '03_MTD-medium', 'data_EDM-corr_MID'),
'data_ALIGNMENT-annotated': os.path.join(common.STATIC_DIR, 'data_ALIGNMENT-annotated'),
'data_AUDIO_IIRT-annotated': os.path.join(common.STATIC_DIR, 'data_AUDIO_IIRT-annotated'),
'data_AUDIO_IIRT': os.path.join(common.STATIC_DIR, 'data_AUDIO_IIRT'),
'data_AUDIO-annotated': os.path.join(common.STATIC_DIR, 'data_AUDIO-annotated'),
}
def get_mtd_row(mtd_id):
df_medium = pd.read_csv(os.path.join(current_app.config['dir_mtd'], '03_MTD-medium.csv'), sep=';')
row = df_medium[df_medium['MTDID'] == mtd_id]
assert len(row) == 1
return row
def get_wcm_for_mtd(mtd_id):
row = get_mtd_row(mtd_id)
directory = current_app.config['data_AUDIO-WCM']
if not os.path.exists(directory):
flash(f'The WCM directory ({directory}) does not exist.')
return None
wcm_id = str(row['WCMID'].iloc[0])
file = glob.glob(os.path.join(directory, '*WCM' + wcm_id + '.wav'))
assert len(file) == 1, (file, os.path.join(directory, '*WCM' + wcm_id + '.wav'))
return file[0]
def get_mtd_str(s):
if '-' in s:
part1, part2 = s.split('-')
return 'MTD%04d-%s' % (int(part1), part2)
else:
return 'MTD%04d' % int(s)
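# For reference, get_mtd_str zero-pads the numeric part to four digits, e.g.
# get_mtd_str("7") -> "MTD0007" and get_mtd_str("7-2") -> "MTD0007-2".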
def check_file_glob(globber, mtd_id_str, entry_type):
fn = glob.glob(globber)
if len(fn) != 1:
flash('There are %d %s-entries for the MTDID "%s".' % (len(fn), entry_type, mtd_id_str))
return None
else:
return fn[0]
def adjust_midi(midi_data, wp):
f = interp1d(wp[:, 0], wp[:, 1], fill_value='extrapolate')
for cur_instrument in midi_data.instruments:
for cur_note in cur_instrument.notes:
cur_note.start = max(0, f(cur_note.start))
cur_note.end = max(0, f(cur_note.end))
return midi_data
def time_str_to_sec(t):
mm, ss = t.split(':')
return float(mm) * 60 + float(ss)
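# e.g. time_str_to_sec("02:35.5") returns 155.5 (minutes converted to seconds).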
def get_start_end_time_duration(mtd_id):
mtd_row = get_mtd_row(mtd_id)
path_dict = get_paths()
fn_wcm_wav = get_wcm_for_mtd(mtd_id)
with sf.SoundFile(fn_wcm_wav, 'r') as stream:
duration = len(stream) / stream.samplerate
mtd_id_str = get_mtd_str(mtd_id)
fn_json = os.path.join(path_dict['data_AUDIO_IIRT-annotated'], mtd_id_str + '.json')
if os.path.exists(fn_json):
with open(fn_json, 'r') as stream:
data = json.load(stream)
return data['start'], data['end'], duration
else:
start = round(time_str_to_sec(mtd_row.iloc[0]['StartTime']), 1)
end = round(time_str_to_sec(mtd_row.iloc[0]['EndTime']), 1)
return start, end, duration
def write_sync_csv(wp, fn_out, fn_mid, fn_mtd_wav):
midi_data = pretty_midi.PrettyMIDI(fn_mid)
mid_symbol_end = max(note.end for inst in midi_data.instruments for note in inst.notes)
with sf.SoundFile(fn_mtd_wav, 'r') as stream:
duration_wav = len(stream) / stream.samplerate
duration_wav = duration_wav + 0.05 # half frame for safety
wp = np.concatenate((wp, [[mid_symbol_end, duration_wav]]))
df = pd.DataFrame(wp, columns=['"MID"', '"WAV"'])
df.to_csv(fn_out, sep=';', index=False, float_format='%.5f', quoting=3)
@refiner.route('/<mtd_id>.html')
def index(mtd_id):
mtd_id_str = get_mtd_str(mtd_id)
df_medium_row = get_mtd_row(mtd_id)
wcm_id = df_medium_row['WCMID'].values[0]
path_dict = get_paths()
# files from original mtd data set
fn_wav = check_file_glob(os.path.join(path_dict['data_AUDIO'], f'{mtd_id_str}_*.wav'), mtd_id_str, 'wav')
fn_csv = check_file_glob(os.path.join(path_dict['data_ALIGNMENT'], f'{mtd_id_str}_*.csv'), mtd_id_str, 'sync-csv')
fn_mid = check_file_glob(os.path.join(path_dict['data_EDM-corr_MID'], f'{mtd_id_str}_*.mid'), mtd_id_str, 'mid')
# files computed for this interface
fn_npz = check_file_glob(os.path.join(path_dict['data_AUDIO_IIRT'], f'{mtd_id_str}_*.npz'), mtd_id_str, 'npz')
# files generated by the interface
fn_csv_new = os.path.join(path_dict['data_ALIGNMENT-annotated'], os.path.basename(fn_csv))
fn_npz_new = os.path.join(path_dict['data_AUDIO_IIRT-annotated'], os.path.basename(fn_npz))
fn_wav_new = os.path.join(path_dict['data_AUDIO-annotated'], os.path.basename(fn_wav))
cur_transpose = df_medium_row.iloc[0]['MidiTransposition']
remove_new_annotations = False
if os.path.exists(fn_csv_new):
remove_new_annotations = True
flash('You are using an already modified warping path.')
fn_csv = fn_csv_new
if os.path.exists(fn_npz_new) and os.path.exists(fn_wav_new):
remove_new_annotations = True
flash('You are using an audio excerpt with changed durations.')
fn_npz = fn_npz_new
fn_wav = fn_wav_new
if remove_new_annotations:
flash(Markup('<a href="%s">Remove annotations!</a>' % url_for('refiner.remove_annotations', mtd_id=mtd_id)))
if get_wcm_for_mtd(mtd_id):
wcm_start_time, wcm_end_time, wcm_duration = get_start_end_time_duration(mtd_id)
change_duration_url = url_for('refiner.change_duration', mtd_id=mtd_id)
else:
wcm_start_time, wcm_end_time, wcm_duration = 0.0, 0.0, 0.0
change_duration_url = '#'
if 'wp' in session and session['wp_mtd_id'] == mtd_id:
wp = np.array(session['wp'])
elif fn_csv:
df_sync = pd.read_csv(fn_csv, sep=';')
wp = df_sync.values
else:
wp = np.array([])
session.pop('wp', None)
session.pop('wp_mtd_id', None)
if fn_npz and fn_mid:
midi_data = pretty_midi.PrettyMIDI(fn_mid)
x_mid = common.midi_data_to_chroma(midi_data, common.FEATURE_RATE)
x_mid = np.roll(x_mid, cur_transpose, axis=0)
x_wav = np.load(fn_npz)['f_chroma']
x_mid = librosa.util.normalize(x_mid, norm=2, fill=True, axis=0)
x_wav = librosa.util.normalize(x_wav, norm=2, fill=True, axis=0)
x_mid_img = np.uint8((1 - x_mid) * ((2 ** 8) - 1))
x_wav_img = np.uint8((1 - x_wav) * ((2 ** 8) - 1))
x_mid_img = np.flipud(x_mid_img)
x_wav_img = np.flipud(x_wav_img)
scale_fac = np.ceil(1170.0 / min(x_mid.shape[1], x_wav.shape[0]))
x_mid_img = imresize(x_mid_img, scale_fac, 'nearest')
x_wav_img = imresize(x_wav_img, scale_fac, 'nearest')
buffered = BytesIO()
PIL.Image.fromarray(x_mid_img).save(buffered, 'png')
img_src_mid = 'data:image/png;base64,' + base64.b64encode(buffered.getvalue()).decode("utf-8")
buffered = BytesIO()
PIL.Image.fromarray(x_wav_img).save(buffered, 'png')
img_src_wav = 'data:image/png;base64,' + base64.b64encode(buffered.getvalue()).decode("utf-8")
else:
img_src_mid = ''
img_src_wav = ''
scale_fac = 1.0
if fn_wav:
x_wav, sr = librosa.load(fn_wav, sr=22050, mono=True)
audio_wav = ipd.Audio(data=x_wav, rate=sr).src_attr()
else:
audio_wav = ''
if fn_mid and fn_csv:
midi_data_align = pretty_midi.PrettyMIDI(fn_mid)
for instrument in midi_data_align.instruments:
for note in instrument.notes:
note.pitch += cur_transpose
wp_strict = common.make_warping_path_strictly_monotonic(wp)
midi_data_align = adjust_midi(midi_data_align, wp_strict)
x_audio, sr = librosa.load(fn_wav, sr=22050, mono=True)
x_midi_align = midi_data_align.synthesize(fs=22050)
audio_mid_align = ipd.Audio(data=x_midi_align, rate=22050).src_attr()
if x_midi_align.shape[0] > x_audio.shape[0]:
x_midi_align = x_midi_align[:x_audio.shape[0]:]
elif x_audio.shape[0] > x_midi_align.shape[0]:
x_midi_align = np.concatenate((x_midi_align, np.zeros(x_audio.shape[0] - x_midi_align.shape[0])))
# normalize
x_audio = x_audio / np.max(x_audio)
x_midi_align = x_midi_align / np.max(x_midi_align)
x = np.stack([x_audio, x_midi_align], axis=0)
audio_both = ipd.Audio(data=x, rate=22050).src_attr()
midi_data = pretty_midi.PrettyMIDI(fn_mid)
for instrument in midi_data.instruments:
for note in instrument.notes:
note.pitch += cur_transpose
x_midi = midi_data.synthesize(fs=22050)
audio_mid = ipd.Audio(data=x_midi, rate=22050).src_attr()
f = interp1d(wp_strict[:, 0], wp_strict[:, 1], fill_value='extrapolate')
wp_display = []
for cur_instrument in midi_data.instruments:
for cur_note in cur_instrument.notes:
wp_display.append([cur_note.start, f(cur_note.start)])
wp_display = np.array(wp_display) * common.FEATURE_RATE
wp_display = wp_display.tolist()
else:
audio_wav = ''
audio_mid = ''
audio_both = ''
audio_mid_align = ''
wp_display = []
return render_template('refiner.html',
mtd_id=str(mtd_id),
audio_wav=audio_wav,
audio_mid=audio_mid,
audio_mid_align=audio_mid_align,
audio_both=audio_both,
img_src_mid=img_src_mid,
img_src_wav=img_src_wav,
scale_fac=scale_fac,
wp=wp_display,
feature_rate=common.FEATURE_RATE,
alignment_url=url_for('refiner.process_alignment', mtd_id=mtd_id),
save_url=url_for('refiner.save_alignment', mtd_id=mtd_id),
duration_url=change_duration_url,
linear_url=url_for('refiner.linearize_wp', mtd_id=mtd_id),
wcm_start_time=wcm_start_time, wcm_end_time=wcm_end_time, wcm_duration=wcm_duration,
wcm_id=wcm_id)
@refiner.route('/<mtd_id>/alignment', methods=['POST'])
def process_alignment(mtd_id):
wp = ast.literal_eval(request.form['alignment'])
wp = np.array(wp).astype(float) / common.FEATURE_RATE
sort_idx = wp[:, 0].argsort()
wp = wp[sort_idx, :]
session['wp'] = wp.tolist()
session['wp_mtd_id'] = mtd_id
return redirect(url_for('refiner.index', mtd_id=mtd_id))
@refiner.route('/<mtd_id>/save', methods=['POST'])
def save_alignment(mtd_id):
mtd_id_str = get_mtd_str(mtd_id)
path_dict = get_paths()
fn_wav = check_file_glob(os.path.join(path_dict['data_AUDIO'], f'{mtd_id_str}_*.wav'), mtd_id_str, 'wav')
fn_mid = check_file_glob(os.path.join(path_dict['data_EDM-corr_MID'], f'{mtd_id_str}_*.mid'), mtd_id_str, 'mid')
fn_wav_new = os.path.join(path_dict['data_AUDIO-annotated'], os.path.basename(fn_wav))
old_csv_dir = path_dict['data_ALIGNMENT']
new_csv_dir = path_dict['data_ALIGNMENT-annotated']
fn_csv = check_file_glob(os.path.join(old_csv_dir, f'{mtd_id_str}_*.csv'), mtd_id_str, 'sync-csv')
fn_out = os.path.join(new_csv_dir, os.path.basename(fn_csv))
wp = ast.literal_eval(request.form['alignment'])
wp = np.array(wp).astype(float) / common.FEATURE_RATE
sort_idx = wp[:, 0].argsort()
wp = wp[sort_idx, :]
write_sync_csv(wp, fn_out, fn_mid, fn_wav_new if os.path.exists(fn_wav_new) else fn_wav)
return redirect(url_for('refiner.index', mtd_id=mtd_id))
@refiner.route('/<mtd_id>/startend', methods=['POST'])
def change_duration(mtd_id):
mtd_id_str = get_mtd_str(mtd_id)
path_dict = get_paths()
start_time_old, _, _ = get_start_end_time_duration(mtd_id)
fn_mid = check_file_glob(os.path.join(path_dict['data_EDM-corr_MID'], f'{mtd_id_str}_*.mid'), mtd_id_str, 'mid')
fn_npz_old = check_file_glob(os.path.join(path_dict['data_AUDIO_IIRT'], f'{mtd_id_str}_*.npz'), mtd_id_str, 'npz')
fn_wav_old = check_file_glob(os.path.join(path_dict['data_AUDIO'], f'{mtd_id_str}_*.wav'), mtd_id_str, 'wav')
fn_csv_old = check_file_glob(os.path.join(path_dict['data_ALIGNMENT'], f'{mtd_id_str}_*.csv'), mtd_id_str, 'sync-csv')
fn_npz_new = os.path.join(path_dict['data_AUDIO_IIRT-annotated'], os.path.basename(fn_npz_old))
fn_wav_new = os.path.join(path_dict['data_AUDIO-annotated'], os.path.basename(fn_wav_old))
fn_json = os.path.join(path_dict['data_AUDIO_IIRT-annotated'], mtd_id_str + '.json')
fn_csv_new = os.path.join(path_dict['data_ALIGNMENT-annotated'], os.path.basename(fn_csv_old))
start_time = round(float(request.form['StartTime']), 1)
end_time = round(float(request.form['EndTime']), 1)
if not (start_time < end_time):
flash('Start time (%.1f) must be smaller than end time (%.1f)!' % (start_time, end_time))
return redirect(url_for('refiner.index', mtd_id=mtd_id))
if not os.path.exists(current_app.config['data_AUDIO-WCM']):
flash('data_AUDIO-WCM is not reachable: %s' % current_app.config['data_AUDIO-WCM'])
return redirect(url_for('refiner.index', mtd_id=mtd_id))
fn_wav = get_wcm_for_mtd(mtd_id)
if not os.path.exists(fn_wav):
flash('Could not find wav file: %s' % fn_wav)
return redirect(url_for('refiner.index', mtd_id=mtd_id))
x, sr = librosa.load(fn_wav, sr=22050, mono=True)
x = x[int(round(start_time * sr)):int(round(end_time * sr))]
with open(fn_json, 'w') as stream:
json.dump({'start': start_time, 'end': end_time, 'mtd_id': mtd_id}, stream, indent=4)
f_chroma, times = common.extract_iir_chroma(x, sr)
np.savez_compressed(fn_npz_new, f_chroma=f_chroma, f_chroma_ax_time=times, f_chroma_ax_freq=np.arange(12))
sf.write(fn_wav_new, x, sr)
# need to adjust warping path if the start has changed!
if 'wp' in session and session['wp_mtd_id'] == mtd_id:
wp = np.array(session['wp'])
session.pop('wp', None)
session.pop('wp_mtd_id', None)
else:
if os.path.exists(fn_csv_new):
fn_csv = fn_csv_new
else:
fn_csv = fn_csv_old
wp = | pd.read_csv(fn_csv, sep=';') | pandas.read_csv |
from python_speech_features import mfcc
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import os
import random
from tqdm import tqdm
from audiomentations import Compose, AddGaussianNoise, TimeStretch, PitchShift, Shift
augmenter = Compose([
AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5),
TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5),
PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5),
])
def load_noise(path='/home/CAIL/Speaker_R/data/voice/background_noise/'):
noise = []
files = os.listdir(path)
for f in files:
filename = f
if ('wav' not in filename):
continue
f = os.path.join(path, f)
(rate, sig) = wav.read(f)
noise.append(sig)
return noise
def generate_mfcc(sig, rate, sig_len, noise=None, noise_weight=0.1, winlen=0.03125, winstep=0.03125/2, numcep=13, nfilt=26, nfft=512, lowfreq=20, highfreq=4000, winfunc=np.hanning, ceplifter=0, preemph=0.97):
if(len(sig) != sig_len):
if(len(sig)< sig_len):
sig = np.pad(sig, (0, sig_len - len(sig)), 'constant')
if(len(sig) >sig_len):
sig = sig[0:sig_len]
# i dont know, 'tensorflow' normalization
sig = sig.astype('float') / 32768
if(noise is not None):
noise = noise[random.randint(0, len(noise)-1)] # pick a noise
start = random.randint(0, len(noise)-sig_len) # pick a sequence
noise = noise[start:start+sig_len]
noise = noise.astype('float')/32768
sig = sig * (1-noise_weight) + noise * noise_weight
#wav.write('noise_test.wav', rate, sig)
mfcc_feat = mfcc(sig, rate, winlen=winlen, winstep=winstep, numcep=numcep, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,
highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)
mfcc_feat = mfcc_feat.astype('float32')
return mfcc_feat
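# Hedged usage sketch: generate_mfcc returns a float32 matrix of shape
# (num_frames, numcep); "example.wav" is a placeholder path.
#
#   (rate, sig) = wav.read("example.wav")
#   feat = generate_mfcc(sig, rate, sig_len=rate)  # pad/trim to exactly one second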
def merge_mfcc_file(input_path='dat/', mix_noise=True, sig_len=16000, winlen=0.03125, winstep=0.03125/2, numcep=13, nfilt=26, nfft=512,
lowfreq=20, highfreq=4000, winfunc=np.hanning, ceplifter=0, preemph=0.97, enroll = False, augmentation =True, augmentation_num = 20):
train_data = []
train_lablel = []
if mix_noise:
noise = load_noise()
else:
noise = None
files = os.listdir(input_path)
for fi in tqdm(files):
fi_d = os.path.join(input_path, fi)
# # folders of each cmd
if os.path.isdir(fi_d):
label = fi_d.split('/')[-1] # get the label from the dir
# dataset
for f in os.listdir(fi_d):
f = os.path.join(fi_d, f)
(rate, sig) = wav.read(f)
if augmentation:
sig = sig.astype(np.float32)
augmentation_data = [augmenter(samples=sig, sample_rate=rate) for _ in range(augmentation_num)] + [sig]
else:
augmentation_data = [sig]
for sig in augmentation_data:
if enroll:
data = generate_mfcc(sig, rate, sig_len, noise=noise, winlen=winlen,
winstep=winstep, numcep=numcep, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,
highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)
data = np.array(data)
train_data.append(data)
train_lablel.append(label)
else:
for i in range(len(sig)//sig_len):
data = generate_mfcc(sig[i * sig_len:(i + 1) * sig_len], rate, sig_len, noise=noise,
winlen=winlen, winstep=winstep, numcep=numcep, nfilt=nfilt,
nfft=nfft, lowfreq=lowfreq,
highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter,
preemph=preemph)
data = np.array(data)
train_data.append(data)
train_lablel.append(label)
# finalize
train_data = np.array(train_data)
return train_data, train_lablel
if __name__ == "__main__":
train_path = "/home/CAIL/Speaker_R/data/voice/train_voice"
test_path = "/home/CAIL/Speaker_R/data/voice/test_voice"
enroll_path = "/home/CAIL/Speaker_R/data/voice/enroll_voice"
x_train, y_train = merge_mfcc_file(input_path=train_path, sig_len=64000, enroll=False, mix_noise=False, augmentation= True, augmentation_num= 3)
x_test, y_test = merge_mfcc_file(input_path=test_path, sig_len=64000, enroll=False, mix_noise=False, augmentation= False)
x_enroll, y_enroll = merge_mfcc_file(input_path=enroll_path, sig_len=64000, enroll=True, mix_noise=False, augmentation= False)
np.save('train_data.npy', x_train)
np.save('train_label.npy', y_train)
np.save('test_data.npy', x_test)
np.save('test_label.npy', y_test)
np.save('enroll_data.npy', x_enroll)
np.save('enroll_label.npy', y_enroll)
print('x_train shape:', x_train.shape, 'max', x_train.max(), 'min', x_train.min())
print('x_test shape:', x_test.shape, 'max', x_test.max(), 'min', x_test.min())
print(x_enroll.shape, np.shape(y_enroll))
import pandas as pd
train_pd = pd.DataFrame({'train_speaker':y_train})
print(train_pd.apply(pd.value_counts))
test_pd = | pd.DataFrame({'train_speaker': y_test}) | pandas.DataFrame |
from pathlib import Path
import numpy as np
import pandas as pd
from sykepic.compute.prediction import prediction_dataframe, threshold_dictionary
def parse_evaluations(
evaluations,
pred_dir,
thresholds=None,
threshold_search=False,
search_precision=0.01,
empty="unclassifiable",
unsure="unsure",
):
"""Parses evaluation files into various classification measurements.
Parameters
----------
evaluations : str, Path, list of str
Path to evaluation files / directory.
pred_dir : str, Path
Path to prediction-csv directory.
thresholds : float, str, Path
Single value or file with classification thresholds for each class.
threshold_search : bool
Evaluate classifications based on various threshold values.
search_precision : float
Increment threshold search values by this amount.
empty : str
Name used for unclassifiable images in evaluation files.
Returns
-------
pandas.DataFrame
Has multi-index composed of class name and threhold value
when `threshold_search` is set to True. Has combined scores
('all' row) when `threshold_search` is False.
"""
eval_df, samples = read_evaluations(evaluations)
predictions = []
for sample in samples:
try:
predictions.append(next(Path(pred_dir).rglob(f"{sample}.csv")))
except StopIteration:
print(f"[ERROR] Cannot find prediction files for {sample}")
raise
if threshold_search:
# Set initial threshold value
thresholds = 0.0
elif not thresholds:
raise ValueError("Thresholds not provided")
if isinstance(thresholds, (str, Path)):
thresholds = threshold_dictionary(thresholds)
pred_df = prediction_dataframe(predictions, thresholds)
search_range = np.arange(0, 1 + search_precision, search_precision)
result_df = results_as_df(
eval_df, pred_df, thresholds, threshold_search, search_range, empty, unsure
)
if threshold_search:
# No specificity without 'all' class
result_df.drop("specificity", axis=1, inplace=True)
return result_df
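# Hedged usage sketch; the directories and the flat 0.5 threshold are placeholders:
#
#   result_df = parse_evaluations("evaluations/", "predictions/", thresholds=0.5)
#   print(result_df)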
def read_evaluations(evaluations):
if isinstance(evaluations, (str, Path)):
evaluations = Path(evaluations)
if evaluations.is_dir():
evaluations = list(evaluations.glob("*.select.csv"))
if evaluations:
print("[INFO] Evaluations are from these files:")
print("\t" + "\n\t".join([str(f) for f in evaluations]))
else:
raise FileNotFoundError("[ERROR] No evaluation files found")
else:
evaluations = [evaluations]
df_list = []
samples = []
for file in evaluations:
sample = Path(file).with_suffix("").with_suffix("").name
samples.append(sample)
df = | pd.read_csv(file, header=None, names=["roi", "actual"]) | pandas.read_csv |
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.utils import resample
from tqdm.notebook import tqdm_notebook
import copy
from sklearn.base import is_classifier
class DSClassifier:
"""This classifier is designed to handle unbalanced data.
The classification is based on an ensemble of sub-sets.
Input:
base_estimator A base model that the classifier will use to make a prediction on each subset.
ratio The ratio of the minority group to the rest of the data.
The default is 1.
The ratio describes a ratio of 1: ratio.
For example:
Ratio = 1 Creates a 1: 1 ratio (50% / 50%) between the minority group and the rest of the data.
Ratio = 2 Creates a 2: 1 ratio (33% / 66%) between the minority group and the rest of the data.
ensemble The method by which the models of each subset will be combined together.
The default is mean.
For numeric labels you can select max or min to tilt the classifier to a certain side.
random_state Seed for the distribution of the majority population in each subset.
The default is 42.
Attributes:
fit(X_train, y_train)
predict(X)
predict_proba(X)
list_of_df List of all created sub-sets.
list_models List of all the models that make up the final model.
"""
def __init__(self, base_estimator, ratio = 1, ensemble = 'mean', random_state = 42):
def get_ensemble(ensemble):
if ensemble == 'mean':
return np.mean
if ensemble == 'max':
return np.max
if ensemble == 'min':
return np.min
else:
raise ValueError("ensemble must be one of these options: 'mean', 'max', 'min' not " + ensemble)
if is_classifier(base_estimator):
self.base_estimator = base_estimator
else:
raise ValueError("base_estimator must be a classifier not " + base_estimator)
self._estimator_type = 'classifier'
self._ratio = ratio
self.ensemble = get_ensemble(ensemble)
self._random_state = random_state
self.classes_ = None
self._target = None
self.list_of_df = None
self.list_models = None
def __repr__(self):
return self._estimator_type
def fit(self, X_train, y_train):
def balance(X_train, y_train, ratio, random_state):
model_input_data = pd.concat([X_train, y_train], axis=1)
counter = Counter(y_train).most_common()
minority = counter[-1][0]
majority = counter[0][0]
row_by_class = {majority: model_input_data[model_input_data[self.target] != minority], \
minority: model_input_data[model_input_data[self.target] == minority],}
num_of_samples_minority = int(row_by_class[minority].shape[0])
num_of_samples_majority = int(num_of_samples_minority)*ratio
list_of_df = []
while len(row_by_class[majority])>num_of_samples_majority:
majority_sample = resample(row_by_class[majority],
replace = True,
n_samples = num_of_samples_majority, random_state=random_state)
row_by_class[majority] = row_by_class[majority].drop(majority_sample.index.values.tolist())
subsets = | pd.concat([row_by_class[minority], majority_sample]) | pandas.concat |
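# Hedged usage sketch for DSClassifier, following its docstring; the base estimator
# and the train/test arrays are placeholders:
#
#   from sklearn.linear_model import LogisticRegression
#   clf = DSClassifier(base_estimator=LogisticRegression(), ratio=2, ensemble='mean')
#   clf.fit(X_train, y_train)
#   y_pred = clf.predict(X_test)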
# -*- coding: utf-8 -*-
"""data-analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1RKjHEUT1uIYiaDQt2YffetZ0gaRCwlk1
"""
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
import nltk
import os
import glob
import plotly.graph_objects as go
# import cufflinks as cf
from textblob import TextBlob
import re
import numpy as np
import pandas as pd
def get_subreddit_data(subreddit: str):
base_path = '../datasets'
train_file_template = os.path.join(
base_path, '{}_100000_train_part*.npy'.format(subreddit))
val_file_template = os.path.join(
base_path, '{}_100000_val_part*.npy'.format(subreddit))
train_files = glob.glob(train_file_template)
val_files = glob.glob(val_file_template)
train_comments = []
for file_name in train_files:
train_comments += np.load(file_name)
val_comments = []
for file_name in val_files:
val_comments += np.load(file_name)
return train_comments, val_comments
# cf.go_offline()
# cf.set_config_file(offline=False, world_readable=True)
# def enable_plotly_in_cell():
# import IPython
# from plotly.offline import init_notebook_mode
# display(IPython.core.display.HTML(
# '''<script src="/static/components/requirejs/require.js"></script>'''))
# init_notebook_mode(connected=False)
# Ankur
# PATH = "/content/drive/My Drive/Google Colab/wstm-project/"
# data = np.load(PATH + "5k/soccer.npy")
# Tarun
#PATH = "/content/drive/My Drive/CS_6240_Scrapping/data_5000/"
#data = np.load(PATH + "soccer_5000.npy")
subreddit = 'soccer'
train_comments, val_comments = get_subreddit_data(subreddit)
df = | pd.DataFrame(train_comments, columns=['comments']) | pandas.DataFrame |
# pylint: disable=W0102
import nose
import numpy as np
from pandas import Index, MultiIndex, DataFrame, Series
from pandas.compat import OrderedDict, lrange
from pandas.sparse.array import SparseArray
from pandas.core.internals import *
import pandas.core.internals as internals
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, randn)
from pandas.compat import zip, u
def assert_block_equal(left, right):
assert_almost_equal(left.values, right.values)
assert(left.dtype == right.dtype)
assert_almost_equal(left.mgr_locs, right.mgr_locs)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(
x=arr, shape=shape,
strides=(arr.itemsize,) + (0,) * (len(shape) - 1)).copy()
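# e.g. get_numeric_mat((3, 2)) broadcasts np.arange(3) across the trailing axis:
# array([[0, 0],
#        [1, 1],
#        [2, 2]])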
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
"""
Supported typestr:
* float, f8, f4, f2
* int, i8, i4, i2, i1
* uint, u8, u4, u2, u1
* complex, c16, c8
* bool
* object, string, O
* datetime, dt
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
"""
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N,)
shape = (num_items,) + item_shape
mat = get_numeric_mat(shape)
if typestr in ('float', 'f8', 'f4', 'f2',
'int', 'i8', 'i4', 'i2', 'i1',
'uint', 'u8', 'u4', 'u2', 'u1'):
values = mat.astype(typestr) + num_offset
elif typestr in ('complex', 'c16', 'c8'):
values = 1.j * (mat.astype(typestr) + num_offset)
elif typestr in ('object', 'string', 'O'):
values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
shape)
elif typestr in ('bool',):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt'):
values = (mat * 1e9).astype('M8[ns]')
elif typestr in ('sparse', 'sparse_na'):
# FIXME: doesn't support num_rows != 10
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith('_na'):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
4, 5, fill_value, 6], fill_value=fill_value)
arr = values.sp_values.view()
arr += (num_offset - 1)
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
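# Hedged example: create_block('f8', [0, 3]) builds a float64 block whose two rows of
# length N=10 are placed at manager positions 0 and 3.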
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows))
def create_mgr(descr, item_shape=None):
"""
Construct BlockManager from string description.
String description syntax looks similar to np.matrix initializer. It looks
like this::
a,b,c: f8; d,e,f: i8
Rules are rather simple:
* see list of supported datatypes in `create_block` method
* components are semicolon-separated
* each component is `NAME,NAME,NAME: DTYPE_ID`
* whitespace around colons & semicolons are removed
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
'a:f8-1; b:f8-2; c:f8-foobar'
"""
if item_shape is None:
item_shape = (N,)
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = | Index(mgr_items) | pandas.Index |
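# Hedged example, following the docstring syntax above:
# create_mgr('a,b,c: f8; d: object') yields a manager with a three-item float64 block
# ('a', 'b', 'c') and a one-item object block ('d'), each N=10 rows long.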
import pandas as pd
from typing import List
def check_missing_value(df: pd.DataFrame, cols: List[str]) -> pd.DataFrame:
"""
Count missing values in specified columns.
@param df: dataframe
@param cols: columns to be calculated
return: summary information
"""
res = | pd.DataFrame(cols, columns=['Feature']) | pandas.DataFrame |
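# Hedged usage sketch; "df" and its column names are placeholders:
#   summary = check_missing_value(df, ['age', 'income'])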
import streamlit as st
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from plotly import express
def accept_user_data():
duration = st.text_input("Enter the duration: ")
start_station = st.text_input("Enter the start area: ")
end_station = st.text_input("Enter the end station: ")
return np.array([duration, start_station, end_station]).reshape(1,-1)
# =================== ML Models Below =================
@st.cache(suppress_st_warning=True)
def decisionTree(X_train, X_test, Y_train, Y_test):
# training the model
tree = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
tree.fit(X_train, Y_train)
Y_pred = tree.predict(X_test)
score = accuracy_score(Y_test, Y_pred) * 100
report = classification_report(Y_test, Y_pred)
return score, report, tree
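# Hedged usage sketch; the train/test split comes from the (hypothetical) preprocessed trip data:
#
#   score, report, tree = decisionTree(X_train, X_test, Y_train, Y_test)
#   st.write(f"Decision tree accuracy: {score:.2f}%")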
@st.cache(suppress_st_warning=True)
def neuralNet(X_train, X_test, Y_train, Y_test):
# scaling data
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# start classifier
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5,2), random_state=1)
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
score = accuracy_score(Y_test, Y_pred) * 100
report = classification_report(Y_test, Y_pred)
return score, report, clf
@st.cache
def Knn_Classifier(X_train, X_test, Y_train, Y_test):
clf = KNeighborsClassifier(n_neighbors=5)
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
score = accuracy_score(Y_test, Y_pred) * 100
report = classification_report(Y_test, Y_pred)
return score, report, clf
# =================== ML Models End =================
@st.cache
def showMap():
plotData = | pd.read_csv('dataset-2010-latlong.csv') | pandas.read_csv |
"""Deconvolution plotter for plotting figures from deconvolution"""
import matplotlib
import matplotlib.pyplot
import numpy as np
import torch
import pyro
import math
from matplotlib.pyplot import cm
import pandas as pd
import seaborn as sns
from typing import Optional, Tuple, Dict
from ternadecov.time_deconv import TimeRegularizedDeconvolutionModel
from ternadecov.plotting_functions import (
generate_posterior_samples,
get_iqr_from_posterior_samples,
summarize_posterior_samples,
)
class DeconvolutionPlotter:
"""Class for plotting deconvolution results"""
def __init__(self, deconvolution: TimeRegularizedDeconvolutionModel):
"""Initializer for DeconvolutionPlotter
:param self: Instance of object
:param deconvolution: A TimeRegularizedDeconvolutionModel to plot the results of
"""
self.deconvolution = deconvolution
def plot_loss(self, filenames=()) -> matplotlib.axes.Axes:
"""Plot of ELBO loss during training from the deconvolution object.
:param self: An instance of self.
:param filenames: An iterable of filenames to save the plot to.
:return: A matplotlib.axes.Axes object.
"""
fig, ax = matplotlib.pyplot.subplots()
ax.plot(self.deconvolution.loss_hist)
ax.set_title("Losses")
ax.set_xlabel("iteration")
ax.set_ylabel("ELBO Loss")
for filename in filenames:
matplotlib.pyplot.savefig(filename)
return ax
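# Hedged usage sketch; "model" is assumed to be a fitted TimeRegularizedDeconvolutionModel:
#
#   plotter = DeconvolutionPlotter(model)
#   plotter.plot_loss(filenames=("loss.png",))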
def plot_phi_g_distribution(self, filenames=()) -> matplotlib.axes.Axes:
"""Plot the distribution of $phi_g$ values from the param_store.
:param self: An instance of self
:param filenames: An iterable of filenames to save the plot to
:return: A matplotlib.axes.Axes object.
"""
phi_g = pyro.param("log_phi_posterior_loc_g").clone().detach().exp().cpu()
fig, ax = matplotlib.pyplot.subplots()
ax.hist(phi_g.numpy(), bins=100)
ax.set_xlabel("$\phi_g$")
ax.set_ylabel("Counts")
for filename in filenames:
matplotlib.pyplot.savefig(filename)
return ax
def plot_beta_g_distribution(self, filenames=()) -> matplotlib.axes.Axes:
"""Plot distribution of beta_g from the param_store.
:param self: An instance of self
:param filenames: An iterable of filenames to save the plot to
:return: A matplotlib.axes.Axes object.
"""
beta_g = pyro.param("log_beta_posterior_loc_g").clone().detach().exp().cpu()
fig, ax = matplotlib.pyplot.subplots()
ax.hist(beta_g.numpy(), bins=100)
ax.set_xlabel("$beta_g$")
ax.set_ylabel("Counts")
for filename in filenames:
matplotlib.pyplot.savefig(filename)
return ax
def plot_sample_compositions_scatter(
self, figsize=(16, 9), ignore_hypercluster=False, filenames=()
):
"""Plot a scatter plot of the sample composition facetted by celltype
:param self: An instance of self
:param figsize: tuple of size 2 with figure size information
:param ignore_hypercluster: ignore hyperclustering and plot individual clusters without summarization
:param filenames: An iterable of filenames to save the plot to.
"""
if self.deconvolution.dataset.is_hyperclustered:
if ignore_hypercluster:
self.__plot_sample_compositions_scatter_default(figsize=figsize)
else:
self.__plot_sample_compositions_scatter_hyperclustered(figsize=figsize)
else:
if ignore_hypercluster:
raise ValueError(
"ignore_hypercluster is not supported for non-hyperclustered objecets"
)
self.__plot_sample_compositions_scatter_default(figsize=figsize)
for filename in filenames:
matplotlib.pyplot.savefig(filename)
def plot_composition_trajectories_via_posterior_sampling(
self,
show_iqr: bool = True,
show_combined: bool = True,
iqr_alpha: float = 0.2,
t_begin: float = 0.0,
t_end: float = 1.0,
n_bins: int = 1000,
n_samples_per_bin: int = 2000,
n_windows: int = 10,
savgol_polyorder: int = 1,
figsize: Tuple[float, float] = (3.0, 2.0),
celltype_summarization: dict = dict(),
sharey: bool = True,
lw: float = 1.0,
cell_type_to_color_dict: Optional[Dict[str, str]] = None,
filenames=(),
return_data=False,
**kwargs,
):
"""Plot the composition trajectories by sampling from the posterior.
:param self: An instance of self
:param show_iqr: Plot the Inter-quantile ranges
:param show_combined: Show all trajectories on one plot
:param iqr_alpha: alpha transparency for the IQR ranges
:param t_begin: start of the normalized time grid (in [0, 1]) to sample the posterior on
:param t_end: end of the normalized time grid (in [0, 1]) to sample the posterior on
:param n_bins: number of time bins
:param n_samples_per_bin: number of samples per bin
:param n_windows: number of windows
:param savgol_polyorder: smoothing polynomial order
:param figsize: Figure size
:param celltype_summarization: celltype summarization dictionary (for plotting only)
:param sharey: Share the y axis
:param lw: line width
:param cell_type_to_color_dict: Cell type to color dictionary
:param filenames: Filenames to save the plots to
:param \**kwargs:
Everything else
"""
assert (
self.deconvolution.trajectory_model_type == "gp"
), "plot_composition_trajectories_via_posterior_sampling is only possible for GP deconvolution"
# obtain posterior samples
xi_nq, pi_sampled_scn = generate_posterior_samples(
self.deconvolution,
t_begin=t_begin,
t_end=t_end,
n_bins=n_bins,
n_samples_per_bin=n_samples_per_bin,
)
cell_type_labels = self.deconvolution.dataset.cell_type_str_list
# optionally, summarize
if len(celltype_summarization) >= 1:
pi_sampled_scn = summarize_posterior_samples(
self.deconvolution, pi_sampled_scn, celltype_summarization
)
cell_type_labels = list(celltype_summarization.keys())
# estimate IQR and smooth
iqr_lo_cn, iqr_mid_cn, iqr_hi_cn = get_iqr_from_posterior_samples(
pi_sampled_scn,
perform_smoothing=True,
n_windows=n_windows,
savgol_polyorder=savgol_polyorder,
)
# plotting
# take care of colors
if cell_type_to_color_dict is None:
cell_type_to_color_dict = self.deconvolution.dataset.cell_type_to_color_dict
for cell_type in cell_type_labels:
assert (
cell_type in cell_type_to_color_dict
), f"Color for cell type {cell_type} is not specified!"
colors = list(map(cell_type_to_color_dict.get, cell_type_labels))
n_cell_types = pi_sampled_scn.shape[1]
xi_n = xi_nq.cpu().numpy()[:, 0]
if show_combined:
fig, ax = matplotlib.pyplot.subplots(figsize=figsize)
else:
if "ncols" in kwargs.keys():
ncols = kwargs["ncols"]
else:
ncols = 3
nrows = int(np.ceil(len(cell_type_labels) / ncols))
fig, axs = matplotlib.pyplot.subplots(
nrows,
ncols,
figsize=(figsize[0] * ncols, figsize[1] * nrows),
sharey=sharey,
)
actual_time_n = (
self.deconvolution.dataset.time_min
+ self.deconvolution.dataset.time_range * xi_n
)
for i_cell_type in range(n_cell_types):
color = colors[i_cell_type]
if not show_combined:
ax = axs.flatten()[i_cell_type]
ax.plot(
actual_time_n,
iqr_mid_cn[i_cell_type],
c=color,
label=cell_type_labels[i_cell_type],
lw=lw,
)
if show_iqr and iqr_alpha > 0:
ax.fill_between(
actual_time_n,
iqr_lo_cn[i_cell_type],
iqr_hi_cn[i_cell_type],
alpha=iqr_alpha,
color=color,
edgecolor="none",
)
ax.set_xlabel("Time")
ax.set_ylabel("Proportion")
if show_combined:
ax.set_title("Predicted cell proportions")
ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left", fontsize="small")
else:
ax.set_title(f"{cell_type_labels[i_cell_type]}")
ax.set_xlim((np.min(actual_time_n), np.max(actual_time_n)))
# get rid of extra axes
if not show_combined:
for idx in range(i_cell_type + 1, ncols * nrows):
axs.flatten()[idx].axis("off")
fig.tight_layout()
for filename in filenames:
matplotlib.pyplot.savefig(filename)
if return_data:
return {
"actual_time_n": actual_time_n,
"iqr_mid_cn": iqr_mid_cn,
"cell_type_labels": cell_type_labels,
}
def plot_gp_composition_trajectories(self, n_samples=500, filenames=()):
"""" Plot per-celltype (deprecated)
:param self: An instance of self
:param n_samples: Number of samples to draw from GP
:param filenames: Filenames to save to
"""
assert (
self.deconvolution.trajectory_model_type == "gp"
), "plot_composition_trajectories_via_posterior_sampling is only possible for GP deconvolution"
with torch.no_grad():
traj = self.deconvolution.population_proportion_model
xi_new_nq = torch.linspace(
0.0,
1.0,
n_samples,
device=self.deconvolution.device,
dtype=self.deconvolution.dtype,
)[..., None]
f_new_loc_cn, f_new_var_cn = traj.gp.forward(xi_new_nq, full_cov=False)
f_new_scale_cn = f_new_var_cn.sqrt()
f_new_sampled_scn = torch.distributions.Normal(
f_new_loc_cn, f_new_scale_cn
).sample([n_samples])
pi_new_sampled_scn = torch.softmax(f_new_sampled_scn, dim=1)
# pi_new_loc_cn = torch.softmax(f_new_loc_cn, dim=0)
plotrange_kcn = torch.quantile(
pi_new_sampled_scn,
torch.Tensor([0.25, 0.5, 0.75]).to(self.deconvolution.device),
0,
).cpu()
n_celltypes = plotrange_kcn.shape[1]
nrow = math.ceil(math.sqrt(n_celltypes))
ncol = math.ceil(math.sqrt(n_celltypes))
fig, ax = matplotlib.pyplot.subplots(nrow, ncol, figsize=(10, 8))
# TODO: Add colors, add titles
for i in range(n_celltypes):
ax_x_ind = i // nrow
ax_y_ind = i % nrow
ax[ax_x_ind, ax_y_ind].fill_between(
xi_new_nq.cpu().numpy()[:, 0],
plotrange_kcn[0, i, :].numpy().T,
plotrange_kcn[2, i].numpy().T,
)
ax[ax_x_ind, ax_y_ind].plot(
xi_new_nq.cpu().numpy(), plotrange_kcn[1, i].cpu().numpy().T, c="black"
)
ax[ax_x_ind, ax_y_ind].set_ylabel('Proportion')
ax[ax_x_ind, ax_y_ind].set_xlabel('Time')
celltype_name = self.deconvolution.dataset.cell_type_str_list[i]
ax[ax_x_ind, ax_y_ind].set_title(celltype_name)
fig.tight_layout()
for filename in filenames:
matplotlib.pyplot.savefig(filename)
def plot_sample_compositions_boxplot_confidence(
self,
n_draws=100,
verbose=False,
figsize=(20, 15),
dpi=80,
spacing=1,
filenames=(),
):
"""Plot individual sample compositions as boxplots representing confidence in predictions
:param self: An instance of object
:param n_draws: Number of draws to perform for CI estimation
:param verbose: Verbosity
:param figsize: Size of figure to plot
:param dpi: DPI of output figure
:param spacing: x-axis spacing of groups of samples from different timepoints
:param filenames: Filenames to save the files as
"""
# l -- draw index
assert (
self.deconvolution.trajectory_model_type == "gp"
), "Only GP deconvolution is supported!"
n_samples = self.deconvolution.dataset.num_samples
n_celltypes = self.deconvolution.dataset.num_cell_types
cell_pop_lmc = torch.zeros([n_draws, n_samples, n_celltypes])
sort_order = torch.argsort(self.deconvolution.dataset.t_m)
fig_nrow = math.ceil(math.sqrt(n_celltypes))
fig_ncol = math.ceil(math.sqrt(n_celltypes))
# Generate draws from posterior
for i in range(n_draws):
cell_pop_lmc[i, :] = (
self.deconvolution.population_proportion_model.guide(torch.Tensor([]))
.clone()
.detach()
.cpu()
)[None, :]
# Get quantiles of draw
plot_quantiles = torch.quantile(cell_pop_lmc, q=torch.linspace(0, 1, 5), dim=0)
# Generate figure and axis
fig, ax = matplotlib.pyplot.subplots(
fig_nrow, fig_ncol, figsize=figsize, dpi=dpi
)
# Get plotting positions
z = self.deconvolution.dataset.t_m[sort_order].cpu()
extra_spacing = torch.where(
torch.diff(z, append=z[None, -1]) > 1e-6, spacing, 0
)
positions = torch.cumsum(torch.ones(extra_spacing.shape) + extra_spacing, 0)
for c in range(plot_quantiles.shape[2]): # celltypes
if verbose:
print(f"Processing {self.deconvolution.dataset.cell_type_str_list[c]}")
# Generate data for this panel
plot_data = list()
for m in range(plot_quantiles.shape[1]): # samples
m = sort_order[m]
plot_data.append(
{
"whislo": plot_quantiles[0, m, c].item(),
"q1": plot_quantiles[1, m, c].item(),
"med": plot_quantiles[2, m, c].item(),
"q3": plot_quantiles[3, m, c].item(),
"whishi": plot_quantiles[4, m, c].item(),
"label": f"{self.deconvolution.dataset.bulk_sample_names[m]}",
}
)
# Plot panel
boxprops = dict(facecolor=cm.tab10(c))
cur_axis = ax[c // fig_nrow, c % fig_nrow]
cur_axis.bxp(
bxpstats=plot_data,
showfliers=False,
shownotches=False,
showmeans=False,
boxprops=boxprops,
patch_artist=True,
positions=positions,
)
cur_axis.set_title(self.deconvolution.dataset.cell_type_str_list[c])
cur_axis.set_xticklabels(
list(
self.deconvolution.dataset.bulk_sample_names[x.item()]
for x in sort_order
),
rotation=90,
)
cur_axis.set_ylabel('Proportion')
cur_axis.set_xlabel('Time')
fig.tight_layout()
for filename in filenames:
matplotlib.pyplot.savefig(filename)
def plot_sample_compositions_boxplot(self, figsize=(16, 9), filenames=()):
"""Plot sample compositions in boxplot form
:param self: An instance of self.
:param figsize: Figure size
:param filenames: Filename to save the plots to
"""
if self.deconvolution.trajectory_model_type == "polynomial":
cell_pop = pyro.param("cell_pop_posterior_loc_mc").clone().detach().cpu()
elif self.deconvolution.trajectory_model_type == "gp":
cell_pop = (
self.deconvolution.population_proportion_model.guide(torch.Tensor([]))
.clone()
.detach()
.cpu()
)
t_m = self.deconvolution.dataset.t_m.clone().detach().cpu()
sort_order = torch.argsort(self.deconvolution.dataset.t_m)
n_cell_types = cell_pop.shape[1]
n_rows = math.ceil(math.sqrt(n_cell_types))
n_cols = math.ceil(n_cell_types / n_rows)
fig, ax = matplotlib.pyplot.subplots(n_rows, n_cols, figsize=figsize)
for i in range(cell_pop.shape[1]):
r_i = int(i // n_rows)
c_i = int(i % n_rows)
t = (
t_m[sort_order] * self.deconvolution.dataset.time_range
+ self.deconvolution.dataset.time_min
)
prop = cell_pop[sort_order, i].clone().detach().cpu()
df1 = | pd.DataFrame({"time": t, "proportion": prop}) | pandas.DataFrame |
import datetime
import numpy as np
import pandas as pd
import pytest
from .utils import (
get_extension,
to_json_string,
to_days_since_epoch,
extend_dict,
filter_by_columns,
breakdown_by_month,
breakdown_by_month_sum_days,
to_bin,
)
@pytest.fixture
def issues():
return pd.DataFrame(
[
{
"key": "ABC-1",
"priority": "high",
"start": pd.Timestamp(2018, 1, 1),
"end": pd.Timestamp(2018, 3, 20),
},
{
"key": "ABC-2",
"priority": "med",
"start": pd.Timestamp(2018, 1, 2),
"end": pd.Timestamp(2018, 1, 20),
},
{
"key": "ABC-3",
"priority": "high",
"start": pd.Timestamp(2018, 2, 3),
"end": pd.Timestamp(2018, 3, 20),
},
{
"key": "ABC-4",
"priority": "med",
"start": pd.Timestamp(2018, 1, 4),
"end": pd.Timestamp(2018, 3, 20),
},
{
"key": "ABC-5",
"priority": "high",
"start": pd.Timestamp(2018, 2, 5),
"end": pd.Timestamp(2018, 2, 20),
},
{
"key": "ABC-6",
"priority": "med",
"start": pd.Timestamp(2018, 3, 6),
"end": pd.Timestamp(2018, 3, 20),
},
],
columns=["key", "priority", "start", "end"],
)
def test_extend_dict():
assert extend_dict({"one": 1}, {"two": 2}) == {"one": 1, "two": 2}
def test_get_extension():
assert get_extension("foo.csv") == ".csv"
assert get_extension("/path/to/foo.csv") == ".csv"
assert get_extension("\\path\\to\\foo.csv") == ".csv"
assert get_extension("foo") == ""
assert get_extension("foo.CSV") == ".csv"
def test_to_json_string():
assert to_json_string(1) == "1"
assert to_json_string("foo") == "foo"
assert to_json_string(None) == ""
assert to_json_string(np.NaN) == ""
assert to_json_string(pd.NaT) == ""
assert to_json_string(pd.Timestamp(2018, 2, 1)) == "2018-02-01"
def test_to_days_since_epoch():
assert to_days_since_epoch(datetime.date(1970, 1, 1)) == 0
assert to_days_since_epoch(datetime.date(1970, 1, 15)) == 14
def test_filter_by_columns():
df = pd.DataFrame(
[
{"high": 1, "med": 2, "low": 0},
{"high": 3, "med": 1, "low": 2},
{"high": 2, "med": 2, "low": 3},
],
columns=["high", "med", "low"],
)
# Check without values, original data frame will be returned.
result = filter_by_columns(df, None)
assert result.equals(df)
# Check with values, columns will be filtered and reordered
result = filter_by_columns(df, ["med", "high"])
assert list(result.columns) == ["med", "high"]
assert result.to_dict("records") == [
{"high": 1, "med": 2},
{"high": 3, "med": 1},
{"high": 2, "med": 2},
]
def test_breakdown_by_month(issues):
breakdown = breakdown_by_month(issues, "start", "end", "key", "priority")
assert list(breakdown.columns) == ["high", "med"] # alphabetical
assert list(breakdown.index) == [
pd.Timestamp(2018, 1, 1),
pd.Timestamp(2018, 2, 1),
| pd.Timestamp(2018, 3, 1) | pandas.Timestamp |
from requests import Session
from bs4 import BeautifulSoup
import pandas as pd
HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '\
'AppleWebKit/537.36 (KHTML, like Gecko) '\
'Chrome/75.0.3770.80 Safari/537.36'}
def zacks_extract(ratio_name, period='weekly_'):
"""
Function to extract Ratios from Zacks
"""
# Create Empty list
list_to_append = []
# Read list of stocks and get all symbols
stocks = pd.read_csv('../docs/my_stocks.csv')
list_of_stocks = stocks['symbol']
    # Record the number of stocks so we can track completion of the loop
    length = len(list_of_stocks)
# Create Session
s = Session()
# Add headers
s.headers.update(HEADERS)
# JSON Key Field
json_field = period + ratio_name
# For every single stock, do the following
for idx, stock in enumerate(list_of_stocks):
# Print Progress
        print((idx+1)/length)
# Create URL
        url = f'https://widget3.zacks.com/data/chart/json/{stock}/{ratio_name}/www.zacks.com'
# Request and transform response in json
screener = s.get(url)
json = screener.json()
# Check for error
if len(json) > 1:
try:
# Append results into list
[list_to_append.append([i[0], i[1], stock]) for idx, i in enumerate(json[json_field].items()) if idx < 300]
            except (KeyError, AttributeError):
                continue
# Create dataframe with results
df = pd.DataFrame(list_to_append)
df.columns = ['timestamp', ratio_name, 'symbol']
# Export
df['timestamp'] = pd.to_datetime(df['timestamp'])
filepath = '../docs/' + ratio_name + '.csv'
df.to_csv(filepath, index=0)
return df
def merge_ratio(df, all_prices, ratio_name):
"""
Function to merge ratio with all prices
"""
# Metric Dictionary
dic = {'pe_ratio': 'eps_ttm',
'price_to_book_value': 'book_value_ttm'}
# Field name
field_name = dic[ratio_name]
# Rename columns, convert column to datetime and keep only records where date > 2017-01-01
df.columns = ['timestamp_merge', ratio_name, 'symbol']
df['timestamp_merge'] = pd.to_datetime(df['timestamp_merge'])
df = df[df['timestamp_merge'] > '2014-01-01']
# Convert all prices column to datetime
all_prices['just_date_merge'] = pd.to_datetime(all_prices['just_date'])
# Merge both dataframes
merge_df = pd.merge(all_prices,
df,
left_on = ['just_date_merge', 'symbol'],
right_on = ['timestamp_merge', 'symbol'],
how='left')
# Calculate EPS TTM based on weekly PE Ratios
merge_df[field_name] = merge_df['close_price'] / merge_df[ratio_name]
merge_df[field_name] = merge_df[field_name].round(3)
# Since we have only Weekly Value we can Forward/Backward Fill the EPS TTM
merge_df[field_name] = merge_df.groupby('symbol').ffill()[field_name]
merge_df[field_name] = merge_df.groupby('symbol').bfill()[field_name]
# Calculate PE Ratio with EPS TTM and round numbers
merge_df[ratio_name] = merge_df['close_price'] / merge_df[field_name]
merge_df[ratio_name] = merge_df[ratio_name].round(3)
# Drop columns
merge_df.drop(['just_date_merge', 'timestamp_merge'], inplace=True, axis=1)
# Export
merge_df.to_csv('../docs/' + field_name + '.csv', index=0)
return merge_df
def pe_analysis(prices_pe):
# Calculate Average Market Cap by Industry/Sector overtime
daily_pe_ratio_mean = prices_pe[['just_date', 'industry', 'sector', 'pe_ratio']].groupby(['just_date', 'industry', 'sector']).mean()['pe_ratio']
daily_pe_ratio_std = prices_pe[['just_date', 'industry', 'sector', 'pe_ratio']].groupby(['just_date', 'industry', 'sector']).std()['pe_ratio']
# Convert to Data Frame
daily_pe_ratio_mean = daily_pe_ratio_mean.reset_index()
daily_pe_ratio_std = daily_pe_ratio_std.reset_index()
# Rename Columns
daily_pe_ratio_mean.columns = ['just_date', 'industry', 'sector', 'avg_pe_ratio']
daily_pe_ratio_std.columns = ['just_date', 'industry', 'sector', 'std_pe_ratio']
# Merge with main
daily_pe_stats = pd.merge(daily_pe_ratio_mean, daily_pe_ratio_std, on=['just_date', 'industry', 'sector'])
daily_pe_stats = daily_pe_stats.dropna()
# Merge with original
prices_pe['just_date'] = pd.to_datetime(prices_pe['just_date'])
daily_pe_stats['just_date'] = pd.to_datetime(daily_pe_stats['just_date'])
prices_pe = pd.merge(prices_pe, daily_pe_stats, how='left', on=['just_date', 'industry', 'sector'])
# See how many STDs a PE Ratio is far from mean
prices_pe['pe_ratio_std_diff'] = (prices_pe['avg_pe_ratio'] - prices_pe['pe_ratio']) / prices_pe['std_pe_ratio']
return prices_pe
def main_ratio(all_prices, ratio_name, full_refresh=False):
# If a full refresh is not necessary
if full_refresh == False:
# Read existing PE Ratios and merge it with all prices
df = | pd.read_csv('../docs/' + ratio_name + '.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 11:51:39 2020
This is best run inside Spyder, not as standalone script.
Author: @hk_nien on Twitter.
"""
import re
import sys
import io
import urllib
import urllib.request
from pathlib import Path
import time
import locale
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import nl_regions
import scipy.signal
import scipy.interpolate
import scipy.integrate
import tools
from g_mobility_data import get_g_mobility_data
from nlcovidstats_data import (
init_data,
DFS,
get_municipalities_by_pop,
load_cumulative_cases,
)
# These delay values are tuned to match the RIVM Rt estimates.
# The represent the delay (days) from infection to report date,
# referencing the report date.
# Extrapolation: constant value.
DELAY_INF2REP = [
('2020-07-01', 7.5),
('2020-09-01', 7),
('2020-09-15', 9),
('2020-10-09', 9),
('2020-11-08', 7),
('2020-12-01', 6.5),
('2021-02-15', 6.5),
('2021-04-05', 4),
('2021-07-06', 4),
('2021-07-15', 5),
('2021-07-23', 4),
('2021-07-30', 4),
('2021-11-04', 4),
('2021-11-11', 4.5),
('2021-11-20', 5),
('2021-11-25', 5),
('2021-12-04', 4.5), # test capacity increased
('2021-12-08', 4), # Speculation...
]
_DOW_CORR_CACHE = {} # keys: dayrange tuples.
def get_dow_correction_rolling(nweeks=7, taper=0.5):
"""Return DoW correction factors for all dates.
Parameters:
- nweeks: number of preceding weeks to use for each date.
- taper: which fraction of old data to taper to lower weight.
Return:
- Series with same timestamp index as cases data.
"""
df, _ = get_region_data('Nederland', lastday=-1, correct_dow=None)
# df = df.iloc[3:-3].copy() # strip edge points without well defined 7d mean.
# Correction factor - 1
df['Delta_factor'] = df['Delta']/df['Delta7r']
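    # build a tapered averaging kernel over the preceding nweeks weeks (the oldest ntaper weeks get reduced weight)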
ntaper = int(nweeks*taper + 0.5)
kernel = np.zeros(nweeks*2 + 1)
kernel[-nweeks:] = 1
kernel[-nweeks:-nweeks+ntaper] = np.linspace(1/ntaper, 1-1/ntaper, ntaper)
kernel /= kernel.sum()
df['Dow_factor'] = np.nan
for idow in range(7):
row_select = df.index[df.index.dayofweek == idow]
facs = df.loc[row_select, 'Delta_factor']
n = len(facs)
assert len(facs) > nweeks
mean_factors = np.convolve(facs, kernel, mode='same')
mean_factors[mean_factors == 0] = np.nan
df.loc[row_select, 'Dow_factor'] = 1/mean_factors
df.loc[df.index[:8], 'Dow_factor'] = np.nan
return df['Dow_factor']
def get_dow_correction(dayrange=(-50, -1), verbose=False):
"""Return array with day-of-week correction factors.
- dayrange: days to consider for DoW correction.
- verbose: whether to show plots and print diagnostics.
Return:
- dow_corr_factor: array (7,) with DoW correction (0=Monday).
"""
dayrange = tuple(dayrange)
if dayrange in _DOW_CORR_CACHE and not verbose:
return _DOW_CORR_CACHE[dayrange].copy()
# timestamp index, columns Delta, Delta7r, and others.
df, _ = get_region_data('Nederland', lastday=dayrange[-1], correct_dow=None)
df = df.iloc[:-4] # Discard the last rows that have no correct rolling average.
df = df.iloc[dayrange[0]-dayrange[1]:]
# Correction factor - 1
df['Delta_factor'] = df['Delta']/df['Delta7r']
# Collect by day of week (0=Monday)
factor_by_dow = np.zeros(7)
for i in range(7):
factor_by_dow[i] = 1 / df.loc[df.index.dayofweek == i, 'Delta_factor'].mean()
factor_by_dow /= factor_by_dow.mean()
df['Delta_est_factor'] = factor_by_dow[df.index.dayofweek]
df['Delta_corrected'] = df['Delta'] * df['Delta_est_factor']
rms_dc = (df['Delta_corrected']/df['Delta7r']).std()
rms_d = df['Delta_factor'].std()
if verbose:
print('DoW effect: deviations from 7-day rolling average.\n'
f' Original: RMS={rms_d:.3g}; after correction: RMS={rms_dc:.3g}')
fig, ax = plt.subplots(tight_layout=True)
ax.plot(df['Delta_factor'], label='Delta')
ax.plot(df['Delta_corrected'] / df['Delta7r'], label='Delta_corrected')
ax.plot(df['Delta_est_factor'], label='Correction factor')
tools.set_xaxis_dateformat(ax, 'Date')
ax.legend()
ax.set_ylabel('Daily cases deviation')
title = 'Day-of-week correction on daily cases'
ax.set_title(title)
fig.canvas.set_window_title(title)
fig.show()
if rms_dc > 0.8*rms_d:
print(f'WARNING: DoW correction for dayrange={dayrange} does not seem to work.\n'
' Abandoning this correction.')
factor_by_dow = np.ones(7)
_DOW_CORR_CACHE[dayrange] = factor_by_dow.copy()
return factor_by_dow
def get_region_data(region, lastday=-1, printrows=0, correct_anomalies=True,
correct_dow='r7'):
"""Get case counts and population for one municipality.
It uses the global DFS['mun'], DFS['cases'] dataframe.
Parameters:
- region: region name (see below)
- lastday: last day to include.
- printrows: print this many of the most recent rows
- correct_anomalies: correct known anomalies (hiccups in reporting)
by reassigning cases to earlier dates.
- correct_dow: None, 'r7' (only for extrapolated rolling-7 average)
Special municipalities:
- 'Nederland': all
- 'HR:Zuid', 'HR:Noord', 'HR:Midden', 'HR:Midden+Zuid', 'HR:Midden+Noord':
holiday regions.
- 'MS:xx-yy': municipalities with population xx <= pop/1000 < yy'
- 'P:xx': province
Use data up to lastday.
Return:
- df: dataframe with added columns:
- Delta: daily increase in case count (per capita).
- Delta_dowc: daily increase, day-of-week correction applied
based on national pattern in most recent 7 weeks.
- Delta7r: daily increase as 7-day rolling average
(last 3 days are estimated).
      - DeltaSG: daily increase, smoothed with (15, 2) Savitzky-Golay filter.
- pop: population.
"""
df1, npop = nl_regions.select_cases_region(DFS['cases'], region)
# df1 will have index 'Date_of_report', columns:
# 'Total_reported', 'Hospital_admission', 'Deceased'
assert correct_dow in [None, 'r7']
if lastday < -1 or lastday > 0:
df1 = df1.iloc[:lastday+1]
if len(df1) == 0:
raise ValueError(f'No data for region={region!r}.')
# nc: number of cases
nc = df1['Total_reported'].diff()
if printrows > 0:
print(nc[-printrows:])
nc.iat[0] = 0
df1['Delta'] = nc/npop
if correct_anomalies:
_correct_delta_anomalies(df1)
nc = df1['Delta'] * npop
nc7 = nc.rolling(7, center=True).mean()
nc7[np.abs(nc7) < 1e-10] = 0.0 # otherwise +/-1e-15 issues.
nc7a = nc7.to_numpy()
# last 3 elements are NaN, use mean of last 4 raw (dow-corrected) to
# get an estimated trend and use exponential growth or decay
# for filling the data.
if correct_dow == 'r7':
# mean number at t=-1.5 days
dow_correction = get_dow_correction((lastday-49, lastday)) # (7,) array
df1['Delta_dowc'] = df1['Delta'] * dow_correction[df1.index.dayofweek]
nc1 = np.mean(nc.iloc[-4:] * dow_correction[nc.index[-4:].dayofweek])
else:
nc1 = nc.iloc[-4:].mean() # mean number at t=-1.5 days
log_slope = (np.log(nc1) - np.log(nc7a[-4]))/1.5
nc7.iloc[-3:] = nc7a[-4] * np.exp(np.arange(1, 4)*log_slope)
# 1st 3 elements are NaN
nc7.iloc[:3] = np.linspace(0, nc7.iloc[3], 3, endpoint=False)
df1['Delta7r'] = nc7/npop
df1['DeltaSG'] = scipy.signal.savgol_filter(
nc/npop, 15, 2, mode='interp')
return df1, npop
def _correct_delta_anomalies(df):
"""Apply anomaly correction to 'Delta' column.
Store original values to 'Delta_orig' column.
Pull data from DFS['anomalies']
"""
dfa = DFS['anomalies']
df['Delta_orig'] = df['Delta'].copy()
dt_tol = pd.Timedelta(12, 'h') # tolerance on date matching
match_date = lambda dt: abs(df.index - dt) < dt_tol
preserve_n = True
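    # reassign a fraction of each anomalous day's cases to a date days_back days away; days_back == 0 adds cases without compensation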
for (date, data) in dfa.iterrows():
f = data['fraction']
dt = data['days_back']
dn = df.loc[match_date(date), 'Delta_orig'] * f
if len(dn) == 0:
print(f'Anomaly correction: no match for {date}; skipping.')
continue
assert len(dn) == 1
dn = dn[0]
df.loc[match_date(date + pd.Timedelta(dt, 'd')), 'Delta'] += dn
if dt != 0:
df.loc[match_date(date), 'Delta'] -= dn
else:
preserve_n = False
if preserve_n:
assert np.isclose(df["Delta"].sum(), df["Delta_orig"].sum(), rtol=1e-6, atol=0)
else:
delta = df["Delta"].sum() - df["Delta_orig"].sum()
print(f'Note: case count increased by {delta*17.4e6:.0f} cases due to anomalies.')
def construct_Dfunc(delays, plot=False):
"""Return interpolation functions fD(t) and fdD(t).
fD(t) is the delay between infection and reporting at reporting time t.
fdD(t) is its derivative.
Parameter:
- delays: tuples (datetime_report, delay_days). Extrapolation is at
constant value.
- plot: whether to generate a plot.
Return:
- fD: interpolation function for D(t) with t in nanoseconds since epoch.
- fdD: interpolation function for dD/dt.
(taking time in ns but returning dD per day.)
- delay_str: delay string e.g. '7' or '7-9'
"""
ts0 = [float(pd.to_datetime(x[0]).to_datetime64()) for x in delays]
Ds0 = [float(x[1]) for x in delays]
if len(delays) == 1:
# prevent interp1d complaining.
ts0 = [ts0[0], ts0[0]+1e9]
Ds0 = np.concatenate([Ds0, Ds0])
# delay function as linear interpolation;
# nanosecond timestamps as t value.
fD0 = scipy.interpolate.interp1d(
ts0, Ds0, kind='linear', bounds_error=False,
fill_value=(Ds0[0], Ds0[-1])
)
# construct derivative dD/dt, smoothen out
day = 1e9*86400 # one day in nanoseconds
ts = np.arange(ts0[0]-3*day, ts0[-1]+3.01*day, day)
dDs = (fD0(ts+3*day) - fD0(ts-3*day))/6
fdD = scipy.interpolate.interp1d(
ts, dDs, 'linear', bounds_error=False,
fill_value=(dDs[0], dDs[-1]))
# reconstruct D(t) to be consistent with the smoothened derivative.
Ds = scipy.integrate.cumtrapz(dDs, ts/day, initial=0) + Ds0[0]
fD = scipy.interpolate.interp1d(
ts, Ds, 'linear', bounds_error=False,
fill_value=(Ds[0], Ds[-1]))
Dmin, Dmax = np.min(Ds0), np.max(Ds0)
if Dmin == Dmax:
delay_str = f'{Dmin:.0f}'
else:
delay_str = f'{Dmin:.0f}-{Dmax:.0f}'
if plot:
fig, ax = plt.subplots(1, 1, figsize=(7, 3), tight_layout=True)
tsx = np.linspace(
ts[0],
int(pd.to_datetime('now').to_datetime64())
)
ax.plot(pd.to_datetime(tsx.astype(np.int64)), fD(tsx))
ax.set_ylabel('Vertraging (dagen)')
tools.set_xaxis_dateformat(ax, 'Rapportagedatum')
title = 'Vertraging = t_rapportage - t_infectie - t_generatie/2'
fig.canvas.set_window_title(title)
ax.set_title(title)
fig.show()
return fD, fdD, delay_str
def estimate_Rt_df(r, delay=9, Tc=4.0):
"""Return Rt data, assuming delay infection-reporting.
- r: Series with smoothed new reported cases.
(e.g. 7-day rolling average or other smoothed data).
- delay: assume delay days from infection to positive report.
alternatively: list of (timestamp, delay) tuples if the delay varies over time.
The timestamps refer to the date of report.
- Tc: assume generation interval.
Return:
- DataFrame with columns 'Rt' and 'delay'.
"""
if not hasattr(delay, '__getitem__'):
# simple delay - attach data to index with proper offset
log_r = np.log(r.to_numpy()) # shape (n,)
assert len(log_r.shape) == 1
log_slope = (log_r[2:] - log_r[:-2])/2 # (n-2,)
Rt = np.exp(Tc*log_slope) # (n-2,)
index = r.index[1:-1] - pd.Timedelta(delay, unit='days')
Rdf = pd.DataFrame(
dict(Rt=pd.Series(index=index, data=Rt, name='Rt'))
)
Rdf['delay'] = delay
else:
# the hard case: delay varies over time.
# if ri is the rate of infections, tr the reporting date, and D
# the delay, then:
# ri(tr-D(tr)) = r(tr) / (1 - dD/dt)
fD, fdD, _ = construct_Dfunc(delay)
# note: timestamps in nanoseconds since epoch, rates in 'per day' units.
day_ns = 86400e9
tr = r.index.astype(int)
ti = tr - fD(tr) * day_ns
ri = r.to_numpy() / (1 - fdD(tr))
# now get log-derivative the same way as above
log_ri = np.log(np.where(ri==0, np.nan, ri))
log_slope = (log_ri[2:] - log_ri[:-2])/2 # (n-2,)
Rt = np.exp(Tc*log_slope) # (n-2,)
# build series with timestamp index
# (Note: int64 must be specified explicitly in Windows, 'int' will be
# int32.)
Rt_series = pd.Series(
data=Rt, name='Rt',
index=pd.to_datetime(ti[1:-1].astype(np.int64))
)
Rdf = pd.DataFrame(dict(Rt=Rt_series))
Rdf['delay'] = fD(tr[1:-1])
return Rdf
def get_t2_Rt(ncs, delta_t, i0=-3):
"""Return most recent doubling time and Rt, from case series"""
# exponential fit
t_gen = 4.0 # generation time (d)
t_double = delta_t / np.log2(ncs.iloc[i0]/ncs.iloc[i0-delta_t])
Rt = 2**(t_gen / t_double)
return t_double, Rt
def add_labels(ax, labels, xpos, mindist_scale=1.0, logscale=True):
"""Add labels, try to have them avoid bumping.
- labels: list of tuples (y, txt)
- mindist_scale: set to >1 or <1 to tweak label spacing.
"""
from scipy.optimize import fmin_cobyla
ymin, ymax = ax.get_ylim()
if logscale:
mindist = np.log10(ymax/ymin)*0.025*mindist_scale
else:
mindist = (ymax - ymin)*0.025*mindist_scale
labels = sorted(labels)
    # log positions, sorted
if logscale:
Ys = np.log10([l[0] for l in labels])
else:
Ys = np.array([l[0] for l in labels])
n = len(Ys)
# Distance matrix: D @ y = distances between adjacent y values
D = np.zeros((n-1, n))
for i in range(n-1):
D[i, i] = -1
D[i, i+1] = 1
def cons(Y):
ds = D @ Y
errs = np.array([ds - mindist, ds])
#print(f'{np.around(errs, 2)}')
return errs.reshape(-1)
# optimization function
def func(Y):
return ((Y - Ys)**2).sum()
new_Ys = fmin_cobyla(func, Ys, cons, catol=mindist*0.05)
for Y, (_, txt) in zip(new_Ys, labels):
y = 10**Y if logscale else Y
ax.text(xpos, y, txt, verticalalignment='center')
def _zero2nan(s):
"""Return copy of array/series s, negative/zeros replaced by NaN."""
sc = s.copy()
sc[s <= 0] = np.nan
return sc
def _add_event_labels(ax, tmin, tmax, with_ribbons=True, textbox=False, bottom=True,
flagmatch='RGraph'):
"""Add event labels and ribbons to axis (with date on x-axis).
- ax: axis object
- tmin, tmax: time range to assume for x axis.
- textbox: whether to draw text in a semi-transparent box.
- bottom: whether to put labels at the bottom rather than top.
- flagmatch: which flags to match (regexp).
"""
ymin, ymax = ax.get_ylim()
y_lab = ymin if bottom else ymax
ribbon_yspan = (ymax - ymin)*0.35
ribbon_hgt = ribbon_yspan*0.1 # ribbon height
ribbon_ystep = ribbon_yspan*0.2
df_events = DFS['events']
ribbon_colors = ['#ff0000', '#cc7700'] * 10
if df_events is not None:
i_res = 0
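        # annotate each matching event and stack semi-transparent ribbons at staggered heights so overlapping periods remain visible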
for _, (res_t, res_t_end, res_d, flags) in df_events.reset_index().iterrows():
if not (tmin <= res_t <= tmax):
continue
if flags and not re.match(flagmatch, flags):
continue
res_d = res_d.replace('\\n', '\n')
# note; with \n in text, alignment gets problematic.
txt = ax.text(res_t, y_lab, f' {res_d}', rotation=90, horizontalalignment='center',
verticalalignment='bottom' if bottom else 'top',
fontsize=8)
if textbox:
txt.set_bbox(dict(facecolor='white', alpha=0.4, linewidth=0))
if pd.isna(res_t_end):
continue
if with_ribbons:
res_t_end = min(res_t_end, tmax)
a, b = (ribbon_ystep * i_res), (ribbon_yspan - ribbon_hgt)
rect_y_lo = a % b + y_lab
color = ribbon_colors[int(a // b)]
rect = matplotlib.patches.Rectangle((res_t, rect_y_lo), res_t_end-res_t, ribbon_hgt,
color=color, alpha=0.15, lw=0, zorder=20)
ax.add_patch(rect)
i_res += 1
def plot_daily_trends(ndays=100, lastday=-1, mun_regexp=None, region_list=None,
source='r7', subtitle=None):
"""Plot daily-case trends (pull data from global DFS dict).
- lastday: up to this day.
- source: 'r7' (7-day rolling average), 'raw' (no smoothing), 'sg'
(Savitsky-Golay smoothed).
- mun_regexp: regular expression matching municipalities.
    - region_list: list of municipalities (including e.g. 'HR:Zuid',
      'POP:100-200', 'JSON:{...}').
      If mun_regexp and region_list are both specified, then concatenate.
If neither are specified, assume 'Nederland'.
JSON is a json-encoded dict with:
- 'label': short label string
- 'color': for plotting, optional.
- 'fmt': format for plotting, e.g. 'o--', optional.
- 'muns': list of municipality names
- subtitle: second title line (optional)
"""
df_events = DFS['events']
df_mun = DFS['mun']
fig, ax = plt.subplots(figsize=(12, 6))
fig.subplots_adjust(top=0.945-0.03*(subtitle is not None),
bottom=0.1, left=0.09, right=0.83)
if region_list is None:
region_list = []
if mun_regexp:
region_list = [m for m in df_mun.index if re.match(mun_regexp, m)] + region_list
if region_list == []:
region_list = ['Nederland']
    labels = []  # tuples (y, txt)
citystats = [] # tuples (Rt, T2, cp100k, cwk, popk, city_name)
for region in region_list:
df1, n_inw = get_region_data(region, lastday=lastday)
df1 = df1.iloc[-ndays:]
fmt = 'o-' if ndays < 70 else '-'
psize = 5 if ndays < 30 else 3
dnc_column = dict(r7='Delta7r', raw='Delta', sg='DeltaSG')[source]
if region.startswith('JSON:'):
reg_dict = json.loads(region[5:])
reg_label = reg_dict['label']
if 'fmt' in reg_dict:
fmt = reg_dict['fmt']
color = reg_dict['color'] if 'color' in reg_dict else None
else:
reg_label = re.sub(r'POP:(.*)-(.*)', r'\1k-\2k inw.', region)
reg_label = re.sub(r'^[A-Z]+:', '', reg_label)
color = None
ax.semilogy(df1[dnc_column]*1e5, fmt, color=color, label=reg_label, markersize=psize)
delta_t = 7
i0 = dict(raw=-1, r7=-3, sg=-3)[source]
t_double, Rt = get_t2_Rt(df1[dnc_column], delta_t, i0=i0)
citystats.append((np.around(Rt, 2), np.around(t_double, 2),
np.around(df1['Delta'][-1]*1e5, 2),
int(df1['Delta7r'][-4] * n_inw * 7 + 0.5),
int(n_inw/1e3 + .5), reg_label))
if abs(t_double) > 60:
            texp = 'Stabiel'
elif t_double > 0:
texp = f'×2: {t_double:.3g} d'
elif t_double < 0:
texp = f'×½: {-t_double:.2g} d'
ax.semilogy(
df1.index[[i0-delta_t, i0]], df1[dnc_column].iloc[[i0-delta_t, i0]]*1e5,
'k--', zorder=-10)
labels.append((df1[dnc_column][-1]*1e5, f' {reg_label} ({texp})'))
_add_event_labels(
ax, df1.index[0], df1.index[-1], with_ribbons=False,
flagmatch='CaseGraph'
)
dfc = pd.DataFrame.from_records(
sorted(citystats), columns=['Rt', 'T2', 'C/100k', 'C/wk', 'Pop/k', 'Region'])
dfc.set_index('Region', inplace=True)
print(dfc)
lab_x = df1.index[-1] + | pd.Timedelta('1.2 d') | pandas.Timedelta |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Geoip Lookup module using IPStack and Maxmind GeoLite2.
Geographic location lookup for IP addresses. This module has two classes
for different services:
- GeoLiteLookup - Maxmind Geolite (see https://www.maxmind.com)
- IPStackLookup - IPStack (see https://ipstack.com)
Both services offer
a free tier for non-commercial use. However, a paid tier will
normally get you more accuracy, more detail and a higher throughput
rate. Maxmind geolite uses a downloadable database, while IPStack is
an online lookup (API key required).
"""
import math
import os
import random
import tarfile
import warnings
from abc import ABCMeta, abstractmethod
from collections.abc import Iterable
from datetime import datetime, timedelta
from json import JSONDecodeError
from pathlib import Path
from time import sleep
from typing import Any, Dict, List, Mapping, Optional, Tuple
import geoip2.database # type: ignore
import pandas as pd
import requests
from geoip2.errors import AddressNotFoundError # type: ignore
from IPython import get_ipython
from IPython.display import HTML, display
from requests.exceptions import HTTPError
from .._version import VERSION
from ..common.exceptions import MsticpyUserConfigError
from ..common.provider_settings import ProviderSettings, get_provider_settings
from ..common.utility import export
from ..datamodel.entities import GeoLocation, IpAddress
__version__ = VERSION
__author__ = "<NAME>"
class GeoIPDatabaseException(Exception):
"""Exception when GeoIP database cannot be found."""
class GeoIpLookup(metaclass=ABCMeta):
"""
Abstract base class for GeoIP Lookup classes.
See Also
--------
IPStackLookup : IPStack GeoIP Implementation
GeoLiteLookup : MaxMind GeoIP Implementation
"""
_LICENSE_TXT: Optional[str] = None
_LICENSE_HTML: Optional[str] = None
_license_shown: bool = False
def __init__(self):
"""Initialize instance of GeoIpLookup class."""
self._print_license()
@abstractmethod
def lookup_ip(
self,
ip_address: str = None,
ip_addr_list: Iterable = None,
ip_entity: IpAddress = None,
) -> Tuple[List[Any], List[IpAddress]]:
"""
Lookup IP location abstract method.
Parameters
----------
ip_address : str, optional
a single address to look up (the default is None)
ip_addr_list : Iterable, optional
a collection of addresses to lookup (the default is None)
ip_entity : IpAddress, optional
an IpAddress entity (the default is None) - any existing
data in the Location property will be overwritten
Returns
-------
Tuple[List[Any], List[IpAddress]]:
raw geolocation results and same results as IpAddress entities with
populated Location property.
"""
def df_lookup_ip(self, data: pd.DataFrame, column: str) -> pd.DataFrame:
"""
Lookup Geolocation data from a pandas Dataframe.
Parameters
----------
data : pd.DataFrame
pandas dataframe containing IpAddress column
column : str
the name of the dataframe column to use as a source
Returns
-------
pd.DataFrame
Copy of original dataframe with IP Location information columns
appended (where a location lookup was successful)
"""
return data.merge(
self.lookup_ips(data, column),
how="left",
left_on=column,
right_on="IpAddress",
)
def lookup_ips(self, data: pd.DataFrame, column: str) -> pd.DataFrame:
"""
Lookup Geolocation data from a pandas Dataframe.
Parameters
----------
data : pd.DataFrame
pandas dataframe containing IpAddress column
column : str
the name of the dataframe column to use as a source
Returns
-------
pd.DataFrame
IpLookup results as DataFrame.
"""
ip_list = list(data[column].values)
_, entities = self.lookup_ip(ip_addr_list=ip_list)
ip_dicts = [
{**ent.Location.properties, "IpAddress": ent.Address}
for ent in entities
if ent.Location is not None
]
return | pd.DataFrame(data=ip_dicts) | pandas.DataFrame |
"""
Cause-effect model training
"""
# Author: <NAME> <<EMAIL>>
#
# License: Apache, Version 2.0
import sys
import numpy as np
from .estimator import CauseEffectSystemCombination
# import features as f
import pandas as pd
# from scipy.optimize import fmin
# import _pickle as pickle
from .util import random_permutation
MODEL = CauseEffectSystemCombination
MODEL_PARAMS = {'weights': [0.383, 0.370, 0.247], 'njobs': 1}
def train(df, tar):
set1 = 'train' if len(sys.argv) < 2 else sys.argv[1]
# set2 = [] if len(sys.argv) < 3 else sys.argv[2:]
train_filter = None
# if len(df) % 2:
# df.drop(df.tail(1).index, inplace=True)
# tar.drop(tar.tail(1).index, inplace=True)
model = MODEL(**MODEL_PARAMS)
print("Reading in training data " + set1)
train = df
print("Extracting features")
train = model.extract(train)
# if save:
# print("Saving train features")
# write_data(set1, train)
# target = data_io.read_target(set1)
# Data selection
train, target = random_permutation(train, tar)
train_filter = None
if train_filter is not None:
train = train[train_filter]
target = target[train_filter]
print("Training model with optimal weights")
# print(train)
# print(tar.values)
X = | pd.concat([train]) | pandas.concat |
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where( | notna(i2) | pandas.notna |
import os, sys, re, json, random, copy, argparse, pickle, importlib
import numpy as np
import pandas as pd
from collections import OrderedDict
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import logomaker as lm
from util import *
import warnings
warnings.filterwarnings('ignore')
class Predictor():
def __init__(self, mhc_encode_dict, model_file, model_state_files, encoding_method):
# MHC binding domain encoding
self.mhc_encode_dict = mhc_encode_dict
# device: gpu or cpu
if torch.cuda.is_available():
self.device = torch.device('cuda')
self.batch_size = 4096
else:
self.device = torch.device('cpu')
self.batch_size = 64
# model
if encoding_method == 'onehot':
dim = 21
elif encoding_method == 'blosum':
dim = 24
else:
            raise ValueError(f"Unsupported encoding method: {encoding_method}")
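        # convert the model .py path into a dotted module path and import it dynamically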
model_file = '.'.join(model_file.split('.')[0].split('/'))
module = importlib.import_module(model_file)
self.model = module.CombineModel(module.MHCModel(dim), module.EpitopeModel(dim))
# model states
self.models = OrderedDict()
for i in range(len(model_state_files)):
basename = re.split(r'[\/\.]', model_state_files[i])[-2]
model_state_dict = torch.load(model_state_files[i], map_location=self.device)
self.models[basename] = copy.deepcopy(self.model)
self.models[basename].load_state_dict(model_state_dict['model_state_dict'])
self.models[basename].to(self.device)
def __call__(self, df, dataset, allele=None):
result_df = pd.DataFrame(index=df.index, columns=list(self.models.keys()))
result_df['sequence'] = df['sequence']
# general mode
if allele:
dataloader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, shuffle=False)
preds = self._predict(allele, dataloader)
result_df.loc[:, list(self.models.keys())] = preds
# specific mode
else:
result_df['mhc'] = df['mhc']
for allele, sub_df in tqdm(df.groupby('mhc'), desc='alleles', leave=False, position=0):
idx = sub_df.index
sub_dataset = torch.utils.data.Subset(dataset, idx)
sub_dataloader = torch.utils.data.DataLoader(sub_dataset, batch_size=self.batch_size, shuffle=False)
preds = self._predict(allele, sub_dataloader)
result_df.loc[idx, list(self.models.keys())] = preds
return result_df
def _predict(self, allele, dataloader):
mhc_encode = self.mhc_encode_dict[allele]
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import time
def patient(rdb):
""" Returns list of patients """
patients = """SELECT "Name" FROM patient ORDER BY index"""
try:
patients = | pd.read_sql(patients, rdb) | pandas.read_sql |
# _*_ coding: utf-8 _*_
"""
Prepare level-3 distribution data.
Author: <NAME>
"""
import os
import numpy as np
import pandas as pd
from typing import Union, List
from sklearn.preprocessing import LabelEncoder
# Own Customized modules
from base.base_data_loader import BaseDataLoader
from util.data_util import transform_channel, remove_whitespace
from util.date_util import get_days_of_month, infer_month
from util.feature_util import (prepare_training_set_for_level3,
prepare_val_set_for_level3,
prepare_testing_set_for_level3,
modify_training_set)
from global_vars import (DIS_DATA_DIR, DIS_DATA_COLUMN_NAMES,
SUPPORTED_CATE_NAMES, CATE_NAME_2_CATE_CODE)
class Level3DisDataLoader(BaseDataLoader):
"""Distribution data loader of Level-3 (per customer per sku)."""
def __init__(self, year, month, categories='all', need_unitize=True):
self._year, self._month = year, month
self._categories = categories
self._all_cate_names = self._get_all_cates(self._categories)
self._all_cate_codes = set([CATE_NAME_2_CATE_CODE[cate] for cate in self._all_cate_names])
self._version_flag = "%d-%02d" % (self._year, self._month)
self._dis = self._get_dis_data(need_unitize)
        self._dis_cus_sku_month = self._get_month_dis_per_cus_sku()  # monthly distribution per customer per SKU
        self._index = self._dis_cus_sku_month.index
        self._dis_cus_sku_month_pre15 = self._get_pre15_dis_per_cus_sku()  # first-15-days distribution per customer per SKU
        self._customer_info, self._customer_info_encoded = self._get_cus_info()  # customer information
        self._sku_info, self._sku_info_encoded = self._get_sku_info()  # SKU information
        self._dis_sku_month = self._get_month_dis_per_sku()  # monthly distribution per SKU
        self._dis_cate1_month = self._get_month_dis_per_cate1()  # monthly distribution per first-level category
        self._dis_cate2_month = self._get_month_dis_per_cate2()  # monthly distribution per second-level category
        self._dis_cus_cate1_month = self._get_month_dis_per_cus_cate1()  # distribution per customer per first-level category
        self._dis_cus_cate2_month = self._get_month_dis_per_cus_cate2()  # distribution per customer per second-level category
        self._dis_cus_chan_month = self._get_month_dis_per_cus_chan()  # distribution per customer per channel
        self._dis_cus_sales_chan_month = self._get_month_dis_per_cus_sales_chan()  # distribution per customer per sales channel
def _get_all_cates(self, categories):
if isinstance(categories, list):
all_cates = set([cate for cate in categories if cate in SUPPORTED_CATE_NAMES])
elif categories == 'all':
all_cates = set(SUPPORTED_CATE_NAMES)
else:
            raise ValueError("`categories` must be 'all' or a list of supported category names")
return all_cates
def _get_data_path(self):
filename = "m111-dis_%s.txt" % self._version_flag
return os.path.join(DIS_DATA_DIR, self._version_flag, filename)
def _get_dis_data(self, need_unitize=True):
print("[INFO] Start loading distribution data...")
dis_data_path = self._get_data_path()
dis = pd.read_csv(
dis_data_path, sep='\u001e', header=None,
names=DIS_DATA_COLUMN_NAMES, parse_dates=[0],
dtype={5: str, 7: str, 15: str, 22: str, 27: str, 28: str}
)
print("[INFO] Loading finished!")
print("[INFO] Start preprocessing distribution data...")
dis = self._preprocess_dis_data(dis, need_unitize)
print("[INFO] Preprocessing finished!")
return dis
def _preprocess_dis_data(self, dis, need_unitize=True):
dis.drop(
columns=['bu_code', 'bu_name', 'region_code', 'region_name', 'road'],
inplace=True
)
dis = dis.loc[dis.first_cate_code.isin(self._all_cate_codes)]
dis = dis.loc[dis.customer_code.str.startswith('C')]
dis = dis.sort_values(by='order_date').reset_index(drop=True)
str_column_names = ['sales_cen_code', 'customer_code', 'item_code']
remove_whitespace(dis, str_column_names)
dis['district'] = dis.district.str.replace(r'\\N', '未知')
dis['customer_type'] = dis.customer_type.str.replace(r'\\N', '未知')
dis['is_usable'] = dis.is_usable.astype(str)
dis['is_usable'] = dis.is_usable.str.replace(r'\\N', '未知')
dis['channel_name'] = dis.channel_name.apply(lambda x: transform_channel(x))
dis['dis_qty'] = np.round(dis.dis_qty).astype(int)
dis['item_price'] = dis.item_price.astype(str).str.replace(r'\\N', '-1.0').astype(float)
dis['dis_amount'] = dis.dis_qty * dis.item_price
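        # optionally express quantities and amounts in units of 10,000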
if need_unitize:
dis['dis_qty'] = dis.dis_qty / 10000
dis['dis_amount'] = dis.dis_amount / 10000
return dis
def _get_month_dis_per_cus_sku(self):
"""Get monthly distribution data per customer per sku."""
tmp = self._dis.copy()
tmp['order_month'] = tmp.order_date.astype(str).apply(lambda x: x[:7])
tmp = tmp.groupby(['customer_code', 'item_code', 'order_month'])[['dis_qty']].sum()
tmp['dis_qty'] = tmp.dis_qty.apply(lambda x: x if x > 0 else 0)
dis_cus_sku_month = tmp.unstack(level=-1).fillna(0)
dis_cus_sku_month.columns = pd.date_range(
start='2018-01-30', periods=len(dis_cus_sku_month.columns), freq='M'
)
return dis_cus_sku_month
def _get_pre15_dis_per_cus_sku(self):
"""Get half monthly distribution data per customer per sku."""
tmp = self._dis.copy()
tmp['day'] = tmp.order_date.dt.day
tmp = tmp.loc[tmp.day <= 15]
tmp['order_month'] = tmp.order_date.astype(str).apply(lambda x: x[:7])
dis_cus_sku_month_pre15 = tmp.groupby(['customer_code', 'item_code', 'order_month'])[['dis_qty']].sum()
dis_cus_sku_month_pre15['dis_qty'] = dis_cus_sku_month_pre15.dis_qty.apply(lambda x: 0 if x < 0 else x)
dis_cus_sku_month_pre15 = dis_cus_sku_month_pre15.unstack(level=-1).fillna(0.0)
dis_cus_sku_month_pre15.columns = pd.date_range(
start='2018-01-30', periods=len(dis_cus_sku_month_pre15.columns), freq='M'
)
dis_cus_sku_month_pre15 = dis_cus_sku_month_pre15.reindex(self._index).fillna(0)
return dis_cus_sku_month_pre15
def _get_month_dis_per_sku(self):
"""Get monthly distribution data per sku."""
dis_sku_month = self._dis_cus_sku_month.groupby(['item_code'])[self._dis_cus_sku_month.columns].sum()
dis_sku_month = dis_sku_month.reindex(self._index.get_level_values(1))
return dis_sku_month
def _get_cus_info(self):
"""Get information of all customers."""
label_enc = LabelEncoder()
customer_info = self._dis.drop_duplicates(['customer_code'], keep='last')
customer_info = customer_info[['customer_code', 'customer_name', 'sales_cen_code',
'sales_cen_name', 'sales_region_name', 'province',
'city', 'district', 'customer_type', 'is_usable', 'channel_level']]
customer_info['customer_id'] = label_enc.fit_transform(customer_info['customer_code'])
customer_info['sales_cen_id'] = label_enc.fit_transform(customer_info['sales_cen_code'])
customer_info['sales_region_id'] = label_enc.fit_transform(customer_info['sales_region_name'])
customer_info['province_id'] = label_enc.fit_transform(customer_info['province'])
customer_info['city_id'] = label_enc.fit_transform(customer_info['city'])
customer_info['district_id'] = label_enc.fit_transform(customer_info['district'])
customer_info['customer_type'] = label_enc.fit_transform(customer_info['customer_type'])
customer_info['is_usable'] = label_enc.fit_transform(customer_info['is_usable'])
customer_info['channel_level'] = label_enc.fit_transform(customer_info['channel_level'])
customer_info_encoded = customer_info.drop(
columns=['customer_name', 'sales_cen_code', 'sales_cen_name',
'sales_region_name', 'province', 'city', 'district']
).set_index('customer_code')
customer_info.set_index('customer_code', inplace=True)
customer_info_encoded = customer_info_encoded.reindex(self._index.get_level_values(0))
return customer_info, customer_info_encoded
def _get_sku_info(self):
"""Get information of all SKUs."""
label_enc = LabelEncoder()
sku_info = self._dis.drop_duplicates(['item_code'], keep='last')
sku_info = sku_info[[
'item_code', 'item_name', 'first_cate_code',
'first_cate_name', 'second_cate_code', 'second_cate_name',
'item_price', 'channel_name', 'sales_chan_name'
]]
sku_info['item_id'] = label_enc.fit_transform(sku_info.item_code)
sku_info['first_cate_id'] = label_enc.fit_transform(sku_info.first_cate_code)
sku_info['second_cate_id'] = label_enc.fit_transform(sku_info.second_cate_code)
sku_info['channel_id'] = label_enc.fit_transform(sku_info.channel_name)
sku_info['sales_chan_id'] = label_enc.fit_transform(sku_info.sales_chan_name)
sku_info_encoded = sku_info.drop(
columns=['item_name', 'first_cate_code', 'first_cate_name',
'second_cate_code', 'second_cate_name', 'channel_name', 'sales_chan_name']
).set_index('item_code')
sku_info = sku_info.set_index('item_code')
sku_info_encoded = sku_info_encoded.reindex(self._index.get_level_values(1))
return sku_info, sku_info_encoded
def _get_month_dis_per_cate1(self):
"""Get monthly distribution data per first level category."""
dis_cate1_month = self._dis_cus_sku_month.reset_index()
dis_cate1_month['first_cate_id'] = self._sku_info_encoded.first_cate_id.values
dis_cate1_month_index = dis_cate1_month['first_cate_id']
dis_cate1_month = dis_cate1_month.groupby(['first_cate_id'])[self._dis_cus_sku_month.columns].sum()
dis_cate1_month = dis_cate1_month.reindex(dis_cate1_month_index)
return dis_cate1_month
def _get_month_dis_per_cate2(self):
"""Get monthly distribution data per second level category."""
dis_cate2_month = self._dis_cus_sku_month.reset_index()
dis_cate2_month['second_cate_id'] = self._sku_info_encoded.second_cate_id.values
dis_cate2_month_index = dis_cate2_month['second_cate_id']
dis_cate2_month = dis_cate2_month.groupby(['second_cate_id'])[self._dis_cus_sku_month.columns].sum()
dis_cate2_month = dis_cate2_month.reindex(dis_cate2_month_index)
return dis_cate2_month
def _get_month_dis_per_cus_cate1(self):
"""Get monthly distribution data per customer per first level category."""
dis_cus_cate1_month = self._dis_cus_sku_month.reset_index()
dis_cus_cate1_month['first_cate_id'] = self._sku_info_encoded.first_cate_id.values
dis_cus_cate1_month_index = dis_cus_cate1_month[['customer_code', 'first_cate_id']]
dis_cus_cate1_month = dis_cus_cate1_month.groupby(
['customer_code', 'first_cate_id']
)[self._dis_cus_sku_month.columns].sum()
dis_cus_cate1_month = dis_cus_cate1_month.reindex(dis_cus_cate1_month_index)
return dis_cus_cate1_month
def _get_month_dis_per_cus_cate2(self):
"""Get monthly distribution data per customer per second level category."""
dis_cus_cate2_month = self._dis_cus_sku_month.reset_index()
dis_cus_cate2_month['second_cate_id'] = self._sku_info_encoded.second_cate_id.values
dis_cus_cate2_month_index = dis_cus_cate2_month[['customer_code', 'second_cate_id']]
dis_cus_cate2_month = dis_cus_cate2_month.groupby(
['customer_code', 'second_cate_id']
)[self._dis_cus_sku_month.columns].sum()
dis_cus_cate2_month = dis_cus_cate2_month.reindex(dis_cus_cate2_month_index)
return dis_cus_cate2_month
def _get_month_dis_per_cus_chan(self):
"""Get monthly distribution data per customer per channel."""
dis_cus_chan_month = self._dis_cus_sku_month.reset_index()
dis_cus_chan_month['channel_id'] = self._sku_info_encoded.channel_id.values
dis_cus_chan_month_index = dis_cus_chan_month[['customer_code', 'channel_id']]
dis_cus_chan_month = dis_cus_chan_month.groupby(
['customer_code', 'channel_id']
)[self._dis_cus_sku_month.columns].sum()
dis_cus_chan_month = dis_cus_chan_month.reindex(dis_cus_chan_month_index)
return dis_cus_chan_month
def _get_month_dis_per_cus_sales_chan(self):
"""Get monthly distribution data per customer per sales channel."""
dis_cus_sales_chan_month = self._dis_cus_sku_month.reset_index()
dis_cus_sales_chan_month['sales_chan_id'] = self._sku_info_encoded.sales_chan_id.values
dis_cus_sales_chan_month_index = dis_cus_sales_chan_month[['customer_code', 'sales_chan_id']]
dis_cus_sales_chan_month = dis_cus_sales_chan_month.groupby(
['customer_code', 'sales_chan_id'])[self._dis_cus_sku_month.columns].sum()
dis_cus_sales_chan_month = dis_cus_sales_chan_month.reindex(dis_cus_sales_chan_month_index)
return dis_cus_sales_chan_month
def prepare_training_set(self, months, gap=0):
X_train, y_train = prepare_training_set_for_level3(None, self._dis_cus_sku_month, None,
None, self._dis_cus_sku_month_pre15, None,
None, self._dis_cus_cate1_month, None,
None, self._dis_cus_cate2_month, None,
None, self._dis_cus_chan_month, None,
None, self._dis_cus_sales_chan_month, None,
None, self._dis_sku_month, None,
None, self._dis_cate1_month, None,
None, self._dis_cate2_month, None,
self._customer_info_encoded, self._sku_info_encoded,
months, gap, label_data='dis')
return modify_training_set(X_train, y_train)
def prepare_val_set(self, pred_year, pred_month, gap=0):
return prepare_val_set_for_level3(None, self._dis_cus_sku_month, None,
None, self._dis_cus_sku_month_pre15, None,
None, self._dis_cus_cate1_month, None,
None, self._dis_cus_cate2_month, None,
None, self._dis_cus_chan_month, None,
None, self._dis_cus_sales_chan_month, None,
None, self._dis_sku_month, None,
None, self._dis_cate1_month, None,
None, self._dis_cate2_month, None,
self._customer_info_encoded, self._sku_info_encoded,
pred_year, pred_month, gap, label_data='dis')
def prepare_testing_set(self, pred_year, pred_month, gap=0):
return prepare_testing_set_for_level3(None, self._dis_cus_sku_month, None,
None, self._dis_cus_sku_month_pre15, None,
None, self._dis_cus_cate1_month, None,
None, self._dis_cus_cate2_month, None,
None, self._dis_cus_chan_month, None,
None, self._dis_cus_sales_chan_month, None,
None, self._dis_sku_month, None,
None, self._dis_cate1_month, None,
None, self._dis_cate2_month, None,
self._customer_info_encoded, self._sku_info_encoded,
pred_year, pred_month, gap)
def get_true_data(self, true_pred_year: int, true_pred_month: int, reset_index: bool = False) -> pd.DataFrame:
start_dt_str = "%d-%02d-01" % (true_pred_year, true_pred_month)
end_dt_str = "%d-%02d-%02d" % (true_pred_year,
true_pred_month,
get_days_of_month(true_pred_year, true_pred_month))
df = self._dis.loc[(self._dis.order_date >= start_dt_str) & (self._dis.order_date <= end_dt_str)]
df['order_date'] = df.order_date.astype(str).apply(lambda x: x[:4] + x[5:7])
df = df.groupby(['customer_code', 'item_code', 'order_date'])[['dis_qty']].sum()
df = df.loc[df.dis_qty > 0]
df.rename(columns={'dis_qty': 'act_dis_qty'}, inplace=True)
return df.reset_index() if reset_index else df
def add_index(self,
preds: Union[np.ndarray, List[np.ndarray]],
start_pred_year: int,
start_pred_month: int) -> pd.DataFrame:
if isinstance(preds, np.ndarray):
preds = [preds]
months_pred = ['%d%02d' % infer_month(start_pred_year, start_pred_month, i) for i in range(len(preds))]
return pd.DataFrame(np.array(preds).transpose(), index=self._index, columns=months_pred)
def decorate_pred_result(self,
preds: Union[np.ndarray, List[np.ndarray]],
start_pred_year: int,
start_pred_month: int,
use_unitize: bool = True) -> pd.DataFrame:
df_preds = self.add_index(preds, start_pred_year, start_pred_month).stack().to_frame('pred_dis_qty')
df_preds.index.set_names(['customer_code', 'item_code', 'order_date'], inplace=True)
df_preds['pred_dis_qty'] = df_preds.pred_dis_qty.apply(lambda x: x if x > 0 else 0)
df_preds['pred_dis_qty'] = np.round(df_preds.pred_dis_qty, decimals=4 if use_unitize else 0)
return df_preds
def predict_by_history(self, start_pred_year, start_pred_month, gap=4, left_bound_dt='2018-01'):
left_bound_year, left_bound_month = map(int, left_bound_dt.split('-'))
start_aver_year, start_aver_month = infer_month(start_pred_year, start_pred_month, gap)
pred_len = 12 - gap
history = []
for i in range(1, 4):
if (start_aver_year - i > left_bound_year) or \
(start_aver_year - i == left_bound_year and start_aver_month >= left_bound_month):
start_dt = "%d-%02d-%d" % (start_aver_year - i, start_aver_month, 1)
tmp = self._dis_cus_sku_month[ | pd.date_range(start_dt, periods=pred_len, freq='M') | pandas.date_range |
# importing the dependencies
from string import ascii_uppercase
import pandas as pd
from openpyxl import load_workbook
from openpyxl.styles import PatternFill, Alignment, Border, Side
import numpy as np
#Taking data from GUI
Initiative = "GENESIS"
OutMonth = "July"
# Extracting data from the input calender which is in the day wise format
InputDataframe = pd.read_excel(r"C:\Users\vv972\OneDrive\Documents\MATLAB\Excel case study\Excel_Automation_Test\Automation_Sample Calender_v0.6.xlsx", sheet_name='Sample_GENESIS')
InputDataframe.columns = ['Month', 'Date', 'Day', 'Course Code', 'Module','Lead1', 'Lead2', 'Lead3', 'Session Slot', 'Session Time','Comments']
InputDataframe = InputDataframe.drop([0, 1])
Date = InputDataframe['Date']
Date = Date.dropna()
Date.index = Date.index - 1
Month = InputDataframe['Month']
Month = set(Month.dropna())
Day = InputDataframe['Day']
Day = Day.dropna()
Day.index = Day.index - 1
CourseCode = InputDataframe['Course Code']
CourseCode.index = CourseCode.index - 1
Module = InputDataframe['Module']
Module.index = Module.index - 1
Lead1 = InputDataframe['Lead1']
Lead1.index = Lead1.index - 1
Lead2 = InputDataframe['Lead2']
Lead2.index = Lead2.index - 1
Lead3 = InputDataframe['Lead3']
Lead3.index = Lead3.index - 1
SessionSlot = InputDataframe['Session Slot']
SessionSlot.index = SessionSlot.index - 1
Comments = InputDataframe['Comments']
Comments.index = Comments.index - 1
# Extracting data from the Keys sheet of the Master calendar
KeysDataframe = pd.read_excel(r"C:\Users\vv972\OneDrive\Documents\MATLAB\Calender auomation product\Product_Calender_Automation\V1\Implementation\MasterCalendar/Master.xlsx", sheet_name='Key')
KeysDataframe.columns = ["FixedInitiativeTitles", "FixedInitiativeCodes", "FixedInitiativeColourCodes", "VarName4", "VarName5", "VarName6", "FixedCourseCodes", "FixedCourseTitles"]
KeysDataframe = KeysDataframe.drop(["VarName4", "VarName5", "VarName6"], axis=1)
FixedInitiativeTitles = KeysDataframe['FixedInitiativeTitles']
FixedInitiativeTitles = FixedInitiativeTitles.dropna()
FixedInitiativeTitles.index = FixedInitiativeTitles.index + 1
FixedInitiativeCodes = KeysDataframe['FixedInitiativeCodes']
FixedInitiativeCodes = FixedInitiativeCodes.dropna()
FixedInitiativeCodes.index = FixedInitiativeCodes.index + 1
FixedInitiativeColourCodes = KeysDataframe['FixedInitiativeColourCodes'] # reads empty data
FixedCourseCodes = KeysDataframe['FixedCourseCodes']
FixedCourseCodes = FixedCourseCodes.dropna()
FixedCourseCodes.index = FixedCourseCodes.index + 1
FixedCourseTitles = KeysDataframe['FixedCourseTitles']
FixedCourseTitles = FixedCourseTitles.dropna()
FixedCourseTitles.index = FixedCourseTitles.index + 1
#FixedCourses = [FixedCourseCodes,FixedCourseTitles]
#FixedCourses = pd.concat([FixedCourseCodes, FixedCourseTitles], axis = 1) # Fixed Courses dataframe
#print(FixedCourses)
#print(CourseCode)
# print(Date, Day, CourseCode, Module, SessionSlot, Lead1, Lead2, Lead3)
# print(FixedInitiativeTitles, FixedInitiativeCodes, FixedCourseCodes, FixedCourseTitles)
# TO DO fix errors
# ExistingDataframe = pd.read_excel('/Users/achu/Downloads/Calendar/Master.xlsx', sheet_name=OutMonth)
# ExistingDataframe = ExistingDataframe.drop([0,1])
# ExistingDataframe.index = ExistingDataframe.index -1
# UniqueCourseCode = ExistingDataframe.iloc[:,0]
# RespectiveCourseTitleOutMonth = ExistingDataframe.iloc[:,1]
# RespectiveFacultyOutMonth = ExistingDataframe.iloc[:2:6]
# TimeTableOutMonth = ExistingDataframe.iloc[:,7:68]
# print(UniqueCourseCode, RespectiveCourseTitleOutMonth, RespectiveFacultyOutMonth ,TimeTableOutMonth)
#print(CourseCode)
#print(CourseCode[1])
#print(FixedCourseCodes)
#print(FixedCourses.iloc[1,0])
"""Error correction :
if course code incorrect , course title correct = corrects course code
if course code correct , course title incorrect = corrects course title
if course code incorrect , course title also incorrect = replaces the course code with ""
"""
"""Fixing the error course codes"""
for i in range(1, len(CourseCode)+1):
TempFlag=0
for j in range(1, len(FixedCourseCodes)+1):
if (CourseCode[i] == FixedCourseCodes[j]):
TempFlag = 1
if TempFlag == 0:
TempFlagError = 1
for k in range(1, len(FixedCourseTitles)+1):
if (Module[i] == FixedCourseTitles[k]):
CourseCode[i] = FixedCourseCodes[k]
TempFlagError = 0
if TempFlagError == 1:
CourseCode[i] = ""
"Fixing the error course titles"
for i in range(1, len(Module)+1):
TempFlag=0
for j in range(1, len(FixedCourseTitles)+1):
if (Module[i] == FixedCourseTitles[j]):
TempFlag = 1
if TempFlag == 0:
TempFlagError = 1
for k in range(1, len(FixedCourseCodes)+1):
if (CourseCode[i] == FixedCourseCodes[k]):
Module[i] = FixedCourseTitles[k]
TempFlagError = 0
if TempFlagError == 1:
CourseCode[i] = ""
"""Selecting the particular initaitive code"""
InitiativeCode = 11
for i in range(1, len(FixedInitiativeTitles) + 1):
if Initiative == FixedInitiativeTitles[i]:
InitiativeCode = FixedInitiativeCodes[i]
"""UniqueCourseCode containing unique data for CourseCode"""
UniqueCourseCode = []
for i in range(1, len(CourseCode)+1):
if CourseCode[i] != '' and CourseCode[i] not in UniqueCourseCode:
UniqueCourseCode.append(CourseCode[i])
UniqueCourseCode= | pd.Series(UniqueCourseCode) | pandas.Series |
"""
Script to make a plot of the feature variances across the bags.
Use the normalized bags as input, and then show which features have low
variances, and are therefore not useful for the model.
"""
import sys
import os
import numpy as np
import pickle as pkl
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.feature_selection import VarianceThreshold
import matplotlib
matplotlib.use('Agg')
outDir = sys.argv[1]
#1. First gather the normalized bags
svTypes = ['DEL', 'DUP', 'INV', 'ITX']
#input the normalized bags
with open(outDir + '/linkedSVGenePairs/normalizedBags.pkl', 'rb') as handle:
bagDict = pkl.load(handle)
#get the information for the bag labels
degPairs = np.loadtxt(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object') #labels
mutDir = outDir + '/patientGeneMutationPairs/'
cnvPatientsAmp = np.load(mutDir + 'cnvPatientsAmp.npy', allow_pickle=True, encoding='latin1').item()
svPatientsDup = np.load(mutDir + 'svPatientsDup.npy', allow_pickle=True, encoding='latin1').item()
svGenePairs = np.loadtxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_', dtype='object')
splitSVGenePairs = []
for pair in svGenePairs:
splitPair = pair[0].split('_')
splitSVGenePairs.append(splitPair[0] + '_' + splitPair[7] + '_' + splitPair[12])
#2. Divide into positive and negative instances
for svType in svTypes:
bagLabels = []
positiveBagPairNames = []
negativeBagPairNames = []
positiveInstanceLabels = []
positiveBags = []
negativeBags = []
#for each SV-gene pair, get the instances
for pair in bagDict:
#check if the SV type matches our selection
splitPair = pair.split("_")
shortPair = splitPair[7] + '_' + splitPair[0]
if svType != '' and svType != 'ALL':
if splitPair[12] != svType:
continue
#get the label of the bag by checking if it exists in degPairs, some pairs do not have a z-score because the gene is excluded due to mutations.
if shortPair in degPairs[:,0]:
#get the z-score of the pair.
degPairInfo = degPairs[degPairs[:,0] == shortPair][0]
#if the z-score matches this criterion, the SV-gene pair is positive
if float(degPairInfo[5]) > 1.5 or float(degPairInfo[5]) < -1.5:
#go through the instances of this SV-gene pair, and include only those that have gains and losses, and more than 1 instance. This should in principle not happen, but good to keep a check.
instances = []
for instance in bagDict[pair]:
if instance[0] == 0 and instance[1] == 0:
continue
instances.append(instance)
if len(instances) < 1:
continue
###Here do an extra check:
#we only look at TADs with SVs across the boundary when computing z-scores, so those z-scores are in the set.
#BUT some of these genes are not actually affected by the SV, since this doesn't lead to
#regulatory elements gained/lost. SO, we need to remove those here to get the actual pairs.
#This only goes wrong for duplications, because we also keep CNV amps that could be the same event,
#but then the duplication does not lead to gains/losses, while the CNV amp does because it is slightly
#longer. So if there is evidence of a cnv AMP, but no non-coding duplication linked, we can remove
#this as a positive pair.
if splitPair[7] not in cnvPatientsAmp:
positiveBagPairNames.append(pair)
positiveBags.append(instances)
else:
dupMatch = splitPair[0] + '_' + splitPair[7] + '_DUP'
if splitPair[0] in cnvPatientsAmp[splitPair[7]] and dupMatch not in splitSVGenePairs:
negativeBags.append(instances)
negativeBagPairNames.append(pair)
else:
positiveBagPairNames.append(pair)
positiveBags.append(instances)
else: #if the z-score is anything else, this bag will be labeled negative.
#get the right number of features per instance
instances = []
for instance in bagDict[pair]:
if instance[0] == 0 and instance[1] == 0:
continue
instances.append(instance)
if len(instances) < 1:
continue
negativeBags.append(instances)
negativeBagPairNames.append(pair)
positiveBags = np.array(positiveBags)
negativeBags = np.array(negativeBags)
#add the number of instances per bag as feature to the instances, which
#is not part of the normalized bags.
#normalize by the number of bags equal to how other features are normalized.
for bag in positiveBags:
instCount = len(bag)
for instance in bag:
instance.append(instCount / positiveBags.shape[0])
for bag in negativeBags:
instCount = len(bag)
for instance in bag:
instance.append(instCount / negativeBags.shape[0])
#remove instances with no variance
posInstances = np.vstack(positiveBags)
negInstances = np.vstack(negativeBags)
allInstances = np.concatenate((posInstances, negInstances))
#3. Calculate variances
t = 0
vt = VarianceThreshold(threshold=t)
vt.fit(allInstances)
#normalize variances for visualization purposes
normalizedVariances = np.log(vt.variances_+0.0000001)
#Then make a plot of the variances for each SV type model.
plotData = []
for ind in range(0, len(vt.variances_)):
plotData.append([ind, normalizedVariances[ind]])
data = | pd.DataFrame(plotData) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_empty_str(self):
with self.assertRaisesRegex(
ValueError, "CategoricalMetadataColumn.*empty strings.*"
"column 'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', '', 'bar']},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_numeric_column_infinity(self):
with self.assertRaisesRegex(
ValueError, "NumericMetadataColumn.*positive or negative "
"infinity.*column 'col2'"):
Metadata(pd.DataFrame(
{'col1': ['foo', 'bar', 'baz'],
'col2': [42, float('+inf'), 4.3]},
index=pd.Index(['a', 'b', 'c'], name='id')))
class TestMetadataConstructionAndProperties(unittest.TestCase):
def assertEqualColumns(self, obs_columns, exp):
obs = [(name, props.type) for name, props in obs_columns.items()]
self.assertEqual(obs, exp)
def test_minimal(self):
md = Metadata(pd.DataFrame({}, index=pd.Index(['a'], name='id')))
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('a',))
self.assertEqualColumns(md.columns, [])
def test_single_id(self):
index = pd.Index(['id1'], name='id')
df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1',))
self.assertEqualColumns(md.columns,
[('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')])
def test_no_columns(self):
index = pd.Index(['id1', 'id2', 'foo'], name='id')
df = pd.DataFrame({}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'foo'))
self.assertEqualColumns(md.columns, [])
def test_single_column(self):
index = pd.Index(['id1', 'a', 'my-id'], name='id')
df = pd.DataFrame({'column': ['foo', 'bar', 'baz']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'a', 'my-id'))
self.assertEqualColumns(md.columns, [('column', 'categorical')])
def test_retains_column_order(self):
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
index = pd.Index(['id1', 'id2', 'id3'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns,
[('z', 'numeric'), ('a', 'categorical'),
('ch', 'categorical')])
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
count = 0
for header in headers:
index = pd.Index(['id1', 'id2'], name=header)
df = pd.DataFrame({'column': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_header, header)
count += 1
# Since this test case is a little complicated, make sure that the
# expected number of comparisons are happening.
self.assertEqual(count, 26)
def test_recommended_ids(self):
index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
df = pd.DataFrame({'col1': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 2)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids,
('c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'))
self.assertEqualColumns(md.columns, [('col1', 'categorical')])
def test_non_standard_characters(self):
index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"', 'col\t \r\n5']
data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
['baz', np.nan, '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 5)
self.assertEqual(md.column_count, 5)
self.assertEqual(md.id_header, 'id')
self.assertEqual(
md.ids, ('©id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'))
self.assertEqualColumns(md.columns, [('↩c@l1™', 'categorical'),
('col(#2)', 'categorical'),
("#col'3", 'categorical'),
('"<col_4>"', 'categorical'),
('col\t \r\n5', 'numeric')])
def test_missing_data(self):
index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 4)
self.assertEqual(md.column_count, 4)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
self.assertEqualColumns(md.columns, [('col1', 'numeric'),
('NA', 'numeric'),
('col3', 'categorical'),
('col4', 'categorical')])
def test_does_not_cast_ids_or_column_names(self):
index = pd.Index(['0.000001', '0.004000', '0.000000'], dtype=object,
name='id')
columns = ['42.0', '1000', '-4.2']
data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('0.000001', '0.004000', '0.000000'))
self.assertEqualColumns(md.columns, [('42.0', 'numeric'),
('1000', 'categorical'),
('-4.2', 'numeric')])
def test_mixed_column_types(self):
md = Metadata(
pd.DataFrame({'col0': [1.0, 2.0, 3.0],
'col1': ['a', 'b', 'c'],
'col2': ['foo', 'bar', '42'],
'col3': ['1.0', '2.5', '-4.002'],
'col4': [1, 2, 3],
'col5': [1, 2, 3.5],
'col6': [1e-4, -0.0002, np.nan],
'col7': ['cat', np.nan, 'dog'],
'col8': ['a', 'a', 'a'],
'col9': [0, 0, 0]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 10)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns, [('col0', 'numeric'),
('col1', 'categorical'),
('col2', 'categorical'),
('col3', 'categorical'),
('col4', 'numeric'),
('col5', 'numeric'),
('col6', 'numeric'),
('col7', 'categorical'),
('col8', 'categorical'),
('col9', 'numeric')])
def test_case_insensitive_duplicate_ids(self):
index = pd.Index(['a', 'b', 'A'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3']}, index=index)
metadata = Metadata(df)
self.assertEqual(metadata.ids, ('a', 'b', 'A'))
def test_case_insensitive_duplicate_column_names(self):
index = pd.Index(['a', 'b', 'c'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3'],
'Column': ['4', '5', '6']}, index=index)
metadata = Metadata(df)
self.assertEqual(set(metadata.columns), {'column', 'Column'})
def test_categorical_column_leading_trailing_whitespace_value(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', ' bar ', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_id(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', ' b ', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_column_name(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], ' col2 ': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
class TestSourceArtifacts(unittest.TestCase):
def setUp(self):
self.md = Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_source_artifacts(self):
self.assertEqual(self.md.artifacts, ())
def test_add_zero_artifacts(self):
self.md._add_artifacts([])
self.assertEqual(self.md.artifacts, ())
def test_add_artifacts(self):
# First two artifacts have the same data but different UUIDs.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
self.md._add_artifacts([artifact1])
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact3 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact2, artifact3])
self.assertEqual(self.md.artifacts, (artifact1, artifact2, artifact3))
def test_add_non_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
with self.assertRaisesRegex(TypeError, "Artifact object.*42"):
self.md._add_artifacts([artifact, 42])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, ())
def test_add_duplicate_artifact(self):
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact2 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact1, artifact2])
with self.assertRaisesRegex(
ValueError, "Duplicate source artifacts.*artifact: Mapping"):
self.md._add_artifacts([artifact1])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, (artifact1, artifact2))
class TestRepr(unittest.TestCase):
def test_singular(self):
md = Metadata(pd.DataFrame({'col1': [42]},
index=pd.Index(['a'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 1 column', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
def test_plural(self):
md = Metadata(pd.DataFrame({'col1': [42, 42], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('2 IDs x 2 columns', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
self.assertIn("col2: ColumnProperties(type='categorical')", obs)
def test_column_name_padding(self):
data = [[0, 42, 'foo']]
index = pd.Index(['my-id'], name='id')
columns = ['col1', 'longer-column-name', 'c']
md = Metadata(pd.DataFrame(data, index=index, columns=columns))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 3 columns', obs)
self.assertIn(
"col1: ColumnProperties(type='numeric')", obs)
self.assertIn(
"longer-column-name: ColumnProperties(type='numeric')", obs)
self.assertIn(
"c: ColumnProperties(type='categorical')", obs)
class TestEqualityOperators(unittest.TestCase, ReallyEqualMixin):
def setUp(self):
get_dummy_plugin()
def test_type_mismatch(self):
md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
mdc = md.get_column('col1')
self.assertIsInstance(md, Metadata)
self.assertIsInstance(mdc, NumericMetadataColumn)
self.assertReallyNotEqual(md, mdc)
def test_id_header_mismatch(self):
data = {'col1': ['foo', 'bar'], 'col2': [42, 43]}
md1 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='ID')))
self.assertReallyNotEqual(md1, md2)
def test_source_mismatch(self):
# Metadata created from an artifact vs not shouldn't compare equal,
# even if the data is the same.
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md_from_artifact = artifact.view(Metadata)
md_no_artifact = Metadata(md_from_artifact.to_dataframe())
pd.testing.assert_frame_equal(md_from_artifact.to_dataframe(),
md_no_artifact.to_dataframe())
self.assertReallyNotEqual(md_from_artifact, md_no_artifact)
def test_artifact_mismatch(self):
# Metadata created from different artifacts shouldn't compare equal,
# even if the data is the same.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact1.view(Metadata)
md2 = artifact2.view(Metadata)
pd.testing.assert_frame_equal(md1.to_dataframe(), md2.to_dataframe())
self.assertReallyNotEqual(md1, md2)
def test_id_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['1'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_name_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'c': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_type_mismatch(self):
md1 = Metadata(pd.DataFrame({'col1': ['42', '43']},
index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame({'col1': [42, 43]},
index=pd.Index(['id1', 'id2'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_order_mismatch(self):
index = pd.Index(['id1', 'id2'], name='id')
md1 = Metadata(pd.DataFrame([[42, 'foo'], [43, 'bar']], index=index,
columns=['z', 'a']))
md2 = Metadata(pd.DataFrame([['foo', 42], ['bar', 43]], index=index,
columns=['a', 'z']))
self.assertReallyNotEqual(md1, md2)
def test_data_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_equality_without_artifact(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
self.assertReallyEqual(md1, md2)
def test_equality_with_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact.view(Metadata)
md2 = artifact.view(Metadata)
self.assertReallyEqual(md1, md2)
def test_equality_with_missing_data(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertReallyEqual(md1, md2)
class TestToDataframe(unittest.TestCase):
def test_minimal(self):
df = pd.DataFrame({}, index=pd.Index(['id1'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='#SampleID'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.index.name, '#SampleID')
def test_dataframe_copy(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertIsNot(obs, df)
def test_retains_column_order(self):
index = pd.Index(['id1', 'id2'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.columns.tolist(), ['z', 'a', 'ch'])
def test_missing_data(self):
# Different missing data representations should be normalized to np.nan
index = pd.Index(['None', 'nan', 'NA', 'id1'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, float('nan'), 3]),
('NA', [np.nan, 'foo', float('nan'), None]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, np.nan, 3.0]),
('NA', [np.nan, 'foo', np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'NA': object, 'col3': object,
'col4': object})
self.assertTrue(np.isnan(obs['col1']['NA']))
self.assertTrue(np.isnan(obs['NA']['NA']))
self.assertTrue(np.isnan(obs['NA']['id1']))
def test_dtype_int_normalized_to_dtype_float(self):
index = pd.Index(['id1', 'id2', 'id3'], name='id')
df = pd.DataFrame({'col1': [42, -43, 0],
'col2': [42.0, -43.0, 0.0],
'col3': [42, np.nan, 0]},
index=index)
self.assertEqual(df.dtypes.to_dict(),
{'col1': np.int64, 'col2': np.float64,
'col3': np.float64})
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame({'col1': [42.0, -43.0, 0.0],
'col2': [42.0, -43.0, 0.0],
'col3': [42.0, np.nan, 0.0]},
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'col2': np.float64,
'col3': np.float64})
class TestGetColumn(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_column_name_not_found(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
with self.assertRaisesRegex(ValueError,
"'col3'.*not a column.*'col1', 'col2'"):
md.get_column('col3')
def test_artifacts_are_propagated(self):
A = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
md = A.view(Metadata)
obs = md.get_column('b')
exp = CategoricalMetadataColumn(
pd.Series(['3'], name='b', index=pd.Index(['0'], name='id')))
exp._add_artifacts([A])
self.assertEqual(obs, exp)
self.assertEqual(obs.artifacts, (A,))
def test_categorical_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col2')
exp = CategoricalMetadataColumn(
pd.Series(['foo', 'bar'], name='col2',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_numeric_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='#OTU ID'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['a', 'b'], name='#OTU ID')))
self.assertEqual(obs, exp)
self.assertEqual(obs.id_header, '#OTU ID')
class TestGetIDs(unittest.TestCase):
def test_default(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
actual = metadata.get_ids()
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
def test_incomplete_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "Subject='subject-1' AND SampleType="
with self.assertRaises(ValueError):
metadata.get_ids(where)
where = "Subject="
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_invalid_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "not-a-column-name='subject-1'"
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_empty_result(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index= | pd.Index(['S1', 'S2', 'S3'], name='id') | pandas.Index |
"""
Parses each kind of spreadsheet into our data structures.
"""
from pathlib import PurePath
import logging, sys
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
import math
import os
from itertools import accumulate, islice, chain
import pandas as pd
from data_loader import spreadsheets, DATA_DIR
def adjust(acc, val):
val = 0 if math.isnan(val) else val
return acc * (1 + (val/100))
def adjust_pct(acc, val):
val = 0 if math.isnan(val) else val
return acc * (1 + val)
def combine_to_date(year, month):
date = f'{year:04d}-{month:02d}-01'
return pd.to_datetime(date)
def combine_to_date2(year, month):
date = f'{year}-{month}-01'
return | pd.to_datetime(date) | pandas.to_datetime |
import logging
import pandas as pd
from lib.constant import Datasets
from lib.features.dtypes import dtypes_clean, dtypes_featured
# features computing functions
def _compute_acc_severity(acc_severities: pd.Series) -> str:
"""Groupby method.
Return the worst victim state for each accident.
"""
# all_severity in ['safe', inj_light', 'inj_hosp', 'killed']
acc_severities_unique = acc_severities.unique()
if 'killed' in acc_severities_unique:
max_severity = 'killed'
elif 'inj_hosp' in acc_severities_unique:
max_severity = 'inj_hosp'
elif 'inj_light' in acc_severities_unique:
max_severity = 'inj_light'
else:
max_severity = 'safe'
return max_severity
def get_pct_drivers_by_sex(drivers_count: pd.DataFrame) -> pd.DataFrame:
"""Nombre de conducteur dans la population en pourcentage par sex."""
pct_drivers_sex = drivers_count \
.mean()[['prop_drive_male', 'prop_drive_female']] \
.mul(100).round(1)
return pct_drivers_sex
def get_summary_by_sex(drivers: pd.DataFrame, pct_drivers_sex: pd.DataFrame) -> pd.DataFrame:
summary = pd.crosstab(index=drivers['acc_severity'],
columns=drivers['sexe'],
margins=True, normalize=0) \
.mul(100).round(1) \
.append(pd.DataFrame({'female': pct_drivers_sex['prop_drive_female'],
'male': pct_drivers_sex['prop_drive_male']},
index=['Prop. conducteurs'])) \
.rename({'All': 'Tous accidents',
'killed': 'Acc. mortel',
'inj_light': 'Acc. leger',
'inj_hosp': 'Acc.grave'}, axis=0) \
.rename({'female': 'Femmes', 'male': 'Hommes'}, axis=1)
return summary
def get_drivers(acc_severity: pd.DataFrame, users: pd.DataFrame) -> pd.DataFrame:
"""Retourne l'ensemble des conducteurs impliqué dans un accident.
on ajoute à la table des usager la colonne correspondant au type d'accident
dans lequel il est impliqué (= état de la victime la plus grave)
on ne récupère de cette table que les usagers conducteurs
"""
drivers = pd.merge(acc_severity, users.loc[users['catu'] == 'driver', :],
on='Num_Acc', how='left') \
.loc[:, ['Num_Acc', 'acc_severity', 'sexe', 'trajet']]
return drivers
# build new datasets
def build_accidents_dataset(caracs: pd.DataFrame,
locations: pd.DataFrame,
users: pd.DataFrame,
dtypes_base_path: str = '') -> pd.DataFrame:
logging.info('merge caracs with locations (1 accident = 1 carac = 1 location)')
acc_df = pd.merge(caracs, locations, on='Num_Acc', how='inner')
logging.info('add nb victims by severity (4 columns)')
vict_by_severity_cnt = pd.merge(caracs, users, on='Num_Acc', how='inner') \
.groupby(by=['Num_Acc', 'grav']).count().reset_index()[['Num_Acc', 'grav', 'year']] \
.rename({'year': 'victims_nb'}, axis=1) \
.fillna({'victims_nb': 0}) \
.pivot(index='Num_Acc', columns='grav')
vict_by_severity_cnt.columns = vict_by_severity_cnt.columns.get_level_values(1)
vict_by_severity_cnt.rename(columns=str).reset_index()
acc_df = pd.merge(acc_df, vict_by_severity_cnt, on='Num_Acc', how='inner')
logging.info('add total victims number column')
victims_nb_by_acc = pd.merge(caracs, users, on='Num_Acc', how='inner') \
.groupby(by=['Num_Acc']).count().reset_index()[['Num_Acc', 'year']] \
.rename({'year': 'victims_nb'}, axis=1)
acc_df = pd.merge(acc_df, victims_nb_by_acc, on='Num_Acc', how='inner')
logging.info('add acc_severity column')
acc_severity = | pd.merge(acc_df, users, on='Num_Acc', how='inner') | pandas.merge |
"""
Protein sequence alignment creation protocols/workflows.
Authors:
<NAME>
<NAME> - complex protocol, hmm_build_and_search
<NAME> - hmm_build_and_search
"""
from collections import OrderedDict
from collections.abc import Iterable
import re
from shutil import copy
import os
import numpy as np
import pandas as pd
from evcouplings.align import tools as at
from evcouplings.align.alignment import (
detect_format, parse_header, read_fasta,
write_fasta, Alignment
)
from evcouplings.couplings.mapping import Segment
from evcouplings.utils.config import (
check_required, InvalidParameterError, MissingParameterError,
read_config_file, write_config_file
)
from evcouplings.utils.system import (
create_prefix_folders, get, valid_file,
verify_resources, ResourceError
)
from evcouplings.align.ena import (
extract_embl_annotation,
extract_cds_ids,
add_full_header
)
def _verify_sequence_id(sequence_id):
"""
Verify if a target sequence identifier is in proper
format for the pipeline to run without errors
    (not None, and contains no whitespace)
    Parameters
    ----------
    sequence_id : str
        Target sequence identifier to verify
Raises
------
InvalidParameterError
If sequence identifier is not valid
"""
if sequence_id is None:
raise InvalidParameterError(
"Target sequence identifier (sequence_id) must be defined and "
"cannot be None/null."
)
try:
if len(sequence_id.split()) != 1 or len(sequence_id) != len(sequence_id.strip()):
raise InvalidParameterError(
"Target sequence identifier (sequence_id) may not contain any "
"whitespace (spaces, tabs, ...)"
)
except AttributeError:
raise InvalidParameterError(
"Target sequence identifier (sequence_id) must be a string"
)
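# Example (illustrative): an identifier such as "P12345" passes the check,
# while None, non-string values, or identifiers containing whitespace
# (e.g. "P12345 HUMAN") raise InvalidParameterError.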
def _make_hmmsearch_raw_fasta(alignment_result, prefix):
"""
HMMsearch results do not contain the query sequence
so we must construct a raw_fasta file with the query
sequence as the first hit, to ensure proper numbering.
The search result is filtered to only contain the columns with
match states to the HMM, which has a one to one mapping to the
query sequence.
    Parameters
    ----------
alignment_result : dict
Alignment result dictionary, output by run_hmmsearch
prefix : str
Prefix for file creation
Returns
-------
str
path to raw focus alignment file
"""
def _add_gaps_to_query(query_sequence_ali, ali):
# get the index of columns that do not contain match states (indicated by an x)
gap_index = [
i for i, x in enumerate(ali.annotation["GC"]["RF"]) if x != "x"
]
# get the index of columns that contain match states (indicated by an x)
match_index = [
i for i, x in enumerate(ali.annotation["GC"]["RF"]) if x == "x"
]
# ensure that the length of the match states
# match the length of the sequence
if len(match_index) != query_sequence_ali.L:
raise ValueError(
"HMMsearch result {} does not have a one-to-one"
" mapping to the query sequence columns".format(
alignment_result["raw_alignment_file"]
)
)
gapped_query_sequence = ""
seq = list(query_sequence_ali.matrix[0, :])
# loop through every position in the HMMsearch hits
for i in range(len(ali.annotation["GC"]["RF"])):
# if that position should be a gap, add a gap
if i in gap_index:
gapped_query_sequence += "-"
# if that position should be a letter, pop the next
# letter in the query sequence
else:
gapped_query_sequence += seq.pop(0)
new_sequence_ali = Alignment.from_dict({
query_sequence_ali.ids[0]: gapped_query_sequence
})
return new_sequence_ali
# open the sequence file
with open(alignment_result["target_sequence_file"]) as a:
query_sequence_ali = Alignment.from_file(a, format="fasta")
# if the provided alignment is empty, just return the target sequence
raw_focus_alignment_file = prefix + "_raw.fasta"
if not valid_file(alignment_result["raw_alignment_file"]):
# write the query sequence to a fasta file
with open(raw_focus_alignment_file, "w") as of:
query_sequence_ali.write(of)
        # return the path to the newly written file
return raw_focus_alignment_file
# else, open the HMM search result
with open(alignment_result["raw_alignment_file"]) as a:
ali = Alignment.from_file(a, format="stockholm")
# make sure that the stockholm alignment contains the match annotation
if not ("GC" in ali.annotation and "RF" in ali.annotation["GC"]):
raise ValueError(
"Stockholm alignment {} missing RF"
" annotation of match states".format(alignment_result["raw_alignment_file"])
)
# add insertions to the query sequence in order to preserve correct
# numbering of match sequences
gapped_sequence_ali = _add_gaps_to_query(query_sequence_ali, ali)
# write a new alignment file with the query sequence as
# the first entry
with open(raw_focus_alignment_file, "w") as of:
gapped_sequence_ali.write(of)
ali.write(of)
return raw_focus_alignment_file
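# Example usage sketch (file names below are illustrative assumptions):
# the alignment_result dict only needs the two file paths accessed above.
#
#     alignment_result = {
#         "target_sequence_file": "output/query.fa",
#         "raw_alignment_file": "output/query_hmmsearch.sto",
#     }
#     raw_fasta = _make_hmmsearch_raw_fasta(alignment_result, "output/query")
#     # raw_fasta == "output/query_raw.fasta"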
def fetch_sequence(sequence_id, sequence_file,
sequence_download_url, out_file):
"""
Fetch sequence either from database based on identifier, or from
input sequence file.
Parameters
----------
sequence_id : str
Identifier of sequence that should be retrieved
sequence_file : str
        File containing sequence. If None, sequence will
be downloaded from sequence_download_url
sequence_download_url : str
URL from which to download missing sequence. Must
contain "{}" at the position where sequence ID will
be inserted into download URL (using str.format).
out_file : str
Output file in which sequence will be stored, if
sequence_file is not existing.
Returns
-------
str
        Path of file with stored sequence (out_file; the sequence
        is downloaded or copied there)
tuple (str, str)
Identifier of sequence as stored in file, and sequence
"""
if sequence_file is None:
get(
sequence_download_url.format(sequence_id),
out_file,
allow_redirects=True
)
else:
# if we have sequence file, try to copy it
try:
copy(sequence_file, out_file)
except FileNotFoundError:
raise ResourceError(
"sequence_file does not exist: {}".format(
sequence_file
)
)
# also make sure input file has something in it
verify_resources(
"Input sequence missing", out_file
)
with open(out_file) as f:
seq = next(read_fasta(f))
return out_file, seq
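# Example usage sketch (identifier, URL pattern and paths are illustrative
# assumptions; the URL must contain "{}" where the identifier is inserted):
#
#     seq_file, (header, seq) = fetch_sequence(
#         "P12345",
#         None,  # no local file given, so the sequence is downloaded
#         "https://rest.uniprot.org/uniprotkb/{}.fasta",
#         "output/P12345.fa"
#     )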
def cut_sequence(sequence, sequence_id, region=None, first_index=None, out_file=None):
"""
Cut a given sequence to sub-range and save it in a file
Parameters
----------
sequence : str
Full sequence that will be cut
sequence_id : str
Identifier of sequence, used to construct header
in output file
region : tuple(int, int), optional (default: None)
Region that will be cut out of full sequence.
If None, full sequence will be returned.
first_index : int, optional (default: None)
Define index of first position in sequence.
Will be set to 1 if None.
out_file : str, optional (default: None)
Save sequence in a FASTA file (header:
>sequence_id/start_region-end_region)
    Returns
    -------
    tuple(int, int)
        Region that was cut out. If no input region is given, this will
        be (first_index, first_index + len(sequence) - 1), with
        first_index defaulting to 1; otherwise, the input region is
        returned.
    str
        Subsequence contained in region
Raises
------
InvalidParameterError
Upon invalid region specification (violating boundaries
of sequence)
"""
cut_seq = None
# (not using 1 as default value to allow parameter
# to be unspecified in config file)
if first_index is None:
first_index = 1
# last index is *inclusive*!
if region is None:
region = (first_index, first_index + len(sequence) - 1)
cut_seq = sequence
else:
start, end = region
str_start = start - first_index
str_end = end - first_index + 1
cut_seq = sequence[str_start:str_end]
# make sure bounds are valid given the sequence that we have
if str_start < 0 or str_end > len(sequence):
raise InvalidParameterError(
"Invalid sequence range: "
"region={} first_index={} len(sequence)={}".format(
region,
first_index,
len(sequence)
)
)
# save sequence to file
if out_file is not None:
with open(out_file, "w") as f:
header = "{}/{}-{}".format(sequence_id, *region)
write_fasta([(header, cut_seq)], f)
return region, cut_seq
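# Example (illustrative, using a toy sequence): region boundaries are
# inclusive and interpreted relative to first_index (default 1).
#
#     region, subseq = cut_sequence("MKTAYIAKQR", "QUERY_ID", region=(5, 8))
#     # region == (5, 8), subseq == "YIAK"
#
#     region, subseq = cut_sequence("MKTAYIAKQR", "QUERY_ID")
#     # no region given: region == (1, 10) and subseq is the full sequence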
def search_thresholds(use_bitscores, seq_threshold, domain_threshold, seq_len):
"""
Set homology search inclusion parameters.
HMMER hits get included in the HMM according to a two-step rule
    1. sequence passes sequence-level threshold
2. domain passes domain-level threshold
Therefore, search thresholds are set based on the following logic:
    1. If only sequence threshold is given, a MissingParameterError is raised
    2. If only domain threshold is given, sequence threshold is set to the same value
    3. If both thresholds are given, they are used as specified
Valid inputs for bitscore thresholds:
1. int or str: taken as absolute score threshold
2. float: taken as relative threshold (absolute threshold derived by
    multiplication with sequence length)
    Valid inputs for E-value thresholds:
1. int: Used as negative exponent, threshold will be set to 1E-<exponent>
2. float or str: Interpreted literally
Parameters
----------
use_bitscores : bool
Use bitscore threshold instead of E-value threshold
domain_threshold : str or int or float
Domain-level threshold. See rules above.
seq_threshold : str or int or float
Sequence-level threshold. See rules above.
seq_len : int
Length of sequence. Used to calculate absolute bitscore
threshold for relative bitscore thresholds.
Returns
-------
tuple (str, str)
Sequence- and domain-level thresholds ready to be fed into HMMER
"""
def transform_bitscore(x):
if isinstance(x, float):
# float: interpret as relative fraction of length
return "{:.1f}".format(x * seq_len)
else:
# otherwise interpret as absolute score
return str(x)
def transform_evalue(x):
if isinstance(x, int):
# if integer, interpret as negative exponent
return "1E{}".format(-x)
else:
# otherwise interpret literally
# (mantissa-exponent string or float)
return str(x).upper()
if domain_threshold is None:
raise MissingParameterError(
"domain_threshold must be explicitly defined "
"and may not be None/empty"
)
if use_bitscores:
transform = transform_bitscore
else:
transform = transform_evalue
if seq_threshold is not None:
seq_threshold = transform(seq_threshold)
if domain_threshold is not None:
domain_threshold = transform(domain_threshold)
# set "outer" sequence threshold so that it matches domain threshold
if domain_threshold is not None and seq_threshold is None:
seq_threshold = domain_threshold
return seq_threshold, domain_threshold
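# Example (illustrative) of the resulting HMMER threshold strings:
#
#     # relative bitscore threshold of 0.5 bits/residue for a 200-residue
#     # query; the sequence threshold defaults to the domain threshold
#     search_thresholds(True, None, 0.5, 200)   # -> ("100.0", "100.0")
#
#     # E-value threshold given as an integer exponent
#     search_thresholds(False, None, 5, 200)    # -> ("1E-5", "1E-5")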
def extract_header_annotation(alignment, from_annotation=True):
"""
Extract Uniprot/Uniref sequence annotation from Stockholm file
(as output by jackhmmer). This function may not work for other
formats.
Parameters
----------
alignment : Alignment
Multiple sequence alignment object
from_annotation : bool, optional (default: True)
Use annotation line (in Stockholm file) rather
than sequence ID line (e.g. in FASTA file)
Returns
-------
pandas.DataFrame
Table containing all annotation
(one row per sequence in alignment,
in order of occurrence)
"""
columns = [
("GN", "gene"),
("OS", "organism"),
("PE", "existence_evidence"),
("SV", "sequence_version"),
("n", "num_cluster_members"),
("Tax", "taxon"),
("RepID", "representative_member")
]
col_to_descr = OrderedDict(columns)
    regex = re.compile(r"\s({})=".format(
"|".join(col_to_descr.keys()))
)
# collect rows for dataframe in here
res = []
for i, id_ in enumerate(alignment.ids):
# annotation line for current sequence
seq_id = None
anno = None
# look for annotation either in separate
# annotation line or in full sequence ID line
if from_annotation:
seq_id = id_
# query level by level to avoid creating new keys
# in DefaultOrderedDict
if ("GS" in alignment.annotation and
id_ in alignment.annotation["GS"] and
"DE" in alignment.annotation["GS"][id_]):
anno = alignment.annotation["GS"][id_]["DE"]
else:
split = id_.split(maxsplit=1)
if len(split) == 2:
seq_id, anno = split
else:
seq_id = id_
anno = None
# extract info from line if we got one
if anno is not None:
            # do split on known field names to keep things
# simpler than a gigantic full regex to match
# (some fields are allowed to be missing)
pairs = re.split(regex, anno)
pairs = ["id", seq_id, "name"] + pairs
# create feature-value map
feat_map = dict(zip(pairs[::2], pairs[1::2]))
res.append(feat_map)
else:
res.append({"id": seq_id})
df = pd.DataFrame(res)
return df.loc[:, ["id", "name"] + list(col_to_descr.keys())]
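# Example usage sketch (file name is an illustrative assumption):
#
#     with open("output/query.sto") as f:
#         ali = Alignment.from_file(f, format="stockholm")
#     annotation = extract_header_annotation(ali, from_annotation=True)
#     # resulting columns: id, name, GN, OS, PE, SV, n, Tax, RepID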
def describe_seq_identities(alignment, target_seq_index=0):
"""
    Calculate sequence identities of all sequences in the
    alignment to the target sequence and create result dataframe.
    Parameters
    ----------
    alignment : Alignment
        Alignment for which identities to the target
        sequence will be calculated
    target_seq_index : int, optional (default: 0)
        Index of target sequence in alignment
Returns
-------
pandas.DataFrame
Table giving the identity to target sequence
for each sequence in alignment (in order of
occurrence)
"""
id_to_query = alignment.identities_to(
alignment[target_seq_index]
)
return pd.DataFrame(
{"id": alignment.ids, "identity_to_query": id_to_query}
)
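# Example usage sketch (file name is an illustrative assumption):
#
#     with open("output/query.a2m") as f:
#         ali = Alignment.from_file(f, format="fasta")
#     identities = describe_seq_identities(ali, target_seq_index=0)
#     # one row per sequence with columns "id" and "identity_to_query"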
def describe_frequencies(alignment, first_index, target_seq_index=None):
"""
Get parameters of alignment such as gaps, coverage,
conservation and summarize.
Parameters
----------
alignment : Alignment
Alignment for which description statistics
will be calculated
first_index : int
Sequence index of first residue in target sequence
target_seq_index : int, optional (default: None)
If given, will add the symbol in the target sequence
into a separate column of the output table
Returns
-------
pandas.DataFrame
Table detailing conservation and symbol frequencies
for all positions in the alignment
"""
fi = alignment.frequencies
conservation = alignment.conservation()
fi_cols = {c: fi[:, i] for c, i in alignment.alphabet_map.items()}
if target_seq_index is not None:
target_seq = alignment[target_seq_index]
else:
target_seq = np.full((alignment.L), np.nan)
info = pd.DataFrame(
{
"i": range(first_index, first_index + alignment.L),
"A_i": target_seq,
"conservation": conservation,
**fi_cols
}
)
# reorder columns
info = info.loc[:, ["i", "A_i", "conservation"] + list(alignment.alphabet)]
return info
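# Example usage sketch (assuming an Alignment object "ali" whose target
# sequence is the first entry and starts at position 1):
#
#     freqs = describe_frequencies(ali, first_index=1, target_seq_index=0)
#     # columns: i (position), A_i (target symbol), conservation, and one
#     # frequency column per symbol of the alignment alphabet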
def describe_coverage(alignment, prefix, first_index, minimum_column_coverage):
"""
Produce "classical" buildali coverage statistics, i.e.
number of sequences, how many residues have too many gaps, etc.
Only to be applied to alignments focused around the
target sequence.
Parameters
----------
alignment : Alignment
Alignment for which coverage statistics will be calculated
prefix : str
Prefix of alignment file that will be stored as identifier in table
first_index : int
Sequence index of first position of target sequence
minimum_column_coverage : Iterable(float) or float
Minimum column coverage threshold(s) that will be tested
(creating one row for each threshold in output table).
.. note::
            ``int`` values given to this function instead of a float will
            be divided by 100 to create the corresponding floating point
            representation. This parameter is 1.0 - maximum fraction of
            gaps per column.
Returns
-------
pd.DataFrame
Table with coverage statistics for different gap thresholds
"""
res = []
NO_MEFF = np.nan
if not isinstance(minimum_column_coverage, Iterable):
minimum_column_coverage = [minimum_column_coverage]
pos = np.arange(first_index, first_index + alignment.L)
f_gap = alignment.frequencies[:, alignment.alphabet_map[alignment._match_gap]]
for threshold in minimum_column_coverage:
if isinstance(threshold, int):
threshold /= 100
# all positions that have enough sequence information (i.e. little gaps),
        # and their indices
uppercase = f_gap <= 1 - threshold
uppercase_idx = np.nonzero(uppercase)[0]
# where does coverage of sequence by good alignment start and end?
cov_first_idx, cov_last_idx = uppercase_idx[0], uppercase_idx[-1]
        # calculate indices in sequence numbering space
first, last = pos[cov_first_idx], pos[cov_last_idx]
# how many lowercase positions in covered region?
num_lc_cov = np.sum(~uppercase[cov_first_idx:cov_last_idx + 1])
# total number of upper- and lowercase positions,
# and relative percentage
num_cov = uppercase.sum()
num_lc = (~uppercase).sum()
perc_cov = num_cov / len(uppercase)
res.append(
(prefix, threshold, alignment.N, alignment.L,
num_cov, num_lc, perc_cov, first, last,
last - first + 1, num_lc_cov, NO_MEFF)
)
df = pd.DataFrame(
res, columns=[
"prefix", "minimum_column_coverage", "num_seqs",
"seqlen", "num_cov", "num_lc", "perc_cov",
"1st_uc", "last_uc", "len_cov",
"num_lc_cov", "N_eff",
]
)
return df
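# Example usage sketch (assuming an Alignment object "ali"; integer
# thresholds are interpreted as percentages and divided by 100):
#
#     cov = describe_coverage(ali, "output/query", first_index=1,
#                             minimum_column_coverage=[50, 70])
#     # one row per threshold with num_seqs, perc_cov, first/last covered
#     # position, etc.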
def existing(**kwargs):
"""
Protocol:
Use external sequence alignment and extract all relevant
information from there (e.g. sequence, region, etc.),
then apply gap & fragment filtering as usual
Parameters
----------
Mandatory kwargs arguments:
See list below in code where calling check_required
Returns
-------
outcfg : dict
Output configuration of the pipeline, including
the following fields:
* sequence_id (passed through from input)
* alignment_file
* raw_focus_alignment_file
* statistics_file
* sequence_file
* first_index
* target_sequence_file
* annotation_file (None)
* frequencies_file
* identities_file
* focus_mode
* focus_sequence
* segments
"""
check_required(
kwargs,
[
"prefix", "input_alignment",
"sequence_id", "first_index",
"extract_annotation"
]
)
prefix = kwargs["prefix"]
# make sure output directory exists
create_prefix_folders(prefix)
# this file is starting point of pipeline;
# check if input alignment actually exists
input_alignment = kwargs["input_alignment"]
verify_resources(
"Input alignment does not exist",
input_alignment
)
# first try to autodetect format of alignment
with open(input_alignment) as f:
format = detect_format(f)
if format is None:
raise InvalidParameterError(
"Format of input alignment {} could not be "
"automatically detected.".format(
input_alignment
)
)
with open(input_alignment) as f:
ali_raw = Alignment.from_file(f, format)
# save annotation in sequence headers (species etc.)
annotation_file = None
if kwargs["extract_annotation"]:
annotation_file = prefix + "_annotation.csv"
from_anno_line = (format == "stockholm")
annotation = extract_header_annotation(
ali_raw, from_annotation=from_anno_line
)
annotation.to_csv(annotation_file, index=False)
# Target sequence of alignment
sequence_id = kwargs["sequence_id"]
# check if sequence identifier is valid
_verify_sequence_id(sequence_id)
# First, find focus sequence in alignment
focus_index = None
for i, id_ in enumerate(ali_raw.ids):
if id_.startswith(sequence_id):
focus_index = i
break
# if we didn't find it, cannot continue
if focus_index is None:
raise InvalidParameterError(
"Target sequence {} could not be found in alignment"
.format(sequence_id)
)
# identify what columns (non-gap) to keep for focus
focus_seq = ali_raw[focus_index]
focus_cols = np.array(
[c not in [ali_raw._match_gap, ali_raw._insert_gap] for c in focus_seq]
)
# extract focus alignment
focus_ali = ali_raw.select(columns=focus_cols)
focus_seq_nogap = "".join(focus_ali[focus_index])
# determine region of sequence. If first_index is given,
# use that in any case, otherwise try to autodetect
full_focus_header = ali_raw.ids[focus_index]
focus_id = full_focus_header.split()[0]
# try to extract region from sequence header
id_, region_start, region_end = parse_header(focus_id)
# override with first_index if given
if kwargs["first_index"] is not None:
region_start = kwargs["first_index"]
region_end = region_start + len(focus_seq_nogap) - 1
if region_start is None or region_end is None:
raise InvalidParameterError(
"Could not extract region information " +
"from sequence header {} ".format(full_focus_header) +
"and first_index parameter is not given."
)
# resubstitute full sequence ID from identifier
# and region information
header = "{}/{}-{}".format(
id_, region_start, region_end
)
focus_ali.ids[focus_index] = header
# write target sequence to file
target_sequence_file = prefix + ".fa"
with open(target_sequence_file, "w") as f:
write_fasta(
[(header, focus_seq_nogap)], f
)
# apply sequence identity and fragment filters,
# and gap threshold
mod_outcfg, ali = modify_alignment(
focus_ali, focus_index, id_, region_start, **kwargs
)
# generate output configuration of protocol
outcfg = {
**mod_outcfg,
"sequence_id": sequence_id,
"sequence_file": target_sequence_file,
"first_index": region_start,
"target_sequence_file": target_sequence_file,
"focus_sequence": header,
"focus_mode": True,
}
if annotation_file is not None:
outcfg["annotation_file"] = annotation_file
# dump config to YAML file for debugging/logging
write_config_file(prefix + ".align_existing.outcfg", outcfg)
# return results of protocol
return outcfg
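# Hedged usage sketch (assumption, not part of the original pipeline): a minimal
# kwargs dictionary for the "existing" protocol. File names, the sequence identifier
# and all filter values below are hypothetical.
def _existing_protocol_example():
    kwargs = {
        "prefix": "output/example",           # output file prefix
        "input_alignment": "example.sto",     # pre-computed alignment file
        "sequence_id": "EXAMPLE_SEQ",         # identifier of target sequence in the alignment
        "first_index": 1,                     # numbering of first target residue
        "extract_annotation": True,
        # parameters consumed downstream by modify_alignment
        "seqid_filter": None,
        "hhfilter": None,
        "minimum_sequence_coverage": 50,
        "minimum_column_coverage": 70,
        "compute_num_effective_seqs": False,
        "theta": 0.8,
    }
    return existing(**kwargs)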
def modify_alignment(focus_ali, target_seq_index, target_seq_id, region_start, **kwargs):
"""
Apply pairwise identity filtering, fragment filtering, and exclusion
of columns with too many gaps to a sequence alignment. Also generates
files describing properties of the alignment such as frequency distributions,
conservation, and "old-style" alignment statistics files.
.. note::
assumes focus alignment (otherwise unprocessed) as input.
.. todo::
come up with something more clever to filter fragments than fixed width
(e.g. use 95% quantile of length distribution as reference point)
Parameters
----------
focus_ali : Alignment
Focus-mode input alignment
target_seq_index : int
Index of target sequence in alignment
target_seq_id : str
Identifier of target sequence (without range)
region_start : int
Index of first sequence position in target sequence
kwargs : See required arguments in source code
Returns
-------
outcfg : Dict
File products generated by the function:
* alignment_file
* statistics_file
* frequencies_file
* identities_file
* raw_focus_alignment_file
ali : Alignment
Final processed alignment
"""
check_required(
kwargs,
[
"prefix", "seqid_filter", "hhfilter",
"minimum_sequence_coverage", "minimum_column_coverage",
"compute_num_effective_seqs", "theta",
]
)
prefix = kwargs["prefix"]
create_prefix_folders(prefix)
focus_fasta_file = prefix + "_raw_focus.fasta"
outcfg = {
"alignment_file": prefix + ".a2m",
"statistics_file": prefix + "_alignment_statistics.csv",
"frequencies_file": prefix + "_frequencies.csv",
"identities_file": prefix + "_identities.csv",
"raw_focus_alignment_file": focus_fasta_file,
}
# swap target sequence to first position if it is not
# the first sequence in alignment;
# this is particularly important for hhfilter run
# because target sequence might otherwise be filtered out
if target_seq_index != 0:
indices = np.arange(0, len(focus_ali))
indices[0] = target_seq_index
indices[target_seq_index] = 0
target_seq_index = 0
focus_ali = focus_ali.select(sequences=indices)
with open(focus_fasta_file, "w") as f:
focus_ali.write(f, "fasta")
# apply pairwise identity filter (using hhfilter)
if kwargs["seqid_filter"] is not None:
filtered_file = prefix + "_filtered.a3m"
at.run_hhfilter(
focus_fasta_file, filtered_file,
threshold=kwargs["seqid_filter"],
columns="first", binary=kwargs["hhfilter"]
)
with open(filtered_file) as f:
focus_ali = Alignment.from_file(f, "a3m")
# final FASTA alignment before applying A2M format modifications
filtered_fasta_file = prefix + "_raw_focus_filtered.fasta"
with open(filtered_fasta_file, "w") as f:
focus_ali.write(f, "fasta")
ali = focus_ali
# filter fragments
# come up with something more clever here than fixed width
# (e.g. use 95% quantile of length distribution as reference point)
min_cov = kwargs["minimum_sequence_coverage"]
if min_cov is not None:
if isinstance(min_cov, int):
min_cov /= 100
keep_seqs = (1 - ali.count("-", axis="seq")) >= min_cov
ali = ali.select(sequences=keep_seqs)
# Calculate frequencies, conservation and identity to query
# on final alignment (except for lowercase modification)
# Note: running hhfilter might cause a loss of the target sequence
# if it is not the first sequence in the file! To be sure that
# nothing goes wrong, target_seq_index should always be 0.
describe_seq_identities(
ali, target_seq_index=target_seq_index
).to_csv(
outcfg["identities_file"], float_format="%.3f", index=False
)
describe_frequencies(
ali, region_start, target_seq_index=target_seq_index
).to_csv(
outcfg["frequencies_file"], float_format="%.3f", index=False
)
coverage_stats = describe_coverage(
ali, prefix, region_start, kwargs["minimum_column_coverage"]
)
# keep list of uppercase sequence positions in alignment
pos_list = np.arange(region_start, region_start + ali.L, dtype="int32")
# Make columns with too many gaps lowercase
min_col_cov = kwargs["minimum_column_coverage"]
if min_col_cov is not None:
if isinstance(min_col_cov, int):
min_col_cov /= 100
lc_cols = ali.count(ali._match_gap, axis="pos") > 1 - min_col_cov
ali = ali.lowercase_columns(lc_cols)
# if we remove columns, we have to update list of positions
pos_list = pos_list[~lc_cols]
else:
lc_cols = None
# compute effective number of sequences
# (this is intended for cases where coupling stage is
# not run, but this number is wanted nonetheless)
if kwargs["compute_num_effective_seqs"]:
# make sure we only compute N_eff on the columns
# that would be used for model inference, and discard
# the rest
if lc_cols is None:
cut_ali = ali
else:
cut_ali = ali.select(columns=~lc_cols)
# compute sequence weights
cut_ali.set_weights(kwargs["theta"])
# N_eff := sum of all sequence weights
n_eff = float(cut_ali.weights.sum())
# patch into coverage statistics (N_eff column)
coverage_stats.loc[:, "N_eff"] = n_eff
else:
n_eff = None
# save coverage statistics to file
coverage_stats.to_csv(
outcfg["statistics_file"], float_format="%.3f",
index=False
)
# store description of final sequence alignment in outcfg
# (note these parameters will be updated by couplings protocol)
outcfg.update(
{
"num_sites": len(pos_list),
"num_sequences": len(ali),
"effective_sequences": n_eff,
"region_start": region_start,
}
)
# create segment in outcfg
outcfg["segments"] = [
Segment(
"aa", target_seq_id, region_start, region_start + ali.L - 1, pos_list
).to_list()
]
with open(outcfg["alignment_file"], "w") as f:
ali.write(f, "fasta")
return outcfg, ali
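# Minimal sketch (illustration only): the index-swap trick used in modify_alignment
# to move the target sequence to position 0, shown on a plain permutation array.
def _swap_to_front_example(num_sequences=5, target_seq_index=3):
    indices = np.arange(0, num_sequences)
    indices[0] = target_seq_index
    indices[target_seq_index] = 0
    # indices is now [3, 1, 2, 0, 4]; selecting sequences in this order puts the
    # target sequence first while keeping every other sequence exactly once
    return indices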
def jackhmmer_search(**kwargs):
"""
Protocol:
Iterative jackhmmer search against a sequence database.
Parameters
----------
Mandatory kwargs arguments:
See list below in code where calling check_required
.. todo::
explain meaning of parameters in detail.
Returns
-------
outcfg : dict
Output configuration of the protocol, including
the following fields:
* sequence_id (passed through from input)
* first_index (passed through from input)
* target_sequence_file
* sequence_file
* raw_alignment_file
* hittable_file
* focus_mode
* focus_sequence
* segments
"""
check_required(
kwargs,
[
"prefix", "sequence_id", "sequence_file",
"sequence_download_url", "region", "first_index",
"use_bitscores", "domain_threshold", "sequence_threshold",
"database", "iterations", "cpu", "nobias", "reuse_alignment",
"checkpoints_hmm", "checkpoints_ali", "jackhmmer",
"extract_annotation"
]
)
prefix = kwargs["prefix"]
# check if sequence identifier is valid
_verify_sequence_id(kwargs["sequence_id"])
# make sure output directory exists
create_prefix_folders(prefix)
# store search sequence file here
target_sequence_file = prefix + ".fa"
full_sequence_file = prefix + "_full.fa"
# make sure search sequence is defined and load it
full_seq_file, (full_seq_id, full_seq) = fetch_sequence(
kwargs["sequence_id"],
kwargs["sequence_file"],
kwargs["sequence_download_url"],
full_sequence_file
)
# cut sequence to target region and save in sequence_file
# (this is the main sequence file used downstream)
(region_start, region_end), cut_seq = cut_sequence(
full_seq,
kwargs["sequence_id"],
kwargs["region"],
kwargs["first_index"],
target_sequence_file
)
# run jackhmmer... allow reuse of a pre-existing
# Stockholm alignment file here
ali_outcfg_file = prefix + ".align_jackhmmer_search.outcfg"
# determine whether previous results can be reused; only possible if they
# were stored in ali_outcfg_file
if kwargs["reuse_alignment"] and valid_file(ali_outcfg_file):
ali = read_config_file(ali_outcfg_file)
# check if the alignment file itself is also there
verify_resources(
"Tried to reuse alignment, but empty or "
"does not exist",
ali["alignment"], ali["domtblout"]
)
else:
# otherwise, we have to run the alignment
# modify search thresholds to be suitable for jackhmmer
seq_threshold, domain_threshold = search_thresholds(
kwargs["use_bitscores"],
kwargs["sequence_threshold"],
kwargs["domain_threshold"],
len(cut_seq)
)
# run search process
ali = at.run_jackhmmer(
query=target_sequence_file,
database=kwargs[kwargs["database"]],
prefix=prefix,
use_bitscores=kwargs["use_bitscores"],
domain_threshold=domain_threshold,
seq_threshold=seq_threshold,
iterations=kwargs["iterations"],
nobias=kwargs["nobias"],
cpu=kwargs["cpu"],
checkpoints_hmm=kwargs["checkpoints_hmm"],
checkpoints_ali=kwargs["checkpoints_ali"],
binary=kwargs["jackhmmer"],
)
# get rid of huge stdout log file immediately
# (do not use /dev/null option of jackhmmer function
# to make no assumption about operating system)
try:
os.remove(ali.output)
except OSError:
pass
# turn namedtuple into dictionary to make
# restarting code nicer
ali = dict(ali._asdict())
# save results of search for possible restart
write_config_file(ali_outcfg_file, ali)
# prepare output dictionary with result files
outcfg = {
"sequence_id": kwargs["sequence_id"],
"target_sequence_file": target_sequence_file,
"sequence_file": full_sequence_file,
"first_index": kwargs["first_index"],
"focus_mode": True,
"raw_alignment_file": ali["alignment"],
"hittable_file": ali["domtblout"],
}
# define a single protein segment based on target sequence
outcfg["segments"] = [
Segment(
"aa", kwargs["sequence_id"],
region_start, region_end,
range(region_start, region_end + 1)
).to_list()
]
outcfg["focus_sequence"] = "{}/{}-{}".format(
kwargs["sequence_id"], region_start, region_end
)
return outcfg
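# Illustrative sketch (assumption): the "segments" entry of the output configuration
# holds serialized Segment objects; this shows how downstream protocols rebuild one.
def _segment_roundtrip_example(outcfg):
    segment = Segment.from_list(outcfg["segments"][0])
    # sequence identifier and residue range of the (single) protein segment
    return segment.sequence_id, segment.region_start, segment.region_end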
def hmmbuild_and_search(**kwargs):
"""
Protocol:
Build HMM from sequence alignment using hmmbuild and
search against a sequence database using hmmsearch.
Parameters
----------
Mandatory kwargs arguments:
See list below in code where calling check_required
Returns
-------
outcfg : dict
Output configuration of the protocol, including
the following fields:
* target_sequence_file
* sequence_file
* raw_alignment_file
* hittable_file
* focus_mode
* focus_sequence
* segments
"""
def _format_alignment_for_hmmbuild(input_alignment_file, **kwargs):
# this file is starting point of pipeline;
# check if input alignment actually exists
verify_resources(
"Input alignment does not exist",
input_alignment_file
)
# first try to autodetect format of alignment
with open(input_alignment_file) as f:
format = detect_format(f)
if format is None:
raise InvalidParameterError(
"Format of input alignment {} could not be "
"automatically detected.".format(
input_alignment_file
)
)
with open(input_alignment_file) as f:
ali_raw = Alignment.from_file(f, format)
# Target sequence of alignment
sequence_id = kwargs["sequence_id"]
if sequence_id is None:
raise InvalidParameterError(
"Parameter sequence_id must be defined"
)
# First, find focus sequence in alignment
focus_index = None
for i, id_ in enumerate(ali_raw.ids):
if id_.startswith(sequence_id):
focus_index = i
break
# if we didn't find it, cannot continue
if focus_index is None:
raise InvalidParameterError(
"Target sequence {} could not be found in alignment"
.format(sequence_id)
)
# identify what columns (non-gap) to keep for focus
# this should be all columns in the raw_focus_alignment_file
# but checking anyway
focus_seq = ali_raw[focus_index]
focus_cols = np.array(
[c not in [ali_raw._match_gap, ali_raw._insert_gap] for c in focus_seq]
)
# extract focus alignment
focus_ali = ali_raw.select(columns=focus_cols)
focus_seq_nogap = "".join(focus_ali[focus_index])
# determine region of sequence. If first_index is given,
# use that in any case, otherwise try to autodetect
full_focus_header = ali_raw.ids[focus_index]
focus_id = full_focus_header.split()[0]
# try to extract region from sequence header
id_, region_start, region_end = parse_header(focus_id)
# override with first_index if given
if kwargs["first_index"] is not None:
region_start = kwargs["first_index"]
region_end = region_start + len(focus_seq_nogap) - 1
if region_start is None or region_end is None:
raise InvalidParameterError(
"Could not extract region information " +
"from sequence header {} ".format(full_focus_header) +
"and first_index parameter is not given."
)
# resubstitute full sequence ID from identifier
# and region information
header = "{}/{}-{}".format(
id_, region_start, region_end
)
focus_ali.ids[focus_index] = header
# write target sequence to file
target_sequence_file = prefix + ".fa"
with open(target_sequence_file, "w") as f:
write_fasta(
[(header, focus_seq_nogap)], f
)
# swap target sequence to first position if it is not
# the first sequence in alignment;
# this is particularly important for hhfilter run
# because target sequence might otherwise be filtered out
if focus_index != 0:
indices = np.arange(0, len(focus_ali))
indices[0] = focus_index
indices[focus_index] = 0
focus_index = 0
focus_ali = focus_ali.select(sequences=indices)
# write the raw focus alignment for hmmbuild
focus_fasta_file = prefix + "_raw_focus_input.fasta"
with open(focus_fasta_file, "w") as f:
focus_ali.write(f, "fasta")
return focus_fasta_file, target_sequence_file, region_start, region_end
# define the gap threshold for columns included in the HMM built by hmmbuild.
SYMFRAC_HMMBUILD = 0.0
# check for required options
check_required(
kwargs,
[
"prefix", "sequence_id", "alignment_file",
"use_bitscores", "domain_threshold", "sequence_threshold",
"database", "cpu", "nobias", "reuse_alignment",
"hmmbuild", "hmmsearch"
]
)
prefix = kwargs["prefix"]
# check if sequence identifier is valid
_verify_sequence_id(kwargs["sequence_id"])
# make sure output directory exists
create_prefix_folders(prefix)
# prepare input alignment for hmmbuild
focus_fasta_file, target_sequence_file, region_start, region_end = \
_format_alignment_for_hmmbuild(
kwargs["alignment_file"], **kwargs
)
# run hmmbuild_and_search... allow reuse of a pre-existing
# Stockholm alignment file here
ali_outcfg_file = prefix + ".align_hmmbuild_and_search.outcfg"
# determine whether previous results can be reused; only possible if they
# were stored in ali_outcfg_file
if kwargs["reuse_alignment"] and valid_file(ali_outcfg_file):
ali = read_config_file(ali_outcfg_file)
# check if the alignment file itself is also there
verify_resources(
"Tried to reuse alignment, but empty or "
"does not exist",
ali["alignment"], ali["domtblout"]
)
else:
# otherwise, we have to run the alignment
# modify search thresholds to be suitable for hmmsearch
sequence_length = region_end - region_start + 1
seq_threshold, domain_threshold = search_thresholds(
kwargs["use_bitscores"],
kwargs["sequence_threshold"],
kwargs["domain_threshold"],
sequence_length
)
# create the hmm
hmmbuild_result = at.run_hmmbuild(
alignment_file=focus_fasta_file,
prefix=prefix,
symfrac=SYMFRAC_HMMBUILD,
cpu=kwargs["cpu"],
binary=kwargs["hmmbuild"],
)
hmmfile = hmmbuild_result.hmmfile
# run the alignment from the hmm
ali = at.run_hmmsearch(
hmmfile=hmmfile,
database=kwargs[kwargs["database"]],
prefix=prefix,
use_bitscores=kwargs["use_bitscores"],
domain_threshold=domain_threshold,
seq_threshold=seq_threshold,
nobias=kwargs["nobias"],
cpu=kwargs["cpu"],
binary=kwargs["hmmsearch"],
)
# get rid of huge stdout log file immediately
try:
os.remove(ali.output)
except OSError:
pass
# turn namedtuple into dictionary to make
# restarting code nicer
ali = dict(ali._asdict())
# only item from hmmsearch_result to save is the hmmfile
ali["hmmfile"] = hmmfile
# save results of search for possible restart
write_config_file(ali_outcfg_file, ali)
# prepare output dictionary with result files
outcfg = {
"sequence_file": target_sequence_file,
"first_index": region_start,
"input_raw_focus_alignment": focus_fasta_file,
"target_sequence_file": target_sequence_file,
"focus_mode": True,
"raw_alignment_file": ali["alignment"],
"hittable_file": ali["domtblout"],
}
# convert the raw output alignment to fasta format
# and add the appropriate query sequence
raw_focus_alignment_file = _make_hmmsearch_raw_fasta(outcfg, prefix)
outcfg["raw_focus_alignment_file"] = raw_focus_alignment_file
# define a single protein segment based on target sequence
outcfg["segments"] = [
Segment(
"aa", kwargs["sequence_id"],
region_start, region_end,
range(region_start, region_end + 1)
).to_list()
]
outcfg["focus_sequence"] = "{}/{}-{}".format(
kwargs["sequence_id"], region_start, region_end
)
return outcfg
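# Hedged sketch (assumption): the reuse mechanism above stores the search results in a
# config file; a later run with reuse_alignment=True only needs that file plus the
# alignment/domtblout files it references to skip the expensive hmmsearch step.
def _reuse_checkpoint_example(prefix):
    ali_outcfg_file = prefix + ".align_hmmbuild_and_search.outcfg"
    if valid_file(ali_outcfg_file):
        ali = read_config_file(ali_outcfg_file)
        return ali["alignment"], ali["domtblout"], ali["hmmfile"]
    return None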
def standard(**kwargs):
"""
Protocol:
Standard buildali4 workflow (run iterative jackhmmer
search against sequence database, then determine which
sequences and columns to include in the calculation based
on coverage and maximum gap thresholds).
Parameters
----------
Mandatory kwargs arguments:
See list below in code where calling check_required
Returns
-------
outcfg : dict
Output configuration of the pipeline, including
the following fields:
* sequence_id (passed through from input)
* first_index (passed through from input)
* alignment_file
* raw_alignment_file
* raw_focus_alignment_file
* statistics_file
* target_sequence_file
* sequence_file
* annotation_file
* frequencies_file
* identities_file
* hittable_file
* focus_mode
* focus_sequence
* segments
ali : Alignment
Final sequence alignment
"""
check_required(
kwargs,
[
"prefix", "extract_annotation",
]
)
prefix = kwargs["prefix"]
# make sure output directory exists
create_prefix_folders(prefix)
# first step of protocol is to get alignment using
# jackhmmer; initialize output configuration with
# results of this search
jackhmmer_outcfg = jackhmmer_search(**kwargs)
stockholm_file = jackhmmer_outcfg["raw_alignment_file"]
segment = Segment.from_list(jackhmmer_outcfg["segments"][0])
target_seq_id = segment.sequence_id
region_start = segment.region_start
region_end = segment.region_end
# read in stockholm format (with full annotation)
with open(stockholm_file) as a:
ali_raw = Alignment.from_file(a, "stockholm")
# and store as FASTA file first (disabled for now,
# since equivalent information can easily be obtained
# from the Stockholm file)
"""
ali_raw_fasta_file = prefix + "_raw.fasta"
with open(ali_raw_fasta_file, "w") as f:
ali_raw.write(f, "fasta")
"""
# save annotation in sequence headers (species etc.)
# (initialize to None so outcfg below is well-defined even without annotation extraction)
annotation_file = None
if kwargs["extract_annotation"]:
annotation_file = prefix + "_annotation.csv"
annotation = extract_header_annotation(ali_raw)
annotation.to_csv(annotation_file, index=False)
# center alignment around focus/search sequence
focus_cols = np.array([c != "-" for c in ali_raw[0]])
focus_ali = ali_raw.select(columns=focus_cols)
target_seq_index = 0
mod_outcfg, ali = modify_alignment(
focus_ali, target_seq_index, target_seq_id, region_start, **kwargs
)
# merge results of jackhmmer_search and modify_alignment stage
outcfg = {
**jackhmmer_outcfg,
**mod_outcfg,
"annotation_file": annotation_file
}
# dump output config to YAML file for debugging/logging
write_config_file(prefix + ".align_standard.outcfg", outcfg)
# return results of protocol
return outcfg
def complex(**kwargs):
"""
Protocol:
Run monomer alignment protocol and postprocess it for
EVcomplex calculations
Parameters
----------
Mandatory kwargs arguments:
See list below in code where calling check_required
Returns
-------
outcfg : dict
Output configuration of the alignment protocol, and
the following additional field:
genome_location_file : path to file containing
the genomic locations for CDSs corresponding to
identifiers in the alignment.
"""
check_required(
kwargs,
[
"prefix", "alignment_protocol",
"uniprot_to_embl_table",
"ena_genome_location_table"
]
)
verify_resources(
"Uniprot to EMBL mapping table does not exist",
kwargs["uniprot_to_embl_table"]
)
verify_resources(
"ENA genome location table does not exist",
kwargs["ena_genome_location_table"]
)
prefix = kwargs["prefix"]
# make sure output directory exists
create_prefix_folders(prefix)
# run the regular alignment protocol
# (standard, existing, ...)
alignment_protocol = kwargs["alignment_protocol"]
if alignment_protocol not in PROTOCOLS:
raise InvalidParameterError(
"Invalid choice for alignment protocol: {}".format(
alignment_protocol
)
)
outcfg = PROTOCOLS[kwargs["alignment_protocol"]](**kwargs)
# if the user selected the existing alignment protocol
# they can supply an input annotation file
# which overwrites the annotation file generated by the existing protocol
if alignment_protocol == "existing":
check_required(kwargs, ["override_annotation_file"])
if kwargs["override_annotation_file"] is not None:
verify_resources(
"Override annotation file does not exist",
kwargs["override_annotation_file"]
)
outcfg["annotation_file"] = prefix + "_annotation.csv"
annotation_data = | pd.read_csv(kwargs["override_annotation_file"]) | pandas.read_csv |
from __future__ import print_function, division
import os
os.environ["OMP_NUM_THREADS"] = "1"
import torch
import torch.multiprocessing as mp
import time
import numpy as np
import random
import json
from tqdm import tqdm
from utils.net_util import ScalarMeanTracker
from runners import nonadaptivea3c_val, savn_val
from pandas import Series, DataFrame
def main_eval(args, create_shared_model, init_agent):
# set random seeds (numpy, torch, python)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
if args.gpu_ids == -1:
args.gpu_ids = [-1]
else:
torch.cuda.manual_seed(args.seed)
try:
mp.set_start_method("spawn")
except RuntimeError:
pass
model_to_open = args.load_model
processes = []
res_queue = mp.Queue()
if args.model == "SAVN":
args.learned_loss = True
args.num_steps = 6
target = savn_val
else:
args.learned_loss = False
args.num_steps = args.max_episode_length
target = nonadaptivea3c_val
rank = 0
for scene_type in args.scene_types:
p = mp.Process(
target=target,
args=(
rank,
args,
model_to_open,
create_shared_model,
init_agent,
res_queue,
args.max_val_ep,
scene_type,
),
)
p.start()
processes.append(p)
time.sleep(0.1)
rank += 1
count = 0
end_count = 0
all_train_scalars = ScalarMeanTracker()
# analyze performance for each scene_type
scene_train_scalars = {scene_type:ScalarMeanTracker() for scene_type in args.scene_types}
# analyze performance for each difficulty level
if args.curriculum_learning:
diff_train_scalars = {}
proc = len(args.scene_types)
# pbar = tqdm(total=args.max_val_ep * proc)
try:
while end_count < proc:
train_result = res_queue.get()
# pbar.update(1)
count += 1
print("{} episdoes evaluated...".format(count))
if "END" in train_result:
end_count += 1
continue
# analyze performance for each difficulty split
if args.curriculum_learning:
diff = train_result['difficulty']
if diff not in diff_train_scalars:
diff_train_scalars[diff] = ScalarMeanTracker()
diff_train_scalars[diff].add_scalars(train_result)
# analyze performance for each scene_type
scene_train_scalars[train_result["scene_type"]].add_scalars(train_result)
all_train_scalars.add_scalars(train_result)
all_tracked_means = all_train_scalars.pop_and_reset()
scene_tracked_means = {scene_type: scene_train_scalars[scene_type].pop_and_reset()
for scene_type in args.scene_types}
if args.curriculum_learning:
diff_tracked_means = {diff: diff_train_scalars[diff].pop_and_reset()
for diff in diff_train_scalars}
finally:
for p in processes:
time.sleep(0.1)
p.join()
if args.curriculum_learning:
result = {"all_result":all_tracked_means,
"diff_result":diff_tracked_means,
"scene_result":scene_tracked_means}
else:
result = {"all_result":all_tracked_means,
"scene_result":scene_tracked_means}
try:
with open(args.results_json, "w") as fp:
json.dump(result, fp, sort_keys=True, indent=4)
except:
print("dump result to path {} failed, result dumped to test_result.json".format(args.results_json))
with open("test_result.json", "w") as fp:
json.dump(result, fp, sort_keys=True, indent=4)
print("\n\n\nall_result:\n")
print(Series(all_tracked_means))
print("\n\n\nscene_result:\n")
print(DataFrame(scene_tracked_means))
if args.curriculum_learning:
print("\n\n\ndiff_result:\n")
print( | DataFrame(diff_tracked_means) | pandas.DataFrame |
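# Hedged sketch (assumption): how the ScalarMeanTracker objects above aggregate
# per-episode results; the keys used here are illustrative, not the real result keys.
def _tracker_example():
    tracker = ScalarMeanTracker()
    tracker.add_scalars({"success": 1.0, "spl": 0.7})
    tracker.add_scalars({"success": 0.0, "spl": 0.0})
    # pop_and_reset returns the mean of every tracked scalar and clears the tracker,
    # e.g. {"success": 0.5, "spl": 0.35}
    return tracker.pop_and_reset()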
from collections import Counter
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
class KNN:
def __init__(self, k: int):
self.k = k # number of nearest neighbors to be found
self.features = pd.DataFrame([]) # feature matrix
self.labels = pd.Series([]) # label vector
self.index = | pd.Index([]) | pandas.Index |
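# Hedged sketch (assumption, not the original implementation): how the imported
# cdist and Counter are typically combined for a k-nearest-neighbour prediction.
def _knn_predict_sketch(features: pd.DataFrame, labels: pd.Series, query: np.ndarray, k: int):
    # Euclidean distances between the query point and every stored feature row
    distances = cdist(np.atleast_2d(query), features.to_numpy())[0]
    nearest = np.argsort(distances)[:k]
    # majority vote among the labels of the k nearest rows
    return Counter(labels.iloc[nearest]).most_common(1)[0][0]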
import pyAgrum as gum
from .DiscreteDistribution import DiscreteDistribution
from .DiscreteVariable import DiscreteVariable
from .MLModel import FitParametersBase, MLModel
import colored
import pandas as pd
import tqdm
import typing_extensions
import pydantic
import typing
import copy
import pkg_resources
import warnings
installed_pkg = {pkg.key for pkg in pkg_resources.working_set}
if 'ipdb' in installed_pkg:
import ipdb # noqa: F401
_CPT_HTML_TEMPLATE = """
<div>
Variable {var_name}
</div>
<div style="float:left; margin:10px">
Conditional count table
{cct}
</div>
<div style="float:left; margin:10px">
Conditional probability table
{cpt}
</div>
"""
_BN_HTML_TEMPLATE = """
<div>
Model {name}
</div>
<div>
{cpt}
</div>
"""
class BayesianNetwork(pydantic.BaseModel):
__slots__ = ('bn',)
name: str = pydantic.Field("", description="Bayes net title")
variables: typing.Dict[str, typing.Optional[DiscreteVariable]] = pydantic.Field(
{}, description="Discrete variable specification")
parents: typing.Dict[str, typing.List[str]] = pydantic.Field(
{}, description="Dictionary giving for each variable the list of their parents")
cct: typing.Dict[str, list] = pydantic.Field(
{}, description="Conditional counts for each variable according their parents")
backend: typing.Optional[typing_extensions.Literal["pyagrum"]] = pydantic.Field(
"pyagrum", description="Bayesian network backend")
@pydantic.validator("variables")
def normalize_variables(cls, variables):
for var_name, var in variables.items():
if var is None:
variables[var_name] = DiscreteVariable()
variables[var_name].name = var_name
return variables
def __init__(self, **data: typing.Any):
super().__init__(**data)
# Init BN backend
backend_bn_method = \
getattr(self, self.backend + "_init_bn", None)
if callable(backend_bn_method):
backend_bn_method()
else:
raise ValueError(
f"Bayesian network backend '{self.backend}' not supported")
self.init_backend()
def init_backend(self):
# Add variables to backend
for var_name, var in self.variables.items():
self.bn_update_variable(var_name)
# Add parents to backend
for var_name, var in self.parents.items():
self.bn_update_parents(var_name)
# Update CCT
for var_name in self.variables.keys():
self.update_cct(var_name)
def __str__(self):
cct_cpt_sep = colored.stylize(" | ",
colored.fg("blue") + colored.attr("bold"))
var_strlist = []
for var_name, var in self.variables.items():
var_header_str = colored.stylize(f"Variable: {var_name}",
colored.fg("blue") + colored.attr("bold"))
cct_str = self.get_cct(var_name, transpose=True).to_string()
cpt_str = self.get_cpt(var_name, transpose=True).to_string()
cct_strlist = cct_str.split("\n")
cpt_strlist = cpt_str.split("\n")
cct_cpt_strlist = [cct + cct_cpt_sep + cpt
for cct, cpt in zip(cct_strlist, cpt_strlist)]
cct_cpt_str = "\n".join(cct_cpt_strlist)
var_str = "\n".join([var_header_str,
cct_cpt_str])
var_strlist.append(var_str)
bn_str = "\n\n".join(var_strlist)
return bn_str
def to_html(self, transpose=True, filename=None):
cpt_html_list = []
for var_name in self.variables.keys():
cpt_html_list.append(self.cpt_to_html(
var_name, transpose=transpose))
cpt_html = "\n".join(cpt_html_list)
bn_html = _BN_HTML_TEMPLATE.format(
name=self.name,
cpt=cpt_html)
if not(filename is None):
with open(filename, "w") as f:
f.write(bn_html)
return bn_html
def cpt_to_html(self, var_name, transpose=True, filename=None):
cct = self.get_cct(var_name, transpose=transpose)
cpt = self.get_cpt(var_name, transpose=transpose)
cpt_html = _CPT_HTML_TEMPLATE.format(
var_name=var_name,
cct=cct.to_html(),
cpt=cpt.to_html())
if not(filename is None):
with open(filename, "w") as f:
f.write(cpt_html)
return cpt_html
# Required when using slot attributes
def __setattr__(self, attr, value):
if attr in self.__slots__:
object.__setattr__(self, attr, value)
else:
super(self.__class__, self).__setattr__(attr, value)
def is_num_equal(self, other, check_cct=True):
# Check variables
if list(self.variables.keys()) != list(other.variables.keys()):
return False
for var_self, var_other in zip(self.variables.values(),
other.variables.values()):
if var_self.domain != var_other.domain:
return False
if var_self.domain_type != var_other.domain_type:
return False
if (self.get_cpt(var_self.name) != other.get_cpt(var_other.name)).any(None):
return False
if (self.get_cct(var_self.name) != other.get_cct(var_other.name)).any(None):
return False
if not(self.backend_is_num_equal(other)):
return False
return True
def backend_is_num_equal(self, other):
if self.backend != other.backend:
return False
backend_is_num_equal_method = \
getattr(self, self.backend + "_is_num_equal", None)
if callable(backend_is_num_equal_method):
return backend_is_num_equal_method(other)
else:
raise ValueError(
f"Bayesian network backend '{self.backend}' not supported")
def pyagrum_is_num_equal(self, other):
if self.bn.names() != other.bn.names():
return False
for var in self.bn.names():
if (self.bn.cpt(var)[:] != other.bn.cpt(var)[:]).any():
return False
return True
def pyagrum_init_bn(self):
self.bn = gum.BayesNet(self.name)
def add_variable(self, **var_specs):
self.update_variable(**var_specs)
def bn_update_variable(self, var_name):
backend_bn_method = \
getattr(self, self.backend + "_update_variable", None)
if callable(backend_bn_method):
backend_bn_method(var_name)
else:
raise ValueError(
f"Bayesian network backend '{self.backend}' not supported")
def pyagrum_update_variable(self, var_name):
bn_var = self.variables[var_name].pyagrum_init_var()
if var_name in self.bn.names():
self.bn.erase(var_name)
# Pyagrum does not support empty domain variables
if bn_var.domainSize() > 0:
self.bn.add(bn_var)
def remove_parent(self, var_name):
self.add_parents(var_name)
def add_parents(self, var_name, parents=[]):
# TODO: We have a problem here !
if var_name in self.variables.keys():
if all([pa in self.variables.keys()
for pa in parents]):
self.parents[var_name] = parents
# Update parents info in backend
self.bn_update_parents(var_name)
# Update variables numerical specs
self.update_cct(var_name)
else:
raise ValueError(
"Parent variables must be part of BN variables")
else:
raise ValueError("Variable {var_name} is not part of BN variables")
def update_variable(self, **var_specs):
""" Update variables specs."""
new_var = DiscreteVariable(**var_specs)
# Just add variable
self.variables[new_var.name] = new_var
self.bn_update_variable(new_var.name)
self.update_cct(new_var.name)
# Update case: the variable already exists and has children
var_children = [v for v in self.variables.keys()
if new_var.name in self.parents.get(v, [])]
for vc in var_children:
self.bn_update_variable(vc)
self.update_cct(vc)
def bn_update_parents(self, var_name):
""" Update parents specs in backend."""
backend_bn_method = \
getattr(self, self.backend + "_update_parents", None)
if callable(backend_bn_method):
backend_bn_method(var_name)
else:
raise ValueError(
f"Bayesian network backend '{self.backend}' not supported")
def pyagrum_update_parents(self, var_name):
for parent in self.parents.get(var_name, []):
if (var_name in self.bn.names()) and \
(parent in self.bn.names()):
self.bn.addArc(parent, var_name)
def update_cct(self, var_name):
parents_var = self.parents.get(var_name, [])
var_domain = [var_name] + parents_var
var_domain_labels = \
pd.MultiIndex.from_product([self.variables[v].domain for v in var_domain],
names=var_domain)
init_cct = False
if not(var_name in self.cct):
init_cct = True
else:
var_domain_labels_cur = pd.DataFrame(self.cct[var_name]).columns
# DOES NOT WORK ANYMORE WITH ADD_PARENT
#var_domain_labels_cur = self.get_cct(var_name, flatten=True).index
if len(var_domain) == 1:
init_cct = set(var_domain_labels.levels[0]) != \
set(var_domain_labels_cur)
else:
init_cct = set(var_domain_labels) != \
set(var_domain_labels_cur)
# Erase CCT only if CPT structure has changed
if init_cct:
# Init CCT as a DataFrame
cct_df = pd.Series(0, index=var_domain_labels, name="count")
# Store CCT in cct attribute
self.cct[var_name] = cct_df.to_frame()\
.reset_index()\
.to_dict("records")
# Update backend parameters
# NOTE: This is mandatory even if structure has not changed
# All this BN backend can be improved a LOT !
# => Implement PANDAS BN !!!
self.update_cpt_params(var_name)
def init_from_dataframe(self, df, add_data_var=False):
for var_name, dfs in df.items():
# Do not add variable from data columns not defined in
# variables dictionary
if not(add_data_var) and not(var_name in self.variables.keys()):
continue
if dfs.dtype.name == "category":
if dfs.cat.categories.dtype.name == "interval":
var_domain_type = "interval"
var_domain = [str(it)
for it in dfs.cat.categories.to_list()]
else:
var_domain_type = "label"
var_domain = dfs.cat.categories.to_list()
else:
# Default behaviour: force categorical data
var_domain_type = "label"
var_domain = dfs.astype(str).astype(
"category").cat.categories.to_list()
var = DiscreteVariable(name=var_name,
domain=var_domain,
domain_type=var_domain_type)
self.variables[var_name] = var
self.init_backend()
def adapt_data(self, data):
"""Utility method to ensure series has well formatted categorical data, i.e. string labels.
"""
# parents_var = self.parents.get(var_name, [])
# var_dim = [var_name] + parents_var
data_new = {}
data_var_list = [var for var in self.variables.keys()
if var in data.columns]
# Check if input dataframe has consistent categorical variables
for var_name, data_var_s in data[data_var_list].items():
data_var_s = data[var_name]
if data_var_s.dtype.name != "category":
# Try to transform it
cat_type = pd.api.types.CategoricalDtype(categories=self.variables[var_name].domain,
ordered=self.variables[var_name].domain_type != "label")
data_var_s = data_var_s.astype(str).astype(cat_type)
if data_var_s.cat.categories.dtype.name == "interval":
series_lab = [str(it)
for it in data_var_s.cat.categories.to_list()]
else:
series_lab = data_var_s.cat.categories.to_list()
if self.variables[var_name].domain != series_lab:
err_msg = f"Domain of variable {var_name}: {self.variables[var_name].domain}\n"
err_msg += f"Series categories: : {series_lab}\n"
err_msg += f"Inconsistency detected"
raise ValueError(err_msg)
if data_var_s.cat.categories.dtype.name == "interval":
data_var_s.cat.rename_categories(
self.variables[var_name].domain, inplace=True)
data_new[var_name] = data_var_s
return pd.DataFrame(data_new, index=data.index)
def fit(self, data,
update_fit=False,
update_decay=0,
logger=None):
for var_name in self.variables.keys():
var_domain = [var_name] + self.parents.get(var_name, [])
var_not_in_data = [
var for var in var_domain if not(var in data.columns)]
if len(var_not_in_data) > 0:
warnings.warn(f"Skip variable {var_name} fitting: domain [{var_not_in_data}] not in data",
RuntimeWarning)
continue
df_cur = data[var_domain]
self.fit_cpt(data=df_cur,
var_name=var_name,
update_fit=update_fit,
update_decay=update_decay,
logger=logger)
def fit_cpt(self, data, var_name,
update_fit=False,
update_decay=0,
logger=None):
"""
This function aims to compute the joint counts associated to CPT parameters from a Pandas
dataframe.
Parameters
- data: a Pandas DataFrame consisting only of categorical variables.
- var_name: the variable name associated with the CPT to be fitted.
- update_fit: indicates whether current joint counts have to be updated with the new observations.
- update_decay: decay coefficient in [0,1] reducing the weight of current counts compared to newly fitted data. 0 means that old data has the same weight as new data; otherwise count_update = count_new_fit + (1-decay)*count_old. Note that a decay coefficient of 1 is equivalent to setting update_fit == False.
Note: this method is an adaptation of code found at http://www-desir.lip6.fr/~phw/aGrUM/officiel/notebooks/
"""
if not(logger is None):
logger.debug("- Learn CPT {0}\n".format(var_name))
parents_var = self.parents.get(var_name, [])
var_dim = [var_name] + parents_var
data = self.adapt_data(data)
# # Check if input dataframe has consistent catagorical variables
# for var in data[var_domain].columns:
# # data_var_type = self.find_consistent_series_type(data[var])
# # data[var] = data[var].astype(str).astype(data_var_type)
# self.adapt_series_categories(data[var], inplace=True)
# ipdb.set_trace()
# Compute counts from input data
index_name = data.index.name if not(data.index.name is None) \
else "index"
cct_cur_df = data[var_dim].reset_index()\
.groupby(by=var_dim, dropna=False)[index_name]\
.count()\
.rename("count")
if len(self.cct.get(var_name, [])) > 0 and update_fit:
cct_df = cct_cur_df + \
(1 - update_decay)*self.get_cct(var_name, flatten=True)
self.cct[var_name] = cct_df.to_frame()\
.reset_index()\
.to_dict("records")
else:
# Erase cpt counts with new counts
self.cct[var_name] = cct_cur_df.to_frame()\
.reset_index()\
.to_dict("records")
# Update backend parameters after learning process
self.update_cpt_params(var_name)
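# Illustrative helper (assumption, unused by the class): the update rule documented
# in fit_cpt, count_update = count_new_fit + (1 - update_decay) * count_old,
# applied to plain numbers.
@staticmethod
def _update_decay_example(count_old=10, count_new_fit=4, update_decay=0.5):
    # with update_decay = 0.5, old counts keep half their weight: 4 + 0.5 * 10 = 9.0
    return count_new_fit + (1 - update_decay) * count_old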
def get_cct(self, var_name, transpose=False, flatten=False):
"""
This method returns the variable count table as a DataFrame, or as a Series if flatten is requested.
"""
parents_var = self.parents.get(var_name, [])
var_domain = [var_name] + parents_var
cct_df = pd.DataFrame(self.cct[var_name])
# Set categorical labels for counts columns with respect to
# variable labels
for var in cct_df[var_domain].columns:
cat_type = pd.api.types.CategoricalDtype(
categories=self.variables[var].domain,
ordered=self.variables[var].domain_type != "label")
cct_df[var] = cct_df[var].astype(cat_type)
cct_df = pd.pivot_table(data=cct_df,
index=var_name,
columns=parents_var,
dropna=False,
values="count").fillna(0)
if len(parents_var) == 0:
# for unified output, rename the single "count" column to "" since
# there are no parents
cct_df.rename(columns={"count": ""}, inplace=True)
if flatten:
# when there is no parent, cct_df.index is a CategoricalIndex
# which does not support reorder_levels
if len(parents_var) > 0:
# PROBLEM: STACK CANCELS CATEGORICAL MULTINDEX INDEX SPECS IN pandas<=1.2.4
# Use transpose+stack one dim instead
# cct_df = cct_df.stack(parents_var).reorder_levels(var_domain)
cct_df = cct_df.transpose().stack(var_name).reorder_levels(var_domain)
else:
cct_df = cct_df[""]
cct_df.name = "count"
if transpose:
cct_df = cct_df.transpose()
return cct_df
def get_cpt(self, var_name,
nan_management="uniform",
flatten=False,
transpose=False):
"""
apriori_coef: parameter representing the a priori weight during the fitting process compared to data. If apriori_coef is a non-negative real number:
- the higher it is, the closer the resulting configuration distribution will be to the a priori distribution.
- the lower it is, the closer the resulting configuration distribution will be to the distribution fitted from data.
The user can also pass a string selecting an a priori coefficient strategy. Possible values are:
- smart: in this case, the a priori coefficient is set to 1/nb_data_conf if nb_data_conf > 0 else 1, where nb_data_conf is the number of observations for a given configuration.
apriori_dist: shape of the a priori distribution. Possible values are: 'uniform'. Passing None to this parameter disables a priori consideration in the fitting process.
apriori_data_threshold: apply the a priori to a conditional distribution if the number of observed corresponding configurations is lower than or equal to this parameter.
Notes:
- To avoid numerical problems such as joint probabilities of 0 during the inference process, it is recommended to
ensure non-zero probabilities for each modality of each variable | parents.
=> TODO: implement the smart a priori system to do that
"""
cct_df = self.get_cct(var_name)
parents_var = self.parents.get(var_name, [])
var_domain = [var_name] + parents_var
var_norm_size = len(self.variables[var_name].domain)
# if len(parents_var) == 0:
# # To have unified output change unique columns name as "" since
# # there is no parents
# cpt_df = cct_df/cct_df.sum()
# cpt_df.name = "prob"
# else:
# Normalization
cpt_df = cct_df.div(cct_df.sum(axis=0), axis=1)
if flatten:
# when there is no parent, cpt_df.index is a CategoricalIndex
# which does not support reorder_levels
if len(parents_var) > 0:
cpt_df = cpt_df.stack(parents_var, dropna=False)
# NOTE: Theoretically reorder level is useless
# but we keep it to be sure
cpt_df = cpt_df.reorder_levels(var_domain)
else:
cpt_df = cpt_df[""]
cpt_df.name = "prob"
# Manage NaN
if nan_management == "uniform":
cpt_df.fillna(1/var_norm_size,
inplace=True)
else:
cpt_df.fillna(0, inplace=True)
if transpose:
cpt_df = cpt_df.transpose()
return cpt_df
# # A priori management
# if not(apriori_dist is None):
# # Select apriori distribution
# apriori_joint_arr = pd.np.zeros(joint_counts.shape)
# if apriori_dist == "uniform":
# apriori_joint_arr[:] = 1/apriori_joint_arr.shape[0]
# else:
# err_msg = "apriori distribution {0} is not supported. Possible values are : 'uniform'\n".format(apriori_dist)
# raise ValueError(err_msg)
# # Build the apriori coefficient array
# apriori_coef_arr = pd.np.ones(cond_counts.shape)
# if isinstance(apriori_coef, str):
# if apriori_coef == "smart":
# if len(cond_counts.shape) == 0:
# apriori_coef_arr = 1/cond_counts if cond_counts > 0 else 1.0
# else:
# idx_cond_count_sup_0 = pd.np.where(cond_counts > 0)
# apriori_coef_arr = pd.np.ones(cond_counts.shape)
# apriori_coef_arr[idx_cond_count_sup_0] = 1/cond_counts[idx_cond_count_sup_0]
# else:
# err_msg = "apriori coef {0} is not supported. Possible values are : 'smart' or non negative value\n".format(apriori_coef)
# raise ValueError(err_msg)
# else:
# if len(cond_counts.shape) == 0:
# apriori_coef_arr = abs(apriori_coef)
# else:
# apriori_coef_arr[:] = abs(apriori_coef)
# # Check coordinate that need apriori
# if len(cond_counts.shape) == 0:
# if cond_counts > apriori_data_threshold: apriori_coef_arr = 0.
# else:
# apriori_counts_idx = cond_counts <= apriori_data_threshold
# apriori_coef_arr[~apriori_counts_idx] = 0.
# # Update joint and cond counts
# joint_counts += apriori_joint_arr*apriori_coef_arr
# cond_counts = joint_counts.sum(axis=0)
# # Normalization of counts to get a consistent CPT
# # Note: np.nan_to_num is used only in the case where no apriori is requested to force nan value to 0
# # => this is of course highly unsafe to work in this situation as CPTs may not sum to 1 for all configurations
# bn.cpt(var_name)[:] = pd.np.nan_to_num((joint_counts/cond_counts).transpose().reshape(*domains)
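# Illustrative note (assumption): with flatten=True, get_cpt returns a Series named
# "prob" indexed by (variable, parents...) labels, e.g. a hypothetical P(wet | rain)
# would be indexed by a (wet, rain) MultiIndex; with flatten=False it is a DataFrame
# with one row per variable label and one column per parent configuration.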
def update_cpt_params(self, var_name):
update_cpt_params_method = getattr(
self, self.backend + "_update_cpt_params", None)
if callable(update_cpt_params_method):
return update_cpt_params_method(var_name)
else:
raise ValueError(
f"Bayesian network backend '{self.backend}' not supported")
def pyagrum_update_cpt_params(self, var_name):
if not(var_name in self.bn.names()):
return
cpt = self.get_cpt(var_name, transpose=True)
# Reshape
parents_var = self.parents.get(var_name, [])
var_domain = parents_var + [var_name]
cpt_domain_size = [len(self.variables[var].domain)
for var in var_domain]
try:
cpt_np = cpt.to_numpy()\
.reshape(cpt_domain_size)
except Exception as e:
ipdb.set_trace()
# Then transpose axis if needed
cpt_nb_dim = len(var_domain)
if cpt_nb_dim > 1:
cpt_transpose = list(
reversed(range(0, cpt_nb_dim - 1))) + [cpt_nb_dim - 1]
cpt_np = cpt_np.transpose(cpt_transpose)
self.bn.cpt(var_name)[:] = cpt_np
def predict(self, data, var_targets, map_k=0, probs=True,
progress_mode=False, logger=None, **kwrgs):
"""
This function is used to predict the value of a target variable from observations
using a bayesian network model.
Inputs:
- data: the data containing the observations used to predict the target variable
as a =pandas.DataFrame= object
- var_targets: the names of the target variables as a list of =str= objects
- probs: indicate if posterior probabilities are returned as a DiscreteDistribution object
- map_k: indicate if the k most probable labels are returned. (default map_k == 0)
Returns:
- a numpy.array containing the predictions of the target variables maximising the
maximum a posteriori criterion
- a numpy.array containing the posterior probability distribution of the target
variable given each observation in data.
"""
if isinstance(data, pd.core.frame.DataFrame):
data_pred = self.adapt_data(data)
else:
raise ValueError(f"Input data must be a Pandas DataFrame")
predict_method = getattr(self, self.backend + "_predict", None)
if callable(predict_method):
return predict_method(data=data_pred,
var_targets=var_targets,
map_k=map_k,
probs=probs,
progress_mode=progress_mode,
logger=logger)
else:
raise ValueError(
f"Bayesian network backend '{self.backend}' not supported")
def pyagrum_predict(self, data, var_targets,
map_k=0,
probs=True,
progress_mode=False,
logger=None):
# Initialize the inference engine
inf_bn = gum.LazyPropagation(self.bn)
inf_bn.setTargets(set(var_targets))
# target_size = len(self.variables[var_target].domain)
# target_dom = self.variables[var_target].domain
nb_data = len(data)
pred_res = {tv: {"scores": DiscreteDistribution(index=data.index,
**self.variables[tv].dict()),
"comp_ok": | pd.Series(True, index=data.index) | pandas.Series |
#%%
import os
import sys
os.chdir(os.path.dirname(os.getcwd())) # change working directory one step up from the current directory
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
import connectome_tools.cascade_analysis as casc
import connectome_tools.celltype as ct
import connectome_tools.cluster_analysis as clust
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
adj = pm.Promat.pull_adj('ad', subgraph='brain and accessory')
edges = pd.read_csv('data/edges_threshold/pairwise-threshold_ad_all-edges.csv', index_col=0)
pairs = pm.Promat.get_pairs()
dVNCs = pymaid.get_skids_by_annotation('mw dVNC')
dVNC_pairs = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids_bothsides', skids=dVNCs, use_skids=True)
# %%
# dVNC projectome data prep
import cmasher as cmr
projectome = pd.read_csv('data/projectome/projectome_adjacency.csv', index_col = 0, header = 0)
projectome.index = [str(x) for x in projectome.index]
# identify meshes
meshes = ['Brain Hemisphere left', 'Brain Hemisphere right', 'SEZ_left', 'SEZ_right', 'T1_left', 'T1_right', 'T2_left', 'T2_right', 'T3_left', 'T3_right', 'A1_left', 'A1_right', 'A2_left', 'A2_right', 'A3_left', 'A3_right', 'A4_left', 'A4_right', 'A5_left', 'A5_right', 'A6_left', 'A6_right', 'A7_left', 'A7_right', 'A8_left', 'A8_right']
pairOrder_dVNC = [x for sublist in zip(dVNC_pairs.leftid, dVNC_pairs.rightid) for x in sublist]
input_projectome = projectome.loc[meshes, [str(x) for x in pairOrder_dVNC]]
output_projectome = projectome.loc[[str(x) for x in pairOrder_dVNC], meshes]
dVNC_projectome_pairs_summed_output = []
indices = []
for i in np.arange(0, len(output_projectome.index), 2):
combined_pairs = (output_projectome.iloc[i, :] + output_projectome.iloc[i+1, :])
combined_hemisegs = []
for j in np.arange(0, len(combined_pairs), 2):
combined_hemisegs.append((combined_pairs[j] + combined_pairs[j+1]))
dVNC_projectome_pairs_summed_output.append(combined_hemisegs)
indices.append(output_projectome.index[i])
dVNC_projectome_pairs_summed_output = pd.DataFrame(dVNC_projectome_pairs_summed_output, index = indices, columns = ['brain','SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'])
#dVNC_projectome_pairs_summed_output = dVNC_projectome_pairs_summed_output.iloc[:, 1:len(dVNC_projectome_pairs_summed_output)]
# normalize each row by its total number of presynaptic sites
dVNC_projectome_pairs_summed_output_norm = dVNC_projectome_pairs_summed_output.copy()
for i in range(len(dVNC_projectome_pairs_summed_output)):
sum_row = sum(dVNC_projectome_pairs_summed_output_norm.iloc[i, :])
for j in range(len(dVNC_projectome_pairs_summed_output.columns)):
dVNC_projectome_pairs_summed_output_norm.iloc[i, j] = dVNC_projectome_pairs_summed_output_norm.iloc[i, j]/sum_row
# remove brain from columns
dVNC_projectome_pairs_summed_output_norm_no_brain = dVNC_projectome_pairs_summed_output_norm.iloc[:, 1:len(dVNC_projectome_pairs_summed_output)]
dVNC_projectome_pairs_summed_output_no_brain = dVNC_projectome_pairs_summed_output.iloc[:, 1:len(dVNC_projectome_pairs_summed_output)]
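# Hedged aside (illustration only): the row-normalisation loop above can equivalently
# be written with pandas' div; kept here for reference and not used below.
def _row_normalize_sketch(df):
    # divide every row by its own sum so each dVNC pair's outputs sum to 1
    return df.div(df.sum(axis=1), axis=0)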
# %%
# ordering and plotting
# sorting with normalized data
sort_threshold = 0
dVNC_projectome_pairs_summed_output_sort_norm = dVNC_projectome_pairs_summed_output_norm_no_brain.copy()
dVNC_projectome_pairs_summed_output_sort_norm[dVNC_projectome_pairs_summed_output_sort_norm<sort_threshold]=0
order = ['SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']
order.reverse()
dVNC_projectome_pairs_summed_output_sort_norm.sort_values(by=order, ascending=False, inplace=True)
sort = dVNC_projectome_pairs_summed_output_sort_norm.index
cmap = plt.cm.get_cmap('Blues') # modify 'Blues' cmap to have a white background
blue_cmap = cmap(np.linspace(0, 1, 20))
blue_cmap[0] = np.array([1, 1, 1, 1])
blue_cmap = mpl.colors.LinearSegmentedColormap.from_list(name='New_Blues', colors=blue_cmap)
cmap = blue_cmap
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.heatmap(dVNC_projectome_pairs_summed_output_norm.loc[sort, :], ax=ax, cmap=cmap)
plt.savefig(f'VNC_interaction/plots/projectome/A8-T1_sort_projectome_normalized_sortThres{sort_threshold}.pdf', bbox_inches='tight')
# sorting with raw data
sort_threshold = 0
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_no_brain.copy()
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<sort_threshold]=0
order = ['SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']
order.reverse()
dVNC_projectome_pairs_summed_output_sort.sort_values(by=order, ascending=False, inplace=True)
sort = dVNC_projectome_pairs_summed_output_sort.index
vmax = 70
cmap = blue_cmap
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.heatmap(dVNC_projectome_pairs_summed_output.loc[sort, :], ax=ax, cmap=cmap, vmax=vmax)
plt.savefig(f'VNC_interaction/plots/projectome/A8-T1_sort_projectome_sortThres{sort_threshold}.pdf', bbox_inches='tight')
# %%
# old prototype code; lots of conflicting ordering sections added for testing purposes
'''
# order based on clustering raw data
cluster = sns.clustermap(dVNC_projectome_pairs_summed_output, col_cluster = False, figsize=(6,4))
row_order = cluster.dendrogram_row.reordered_ind
#fig, ax = plt.subplots(figsize=(6,4))
#sns.heatmap(dVNC_projectome_pairs_summed_output.iloc[row_order, :], rasterized=True, ax=ax)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_raw.pdf', bbox_inches='tight')
# order based on clustering normalized data
cluster = sns.clustermap(dVNC_projectome_pairs_summed_output_norm, col_cluster = False, figsize=(6,4), rasterized=True)
row_order = cluster.dendrogram_row.reordered_ind
#fig, ax = plt.subplots(figsize=(6,4))
#sns.heatmap(dVNC_projectome_pairs_summed_output_norm.iloc[row_order, :], rasterized=True, ax=ax)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_normalized.pdf', bbox_inches='tight')
# order based on counts per column
for i in range(1, 51):
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_norm.copy()
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<(i/100)]=0
dVNC_projectome_pairs_summed_output_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order = dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort.sum(axis=1)>0].index
second_sort = dVNC_projectome_pairs_summed_output_norm[dVNC_projectome_pairs_summed_output_sort.sum(axis=1)==0]
second_sort[second_sort<.1]=0
second_sort.sort_values(by=[i for i in reversed(['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'])], ascending=False, inplace=True)
row_order = list(row_order) + list(second_sort.index)
fig, ax = plt.subplots(figsize=(6,4))
sns.heatmap(dVNC_projectome_pairs_summed_output_norm.loc[row_order, :], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/projectome_0.{i}-sort-threshold.pdf', bbox_inches='tight')
for i in range(1, 51):
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output.copy()
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<(i)]=0
dVNC_projectome_pairs_summed_output_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order = dVNC_projectome_pairs_summed_output_sort.index
second_sort = dVNC_projectome_pairs_summed_output[dVNC_projectome_pairs_summed_output_sort.sum(axis=1)==0]
second_sort[second_sort<10]=0
second_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order = list(row_order) + list(second_sort.index)
fig, ax = plt.subplots(figsize=(6,4))
sns.heatmap(dVNC_projectome_pairs_summed_output.loc[row_order, :], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/projectome_{i}-sort-threshold.pdf', bbox_inches='tight')
fig, ax = plt.subplots(figsize=(3,2))
sns.heatmap(dVNC_projectome_pairs_summed_output.iloc[row_order, :], ax=ax)
plt.savefig('VNC_interaction/plots/projectome/output_projectome_cluster.pdf', bbox_inches='tight', transparent = True)
# order input projectome in the same way
dVNC_projectome_pairs_summed_input = []
indices = []
for i in np.arange(0, len(input_projectome.columns), 2):
combined_pairs = (input_projectome.iloc[:, i] + input_projectome.iloc[:, i+1])
combined_hemisegs = []
for j in np.arange(0, len(combined_pairs), 2):
combined_hemisegs.append((combined_pairs[j] + combined_pairs[j+1]))
dVNC_projectome_pairs_summed_input.append(combined_hemisegs)
indices.append(input_projectome.columns[i])
dVNC_projectome_pairs_summed_input = pd.DataFrame(dVNC_projectome_pairs_summed_input, index = indices, columns = ['SEZ', 'T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'])
dVNC_projectome_pairs_summed_input = dVNC_projectome_pairs_summed_input.iloc[:, 1:len(dVNC_projectome_pairs_summed_input)]
#cluster = sns.clustermap(dVNC_projectome_pairs_summed_input, col_cluster = False, cmap = cmr.freeze, figsize=(10,10))
fig, ax = plt.subplots(figsize=(3,2))
sns.heatmap(dVNC_projectome_pairs_summed_input.iloc[row_order, :], cmap=cmr.freeze, ax=ax)
plt.savefig('VNC_interaction/plots/projectome/input_projectome_cluster.pdf', bbox_inches='tight', transparent = True)
'''
# %%
# paths 2-hop upstream of each dVNC
from tqdm import tqdm
# sort dVNC pairs
sort = [int(x) for x in sort]
dVNC_pairs.set_index('leftid', drop=False, inplace=True)
dVNC_pairs = dVNC_pairs.loc[sort, :]
dVNC_pairs.reset_index(inplace=True, drop=True)
hops = 2
threshold = 0.01
dVNC_pair_paths_us = [pm.Promat.upstream_multihop(edges=edges, sources=dVNC_pairs.loc[i].to_list(), hops=hops) for i in tqdm(range(0, len(dVNC_pairs)))]
dVNC_pair_paths_ds = [pm.Promat.downstream_multihop(edges=edges, sources=dVNC_pairs.loc[i].to_list(), hops=hops) for i in tqdm(range(0, len(dVNC_pairs)))]
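# Added sanity check (illustrative, not in the original script): upstream_multihop/downstream_multihop
# are assumed to return one list of skids per hop, so the layer sizes for the first dVNC pair can be
# inspected directly before plotting.
for hop_i, (us_layer, ds_layer) in enumerate(zip(dVNC_pair_paths_us[0], dVNC_pair_paths_ds[0])):
    print(f'dVNC pair {dVNC_pairs.loc[0].leftid}: hop {hop_i+1} has {len(us_layer)} upstream and {len(ds_layer)} downstream skids')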
# %%
# plotting individual dVNC paths
_, celltypes = ct.Celltype_Analyzer.default_celltypes()
skids_list = [list(adj.index)] + [x.get_skids() for x in celltypes]
# UPSTREAM
all_layers_us = [ct.Celltype_Analyzer.layer_id(dVNC_pair_paths_us, dVNC_pairs.leftid, skids_type)[0] for skids_type in skids_list]
layer_names = ['Total'] + [x.get_name() for x in celltypes]
threshold = 0.01
layer_colors = ['Greens', 'Greens', 'Blues', 'Greens', 'Oranges', 'Reds', 'Greens', 'Blues', 'Purples', 'Blues', 'Reds', 'Purples', 'Reds', 'Purples', 'Reds', 'Purples', 'Reds']
layer_vmax = [200, 50, 50, 50, 50, 50, 50, 50, 50, 50, 100, 50, 50, 50, 50, 50, 50]
save_path = 'VNC_interaction/plots/dVNC_partners/Upstream_'
ct.Celltype_Analyzer.plot_layer_types(layer_types=all_layers_us, layer_names=layer_names, layer_colors=layer_colors,
layer_vmax=layer_vmax, pair_ids=dVNC_pairs.leftid, figsize=(.5*hops/3, 1.5), save_path=save_path, threshold=threshold, hops=hops)
# DOWNSTREAM
all_layers_ds = [ct.Celltype_Analyzer.layer_id(dVNC_pair_paths_ds, dVNC_pairs.leftid, skids_type)[0] for skids_type in skids_list]
layer_names = ['Total'] + [x.get_name() for x in celltypes]
threshold = 0.01
layer_colors = ['Greens', 'Greens', 'Blues', 'Greens', 'Oranges', 'Reds', 'Greens', 'Blues', 'Purples', 'Blues', 'Reds', 'Purples', 'Reds', 'Purples', 'Reds', 'Purples', 'Reds']
layer_vmax = [200, 50, 50, 50, 50, 50, 50, 50, 50, 50, 100, 50, 50, 50, 50, 50, 50]
save_path = 'VNC_interaction/plots/dVNC_partners/Downstream-in-brain_'
ct.Celltype_Analyzer.plot_layer_types(layer_types=all_layers_ds, layer_names=layer_names, layer_colors=layer_colors,
layer_vmax=layer_vmax, pair_ids=dVNC_pairs.leftid, figsize=(.5*hops/3, 1.5), save_path=save_path, threshold=threshold, hops=hops)
# %%
# make bar plots for 1-hop and 2-hop
_, celltypes = ct.Celltype_Analyzer.default_celltypes()
figsize = (2,0.5)
# UPSTREAM
us_1order = ct.Celltype_Analyzer([ct.Celltype(str(dVNC_pairs.loc[i].leftid) + '_us_1o', x[0]) for i, x in enumerate(dVNC_pair_paths_us)])
us_2order = ct.Celltype_Analyzer([ct.Celltype(str(dVNC_pairs.loc[i].leftid) + '_us_2o', x[1]) for i, x in enumerate(dVNC_pair_paths_us)])
us_1order.set_known_types(celltypes)
us_2order.set_known_types(celltypes)
path = 'VNC_interaction/plots/dVNC_partners/summary_plot_1st_order_upstream.pdf'
us_1order.plot_memberships(path = path, figsize=figsize)
path = 'VNC_interaction/plots/dVNC_partners/summary_plot_2nd_order_upstream.pdf'
us_2order.plot_memberships(path = path, figsize=figsize)
# DOWNSTREAM
ds_1order = ct.Celltype_Analyzer([ct.Celltype(str(dVNC_pairs.loc[i].leftid) + '_ds_1o', x[0]) for i, x in enumerate(dVNC_pair_paths_ds)])
ds_2order = ct.Celltype_Analyzer([ct.Celltype(str(dVNC_pairs.loc[i].leftid) + '_ds_2o', x[1]) for i, x in enumerate(dVNC_pair_paths_ds)])
ds_1order.set_known_types(celltypes)
ds_2order.set_known_types(celltypes)
path = 'VNC_interaction/plots/dVNC_partners/summary_plot_1st_order_downstream.pdf'
ds_1order.plot_memberships(path = path, figsize=figsize)
path = 'VNC_interaction/plots/dVNC_partners/summary_plot_2nd_order_downstream.pdf'
ds_2order.plot_memberships(path = path, figsize=figsize)
# %%
# combine all data types for dVNCs: us1o, us2o, ds1o, ds2o, projectome
fraction_cell_types_1o_us = pd.DataFrame([x.iloc[:, 0] for x in fraction_types], index = fraction_types_names).T
fraction_cell_types_1o_us.columns = [f'1o_us_{x}' for x in fraction_cell_types_1o_us.columns]
unk_col = 1-fraction_cell_types_1o_us.sum(axis=1)
unk_col[unk_col==1]=0 # rows with no identified partners would otherwise read as 100% unknown; '==11' in the source can never be true since unk_col = 1 - sum
fraction_cell_types_1o_us['1o_us_unk']=unk_col
fraction_cell_types_2o_us = pd.DataFrame([x.iloc[:, 1] for x in fraction_types], index = fraction_types_names).T
fraction_cell_types_2o_us.columns = [f'2o_us_{x}' for x in fraction_cell_types_2o_us.columns]
unk_col = 1-fraction_cell_types_2o_us.sum(axis=1)
unk_col[unk_col==1]=0
fraction_cell_types_2o_us['2o_us_unk']=unk_col
fraction_cell_types_1o_ds = pd.DataFrame([x.iloc[:, 0] for x in fraction_types_ds], index = fraction_types_names).T
fraction_cell_types_1o_ds.columns = [f'1o_ds_{x}' for x in fraction_cell_types_1o_ds.columns]
unk_col = 1-fraction_cell_types_1o_ds.sum(axis=1)
unk_col[unk_col==1]=0
fraction_cell_types_1o_ds['1o_ds_unk']=unk_col
fraction_cell_types_1o_ds[fraction_cell_types_1o_ds==-1]=0
fraction_cell_types_2o_ds = pd.DataFrame([x.iloc[:, 1] for x in fraction_types_ds], index = fraction_types_names).T
fraction_cell_types_2o_ds.columns = [f'2o_ds_{x}' for x in fraction_cell_types_2o_ds.columns]
unk_col = 1-fraction_cell_types_2o_ds.sum(axis=1)
unk_col[unk_col==1]=0
fraction_cell_types_2o_ds['2o_ds_unk']=unk_col
fraction_cell_types_2o_ds[fraction_cell_types_2o_ds==-1]=0
all_data = dVNC_projectome_pairs_summed_output_norm.copy()
all_data.index = [int(x) for x in all_data.index]
all_data = pd.concat([fraction_cell_types_1o_us, fraction_cell_types_2o_us, all_data, fraction_cell_types_1o_ds, fraction_cell_types_2o_ds], axis=1)
all_data.fillna(0, inplace=True)
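# Added check (illustrative): after the concat each row is one dVNC pair, with columns running
# 1st/2nd-order upstream fractions -> projectome segments (T1-A8) -> 1st/2nd-order downstream fractions.
print(all_data.shape)
print(all_data.columns.tolist())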
# clustered version of all_data combined
cluster = sns.clustermap(all_data, col_cluster = False, figsize=(30,30), rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_all_data.pdf', bbox_inches='tight')
order = cluster.dendrogram_row.reordered_ind
fig,ax=plt.subplots(1,1,figsize=(6,4))
sns.heatmap(all_data.iloc[order, :].drop(list(fraction_cell_types_1o_us.columns) + list(fraction_cell_types_2o_us.columns) + list(fraction_cell_types_1o_ds.columns) + list(fraction_cell_types_2o_ds.columns), axis=1), ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_all_data_same_size.pdf', bbox_inches='tight')
cluster = sns.clustermap(all_data.drop(['1o_us_pre-dVNC', '2o_us_pre-dVNC'], axis=1), col_cluster = False, figsize=(20,15), rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/clustered_projectome_all_data_removed_us-pre-dVNCs.pdf', bbox_inches='tight')
# decreasing sort of all_data but with feedback and non-feedback dVNC clustered
for i in range(1, 50):
dVNCs_with_FB = all_data.loc[:, list(fraction_cell_types_1o_ds.columns) + list(fraction_cell_types_2o_ds.columns)].sum(axis=1)
dVNCs_FB_true_skids = dVNCs_with_FB[dVNCs_with_FB>0].index
dVNCs_FB_false_skids = dVNCs_with_FB[dVNCs_with_FB==0].index
dVNC_projectome_pairs_summed_output_sort = all_data.copy()
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_sort.loc[:, ['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']]
dVNC_projectome_pairs_summed_output_sort = dVNC_projectome_pairs_summed_output_sort.loc[dVNCs_FB_true_skids]
dVNC_projectome_pairs_summed_output_sort[dVNC_projectome_pairs_summed_output_sort<(i/100)]=0
dVNC_projectome_pairs_summed_output_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order_FB_true = dVNC_projectome_pairs_summed_output_sort.index
second_sort = all_data.copy()
second_sort = second_sort.loc[:, ['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']]
second_sort = second_sort.loc[dVNCs_FB_false_skids]
second_sort[second_sort<(i/100)]=0
second_sort.sort_values(by=['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8'], ascending=False, inplace=True)
row_order_FB_false = second_sort.index
row_order = list(row_order_FB_true) + list(row_order_FB_false)
fig, ax = plt.subplots(figsize=(20, 15))
sns.heatmap(all_data.loc[row_order, :], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/splitFB_projectome_0.{i}-sort-threshold.pdf', bbox_inches='tight')
fig, ax = plt.subplots(figsize=(6,4))
sns.heatmap(all_data.loc[row_order, ['T1', 'T2', 'T3', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8']], ax=ax, rasterized=True)
plt.savefig(f'VNC_interaction/plots/projectome/splitFB_same-size_projectome_0.{i}-sort-threshold.pdf', bbox_inches='tight')
# %%
# what fraction of us and ds neurons are from different cell types per hop?
fraction_cell_types_1o_us = pd.DataFrame([x.iloc[:, 0] for x in fraction_types], index = fraction_types_names)
fraction_cell_types_1o_us = fraction_cell_types_1o_us.fillna(0) # one dVNC with no inputs
fraction_cell_types_2o_us = pd.DataFrame([x.iloc[:, 1] for x in fraction_types], index = fraction_types_names)
fraction_cell_types_2o_us = fraction_cell_types_2o_us.fillna(0) # one dVNC with no inputs
fraction_cell_types_1o_us_scatter = []
for j in range(1, len(fraction_cell_types_1o_us.columns)):
for i in range(0, len(fraction_cell_types_1o_us.index)):
fraction_cell_types_1o_us_scatter.append([fraction_cell_types_1o_us.iloc[i, j], fraction_cell_types_1o_us.index[i]])
fraction_cell_types_1o_us_scatter = pd.DataFrame(fraction_cell_types_1o_us_scatter, columns = ['fraction', 'cell_type'])
fraction_cell_types_2o_us_scatter = []
for j in range(1, len(fraction_cell_types_2o_us.columns)):
for i in range(0, len(fraction_cell_types_2o_us.index)):
fraction_cell_types_2o_us_scatter.append([fraction_cell_types_2o_us.iloc[i, j], fraction_cell_types_2o_us.index[i]])
fraction_cell_types_2o_us_scatter = | pd.DataFrame(fraction_cell_types_2o_us_scatter, columns = ['fraction', 'cell_type']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# **1. Load JSON file**<Br>
# **2. Data Exploration and Visualization**<br>
# **3. Select variables and Convert into CSV**<br>
# **4. Text Preprocessing**
# > a) Change to lower cases<Br>
# > b) Transform links (tentative?)<br>
# > c) Remove punctuation<br>
# > d) Remove stopwords<br>
# > e) lemmatize words (to root forms)<br>
####### 1. Loading JSON file #######
import numpy as np
import pandas as pd
import os
import json
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
get_ipython().magic(u'matplotlib inline')
inline_rc = dict(mpl.rcParams)
from tqdm import tqdm
#True: all data (about 8 mil); False: 500,000 entries
full_data = False
#load user review data
reviews = []
with open('data/yelp_academic_dataset_review.json') as f:
for i, line in tqdm(enumerate(f)):
reviews.append(json.loads(line))
if full_data==False and i+1 >= 500000:
break
df_review = pd.DataFrame(reviews)
df_review.tail()
#load business data
biz=[]
with open('data/yelp_academic_dataset_business.json') as f1:
for i, line in tqdm(enumerate(f1)):
biz.append(json.loads(line))
if full_data==False and i+1 >= 500000:
break
df_biz = pd.DataFrame(biz)
df_biz.tail()
#load user data
user=[]
with open('data/yelp_academic_dataset_user.json') as f1:
for i, line in tqdm(enumerate(f1)):
user.append(json.loads(line))
if full_data==False and i+1 >= 500000:
break
df_user = pd.DataFrame(user)
df_user.tail()
####### 2. Data Exloration and Visualization #######
x=df_review['stars'].value_counts()
x=x.sort_index()
#plot star rating distribution
plt.figure(figsize=(6,5))
ax= sns.barplot(x.index, x.values, alpha=0.8)
plt.title("Star Rating Distribution", fontsize=16)
plt.ylabel('Number of businesses')
plt.xlabel('Star Ratings')
biz_cat = ''.join(df_biz['categories'].astype('str'))
cats=pd.DataFrame(biz_cat.split(','),columns=['categories'])
#prep for chart
x=cats.categories.value_counts()
x=x.sort_values(ascending=False)
x=x.iloc[0:20]
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(x.index, x.values, alpha=0.8)#,color=color[5])
plt.title("Top business categories",fontsize=25)
locs, labels = plt.xticks()
plt.setp(labels, rotation=80)
plt.ylabel('Number of businesses', fontsize=12)
plt.xlabel('Category', fontsize=12)
plt.show()
####### 3. Select Variables and Convert into CSV #######
# Issues for consideration:<br>
# Are we going to pick an industry, then work with the subset businesses?
# Or we do not consider the industry? e.g. cafe, restaurant, hair salon, etc.
# Replace business_id with businesss name
# Selected three variables: business_name, stars, text
df_comb=df_review.copy()
df_comb['business_name'] = df_comb['business_id'].map(df_biz.set_index('business_id')['name'])
df_comb = df_comb[['business_name','stars','text']]
df_comb
#plot 20 most reviewed business
x=df_comb['business_name'].value_counts()
x=x.sort_values(ascending=False)
x=x.iloc[0:20]
#plot chart
plt.figure(figsize=(16,4))
ax = sns.barplot(x.index, x.values, alpha=0.8)#,color=color[5])
plt.title("20 Most Reviewed Businesses",fontsize=20)
locs, labels = plt.xticks()
plt.setp(labels, rotation=80)
plt.ylabel('Number of reviews', fontsize=12)
plt.xlabel('Business', fontsize=12)
plt.show()
### Conversion into CSV ###
#Convert review, business, user datasets into CSV
#df_review.to_csv('data/yelp_reviews.csv', index=False)
#df_biz.to_csv('data/yelp_business.csv', index=False)
#df_user.to_csv('data/yelp_user.csv', index=False)
####### 4. Text Preprocessing #######
#### Preprocessing steps: ####
# For Sentiment analysis:
# >a) Change to lower cases
# >b) Remove HTML
# >c) Remove duplicate characters
# >d) Remove punctuation & Tokenize
# >e) Remove stopwords
# >f) Lemmatization/Stemming
import nltk
#nltk.download('stopwords')
#nltk.download('wordnet')
from tqdm.auto import tqdm, trange
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer,PorterStemmer
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
import string
import re
lemmatizer = WordNetLemmatizer()
stemmer = PorterStemmer()
#True: preprocessing for sentiment analysis
#False: preprocessing for text summarization
sentiment=True
def preprocess(s):
if sentiment==True:
#1. lowercase
s = s.lower()
#2. remove HTML
soup = BeautifulSoup(s,'lxml')
html_free = soup.get_text()
#3. remove duplicate characters
        reg = re.sub(r'([a-z])\1+', r'\1', html_free) # operate on the HTML-stripped text from the previous step
#4. Remove punctuation & Tokenize
no_punct = "".join([c for c in reg if c not in string.punctuation])
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(no_punct)
        #5. Remove stopwords
        filtered_words = [w for w in tokens if w not in stopwords.words('english')]
        #6. Lemmatize/stem words
final_words=[lemmatizer.lemmatize(w) for w in filtered_words]
#final_words=[stemmer.stem(w) for w in filtered_words]
else:
#1. lowercase
s = s.lower()
#2. remove HTML
soup = BeautifulSoup(s,'lxml')
html_free = soup.get_text()
#3. remove duplicate characters
        reg = re.sub(r'([a-z])\1+', r'\1', html_free) # operate on the HTML-stripped text from the previous step
tokenizer = RegexpTokenizer(r'\w+')
final_words = tokenizer.tokenize(reg)
return " ".join(final_words)
tqdm.pandas()
df_pre = df_comb.copy() # assumed: preprocessing is applied to a copy of the selected review columns (df_pre was not defined above)
df_pre['text'] = df_pre['text'].progress_map(lambda s: preprocess(s))
#printout before & after of preprocessing
| pd.DataFrame({'from': df_review['text'], 'to': df_pre['text']}) | pandas.DataFrame |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (make could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), | pd.offsets.Hour(1) | pandas.offsets.Hour |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, date_range)
from pandas.core.index import MultiIndex
from pandas.compat import StringIO, lrange, range, u
from pandas import compat
import pandas.util.testing as tm
from .common import TestData
class TestSeriesRepr(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second", "foo one 0",
" two 1", " three 2",
"bar one 3", " two 4",
"baz two 5", " three 6",
"qux one 7", " two 8",
" three 9", "Name: sth, dtype: int64"]
expected = "\n".join(expected)
self.assertEqual(repr(s), expected)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
# test big series (diff code path)
s = Series(lrange(0, 1000))
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
s = Series(index=date_range('20010101', '20020101'), name='test')
self.assertIn("Name: test", repr(s))
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series( | tm.randn(1000) | pandas.util.testing.randn |
# coding=utf-8
# Real-time air quality data from Beijing official environmental department: http://zx.bjmemc.com.cn/getAqiList.shtml
import datetime
import pandas as pd
from selenium import webdriver
import requests
import const
import settings
config = settings.config[const.DEFAULT]
def get_beijing_aq_latest():
station_id_mapping = {
1: "yongdingmennei_aq", # 永定门
2: "yufa_aq", # 京南
3: "zhiwuyuan_aq", # 香山
5: "fengtaihuayuan_aq", # 丰台花园
6: "shunyi_aq", # 顺义新城
7: "yanqin_aq", # 夏都
8: "pinggu_aq", # 平谷镇
9: "fangshan_aq", # 良乡
10: "yizhuang_aq", # 亦庄
11: "yungang_aq", # 云岗
12: "miyunshuiku_aq", # 京东北
13: "huairou_aq", # 怀柔镇
14: "badaling_aq", # 京西北
15: "wanshouxigong_aq", # 万寿西宫
17: "pingchang_aq", # 昌平镇
18: "mentougou_aq", # 双峪
19: "tongzhou_aq", # 通州北苑
20: "daxing_aq", # 黄村
21: "dingling_aq", # 定陵
23: "qianmen_aq", # 前门
24: "dongsi_aq", # 东四
25: "tiantan_aq", # 天坛
26: "aotizhongxin_aq", # 奥体中心
27: "nongzhanguan_aq", # 农展馆
28: "miyun_aq", # 密云镇
29: "gucheng_aq", # 古城
32: "guanyuan_aq", # 西城官园
34: "nansanhuan_aq", # 南三环
37: "beibuxinqu_aq", # 北部新区
38: "wanliu_aq", # 海淀万柳
40: "yongledian_aq", # 京东南
41: "liulihe_aq", # 京西南
43: "donggaocun_aq", # 京东
46: "dongsihuan_aq", # 东四环
47: "xizhimenbei_aq", # 西直门
}
url = "http://zx.bjmemc.com.cn/getAqiList.shtml"
driver = webdriver.Chrome(executable_path=config[const.CHROME_DRIVER_PATH])
driver.get(url)
data = driver.execute_script("return wfelkfbnx;")
rows = list()
for item in data:
_id = item["id"]
if _id not in station_id_mapping:
continue
station_id = station_id_mapping[_id]
pm2_5 = item["pm2_01"]
pm10 = item["pm10_01"]
no2 = item["no2_01"]
co = item["co_01"]
o3 = item["o3_01"]
so2 = item["so2_01"]
time_str = (datetime.datetime.utcfromtimestamp(int(item["date_f"])) - datetime.timedelta(hours=8)).strftime(
'%Y-%m-%d %H:%M:%S')
rows.append([station_id, time_str, pm2_5, pm10, o3, no2, co, so2])
driver.close()
df = pd.DataFrame(data=rows, columns=[const.ID, const.TIME, const.PM25, const.PM10,
const.O3, const.NO2, const.CO, const.SO2])
return df
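def _demo_get_beijing_aq_latest():
    # Added usage sketch (hypothetical helper name, not part of the original module): requires the
    # Chrome driver path from settings/config above and network access to zx.bjmemc.com.cn.
    df_latest = get_beijing_aq_latest()
    df_latest.to_csv("beijing_aq_latest.csv", index=False)
    return df_latest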
def get_beijing_historical(chunk_size = 1):
start_date = datetime.date(2015, 1, 1)
end_date = datetime.date(2017, 1, 1)
date_range = | pd.date_range(start_date, end_date) | pandas.date_range |
import numpy as np
import pandas as pd
import scipy.stats as stats
class Aggregation:
"""Cálculo de padrões de agregação
Argumento:
file: arquivo de dados no formato csv
Retorno:
pandas series e dataframe com os resultados da análise de agregação
determinados pelo método de McGinnies, Racker-Brischle e Morisita
"""
def __init__(self, file):
self._file = file
self._df = pd.read_csv(file, sep=";", decimal=",", encoding="latin-1")
self._ni = self._df.Especie.value_counts()
self._ut = self._df["Parcela"].nunique()
self._Di = self._ni / self._ut
self._ui = self._df.groupby("Especie")["Parcela"].nunique()
self._fri = self._ui / self._ut
self._di = -np.log(1 - self._fri)
def __str__(self) -> str:
return f"Análise de agregação"
def mcginnies(self):
return self._Di / self._di
def racker_brischle(self):
return (self._Di - self._di) / self._di**2
def morisita(self):
self._table = | pd.crosstab(self._df["Parcela"], self._df["Especie"]) | pandas.crosstab |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: Eastmoney - quote homepage - Shanghai, Shenzhen and Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
    Eastmoney - Shanghai, Shenzhen and Beijing A shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
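def _demo_stock_zh_a_spot_em():
    # Added usage sketch (hypothetical helper name, not part of the original module): pull the
    # real-time A-share snapshot and show the five names with the largest turnover; "成交额" is the
    # turnover column assigned above. Requires network access to Eastmoney.
    spot_df = stock_zh_a_spot_em()
    print(spot_df.sort_values(by="成交额", ascending=False).head())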
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
    Eastmoney - B shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
    Eastmoney - mapping from stock code to market ID
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: mapping from stock code to market ID
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
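def _demo_code_id_map_em():
    # Added usage sketch (hypothetical helper name, not part of the original module): the mapping
    # feeds the "secid" field used in the history functions below; given the m:1 (Shanghai) and
    # m:0 (Shenzhen/Beijing) filters above, a Shanghai code is expected to map to 1 and a Shenzhen
    # code to 0. Requires network access to Eastmoney.
    code_map = code_id_map_em()
    print(code_map.get("600000"), code_map.get("000001"))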
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney - quote homepage - Shanghai, Shenzhen and Beijing A shares - daily quotes
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": "forward adjusted", "hfq": "backward adjusted", "": "not adjusted"}
    :type adjust: str
    :return: daily quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
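def _demo_stock_zh_a_hist():
    # Added usage sketch (hypothetical helper name, not part of the original module): one year of
    # unadjusted daily bars for symbol 000001; requires network access to Eastmoney.
    hist_df = stock_zh_a_hist(symbol="000001", period="daily", start_date="20210101", end_date="20211231", adjust="")
    print(hist_df.head())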
def stock_zh_a_hist_min_em(
symbol: str = "000001",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
period: str = "5",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney - quote homepage - Shanghai, Shenzhen and Beijing A shares - intraday (minute-level) quotes
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_date: start datetime
    :type start_date: str
    :param end_date: end datetime
    :type end_date: str
    :param period: choice of {'1', '5', '15', '30', '60'}
    :type period: str
    :param adjust: choice of {'', 'qfq', 'hfq'}
    :type adjust: str
    :return: intraday (minute-level) quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "https://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"ndays": "5",
"iscr": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
def stock_zh_a_hist_pre_min_em(
symbol: str = "000001",
start_time: str = "09:00:00",
end_time: str = "15:50:00",
) -> pd.DataFrame:
"""
Eastmoney - quotes home - Shanghai/Shenzhen/Beijing A shares - intraday quotes including pre-market data
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: stock code
:type symbol: str
:param start_time: start time
:type start_time: str
:param end_time: end time
:type end_time: str
:return: intraday quotes including pre-market data
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "https://push2.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"ndays": "1",
"iscr": "1",
"iscca": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
date_format = temp_df.index[0].date().isoformat()
temp_df = temp_df[
date_format + " " + start_time : date_format + " " + end_time
]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
def stock_hk_spot_em() -> pd.DataFrame:
"""
Eastmoney - Hong Kong stocks - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#hk_stocks
:return: Hong Kong stocks real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:128 t:3,m:128 t:4,m:128 t:1,m:128 t:2",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"今开",
"最高",
"最低",
"昨收",
"成交量",
"成交额",
]
]
temp_df["序号"] = pd.to_numeric(temp_df["序号"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
return temp_df
def stock_hk_hist(
symbol: str = "40224",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "22220101",
adjust: str = "",
) -> pd.DataFrame:
"""
Eastmoney - quotes - Hong Kong stocks - daily quotes
http://quote.eastmoney.com/hk/08367.html
:param symbol: Hong Kong stock code
:type symbol: str
:param period: choice of {'daily', 'weekly', 'monthly'}
:type period: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:param adjust: choice of {"qfq": "1", "hfq": "2", "": "not adjusted"}
:type adjust: str
:return: daily quotes
:rtype: pandas.DataFrame
"""
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://33.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"116.{symbol}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"end": "20500000",
"lmt": "1000000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
if temp_df.empty:
return pd.DataFrame()
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df = temp_df[start_date:end_date]
if temp_df.empty:
return pd.DataFrame()
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
def stock_hk_hist_min_em(
symbol: str = "01611",
period: str = "1",
adjust: str = "",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
) -> pd.DataFrame:
"""
Eastmoney - quotes - Hong Kong stocks - intraday quotes
http://quote.eastmoney.com/hk/00948.html
:param symbol: stock code
:type symbol: str
:param period: choice of {'1', '5', '15', '30', '60'}
:type period: str
:param adjust: choice of {'', 'qfq', 'hfq'}
:type adjust: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:return: intraday quotes
:rtype: pandas.DataFrame
"""
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "http://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"116.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"116.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
def stock_us_spot_em() -> pd.DataFrame:
"""
Eastmoney - US stocks - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#us_stocks
:return: US stocks real-time quotes; delayed 15 min
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "20000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:105,m:106,m:107",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"_",
"_",
"_",
"简称",
"编码",
"名称",
"最高价",
"最低价",
"开盘价",
"昨收价",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"市盈率",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df["代码"] = temp_df["编码"].astype(str) + "." + temp_df["简称"]
temp_df = temp_df[
[
"序号",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"开盘价",
"最高价",
"最低价",
"昨收价",
"总市值",
"市盈率",
"成交量",
"成交额",
"振幅",
"换手率",
"代码",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["昨收价"] = pd.to_numeric(temp_df["昨收价"], errors="coerce")
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
return temp_df
def stock_us_hist(
symbol: str = "105.MSFT",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "22220101",
adjust: str = "",
) -> pd.DataFrame:
"""
Eastmoney - quotes - US stocks - daily quotes
http://quote.eastmoney.com/us/ENTX.html#fullScreenChart
:param symbol: stock code; obtain it from the `代码` field returned by ak.stock_us_spot_em()
:type symbol: str
:param period: choice of {'daily', 'weekly', 'monthly'}
:type period: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:param adjust: choice of {"qfq": "1", "hfq": "2", "": "not adjusted"}
:type adjust: str
:return: daily quotes
:rtype: pandas.DataFrame
"""
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
url = "http://63.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"{symbol}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"end": "20500000",
"lmt": "1000000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_ | numeric(temp_df["成交量"]) | pandas.to_numeric |
import pandas as pd
import numpy as np
from sklearn import preprocessing
from tqdm import tqdm
from scipy.stats import t
def beta_ridge(Y, X, lamb):
"""
Compute ridge coeffs
Parameters
----------
Y : Nx1 Matrix
X : Matrix (with intercept column)
lamb : Lambda value to use for L2
Returns
-------
coefficients : Vector of ridge coefficients
Note
----
For simplicity we use matrix inverses,
which are not computationally efficient at O(p^3).
SVD would be a more efficient approach.
"""
Z = X.iloc[:, 1:]
Z = pd.DataFrame(preprocessing.scale(Z))
Y_c = Y - np.mean(Y)
left = np.linalg.inv(Z.transpose().dot(Z) + lamb * np.identity(Z.shape[1]))
right = Z.transpose().dot(Y_c)
coefficients = left.dot(right)
return(coefficients)
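# Hypothetical helper, not part of the original module: the note above says an
# SVD route avoids the O(p^3) inverse. With Z = U diag(s) Vt, the ridge solution
# is Vt.T diag(s / (s^2 + lamb)) U.T y_c. Sketch assuming the same Y/X layout
# (intercept in the first column of X) as beta_ridge:
def beta_ridge_svd(Y, X, lamb):
    # standardize the non-intercept columns and centre Y, as beta_ridge does
    Z = pd.DataFrame(preprocessing.scale(X.iloc[:, 1:]))
    Y_c = Y - np.mean(Y)
    U, s, Vt = np.linalg.svd(Z, full_matrices=False)
    # shrink each singular direction by s / (s^2 + lamb)
    return Vt.T.dot((s / (s ** 2 + lamb)) * U.T.dot(Y_c))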
def sse(Y, X, betas):
'''
Get sum of square errors
Parameters
----------
Y : Nx1 Matrix
X : Matrix
betas : Vector of estimated coefficients
Returns
-------
sse : Sum of square errors
'''
e = betas.dot(X.values.T)-Y
sse = np.sum(e**2)
return sse
def ridge_fit(Y, X, lamb, n_iter=100, progress_disable = False):
"""
Estimate ridge standard errors through bootstrapping
Parameters
----------
Y : Nx1 Matrix
X : Matrix
lamb : Lambda value to use for L2
n_iter : Integer number of bootstrap iterations
progress_disable : Disable option for tqdm progress bar
Returns
-------
results : Results wrapper with ridge results
coefficients = ridge coefficients from full sample
bootstrap_coeffs = ridge coefficients from bootstrapping procedure
bootstrap_coeffs_var = Coefficient variance from bootstrapping
bootstrap_coeffs_SE = Coefficient standard errors from bootstrapping
bootstrap_coeffs_t = T-stats (from bootstrapping SE)
bootstrap_coeffs_p = P-values
"""
nobs = np.shape(X)[0]
K = np.shape(X)[1] - 1
beta_hat_boots = np.zeros((n_iter, K))
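# Each bootstrap pass resamples the rows with replacement and refits the ridge
# coefficients; the spread of beta_hat_boots across passes gives the bootstrap
# standard errors described in the docstring.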
for b_iter in tqdm(range(0, n_iter), disable=progress_disable):
b_index = np.random.choice(range(0, nobs), nobs, replace = True)
_Y, _X = pd.DataFrame(Y).iloc[b_index], | pd.DataFrame(X) | pandas.DataFrame |
#!/usr/bin/env python
# Copyright (c) 2020 IBM Corp. - <NAME> <<EMAIL>>
# Based on: masked_language_modeling.py
# https://keras.io/examples/nlp/masked_language_modeling/
# Fixed spelling errors in messages and comments.
# Preparation on dyce2:
# virtualenv --system-site-packages tf-nightly
# source tf-nightly/bin/activate
# pip install tf-nightly
# pip install dataclasses
# pip install pandas
# pip install pydot
# Results in TF 2.5.0 using the available CUDA 11
import os
#0 = all messages are logged (default behavior)
#1 = INFO messages are not printed
#2 = INFO and WARNING messages are not printed
#3 = INFO, WARNING, and ERROR messages are not printed
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
from dataclasses import dataclass
import pandas as pd
import numpy as np
import glob
import re
from pprint import pprint
@dataclass
class Config:
MAX_LEN = 256 # length of each input sample in tokens
BATCH_SIZE = 32 # batch size
LR = 0.001 # learning rate
VOCAB_SIZE = 512 # max number of words in vocabulary
EMBED_DIM = 128 # word embedding vector size
NUM_HEAD = 8 # used in bert model
FF_DIM = 128 # feedforward; used in bert model
NUM_LAYERS = 1 # number of BERT module layers
config = Config()
# Every sample file contains a single line of text.
# Returns these lines as a list of strings.
def get_text_list_from_files(files):
text_list = []
for name in files:
with open(name) as f:
for line in f:
text_list.append(line)
return text_list
# Compose the full path names to the token files.
# Creates and returns a dataframe.
# Frame has single key "tokens".
def get_data_from_text_files(folder_name):
files = glob.glob(folder_name + "/*.toks")
texts = get_text_list_from_files(files)
df = | pd.DataFrame({"tokens": texts}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import pygame
pygame.init()
# In[25]:
WIDTH = 1200
HEIGHT = 600
THICKNESS = 30
BALL_RADIUS = 20
PAD_WIDTH = 30
PAD_HEIGHT = 120
VELOCITY = 1
FRAMERATE = 150
BUFFER = 5
AI = True
bgColor = pygame.Color('white')
wallColor = pygame.Color('gray')
padColor = pygame.Color('orange')
ballColor = pygame.Color('red')
# In[26]:
screen = pygame.display.set_mode((WIDTH, HEIGHT))
screen.fill(bgColor)
# In[27]:
class Ball:
def __init__(self, x, y, vx, vy):
self.x = x
self.y = y
self.vx = vx
self.vy = vy
def show(self, color):
global screen
pygame.draw.circle(screen, color, (self.x, self.y), BALL_RADIUS)
def level(self, velocity):
self.vx = velocity if self.vx > 0 else -velocity
self.vy = velocity if self.vy > 0 else -velocity
def update(self):
global HEIGHT, THICKNESS, BALL_RADIUS, PAD_WIDTH, PAD_WIDTH, bgColor, ballColor, padObject
newX = self.x + self.vx
newY = self.y + self.vy
if newX < (THICKNESS + BALL_RADIUS) or (newX > (WIDTH-PAD_WIDTH-BUFFER-BALL_RADIUS) and not (newX >= WIDTH-BUFFER-PAD_WIDTH//2) and (((self.y+BALL_RADIUS) >= padObject.y) and ((self.y-BALL_RADIUS) <= (padObject.y + PAD_HEIGHT)))):
self.vx = -self.vx
elif newY < (THICKNESS + BALL_RADIUS) or newY > (HEIGHT-BALL_RADIUS-THICKNESS):
self.vy = - self.vy
else:
self.show(bgColor)
self.x = self.x + self.vx
self.y = self.y + self.vy
self.show(ballColor)
class Pad:
def __init__(self, y):
self.y = y
def show(self, color):
global screen, PAD_HEIGHT, PAD_WIDTH, BUFFER
pygame.draw.rect(screen, color, pygame.Rect((WIDTH-PAD_WIDTH-BUFFER), self.y, PAD_WIDTH, PAD_HEIGHT))
def update(self, position=None):
global HEIGHT, THICKNESS, padColor, PAD_HEIGHT
if position == None:
newY = pygame.mouse.get_pos()[1]
else:
newY = position
if newY > THICKNESS and newY < (HEIGHT-THICKNESS-PAD_HEIGHT):
self.show(bgColor)
self.y = newY
self.show(padColor)
# In[28]:
pygame.draw.rect(screen, wallColor, pygame.Rect(0, 0, WIDTH, THICKNESS))
pygame.draw.rect(screen, wallColor, pygame.Rect(0, THICKNESS, THICKNESS, (HEIGHT-THICKNESS)))
pygame.draw.rect(screen, wallColor, pygame.Rect(0, (HEIGHT-THICKNESS), WIDTH, THICKNESS))
init_x = np.random.choice(range((0+THICKNESS+BALL_RADIUS), ((WIDTH-THICKNESS-BALL_RADIUS)+1)))
init_y = np.random.choice(range((0+THICKNESS+BALL_RADIUS), ((HEIGHT-THICKNESS-BALL_RADIUS)+1)))
ballObject = Ball(init_x, init_y, -VELOCITY, -VELOCITY)
ballObject.show(ballColor)
padObject = Pad(HEIGHT//2)
padObject.show(padColor)
# In[29]:
if AI:
pongAI = pickle.load(open('pong_AI.sav', 'rb'))
pongAI
features = ['x', 'y', 'vx', 'vy']
target = 'pad_y'
# In[30]:
if not AI:
logger_df = pd.DataFrame(columns=['x', 'y', 'vx', 'vy', 'pad_y'])
game_c = 0
clock = pygame.time.Clock()
while True:
event = pygame.event.poll()
if event.type == pygame.QUIT:
pygame.quit()
break
game_c += 1
_ = clock.tick(FRAMERATE)
pygame.display.flip()
ballObject.update()
if AI:
padObject.update(pongAI.predict([[ballObject.x, ballObject.y, ballObject.vx, ballObject.vy]]))
else:
padObject.update(None)
logger_df = logger_df.append({
'x': ballObject.x,
'y': ballObject.y,
'vx': ballObject.vx,
'vy': ballObject.vy,
'pad_y': padObject.y
}, ignore_index=True, sort=False)
if game_c%(VELOCITY*5000) == 0:
VELOCITY += 1
ballObject.level(VELOCITY)
if not AI:
print(logger_df.shape)
print(logger_df.head())
# In[ ]:
if not AI:
logger_df.to_parquet('training_data.parquet')
# In[ ]:
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
# In[ ]:
def mape(A, F):
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from sklearn.model_selection import train_test_split
pd.options.mode.chained_assignment = None
def data_preprocessing():
df = | pd.read_csv("data/SCADA_data.csv.gz") | pandas.read_csv |
import numpy as np
import numpy.linalg as linalg
import pandas as pd
def linear_regression(X, y):
return linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
def go():
data = np.loadtxt('quasar_train.csv', delimiter=',')
wavelengths = data[0]
fluxes = data[1]
ones = np.ones(fluxes.size)
df_ones = pd.DataFrame(ones, columns=['xint'])
df_wavelengths = pd.DataFrame(wavelengths, columns=['wavelength'])
df_fluxes = | pd.DataFrame(fluxes, columns=['flux']) | pandas.DataFrame |
from datetime import datetime
import warnings
import pytest
import pandas as pd
import pyodbc
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, conversion, create
pd.options.mode.chained_assignment = "raise"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
@pytest.fixture(scope="module")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
@pytest.fixture(scope="module")
def sample():
dataframe = pd.DataFrame(
{
"_varchar": [None, "b", "c", "4", "e"],
"_tinyint": [None, 2, 3, 4, 5],
"_smallint": [256, 2, 6, 4, 5], # tinyint max is 255
"_int": [32768, 2, 3, 4, 5], # smallint max is 32,767
"_bigint": [2147483648, 2, 3, None, 5], # int max size is 2,147,483,647
"_float": [1.111111, 2, 3, 4, 5], # any decicmal places
"_time": [str(datetime.now().time())]
* 5, # string in format HH:MM:SS.ffffff
"_datetime": [datetime.now()] * 4 + [pd.NaT],
"_empty": [None] * 5,
}
)
return dataframe
def test_table_errors(sql):
table_name = "##test_table_column"
with pytest.raises(KeyError):
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, primary_key_column="Z")
def test_table_column(sql):
table_name = "##test_table_column"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "A")
assert all(schema["sql_type"] == "varchar")
assert all(schema["is_nullable"] == True)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "string")
assert all(schema["odbc_type"] == pyodbc.SQL_VARCHAR)
assert all(schema["odbc_size"] == 0)
assert all(schema["odbc_precision"] == 0)
def test_table_pk(sql):
table_name = "##test_table_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "FLOAT"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_composite_pk(sql):
table_name = "##test_table_composite_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(5)", "C": "FLOAT"}
primary_key_column = ["A", "B"]
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, 2, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, False, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_pk_input_error(sql):
with pytest.raises(ValueError):
table_name = "##test_table_pk_input_error"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "DECIMAL(5,2)"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
sql_primary_key=True,
)
def test_table_sqlpk(sql):
table_name = "##test_table_sqlpk"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, sql_primary_key=True)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 2
assert all(schema.index == ["_pk", "A"])
assert all(schema["sql_type"] == ["int identity", "varchar"])
assert all(schema["is_nullable"] == [False, True])
assert all(schema["ss_is_identity"] == [True, False])
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA], index=["_pk", "A"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True])
assert all(schema["pandas_type"] == ["Int32", "string"])
assert all(schema["odbc_type"] == [pyodbc.SQL_INTEGER, pyodbc.SQL_VARCHAR])
assert all(schema["odbc_size"] == [4, 0])
assert all(schema["odbc_precision"] == [0, 0])
def test_table_from_dataframe_simple(sql):
table_name = "##test_table_from_dataframe_simple"
dataframe = pd.DataFrame({"ColumnA": [1]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "ColumnA")
assert all(schema["sql_type"] == "tinyint")
assert all(schema["is_nullable"] == False)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "UInt8")
assert all(schema["odbc_type"] == pyodbc.SQL_TINYINT)
assert all(schema["odbc_size"] == 1)
assert all(schema["odbc_precision"] == 0)
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result.equals(dataframe)
def test_table_from_dataframe_datestr(sql):
table_name = "##test_table_from_dataframe_datestr"
dataframe = pd.DataFrame({"ColumnA": ["06/22/2021"]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create_meta.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame({
'column_name': pd.Series(['ColumnA','_time_insert']),
'sql_type': pd.Series(['date','datetime2'], dtype='string'),
'is_nullable': pd.Series([False, True]),
'ss_is_identity': pd.Series([False, False]),
'pk_seq': pd.Series([None, None], dtype='Int64'),
'pk_name': pd.Series([None, None], dtype='string'),
'pandas_type': | pd.Series(['datetime64[ns]', 'datetime64[ns]'], dtype='string') | pandas.Series |
import torch
import pandas as pd
from Util import data_split, data_split_val
import spacy
import numpy as np
import pickle
# nlp = spacy.load('en_core_web_sm')
import json
import dateparser
# from bson.int64 import Int64
from datetime import datetime
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse.linalg import svds
import os
import nltk
from Util import data_preprocess
import json
from transformers import RobertaTokenizer
domain_map = {"gossip":0,"politi":1, "health_deterrent":2}
def flip_label(labels, p=0.4):
mask = np.random.binomial(1, p, len(labels))
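# mask[i] == 1 marks label i for flipping, so on average a fraction p of the
# labels are inverted (simple symmetric label noise).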
flip_label = []
for index, i in enumerate(mask):
# keep the label
if i == 0:
flip_label.append(labels[index])
else:
flip_label.append(1-labels[index])
return flip_label
class SimpleTextDataset(torch.utils.data.Dataset):
def __init__(self, hparams, type, is_tgt, tokenizer):
self.hparams = hparams
if hparams.clf_method == 'defend':
file_name = "./data/simple_text_defend.torch"
else:
file_name = "./data/simple_text.torch"
# file_name = "/home/yli29/FakeDetectionBaseline/data/simple_text.torch"
if os.path.exists(file_name):
data = torch.load(file_name)
for key, value in data.items():
setattr(self, key, json.loads(value))
else:
data = pd.read_csv(hparams.data_path)
if hparams.clf_method == 'defend':
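# NOTE: this branch relies on the spaCy pipeline; the `nlp = spacy.load('en_core_web_sm')`
# line near the imports must be uncommented for it to run. Each article is split
# into sentences, truncated/padded to `sentence_count`, and every sentence is
# encoded separately.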
news_content_list = data['content'].tolist()
news_sentences = [[j.text for j in nlp(i).sents] for i in news_content_list]
# news_sentences = [[j for j in i if len(j) > 10] for i in news_sentences]
# truncate the senteces
sentence_count = hparams.sentence_count
news_sentences = [i[:sentence_count] + (sentence_count - len(i))
* ["<pad>"] for i in news_sentences]
data_sentences = [[tokenizer.encode(j, max_length=hparams.max_sentence_length,
truncation=True,
pad_to_max_length=True
) for j in i] for i in news_sentences]
data['encode_text'] = data_sentences
else:
data['encode_text'] = data['content'].apply(lambda x: tokenizer.encode(x,
max_length=hparams.max_length,
truncation=True,
pad_to_max_length=True, ))
self.data = data.to_json(orient="records")
torch.save({"data":self.data
}, file_name)
# select specific domain dataset
self.data = [{**self.data[i], "index":i} for i in range(len(self.data))]
self.random_seed = hparams.random_seed
src_domain = hparams.src_domain
tgt_domain = hparams.tgt_domain
is_not_in = hparams.is_not_in
train_src_data, train_src_label, _, _, val_src_data, val_src_label = \
self.get_domain_data(self.data, src_domain, self.random_seed, is_not_in)
train_tgt_data, train_tgt_label, test_tgt_data, test_tgt_label, val_tgt_data, val_tgt_label = \
self.get_domain_data(self.data, tgt_domain, random_seed=self.random_seed, is_not_in=False)
# print("Attention We are using validation data as the training dataset")
# test_tgt_data = train_src_data
# test_tgt_label = train_src_label
# train_src_data = val_src_data
# train_src_label = val_src_label
if src_domain != tgt_domain and type == 'train' and hparams.is_few_shot:
train_data_tgt, train_label_tgt, _, _, _, _ = self.get_domain_data(self.data, tgt_domain, random_seed=self.random_seed)
hparams.tgt_train_size = float(hparams.tgt_train_size)
max_len = hparams.tgt_train_size if hparams.tgt_train_size > 1 else int(hparams.tgt_train_size * len(train_data_tgt))
max_len = int(max_len)
train_src_data += train_data_tgt[:max_len]
train_src_label += train_label_tgt[:max_len]
elif src_domain == tgt_domain and type == 'train' and hparams.is_few_shot:
max_len = hparams.tgt_train_size if hparams.tgt_train_size > 1 else int(
hparams.tgt_train_size * len(train_src_data))
max_len = int(max_len)
train_src_data = train_src_data[:max_len]
train_src_label = train_src_label[:max_len]
if type == "train":
self.features = [i[0] for i in train_src_data]
self.labels = train_src_label
return_element, train_tgt_data_notin = self.get_weak_labels(train_tgt_data)
if hparams.is_flip_label:
if hparams.is_only_weak:
self.labels = return_element['weak_label'].values.tolist()
self.features = return_element['encoded_text'].values.tolist()
else:
self.labels += return_element['weak_label'].values.tolist()
self.features += return_element['encoded_text'].values.tolist()
if self.hparams.is_get_clean_data:
select_data = train_tgt_data_notin.iloc[:self.hparams.clean_count, :]
new_index = select_data['index_new']
select_tgt_features = select_data[['encoded_text']].values.tolist()
select_tgt_labels = [train_tgt_label[i] for i in new_index]
assert len(select_tgt_labels) == self.hparams.clean_count
assert len(select_tgt_features) == self.hparams.clean_count
self.labels += select_tgt_labels
self.features += [i[0] for i in select_tgt_features]
elif type == 'test':
self.features = [i[0] for i in test_tgt_data]
self.labels = test_tgt_label
else:
self.features = [i[0] for i in val_tgt_data]
self.labels = val_tgt_label
# def get_weak_labels(self, tgt_train_data):
#
# data = pd.read_csv(self.hparams.weak_labels_path, header=None)
# data = data.rename(columns={0: "index", 1: 'weak_label'})
# tgt_train_data = pd.DataFrame(tgt_train_data, columns=['encoded_text', 'index'])
# tgt_train_data['index_new'] = list(range(len(tgt_train_data)))
# tgt_train_data_in = tgt_train_data.join(data.set_index('index'), how='inner', on='index')
# tgt_train_data_notin = tgt_train_data[
# tgt_train_data['index'].apply(lambda x: x not in set(data['index'].values.tolist()))]
# print("There are {} weak samples".format(len(tgt_train_data)))
# tgt_train_data_in['weak_label'] = tgt_train_data_in['weak_label'].apply(lambda x: int(x))
# tgt_train_data_in = tgt_train_data_in[['encoded_text', 'weak_label']]
#
# return tgt_train_data_in, tgt_train_data_notin
def get_weak_label_v2(self, tgt_train_data):
data = pd.read_csv(self.hparams.weak_labels_path, header=None)
data = data.loc[tgt_train_data['index'],:]
weak_labels = data[self.hparams.weak_fn].tolist()
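# Rank the target samples by their weak-supervision score: the lowest-scoring
# weak_label_count samples are weakly labelled 0, the highest-scoring ones 1,
# and everything else is left unlabelled.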
order = np.argsort(weak_labels)
zero_index = set(data.index[order[:self.hparams.weak_label_count]].tolist())
one_index = set(data.index[order[-self.hparams.weak_label_count:]].tolist())
tgt_train_data['index_new'] = list(range(len(tgt_train_data)))
def helper_fn(x):
if x in zero_index:
return 0
elif x in one_index:
return 1
else:
return np.nan
tgt_train_data['weak_label'] = tgt_train_data['index'].apply(lambda x: helper_fn(x))
tgt_train_data_notin = tgt_train_data[tgt_train_data['weak_label'].isna()]
tgt_train_data_in = tgt_train_data[tgt_train_data['weak_label'].notna()]
tgt_train_data_in = tgt_train_data_in[['encoded_text', 'weak_label', "domain"]]
return tgt_train_data_in, tgt_train_data_notin
def get_domain_data(self, self_data, domain, random_seed, is_not_in=False):
data = []
if is_not_in is False and "," not in domain:
for i in self_data:
if domain in i['domain']:
data.append(i)
# balance the dataset
one = [(i['encode_text'], i['index']) for i in data if i['label'] == 1]
zero = [(i['encode_text'], i['index']) for i in data if i['label'] == 0]
min_len = min(len(one), len(zero))
one = one[:min_len]
zero = zero[:min_len]
data = one + zero
# train_X, train_Y, test_X, test_Y, val_X, val_Y
train_data, train_label, test_data, test_label, val_data, val_label = data_split_val(data, [1] * min_len + [
0] * min_len)
return train_data, train_label, test_data, test_label, val_data, val_label
else:
train_data_list = []
train_label_list = []
test_data_list = []
test_label_list = []
val_data_list = []
val_label_list = []
if is_not_in:
domains = list(domain_map.keys())
domains = [i for i in domains if domain not in i]
else:
domains = domain.split(",")
for domain in domains:
train_data, train_label, test_data, test_label, val_data, val_label = self.get_domain_data(self_data,
domain,
random_seed=random_seed)
print("Domain: {} Train Size: {} Test Size {}".format(domain, len(train_data), len(test_data)))
train_data_list += train_data
train_label_list += train_label
test_data_list += test_data
test_label_list += test_label
val_data_list += val_data
val_label_list += val_label
return train_data_list, train_label_list, test_data_list, test_label_list, val_data_list, val_label_list
def __len__(self):
return len(self.labels)
def __getitem__(self, item):
return torch.tensor(self.labels[item], dtype=torch.long), torch.tensor(self.features[item], dtype=torch.long)
class AdvTextDataset(torch.utils.data.Dataset):
def __init__(self, hparams, type, tokenizer, weak_flag=False):
super(AdvTextDataset, self).__init__()
self.hparams = hparams
file_name = "./data/simple_text_roberta.torch"
if os.path.exists(file_name):
data = torch.load(file_name)
for key, value in data.items():
setattr(self, key, json.loads(value))
else:
if tokenizer is None:
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
data = pd.read_csv(hparams.data_path)
data['encode_text'] = data['content'].apply(lambda x: tokenizer.encode(x,
max_length=hparams.max_length,
truncation=True,
pad_to_max_length=True, ))
data['index'] = data.index
data = data.to_json(orient="records")
torch.save({"data": data}, file_name)
self.data = json.loads(data)
self.random_seed = hparams.random_seed
# select specific domain dataset
src_domain = hparams.src_domain
if "," in src_domain:
src_domain = src_domain.split(",")
tgt_domain = hparams.tgt_domain
is_not_in = hparams.is_not_in
train_src_data, train_src_label, test_src_data, test_src_label, val_src_data, val_src_label = self.get_domain_data(self.data, src_domain,
self.random_seed, is_not_in)
train_tgt_data, train_tgt_label, test_tgt_data, test_tgt_label, val_tgt_data, val_tgt_label = self.get_domain_data(self.data,
tgt_domain, random_seed=self.random_seed)
self.p_y = self.label_probability(train_tgt_label=torch.tensor(train_tgt_label))
debug = False
if type == "train":
src_features = [(i[0], i[2]) for i in train_src_data]
src_labels = train_src_label
tgt_features = []
tgt_labels = []
tgt_no_labels = []
tgt_no_features = []
train_tgt_data_in, train_tgt_data_notin = self.get_weak_label_v2(train_tgt_data)
if self.hparams.is_get_clean_data:
new_index = train_tgt_data_notin['index_new'].values.tolist()
zero_index = [i for i in new_index if train_tgt_label[i] == 0][:int(self.hparams.clean_count / 2)]
one_index = [i for i in new_index if train_tgt_label[i] == 1][:int(self.hparams.clean_count / 2)]
index = zero_index + one_index
train_tgt_data = | pd.DataFrame(train_tgt_data, columns=['encoded_text', 'index', 'domain']) | pandas.DataFrame |
import streamlit as st
import pandas as pd
import altair as alt
def clean_summary_data(file_str:str, name:str):
input_df = pd.read_csv(
file_str,
names=['1', '2','3','type','ministry','source','amount'],
thousands=',')
input_df[['amount']] = input_df[['amount']].fillna(value='EMPTY')
input_df = (input_df.fillna(method='ffill') # populate columns with previous value
.drop(['1','2'], axis=1) # drop unused columns
.drop([0,1,2,3,]) # drop first unused rows
)
# remove other income / expenses
input_df.drop(input_df[input_df['3'] == 'Other Expense'].index, inplace = True)
input_df.drop(input_df[input_df['3'] == 'Other Income'].index, inplace = True)
input_df.drop(input_df[input_df['3'] == 'Total Other Income'].index, inplace = True)
input_df.drop(input_df[input_df['3'] == 'Total Other Expense'].index, inplace = True)
input_df = input_df.drop(['3'], axis=1)
# exclude Transfer from Invested Funds
# input_df.drop(input_df[input_df['source'] == 'Transfer from Invested Funds'].index, inplace = True)
# input_df.drop(input_df[input_df['source'] == 'Endowment Fund earnings'].index, inplace = True)
# input_df.drop(input_df[input_df['source'] == 'Endowment Fund earnings'].index, inplace = True)
# remove summary income fields
# input_df.drop(input_df[input_df['ministry'] == 'Transfer from Invested Funds'].index, inplace = True)
input_df.drop(input_df[input_df['ministry'].str.startswith('Total')].index, inplace = True)
input_df.drop(input_df[input_df['type'].str.startswith('Total')].index, inplace = True)
input_df.drop(input_df[input_df['amount'] == 'EMPTY'].index, inplace = True)
# set all amount types to float
input_df['amount'] = input_df['amount'].astype(float)
# input_df = input_df.set_index(['type','ministry', 'source'])
input_df.set_index(['type','ministry','source'])
input_df = input_df.rename(columns={'amount': name})
# Capture Guest Pastors
guest_pastors_loc = input_df.index[input_df['ministry'] == 'Guest Pastors'].tolist()
if guest_pastors_loc:
input_df.loc[guest_pastors_loc[0], 'source'] = 'Guest Pastors'
input_df.loc[guest_pastors_loc[0], 'ministry'] = 'Pastoral Ministry'
# Capture Severance Pay
severance_loc = input_df.index[input_df['ministry'] == 'Severance Pay'].tolist()
if severance_loc:
input_df.loc[severance_loc[0], 'source'] = 'Severance Pay'
input_df.loc[severance_loc[0], 'ministry'] = 'Pastoral Ministry'
# print(input_df)
return input_df
# @st.cache
def get_UN_data():
data_2018 = clean_summary_data('./data/2018-summary.csv', '2018')
data_2019 = clean_summary_data('./data/2019-summary.csv', '2019')
data_2020 = clean_summary_data('./data/2020-summary.csv', '2020')
# data_2018.join(data_2019, lsuffix='2018')
# data_2018.join(data_2019, lsuffix='2018')
data = | pd.merge(data_2018, data_2019, how='outer') | pandas.merge |
#!/usr/bin/env python3
import argparse
import numpy as np
import matplotlib.pyplot as plt
import pandas
from matplotlib.pyplot import cm
import os
import seaborn as sns
from collections import defaultdict
SEQUENCE_IDENTITY_IDX = 13
ALIGNMENT_IDENTITY_IDX = 14
SAMPLE = "Sample"
SEQUENCE_IDENTITY = "Sequence Identity"
ALIGNMENT_IDENTITY = "Alignment Identity"
READ_LENGTH_GRANULARITY = 200
KB = 1000
GB = 1000000000
PLOT_TO_PDF=True
if PLOT_TO_PDF:
plt.style.use('ggplot')
text_fontsize = 8
# plt.rcParams['ytick.labelsize']=text_fontsize+4
plt.rcParams.update({'font.size': text_fontsize})
plt.rcParams['pdf.fonttype'] = 42
plt.switch_backend('agg')
def parse_args(args = None):
parser = argparse.ArgumentParser("Plots read identity and read length information from identity CSV files")
parser.add_argument('--input', '-i', dest='input_csvs', default=None, required=True, type=str, action='append',
help='Input read identity CSV files (can list multiple)')
parser.add_argument('--identifier', '-I', dest='identifiers', default=None, required=False, type=str, action='append',
help='Input identifiers (can list multiple)')
parser.add_argument('--figure_name', '-f', dest='figure_name', default="output", required=False, type=str,
help='Figure name')
return parser.parse_args() if args is None else parser.parse_args(args)
def main():
args = parse_args()
id_list = []
all_identities = list()
max_read_length = 0
# for read length plot
fig, ax = plt.subplots(figsize=(6, 6))
colors = iter(cm.rainbow(np.linspace(0, 1, len(args.input_csvs))))
for i, csv in enumerate(args.input_csvs):
id = csv
if args.identifiers is not None and i < len(args.identifiers):
id = args.identifiers[i]
id_list.append(id)
print("Reading {} with ID {}".format(csv, id))
with open(csv) as fin:
read_lengths = defaultdict(lambda: 0)
for j, line in enumerate(fin):
line = line.strip().split(sep=",")
if j == 0:
if line[SEQUENCE_IDENTITY_IDX] != "sequence_identity" or line[ALIGNMENT_IDENTITY_IDX] != "alignment_identity":
raise Exception("Unexpected identity headers: {}".format(line))
continue
if len(line) in (0,2): continue
seq_iden = float(line[SEQUENCE_IDENTITY_IDX])
aln_iden = float(line[ALIGNMENT_IDENTITY_IDX])
# save rows
row = [id, seq_iden, aln_iden]
all_identities.append(row)
# row = ["All", seq_iden, aln_iden]
# all_identities.append(row)
# read length
length = abs(int(line[4]) - int(line[3]))
read_lengths[int(round(length/READ_LENGTH_GRANULARITY))] += 1
color = next(colors)
total_coverage = 0
curr_len = max(read_lengths.keys())
max_read_length = max([curr_len, max_read_length])
first = True
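# Walk the length bins from longest to shortest, accumulating aligned bases,
# so each step draws one segment of a reverse-cumulative (Nx-style) curve:
# x = read length in kb, y = total aligned sequence (Gb) in reads at least that long.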
while curr_len > 0:
current_len_sequence = curr_len * READ_LENGTH_GRANULARITY * read_lengths[curr_len]
new_total_coverage = total_coverage + current_len_sequence
if (first):
ax.vlines(curr_len*READ_LENGTH_GRANULARITY/KB, total_coverage/GB, new_total_coverage/GB, alpha=.5,
color=color, label=id)
ax.hlines(new_total_coverage/GB, curr_len*READ_LENGTH_GRANULARITY/KB, (curr_len-1)*READ_LENGTH_GRANULARITY/KB, alpha=.5, color=color)
first = False
else:
ax.vlines(curr_len*READ_LENGTH_GRANULARITY/KB, total_coverage/GB, new_total_coverage/GB, alpha=.5,
color=color)
ax.hlines(new_total_coverage/GB, curr_len*READ_LENGTH_GRANULARITY/KB, (curr_len-1)*READ_LENGTH_GRANULARITY/KB, alpha=.5, color=color)
total_coverage = new_total_coverage
curr_len -= 1
plt.legend()
if max_read_length*READ_LENGTH_GRANULARITY > 100000:
ax.set_xlim(0, 100)
ax.set_ylabel("Total Aligned Sequence (Gb)")
ax.set_xlabel("Read Length (kb)")
if PLOT_TO_PDF:
plt.savefig("{}.read_nx.pdf".format(args.figure_name), dpi=300)
else:
plt.savefig("{}.read_nx.png".format(args.figure_name))
plt.show()
plt.close()
print("Plotting identity violins")
columns = [SAMPLE, SEQUENCE_IDENTITY, ALIGNMENT_IDENTITY]
median = np.median(list(map(lambda x: x[2], all_identities)))
mean = np.mean(list(map(lambda x: x[2], all_identities)))
fig, ax = plt.subplots(figsize=(3*len(id_list), 6))
df = | pandas.DataFrame(all_identities, columns=columns) | pandas.DataFrame |
from .mcmcposteriorsamplergamma import fit
from scipy.stats import norm, gamma
import pandas as pd
import numpy as np
import pickle as pk
from ..shared_functions import *
class mcmcsamplergamma:
"""
Class for the mcmc sampler of the deconvolution gaussian model
"""
def __init__(self, K=1, Kc=1, alpha = 1, alphac = 1):
"""
Constructor of the class
Parameters
-------------
K: int, Number of components of the noise distribution
Kc: int, Number of components of the convolved distribution
**kwargs:
alpha: float, parameter to determine the hyperprior of the noise weight components
alphac: float, parameter to determine the hyperprior of the target weight components
"""
self.K = K
self.Kc = Kc
self.alpha = alpha
self.alphac = alphac
self.fitted = False
return
def fit(self, dataNoise, dataConvolution, iterations = 1000, ignored_iterations = 1000, chains = 1,
priors = None,
precission = 0.99, method = "moments", bias = None,
initial_conditions = [], show_progress = True, seed = 0):
"""
Fit the model to the posterior distribution
Parameters
-------------
dataNoise: list/npArray, 1D array with the data of the noise
dataConvolution: list/npArray, 1D array with the data of the convolution
iterations: int, number of samples to be drawn and stored for each chain during the sampling
ignored_iterations: int, number of samples to be drawn and ignored for each chain during the sampling
chains: int, number of independently initialised realisations of the markov chain
priors: array, parameters of the prior gamma distributions, following the Wikipedia parameterisation
initial_conditions: list, 1D array with all the parameters required to manually initialise all the components of all the chains
show_progress: bool, indicate if the method should show the progress in the generation of the new data
seed: int, value to initialise the random generator and obtain reproducible results
Returns
---------------
Nothing
"""
self.data = dataNoise
self.datac = dataConvolution
self.iterations = iterations
self.ignored_iterations = ignored_iterations
self.chains = chains
if bias == None:
m = np.min([dataNoise,dataConvolution])
if m < 0:
self.bias = m - 0.01
else:
self.bias = 0
elif bias < np.min([dataNoise,dataConvolution]):
self.bias = bias
else:
self.bias = np.min([dataNoise,dataConvolution])*0.9999
if priors==None:
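# With no explicit priors, weakly informative gamma hyperpriors are derived
# from the sample mean and variance of each (bias-corrected) data set.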
m = np.mean(dataNoise-self.bias)
v = np.var(dataNoise-self.bias)
self.priortheta_theta = 100*v/m
self.priork_theta = 100*v/m
self.priortheta_k = 1.1
self.priork_k = 1.1
m = np.mean(dataConvolution-self.bias)
v = np.var(dataConvolution-self.bias)
self.priortheta_thetac = 100*v/m
self.priork_thetac = 100*v/m
self.priortheta_kc = 1.1
self.priork_kc = 1.1
self.precission = precission
self.method = method
self.samples = np.array(fit(dataNoise-self.bias, dataConvolution-self.bias,
self.ignored_iterations, self.iterations, self.chains,
self.K, self.Kc,
self.alpha, self.alphac,
self.priortheta_k, self.priortheta_theta, self.priork_k, self.priork_theta,
self.priortheta_kc, self.priortheta_thetac, self.priork_kc, self.priork_thetac,
0,
self.precission, self.method,
initial_conditions, show_progress, seed))
self.fitted = True
return
def save(self, name):
"""
Pickle save the model.
Parameters
----------------
name: string, name in which to store the model
Return:
nothing
"""
if self.fitted:
pickling_on = open(name+".pickle","wb")
pk.dump({"K":self.K, "Kc":self.Kc, "alpha": self.alpha, "alphac": self.alphac, "iterations": self.iterations,
"ignored_iterations": self.ignored_iterations,
"priortheta_k": self.priortheta_k, "priortheta_theta": self.priortheta_theta, "priork_k": self.priork_k,
"priork_theta": self.priork_theta, "priortheta_kc": self.priortheta_kc, "priortheta_thetac": self.priortheta_thetac,
"priortheta_thetac": self.priortheta_thetac, "priork_thetac": self.priork_thetac,
"bias":self.bias, "chains":self.chains, "samples":self.samples}, pickling_on)
pickling_on.close()
else:
print("The model has not been fitted so there is nothing to save.")
return
def load(self, name):
"""
Pickle load the model.
Parameters
----------------
name: string, name from which to recover the model
Return:
nothing
"""
pickle_off = open(name+".pickle","rb")
aux = pk.load(pickle_off)
pickle_off.close()
self.K = aux["K"]
self.Kc = aux ["Kc"]
self.alpha = aux["alpha"]
self.alphac = aux["alphac"]
self.iterations = aux["iterations"]
self.ignored_iterations = aux["ignored_iterations"]
self.chains = aux["chains"]
self.samples = aux["samples"]
self.priortheta_k = aux["priortheta_k"]
self.priortheta_theta = aux["priortheta_theta"]
self.priork_k = aux["priork_k"]
self.priork_theta = aux["priork_theta"]
self.priortheta_kc = aux["priortheta_kc"]
self.priortheta_thetac = aux["priortheta_thetac"]
self.priortheta_thetac = aux["priortheta_thetac"]
self.priork_thetac = aux["priork_thetac"]
self.bias = aux["bias"]
self.fitted = True
return
def sample_autofluorescence(self, size = 1, style = "full", pos = None):
"""
Generate samples from the fitted posterior distribution according to the noise distribution
Parameters
-------------
size: int, number of samples to be drawn
Returns
-------------
list: list, 1D array with *size* samples from the model
"""
if style=="full":
return np.array(sample_autofluorescence_gamma(self.samples,self.K,self.Kc,size=size, bias=0))+self.bias
elif style=="single":
if pos == None:
pos = np.random.choice(range(len(self.samples)))
return np.array(sample_autofluorescence_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
else:
return np.array(sample_autofluorescence_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
# return np.array(sample_autofluorescence_gamma(self.samples,self.K,self.Kc,size))
def sample_deconvolution(self, size = 1, style = "full", pos = None):
"""
Generate samples from the fitted posterior distribution according to the deconvolved distribution
Parameters
-------------
size: int, number of samples to be drawn
Returns
-------------
list: list, 1D array with *size* samples from the model
"""
if style=="full":
return np.array(sample_deconvolution_gamma(self.samples,self.K,self.Kc,size=size, bias=0))+self.bias
elif style=="single":
if pos == None:
pos = np.random.choice(range(len(self.samples)))
return np.array(sample_deconvolution_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
else:
return np.array(sample_deconvolution_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
# return np.array(sample_deconvolution_gamma(self.samples,self.K,self.Kc,size))
def sample_convolution(self, size = 1, style = "full", pos = None):
"""
Generate samples from the fitted posterior distribution according to the convolved distribution
Parameters
-------------
size: int, number of samples to be drawn
Returns
-------------
list: list, 1D array with *size* samples from the model
"""
if style=="full":
return np.array(sample_convolution_gamma(self.samples,self.K,self.Kc,size=size, bias=0))+self.bias
elif style=="single":
if pos == None:
pos = np.random.choice(range(len(self.samples)))
return np.array(sample_convolution_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
else:
return np.array(sample_convolution_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
# return np.array(sample_convolution_gamma(self.samples,self.K,self.Kc,size))
def score_autofluorescence(self, x, percentiles = [5, 95], size = 100):
"""
Evaluate the mean and percentiles of the pdf at the given positions according to the noise (autofluorescence) distribution
Parameters
-------------
x: list/array, positions where to evaluate the distribution
percentiles: list/array, percentiles to be evaluated
size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
Returns
-------------
list: list, 2D array with the mean and all the percentile evaluations at all points in x
"""
yT = []
for l in range(size):
i = np.random.choice(self.iterations)
y = np.zeros(len(x))
for k in range(self.K):
thetastar = self.samples[i,self.K+k]
kconststar = self.samples[i,2*self.K+k]
y += self.samples[i,k]*gamma.pdf(x,a=kconststar,scale=thetastar)
yT.append(y)
return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
def score_deconvolution(self, x, percentiles = [5, 95], size = 100):
"""
Evaluate the mean and percentiles of the pdf at the given positions according to the deconvolved distribution
Parameters
-------------
x: list/array, positions where to evaluate the distribution
percentiles: list/array, percentiles to be evaluated
size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
Returns
-------------
list: list, 2D array with the mean and all the percentile evaluations at all points in x
"""
yT = []
for l in range(size):
i = np.random.choice(self.iterations)
y = np.zeros(len(x))
for j in range(self.Kc):
thetastar = self.samples[i,3*self.K+self.Kc+j]
kconststar = self.samples[i,3*self.K+2*self.Kc+j]
y += self.samples[i,3*self.K+j]*gamma.pdf(x,a=kconststar,scale=thetastar)
yT.append(y)
return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
def score_convolution(self, x, percentiles = [5, 95], size = 100):
"""
Evaluate the mean and percentiles of the pdf at the given positions according to the convolved distribution
Parameters
-------------
x: list/array, positions where to evaluate the distribution
percentiles: list/array, percentiles to be evaluated
size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
Returns
-------------
list: list, 2D array with the mean and all the percentile evaluations at all points in x
"""
yT = []
for l in range(size):
i = np.random.choice(self.iterations)
y = np.zeros(len(x))
for j in range(self.Kc):
for k in range(self.K):
theta1 = self.samples[i,self.K+k]
theta2 = self.samples[i,3*self.K+self.Kc+j]
k1 = self.samples[i,2*self.K+k]
k2 = self.samples[i,3*self.K+2*self.Kc+j]
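                    # moment-match the convolution of Gamma(k1, theta1) and Gamma(k2, theta2) by a single
                    # gamma component with the same mean (theta1*k1 + theta2*k2) and variance
                    # (theta1^2*k1 + theta2^2*k2)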
mu = theta1*k1+theta2*k2
s = theta1*theta1*k1+theta2*theta2*k2
thetastar = s/mu
kconststar = mu*mu/s
y += self.samples[i,k]*self.samples[i,3*self.K+j]*gamma.pdf(x,a=kconststar,scale=thetastar)
yT.append(y)
return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
def sampler_statistics(self, sort="weight"):
"""
        Show statistics that assess the mixing of the MCMC sampler
Args:
sort: ["weight", "none", "means"], method for sorting the samples from the different chains
Returns
-------------
        DataFrame: DataFrame with the mean, std, percentiles, mixing ratio (Rhat) and effective number of samples for each parameter of the model
"""
self.sampler_statistics = | pd.DataFrame(columns=["Mean","Std","5%","50%","95%","Rhat","Neff"]) | pandas.DataFrame |
import tkinter as tk
import item_database
import transactions_database
import all_transactions_database
from tkintertable import TableCanvas, TableModel
import datetime
import pandas as pd
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from decimal import *
import win32print
import win32api
import win32con
import os.path
import itertools
import pygsheets
# COLORS
BACKGROUND_FRAME_COLOR = '#42423f'
BUTTON_AND_LABEL_COLOR = '#adada6'
BACK_BUTTON_COLOR = '#d93027'
ENTRY_COLOR = '#d9d1d0'
# Return & Discount window
TK_INPUT_WIN_H = 250
TK_INPUT_WIN_W = 250
TK_INPUT_BG = '#575353'
FG_LABELS_COLOR = '#ffffff'
ONLINE_IND_COLOR = '#5dc77a'
OFFLINE_IND_COLOR = '#ed4c40'
YES_BTN = '#82ba6e'
NO_BTN = '#b52438'
# font
FONT = ('Courier', 15, 'bold')
# --------------- PRELIMINARY SETUP --------------- #
# globals
global PRINTER_NAME, G_INV_SH_NAME, G_TRAN_SH_NAME, GC, SH, WKS, SH_T, WKS_T, COLUMNS_GOOGLE_INVENTORY, ALL_TRANSACTIONS
global INVENTORY_DF, ON_OFF_CYC
# read inputs text file
input_file = open('inputs.txt', 'r')
for idx, line in enumerate(input_file.readlines()):
value = line.split("=")[1]
if idx == 0:
PRINTER_NAME = value[2:-2]
elif idx == 1:
G_INV_SH_NAME = value[2:-2]
elif idx == 2:
G_TRAN_SH_NAME = value[2:-1]
else:
COLUMNS_GOOGLE_INVENTORY = value.split('[')[1].split(']')[0].split(', ')
# authorize google sheets
if os.path.isfile('creds.json'):
try:
GC = pygsheets.authorize(service_file='creds.json')
except Exception as e:
print("Something went Wrong while getting authorizing credentials file:", str(e))
if len(G_INV_SH_NAME) > 0:
try:
print("Trying to open the google inventory file...")
SH = GC.open(G_INV_SH_NAME)
WKS = SH[0]
print("Successfully opened the google inventory file!")
except Exception as e:
print("Something went wrong while opening the google inventory file:", str(e))
if len(G_TRAN_SH_NAME) > 0:
try:
print("Trying to open the google transactions file...")
SH_T = GC.open(G_TRAN_SH_NAME)
WKS_T = SH_T[0]
print("Successfully opened the google transactions file!")
except Exception as e:
print("Something went wrong while opening the google transactions file:", str(e))
else:
print("You don't yet have a google sheets API set up. Follow this link to set one up:\n"
"https://developers.google.com/sheets/api/quickstart/python")
""" Checking whether inventory & transactions excel files exist already,
if not, then create it. Either way, store the data into data frames. """
if not os.path.isfile('Transactions.xlsx'):
header_df = pd.DataFrame({'Name': [], 'S.Price': [], 'Date': [], 'P.Type': [],
'Total': []})
header_df.to_excel('Transactions.xlsx', index=False)
ALL_TRANSACTIONS = pd.read_excel('Transactions.xlsx')
""" Clean up the database for all transactions, because we want to give priority to changes
done in the transactions excel file. """
all_transactions_database.deleteData()
# Next, add the data in transactions file into the database
for idx, name in enumerate(list(ALL_TRANSACTIONS['Name'])):
all_transactions_database.addData(name, str(ALL_TRANSACTIONS['S.Price'][idx]), str(ALL_TRANSACTIONS['Date'][idx]),
str(ALL_TRANSACTIONS['P.Type'][idx]), str(ALL_TRANSACTIONS['Total'][idx]))
# Repeating above for Inventory excel file
if not os.path.isfile('Inventory.xlsx'):
header_df = pd.DataFrame({'Name': [], 'Barcode': [], 'S.Price': [], 'P.Price': [], 'Quantity': [],
'Online_Price': [], 'Tax': []})
header_df.to_excel('Inventory.xlsx', index=False)
INVENTORY_DF = pd.read_excel('Inventory.xlsx')
item_database.deleteData()
for idx, name in enumerate(list(INVENTORY_DF['Name'])):
item_database.addData(name, str(INVENTORY_DF['Barcode'][idx]), str(INVENTORY_DF['P.Price'][idx]),
str(INVENTORY_DF['S.Price'][idx]), str(INVENTORY_DF['Quantity'][idx]),
str(INVENTORY_DF['Online_Price'][idx]), str(INVENTORY_DF['Tax'][idx]))
# Initializing cyclical iterator for online/offline label
ON_OFF_CYC = itertools.cycle('of')
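# cycle('of') alternates between the characters 'o' and 'f', which is used to toggle the online/offline indicator label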
# Open receipt.txt and empty it in case of future runs
open('receipt.txt', 'w').write('')
# Initializing a list which will encompass the items to which discount is added
discount_added = []
# --------------- Helper Functions --------------- #
def update_all_transaction_df():
"""will take in any modifications done to the database, and also help add a tax column to the excel file."""
all_transactions_data = {"Name": [names[0] for names in all_transactions_database.getNames()],
"S.Price": [s_prices[0] for s_prices in all_transactions_database.getSPrices()],
"Date": [dates[0] for dates in all_transactions_database.getDates()],
"P.Type": [p_types[0] for p_types in all_transactions_database.getPTypes()],
"Total": [tots[0] for tots in all_transactions_database.getTotals()]}
new_all_transactions_df = pd.DataFrame(data=all_transactions_data)
new_all_transactions_df['Total'] = pd.to_numeric(new_all_transactions_df['Total'], errors='coerce')
# adding taxes column to be added into the excel file
taxes = []
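    # recompute the tax column: items flagged taxable ('T') get 8.25% sales tax on top of the sale price,
    # while 'Misc.' items are treated as tax-inclusive, so the tax portion is backed out of the gross amount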
for i in range(len(new_all_transactions_df)):
if str(item_database.getTaxableFromNameAndSP(new_all_transactions_df['Name'][i],
new_all_transactions_df['S.Price'][i])) == 'T':
t = Decimal(Decimal(new_all_transactions_df['S.Price'][i]) * Decimal(0.0825)).quantize(Decimal('.01'))
taxes.append(t)
elif str(new_all_transactions_df['Name'][i]) == 'Misc.':
real_p = Decimal(Decimal(new_all_transactions_df['S.Price'][i]) / Decimal(1.0825)).quantize(Decimal('.01'))
t = Decimal(Decimal(new_all_transactions_df['S.Price'][i]) - real_p).quantize(Decimal('.01'))
taxes.append(t)
else:
taxes.append(Decimal(0.0))
new_all_transactions_df['Tax'] = taxes
new_all_transactions_df['Tax'] = | pd.to_numeric(new_all_transactions_df['Tax']) | pandas.to_numeric |
import requests as r
import zipfile
import io
import json
import pandas as pd
from datetime import date, datetime, timedelta
from dateutil.parser import parse
from QualtricsAPI.Setup import Credentials
from QualtricsAPI.JSON import Parser
from QualtricsAPI.Exceptions import Qualtrics500Error, Qualtrics503Error, Qualtrics504Error, Qualtrics400Error, Qualtrics401Error, Qualtrics403Error
import warnings
class Responses(Credentials):
'''This is a child class to the credentials class that gathers the survey responses from Qualtrics surveys'''
def __init__(self):
return
def setup_request(self, file_format='csv', survey=None):
''' This method sets up the request and handles the setup of the request for the survey.'''
assert survey != None, 'Hey There! The survey parameter cannot be None. You need to pass in a survey ID as a string into the survey parameter.'
assert isinstance(survey, str) == True, 'Hey There! The survey parameter must be of type string.'
        assert len(survey) == 18, 'Hey there! It looks like your survey ID is not the correct length. It needs to be 18 characters long. Please try again.'
assert survey[:3] == 'SV_', 'Hey there! It looks like your survey ID is incorrect. You can find the survey ID on the Qualtrics site under your account settings. Please try again.'
headers, url = self.header_setup(content_type=True, xm=False, path='responseexports/')
payload = {"format": file_format, "surveyId": survey}
request = r.request("POST", url, data=json.dumps(payload), headers=headers)
response = request.json()
try:
progress_id = response['result']['id']
return progress_id, url, headers
except:
print(f"ServerError:\nError Code: {response['meta']['error']['errorCode']}\nError Message: {response['meta']['error']['errorMessage']}")
def send_request(self, file_format='csv', survey=None):
'''This method sends the request, and sets up the download request.'''
file = None
progress_id, url, headers = self.setup_request(file_format=file_format, survey=survey)
check_progress = 0
progress_status = "in progress"
while check_progress < 100 and (progress_status != "complete") and (file is None):
check_url = url + progress_id
check_response = r.request("GET", check_url, headers=headers)
file = check_response.json()["result"]["file"]
check_progress = check_response.json()["result"]["percentComplete"]
download_url = url + progress_id + '/file'
download_request = r.get(download_url, headers=headers, stream=True)
return download_request
def get_responses(self, survey=None):
'''This function accepts the survey id, and returns the survey responses associated with that survey.
:param survey: This is the id associated with a given survey.
:return: a Pandas DataFrame
'''
        warnings.warn('This method is being actively deprecated. Please migrate your code over to the new V3 method "Responses().get_survey_responses".', DeprecationWarning, stacklevel=2)
download_request = self.send_request(file_format='csv', survey=survey)
with zipfile.ZipFile(io.BytesIO(download_request.content)) as survey_zip:
for s in survey_zip.infolist():
df = pd.read_csv(survey_zip.open(s.filename))
return df
def get_questions(self, survey=None):
'''This method returns a DataFrame containing the survey questions and the Question IDs.
:param survey: This is the id associated with a given survey.
:return: a Pandas DataFrame
'''
        warnings.warn('This method is being actively deprecated. Please migrate your code over to the new V3 method "Responses().get_survey_questions".', DeprecationWarning, stacklevel=2)
df = self.get_responses(survey=survey)
questions = pd.DataFrame(df[:1].T)
questions.columns = ['Questions']
return questions
# Version 3 Code
def setup_request_v3(self, survey=None, payload=None):
''' This method sets up the request and handles the setup of the request for the survey.'''
assert survey != None, 'Hey There! The survey parameter cannot be None. You need to pass in a survey ID as a string into the survey parameter.'
assert isinstance(survey, str) == True, 'Hey There! The survey parameter must be of type string.'
        assert len(survey) == 18, 'Hey there! It looks like your survey ID is not the correct length. It needs to be 18 characters long. Please try again.'
assert survey[:3] == 'SV_', 'Hey there! It looks like your survey ID is incorrect. You can find the survey ID on the Qualtrics site under your account settings. Please try again.'
headers, url = self.header_setup(content_type=True, xm=False, path=f'surveys/{survey}/export-responses/')
request = r.request("POST", url, data=json.dumps(payload), headers=headers)
response = request.json()
try:
if response['meta']['httpStatus'] == '500 - Internal Server Error':
raise Qualtrics500Error('500 - Internal Server Error')
elif response['meta']['httpStatus'] == '503 - Temporary Internal Server Error':
raise Qualtrics503Error('503 - Temporary Internal Server Error')
elif response['meta']['httpStatus'] == '504 - Gateway Timeout':
raise Qualtrics504Error('504 - Gateway Timeout')
elif response['meta']['httpStatus'] == '400 - Bad Request':
raise Qualtrics400Error('Qualtrics Error\n(Http Error: 400 - Bad Request): There was something invalid about the request.')
elif response['meta']['httpStatus'] == '401 - Unauthorized':
raise Qualtrics401Error('Qualtrics Error\n(Http Error: 401 - Unauthorized): The Qualtrics API user could not be authenticated or does not have authorization to access the requested resource.')
elif response['meta']['httpStatus'] == '403 - Forbidden':
raise Qualtrics403Error('Qualtrics Error\n(Http Error: 403 - Forbidden): The Qualtrics API user was authenticated and made a valid request, but is not authorized to access this requested resource.')
except (Qualtrics500Error, Qualtrics503Error, Qualtrics504Error, Qualtrics400Error, Qualtrics401Error, Qualtrics403Error) as e:
return print(e)
else:
progress_id = response['result']['progressId']
return progress_id, url, headers
# Version 3 Code
def send_request_v3(self, survey=None, payload=None):
'''This method sends the request, and sets up the download request.'''
is_file = None
progress_id, url, headers = self.setup_request_v3(survey=survey, payload=payload)
progress_status = "in progress"
while progress_status != "complete" and progress_status != "failed" and is_file is None:
check_url = url + progress_id
check_request = r.request("GET", check_url, headers=headers)
check_response = check_request.json()
try:
is_file = check_response["result"]["fileId"]
except KeyError:
pass
progress_status = check_response["result"]["status"]
try:
if check_response['meta']['httpStatus'] == '500 - Internal Server Error':
raise Qualtrics500Error('500 - Internal Server Error')
elif check_response['meta']['httpStatus'] == '503 - Temporary Internal Server Error':
raise Qualtrics503Error('503 - Temporary Internal Server Error')
elif check_response['meta']['httpStatus'] == '504 - Gateway Timeout':
raise Qualtrics504Error('504 - Gateway Timeout')
elif check_response['meta']['httpStatus'] == '400 - Bad Request':
raise Qualtrics400Error('Qualtrics Error\n(Http Error: 400 - Bad Request): There was something invalid about the request.')
elif check_response['meta']['httpStatus'] == '401 - Unauthorized':
raise Qualtrics401Error('Qualtrics Error\n(Http Error: 401 - Unauthorized): The Qualtrics API user could not be authenticated or does not have authorization to access the requested resource.')
elif check_response['meta']['httpStatus'] == '403 - Forbidden':
raise Qualtrics403Error('Qualtrics Error\n(Http Error: 403 - Forbidden): The Qualtrics API user was authenticated and made a valid request, but is not authorized to access this requested resource.')
except (Qualtrics500Error, Qualtrics503Error, Qualtrics504Error, Qualtrics400Error, Qualtrics401Error, Qualtrics403Error) as e:
return print(e)
else:
download_url = url + is_file + '/file'
download_request = r.get(download_url, headers=headers, stream=True)
return download_request
# Version 3 Code
def get_survey_responses(self, survey=None, **kwargs):
'''This function accepts the survey id, and returns the survey responses associated with that survey.
:param useLabels: Instead of exporting the recode value for the answer choice, export the text of the answer choice. For more information on recode values, see Recode Values on the Qualtrics Support Page.
:type useLabels: bool
        :param includeLabelColumns: For columns that have answer labels, export two columns: one that uses recode values and one that uses labels. The label column will have an IsLabelsColumn field in the 3rd header row. Note that this cannot be used with useLabels.
:type includeLabelColumns: bool
:param exportResponsesInProgress: Export only responses-in-progress.
:type exportResponsesInProgress: bool
        :param limit: Maximum number of responses exported. This begins with the first survey responses received. So a limit of 10 would return the survey's first 10 responses.
:type limit: int
:param seenUnansweredRecode: Recode seen-but-unanswered questions with this value.
:type seenUnansweredRecode: int
:param multiselectSeenUnansweredRecode: Recode seen-but-unanswered choices for multi-select questions with this value. If not set, this will be the seenUnansweredRecode value.
:type multiselectSeenUnansweredRecode: int
:param includeDisplayOrder: If true, include display order information in your export. This is useful for surveys with randomization.
:type includeDisplayOrder: bool
        :param endDate: Only export responses recorded before the specified UTC date. Example Format: ('%Y-%m-%dT%H:%M:%SZ' => 2020-01-13T12:30:00Z)
:type endDate: str
:param startDate: Only export responses recorded after the specified UTC date. Example Format: ('%Y-%m-%dT%H:%M:%SZ'=> 2020-01-13T12:30:00Z)
:type startDate: str
        :param timeZone: Timezone used to determine response date values. If this parameter is not provided, dates will be exported in UTC/GMT. See (https://api.qualtrics.com/instructions/docs/Instructions/dates-and-times.md) for the available timeZones.
        :type timeZone: str
:param survey: This is the id associated with a given survey.
:return: a Pandas DataFrame
'''
dynamic_payload = {"format": 'csv'}
for key in list(kwargs.keys()):
assert key in ['useLabels', 'includeLabelColumns', 'exportResponsesInProgress', 'limit', 'seenUnansweredRecode', 'multiselectSeenUnansweredRecode', 'includeDisplayOrder', 'startDate', 'endDate', 'timeZone'], "Hey there! You can only pass in parameters with names in the list, ['useLabels', 'includeLabelColumns', 'exportResponsesInProgress', 'limit', 'seenUnansweredRecode', 'multiselectSeenUnansweredRecode', 'includeDisplayOrder', 'startDate', 'endDate', 'timeZone']"
if key == 'useLabels':
assert 'includeLabelColumns' not in list(kwargs.keys()), 'Hey there, you cannot pass both the "includeLabelColumns" and the "useLabels" parameters at the same time. Please pass just one and try again.'
assert isinstance(kwargs['useLabels'], bool), 'Hey there, your "useLabels" parameter needs to be of type "bool"!'
dynamic_payload.update({'useLabels': kwargs[(key)]})
elif key == 'exportResponsesInProgress':
assert isinstance(kwargs['exportResponsesInProgress'], bool), 'Hey there, your "exportResponsesInProgress" parameter needs to be of type "bool"!'
dynamic_payload.update({'exportResponsesInProgress': kwargs[(key)]})
elif key == 'limit':
assert isinstance(kwargs['limit'], int), 'Hey there, your "limit" parameter needs to be of type "int"!'
dynamic_payload.update({'limit': kwargs[(key)]})
elif key == 'seenUnansweredRecode':
assert isinstance(kwargs['seenUnansweredRecode'], int), 'Hey there, your "seenUnansweredRecode" parameter needs to be of type "int"!'
dynamic_payload.update({'seenUnansweredRecode': kwargs[(key)]})
elif key == 'multiselectSeenUnansweredRecode':
assert isinstance(kwargs['multiselectSeenUnansweredRecode'], int), 'Hey there, your "multiselectSeenUnansweredRecode" parameter needs to be of type "int"!'
dynamic_payload.update({'multiselectSeenUnansweredRecode': kwargs[(key)]})
elif key == 'includeLabelColumns':
assert isinstance(kwargs['includeLabelColumns'], bool), 'Hey there, your "includeLabelColumns" parameter needs to be of type "bool"!'
assert 'useLabels' not in list(kwargs.keys()), 'Hey there, you cannot pass both the "includeLabelColumns" and the "useLabels" parameters at the same time. Please pass just one and try again.'
dynamic_payload.update({'includeLabelColumns': kwargs[(key)]})
elif key == 'includeDisplayOrder':
assert isinstance(kwargs['includeDisplayOrder'], bool), 'Hey there, your "includeDisplayOrder" parameter needs to be of type "bool"!'
dynamic_payload.update({'includeDisplayOrder': kwargs[(key)]})
elif key == 'startDate':
assert isinstance(kwargs['startDate'], str), 'Hey there, your "startDate" parameter needs to be of type "str"!'
start_date = parse(timestr=kwargs[(key)])
dynamic_payload.update({'startDate': start_date.strftime('%Y-%m-%dT%H:%M:%SZ')})
elif key == 'endDate':
assert isinstance(kwargs['endDate'], str), 'Hey there, your "endDate" parameter needs to be of type "str"!'
end_date = parse(timestr=kwargs[(key)])
dynamic_payload.update({'endDate': end_date.strftime('%Y-%m-%dT%H:%M:%SZ')})
elif key == 'timeZone':
assert isinstance(kwargs['timeZone'], str), 'Hey there, your "timeZone" parameter needs to be of type "str"!'
dynamic_payload.update({'timeZone': kwargs[(key)]})
print(dynamic_payload)
download_request = self.send_request_v3(survey=survey, payload=dynamic_payload)
with zipfile.ZipFile(io.BytesIO(download_request.content)) as survey_zip:
for s in survey_zip.infolist():
df = pd.read_csv(survey_zip.open(s.filename))
return df
# Version 3 Code
def get_survey_questions(self, survey=None):
'''This method returns a DataFrame containing the survey questions and the Question IDs.
:param survey: This is the id associated with a given survey.
:return: a Pandas DataFrame
'''
df = self.get_survey_responses(survey=survey, limit=2)
questions = | pd.DataFrame(df[:1].T) | pandas.DataFrame |
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('\nHello! Let\'s explore some US bikeshare data!\n')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
while True:
        city = str(input('Please enter a city to analyze (Chicago, New York City, Washington): ')).lower()
# checking, if the correct selection is entered
if city in ('chicago', 'new york city', 'washington'):
break
else:
print('Wrong entry. Please enter the name of the cities.')
# get user input for month (all, january, february, ... , june)
while True:
month = str(input('Enter a month to filter for (all, january, february, ... , june): ')).lower()
# checking, if the correct selection is entered
if month in ('all', 'january', 'february', 'march', 'april', 'may', 'june'):
break
else:
print('Wrong entry. Please enter the name of a month.')
# get user input for day of week (all, monday, tuesday, ... sunday)
while True:
day = str(input('Enter a day to filter for (all, monday, tuesday, ... sunday): ')).lower()
# checking, if the correct selection is entered
if day in ('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'):
break
else:
print('Wrong entry. Please enter the name of a day.')
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
# load data file into a dataframe
df = | pd.read_csv(CITY_DATA[city]) | pandas.read_csv |
# -*- coding: utf-8 -*-
import os
import sys
import time
import openpyxl
import pandas
import pandas as pd
import tushare as ts
import numpy as np
from datetime import datetime, timedelta
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import mplfinance as mpf
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QApplication, QMessageBox
from dateutil.relativedelta import relativedelta
from mpl_finance import candlestick_ohlc, candlestick2_ohlc
import decimal
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QDialog, QApplication
from primodial import Ui_Dialog
from numpy import long
# author : ye
mode = 0
fig, ax = plt.subplots()
datadir = './data/'
strategydir = './strategy/'
financialdir = './financialdata/'
x, y, lastday, xminnow, xmaxnow = 1, 1, 0, 0, 0
# A thin cloud signals consolidation; the thinner it gets, the bigger the potential change in trend - check whether a new high has been made
# to avoid data collection, change return value to suffix of the file in 'data' dictionary -> enter offline mode!
def endDate():
return time.strftime('%Y%m%d')
# return '20210818'
# 1:excel 0:tushare
def getDataByTscode(ts_code, mode):
if mode == 1:
filedir = os.path.join(datadir, nameStrategy(ts_code))
byexcel = pd.read_excel(filedir)
byexcel.index = byexcel['Unnamed: 0']
byexcel = byexcel.drop(columns=['Unnamed: 0'])
return byexcel
if mode == 0:
ts.set_token('<KEY>')
pro = ts.pro_api()
t1 = endDate()
t2 = (datetime.now() - relativedelta(years=1)).strftime('%Y%m%d')
df = pro.daily(ts_code=ts_code, start_date=t2, end_date=t1)
df = df.iloc[::-1]
return df
def nameStrategy(code):
return code + '-' + endDate() + '.xlsx'
def vision(data, ts_name):
ichimoku = Ichimoku(data)
ichimoku.run()
ichimoku.plot(ts_name)
def call_back(event):
axtemp = event.inaxes
x_min, x_max = axtemp.get_xlim()
fanwei = (x_max - x_min) / 10
if event.button == 'up':
axtemp.set(xlim=(x_min + fanwei, x_max - fanwei))
elif event.button == 'down':
axtemp.set(xlim=(x_min - fanwei, x_max + fanwei))
fig.canvas.draw_idle()
def button_press_callback(click):
global x
global y
x = click.xdata
y = click.ydata
point = (click.xdata, click.ydata)
print(point)
def motion_notify_callback(event):
global x, xminnow, xmaxnow
if event.button != 1: return
xnow = event.xdata
print(x)
delta = x - xnow
plt.xlim(xmin=xminnow + delta, xmax=xmaxnow + delta)
xminnow = xminnow + delta
xmaxnow = xmaxnow + delta
x = xnow
point = (event.xdata, event.ydata, xminnow, xmaxnow)
print(point)
fig.canvas.draw_idle()
class Ichimoku():
"""
@param: ohcl_df <DataFrame>
Required columns of ohcl_df are:
Date<Float>,Open<Float>,High<Float>,Close<Float>,Low<Float>
"""
def __init__(self, ohcl_df):
self.ohcl_df = ohcl_df
ohcl_df['trade_date'] = pandas.to_datetime(ohcl_df['trade_date'].astype(str))
def run(self):
tenkan_window = 9
kijun_window = 26
senkou_span_b_window = 52
cloud_displacement = 26
chikou_shift = -26
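        # standard Ichimoku settings: 9-period Tenkan-sen, 26-period Kijun-sen, 52-period Senkou Span B,
        # with the cloud displaced 26 periods forward and the Chikou span shifted 26 periods back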
ohcl_df = self.ohcl_df
# Dates are floats in mdates like 736740.0
# the period is the difference of last two dates
last_date = ohcl_df["trade_date"].iloc[-1].date()
period = 1
# Add rows for N periods shift (cloud_displacement)
ext_beginning = last_date + timedelta(days=1)
ext_end = last_date + timedelta(days=((period * cloud_displacement) + period))
dates_ext = pd.date_range(start=ext_beginning, end=ext_end)
dates_ext_df = | pd.DataFrame({"trade_date": dates_ext}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pastas as ps
def acf_func(**kwargs):
index = pd.to_datetime(np.arange(0, 100, 1), unit="D", origin="2000")
data = np.sin(np.linspace(0, 10 * np.pi, 100))
r = pd.Series(data=data, index=index)
acf_true = np.cos(np.linspace(0.0, np.pi, 11))[1:]
acf = ps.stats.acf(r, lags=np.arange(1.0, 11.), min_obs=1, **kwargs).values
return acf, acf_true
def test_acf_rectangle():
acf, acf_true = acf_func(bin_method="rectangle")
assert abs((acf - acf_true)).max() < 0.05
def test_acf_gaussian():
acf, acf_true = acf_func(bin_method="gaussian")
assert abs((acf - acf_true)).max() < 0.05
def test_runs_test():
"""
http://www.itl.nist.gov/div898/handbook/eda/section3/eda35d.htm
True Z-statistic = 2.69
Read NIST test data
"""
data = pd.read_csv("tests/data/nist.csv")
test, _ = ps.stats.runs_test(data)
    assert abs(test[0] - 2.69) < 0.02
def test_stoffer_toloi():
res = pd.Series(index= | pd.date_range(start=0, periods=1000, freq="D") | pandas.date_range |
import argparse
import pandas as pd
from baseline_tools import write_standard_data, read_IMDB_origin_data, read_AGNEWS_origin_data, \
read_SST2_origin_data
parser = argparse.ArgumentParser()
parser.add_argument('--dataset')
parser.add_argument('--path')
parser.add_argument('--output')
args = parser.parse_args()
dataset = args.dataset
path = args.path
output = args.output
def create_SST2(data_name, sst_folder, output_folder):
if sst_folder[-1] != '/':
sst_folder += '/'
if output_folder[-1] != '/':
output_folder += '/'
datasetSentences = pd.read_csv(sst_folder + 'datasetSentences.txt', sep='\t')
dictionary = | pd.read_csv(sst_folder + 'dictionary.txt', sep='|', header=None, names=['sentence', 'phrase ids']) | pandas.read_csv |
import os as os
from lib import ReadCsv
from lib import ReadConfig
from lib import ReadData
from lib import NetworkModel
from lib import ModelMetrics
from lib import SeriesPlot
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from lib import modwt
import keras
from datetime import date,datetime,time
from datetime import datetime
config = ReadConfig.ReadConfig()
config_data = config.read_config(os.path.join("config", "config.json"))
reader = ReadData.ReadData()
all_data = reader.readClimateFiles(config_data)
subset = all_data[['date','site_x', 'Hs','Hmax','Tz','Tp','DirTpTRUE','SST']]
subset.describe()
def make_date(series):
for dt in series:
yield datetime.strptime(dt, '%d/%m/%Y')
dates = list(make_date(subset['date']))
subset.index = range(0, subset.shape[0])
datesDf = pd.DataFrame({'dates': pd.Series(dates)}, index=range(0,len(dates)))
subset2 = pd.concat([subset, datesDf], axis=1)
subset2 = subset2.sort_values('dates')
idx1 = subset2.reindex(columns=['dates','site_x']).index
subset2.index = idx1
sitenames = subset2['site_x'].unique()
def getsite(data, col, site):
return data[(data[col] == site)]
# 7 day lag.
def make_lags(data, fromN, maxN):
for i in range(fromN,maxN):
nextData = data.shift(i).dropna()
colnames = list(map(lambda col: col+'_t-'+str(i), nextData.columns))
nextData.columns = colnames
yield nextData
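# e.g. for a hypothetical per-site frame `d` with columns ['Hs', 'Tz'], list(make_lags(d, 1, 3)) yields two
# shifted copies whose columns are renamed to 'Hs_t-1'/'Tz_t-1' and 'Hs_t-2'/'Tz_t-2' respectively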
target_set = None
for site in sitenames:
data = getsite(subset2, 'site_x', site)
data.index = range(0,data.shape[0])
lags = list(make_lags(data, 1,8))
minrows = lags[6].shape[0]
target = data[6:minrows]
for i in range(0,len(lags)):
lags[i] = lags[i][i:minrows]
lags.append(target)
if target_set is None:
target_set = pd.concat(lags, axis=1)
else:
temp = pd.concat(lags, axis=1)
target_set = pd.concat([target_set, temp], axis=0)
target_set = target_set.dropna()
target_set[['Hs_t-7','Hs_t-6','Hs_t-5','Hs_t-4','Hs_t-3','Hs_t-2','Hs_t-1','Hs']].head(10)
# Now that we have timeseries data we now need to calculate wavelet decompositions for each
# window of time. Note that we are lagging only up to a period of 7 days.
norm_data = None
numeric_cols = ['Hs', 'Hmax','Tz','Tp','DirTpTRUE','SST']
temp = []
for col in numeric_cols:
for i in range(1,8):
temp.append(col+'_t-'+str(i))
numeric_cols.extend(temp)
wavelet='db3'
wavelet_cols = []
wavelet_data=None
for site in sitenames:
data = getsite(target_set, 'site_x', site)
data = data[numeric_cols]
for col in numeric_cols:
C1, C2, C3, A = modwt.modwt(data[col].values, wavelet, 3)
nameA = col+"_A1"
name1 = col+"_C1"
name2 = col+"_C2"
name3 = col+"_C3"
wavelet_cols.append([nameA,name1,name2,name3])
data[nameA] = pd.Series(A)
data[name1] = | pd.Series(C1) | pandas.Series |
################################################################################
# The contents of this file are Teradata Public Content and have been released
# to the Public Domain.
# <NAME> & <NAME> - April 2020 - v.1.1
# Copyright (c) 2020 by Teradata
# Licensed under BSD; see "license.txt" file in the bundle root folder.
#
################################################################################
# R and Python TechBytes Demo - Part 5: Python in-nodes with SCRIPT
# ------------------------------------------------------------------------------
# File: stoRFScoreMM.py
# ------------------------------------------------------------------------------
# The R and Python TechBytes Demo comprises of 5 parts:
# Part 1 consists of only a Powerpoint overview of R and Python in Vantage
# Part 2 demonstrates the Teradata R package tdplyr for clients
# Part 3 demonstrates the Teradata Python package teradataml for clients
# Part 4 demonstrates using R in-nodes with the SCRIPT and ExecR Table Operators
# Part 5 demonstrates using Python in-nodes with the SCRIPT Table Operator
################################################################################
#
# This TechBytes demo utilizes a use case to predict the propensity of a
# financial services customer base to open a credit card account.
#
# The present file is the Python scoring script to be used with the SCRIPT
# table operator, as described in the following use case 2 of the present demo
# Part 5:
#
# 2) Fitting and scoring multiple models
#
# We utilize the statecode variable as a partition to built a Random
# Forest model for every state. This is done by using SCRIPT Table Operator
# to run a model fitting script with a PARTITION BY statecode in the query.
# This creates a model for each of the CA, NY, TX, IL, AZ, OH and Other
# state codes, and perists the model in the database via CREATE TABLE AS
# statement.
# Then we run a scoring script via the SCRIPT Table Operator against
# these persisted Random Forest models to score the entire data set.
#
# For this use case, we build an analytic data set nearly identical to the
# one in the teradataml demo (Part 3), with one change as indicated by item
# (d) below. This is so we can demonstrate the in-database capability of
# simultaneously building many models.
# 60% of the analytic data set rows are sampled to create a training
# subset. The remaining 40% is used to create a testing/scoring dataset.
# The train and test/score datasets are used in the SCRIPT operations.
################################################################################
# File Changelog
# v.1.0 2019-10-29 First release
# v.1.1 2020-04-02 Added change log; no code changes in present file
################################################################################
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import pickle
import base64
###
### Read input
###
delimiter = '\t'
inputData = []
try:
line = input()
if line == '': # Exit if user provides blank line
pass
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
modelSerB64 = allArgs[-1]
except (EOFError): # Exit if reached EOF or CTRL-D
pass
while 1:
try:
line = input()
if line == '': # Exit if user provides blank line
break
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
except (EOFError): # Exit if reached EOF or CTRL-D
break
#for line in sys.stdin.read().splitlines():
# line = line.split(delimiter)
# inputData.append(line)
###
### If no data received, gracefully exit rather than producing an error later.
###
if not inputData:
sys.exit()
## In the input information, all rows have the same number of column elements
## except for the first row. The latter also contains the model info in its
## last column. Isolate the serialized model from the end of first row.
#modelSerB64 = inputData[0][-1]
###
### Set up input DataFrame according to input schema
###
# Know your data: You must know in advance the number and data types of the
# incoming columns from the database!
# For numeric columns, the database sends in floats in scientific format with a
# blank space when the exponential is positive; e.g., 1.0 is sent as 1.000E 000.
# The following input data read deals with any such blank spaces in numbers.
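# e.g. the token "1.000E 000" mentioned above is collapsed to "1.000E000" by "".join(x.split()),
# which pandas.to_numeric then parses as 1.0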
columns = ['cust_id', 'tot_income', 'tot_age', 'tot_cust_years', 'tot_children',
'female_ind', 'single_ind', 'married_ind', 'separated_ind',
'statecode', 'ck_acct_ind', 'sv_acct_ind', 'cc_acct_ind',
'ck_avg_bal', 'sv_avg_bal', 'cc_avg_bal', 'ck_avg_tran_amt',
'sv_avg_tran_amt', 'cc_avg_tran_amt', 'q1_trans_cnt',
'q2_trans_cnt', 'q3_trans_cnt', 'q4_trans_cnt', 'SAMPLE_ID']
df = pd.DataFrame(inputData, columns=columns)
#df = pd.DataFrame.from_records(inputData, exclude=['nRow', 'model'], columns=columns)
del inputData
df['cust_id'] = pd.to_numeric(df['cust_id'])
df['tot_income'] = df['tot_income'].apply(lambda x: "".join(x.split()))
df['tot_income'] = pd.to_numeric(df['tot_income'])
df['tot_age'] = pd.to_numeric(df['tot_age'])
df['tot_cust_years'] = pd.to_numeric(df['tot_cust_years'])
df['tot_children'] = pd.to_numeric(df['tot_children'])
df['female_ind'] = pd.to_numeric(df['female_ind'])
df['single_ind'] = pd.to_numeric(df['single_ind'])
df['married_ind'] = pd.to_numeric(df['married_ind'])
df['separated_ind'] = pd.to_numeric(df['separated_ind'])
df['statecode'] = df['statecode'].apply(lambda x: x.replace('"', ''))
df['ck_acct_ind'] = pd.to_numeric(df['ck_acct_ind'])
df['sv_acct_ind'] = pd.to_numeric(df['sv_acct_ind'])
df['cc_acct_ind'] = pd.to_numeric(df['cc_acct_ind'])
df['ck_avg_bal'] = df['ck_avg_bal'].apply(lambda x: "".join(x.split()))
df['ck_avg_bal'] = pd.to_numeric(df['ck_avg_bal'])
df['sv_avg_bal'] = df['sv_avg_bal'].apply(lambda x: "".join(x.split()))
df['sv_avg_bal'] = pd.to_numeric(df['sv_avg_bal'])
df['cc_avg_bal'] = df['cc_avg_bal'].apply(lambda x: "".join(x.split()))
df['cc_avg_bal'] = pd.to_numeric(df['cc_avg_bal'])
df['ck_avg_tran_amt'] = df['ck_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['ck_avg_tran_amt'] = pd.to_numeric(df['ck_avg_tran_amt'])
df['sv_avg_tran_amt'] = df['sv_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['sv_avg_tran_amt'] = pd.to_numeric(df['sv_avg_tran_amt'])
df['cc_avg_tran_amt'] = df['cc_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['cc_avg_tran_amt'] = pd.to_numeric(df['cc_avg_tran_amt'])
df['q1_trans_cnt'] = pd.to_numeric(df['q1_trans_cnt'])
df['q2_trans_cnt'] = pd.to_numeric(df['q2_trans_cnt'])
df['q3_trans_cnt'] = pd.to_numeric(df['q3_trans_cnt'])
df['q4_trans_cnt'] = pd.to_numeric(df['q4_trans_cnt'])
df['SAMPLE_ID'] = | pd.to_numeric(df['SAMPLE_ID']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 11:21:38 2018
@author: zdiveki
"""
import pandas as pd
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import cross_val_score
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer, LabelEncoder
import re
import numpy as np
from sklearn import metrics
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
from nltk.corpus import stopwords
from string import punctuation
from functools import reduce
import pdb
filename = '../cleaned_wine_list_with_body.xlsx'
file1 = '../cleaned_majestic_wine_list_with_body.xlsx'
a0=pd.read_excel(filename)
a1 = pd.read_excel(file1)
columns_sel = ['abv', 'colour', 'country', 'description', 'grape_variety', 'name', 'Body']
a_concat = | pd.concat([a0[columns_sel], a1[columns_sel]]) | pandas.concat |
import datetime
import os, sys
import backtrader as bt
import empyrical as emp
import pyfolio as pyf
import numpy as np
import pandas as pd
class Config:
valid_contracts = ["IF00", "IH00", "IC00"]
contract = valid_contracts[0]
data = os.path.abspath("../data.csv")
df = | pd.read_csv(data, index_col='TRADE_DT', parse_dates=True) | pandas.read_csv |
# coding: utf-8
# In[1]:
from __future__ import division, print_function, absolute_import
from past.builtins import basestring
import os
import gzip
import pandas as pd
from twip.constant import DATA_PATH
from gensim.models import TfidfModel, LsiModel
from gensim.corpora import Dictionary
# In[2]:
import matplotlib
from IPython.display import display, HTML
get_ipython().magic(u'matplotlib inline')
np = pd.np
display(HTML("<style>.container { width:100% !important; }</style>"))
pd.set_option('display.max_rows', 6)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 800)
pd.set_option('precision', 2)
get_ipython().magic(u'precision 4')
get_ipython().magic(u'pprint')
# In[3]:
from sklearn.linear_model import SGDRegressor
from sklearn.svm import SVR
# In[6]:
lsi = LsiModel.load(os.path.join(DATA_PATH, 'lsi100'))
lsi2 = LsiModel.load(os.path.join(DATA_PATH, 'lsi2'))
# In[7]:
with gzip.open(os.path.join(DATA_PATH, 'tweet_topic_vectors.csv.gz'), 'rb') as f:
topics = pd.DataFrame.from_csv(f, encoding='utf8')
topics = topics.fillna(0)
# In[8]:
dates = pd.read_csv(os.path.join(DATA_PATH, 'datetimes.csv.gz'), engine='python')
nums = pd.read_csv(os.path.join(DATA_PATH, 'numbers.csv.gz'), engine='python')
# In[9]:
nums.favorite_count.hist(bins=[0,1,2,3,4,5,7,10,15,25,40,100,1000])
from matplotlib import pyplot as plt
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposx='clip')
plt.xlabel('Number of Favorites')
plt.ylabel('Number of Tweets')
# When I first ran this, my dataframes weren't "aligned".
# So it's very important to check your datasets after every load.
# The correspondence between dates and topics and numerical features is critical for training!
# In[10]:
print(len(dates))
print(len(topics))
print(len(nums))
print(sum(nums.favorite_count >= 1))
# In[11]:
sum(nums.index == dates.index) == len(dates)
# In[12]:
sum(nums.index == topics.index) == len(dates)
# In[13]:
sgd = SGDRegressor()
sgd
# In[14]:
sgd = SGDRegressor().fit(topics.values, nums.favorite_count)
# Well, that was **much** faster...
# In[15]:
predicted_favorites = sgd.predict(topics.values)
predicted_favorites
# In[16]:
np.sum(predicted_favorites >= 1)
# Well that seems more "balanced" at least.
# And it's nice to have a continuous score.
# In[17]:
np.sum(nums.favorite_count.values >= 1)
# In[18]:
from pug.nlp.stats import Confusion
# In[19]:
results = pd.DataFrame()
results['predicted'] = pd.Series(predicted_favorites >= 1)
results['truth'] = pd.Series(nums.favorite_count >= 1)
conf = Confusion(results)
conf
# In[20]:
results.predicted.corr(results.truth)
# Wait, why are we classifying with a regressor anyway?
# In[21]:
pd.Series(predicted_favorites).corr(nums.favorite_count)
# ## Not so hot...
# Balance the training again?
# Get rid of some negatives?
# In[32]:
pos = np.array(nums.favorite_count >= 1)
neg = ~pos
portion_pos = 2 * float(sum(pos)) / len(nums)
mask = ((np.random.binomial(1, portion_pos, size=len(nums)).astype(bool) & neg) | pos)
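# keep every positive tweet and a random subset of negatives (on average roughly twice the number of
# positives) so the training set is far less imbalanced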
sgd = SGDRegressor().fit(topics[mask], nums.favorite_count[mask] >= 1)
print(portion_pos)
print(sum(mask))
print(sum(pos) * 2)
print(sum(neg))
len(nums)
# In[33]:
results = | pd.DataFrame() | pandas.DataFrame |
# Finds and scores all framework mutations from input antibody file (csv format). Outputs normalized FR scores.
# Verbose mode prints full pairwise alignment of each antibody.
# output_mutations option creates a csv with all antibody scores
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
def get_position(pos, germ):
""" Place gaps for IMGT numbering scheme """
try:
return positions[positions[germ] == pos].index[0]
except:
return 0
dict = {
"1-2": "QVQLVQSGAEVKKPGASVKVSCKASGYTFTGYYMHWVRQAPGQGLEWMGRINPNSGGTNYAQKFQGRVTSTRDTSISTAYMELSRLRSDDTVVYYCAR",
"1-3": "QVQLVQSGAEVKKPGASVKVSCKASGYTFTSYAMHWVRQAPGQRLEWMGWINAGNGNTKYSQKFQGRVTITRDTSASTAYMELSSLRSEDTAVYYCAR",
"1-24": "QVQLVQSGAEVKKPGASVKVSCKVSGYTLTELSMHWVRQAPGKGLEWMGGFDPEDGETIYAQKFQGRVTMTEDTSTDTAYMELSSLRSEDTAVYYCAT",
"1-46": "QVQLVQSGAEVKKPGASVKVSCKASGYTFTSYYMHWVRQAPGQGLEWMGIINPSGGSTSYAQKFQGRVTMTRDTSTSTVYMELSSLRSEDTAVYYCAR",
"1-69": "QVQLVQSGAEVKKPGSSVKVSCKASGGTFSSYAISWVRQAPGQGLEWMGGIIPIFGTANYAQKFQGRVTITADESTSTAYMELSSLRSEDTAVYYCAR",
"2-5": "QITLKESGPTLVKPTQTLTLTCTFSGFSLSTSGVGVGWIRQPPGKALEWLALIYWNDDKRYSPSLKSRLTITKDTSKNQVVLTMTNMDPVDTATYYCAHR",
"3-7": "EVQLVESGGGLVQPGGSLRLSCAASGFTFSSYWMSWVRQAPGKGLEWVANIKQDGSEKYYVDSVKGRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR",
"3-9": "EVQLVESGGGLVQPGRSLRLSCAASGFTFDDYAMHWVRQAPGKGLEWVSGISWNSGSIGYADSVKGRFTISRDNAKNSLYLQMNSLRAEDTALYYCAKD",
"3-20": "EVQLVESGGGVVRPGGSLRLSCAASGFTFDDYGMSWVRQAPGKGLEWVSGINWNGGSTGYADSVKGRFTISRDNAKNSLYLQMNSLRAEDTALYHCAR",
"3-21": "EVQLVESGGGLVKPGGSLRLSCAASGFTFSSYSMNWVRQAPGKGLEWVSSISSSSSYIYYADSVKGRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR",
"3-23": "EVQLLESGGGLVQPGGSLRLSCAASGFTFSSYAMSWVRQAPGKGLEWVSAISGSGGSTYYADSVKGRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK",
"3-30": "QVQLVESGGGVVQPGRSLRLSCAASGFTFSSYAMHWVRQAPGKGLEWVAVISYDGSNKYYADSVKGRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR",
"3-33": "QVQLVESGGGVVQPGRSLRLSCAASGFTFSSYGMHWVRQAPGKGLEWVAVIWYDGSNKYYADSVKGRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR",
"3-48": "EVQLVESGGGLVQPGGSLRLSCAASGFTFSSYSMNWVRQAPGKGLEWVSYISSSSSTIYYADSVKGRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR",
"3-66": "EVQLVESGGGLVQPGGSLRLSCAASGFTVSSNYMSWVRQAPGKGLEWVSVIYSGGSTYYADSVKGRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR",
"3-74": "EVQLVESGGGLVQPGGSLRLSCAASGFTFSSYWMHWVRQAPGKGLVWVSRINSDGSSTSYADSVKGRFTISRDNAKNTLYLQMNSLRAEDTAVYYCAR",
"4-4": "QVQLQESGPGLVKPPGTLSLTCAVSGGSISSSNWWSWVRQPPGKGLEWIGEIYHSGSTNYNPSLKSRVTISVDKSKNQFSLKLSSVTAADTAVYCCAR",
"4-30-4": "QVQLQESGPGLVKPSQTLSLTCTVSGGSISSGDYYWSWIRQPPGKGLEWIGYIYYSGSTYYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-31": "QVQLQESGPGLVKPSQTLSLTCTVSGGSISSGGYYWSWIRQHPGKGLEWIGYIYYSGSTYYNPSLKSLVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-34": "QVQLQQWGAGLLKPSETLSLTCAVYGGSFSGYYWSWIRQPPGKGLEWIGEINHSGSTNYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-39": "QLQLQESGPGLVKPSETLSLTCTVSGGSISSSSYYWGWIRQPPGKGLEWIGSIYYSGSTYYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-59": "QVQLQESGPGLVKPSETLSLTCTVSGGSISSYYWSWIRQPPGKGLEWIGYIYYSGSTNYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"4-61": "QVQLQESGPGLVKPSETLSLTCTVSGGSVSSGSYYWSWIRQPPGKGLEWIGYIYYSGSTNYNPSLKSRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR",
"5-51": "EVQLVQSGAEVKKPGESLKISCKGSGYSFTSYWIGWVRQMPGKGLEWMGIIYPGDSDTRYSPSFQGQVTISADKSISTAYLQWSSLKASDTAMYYCAR",
"6-1": "QVQLQQSGPGLVKPSQTLSLTCAISGDSVSSNSAAWNWIRQSPSRGLEWLGRTYYRSKWYNDYAVSVKSRITINPDTSKNQFSLQLNSVTPEDTAVYYCAR"
}
verbose = False #True: output alignment and individual scores, False: plot results
output_csv = True #output csv file of calculated scores
output_mutations = True #output csv file with all antibody scores
#input files
ab_filename = "FDA_Abs.csv" #input antibody file (use "Flagged" in name for phase identification)
norm_filename = "normalization.csv" #input normalization constants
numbering = "IMGT_num.csv" #index to IMGT numbering scheme
#read input files
Abs = pd.read_csv(ab_filename)
norm = | pd.read_csv(norm_filename, index_col=0) | pandas.read_csv |
import unittest
import os
import pandas as pd
from cgnal.core.tests.core import TestCase, logTest
from cgnal.core.logging.defaults import getDefaultLogger
from cgnal.core.data.layer.pandas.databases import Database, Table
from tests import TMP_FOLDER
logger = getDefaultLogger()
db = Database(TMP_FOLDER + "/db")
df1 = | pd.DataFrame([[1, 2, 3], [6, 5, 4]], columns=["a", "b", "c"]) | pandas.DataFrame |
"""Tests for Table Schema integration."""
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.core.dtypes.dtypes import (
PeriodDtype, CategoricalDtype, DatetimeTZDtype)
from pandas.io.json.table_schema import (
as_json_table_type,
build_table_schema,
make_field,
set_default_names)
class TestBuildSchema(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
},
index=pd.Index(range(4), name='idx'))
def test_build_table_schema(self):
result = | build_table_schema(self.df, version=False) | pandas.io.json.table_schema.build_table_schema |
import numpy as np
import pandas as pd
from bach import Series, DataFrame
from bach.operations.cut import CutOperation, QCutOperation
from sql_models.util import quote_identifier
from tests.functional.bach.test_data_and_utils import assert_equals_data
PD_TESTING_SETTINGS = {
'check_dtype': False,
'check_exact': False,
'atol': 1e-3,
}
def compare_boundaries(expected: pd.Series, result: Series) -> None:
for exp, res in zip(expected.to_numpy(), result.to_numpy()):
if not isinstance(exp, pd.Interval):
assert res is None or np.isnan(res)
continue
np.testing.assert_almost_equal(exp.left, float(res.left), decimal=2)
np.testing.assert_almost_equal(exp.right, float(res.right), decimal=2)
if exp.closed_left:
assert res.closed_left
if exp.closed_right:
assert res.closed_right
def test_cut_operation_pandas(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=10)
result = CutOperation(series=series, bins=10)()
compare_boundaries(expected, result)
expected_wo_right = | pd.cut(p_series, bins=10, right=False) | pandas.cut |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mysql_url() -> str:
conn = os.environ["MYSQL_URL"]
return conn
def test_mysql_without_partition(mysql_url: str) -> None:
query = "select * from test_table limit 3"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 3], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_with_partition(mysql_url: str) -> None:
query = "select * from test_table"
df = read_sql(
mysql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": | pd.Series([1, 2, 3, 4, 5, 6], dtype="Int64") | pandas.Series |
import ast
import time
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Any
from matplotlib import dates as mdates
from scipy import stats
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.transitioning import Transition
from ds_discovery.components.commons import Commons
from aistac.properties.abstract_properties import AbstractPropertyManager
from ds_discovery.components.discovery import DataDiscovery
from ds_discovery.intent.abstract_common_intent import AbstractCommonsIntentModel
__author__ = '<NAME>'
class AbstractBuilderIntentModel(AbstractCommonsIntentModel):
_INTENT_PARAMS = ['self', 'save_intent', 'column_name', 'intent_order',
'replace_intent', 'remove_duplicates', 'seed']
def __init__(self, property_manager: AbstractPropertyManager, default_save_intent: bool=None,
default_intent_level: [str, int, float]=None, default_intent_order: int=None,
default_replace_intent: bool=None):
"""initialisation of the Intent class.
:param property_manager: the property manager class that references the intent contract.
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param default_intent_order: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
"""
default_save_intent = default_save_intent if isinstance(default_save_intent, bool) else True
default_replace_intent = default_replace_intent if isinstance(default_replace_intent, bool) else True
default_intent_level = default_intent_level if isinstance(default_intent_level, (str, int, float)) else 'A'
default_intent_order = default_intent_order if isinstance(default_intent_order, int) else 0
intent_param_exclude = ['size']
intent_type_additions = [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, pd.Timestamp]
super().__init__(property_manager=property_manager, default_save_intent=default_save_intent,
intent_param_exclude=intent_param_exclude, default_intent_level=default_intent_level,
default_intent_order=default_intent_order, default_replace_intent=default_replace_intent,
intent_type_additions=intent_type_additions)
def run_intent_pipeline(self, canonical: Any=None, intent_levels: [str, int, list]=None, run_book: str=None,
seed: int=None, simulate: bool=None, **kwargs) -> pd.DataFrame:
"""Collectively runs all parameterised intent taken from the property manager against the code base as
defined by the intent_contract. The whole run can be seeded though any parameterised seeding in the intent
contracts will take precedence
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param intent_levels: (optional) a single or list of intent_level to run in order given
:param run_book: (optional) a preset runbook of intent_level to run in order
:param seed: (optional) a seed value that will be applied across the run: default to None
        :param simulate: (optional) if True, do not apply the intent but return a report of the run order (column, order, method)
:return: a pandas dataframe
"""
simulate = simulate if isinstance(simulate, bool) else False
col_sim = {"column": [], "order": [], "method": []}
# legacy
if 'size' in kwargs.keys():
canonical = kwargs.pop('size')
canonical = self._get_canonical(canonical)
size = canonical.shape[0] if canonical.shape[0] > 0 else 1000
# test if there is any intent to run
if self._pm.has_intent():
# get the list of levels to run
if isinstance(intent_levels, (str, list)):
column_names = Commons.list_formatter(intent_levels)
elif isinstance(run_book, str) and self._pm.has_run_book(book_name=run_book):
column_names = self._pm.get_run_book(book_name=run_book)
else:
                # put all the intent in order: frame starters, then get, model, correlate and the remaining frame methods
_model = []
_get = []
_correlate = []
_frame_start = []
_frame_end = []
for column in self._pm.get_intent().keys():
for order in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column), {}):
for method in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column, order), {}).keys():
if str(method).startswith('get_'):
if column in _correlate + _frame_start + _frame_end:
continue
_get.append(column)
elif str(method).startswith('model_'):
_model.append(column)
elif str(method).startswith('correlate_'):
if column in _get:
_get.remove(column)
_correlate.append(column)
elif str(method).startswith('frame_'):
if column in _get:
_get.remove(column)
if str(method).startswith('frame_starter'):
_frame_start.append(column)
else:
_frame_end.append(column)
column_names = Commons.list_unique(_frame_start + _get + _model + _correlate + _frame_end)
for column in column_names:
level_key = self._pm.join(self._pm.KEY.intent_key, column)
for order in sorted(self._pm.get(level_key, {})):
for method, params in self._pm.get(self._pm.join(level_key, order), {}).items():
try:
if method in self.__dir__():
if simulate:
col_sim['column'].append(column)
col_sim['order'].append(order)
col_sim['method'].append(method)
continue
result = []
params.update(params.pop('kwargs', {}))
if isinstance(seed, int):
params.update({'seed': seed})
_ = params.pop('intent_creator', 'Unknown')
if str(method).startswith('get_'):
result = eval(f"self.{method}(size=size, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('correlate_'):
result = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('model_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_starter'):
canonical = self._get_canonical(params.pop('canonical', canonical), deep_copy=False)
size = canonical.shape[0]
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
if 0 < size != len(result):
raise IndexError(f"The index size of '{column}' is '{len(result)}', "
f"should be {size}")
canonical[column] = result
except ValueError as ve:
raise ValueError(f"intent '{column}', order '{order}', method '{method}' failed with: {ve}")
except TypeError as te:
raise TypeError(f"intent '{column}', order '{order}', method '{method}' failed with: {te}")
if simulate:
return pd.DataFrame.from_dict(col_sim)
return canonical
def _get_number(self, from_value: [int, float]=None, to_value: [int, float]=None, relative_freq: list=None,
precision: int=None, ordered: str=None, at_most: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number in the range from_value to to_value. if only to_value given from_value is zero
:param from_value: (signed) integer to start from
        :param to_value: optional, (signed) integer the number sequence goes up to but does not include
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
        :param ordered: order the data ascending 'asc' or descending 'des'; accepted values are 'asc' and 'des'
:param at_most: the most times a selection should be chosen
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
"""
if not isinstance(from_value, (int, float)) and not isinstance(to_value, (int, float)):
raise ValueError(f"either a 'range_value' or a 'range_value' and 'to_value' must be provided")
if not isinstance(from_value, (float, int)):
from_value = 0
if not isinstance(to_value, (float, int)):
(from_value, to_value) = (0, from_value)
if to_value <= from_value:
raise ValueError("The number range must be a positive different, found to_value <= from_value")
at_most = 0 if not isinstance(at_most, int) else at_most
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if not isinstance(precision, int) else precision
if precision == 0:
from_value = int(round(from_value, 0))
to_value = int(round(to_value, 0))
is_int = True if (isinstance(to_value, int) and isinstance(from_value, int)) else False
if is_int:
precision = 0
# build the distribution sizes
if isinstance(relative_freq, list) and len(relative_freq) > 1:
freq_dist_size = self._freq_dist_size(relative_freq=relative_freq, size=size, seed=_seed)
else:
freq_dist_size = [size]
# generate the numbers
rtn_list = []
generator = np.random.default_rng(seed=_seed)
dtype = int if is_int else float
bins = np.linspace(from_value, to_value, len(freq_dist_size) + 1, dtype=dtype)
for idx in np.arange(1, len(bins)):
low = bins[idx - 1]
high = bins[idx]
if low >= high:
continue
elif at_most > 0:
sample = []
for _ in np.arange(at_most, dtype=dtype):
count_size = freq_dist_size[idx - 1] * generator.integers(2, 4, size=1)[0]
sample += list(set(np.linspace(bins[idx - 1], bins[idx], num=count_size, dtype=dtype,
endpoint=False)))
if len(sample) < freq_dist_size[idx - 1]:
raise ValueError(f"The value range has insufficient samples to choose from when using at_most."
f"Try increasing the range of values to sample.")
rtn_list += list(generator.choice(sample, size=freq_dist_size[idx - 1], replace=False))
else:
if dtype == int:
rtn_list += generator.integers(low=low, high=high, size=freq_dist_size[idx - 1]).tolist()
else:
choice = generator.random(size=freq_dist_size[idx - 1], dtype=float)
choice = np.round(choice * (high-low)+low, precision).tolist()
# make sure the precision
choice = [high - 10**(-precision) if x >= high else x for x in choice]
rtn_list += choice
# order or shuffle the return list
if isinstance(ordered, str) and ordered.lower() in ['asc', 'des']:
rtn_list.sort(reverse=True if ordered.lower() == 'des' else False)
else:
generator.shuffle(rtn_list)
return rtn_list
def _get_category(self, selection: list, relative_freq: list=None, size: int=None, at_most: int=None,
seed: int=None) -> list:
""" returns a category from a list. Of particular not is the at_least parameter that allows you to
control the number of times a selection can be chosen.
:param selection: a list of items to select from
:param relative_freq: a weighting pattern that does not have to add to 1
:param size: an optional size of the return. default to 1
:param at_most: the most times a selection should be chosen
:param seed: a seed value for the random function: default to None
:return: an item or list of items chosen from the list
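Example (an illustrative sketch only; 'builder' names a hypothetical instance exposing this intent):
    # 1000 values where 'M' is roughly five times more frequent than 'U'
    gender = builder._get_category(selection=['M', 'F', 'U'], relative_freq=[5, 4, 1], size=1000, seed=31)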
"""
if not isinstance(selection, list) or len(selection) == 0:
return [None]*size
_seed = self._seed() if seed is None else seed
select_index = self._get_number(len(selection), relative_freq=relative_freq, at_most=at_most, size=size,
seed=_seed)
rtn_list = [selection[i] for i in select_index]
return list(rtn_list)
def _get_datetime(self, start: Any, until: Any, relative_freq: list=None, at_most: int=None, ordered: str=None,
date_format: str=None, as_num: bool=None, ignore_time: bool=None, size: int=None,
seed: int=None, day_first: bool=None, year_first: bool=None) -> list:
""" returns a random date between two date and/or times. weighted patterns can be applied to the overall date
range.
if a signed 'int' type is passed to the start and/or until dates, the inferred date will be the current date
time with the integer being the offset from the current date time in 'days'.
if a dictionary of time delta name values is passed this is treated as a time delta from the start time.
for example if start = 0, until = {'days': 1, 'hours': 3} the date range will be between now and 1 day and 3 hours from now
Note: If no patterns are set this will return a linearly random number between the range boundaries.
:param start: the start boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp or int
:param until: up until boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp, pd.delta, int
:param relative_freq: (optional) A pattern across the whole date range.
:param at_most: the most times a selection should be chosen
:param ordered: order the data ascending 'asc' or descending 'des', values accepted 'asc' or 'des'
:param ignore_time: ignore time elements and only select from Year, Month, Day elements. Default is False
:param date_format: the string format of the date to be returned. if not set then pd.Timestamp returned
:param as_num: returns a list of Matplotlib date values as a float. Default is False
:param size: the size of the sample to return. Default to 1
:param seed: a seed value for the random function: default to None
:param year_first: specifies if to parse with the year first
If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12.
If both day_first and year_first are True, year_first takes precedence (same as dateutil).
:param day_first: specifies if to parse with the day first
If True, parses dates with the day first, eg %d-%m-%Y.
If False, defaults to the preferred preference, normally %m-%d-%Y (but not strict)
:return: a date or size of dates in the format given.
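Example (an illustrative sketch only; 'builder' names a hypothetical instance exposing this intent):
    # 100 date strings between the start of 2021 and 30 days from run time, ignoring the time element
    joined = builder._get_datetime(start='2021-01-01', until=30, date_format='%Y-%m-%d',
                                   ignore_time=True, size=100, seed=31)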
"""
# pre check
if start is None or until is None:
raise ValueError("The start or until parameters cannot be of NoneType")
# Code block for intent
as_num = False if not isinstance(as_num, bool) else as_num
ignore_time = False if not isinstance(ignore_time, bool) else ignore_time
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
if isinstance(start, int):
start = (pd.Timestamp.now() + pd.Timedelta(days=start))
if isinstance(until, int):
until = (pd.Timestamp.now() + pd.Timedelta(days=until))
if isinstance(until, dict):
until = (start + pd.Timedelta(**until))
if start == until:
rtn_list = [self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]] * size
else:
_dt_start = self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]
_dt_until = self._convert_date2value(until, day_first=day_first, year_first=year_first)[0]
precision = 15
if ignore_time:
_dt_start = int(_dt_start)
_dt_until = int(_dt_until)
precision = 0
rtn_list = self._get_number(from_value=_dt_start, to_value=_dt_until, relative_freq=relative_freq,
at_most=at_most, ordered=ordered, precision=precision, size=size, seed=seed)
if not as_num:
rtn_list = mdates.num2date(rtn_list)
if isinstance(date_format, str):
rtn_list = pd.Series(rtn_list).dt.strftime(date_format).to_list()
else:
rtn_list = pd.Series(rtn_list).dt.tz_convert(None).to_list()
return rtn_list
def _get_intervals(self, intervals: list, relative_freq: list=None, precision: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number based on a list selection of tuple(lower, upper) interval
:param intervals: a list of unique tuple pairs representing the interval lower and upper boundaries
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
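Example (an illustrative sketch only; 'builder' names a hypothetical instance exposing this intent):
    # 100 amounts where most fall in the 0-100 band and roughly one in ten in the 100-1000 band
    amounts = builder._get_intervals(intervals=[(0, 100), (100, 1000)], relative_freq=[9, 1],
                                     precision=2, size=100, seed=31)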
"""
# Code block for intent
size = 1 if size is None else size
if not isinstance(precision, int):
precision = 0 if all(isinstance(v[0], int) and isinstance(v[1], int) for v in intervals) else 3
_seed = self._seed() if seed is None else seed
if not all(isinstance(value, tuple) for value in intervals):
raise ValueError("The intervals list must be a list of tuples")
interval_list = self._get_category(selection=intervals, relative_freq=relative_freq, size=size, seed=_seed)
interval_counts = pd.Series(interval_list, dtype='object').value_counts()
rtn_list = []
for index in interval_counts.index:
size = interval_counts[index]
if size == 0:
continue
if len(index) == 2:
(lower, upper) = index
if index == 0:
closed = 'both'
else:
closed = 'right'
else:
(lower, upper, closed) = index
if lower == upper:
rtn_list += [round(lower, precision)] * size
continue
if precision == 0:
margin = 1
else:
margin = 10**(((-1)*precision)-1)
if str.lower(closed) == 'neither':
lower += margin
upper -= margin
elif str.lower(closed) == 'right':
lower += margin
elif str.lower(closed) == 'both':
upper += margin
# correct adjustments
if lower >= upper:
upper = lower + margin
rtn_list += self._get_number(lower, upper, precision=precision, size=size, seed=_seed)
np.random.default_rng(seed=_seed).shuffle(rtn_list)
return rtn_list
def _get_dist_normal(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A normal (Gaussian) continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
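Example (an illustrative sketch only; 'builder' names a hypothetical instance exposing this intent):
    # 1000 heights in centimetres centred on 176 with a standard deviation of 7.5
    heights = builder._get_dist_normal(mean=176.0, std=7.5, size=1000, seed=31)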
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.normal(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_logistic(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A logistic continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.logistic(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_exponential(self, scale: [int, float], size: int=None, seed: int=None) -> list:
"""An exponential continuous random distribution.
:param scale: The scale of the distribution.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.exponential(scale=scale, size=size))
return rtn_list
def _get_dist_gumbel(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""An gumbel continuous random distribution.
The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme Value Type I) distribution is one of
a class of Generalized Extreme Value (GEV) distributions used in modeling extreme value problems.
The Gumbel is a special case of the Extreme Value Type I distribution for maximums from distributions
with “exponential-like” tails.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.gumbel(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_binomial(self, trials: int, probability: float, size: int=None, seed: int=None) -> list:
"""A binomial discrete random distribution. The Binomial Distribution represents the number of
successes and failures in n independent Bernoulli trials for some given value of n
:param trials: the number of trials to attempt, must be >= 0.
:param probability: the probability distribution, >= 0 and <=1.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
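Example (an illustrative sketch only; 'builder' names a hypothetical instance exposing this intent):
    # 1000 zero/one flags where roughly 80% of single-trial outcomes are successes
    flags = builder._get_dist_binomial(trials=1, probability=0.8, size=1000, seed=31)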
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.binomial(n=trials, p=probability, size=size))
return rtn_list
def _get_dist_poisson(self, interval: float, size: int=None, seed: int=None) -> list:
"""A Poisson discrete random distribution.
The Poisson distribution
.. math:: f(k; \lambda)=\frac{\lambda^k e^{-\lambda}}{k!}
For events with an expected separation :math:`\lambda` the Poisson
distribution :math:`f(k; \lambda)` describes the probability of
:math:`k` events occurring within the observed
interval :math:`\lambda`.
Because the output is limited to the range of the C int64 type, a
ValueError is raised when `lam` is within 10 sigma of the maximum
representable value.
:param interval: Expectation of interval, must be >= 0.
:param size: the size of the sample.
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.poisson(lam=interval, size=size))
return rtn_list
def _get_dist_bernoulli(self, probability: float, size: int=None, seed: int=None) -> list:
"""A Bernoulli discrete random distribution using scipy
:param probability: the probability occurrence
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
rtn_list = list(stats.bernoulli.rvs(p=probability, size=size, random_state=_seed))
return rtn_list
def _get_dist_bounded_normal(self, mean: float, std: float, lower: float, upper: float, precision: int=None,
size: int=None, seed: int=None) -> list:
"""A bounded normal continuous random distribution.
:param mean: the mean of the distribution
:param std: the standard deviation
:param lower: the lower limit of the distribution
:param upper: the upper limit of the distribution
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
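Example (an illustrative sketch only; 'builder' names a hypothetical instance exposing this intent):
    # 1000 salaries normally distributed around 45000 but truncated to the range 18000 to 120000
    salaries = builder._get_dist_bounded_normal(mean=45000, std=12000, lower=18000, upper=120000,
                                                precision=2, size=1000, seed=31)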
"""
size = 1 if size is None else size
precision = precision if isinstance(precision, int) else 3
_seed = self._seed() if seed is None else seed
dist = stats.truncnorm((lower-mean)/std, (upper-mean)/std, loc=mean, scale=std)
rtn_list = list(dist.rvs(size, random_state=_seed).round(precision))
return rtn_list
def _get_distribution(self, distribution: str, package: str=None, precision: int=None, size: int=None,
seed: int=None, **kwargs) -> list:
"""returns a number based the distribution type.
:param distribution: The string name of the distribution function from numpy random Generator class
:param package: (optional) The name of the package to use, options are 'numpy' (default) and 'scipy'.
:param precision: (optional) the precision of the returned number
:param size: (optional) the size of the sample
:param seed: (optional) a seed value for the random function: default to None
:return: a random number
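Example (an illustrative sketch only; 'builder' names a hypothetical instance exposing this intent and
the distribution parameters are passed through as kwargs):
    # a numpy Generator distribution referenced by name
    waits = builder._get_distribution(distribution='gamma', shape=2.0, scale=1.5, size=1000, seed=31)
    # a scipy stats distribution referenced by name
    delays = builder._get_distribution(distribution='expon', package='scipy', scale=2.0, size=1000, seed=31)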
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if precision is None else precision
if isinstance(package, str) and package == 'scipy':
rtn_list = eval(f"stats.{distribution}.rvs(size=size, random_state=_seed, **kwargs)", globals(), locals())
else:
generator = np.random.default_rng(seed=_seed)
rtn_list = eval(f"generator.{distribution}(size=size, **kwargs)", globals(), locals())
rtn_list = list(rtn_list.round(precision))
return rtn_list
def _get_selection(self, canonical: Any, column_header: str, relative_freq: list=None, sample_size: int=None,
selection_size: int=None, size: int=None, at_most: int=None, shuffle: bool=None,
seed: int=None) -> list:
""" returns a random list of values where the selection of those values is taken from a connector source.
:param canonical: a pd.DataFrame as the reference dataframe
:param column_header: the name of the column header to correlate
:param relative_freq: (optional) a weighting pattern of the final selection
:param selection_size: (optional) the selection to take from the sample size, normally used with shuffle
:param sample_size: (optional) the size of the sample to take from the reference file
:param at_most: (optional) the most times a selection should be chosen
:param shuffle: (optional) if the selection should be shuffled before selection. Default is true
:param size: (optional) size of the return. default to 1
:param seed: (optional) a seed value for the random function: default to None
:return: list
The canonical is normally a connector contract str reference or a set of parameter instructions on how to
generate a pd.Dataframe but can be a pd.DataFrame. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
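Example (an illustrative sketch only; 'builder' names a hypothetical instance and 'surname_source' a
hypothetical connector contract name whose frame holds a 'surname' column):
    # 1000 surnames chosen from the 'surname' column of the reference source
    surnames = builder._get_selection(canonical='surname_source', column_header='surname', size=1000, seed=31)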
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
if isinstance(canonical, dict):
canonical = pd.DataFrame.from_dict(data=canonical)
if column_header not in canonical.columns:
raise ValueError(f"The column '{column_header}' not found in the canonical")
_values = canonical[column_header].iloc[:sample_size]
shuffle = shuffle if isinstance(shuffle, bool) else True
if shuffle:
    _values = _values.sample(frac=1, random_state=_seed).reset_index(drop=True)
if isinstance(selection_size, int) and 0 < selection_size < _values.size:
_values = _values.iloc[:selection_size]
return self._get_category(selection=_values.to_list(), relative_freq=relative_freq, size=size, at_most=at_most,
seed=_seed)
def _frame_starter(self, canonical: Any, selection: list=None, headers: [str, list]=None, drop: bool=None,
dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None, re_ignore_case: bool=None,
rename_map: dict=None, default_size: int=None, seed: int=None) -> pd.DataFrame:
""" Selects rows and/or columns changing the shape of the DatFrame. This is always run last in a pipeline
Rows are filtered before the column filter so columns can be referenced even though they might not be included
the final column list.
:param canonical: a pd.DataFrame as the reference dataframe
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
:param regex: a regular expression to search the headers. example '^((?!_amt).)*$)' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param rename_map: a from: to dictionary of headers to rename
:param default_size: if the canonical fails return an empty dataframe with the default index size
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starter is a pd.DataFrame, a pd.Series or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
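Example (an illustrative sketch only; 'inst' names a hypothetical instance and 'df' a pd.DataFrame with
'genre' and 'title' columns):
    # keep only the comedy rows, retain two columns and rename one of them
    df = inst._frame_starter(canonical=df, selection=[inst.select2dict(column='genre', condition="=='Comedy'")],
                             headers=['title', 'genre'], rename_map={'title': 'film_title'})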
"""
canonical = self._get_canonical(canonical, size=default_size)
# not used but in place for method consistency
_seed = self._seed() if seed is None else seed
if isinstance(selection, list):
selection = deepcopy(selection)
# run the select logic
select_idx = self._selection_index(canonical=canonical, selection=selection)
canonical = canonical.iloc[select_idx].reset_index(drop=True)
drop = drop if isinstance(drop, bool) else False
exclude = exclude if isinstance(exclude, bool) else False
re_ignore_case = re_ignore_case if isinstance(re_ignore_case, bool) else False
rtn_frame = Commons.filter_columns(canonical, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case)
if isinstance(rename_map, dict):
rtn_frame.rename(mapper=rename_map, axis='columns', inplace=True)
return rtn_frame
def _frame_selection(self, canonical: Any, selection: list=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, seed: int=None) -> pd.DataFrame:
""" This method always runs at the start of the pipeline, taking a direct or generated pd.DataFrame,
see context notes below, as the foundation canonical of all subsequent steps of the pipeline.
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
:param regex: a regular expression to search the headers. example '^((?!_amt).)*$)' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
"""
return self._frame_starter(canonical=canonical, selection=selection, headers=headers, drop=drop, dtype=dtype,
exclude=exclude, regex=regex, re_ignore_case=re_ignore_case, seed=seed)
def _model_custom(self, canonical: Any, code_str: str, seed: int=None, **kwargs):
""" Commonly used for custom methods, takes code string that when executed changes the the canonical returning
the modified canonical. If the method passes returns a pd.Dataframe this will be returned else the assumption is
the canonical has been changed inplace and thus the modified canonical will be returned
When referencing the canonical in the code_str it should be referenced either by use parameter label 'canonical'
or the short cut '@' symbol. kwargs can also be passed into the code string but must be preceded by a '$' symbol
for example:
assume canonical['gender'] = ['M', 'F', 'U']
code_str ='''
\n@['new_gender'] = [True if x in $value else False for x in @[$header]]
\n@['value'] = [4, 5, 6]
'''
where kwargs are header="'gender'" and value=['M', 'F']
:param canonical: a pd.DataFrame as the reference dataframe
:param code_str: an action on those column values. to reference the canonical use '@'
:param seed: (optional) a seed value for the random function: default to None
:param kwargs: a set of kwargs to include in any executable function
:return: a list (optionally a pd.DataFrame)
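Example (an illustrative sketch only; 'builder' names a hypothetical instance and 'df' a pd.DataFrame
with an 'age' column):
    # add a boolean flag column using the '@' canonical short cut and a '$' kwarg substitution
    df = builder._model_custom(canonical=df, code_str="@['is_adult'] = @['age'] >= $limit", limit=18)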
"""
canonical = self._get_canonical(canonical)
_seed = seed if isinstance(seed, int) else self._seed()
local_kwargs = locals()
for k, v in local_kwargs.pop('kwargs', {}).items():
local_kwargs.update({k: v})
code_str = code_str.replace(f'${k}', str(v))
code_str = code_str.replace('@', 'canonical')
df = exec(code_str, globals(), local_kwargs)
if df is None:
return canonical
return df
def _model_iterator(self, canonical: Any, marker_col: str=None, starting_frame: str=None, selection: list=None,
default_action: dict=None, iteration_actions: dict=None, iter_start: int=None,
iter_stop: int=None, seed: int=None) -> pd.DataFrame:
""" This method allows one to model repeating data subset that has some form of action applied per iteration.
The optional marker column must be included in order to apply actions or apply an iteration marker
An example of use might be a recommender generator where a cohort of unique users need to be selected, for
different recommendation strategies but users can be repeated across recommendation strategy
:param canonical: a pd.DataFrame as the reference dataframe
:param marker_col: (optional) the marker column name for the action outcome. default is to not include
:param starting_frame: (optional) a str referencing an existing connector contract name as the base DataFrame
:param selection: (optional) a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param default_action: (optional) a default action to take on all iterations. defaults to iteration value
:param iteration_actions: (optional) a dictionary of actions where the key is a specific iteration
:param iter_start: (optional) the start value of the range iteration default is 0
:param iter_stop: (optional) the stop value of the range iteration default is start iteration + 1
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starting_frame can be a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a
set of parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes a intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical)
rtn_frame = self._get_canonical(starting_frame)
_seed = self._seed() if seed is None else seed
iter_start = iter_start if isinstance(iter_start, int) else 0
iter_stop = iter_stop if isinstance(iter_stop, int) and iter_stop > iter_start else iter_start + 1
default_action = default_action if isinstance(default_action, dict) else 0
iteration_actions = iteration_actions if isinstance(iteration_actions, dict) else {}
for counter in range(iter_start, iter_stop):
df_count = canonical.copy()
# selection
df_count = self._frame_selection(df_count, selection=selection, seed=_seed)
# actions
if isinstance(marker_col, str):
if counter in iteration_actions.keys():
_action = iteration_actions.get(counter, None)
df_count[marker_col] = self._apply_action(df_count, action=_action, seed=_seed)
else:
default_action = default_action if isinstance(default_action, dict) else counter
df_count[marker_col] = self._apply_action(df_count, action=default_action, seed=_seed)
rtn_frame = pd.concat([rtn_frame, df_count], ignore_index=True)
return rtn_frame
def _model_group(self, canonical: Any, headers: [str, list], group_by: [str, list], aggregator: str=None,
list_choice: int=None, list_max: int=None, drop_group_by: bool=False, seed: int=None,
include_weighting: bool=False, freq_precision: int=None, remove_weighting_zeros: bool=False,
remove_aggregated: bool=False) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. in addition the the
standard groupby aggregators there is also 'list' and 'set' that returns an aggregated list or set.
These can be using in conjunction with 'list_choice' and 'list_size' allows control of the return values.
if list_max is set to 1 then a single value is returned rather than a list of size 1.
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: the column headers to apply the aggregation too
:param group_by: the column headers to group by
:param aggregator: (optional) the aggregator as a function of Pandas DataFrame 'groupby' or 'list' or 'set'
:param list_choice: (optional) used in conjunction with list or set aggregator to return a random n choice
:param list_max: (optional) used in conjunction with list or set aggregator restricts the list to a n size
:param drop_group_by: (optional) drops the group by headers
:param include_weighting: (optional) include a percentage weighting column for each
:param freq_precision: (optional) a precision for the relative_freq values
:param remove_aggregated: (optional) if used in conjunction with the weighting then drops the aggregator column
:param remove_weighting_zeros: (optional) removes zero values
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
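Example (an illustrative sketch only; 'builder' names a hypothetical instance and 'df' a pd.DataFrame
with 'spend' and 'customer_id' columns):
    # total spend per customer with a relative weighting column included
    df_grp = builder._model_group(canonical=df, headers='spend', group_by='customer_id',
                                  aggregator='sum', include_weighting=True)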
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
freq_precision = freq_precision if isinstance(freq_precision, int) else 3
aggregator = aggregator if isinstance(aggregator, str) else 'sum'
headers = Commons.list_formatter(headers)
group_by = Commons.list_formatter(group_by)
df_sub = Commons.filter_columns(canonical, headers=headers + group_by).dropna()
if aggregator.startswith('set') or aggregator.startswith('list'):
df_tmp = df_sub.groupby(group_by)[headers[0]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.reset_index()
for idx in range(1, len(headers)):
result = df_sub.groupby(group_by)[headers[idx]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.merge(result, how='left', left_on=group_by, right_index=True)
for idx in range(len(headers)):
header = headers[idx]
if isinstance(list_choice, int):
df_tmp[header] = df_tmp[header].apply(lambda x: generator.choice(x, size=list_choice))
if isinstance(list_max, int):
df_tmp[header] = df_tmp[header].apply(lambda x: x[0] if list_max == 1 else x[:list_max])
df_sub = df_tmp
else:
df_sub = df_sub.groupby(group_by, as_index=False).agg(aggregator)
if include_weighting:
df_sub['sum'] = df_sub.sum(axis=1, numeric_only=True)
total = df_sub['sum'].sum()
df_sub['weighting'] = df_sub['sum'].\
apply(lambda x: round((x / total), freq_precision) if isinstance(x, (int, float)) else 0)
df_sub = df_sub.drop(columns='sum')
if remove_weighting_zeros:
df_sub = df_sub[df_sub['weighting'] > 0]
df_sub = df_sub.sort_values(by='weighting', ascending=False)
if remove_aggregated:
df_sub = df_sub.drop(headers, axis=1)
if drop_group_by:
df_sub = df_sub.drop(columns=group_by, errors='ignore')
return df_sub
def _model_merge(self, canonical: Any, other: Any, left_on: str=None, right_on: str=None,
on: str=None, how: str=None, headers: list=None, suffixes: tuple=None, indicator: bool=None,
validate: str=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. The indicator parameter can be
used to mark the merged items.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param left_on: the canonical key column(s) to join on
:param right_on: the merging dataset key column(s) to join on
:param on: if the left and right join have the same header name this can replace left_on and right_on
:param how: (optional) One of 'left', 'right', 'outer', 'inner'. Defaults to inner. See below for more detailed
description of each method.
:param headers: (optional) a filter on the headers included from the right side
:param suffixes: (optional) A tuple of string suffixes to apply to overlapping columns. Defaults ('', '_dup').
:param indicator: (optional) Add a column to the output DataFrame called _merge with information on the source
of each row. _merge is Categorical-type and takes on a value of left_only for observations whose
merge key only appears in 'left' DataFrame or Series, right_only for observations whose merge key
only appears in 'right' DataFrame or Series, and both if the observation’s merge key is found
in both.
:param validate: (optional) validate : string, default None. If specified, checks if merge is of specified type.
“one_to_one” or “1:1”: checks if merge keys are unique in both left and right datasets.
“one_to_many” or “1:m”: checks if merge keys are unique in left dataset.
“many_to_one” or “m:1”: checks if merge keys are unique in right dataset.
“many_to_many” or “m:m”: allowed, but does not result in checks.
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
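Example (an illustrative sketch only; 'builder' names a hypothetical instance, 'df' a pd.DataFrame and
'customer_details' a hypothetical connector contract name sharing the 'customer_id' key):
    # left join a reference frame on a shared key, only bringing across two of its columns
    df = builder._model_merge(canonical=df, other='customer_details', on='customer_id', how='left',
                              headers=['age', 'gender'])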
"""
# Code block for intent
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
how = how if isinstance(how, str) and how in ['left', 'right', 'outer', 'inner'] else 'inner'
indicator = indicator if isinstance(indicator, bool) else False
suffixes = suffixes if isinstance(suffixes, tuple) and len(suffixes) == 2 else ('', '_dup')
# Filter on the columns
if isinstance(headers, list):
headers.append(right_on if isinstance(right_on, str) else on)
other = Commons.filter_columns(other, headers=headers)
df_rtn = pd.merge(left=canonical, right=other, how=how, left_on=left_on, right_on=right_on, on=on,
suffixes=suffixes, indicator=indicator, validate=validate)
return df_rtn
def _model_concat(self, canonical: Any, other: Any, as_rows: bool=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, shuffle: bool=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param as_rows: (optional) how to concatenate, True adds the connector dataset as rows, False as columns
:param headers: (optional) a filter of headers from the 'other' dataset
:param drop: (optional) to drop or not drop the headers if specified
:param dtype: (optional) a filter on data type for the 'other' dataset. int, float, bool, object
:param exclude: (optional) to exclude or include the data types if specified
:param regex: (optional) a regular expression to search the headers. example '^((?!_amt).)*$)' excludes '_amt'
:param re_ignore_case: (optional) true if the regex should ignore case. Default is False
:param shuffle: (optional) if the rows in the loaded canonical should be shuffled
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
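Example (an illustrative sketch only; 'builder' names a hypothetical instance, 'df' and 'df_other'
pd.DataFrames):
    # append the columns of another frame, excluding any headers containing '_id'
    df = builder._model_concat(canonical=df, other=df_other, as_rows=False, regex='^((?!_id).)*$')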
"""
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
shuffle = shuffle if isinstance(shuffle, bool) else False
as_rows = as_rows if isinstance(as_rows, bool) else False
# Filter on the columns
df_rtn = Commons.filter_columns(df=other, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case, copy=False)
if shuffle:
df_rtn = df_rtn.sample(frac=1, random_state=_seed).reset_index(drop=True)
if canonical.shape[0] <= df_rtn.shape[0]:
df_rtn = df_rtn.iloc[:canonical.shape[0]]
axis = 'index' if as_rows else 'columns'
return pd.concat([canonical, df_rtn], axis=axis)
def _model_dict_column(self, canonical: Any, header: str, convert_str: bool=None, replace_null: Any=None,
seed: int=None) -> pd.DataFrame:
""" takes a column that contains dict and expands them into columns. Note, the column must be a flat dictionary.
Complex structures will not work.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header of the column to be converted
:param convert_str: (optional) if the header has the dict as a string convert to dict using ast.literal_eval()
:param replace_null: (optional) after conversion, replace null values with this value
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: pd.DataFrame
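Example (an illustrative sketch only; 'builder' names a hypothetical instance and 'df' a pd.DataFrame
where 'attributes' holds flat dicts as strings):
    # expand the dict column into separate columns, filling missing keys with zero
    df = builder._model_dict_column(canonical=df, header='attributes', convert_str=True, replace_null=0)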
"""
canonical = self._get_canonical(canonical)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = self._seed() if seed is None else seed
convert_str = convert_str if isinstance(convert_str, bool) else False
# replace NaN with '{}' if the column is strings, otherwise replace with {}
if convert_str:
canonical[header] = canonical[header].fillna('{}').apply(ast.literal_eval)
else:
canonical[header] = canonical[header].fillna({i: {} for i in canonical.index})
# convert the key/values into columns (this is the fastest approach)
result = pd.json_normalize(canonical[header])
if isinstance(replace_null, (int, float, str)):
result.replace(np.nan, replace_null, inplace=True)
return canonical.join(result).drop(columns=[header])
def _model_explode(self, canonical: Any, header: str, seed: int=None) -> pd.DataFrame:
""" takes a single column of list values and explodes the DataFrame so row is represented by each elements
in the row list
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header of the column to be exploded
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The canonical is a pd.DataFrame, a pd.Series or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.DataFrame.
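Example (an illustrative sketch only; 'builder' names a hypothetical instance and 'df' a pd.DataFrame
where 'tags' holds list values):
    # one row per element of the 'tags' list column
    df = builder._model_explode(canonical=df, header='tags')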
"""
canonical = self._get_canonical(canonical)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = self._seed() if seed is None else seed
return canonical.explode(column=header, ignore_index=True)
def _model_sample(self, canonical: Any, sample: Any, columns_list: list=None, exclude_associate: list=None,
auto_transition: bool=None, detail_numeric: bool=None, strict_typing: bool=None,
category_limit: int=None, apply_bias: bool=None, seed: int = None) -> pd.DataFrame:
""" Takes a sample dataset and using analytics, builds a set of synthetic columns that are representative of
the sample but scaled to the size of the canonical
:param canonical:
:param sample:
:param columns_list:
:param exclude_associate:
:param auto_transition:
:param detail_numeric:
:param strict_typing:
:param category_limit:
:param apply_bias:
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
sample = self._get_canonical(sample)
auto_transition = auto_transition if isinstance(auto_transition, bool) else True
columns_list = columns_list if isinstance(columns_list, list) else list(sample.columns)
sample = Commons.filter_columns(sample, headers=columns_list)
if auto_transition:
Transition.from_memory().cleaners.auto_transition(sample, inplace=True)
blob = DataDiscovery.analyse_association(sample, columns_list=columns_list, exclude_associate=exclude_associate,
detail_numeric=detail_numeric, strict_typing=strict_typing,
category_limit=category_limit)
return self._model_analysis(canonical=canonical, analytics_blob=blob, apply_bias=apply_bias, seed=seed)
def _model_script(self, canonical: Any, script_contract: str, seed: int = None) -> pd.DataFrame:
"""Takes a synthetic build script and using analytics, builds a set of synthetic columns that are that are
defined by the build script and scaled to the size of the canonical
:param canonical:
:param script_contract:
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
script = self._get_canonical(script_contract)
type_options = {'number': '_get_number', 'date': '_get_datetime', 'category': 'get_category',
'selection': 'get_selection', 'intervals': 'get_intervals', 'distribution': 'get_distribution'}
script['params'] = script['params'].replace(['', ' '], np.nan)
script['params'].loc[script['params'].isna()] = '[]'
script['params'] = [ast.literal_eval(x) if isinstance(x, str) and x.startswith('[') and x.endswith(']')
else x for x in script['params']]
# replace all other items with list
script['params'] = [x if isinstance(x, list) else [x] for x in script['params']]
script['params'] = script['params'].astype('object')
for index, row in script.iterrows():
method = type_options.get(row['type'])
params = row['params']
canonical[row['name']] = eval(f"self.{method}(size={canonical.shape[0]}, **params)", globals(), locals())
return canonical
def _model_analysis(self, canonical: Any, analytics_blob: dict, apply_bias: bool=None,
seed: int=None) -> pd.DataFrame:
""" builds a set of columns based on an analysis dictionary of weighting (see analyse_association)
if a reference DataFrame is passed then as the analysis is run if the column already exists the row
value will be taken as the reference to the sub category and not the random value. This allows already
constructed association to be used as reference for a sub category.
:param canonical: a pd.DataFrame as the reference dataframe
:param analytics_blob: the analytics blob from DataDiscovery.analyse_association(...)
:param apply_bias: (optional) if dominant values have been excluded, re-include to maintain bias
:param seed: seed: (optional) a seed value for the random function: default to None
:return: a DataFrame
"""
def get_level(analysis: dict, sample_size: int, _seed: int=None):
_seed = self._seed(seed=_seed, increment=True)
for name, values in analysis.items():
if row_dict.get(name) is None:
row_dict[name] = list()
_analysis = DataAnalytics(analysis=values.get('insight', {}))
result_type = object
if str(_analysis.intent.dtype).startswith('cat'):
result_type = 'category'
result = self._get_category(selection=_analysis.intent.categories,
relative_freq=_analysis.patterns.get('relative_freq', None),
seed=_seed, size=sample_size)
elif str(_analysis.intent.dtype).startswith('num'):
result_type = 'int' if _analysis.params.precision == 0 else 'float'
result = self._get_intervals(intervals=[tuple(x) for x in _analysis.intent.intervals],
relative_freq=_analysis.patterns.get('relative_freq', None),
precision=_analysis.params.get('precision', None),
seed=_seed, size=sample_size)
elif str(_analysis.intent.dtype).startswith('date'):
result_type = 'object' if _analysis.params.is_element('data_format') else 'date'
result = self._get_datetime(start=_analysis.stats.lowest,
until=_analysis.stats.highest,
relative_freq=_analysis.patterns.get('relative_freq', None),
date_format=_analysis.params.get('data_format', None),
day_first=_analysis.params.get('day_first', None),
year_first=_analysis.params.get('year_first', None),
seed=_seed, size=sample_size)
else:
result = []
# if the analysis was done with dominance excluded then see if the dominant values should be added back
if apply_bias and _analysis.patterns.is_element('dominant_excluded'):
_dom_percent = _analysis.patterns.dominant_percent/100
_dom_values = _analysis.patterns.dominant_excluded
if len(_dom_values) > 0:
s_values = pd.Series(result, dtype=result_type)
non_zero = s_values[~s_values.isin(_dom_values)].index
choice_size = int((s_values.size * _dom_percent) - (s_values.size - len(non_zero)))
if choice_size > 0:
generator = np.random.default_rng(_seed)
_dom_choice = generator.choice(_dom_values, size=choice_size)
s_values.iloc[generator.choice(non_zero, size=choice_size, replace=False)] = _dom_choice
result = s_values.to_list()
# now add the result to the row_dict
row_dict[name] += result
if sum(_analysis.patterns.relative_freq) == 0:
unit = 0
else:
unit = sample_size / sum(_analysis.patterns.relative_freq)
if values.get('sub_category'):
leaves = values.get('branch', {}).get('leaves', {})
for idx in range(len(leaves)):
section_size = int(round(_analysis.patterns.relative_freq[idx] * unit, 0)) + 1
next_item = values.get('sub_category').get(leaves[idx])
get_level(next_item, section_size, _seed)
return
canonical = self._get_canonical(canonical)
apply_bias = apply_bias if isinstance(apply_bias, bool) else True
row_dict = dict()
seed = self._seed() if seed is None else seed
size = canonical.shape[0]
get_level(analytics_blob, sample_size=size, _seed=seed)
for key in row_dict.keys():
row_dict[key] = row_dict[key][:size]
return pd.concat([canonical, pd.DataFrame.from_dict(data=row_dict)], axis=1)
def _model_encoding(self, canonical: Any, headers: [str, list], encoding: bool=None, ordinal: dict=None,
prefix=None, dtype: Any=None, prefix_sep: str=None, dummy_na: bool=False,
drop_first: bool=False, seed: int=None) -> pd.DataFrame:
""" encodes categorical data types, by default, as dummy encoded but optionally can choose label
encoding
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: the header(s) to apply the encoding to
:param encoding: the type of encoding to apply to the categories, types supported 'dummy', 'ordinal', 'label'
:param ordinal: a dictionary of ordinal encoding. encoding must be 'ordinal', if not mapped then returns null
:param prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
:param prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
:param dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
:param drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
:param dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
:param seed: seed: (optional) a seed value for the random function: default to None
:return: a pd.Dataframe
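Example (an illustrative sketch only; 'builder' names a hypothetical instance and 'df' a pd.DataFrame
with 'gender' and 'rating' columns):
    # dummy (one-hot) encode the 'gender' column
    df = builder._model_encoding(canonical=df, headers='gender', encoding='dummy', prefix='gender')
    # ordinal encode a rating column with an explicit mapping
    df = builder._model_encoding(canonical=df, headers='rating', encoding='ordinal',
                                 ordinal={'low': 1, 'medium': 2, 'high': 3})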
"""
# intend code block on the canonical
canonical = self._get_canonical(canonical)
headers = Commons.list_formatter(headers)
seed = self._seed() if seed is None else seed
encoding = encoding if isinstance(encoding, str) and encoding in ['label', 'ordinal'] else 'dummy'
prefix = prefix if isinstance(prefix, str) else None
prefix_sep = prefix_sep if isinstance(prefix_sep, str) else "_"
dummy_na = dummy_na if isinstance(dummy_na, bool) else False
drop_first = drop_first if isinstance(drop_first, bool) else False
dtype = dtype if dtype else np.uint8
for header in headers:
if canonical[header].dtype.name != 'category':
canonical[header] = canonical[header].astype('category')
if encoding == 'ordinal':
ordinal = ordinal if isinstance(ordinal, dict) else {}
canonical[header] = canonical[header].map(ordinal, na_action='ignore')
elif encoding == 'label':
canonical[f"{prefix}{prefix_sep}{header}"] = canonical[header].cat.codes
if encoding == 'dummy':
dummy_df = pd.get_dummies(canonical, columns=headers, prefix=prefix, prefix_sep=prefix_sep,
dummy_na=dummy_na, drop_first=drop_first, dtype=dtype)
for name in dummy_df.columns:
canonical[name] = dummy_df[name]
return canonical
def _correlate_selection(self, canonical: Any, selection: list, action: [str, int, float, dict],
default_action: [str, int, float, dict]=None, seed: int=None, rtn_type: str=None):
""" returns a value set based on the selection list and the action enacted on that selection. If
the selection criteria is not fulfilled then the default_action is taken if specified, else null value.
If a DataFrame is not passed, the values column is referenced by the header '_default'
:param canonical: a pd.DataFrame as the reference dataframe
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param action: a value or dict to act upon if the select is successful. see below for more examples
An example of an action as a dict: (see 'action2dict(...)')
{'method': 'get_category', 'selection': ['M', 'F', 'U']}
:param default_action: (optional) a default action to take if the selection is not fulfilled
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: value set based on the selection list and the action
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
        Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It
        also helps with building the logic that is executed in order.
        Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
        that executes an intent method such as get_number(). To help build actions there is a helper function called
        action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
            {'method': 'get_category', 'selection': ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
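        A fuller example (illustrative only): selection=[inst.select2dict(column='age', condition=">65")]
        with action='retired' and default_action='active' labels every row with age over 65 as 'retired'
        and every other row as 'active'.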
"""
canonical = self._get_canonical(canonical)
if len(canonical) == 0:
raise TypeError("The canonical given is empty")
if not isinstance(selection, list):
raise ValueError("The 'selection' parameter must be a 'list' of 'dict' types")
if not isinstance(action, (str, int, float, dict)) or (isinstance(action, dict) and len(action) == 0):
raise TypeError("The 'action' parameter is not of an accepted format or is empty")
_seed = seed if isinstance(seed, int) else self._seed()
# prep the values to be a DataFrame if it isn't already
action = deepcopy(action)
selection = deepcopy(selection)
# run the logic
select_idx = self._selection_index(canonical=canonical, selection=selection)
if not isinstance(default_action, (str, int, float, dict)):
default_action = None
rtn_values = self._apply_action(canonical, action=default_action, seed=_seed)
# deal with categories
is_category = False
if rtn_values.dtype.name == 'category':
is_category = True
rtn_values = rtn_values.astype('object')
rtn_values.update(self._apply_action(canonical, action=action, select_idx=select_idx, seed=_seed))
if is_category:
rtn_values = rtn_values.astype('category')
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_custom(self, canonical: Any, code_str: str, seed: int=None, **kwargs):
""" Commonly used for custom list comprehension, takes code string that when evaluated returns a list of values
        When referencing the canonical in the code_str it should be referenced either by the parameter label 'canonical'
or the short cut '@' symbol.
for example:
code_str = "[x + 2 for x in @['A']]" # where 'A' is a header in the canonical
kwargs can also be passed into the code string but must be preceded by a '$' symbol
for example:
code_str = "[True if x == $v1 else False for x in @['A']]" # where 'v1' is a kwargs
:param canonical: a pd.DataFrame as the reference dataframe
:param code_str: an action on those column values. to reference the canonical use '@'
:param seed: (optional) a seed value for the random function: default to None
:param kwargs: a set of kwargs to include in any executable function
        :return: a list (optionally a pd.DataFrame)
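        A fuller example (illustrative only), assuming the canonical has columns 'A' and 'B':
            code_str = "[a * b for a, b in zip(@['A'], @['B'])]"
        returns the element-wise product of the two columns as a list.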
"""
canonical = self._get_canonical(canonical)
_seed = seed if isinstance(seed, int) else self._seed()
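        # expose the kwargs to the eval context and substitute any '$name' placeholders in the code string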
local_kwargs = locals()
for k, v in local_kwargs.pop('kwargs', {}).items():
local_kwargs.update({k: v})
code_str = code_str.replace(f'${k}', str(v))
code_str = code_str.replace('@', 'canonical')
rtn_values = eval(code_str, globals(), local_kwargs)
if rtn_values is None:
return [np.nan] * canonical.shape[0]
return rtn_values
def _correlate_aggregate(self, canonical: Any, headers: list, agg: str, seed: int=None, precision: int=None,
rtn_type: str=None):
""" correlate two or more columns with each other through a finite set of aggregation functions. The
aggregation function names are limited to 'sum', 'prod', 'count', 'min', 'max' and 'mean' for numeric columns
and a special 'list' function name to combine the columns as a list
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: a list of headers to correlate
        :param agg: the aggregation function name to enact. The available functions are:
'sum', 'prod', 'count', 'min', 'max', 'mean' and 'list' which combines the columns as a list
:param precision: the value precision of the return values
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
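        A worked example (illustrative only): with headers=['a', 'b'] and agg='sum', a row holding a=2 and
        b=3 returns 5, while agg='list' returns [2, 3] for the same row.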
"""
canonical = self._get_canonical(canonical)
if not isinstance(headers, list) or len(headers) < 2:
raise ValueError("The headers value must be a list of at least two header str")
if agg not in ['sum', 'prod', 'count', 'min', 'max', 'mean', 'list']:
raise ValueError("The only allowed func values are 'sum', 'prod', 'count', 'min', 'max', 'mean', 'list'")
# Code block for intent
_seed = seed if isinstance(seed, int) else self._seed()
precision = precision if isinstance(precision, int) else 3
if agg == 'list':
return canonical.loc[:, headers].values.tolist()
rtn_values = eval(f"canonical.loc[:, headers].{agg}(axis=1)", globals(), locals()).round(precision)
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_choice(self, canonical: Any, header: str, list_size: int=None, random_choice: bool=None,
replace: bool=None, shuffle: bool=None, convert_str: bool=None, seed: int=None,
rtn_type: str=None):
""" correlate a column where the elements of the columns contains a list, and a choice is taken from that list.
        If the list_size == 1 then a single value is correlated otherwise a list is correlated.
        Null values are passed through but all other elements must be a list with at least one value in it.
        If 'random_choice' is True then all returned values will be a random selection from the list and of equal length.
        If 'random_choice' is False then each list will not exceed the 'list_size'.
        Also, if 'random_choice' is True and 'replace' is False then all lists must have more elements than the list_size.
        By default 'replace' is True and 'shuffle' is False.
        In addition, 'convert_str' allows lists that have been formatted as a string to be converted back to a list
        using 'ast.literal_eval(x)'.
:param canonical: a pd.DataFrame as the reference dataframe
        :param header: The header containing a list to choose from.
:param list_size: (optional) the number of elements to return, if more than 1 then list
:param random_choice: (optional) if the choice should be a random choice.
:param replace: (optional) if the choice selection should be replaced or selected only once
:param shuffle: (optional) if the final list should be shuffled
:param convert_str: if the header has the list as a string convert to list using ast.literal_eval()
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
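        A worked example (illustrative only): for a cell holding [1, 2, 3], list_size=1 returns the first
        element (or a random element when random_choice=True), while list_size=2 returns the first two
        elements as a list.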
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
# Code block for intent
list_size = list_size if isinstance(list_size, int) else 1
random_choice = random_choice if isinstance(random_choice, bool) else False
convert_str = convert_str if isinstance(convert_str, bool) else False
replace = replace if isinstance(replace, bool) else True
shuffle = shuffle if isinstance(shuffle, bool) else False
_seed = seed if isinstance(seed, int) else self._seed()
s_values = canonical[header].copy()
if s_values.empty:
return list()
s_idx = s_values.where(~s_values.isna()).dropna().index
if convert_str:
s_values.iloc[s_idx] = [ast.literal_eval(x) if isinstance(x, str) else x for x in s_values.iloc[s_idx]]
s_values.iloc[s_idx] = Commons.list_formatter(s_values.iloc[s_idx])
generator = np.random.default_rng(seed=_seed)
if random_choice:
try:
s_values.iloc[s_idx] = [generator.choice(x, size=list_size, replace=replace, shuffle=shuffle)
for x in s_values.iloc[s_idx]]
except ValueError:
raise ValueError(f"Unable to make a choice. Ensure {header} has all appropriate values for the method")
s_values.iloc[s_idx] = [x[0] if list_size == 1 else list(x) for x in s_values.iloc[s_idx]]
else:
s_values.iloc[s_idx] = [x[:list_size] if list_size > 1 else x[0] for x in s_values.iloc[s_idx]]
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_join(self, canonical: Any, header: str, action: [str, dict], sep: str=None, seed: int=None,
rtn_type: str=None):
""" correlate a column and join it with the result of the action, This allows for composite values to be
        built. An example might be to take a forename and add the surname with a space separator to create a
        composite name field, or to join two primary keys to create a single composite key.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: an ordered list of columns to join
:param action: (optional) a string or a single action whose outcome will be joined to the header value
:param sep: (optional) a separator between the values
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
        that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
            {'method': 'get_category', 'selection': ['M', 'F', 'U']}
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
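        A fuller example (illustrative only): header='forename' with sep=' ' and
            action=inst.action2dict(method='@header', header='surname')
        builds a composite full-name column from the two name columns.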
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(action, (dict, str)):
raise ValueError(f"The action must be a dictionary of a single action or a string value")
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
# Code block for intent
_seed = seed if isinstance(seed, int) else self._seed()
sep = sep if isinstance(sep, str) else ''
s_values = canonical[header].copy()
if s_values.empty:
return list()
action = deepcopy(action)
null_idx = s_values[s_values.isna()].index
        # ensure the series values are strings before they are joined with the action result
        s_values = s_values.astype(str)
result = self._apply_action(canonical, action=action, seed=_seed)
s_values = pd.Series([f"{a}{sep}{b}" for (a, b) in zip(s_values, result)], dtype='object')
if null_idx.size > 0:
s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_sigmoid(self, canonical: Any, header: str, precision: int=None, seed: int=None,
rtn_type: str=None):
""" logistic sigmoid a.k.a logit, takes an array of real numbers and transforms them to a value
between (0,1) and is defined as
                                        f(x) = 1/(1+exp(-x))
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param precision: (optional) how many decimal places. default to 3
:param seed: (optional) the random seed. defaults to current datetime
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
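        A worked example (illustrative only): a value of 0 maps to 0.5, 2 maps to approximately 0.881 and
        -2 to approximately 0.119, so the output is always bounded within (0, 1).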
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
precision = precision if isinstance(precision, int) else 3
_seed = seed if isinstance(seed, int) else self._seed()
rtn_values = np.round(1 / (1 + np.exp(-s_values)), precision)
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_polynomial(self, canonical: Any, header: str, coefficient: list, seed: int=None,
rtn_type: str=None, keep_zero: bool=None) -> list:
""" creates a polynomial using the reference header values and apply the coefficients where the
        index of the list represents the degree of the term (lowest degree first).
        e.g. [6, -2, 0, 4] => f(x) = 4x**3 - 2x + 6
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param coefficient: the reverse list of term coefficients
:param seed: (optional) the random seed. defaults to current datetime
:param keep_zero: (optional) if True then zeros passed remain zero, Default is False
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
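        A worked example (illustrative only): with coefficient=[1, 0, 2] the polynomial is f(x) = 2x**2 + 1,
        so a header value of 3 correlates to 19.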
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
keep_zero = keep_zero if isinstance(keep_zero, bool) else False
_seed = seed if isinstance(seed, int) else self._seed()
def _calc_polynomial(x, _coefficient):
if keep_zero and x == 0:
return 0
res = 0
for index, coeff in enumerate(_coefficient):
res += coeff * x ** index
return res
rtn_values = s_values.apply(lambda x: _calc_polynomial(x, coefficient))
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_missing(self, canonical: Any, header: str, granularity: [int, float]=None,
as_type: str=None, lower: [int, float]=None, upper: [int, float]=None, nulls_list: list=None,
exclude_dominant: bool=None, replace_zero: [int, float]=None, precision: int=None,
day_first: bool=None, year_first: bool=None, seed: int=None,
rtn_type: str=None):
""" imputes missing data with a weighted distribution based on the analysis of the other elements in the
column
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param granularity: (optional) the granularity of the analysis across the range. Default is 5
int passed - represents the number of periods
float passed - the length of each interval
list[tuple] - specific interval periods e.g []
list[float] - the percentile or quantities, All should fall between 0 and 1
:param as_type: (optional) specify the type to analyse
:param lower: (optional) the lower limit of the number value. Default min()
:param upper: (optional) the upper limit of the number value. Default max()
:param nulls_list: (optional) a list of nulls that should be considered null
:param exclude_dominant: (optional) if overly dominant are to be excluded from analysis to avoid bias (numbers)
:param replace_zero: (optional) with categories, a non-zero minimal chance relative frequency to replace zero
This is useful when the relative frequency of a category is so small the analysis returns zero
:param precision: (optional) by default set to 3.
:param day_first: (optional) if the date provided has day first
:param year_first: (optional) if the date provided has year first
:param seed: (optional) the random seed. defaults to current datetime
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return:
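        A usage sketch (illustrative only): for a numeric column the non-null values are analysed into
        weighted intervals and each null is replaced with a draw from that distribution, so the imputed
        values follow the shape of the observed data.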
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
as_type = as_type if isinstance(as_type, str) else s_values.dtype.name
_seed = seed if isinstance(seed, int) else self._seed()
nulls_list = nulls_list if isinstance(nulls_list, list) else [np.nan, None, 'nan', '', ' ']
if isinstance(nulls_list, list):
s_values.replace(nulls_list, np.nan, inplace=True, regex=True)
null_idx = s_values[s_values.isna()].index
if as_type.startswith('int') or as_type.startswith('float') or as_type.startswith('num'):
_analysis = DataAnalytics(DataDiscovery.analyse_number(s_values, granularity=granularity, lower=lower,
upper=upper, detail_stats=False, precision=precision,
exclude_dominant=exclude_dominant))
s_values.iloc[null_idx] = self._get_intervals(intervals=[tuple(x) for x in _analysis.intent.intervals],
relative_freq=_analysis.patterns.relative_freq,
precision=_analysis.params.precision,
seed=_seed, size=len(null_idx))
elif as_type.startswith('cat'):
_analysis = DataAnalytics(DataDiscovery.analyse_category(s_values, replace_zero=replace_zero))
s_values.iloc[null_idx] = self._get_category(selection=_analysis.intent.categories,
relative_freq=_analysis.patterns.relative_freq,
seed=_seed, size=len(null_idx))
elif as_type.startswith('date'):
_analysis = DataAnalytics(DataDiscovery.analyse_date(s_values, granularity=granularity, lower=lower,
upper=upper, day_first=day_first,
year_first=year_first))
s_values.iloc[null_idx] = self._get_datetime(start=_analysis.intent.lowest,
until=_analysis.intent.highest,
relative_freq=_analysis.patterns.relative_freq,
date_format=_analysis.params.data_format,
day_first=_analysis.params.day_first,
year_first=_analysis.params.year_first,
seed=_seed, size=len(null_idx))
else:
raise ValueError(f"The data type '{as_type}' is not supported. Try using the 'as_type' parameter")
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_numbers(self, canonical: Any, header: str, to_numeric: bool=None, standardize: bool=None,
normalize: tuple=None, offset: [int, float, str]=None, jitter: float=None,
jitter_freq: list=None, precision: int=None, replace_nulls: [int, float]=None,
seed: int=None, keep_zero: bool=None, min_value: [int, float]=None,
max_value: [int, float]=None, rtn_type: str=None):
""" returns a number that correlates to the value given. The jitter is based on a normal distribution
with the correlated value being the mean and the jitter its standard deviation from that mean
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
        :param to_numeric: (optional) ensures numeric type. Non-convertible strings are set to null
:param standardize: (optional) if the column should be standardised
:param normalize: (optional) normalise the column between two values. the tuple is the lower and upper bounds
:param offset: (optional) a fixed value to offset or if str an operation to perform using @ as the header value.
:param jitter: (optional) a perturbation of the value where the jitter is a std. defaults to 0
:param jitter_freq: (optional) a relative freq with the pattern mid point the mid point of the jitter
:param precision: (optional) how many decimal places. default to 3
:param replace_nulls: (optional) a numeric value to replace nulls
:param seed: (optional) the random seed. defaults to current datetime
:param keep_zero: (optional) if True then zeros passed remain zero, Default is False
:param min_value: a minimum value not to go below
:param max_value: a max value not to go above
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
The offset can be a numeric offset that is added to the value, e.g. passing 2 will add 2 to all values.
        If a string is passed its format should be a calculation with the '@' character used to represent the column
value. e.g.
'1-@' would subtract the column value from 1,
'@*0.5' would multiply the column value by 0.5
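        A usage sketch (illustrative only): normalize=(0, 1) rescales the final column into the 0..1 range,
        while standardize=True rescales it to zero mean and unit variance; both are applied after any offset
        and jitter.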
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
if isinstance(to_numeric, bool) and to_numeric:
s_values = pd.to_numeric(s_values.apply(str).str.replace('[$£€, ]', '', regex=True), errors='coerce')
if not (s_values.dtype.name.startswith('int') or s_values.dtype.name.startswith('float')):
raise ValueError(f"The header column is of type '{s_values.dtype.name}' and not numeric. "
f"Use the 'to_numeric' parameter if appropriate")
keep_zero = keep_zero if isinstance(keep_zero, bool) else False
precision = precision if isinstance(precision, int) else 3
_seed = seed if isinstance(seed, int) else self._seed()
if isinstance(replace_nulls, (int, float)):
s_values[s_values.isna()] = replace_nulls
null_idx = s_values[s_values.isna()].index
zero_idx = s_values.where(s_values == 0).dropna().index if keep_zero else []
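        # apply the transformations in order: offset, jitter, min/max capping, standardize then normalize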
if isinstance(offset, (int, float)) and offset != 0:
s_values = s_values.add(offset)
elif isinstance(offset, str):
offset = offset.replace("@", 'x')
s_values = s_values.apply(lambda x: eval(offset))
if isinstance(jitter, (int, float)) and jitter != 0:
sample = self._get_number(-abs(jitter) / 2, abs(jitter) / 2, relative_freq=jitter_freq,
size=s_values.size, seed=_seed)
s_values = s_values.add(sample)
if isinstance(min_value, (int, float)):
if min_value < s_values.max():
min_idx = s_values.dropna().where(s_values < min_value).dropna().index
s_values.iloc[min_idx] = min_value
else:
raise ValueError(f"The min value {min_value} is greater than the max result value {s_values.max()}")
if isinstance(max_value, (int, float)):
if max_value > s_values.min():
max_idx = s_values.dropna().where(s_values > max_value).dropna().index
s_values.iloc[max_idx] = max_value
else:
raise ValueError(f"The max value {max_value} is less than the min result value {s_values.min()}")
if isinstance(standardize, bool) and standardize:
s_values = pd.Series(Commons.list_standardize(s_values.to_list()))
if isinstance(normalize, tuple):
            if len(normalize) != 2 or normalize[0] >= normalize[1]:
raise ValueError("The normalize tuple must be of size 2 with the first value lower than the second")
s_values = pd.Series(Commons.list_normalize(s_values.to_list(), normalize[0], normalize[1]))
# reset the zero values if any
s_values.iloc[zero_idx] = 0
s_values = s_values.round(precision)
if precision == 0 and not s_values.isnull().any():
s_values = s_values.astype(int)
if null_idx.size > 0:
s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_categories(self, canonical: Any, header: str, correlations: list, actions: dict,
default_action: [str, int, float, dict]=None, seed: int=None, rtn_type: str=None):
""" correlation of a set of values to an action, the correlations must map to the dictionary index values.
        Note: to use the current value of the column as a parameter value, pass an empty dict {} as that key's
        value. Likewise, if you want the action outcome to be the current value of the column, pass an empty
        dict {} as the action.
simple correlation list:
['A', 'B', 'C'] # if values is 'A' then action is 0 and so on
multiple choice correlation:
[['A','B'], 'C'] # if values is 'A' OR 'B' then action is 0 and so on
For more complex correlation the selection logic can be used, see notes below.
for actions also see notes below.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
        :param correlations: a list of categories (can also contain lists for multiple correlations).
:param actions: the correlated set of categories that should map to the index
:param default_action: (optional) a default action to take if the selection is not fulfilled
:param seed: a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
        Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It
        also helps with building the logic that is executed in order.
        Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
        that executes an intent method such as get_number(). To help build actions there is a helper function called
        action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
            {'method': 'get_category', 'selection': ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
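        A fuller example (illustrative only): correlations=['M', 'F'] with actions={0: 'Male', 1: 'Female'}
        maps every 'M' in the header column to 'Male' and every 'F' to 'Female', leaving unmatched values to
        the default_action (or null when none is given).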
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = seed if isinstance(seed, int) else self._seed()
actions = deepcopy(actions)
correlations = deepcopy(correlations)
corr_list = []
for corr in correlations:
corr_list.append(Commons.list_formatter(corr))
if not isinstance(default_action, (str, int, float, dict)):
default_action = None
rtn_values = self._apply_action(canonical, action=default_action, seed=_seed)
# deal with categories
if rtn_values.dtype.name == 'category':
rtn_values = rtn_values.astype('object')
s_values = canonical[header].copy().astype(str)
for i in range(len(corr_list)):
action = actions.get(i, actions.get(str(i), -1))
if action == -1:
continue
if isinstance(corr_list[i][0], dict):
corr_idx = self._selection_index(canonical, selection=corr_list[i])
else:
corr_idx = s_values[s_values.isin(map(str, corr_list[i]))].index
rtn_values.update(self._apply_action(canonical, action=action, select_idx=corr_idx, seed=_seed))
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_dates(self, canonical: Any, header: str, offset: [int, dict]=None, jitter: int=None,
jitter_units: str=None, jitter_freq: list=None, now_delta: str=None, date_format: str=None,
min_date: str=None, max_date: str=None, fill_nulls: bool=None, day_first: bool=None,
year_first: bool=None, seed: int=None, rtn_type: str=None):
""" correlates dates to an existing date or list of dates. The return is a list of pd
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
        :param offset: (optional) an offset to the date. if int then assumed a 'days' offset
                        an int or a dictionary of units accepted by pd.DateOffset. e.g. {'days': 1}
:param jitter: (optional) the random jitter or deviation in days
:param jitter_units: (optional) the units of the jitter, Options: 'W', 'D', 'h', 'm', 's'. default 'D'
:param jitter_freq: (optional) a relative freq with the pattern mid point the mid point of the jitter
:param now_delta: (optional) returns a delta from now as an int list, Options: 'Y', 'M', 'W', 'D', 'h', 'm', 's'
        :param min_date: (optional) a minimum date not to go below
        :param max_date: (optional) a max date not to go above
:param fill_nulls: (optional) if no date values should remain untouched or filled based on the list mode date
:param day_first: (optional) if the dates given are day first format. Default to True
:param year_first: (optional) if the dates given are year first. Default to False
:param date_format: (optional) the format of the output
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal size to that given
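        A usage sketch (illustrative only): offset={'days': 7} shifts every date a week forward, jitter=2
        with jitter_units='D' perturbs each date by roughly plus or minus a day, and now_delta='Y' returns
        the absolute difference from now in whole years; otherwise the return is a list of pd.Timestamp
        values, or formatted strings when date_format is given.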
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
values = canonical[header].copy()
if values.empty:
return list()
def _clean(control):
_unit_type = ['years', 'months', 'weeks', 'days', 'leapdays', 'hours', 'minutes', 'seconds']
_params = {}
if isinstance(control, int):
control = {'days': control}
if isinstance(control, dict):
for k, v in control.items():
if k not in _unit_type:
raise ValueError(f"The key '{k}' in 'offset', is not a recognised unit type for pd.DateOffset")
return control
_seed = self._seed() if seed is None else seed
fill_nulls = False if fill_nulls is None or not isinstance(fill_nulls, bool) else fill_nulls
offset = _clean(offset) if isinstance(offset, (dict, int)) else None
if isinstance(now_delta, str) and now_delta not in ['Y', 'M', 'W', 'D', 'h', 'm', 's']:
            raise ValueError(f"the now_delta offset unit '{now_delta}' is not recognised "
                             f"use one of ['Y', 'M', 'W', 'D', 'h', 'm', 's']")
units_allowed = ['W', 'D', 'h', 'm', 's']
jitter_units = jitter_units if isinstance(jitter_units, str) and jitter_units in units_allowed else 'D'
jitter = pd.Timedelta(value=jitter, unit=jitter_units) if isinstance(jitter, int) else None
# set minimum date
_min_date = pd.to_datetime(min_date, errors='coerce', infer_datetime_format=True, utc=True)
if _min_date is None or _min_date is pd.NaT:
_min_date = pd.to_datetime(pd.Timestamp.min, utc=True)
# set max date
_max_date = pd.to_datetime(max_date, errors='coerce', infer_datetime_format=True, utc=True)
if _max_date is None or _max_date is pd.NaT:
_max_date = pd.to_datetime(pd.Timestamp.max, utc=True)
if _min_date >= _max_date:
raise ValueError(f"the min_date {min_date} must be less than max_date {max_date}")
# convert values into datetime
s_values = pd.Series(pd.to_datetime(values.copy(), errors='coerce', infer_datetime_format=True,
dayfirst=day_first, yearfirst=year_first, utc=True))
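        # apply the jitter: the spread is generated in days or seconds and added to the dates as Timedeltas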
if jitter is not None:
if jitter_units in ['W', 'D']:
value = jitter.days
zip_units = 'D'
else:
value = int(jitter.to_timedelta64().astype(int) / 1000000000)
zip_units = 's'
zip_spread = self._get_number(-abs(value) / 2, (abs(value + 1) / 2), relative_freq=jitter_freq,
precision=0, size=s_values.size, seed=_seed)
zipped_dt = list(zip(zip_spread, [zip_units]*s_values.size))
s_values += np.array([pd.Timedelta(x, y).to_timedelta64() for x, y in zipped_dt])
if fill_nulls:
generator = np.random.default_rng(seed=_seed)
s_values = s_values.fillna(generator.choice(s_values.mode()))
null_idx = s_values[s_values.isna()].index
if isinstance(offset, dict) and offset:
s_values = s_values.add(pd.DateOffset(**offset))
if _min_date > pd.to_datetime(pd.Timestamp.min, utc=True):
            if _min_date < s_values.max():
min_idx = s_values.dropna().where(s_values < _min_date).dropna().index
s_values.iloc[min_idx] = _min_date
else:
raise ValueError(f"The min value {min_date} is greater than the max result value {s_values.max()}")
if _max_date < pd.to_datetime(pd.Timestamp.max, utc=True):
            if _max_date > s_values.min():
max_idx = s_values.dropna().where(s_values > _max_date).dropna().index
s_values.iloc[max_idx] = _max_date
else:
raise ValueError(f"The max value {max_date} is less than the min result value {s_values.min()}")
if now_delta:
s_values = (s_values.dt.tz_convert(None) - pd.Timestamp('now')).abs()
s_values = (s_values / np.timedelta64(1, now_delta))
s_values = s_values.round(0) if null_idx.size > 0 else s_values.astype(int)
else:
if isinstance(date_format, str):
s_values = s_values.dt.strftime(date_format)
else:
s_values = s_values.dt.tz_convert(None)
if null_idx.size > 0:
                s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_discrete(self, canonical: Any, header: str, granularity: [int, float, list]=None,
lower: [int, float]=None, upper: [int, float]=None, categories: list=None,
precision: int=None, seed: int=None) -> list:
""" converts continuous representation into discrete representation through interval categorisation
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param granularity: (optional) the granularity of the analysis across the range. Default is 3
int passed - represents the number of periods
float passed - the length of each interval
list[tuple] - specific interval periods e.g []
list[float] - the percentile or quantities, All should fall between 0 and 1
:param lower: (optional) the lower limit of the number value. Default min()
:param upper: (optional) the upper limit of the number value. Default max()
:param precision: (optional) The precision of the range and boundary values. by default set to 5.
        :param categories: (optional) a set of labels the same length as the intervals to name the categories
:return: a list of equal size to that given
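        A worked example (illustrative only): granularity=3 on an integer column ranging 0..9 yields three
        intervals labelled '0->3', '3->6' and '6->9' (or the supplied categories), and each value is replaced
        by the label of the interval it falls in.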
"""
# exceptions check
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = seed if isinstance(seed, int) else self._seed()
        # intent code block on the canonical
granularity = 3 if not isinstance(granularity, (int, float, list)) or granularity == 0 else granularity
precision = precision if isinstance(precision, int) else 5
# firstly get the granularity
lower = canonical[header].min() if not isinstance(lower, (int, float)) else lower
upper = canonical[header].max() if not isinstance(upper, (int, float)) else upper
if lower >= upper:
upper = lower
granularity = [(lower, upper, 'both')]
if isinstance(granularity, (int, float)):
# if granularity float then convert frequency to intervals
if isinstance(granularity, float):
# make sure frequency goes beyond the upper
_end = upper + granularity - (upper % granularity)
periods = pd.interval_range(start=lower, end=_end, freq=granularity).drop_duplicates()
periods = periods.to_tuples().to_list()
granularity = []
while len(periods) > 0:
period = periods.pop(0)
if len(periods) == 0:
granularity += [(period[0], period[1], 'both')]
else:
granularity += [(period[0], period[1], 'left')]
# if granularity int then convert periods to intervals
else:
periods = pd.interval_range(start=lower, end=upper, periods=granularity).drop_duplicates()
granularity = periods.to_tuples().to_list()
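        # if granularity is a list then standardise it into (lower, upper, closed) interval tuples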
if isinstance(granularity, list):
if all(isinstance(value, tuple) for value in granularity):
if len(granularity[0]) == 2:
granularity[0] = (granularity[0][0], granularity[0][1], 'both')
granularity = [(t[0], t[1], 'right') if len(t) == 2 else t for t in granularity]
elif all(isinstance(value, float) and 0 < value < 1 for value in granularity):
quantiles = list(set(granularity + [0, 1.0]))
boundaries = canonical[header].quantile(quantiles).values
boundaries.sort()
granularity = [(boundaries[0], boundaries[1], 'both')]
granularity += [(boundaries[i - 1], boundaries[i], 'right') for i in range(2, boundaries.size)]
else:
                granularity = [(lower, upper, 'both')]
granularity = [(np.round(p[0], precision), np.round(p[1], precision), p[2]) for p in granularity]
# now create the categories
conditions = []
for interval in granularity:
lower, upper, closed = interval
if str.lower(closed) == 'neither':
conditions.append((canonical[header] > lower) & (canonical[header] < upper))
elif str.lower(closed) == 'right':
conditions.append((canonical[header] > lower) & (canonical[header] <= upper))
elif str.lower(closed) == 'both':
conditions.append((canonical[header] >= lower) & (canonical[header] <= upper))
else:
conditions.append((canonical[header] >= lower) & (canonical[header] < upper))
if isinstance(categories, list) and len(categories) == len(conditions):
choices = categories
else:
if canonical[header].dtype.name.startswith('int'):
choices = [f"{int(i[0])}->{int(i[1])}" for i in granularity]
else:
choices = [f"{i[0]}->{i[1]}" for i in granularity]
# noinspection PyTypeChecker
return np.select(conditions, choices, default="<NA>").tolist()
"""
UTILITY METHODS SECTION
"""
@staticmethod
def _convert_date2value(dates: Any, day_first: bool = True, year_first: bool = False):
values = pd.to_datetime(dates, errors='coerce', infer_datetime_format=True, dayfirst=day_first,
yearfirst=year_first)
return mdates.date2num(pd.Series(values)).tolist()
@staticmethod
def _convert_value2date(values: Any, date_format: str=None):
dates = []
for date in mdates.num2date(values):
date = | pd.Timestamp(date) | pandas.Timestamp |
try:
# Error handling if something happens during script initialisation
from csv import QUOTE_ALL # Needed to export data to CSV
from bs4 import BeautifulSoup # Needed to parse the dynamic webpage of the Ducanator
from requests import get # Needed to get the webpage of the Ducanator
from re import search # Needed to find the json string to import into pandas
from pandas import read_csv, set_option, merge, to_numeric, DataFrame, read_json, read_html, ExcelWriter # Needed to convert the json string into a usable dataframe object for manipulation
from traceback import format_exc # Needed for more friendly error messages.
from openpyxl import load_workbook
from numpy import arange
from json import loads, dumps
from re import compile
from time import sleep, time
from os import path
except ModuleNotFoundError:
print('OOPSIE WOOPSIE!! Uwu We made a fucky wucky!! A wittle fucko boingo! The code monkeys at our headquarters are working VEWY HAWD to fix this!')
print('You didn\'t install the packages like I told you to. Please run \"pip install bs4 requests pandas\" in a cmd window to install the required packages!')
print('\033[1;31m' + format_exc())
exit(1)
def get_items(retry_attempts):
url_items = 'https://api.warframe.market/v1/items'
for x in range(0, retry_attempts):
try:
item_json = get(url_items).json()
break
except Exception:
print('Item data download failed, retrying... ' + str(retry_attempts - x - 1) + ' attempts left...', end='\r')
item_json = item_json['payload']['items']
df_items_get_items = DataFrame(item_json)
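    # strip any bracketed qualifier from the item names, e.g. "Item (Key)" becomes "Item"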
df_items_get_items['item_name'] = df_items_get_items['item_name'].replace(to_replace=r' \(.+\)', value='', regex=True)
df_items_get_items = df_items_get_items.drop(columns=['thumb'])
df_items_get_items.to_csv(csv_name, index=None, quoting=QUOTE_ALL)
return df_items_get_items
def standing_to_plat_syndicates(url_syndicate_fragment, df_items_local, collapsible_regex, retry_attempts, res_per_syndicate, include_offline, order_type):
try:
print('Processing ' + url_syndicate_fragment.replace('_', ' ') + ' Warframe.Market Orders')
workbook_name = 'StandingToPlat.xlsx'
sheet_name_standing_to_plat_data = 'Standing To Plat Data'
url_syndicate = 'https://warframe.fandom.com/wiki/' + url_syndicate_fragment
soup = BeautifulSoup(get(url_syndicate).content, "html.parser").find('div', {'id': collapsible_regex}).find_all('span')
item_list = []
cost_list = []
for count1, elem1 in enumerate(soup):
if count1 % 2 == 0:
cost_list.append(elem1.text)
else:
item_list.append(elem1.text)
df_syndicate_item = | DataFrame(item_list) | pandas.DataFrame |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class DummyArrayLike(object):
"""Dummy array-like object."""
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
def fail_on_convert(x, **kwargs):
_ = x
_ = kwargs
raise TypeError('Cannot convert DummyArrayLike to a tensor')
ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
class DataAdapterTestBase(keras_parameterized.TestCase):
def setUp(self):
super(DataAdapterTestBase, self).setUp()
self.batch_size = 5
self.numpy_input = np.zeros((50, 10))
self.numpy_target = np.ones(50)
self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
self.tensor_target = array_ops.ones((50,))
self.arraylike_input = DummyArrayLike(self.numpy_input)
self.arraylike_target = DummyArrayLike(self.numpy_target)
self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
(self.numpy_input, self.numpy_target)).shuffle(50).batch(
self.batch_size)
def generator():
while True:
yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
self.generator_input = generator()
self.iterator_input = data_utils.threadsafe_generator(generator)()
self.sequence_input = TestSequence(batch_size=self.batch_size,
feature_shape=10)
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation='softmax')])
class TestSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape):
self.batch_size = batch_size
self.feature_shape = feature_shape
def __getitem__(self, item):
return (np.zeros((self.batch_size, self.feature_shape)),
np.ones((self.batch_size,)))
def __len__(self):
return 10
class TensorLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(TensorLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.TensorLikeDataAdapter
def test_can_handle_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_batch_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.batch_size(), 5)
def test_partial_batch_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=4)
self.assertEqual(adapter.get_size(), 13) # 50/4
self.assertTrue(adapter.has_partial_batch())
self.assertEqual(adapter.partial_batch_size(), 2)
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)
def test_can_handle_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)))
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0]))
self.assertTrue(
self.adapter_cls.can_handle(
pd.DataFrame(self.numpy_input),
pd.DataFrame(self.numpy_input)[0]))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
input_a = keras.Input(shape=(3,), name='input_a')
input_b = keras.Input(shape=(3,), name='input_b')
input_c = keras.Input(shape=(1,), name='input_b')
x = keras.layers.Dense(4, name='dense_1')(input_a)
y = keras.layers.Dense(3, name='dense_2')(input_b)
z = keras.layers.Dense(1, name='dense_3')(input_c)
model_1 = keras.Model(inputs=input_a, outputs=x)
model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
model_3 = keras.Model(inputs=input_c, outputs=z)
model_1.compile(optimizer='rmsprop', loss='mse')
model_2.compile(optimizer='rmsprop', loss='mse')
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
input_a_df = | pd.DataFrame(input_a_np) | pandas.DataFrame |
#! /usr/bin/env python3
###############################################################################
import sys
import os
import argparse
import logging
from datetime import date
import time
import requests
import textwrap
import pandas as pd
import pprint
from lib import utils as ut
###############################################################################
program_name = os.path.basename(__file__)
VERSION = 0.2
logger = logging.getLogger(__name__)
###############################################################################
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
print('\n\033[1;33mError: {}\x1b[0m\n'.format(message))
print("invoke \033[1m{} -h\033[0m for help\n".format(os.path.basename(__file__)))
self.print_help(sys.stderr)
# self.exit(2, '%s: error: %s\n' % (self.prog, message))
self.exit(2)
# def format_usage(self):
# usage = super()
# return "CUSTOM"+usage
###############################################################################
# Subroutines
# ------------------------------------------------------------------------------
def parser_add_arguments():
"""
    Parse command line parameters
"""
date_now = date.today().strftime('%Y%m%d')
parser = ArgumentParser(
prog=program_name,
description=textwrap.dedent('''\
Utility for interacting with the batch requests API of internet.nl
The following commands are supported:
\033[1m sub\033[0m - submit a new batch request
\033[1m list\033[0m - list all or some of the batch requests
\033[1m stat\033[0m - get the status of a specific request
\033[1m get\033[0m - retrieve the results of a request
\033[1m del\033[0m - delete a request
'''),
epilog=textwrap.dedent('''\
\033[1mConfiguration\033[0m
The configuration (endpoint and credentials) are taken from the first section of the
\033[3mbatch-request.conf\033[0m configuration file. You can specify other sections to use with
the \033[3m-p\033[0m option. If username or password are missing then you are prompted for them.
If the endpoint is missing then the default internet.nl endpoint is used.
If there is no configuration file then the default internet.nl endpoint is used
and you are prompted for a username and password.
'''),
# \033[1mSome examples:\033[0m
#
# \033[3m./%(prog)s\033[0m
# list the details of all the measurement batches
#
# \033[3m./%(prog)s list 4 \033[0m
# list the details of the last four measurement batches
#
# \033[3m./%(prog)s list 4 -p \033[0m
# same, but prompts you for username and password
#
# \033[3m./%(prog)s list 4 -p dev\033[0m
# same, takes configuration from \033[3m[dev]\033[0m section in \033[3mbatch-request.conf\033[0m
#
# \033[3m./%(prog)s stat 02e19b69317a4aa2958980312754de52\033[0m
# get the status of batch 02e19b69317a4aa2958980312754de52
#
# \033[3m./%(prog)s get 02e19b69317a4aa2958980312754de52\033[0m
# get the json results of batch 02e19b69317a4aa2958980312754de52
formatter_class=argparse.RawTextHelpFormatter, )
parser.add_argument("command",
help=textwrap.dedent('''\
the request to execute
'''),
action="store",
# nargs="?",
choices=["sub", "list", "stat", "get", "del", ],
# default="list")
)
parser.add_argument('parameter', nargs='?', metavar='parameter',
help=textwrap.dedent('''\
extra parameter for the request, type and meaning depend on the request:
\033[1msub\033[0m (required): \033[1m{web|mail}\033[0m the type of measurement to submit
\033[1mlist\033[0m (optional): the number of items to list (default: 0 = all)
\033[1mstat,get,del\033[0m (required): the request_id for the \033[1mstat,get or del\033[0m request
'''))
parser.add_argument("-d",
metavar='FILE',
help=textwrap.dedent('''\
the domains xlsx file to use for the sub request (mandatory)
the domains file is used to get all the domains to be tested
the database for each processed domain. This is typically the same file as used
for submitting domains to internet.nl, containing a \033[3mweb\033[0m column with the domains
for a website test and a \033[3mmail\033[0m column with the domains for a mail test.
metadata will be retrieved from the column with the name \033[3mtype\033[0m, unless another
name or names are provided with the \033[3m-m\033[0m argument
web, mail and the metadata column(s) need to aligned, e.g. \033[3m'www.foo.org',
'foo.org', 'foo metadata'\033[0m must be in the same row in their respective web, mail
and metadata column.
'''),
action="store")
parser.add_argument("-s",
metavar='sheet_name',
help=textwrap.dedent('''\
the name of the sheet in FILE to use. Only effective in combination with the
\033[3m-d\033[0m argument for the \033[3msub\033[0m command
'''),
action="store")
parser.add_argument("-n",
metavar='name',
help=textwrap.dedent('''\
the name of the measurement submission request. Only effective in combination
with the \033[3msub\033[0m command (default: use current date of {})
'''.format(date_now)),
action="store")
parser.add_argument('-o',
action="store",
metavar='output file',
help=textwrap.dedent('''\
filename to store the measurement results in for a \033[3mget\033[0m command.
Default is to show the results on screen.
'''))
parser.add_argument('-p',
action="store",
nargs="?",
const='',
metavar='SECTION',
help=textwrap.dedent('''\
get the credentials from the specified \033[3m[SECTION]\033[0m in the configuration file
just the option without SECTION will prompt you for username and password
'''))
parser.add_argument("-v", "--verbose",
help="more verbose output",
action="store_true")
parser.add_argument("--debug",
help="show debug output",
action="store_true")
parser.add_argument("-V", "--version",
help="print version and exit",
action="version",
version='%(prog)s (version {})'.format(VERSION))
# parser.usage = "duckdb-to-graphs.py [-q] [-t TYPE] database [N]"
return parser
# ------------------------------------------------------------------------------
class CustomConsoleFormatter(logging.Formatter):
"""
Log facility format
"""
def format(self, record):
# info = '\033[0;32m'
info = ''
warning = '\033[0;33m'
error = '\033[1;33m'
debug = '\033[1;34m'
reset = "\x1b[0m"
# formatter = "%(levelname)s - %(message)s"
formatter = "%(message)s"
if record.levelno == logging.INFO:
log_fmt = info + formatter + reset
self._style._fmt = log_fmt
elif record.levelno == logging.WARNING:
log_fmt = warning + formatter + reset
self._style._fmt = log_fmt
elif record.levelno == logging.ERROR:
log_fmt = error + formatter + reset
self._style._fmt = log_fmt
elif record.levelno == logging.DEBUG:
# formatter = '%(asctime)s %(levelname)s [%(filename)s.py:%(lineno)s/%(funcName)s] %(message)s'
formatter = '%(levelname)s [%(filename)s.py:%(lineno)s/%(funcName)s] %(message)s'
log_fmt = debug + formatter + reset
self._style._fmt = log_fmt
else:
self._style._fmt = formatter
return super().format(record)
# ------------------------------------------------------------------------------
def get_logger(args):
logger = logging.getLogger(__name__)
# Create handlers
console_handler = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = CustomConsoleFormatter()
console_handler.setFormatter(formatter)
if args.verbose:
logger.setLevel(logging.INFO)
if args.debug:
logger.setLevel(logging.DEBUG)
# add handlers to the logger
logger.addHandler(console_handler)
return logger
# ------------------------------------------------------------------------------
def call_API(action, credentials, request_id):
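    """
    Perform the HTTP request for the given batch API action ('list', 'stat',
    'get' or 'del') and return the resulting requests.Response object.
    """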
if action == 'list':
r = requests.get(credentials['endpoint'],
params={'limit': request_id},
auth=(credentials['username'], credentials['password']))
elif action == 'stat':
r = requests.get(credentials['endpoint'] + '/' + request_id,
auth=(credentials['username'], credentials['password']))
elif action == 'get':
r = requests.get(credentials['endpoint'] + '/' + request_id + '/results',
auth=(credentials['username'], credentials['password']))
elif action == 'del':
r = requests.patch(credentials['endpoint'] + '/' + request_id,
auth=(credentials['username'], credentials['password']))
    else:
        raise ValueError(f'unknown API action: {action}')
    return r
###############################################################################
def main():
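    """Entry point: parse the command line arguments and configure logging."""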
global VERBOSE, DEBUG, logger
parser = parser_add_arguments()
args = parser.parse_args()
VERBOSE = args.verbose
DEBUG = args.debug
logger = get_logger(args)
pp = pprint.PrettyPrinter(indent=4)
request_id = '0'
domainsFile = None
sheet_name = 0
action = args.command
parameter = args.parameter
domains = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Merge individual result files with the planned experimental design to create a single all-encompassing
dataframe with experiment and results.
"""
import click
import logging
import pandas as pd
import glob
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn experimental data from (../interim) into
cleaned data ready to be analyzed (saved in ../processed).
Parameters:
----------
input_filepath: string
location of experimental data
output_filepath: string
destination for processed results csv file
Returns:
-------
None:
writes results dataframe to csv in processed folder
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from generated experimental data')
# initialize list to store results from experiment runs
df_list = []
result_paths = glob.glob(f"{input_filepath}/run*_results.txt")
# read in dataframe results
for path in result_paths:
exp_result = pd.read_json(path, orient = 'index')
exp_result = exp_result.transpose() # transpose to be in row wise format
df_list.append(exp_result)
# concatenate into one df for analysis
raw_results = pd.concat(df_list).sort_values('run_number')
# rename columns to match exp design titles
raw_results.columns = ["run_id","job_name", "train_time", "billable_seconds","f1"]
raw_results = raw_results.reset_index(drop=True)
# read in exp design to match experiments to the raw results
exp_design = | pd.read_csv(f'{input_filepath}/experimental_design.csv') | pandas.read_csv |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
    def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = | pd.concat([a, b, c], axis=1) | pandas.concat |
import pandas as pd
import re
# Creating `text_id` column from index
def make_text_id(df):
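    """Copy the dataframe index into an explicit `text_id` column and reorder the columns."""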
df["text_id"] = df.index
df = df[["text_id", "article", "highlights"]]
return df
def split_into_2_dfs(df):
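    """Split the dataframe into an article frame and a highlights frame, both keyed by `text_id`."""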
    # use .copy() so later column assignments do not trigger SettingWithCopyWarning
    df_1 = df[["text_id", "article"]].copy()
    df_2 = df[["text_id", "highlights"]].copy()
return df_1, df_2
def split_sentences(text):
# Segment texts into sentences
r_sentence_boundary = re.compile(
r"\s?[.!?]\s?"
) # Modify this to not include abbreviations and other exceptions
return r_sentence_boundary.split(text)[:-1]
# Split text by sentences
def split_by_sentence(df):
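    """Segment each article into sentences and store them in a new `sentences` column."""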
df["sentences"] = df["article"].apply(lambda x: split_sentences(str(x)))
# Make a list of (text_id, sentence_list) pairs
def tup_list_maker(tup_list):
"""
Takes a list of tuples with index 0 being the text_id and index 1 being a
list of sentences and broadcasts the text_id to each sentence
"""
final_list = []
for item in tup_list:
index = item[0]
sentences = item[1]
for sentence in sentences:
pair = (index, sentence)
final_list.append(pair)
return final_list
# Create a list of tuples pairing each text_id with its list of sentences, then flatten to one row per sentence
def create_full_tuple(df):
tuples = list(zip(df["text_id"], [sentence for sentence in df["sentences"]]))
tup_list = tup_list_maker(tuples)
# Converting the tuples list into a dataframe
sentences = pd.DataFrame(tup_list, columns=["text_id", "sentence"])
return sentences
# Create full dataframe
def create_full_final_dataframe(df):
"""
    Creates the final segmented dataframe with the `is_summary_sentence` column
"""
dataframe = make_text_id(df)
df_article, df_highlights = split_into_2_dfs(dataframe)
df_article["sentences"] = df_article["article"].apply(
lambda x: split_sentences(str(x))
)
df_highlights["sentences"] = df_highlights["highlights"].apply(
lambda x: split_sentences(str(x))
)
segmented_df_articles = create_full_tuple(df_article)
segmented_df_highlights = create_full_tuple(df_highlights)
# Create targets for dataframes
segmented_df_articles["is_summary_sentence"] = 0
segmented_df_highlights["is_summary_sentence"] = 1
# Stack the 2 dataframes and order by `text_id` column
return segmented_df_articles.append(
segmented_df_highlights, ignore_index=True
).sort_values(by=["text_id"])
if __name__ == "__main__":
# Load data
train = | pd.read_csv("data/interim/cnn_dm_train.csv.gz", compression="gzip") | pandas.read_csv |
import os
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian
import pandas as pd
from pandas import DataFrame, HDFStore, Series, _testing as tm, read_hdf
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io import pytables as pytables
from pandas.io.pytables import ClosedFileError, PossibleDataLossError, Term
pytestmark = pytest.mark.single
def test_mode(setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
msg = r"[\S]* does not exist"
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
with HDFStore(path, mode=mode) as store:
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
msg = (
"mode w is not allowed while performing a read. "
r"Allowed modes are r, r\+ and a."
)
with pytest.raises(ValueError, match=msg):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
msg = (
r"Re-opening the file \[[\S]*\] with mode \[a\] will delete the "
"current file!"
)
# invalid mode change
with pytest.raises(PossibleDataLossError, match=msg):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(setup_path):
with tm.ensure_clean(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_complibs_default_settings(setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame({"A": "foo", "B": "bar"}, index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
tm.assert_series_equal(s_nan, retr)
def test_multiple_open_close(setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
msg = (
r"The file [\S]* is already opened\. Please close it before "
r"reopening in write mode\."
)
with pytest.raises(ValueError, match=msg):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
msg = r"[\S]* file is not open!"
with pytest.raises(ClosedFileError, match=msg):
store.keys()
with pytest.raises(ClosedFileError, match=msg):
"df" in store
with pytest.raises(ClosedFileError, match=msg):
len(store)
with pytest.raises(ClosedFileError, match=msg):
store["df"]
with pytest.raises(ClosedFileError, match=msg):
store.select("df")
with pytest.raises(ClosedFileError, match=msg):
store.get("df")
with pytest.raises(ClosedFileError, match=msg):
store.append("df2", df)
with pytest.raises(ClosedFileError, match=msg):
store.put("df3", df)
with pytest.raises(ClosedFileError, match=msg):
store.get_storer("df2")
with pytest.raises(ClosedFileError, match=msg):
store.remove("df2")
with pytest.raises(ClosedFileError, match=msg):
store.select("df")
msg = "'HDFStore' object has no attribute 'df'"
with pytest.raises(AttributeError, match=msg):
store.df
def test_fspath():
with | tm.ensure_clean("foo.h5") | pandas._testing.ensure_clean |
from PyQt5 import QtWidgets as Qtw
from PyQt5 import QtCore as Qtc
from PyQt5 import QtGui as Qtg
from datetime import datetime, timedelta
from bu_data_model import BU366
import sys
import socket
import time
import pandas as pd
from openpyxl.chart import ScatterChart, Reference, Series
class CheckingThread(Qtc.QThread):
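    """
    Worker thread that polls a BU366 unit until the test end time, collects the
    per-TU radio metrics and reports them back through the Qt signals
    (answer_thread, running_state, remaining_time).
    """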
answer_thread = Qtc.pyqtSignal(str, list)
running_state = Qtc.pyqtSignal(str)
remaining_time = Qtc.pyqtSignal(str)
error_threads = Qtc.pyqtSignal(str)
running_threads = {}
def __init__(self, threadid_, n366, total_time, polling_interval, poll_rest):
Qtc.QThread.__init__(self)
self.threadid = threadid_
self.name = n366.name
self.n366 = n366
self.total_time = total_time
self.polling_inteval = polling_interval
self.poll_rest = timedelta(seconds=poll_rest)
self.next_poll = datetime.now()
self.end = datetime.now() + timedelta(minutes=total_time)
self.poll_rest_flag = False
if poll_rest > 0:
self.poll_rest_flag = True
def run(self): # we run iterating over time until the test is over
time_ = datetime.now() # get the time now for the loop
self.running_threads[f'{self.name}'] = self # we add the object to the queue to be able to stop it later
while time_ < self.end: # main loop until end time is bigger than current time
self.remaining_time.emit(f'{self.end - time_}') # we update the remaining time of the test via signal
self.running_state.emit('°R') # we update the status to °R via a signal
            try:  # check whether the connection to the DN is still active
                self.n366.check_active()  # poll the DN and store the metric values in the dataframes
            except Exception:  # connection lost, try to reconnect
                self.running_state.emit('°RC')
while not self.n366.connection_state and datetime.now() < self.end: # while there is no connection we try to reconnect
for tu_name_disconnect in self.n366.tus: # updates display to show the disconnection status
tu_item = self.n366.tus[tu_name_disconnect]
self.answer_thread.emit(tu_name_disconnect, [ # emit list with values
-1, # set local sector
-100, # set RSSI
0, # set SNR
0, # set RXMCS
0, # set TXMCS
0, # set RX PER
0, # set TX PER
0, # set RX MCS DR
0, # set TX MCS DR
tu_item.get_availability(),
tu_item.get_disconnection_counter(),
tu_item.get_disconnection_ldt(),
tu_item.get_disconnection_lds(),
tu_item.get_disconnection_tdt(),
False,
0,
])
self.n366.connect(self.n366.username, self.n366.password, 22, 1)
# mini loop to fill in the disconnection time for each TU. We get disconnection start from object
# and disconnection end at that time
except_disconnection_start = self.n366.disconnection_start
except_disconnection_end = datetime.now()
while except_disconnection_start < except_disconnection_end: # while there is time between both events
for tu_name_reconnect in self.n366.tus: # updates display to show the disconnection status
except_tu_item = self.n366.tus[tu_name_reconnect] # get each TU
# create a record with the disconnection parameters
record = {'Local Sector': 0, 'RSSI': -100, 'SNR': 0, 'MCS-RX': 0, 'MCS-TX': 0,
'MCS-DR-RX': 0, 'MCS-DR-TX': 0, 'Power Index': 0}
record_series = pd.Series(record, name=except_disconnection_start)
# add the record of the disconnection
except_tu_item.parameters_df = except_tu_item.parameters_df.append(record_series)
# add the time for the next event
except_disconnection_start = except_disconnection_start + self.poll_rest
                continue  # jump over the loop to try to parse the active connection as we had no
                # connection established
tu_counter = len(self.n366.tus)
for tu_name in self.n366.tus:
tu_item = self.n366.tus[tu_name]
self.answer_thread.emit(tu_name, [ # emit list with values
tu_item.get_local_sector(),
tu_item.get_rssi(),
tu_item.get_snr(),
tu_item.get_rxmcs(),
tu_item.get_txmcs(),
tu_item.get_rxspeednum(),
tu_item.get_txspeednum(),
tu_item.get_rxmcsdr(),
tu_item.get_txmcsdr(),
tu_item.get_availability(),
tu_item.get_disconnection_counter(),
tu_item.get_disconnection_ldt(),
tu_item.get_disconnection_lds(),
tu_item.get_disconnection_tdt(),
tu_item.get_connection_status(),
tu_item.get_power_index(),
])
if self.poll_rest_flag and self.next_poll < time_ and tu_counter >= 0:
if tu_item.connection_state:
record = {'Local Sector': tu_item.get_local_sector(),
'RSSI': tu_item.get_rssi(), 'SNR': tu_item.get_snr(),
'MCS-RX': tu_item.get_rxmcs(), 'MCS-TX': tu_item.get_txmcs(),
'MCS-DR-RX': tu_item.get_rxmcsdr(), 'MCS-DR-TX': tu_item.get_txmcsdr(),
'Power Index': tu_item.get_power_index()}
else:
record = {'Local Sector': 0, 'RSSI': -100, 'SNR': 0, 'MCS-RX': 0, 'MCS-TX': 0,
'MCS-DR-RX': 0, 'MCS-DR-TX': 0, 'Power Index': 0}
record_series = | pd.Series(record, name=time_) | pandas.Series |
import numpy as np
import re
import pandas as pd
from nova.utils import CalcVol
import logging
# create logger
module_logger = logging.getLogger('NOVA.datastruct')
class DataStruct(object):
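    """
    Collects model information, emission lines, ionic fractions, abundances and
    input parameters from a pyCloudy model and exposes them as pandas DataFrames
    in `df_tables`.
    """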
def __init__(self, model):
"""
:param model: pyCloudy Model object
"""
self.logger = logging.getLogger('NOVA.datastruct.DataStruct')
self.logger.info('creating an instance of DataStruct')
self.model = model
self._model_id = self.model.model_name_s.split("_")[0]
self._model_info = {}
self._lines = {"line_id": [], "value": [], "model_id": []}
self._ionic_frac = {"model_id": [], "ion": [], "value": []}
self._abundance = {"model_id": [], "elem": [], "value": []}
self._model_input = {"model_id": None, "input": None}
self._params = {"model_id": [], "param": [], "value": []}
self.df_tables = {"model_info": None, "lines": None, "ionic_frac": None, "abund": None, "model_input": None}
def build(self):
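        """Populate all intermediate tables and build the pandas DataFrames in `df_tables`."""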
self._tab_model_info()
self._tab_lines()
self._tab_ionic_frac()
self._tab_abundance()
self._tab_model_input()
self._tab_model_params()
self._make_df()
def _make_df(self):
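        """Convert the collected dictionaries into the pandas DataFrames stored in `df_tables`."""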
self.df_tables = {"model_info": pd.DataFrame(self._model_info, index=[0]),
"lines": pd.DataFrame(self._lines),
"ionic_frac": pd.DataFrame(self._ionic_frac),
"abund": pd.DataFrame(self._abundance),
"model_input": | pd.DataFrame(self._model_input, index=[0]) | pandas.DataFrame |