| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
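Each row of the table pairs a code prompt (a source-file prefix) with its completion and the fully qualified pandas api that the completion calls; the rows are rendered inline below. A minimal sketch of that row shape, with the example values abbreviated from one of the rows further down:

import pandas as pd

# One abbreviated example row with the three columns described above.
rows = pd.DataFrame([{
    "prompt": "user_list = [...]  # code prefix, truncated here",
    "completion": "pd.DataFrame(user_list)",
    "api": "pandas.DataFrame",
}])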
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
lreshape,
melt,
wide_to_long,
)
import pandas._testing as tm
class TestMelt:
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df["id1"] = (self.df["A"] > 0).astype(np.int64)
self.df["id2"] = (self.df["B"] > 0).astype(np.int64)
self.var_name = "var"
self.value_name = "val"
self.df1 = DataFrame(
[
[1.067683, -1.110463, 0.20867],
[-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361],
]
)
self.df1.columns = [list("ABC"), list("abc")]
self.df1.columns.names = ["CAP", "low"]
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ["variable", "value"]
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(), melt(self.df))
tm.assert_frame_equal(
self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
melt(self.df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
)
tm.assert_frame_equal(
self.df.melt(var_name=self.var_name, value_name=self.value_name),
melt(self.df, var_name=self.var_name, value_name=self.value_name),
)
tm.assert_frame_equal(self.df1.melt(col_level=0), melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ["variable", "value"]
result1 = self.df.melt(id_vars=["id1"])
assert result1.columns.tolist() == ["id1", "variable", "value"]
result2 = self.df.melt(id_vars=["id1", "id2"])
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
def test_value_vars(self):
result3 = self.df.melt(id_vars=["id1", "id2"], value_vars="A")
assert len(result3) == 10
result4 = self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
expected4 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B")))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame(
{
("A", "a"): self.df1[("A", "a")],
"CAP": ["B"] * len(self.df1),
"low": ["b"] * len(self.df1),
"value": self.df1[("B", "b")],
},
columns=[("A", "a"), "CAP", "low", "value"],
)
result = self.df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"id_vars, value_vars, col_level, expected",
[
(
["A"],
["B"],
0,
DataFrame(
{
"A": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"CAP": {0: "B", 1: "B", 2: "B"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
(
["a"],
["b"],
1,
DataFrame(
{
"a": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"low": {0: "b", 1: "b", 2: "b"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
],
)
def test_single_vars_work_with_multiindex(
self, id_vars, value_vars, col_level, expected
):
result = self.df1.melt(id_vars, value_vars, col_level=col_level)
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ("A", "a")
list_a = [tuple_a]
tuple_b = ("B", "b")
list_b = [tuple_b]
msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex"
for id_vars, value_vars in (
(tuple_a, list_b),
(list_a, tuple_b),
(tuple_a, tuple_b),
):
with pytest.raises(ValueError, match=msg):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ["var", "value"]
result6 = self.df.melt(id_vars=["id1"], var_name=self.var_name)
assert result6.columns.tolist() == ["id1", "var", "value"]
result7 = self.df.melt(id_vars=["id1", "id2"], var_name=self.var_name)
assert result7.columns.tolist() == ["id1", "id2", "var", "value"]
result8 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", var_name=self.var_name
)
assert result8.columns.tolist() == ["id1", "id2", "var", "value"]
result9 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=self.var_name
)
expected9 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, "value"],
)
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ["variable", "val"]
result11 = self.df.melt(id_vars=["id1"], value_name=self.value_name)
assert result11.columns.tolist() == ["id1", "variable", "val"]
result12 = self.df.melt(id_vars=["id1", "id2"], value_name=self.value_name)
assert result12.columns.tolist() == ["id1", "id2", "variable", "val"]
result13 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", value_name=self.value_name
)
assert result13.columns.tolist() == ["id1", "id2", "variable", "val"]
result14 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=self.value_name
)
expected14 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", self.value_name],
)
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name, value_name=self.value_name)
assert result15.columns.tolist() == ["var", "val"]
result16 = self.df.melt(
id_vars=["id1"], var_name=self.var_name, value_name=self.value_name
)
assert result16.columns.tolist() == ["id1", "var", "val"]
result17 = self.df.melt(
id_vars=["id1", "id2"], var_name=self.var_name, value_name=self.value_name
)
assert result17.columns.tolist() == ["id1", "id2", "var", "val"]
result18 = self.df.melt(
id_vars=["id1", "id2"],
value_vars="A",
var_name=self.var_name,
value_name=self.value_name,
)
assert result18.columns.tolist() == ["id1", "id2", "var", "val"]
result19 = self.df.melt(
id_vars=["id1", "id2"],
value_vars=["A", "B"],
var_name=self.var_name,
value_name=self.value_name,
)
expected19 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, self.value_name],
)
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = "foo"
result20 = df20.melt()
assert result20.columns.tolist() == ["foo", "value"]
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level="CAP")
assert res1.columns.tolist() == ["CAP", "value"]
assert res2.columns.tolist() == ["CAP", "value"]
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ["CAP", "low", "value"]
@pytest.mark.parametrize(
"col",
[
pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")),
pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
pd.Series([0, 1, 0, 0, 0]),
],
)
def test_pandas_dtypes(self, col):
# GH 15785
df = DataFrame(
{"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col}
)
expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True)
result = melt(
df, id_vars=["klass", "col"], var_name="attribute", value_name="value"
)
expected = DataFrame(
{
0: list(range(5)) * 2,
1: pd.concat([col] * 2, ignore_index=True),
2: ["attr1"] * 5 + ["attr2"] * 5,
3: expected_value,
}
)
expected.columns = ["klass", "col", "attribute", "value"]
tm.assert_frame_equal(result, expected)
def test_preserve_category(self):
# GH 15853
data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])})
result = melt(data, ["B"], ["A"])
expected = DataFrame(
{"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_missing_columns_raises(self):
# GH-23575
# This test is to ensure that pandas raises an error if melting is
# attempted with column names absent from the dataframe
# Generate data
df = DataFrame(np.random.randn(5, 4), columns=list("abcd"))
# Try to melt with missing `value_vars` column name
msg = "The following '{Var}' are not present in the DataFrame: {Col}"
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['C'\\]")
):
df.melt(["a", "b"], ["C", "d"])
# Try to melt with missing `id_vars` column name
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['A'\\]")):
df.melt(["A", "b"], ["c", "d"])
# Multiple missing
with pytest.raises(
KeyError,
match=msg.format(Var="id_vars", Col="\\['not_here', 'or_there'\\]"),
):
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
# Multiindex melt fails if column is missing from multilevel melt
multi = df.copy()
multi.columns = [list("ABCD"), list("abcd")]
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['E'\\]")):
multi.melt([("E", "a")], [("B", "b")])
# Multiindex fails if column is missing from single level melt
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['F'\\]")
):
multi.melt(["A"], ["F"], col_level=0)
def test_melt_mixed_int_str_id_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]})
result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"])
expected = DataFrame(
{0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_mixed_int_str_value_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"]})
result = melt(df, value_vars=[0, "a"])
expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]})
tm.assert_frame_equal(result, expected)
def test_ignore_index(self):
# GH 17440
df = DataFrame({"foo": [0], "bar": [1]}, index=["first"])
result = melt(df, ignore_index=False)
expected = DataFrame(
{"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"]
)
tm.assert_frame_equal(result, expected)
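# Added illustration (not part of the test module above): a minimal,
# self-contained sketch of the wide-to-long reshape that TestMelt exercises.
import pandas as pd

wide = pd.DataFrame({"id": [1, 2], "A": [10, 20], "B": [30, 40]})
long_df = wide.melt(id_vars="id", value_vars=["A", "B"])
# long_df has columns ["id", "variable", "value"] and 4 rows:
#    id variable  value
# 0   1        A     10
# 1   2        A     20
# 2   1        B     30
# 3   2        B     40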
# -*- coding: utf-8 -*-
'''
Created on Mon Sep 28 16:26:09 2015
@author: r4dat
'''
# ICD9 procs from NHSN definition.
# Diabetes diagnoses from AHRQ version 5 SAS program, CMBFQI32.TXT
# sample string generator print((','.join(map(str, [str(x) for x in range(25040,25094)]))).replace(',','","'))
#
# "25000"-"25033",
# "64800"-"64804" = "DM" /* Diabetes w/o chronic complications*/
# "25000","25001","25002","25003","25004","25005","25006","25007","25008","25009","25010","25011","25012","25013","25014","25015","25016","25017","25018","25019","25020","25021","25022","25023","25024","25025","25026","25027","25028","25029","25030","25031","25032","25033",
# "64800","64801","64802","64803","64804"
#
# "25040"-"25093",
# "7751 " = "DMCX" /* Diabetes w/ chronic complications */
# "25040","25041","25042","25043","25044","25045","25046","25047","25048","25049","25050","25051","25052","25053","25054","25055","25056","25057","25058","25059","25060","25061","25062","25063","25064","25065","25066","25067","25068","25069","25070","25071","25072","25073","25074","25075","25076","25077","25078","25079","25080","25081","25082","25083","25084","25085","25086","25087","25088","25089","25090","25091","25092","25093"
# "7751"
#
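# Added sketch (not in the original script) of the "sample string generator"
# mentioned above: expand a numeric ICD-9 range such as "25040"-"25093" into
# the quoted, comma-separated list pasted into the SQL IN (...) clauses below.
# The variable name is ours.
dmcx_codes = ','.join("'{0}'".format(x) for x in range(25040, 25094))
# dmcx_codes == "'25040','25041',...,'25093'"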
import pypyodbc
import pandas as pd
import numpy as np
pd.set_option('expand_frame_repr', False)
inpdb12 = pypyodbc.connect('Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\\Users\\db12.accdb')
inpdb13 = pypyodbc.connect('Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\\Users\\db13.accdb')
inpdb14 = pypyodbc.connect('Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\\Users\\db14.accdb')
inpdb15 = pypyodbc.connect('Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\\Users\\db15.accdb')
conn_dict = {2012: inpdb12,
2013: inpdb13,
2014: inpdb14,
2015: inpdb15}
# Dictionary: each year has a tuple of names for the needed tables
# tables can be named differently each year
tablenames_dict = {2008: ['[ST08IP-DS1]', '[ST08IP-DS1DIAG]', '[ST08IP-DS1PROC]', '[ST08IP-DS1REV]'],
2009: ['[ST09IP-4Q-DS1MAIN]', '[ST09IP-4Q-DS1DIAG]', '[ST09IP-4Q-DS1PROC]', '[ST09IP-4Q-DS1REV]'],
2010: ['[ST2010IPDS1MAIN]', '[ST2010IPDS1DIAG]', '[ST2010IPDS1PROC]', '[ST2010IPDS1REV]'],
2011: ['[ST2011Q4IPDS1MAIN]', '[ST2011Q4IPDS1DIAG]', '[ST2011Q4IPDS1PROC]', '[ST2011Q4IPDS1REV]'],
2012: ['[ST2012Q4IPDS1]', '[ST2012Q4IPDS1DIAG]', '[ST2012Q4IPDS1PROC]', '[ST2012Q4IPDS1REV]'],
2013: ['[ST2013Q4IPDS1MAIN]', '[ST2013Q4IPDS1DIAG]', '[ST2013Q4IPDS1PROC]', '[ST2013Q4IPDS1REV]'],
2014: ['[ST2014Q4IPDS1]', '[ST2014Q4IPDS1DIAG]', '[ST2014Q4IPDS1PROC]', '[ST2014Q4IPDS1REV]'],
2015: ['[ST2015Q1IPDS1]', '[ST2015Q1IPDS1DIAG]', '[ST2015Q1IPDS1PROC]', '[ST2015Q1IPDS1REV]']}
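# Added note (our reading of how the dict is indexed below): for each database
# year, index 0 appears to be the main discharge table, 1 the DIAG table,
# 2 the PROC table, and 3 the REV table (not referenced in the code shown).
example_diag_table = tablenames_dict[2013][1]  # '[ST2013Q4IPDS1DIAG]'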
###############################################################################
# DF processing
###############################################################################
cols_to_keep = ['CNTRL', 'HOSP', 'ZIP', 'DOB', 'SEX', 'ADATE']
cols_to_keep = [x.lower() for x in cols_to_keep]
# Function to stack datasets according to discharge year
def make_main(iyear):
for iteryear in conn_dict.keys():
if iteryear == iyear:
base_ds = pd.read_sql(
' '.join(['select * from', tablenames_dict[iteryear][0], 'where year(adate) =', str(iyear), ';']),
conn_dict[iteryear]) # where year(adate) =',str(iyear)
base_ds = base_ds[cols_to_keep]
base_ds['orig_table'] = tablenames_dict[iteryear][0]
base_ds['dbyear'] = iteryear
record_count = len(base_ds)
print(' '.join(
['file', tablenames_dict[iteryear][0], 'has', str(record_count), 'records with admit dates in',
'CY' + str(iyear)]))
if iteryear > iyear:
add_ds = pd.read_sql(
' '.join(['select * from', tablenames_dict[iteryear][0], 'where year(adate) =', str(iyear), ';']),
conn_dict[iteryear])
add_ds = add_ds[cols_to_keep]
add_ds['orig_table'] = tablenames_dict[iteryear][0]
add_ds['dbyear'] = iteryear
record_count = len(add_ds)
print(' '.join(
['file', tablenames_dict[iteryear][0], 'has', str(record_count), 'records with admit dates in',
'CY' + str(iyear)]))
base_ds = pd.concat([base_ds, add_ds])
return base_ds
def make_colo(iyear):
for iteryear in conn_dict.keys():
if iteryear == iyear:
base_ds = pd.read_sql(' '.join([
'select b.cntrl,proc,procdate,hosp,dob,sex,adate,ddate,ethn,race FROM (select distinct cntrl,proc,procdate from',
tablenames_dict[iteryear][2], 'where year(procdate) =', str(iyear),
" and proc IN('1731','1732','1734','1735','1736','1739', '4503', '4526', '4541','4549', '4552', '4571','4572','4573','4574','4575','4576', '4579', '4581','4582','4583', '4592','4593','4594','4595', '4603', '4604', '4610','4611', '4613', '4614', '4643', '4652', '4675','4676', '4694')",
') as a left join', tablenames_dict[iteryear][0],
'as b ON a.cntrl=b.cntrl;']), conn_dict[iteryear])
base_ds['orig_table'] = tablenames_dict[iteryear][2]
base_ds['dbyear'] = iteryear
record_count = len(base_ds)
print(' '.join(
['file', tablenames_dict[iteryear][2], 'has', str(record_count), 'records with admit dates in',
'CY' + str(iyear)]))
if iteryear > iyear:
add_ds = pd.read_sql(' '.join([
'select b.cntrl,proc,procdate,hosp,dob,sex,adate,ddate,ethn,race FROM (select distinct cntrl,proc,procdate from',
tablenames_dict[iteryear][2], 'where year(procdate) =', str(iyear),
" and proc IN('1731','1732','1734','1735','1736','1739', '4503', '4526', '4541','4549', '4552', '4571','4572','4573','4574','4575','4576', '4579', '4581','4582','4583', '4592','4593','4594','4595', '4603', '4604', '4610','4611', '4613', '4614', '4643', '4652', '4675','4676', '4694')",
') as a left join', tablenames_dict[iteryear][0],
'as b ON a.cntrl=b.cntrl;']), conn_dict[iteryear])
add_ds['orig_table'] = tablenames_dict[iteryear][2]
add_ds['dbyear'] = iteryear
record_count = len(add_ds)
print(' '.join(
['file', tablenames_dict[iteryear][2], 'has', str(record_count), 'records with admit dates in',
'CY' + str(iyear)]))
base_ds = pd.concat([base_ds, add_ds])
return base_ds
def make_diab(iyear):
for iteryear in conn_dict.keys():
if iteryear == iyear:
base_ds = pd.read_sql(' '.join(
['select a.cntrl,diag,adate,ddate FROM (select distinct cntrl,diag from', tablenames_dict[iteryear][1],
"WHERE diag IN('25000','25001','25002','25003','25004','25005','25006','25007','25008','25009','25010','25011','25012','25013','25014','25015','25016','25017','25018','25019','25020','25021','25022','25023','25024','25025','25026','25027','25028','25029','25030','25031','25032','25033','64800','64801','64802','64803','64804')",
') as a LEFT JOIN', tablenames_dict[iteryear][0], 'as b ON a.cntrl=b.cntrl WHERE year(adate)=',
str(iyear), ';']), conn_dict[iteryear])
base_ds['orig_table'] = tablenames_dict[iteryear][1]
base_ds['dbyear'] = iteryear
record_count = len(base_ds)
print(' '.join(['file', tablenames_dict[iteryear][1], 'has', str(record_count), 'records with Diab in',
'CY' + str(iyear)]))
if iteryear > iyear:
add_ds = pd.read_sql(' '.join(
['select a.cntrl,diag,adate,ddate FROM (select distinct cntrl,diag from', tablenames_dict[iteryear][1],
"WHERE diag IN('25000','25001','25002','25003','25004','25005','25006','25007','25008','25009','25010','25011','25012','25013','25014','25015','25016','25017','25018','25019','25020','25021','25022','25023','25024','25025','25026','25027','25028','25029','25030','25031','25032','25033','64800','64801','64802','64803','64804')",
') as a LEFT JOIN', tablenames_dict[iteryear][0], 'as b ON a.cntrl=b.cntrl WHERE year(adate)=',
str(iyear), ';']), conn_dict[iteryear])
add_ds['orig_table'] = tablenames_dict[iteryear][1]
add_ds['dbyear'] = iteryear
record_count = len(add_ds)
print(' '.join(
['file', tablenames_dict[iteryear][1], 'has', str(record_count), 'records with Diab dates in',
'CY' + str(iyear)]))
base_ds = pd.concat([base_ds, add_ds])
return base_ds
def make_diabx(iyear):
for iteryear in conn_dict.keys():
if iteryear == iyear:
base_ds = pd.read_sql(' '.join(
['select a.cntrl,diag,adate,ddate FROM (select distinct cntrl,diag from', tablenames_dict[iteryear][1],
"WHERE diag IN('25040','25041','25042','25043','25044','25045','25046','25047','25048','25049','25050','25051','25052','25053','25054','25055','25056','25057','25058','25059','25060','25061','25062','25063','25064','25065','25066','25067','25068','25069','25070','25071','25072','25073','25074','25075','25076','25077','25078','25079','25080','25081','25082','25083','25084','25085','25086','25087','25088','25089','25090','25091','25092','25093','7751')",
') as a LEFT JOIN', tablenames_dict[iteryear][0], 'as b ON a.cntrl=b.cntrl WHERE year(adate)=',
str(iyear), ';']), conn_dict[iteryear])
base_ds['orig_table'] = tablenames_dict[iteryear][1]
base_ds['dbyear'] = iteryear
record_count = len(base_ds)
print(' '.join(['file', tablenames_dict[iteryear][1], 'has', str(record_count), 'records with DiabX in',
'CY' + str(iyear)]))
if iteryear > iyear:
add_ds = pd.read_sql(' '.join(
['select a.cntrl,diag,adate,ddate FROM (select distinct cntrl,diag from', tablenames_dict[iteryear][1],
"WHERE diag IN('25040','25041','25042','25043','25044','25045','25046','25047','25048','25049','25050','25051','25052','25053','25054','25055','25056','25057','25058','25059','25060','25061','25062','25063','25064','25065','25066','25067','25068','25069','25070','25071','25072','25073','25074','25075','25076','25077','25078','25079','25080','25081','25082','25083','25084','25085','25086','25087','25088','25089','25090','25091','25092','25093','7751')",
') as a LEFT JOIN', tablenames_dict[iteryear][0], 'as b ON a.cntrl=b.cntrl WHERE year(adate)=',
str(iyear), ';']), conn_dict[iteryear])
add_ds['orig_table'] = tablenames_dict[iteryear][1]
add_ds['dbyear'] = iteryear
record_count = len(add_ds)
print(' '.join(
['file', tablenames_dict[iteryear][1], 'has', str(record_count), 'records with DiabX dates in',
'CY' + str(iyear)]))
base_ds = pd.concat([base_ds, add_ds])
return base_ds
# Interactive Test Frame
# test=pd.read_sql(' '.join(['select TOP 200 * from',tablenames_dict[2014][0]]),conn_dict[2014])
# print('Creating Main dataset')
# main13=make_main(2013)
## 2013
col13 = make_colo(2013)
diab = make_diab(2013)
diabx = make_diabx(2013)
col13['key'] = col13['cntrl'].map(int).map(str) + col13['dbyear'].map(str)
diab['key'] = diab['cntrl'].map(str) + diab['dbyear'].map(str)
diabx['key'] = diabx['cntrl'].map(str) + diabx['dbyear'].map(str)
col13['dm'] = col13['key'].isin(diab['key'])
col13['dmx'] = col13['key'].isin(diabx['key'])
col13 = col13.rename(columns=
{
'hosp': 'ccn',
'sex': 'gender',
'adate': 'admitdate'
})
col13.drop_duplicates(subset=['key'], keep='first', inplace=True)
col13['dob'] = pd.to_datetime(col13['dob'])
col13['procdate'] = pd.to_datetime(col13['procdate'])
col13['ccn'] = col13['ccn'].map(int)
col13['admitdate'] = pd.to_datetime(col13['admitdate'])
## 2014
col14 = make_colo(2014)
diab = make_diab(2014)
diabx = make_diabx(2014)
col14['key'] = col14['cntrl'].map(int).map(str) + col14['dbyear'].map(str)
diab['key'] = diab['cntrl'].map(str) + diab['dbyear'].map(str)
diabx['key'] = diabx['cntrl'].map(str) + diabx['dbyear'].map(str)
col14['dm'] = col14['key'].isin(diab['key'])
col14['dmx'] = col14['key'].isin(diabx['key'])
col14 = col14.rename(columns=
{
'hosp': 'ccn',
'sex': 'gender',
'adate': 'admitdate'
})
col14.drop_duplicates(subset=['key'], keep='first', inplace=True)
col14['dob'] = pd.to_datetime(col14['dob'])
col14['procdate'] = pd.to_datetime(col14['procdate'])
col14['ccn'] = col14['ccn'].map(int)
col14['admitdate'] = pd.to_datetime(col14['admitdate'])
"""
This script uses the slack users.list endpoint to pull
all users.
Refer to https://api.slack.com/methods/users.list
"""
import os
import logging
import pandas as pd
from slack_sdk import WebClient # Import WebClient from Python SDK (github.com/slackapi/python-slack-sdk)
"""
FUNCTIONS
"""
def connect(api_token):
""" Executes the users.list request via the Slack SDK and returns the response """
client = WebClient(token=api_token)
logger = logging.getLogger(__name__)
response = client.users_list()
return response
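# Added usage sketch: the token is a Slack bot/user token; the environment
# variable name below is illustrative, not taken from this script. Kept
# commented out so that importing this module makes no API call.
# response = connect(os.environ["SLACK_BOT_TOKEN"])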
def format_users_list(response):
""" Formats the users list and creates a dataframe """
user_list = []
for i in response["members"]:
user_list.append({
"id": i.get("id"),
"email": i["profile"].get("email"),
"title": i["profile"].get("title"),
"first_name": i["profile"].get("first_name"),
"last_name": i["profile"].get("last_name"),
"real_name": i["profile"].get("real_name"),
"tz": i["profile"].get("tz"),
"display_name": i["profile"].get("display_name"),
"is_email_confirmed": i["profile"].get("is_email_confirmed"),
"updated": i["profile"].get("updated")
}
)
user_list_data = pd.DataFrame(user_list)
return user_list_data
# -*- coding: utf-8 -*-
"""
Directory structure of training
The network directory is the root of the structure and is typically in
_ibeis_cache/nets for ibeis databases. Otherwise it it custom defined (like in
.cache/wbia_cnn/training for mnist tests)
# era=(group of epochs)
Datasets contain ingested data packed into a single file for quick loading.
Data can be presplit into testing / learning / validation sets. Metadata is
always a dictionary where keys specify columns and each item corresponds to a row
of data. Non-corresponding metadata is currently not supported, but should
probably be located in a manifest.json file.
# TODO: what if the same data has tasks that use different labels?
# need to incorporate that structure.
The model directory must keep track of several things:
* The network architecture (which may depend on the dataset being used)
- input / output shape
- network layers
* The state of learning
- epoch/era number
- learning rate
- regularization rate
* diagnostic information
- graphs of loss / error rates
- images of convolutional weights
- other visualizations
The trained model keeps track of the trained weights and is now independent of
the dataset. Finalized weights should be copied to and loaded from here.
"""
import logging
import six
import numpy as np
import utool as ut
import sys
from os.path import join, exists, dirname, basename, split, splitext
from six.moves import cPickle as pickle # NOQA
import warnings
import sklearn
from wbia_cnn import net_strs
from wbia_cnn import draw_net
from wbia_cnn.models import _model_legacy
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger()
VERBOSE_CNN = ut.get_module_verbosity_flags('cnn')[0] or ut.VERBOSE
# Delayed imports
lasagne = None
T = None
theano = None
def delayed_import():
global lasagne
global theano
global T
import lasagne
import theano
from theano import tensor as T # NOQA
# def testdata_model_with_history():
# model = BaseModel()
# # make a dummy history
# X_train, y_train = [1, 2, 3], [0, 0, 1]
# rng = np.random.RandomState(0)
# def dummy_epoch_dict(num):
# epoch_info = {
# 'epoch': num,
# 'loss': 1 / np.exp(num / 10) + rng.rand() / 100,
# 'train_loss': 1 / np.exp(num / 10) + rng.rand() / 100,
# 'train_loss_regularized': (
# 1 / np.exp(num / 10) + np.exp(rng.rand() * num) + rng.rand() / 100
# ),
# 'valid_loss': 1 / np.exp(num / 10) - rng.rand() / 100,
# 'param_update_mags': {
# 'C0': (rng.normal() ** 2, rng.rand()),
# 'F1': (rng.normal() ** 2, rng.rand()),
# },
# }
# return epoch_info
# count = 0
# for era_length in [4, 4, 4]:
# alias_key = 'dummy_alias_key'
# model.start_new_era(X_train, y_train, X_train, y_train, alias_key)
# for count in range(count, count + era_length):
# model.record_epoch(dummy_epoch_dict(count))
# # model.record_epoch({'epoch': 1, 'valid_loss': .8, 'train_loss': .9})
# # model.record_epoch({'epoch': 2, 'valid_loss': .5, 'train_loss': .7})
# # model.record_epoch({'epoch': 3, 'valid_loss': .3, 'train_loss': .6})
# # model.record_epoch({'epoch': 4, 'valid_loss': .2, 'train_loss': .3})
# # model.record_epoch({'epoch': 5, 'valid_loss': .1, 'train_loss': .2})
# return model
if 'theano' in sys.modules:
delayed_import()
@ut.reloadable_class
class History(ut.NiceRepr):
"""
Manages bookkeeping for training history
"""
def __init__(history):
# an era is a group of epochs
history.era_list = []
history.epoch_list = []
# Marks the start of the era
history._start_epoch = 0
def __len__(history):
return history.total_epochs
def __nice__(history):
return history.get_history_nice()
# def __iter__(history):
# for epochs in history.grouped_epochs():
# yield epochs
@classmethod
def from_oldstyle(cls, era_history):
history = cls()
for era_num, era in enumerate(era_history):
epoch_info_list = era['epoch_info_list']
era = ut.delete_dict_keys(era.copy(), ['epoch_info_list', 'size'])
# Append new information
era['era_num'] = era_num
for epoch in epoch_info_list:
epoch = epoch.copy()
epoch['era_num'] = era_num
if 'epoch' in epoch:
epoch['epoch_num'] = epoch['epoch']
del epoch['epoch']
history.epoch_list.append(epoch)
history.era_list.append(era)
history._start_epoch = len(history.epoch_list)
return history
@property
def total_epochs(history):
return len(history.epoch_list)
@property
def total_eras(history):
return len(history.era_list)
@property
def hist_id(history):
r"""
CommandLine:
python -m wbia_cnn.models.abstract_models --test-History.hist_id:0
Example:
>>> # ENABLE_DOCTEST
>>> from wbia_cnn.models.abstract_models import * # NOQA
>>> model = testdata_model_with_history()
>>> history = model.history
>>> result = str(model.history.hist_id)
>>> print(result)
epoch0003_era012_zamwoidy
"""
hashid = history.get_history_hashid()
nice = history.get_history_nice()
history_id = nice + '_' + hashid
return history_id
@property
def current_era_size(history):
return history.total_epochs - history._start_epoch
def get_history_hashid(history):
r"""
Builds a hashid that uniquely identifies the architecture and the
training procedure this model has gone through to produce the current
architecture weights.
"""
era_hash_list = [ut.hashstr27(ut.repr2(era)) for era in history.era_list]
# epoch_hash_list = [ut.hashstr27(ut.repr2(epoch)) for epoch in history.epoch_list]
# epoch_hash_str = ''.join(epoch_hash_list)
era_hash_str = ''.join(era_hash_list)
era_hash_str += str(history.total_epochs)
history_hashid = ut.hashstr27(era_hash_str, hashlen=8)
return history_hashid
def get_history_nice(history):
if history.total_epochs == 0:
nice = 'NoHist'
else:
nice = 'epoch%04d_era%03d' % (history.total_eras, history.total_epochs)
return nice
def grouped_epochs(history):
era_num = ut.take_column(history.epoch_list, 'era_num')
unique, groupxs = ut.group_indices(era_num)
grouped_epochs = ut.apply_grouping(history.epoch_list, groupxs)
return grouped_epochs
def grouped_epochsT(history):
for epochs in history.grouped_epochs():
yield ut.dict_stack2(epochs)
def record_epoch(history, epoch_info):
epoch_info['epoch_num'] = len(history.epoch_list)
history.epoch_list.append(epoch_info)
def _new_era(history, model, X_learn, y_learn, X_valid, y_valid):
"""
Used to denote a change in hyperparameters during training.
"""
y_hashid = ut.hashstr_arr(y_learn, 'y', alphabet=ut.ALPHABET_27)
learn_hashid = str(model.arch_id) + '_' + y_hashid
if history.total_epochs > 0 and history.current_era_size == 0:
logger.info('Not starting new era (previous era has no epochs)')
else:
_new_era = {
'size': 0,
'learn_hashid': learn_hashid,
'arch_hashid': model.get_arch_hashid(),
'arch_id': model.arch_id,
'num_learn': len(y_learn),
'num_valid': len(y_valid),
'learn_state': model.learn_state.asdict(),
}
num_eras = history.total_eras
logger.info('starting new era %d' % (num_eras,))
model.current_era = _new_era
history.era_list.append(_new_era)
history._start_epoch = history.total_epochs
def _record_epoch(history, epoch_info):
"""
Records an epoch in an era.
"""
# each key/val in epoch_info dict corresponds to a key/val_list in an
# era dict.
# history.current_era['size'] += 1
# history.current_era['epoch_info_list'].append(epoch_info)
epoch_info['era_num'] = history.total_eras
history.epoch_list.append(epoch_info)
def rewind_to(history, epoch_num):
target_epoch = history.epoch_list[epoch_num]
era_num = target_epoch['era_num']
history.epoch_list = history.epoch_list[: epoch_num + 1]
history.era_list = history.era_list[: era_num + 1]
history._start_epoch = history.total_epochs
def to_json(history):
return ut.to_json(history.__dict__)
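# Added usage sketch (our own illustration; requires only utool): epochs are
# appended via record_epoch()/_record_epoch() and grouped into eras started by
# _new_era(); hist_id combines an "epochXXXX_eraYYY" tag with a hash of the
# era history. Kept commented since it is not part of the original module.
# hist = History()
# hist.era_list.append({'size': 0})                 # normally done by _new_era()
# hist._record_epoch({'learn_loss': 0.9, 'valid_loss': 1.0})
# print(hist.hist_id)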
@ut.reloadable_class
class LearnState(ut.DictLike):
"""
Keeps track of parameters that can be changed during theano execution
"""
def __init__(self, learning_rate, momentum, weight_decay):
self._keys = [
'momentum',
'weight_decay',
'learning_rate',
]
self._shared_state = {key: None for key in self._keys}
self._isinit = False
# Set special properties
self.learning_rate = learning_rate
self.momentum = momentum
self.weight_decay = weight_decay
# --- special properties ---
momentum = property(
fget=lambda self: self.getitem('momentum'),
fset=lambda self, val: self.setitem('momentum', val),
)
learning_rate = property(
fget=lambda self: self.getitem('learning_rate'),
fset=lambda self, val: self.setitem('learning_rate', val),
)
weight_decay = property(
fget=lambda self: self.getitem('weight_decay'),
fset=lambda self, val: self.setitem('weight_decay', val),
)
@property
def shared(self):
if self._isinit:
return self._shared_state
else:
raise AssertionError('Learning has not been initialized')
def init(self):
if not self._isinit:
self._isinit = True
# Reset variables with shared theano state
_preinit_state = self._shared_state.copy()
for key in self.keys():
self._shared_state[key] = None
for key in self.keys():
self[key] = _preinit_state[key]
def keys(self):
return self._keys
def getitem(self, key):
_shared = self._shared_state[key]
if self._isinit:
value = None if _shared is None else _shared.get_value()
else:
value = _shared
return value
def setitem(self, key, value):
if self._isinit:
import theano
logger.info('[model] setting %s to %.9r' % (key, value))
_shared = self._shared_state[key]
if value is None and _shared is not None:
raise ValueError('Cannot set an initialized shared variable to None.')
elif _shared is None and value is not None:
self._shared_state[key] = theano.shared(
np.cast['float32'](value), name=key
)
elif _shared is not None:
_shared.set_value(np.cast['float32'](value))
else:
self._shared_state[key] = value
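# Added usage sketch (mirrors how _ModelFitter below drives this class; the
# numeric values are the hyperparameter defaults further down). Kept commented
# because init() allocates theano shared variables and needs theano installed.
# state = LearnState(learning_rate=0.005, momentum=0.9, weight_decay=0.0)
# state.init()                  # wrap the values in theano.shared
# state.learning_rate *= 0.9    # decay, as done at the end of an era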
@ut.reloadable_class
class _ModelFitter(object):
"""
CommandLine:
python -m wbia_cnn _ModelFitter.fit:0
"""
def _init_fit_vars(model, kwargs):
model._rng = ut.ensure_rng(0)
model.history = History()
# Training state
model.requested_headers = ['learn_loss', 'valid_loss', 'learnval_rat']
model.data_params = None
# Stores current result
model.best_results = {
'epoch_num': None,
'learn_loss': np.inf,
'valid_loss': np.inf,
'weights': None,
}
# TODO: some sort of behavior config. Things that don't influence
# training, but do impact performance / memory usage.
model._behavior = {
'buffered': False,
}
# Static configuration indicating training preferences
# (these will not influence the model learning)
model.monitor_config = {
'monitor': ut.get_argflag('--monitor'),
'monitor_updates': False,
'checkpoint_freq': 50,
'case_dump_freq': 25,
'weight_dump_freq': 5,
'showprog': True,
}
ut.update_existing(model.monitor_config, kwargs)
ut.delete_dict_keys(kwargs, model.monitor_config.keys())
# Static configuration indicating hyper-parameters
# (these will influence how the model learns)
# Some of these values (i.e. learning state) may be dynamic during
# training. The dynamic version should be used in this context. This
# dictionary is always static in a fit session and only indicates the
# initial state of these variables.
model.hyperparams = {
'label_encode_on': True,
'whiten_on': False,
'augment_on': False,
'augment_on_validate': False,
'augment_weights': False,
'augment_delay': 0,
# 'augment_delay': 2,
'era_size': 10, # epochs per era
'era_clean': False,
'max_epochs': None,
'rate_schedule': 0.9,
'stopping_patience': 100,
# 'class_weight': None,
'class_weight': 'balanced',
'random_seed': None,
'learning_rate': 0.005,
'momentum': 0.9,
'weight_decay': 0.0,
}
ut.update_existing(model.hyperparams, kwargs)
ut.delete_dict_keys(kwargs, model.hyperparams.keys())
# Dynamic configuration that may change with time
model.learn_state = LearnState(
learning_rate=model.hyperparams['learning_rate'],
momentum=model.hyperparams['momentum'],
weight_decay=model.hyperparams['weight_decay'],
)
# This will by a dynamic dict that will span the life of a training
# session
model._fit_session = None
def _default_input_weights(model, X, y, w=None):
if w is None:
# Hack, assuming a classification task
if 'class_to_weight' in model.data_params:
class_to_weight = model.data_params['class_to_weight']
w = class_to_weight.take(y).astype(np.float32)
else:
# logger.info('no class weights')
w = np.ones(y.shape).astype(np.float32)
return w
def fit(
model,
X_train,
y_train,
X_valid=None,
y_valid=None,
valid_idx=None,
X_test=None,
y_test=None,
verbose=True,
**kwargs
):
r"""
Trains the network with backprop.
CommandLine:
python -m wbia_cnn _ModelFitter.fit --name=bnorm --vd --monitor
python -m wbia_cnn _ModelFitter.fit --name=dropout
python -m wbia_cnn _ModelFitter.fit --name=incep
Example:
>>> # DISABLE_DOCTEST
>>> from wbia_cnn.models import mnist
>>> model, dataset = mnist.testdata_mnist(defaultname='bnorm', dropout=.5)
>>> model.init_arch()
>>> model.print_layer_info()
>>> model.print_model_info_str()
>>> X_train, y_train = dataset.subset('train')
>>> model.fit(X_train, y_train)
"""
from wbia_cnn import utils
logger.info('\n[train] --- TRAINING LOOP ---')
ut.update_existing(model.hyperparams, kwargs)
ut.delete_dict_keys(kwargs, model.hyperparams.keys())
ut.update_existing(model.monitor_config, kwargs)
ut.delete_dict_keys(kwargs, model.monitor_config.keys())
ut.update_existing(model._behavior, kwargs)
ut.delete_dict_keys(kwargs, model._behavior.keys())
assert len(kwargs) == 0, 'unhandled kwargs=%r' % (kwargs,)
try:
model._validate_input(X_train, y_train)
except Exception:
logger.info('[WARNING] Input validation failed...')
X_learn, y_learn, X_valid, y_valid = model._ensure_learnval_split(
X_train, y_train, X_valid, y_valid, valid_idx
)
model.ensure_data_params(X_learn, y_learn)
has_encoder = getattr(model, 'encoder', None) is not None
learn_hist = model.encoder.inverse_transform(y_learn) if has_encoder else y_learn
valid_hist = model.encoder.inverse_transform(y_valid) if has_encoder else y_valid
if learn_hist.shape[-1] == 1:
logger.info('Learn y histogram: ' + ut.repr2(ut.dict_hist(learn_hist)))
if valid_hist.shape[-1] == 1:
logger.info('Valid y histogram: ' + ut.repr2(ut.dict_hist(valid_hist)))
# FIXME: make class weights more elegant and customizable
w_learn = model._default_input_weights(X_learn, y_learn)
w_valid = model._default_input_weights(X_valid, y_valid)
model._new_fit_session()
epoch = model.best_results['epoch_num']
if epoch is None:
epoch = 0
logger.info('Initializing training at epoch=%r' % (epoch,))
else:
logger.info('Resuming training at epoch=%r' % (epoch,))
# Begin training the neural network
logger.info('model.monitor_config = %s' % (ut.repr4(model.monitor_config),))
logger.info('model.batch_size = %r' % (model.batch_size,))
logger.info('model.hyperparams = %s' % (ut.repr4(model.hyperparams),))
logger.info('learn_state = %s' % ut.repr4(model.learn_state.asdict()))
logger.info('model.arch_id = %r' % (model.arch_id,))
# create theano symbolic expressions that define the network
theano_backprop = model.build_backprop_func()
theano_forward = model.build_forward_func()
# number of non-best iterations after which a best save is triggered
# This prevents strings of best-saves one after another
countdown_defaults = {
'checkpoint': model.hyperparams['era_size'] * 2,
'stop': model.hyperparams['stopping_patience'],
}
countdowns = {key: None for key in countdown_defaults.keys()}
def check_countdown(key):
if countdowns[key] is not None:
if countdowns[key] > 0:
countdowns[key] -= 1
else:
countdowns[key] = countdown_defaults[key]
return True
model.history._new_era(model, X_train, y_train, X_train, y_train)
printcol_info = utils.get_printcolinfo(model.requested_headers)
utils.print_header_columns(printcol_info)
tt = ut.Timer(verbose=False)
# ---------------------------------------
# EPOCH 0: Execute backwards and forward passes
tt.tic()
learn_info = model._epoch_validate_learn(
theano_forward, X_learn, y_learn, w_learn
)
valid_info = model._epoch_validate(theano_forward, X_valid, y_valid, w_valid)
# ---------------------------------------
# EPOCH 0: Summarize the epoch
epoch_info = {'epoch_num': epoch}
epoch_info.update(**learn_info)
epoch_info.update(**valid_info)
epoch_info['duration'] = tt.toc()
epoch_info['learn_state'] = model.learn_state.asdict()
epoch_info['learnval_rat'] = epoch_info['learn_loss'] / epoch_info['valid_loss']
# ---------------------------------------
# EPOCH 0: Check how we are learning
# Cache best results
model.best_results['weights'] = model.get_all_param_values()
model.best_results['epoch_num'] = epoch_info['epoch_num']
if 'valid_precision' in epoch_info:
model.best_results['valid_precision'] = epoch_info['valid_precision']
model.best_results['valid_recall'] = epoch_info['valid_recall']
model.best_results['valid_fscore'] = epoch_info['valid_fscore']
model.best_results['valid_support'] = epoch_info['valid_support']
for key in model.requested_headers:
model.best_results[key] = epoch_info[key]
# ---------------------------------------
# EPOCH 0: Record this epoch in history and print info
# model.history._record_epoch(epoch_info)
utils.print_epoch_info(model, printcol_info, epoch_info)
epoch += 1
while True:
try:
# ---------------------------------------
# Execute backwards and forward passes
tt.tic()
learn_info = model._epoch_learn(
theano_backprop, X_learn, y_learn, w_learn, epoch
)
if learn_info.get('diverged'):
break
valid_info = model._epoch_validate(
theano_forward, X_valid, y_valid, w_valid
)
# ---------------------------------------
# Summarize the epoch
epoch_info = {'epoch_num': epoch}
epoch_info.update(**learn_info)
epoch_info.update(**valid_info)
epoch_info['duration'] = tt.toc()
epoch_info['learn_state'] = model.learn_state.asdict()
epoch_info['learnval_rat'] = (
epoch_info['learn_loss'] / epoch_info['valid_loss']
)
# ---------------------------------------
# Record this epoch in history
model.history._record_epoch(epoch_info)
# ---------------------------------------
# Check how we are learning
if epoch_info['valid_loss'] < model.best_results['valid_loss']:
# Found a better model. Reset countdowns.
for key in countdowns.keys():
countdowns[key] = countdown_defaults[key]
# Cache best results
model.best_results['weights'] = model.get_all_param_values()
model.best_results['epoch_num'] = epoch_info['epoch_num']
if 'valid_precision' in epoch_info:
model.best_results['valid_precision'] = epoch_info[
'valid_precision'
]
model.best_results['valid_recall'] = epoch_info['valid_recall']
model.best_results['valid_fscore'] = epoch_info['valid_fscore']
model.best_results['valid_support'] = epoch_info['valid_support']
if 'learn_precision' in epoch_info:
model.best_results['learn_precision'] = epoch_info[
'learn_precision'
]
model.best_results['learn_recall'] = epoch_info['learn_recall']
model.best_results['learn_fscore'] = epoch_info['learn_fscore']
model.best_results['learn_support'] = epoch_info['learn_support']
for key in model.requested_headers:
model.best_results[key] = epoch_info[key]
# Check frequencies and countdowns
checkpoint_flag = utils.checkfreq(
model.monitor_config['checkpoint_freq'], epoch
)
if check_countdown('checkpoint'):
countdowns['checkpoint'] = None
checkpoint_flag = True
# ---------------------------------------
# Output Diagnostics
# Print the epoch
utils.print_epoch_info(model, printcol_info, epoch_info)
# Output any diagnostics
if checkpoint_flag:
# FIXME: just move it to the second location
if model.monitor_config['monitor']:
model._dump_best_monitor()
model.checkpoint_save_model_info()
model.checkpoint_save_model_state()
model.save_model_info()
model.save_model_state()
if model.monitor_config['monitor']:
model._dump_epoch_monitor()
# if epoch > 10:
# TODO: can dump case info every epoch
# But we want to dump the images less often
# Make function to just grab the failure case info
# and another function to visualize it.
if model.monitor_config['monitor']:
if utils.checkfreq(model.monitor_config['weight_dump_freq'], epoch):
model._dump_weight_monitor()
if utils.checkfreq(model.monitor_config['case_dump_freq'], epoch):
model._dump_case_monitor(X_learn, y_learn, X_valid, y_valid)
if check_countdown('stop'):
logger.info('Early stopping')
break
# Check if the era is done
max_era_size = model._fit_session['max_era_size']
if model.history.current_era_size >= max_era_size:
# Decay learning rate
era = model.history.total_eras
rate_schedule = model.hyperparams['rate_schedule']
rate_schedule = ut.ensure_iterable(rate_schedule)
frac = rate_schedule[min(era, len(rate_schedule) - 1)]
model.learn_state.learning_rate = (
model.learn_state.learning_rate * frac
)
# Increase number of epochs in the next era
max_era_size = np.ceil(max_era_size / (frac ** 2))
model._fit_session['max_era_size'] = max_era_size
# Start a new era
model.history._new_era(model, X_train, y_train, X_train, y_train)
if model.hyperparams.get('era_clean', False):
y_learn = model._epoch_clean(
theano_forward, X_learn, y_learn, w_learn
)
y_valid = model._epoch_clean(
theano_forward, X_valid, y_valid, w_valid
)
utils.print_header_columns(printcol_info)
# Break on max epochs
if model.hyperparams['max_epochs'] is not None:
if epoch >= model.hyperparams['max_epochs']:
logger.info('\n[train] maximum number of epochs reached\n')
break
# Increment the epoch
epoch += 1
except KeyboardInterrupt:
logger.info('\n[train] Caught CRTL+C')
logger.info('model.arch_id = %r' % (model.arch_id,))
logger.info('learn_state = %s' % ut.repr4(model.learn_state.asdict()))
from six.moves import input
actions = ut.odict(
[
('resume', (['0', 'r'], 'resume training')),
('view', (['v', 'view'], 'view session directory')),
('ipy', (['ipy', 'ipython', 'cmd'], 'embed into IPython')),
('print', (['p', 'print'], 'print model state')),
('shock', (['shock'], 'shock the network')),
('save', (['s', 'save'], 'save best weights')),
('quit', (['q', 'exit', 'quit'], 'quit')),
]
)
while True:
# prompt
msg_list = [
'enter %s to %s'
% (ut.conj_phrase(ut.lmap(repr, map(str, tup[0])), 'or'), tup[1])
for key, tup in actions.items()
]
msg = ut.indentjoin(msg_list, '\n | * ')
msg = ''.join([' +-----------', msg, '\n L-----------\n'])
logger.info(msg)
#
ans = str(input()).strip()
# We have a resolution
if ans in actions['quit'][0]:
logger.info('quit training...')
return
elif ans in actions['resume'][0]:
break
elif ans in actions['ipy'][0]:
ut.embed()
elif ans in actions['save'][0]:
# Save the weights of the network
model.checkpoint_save_model_info()
model.checkpoint_save_model_state()
model.save_model_info()
model.save_model_state()
elif ans in actions['print'][0]:
model.print_state_str()
elif ans in actions['shock'][0]:
utils.shock_network(model.output_layer)
model.learn_state.learning_rate = (
model.learn_state.learning_rate * 2
)
elif ans in actions['view'][0]:
session_dpath = model._fit_session['session_dpath']
ut.view_directory(session_dpath)
else:
continue
# Handled the resolution
logger.info('resuming training...')
break
except (IndexError, ValueError, Exception) as ex:
ut.printex(ex, 'Error Occurred Embedding to enable debugging', tb=True)
errorstate = {'is_fixed': False}
# is_fixed = False
import utool
utool.embed()
if not errorstate['is_fixed']:
raise
# Save the best network
model.checkpoint_save_model_state()
model.save_model_state()
# Set model to best weights
model.set_all_param_values(model.best_results['weights'])
# # Remove history after overfitting starts
# if 'epoch_num' not in model.best_results:
# model.best_results['epoch_num'] = model.best_results['epoch']
# epoch_num = model.best_results['epoch_num']
# model.history.rewind_to(epoch_num)
if X_test is not None and y_test is not None:
# TODO: dump test output in a standard way
w_test = model._default_input_weights(X_test, y_test)
theano_forward = model.build_forward_func()
info = model._epoch_validate(theano_forward, X_test, y_test, w_test)
logger.info('train info = %r' % (info,))
model.dump_cases(X_test, y_test, 'test', dpath=model.arch_dpath)
# model._run_test(X_test, y_test)
# def _run_test(model, X_test, y_test):
# # Perform a test on the fitted model
# test_outptuts = model._predict(X_test)
# y_pred = test_outptuts['predictions']
# logger.info(model.name)
# report = sklearn.metrics.classification_report(
# y_true=y_test, y_pred=y_pred,
# )
# logger.info(report)
# pass
def _ensure_learnval_split(
model, X_train, y_train, X_valid=None, y_valid=None, valid_idx=None
):
if X_valid is not None:
assert valid_idx is None, 'Cant specify both valid_idx and X_valid'
# When X_valid is given assume X_train is actually X_learn
X_learn = X_train
y_learn = y_train
else:
if valid_idx is None:
# Split training set into a learning / validation set
from wbia_cnn.dataset import stratified_shuffle_split
train_idx, valid_idx = stratified_shuffle_split(
y_train, fractions=[0.7, 0.3], rng=432321
)
# import sklearn.cross_validation
# xvalkw = dict(n_folds=2, shuffle=True, random_state=43432)
# skf = sklearn.cross_validation.StratifiedKFold(y_train, **xvalkw)
# train_idx, valid_idx = list(skf)[0]
elif valid_idx is not None:
# valid_idx was given explicitly; learn on its complement
train_idx = ut.index_complement(valid_idx, len(X_train))
else:
assert False, 'impossible state'
# Set to learn network weights
X_learn = X_train.take(train_idx, axis=0)
y_learn = y_train.take(train_idx, axis=0)
# Set to crossvalidate hyperparamters
X_valid = X_train.take(valid_idx, axis=0)
y_valid = y_train.take(valid_idx, axis=0)
# logger.info('\n[train] --- MODEL INFO ---')
# model.print_arch_str()
# model.print_layer_info()
return X_learn, y_learn, X_valid, y_valid
def ensure_data_params(model, X_learn, y_learn):
if model.data_params is None:
model.data_params = {}
# TODO: move to dataset. This is independent of the model.
if model.hyperparams['whiten_on']:
# Center the data by subtracting the mean
if 'center_mean' not in model.data_params:
logger.info('computing center mean/std. (hacks std=1)')
X_ = X_learn.astype(np.float32)
try:
if ut.is_int(X_learn):
ut.assert_inbounds(X_learn, 0, 255, eq=True, verbose=ut.VERBOSE)
X_ = X_ / 255
ut.assert_inbounds(X_, 0.0, 1.0, eq=True, verbose=ut.VERBOSE)
except ValueError:
logger.info('[WARNING] Input bounds check failed...')
# Ensure that the mean is computed on 0-1 normalized data
model.data_params['center_mean'] = np.mean(X_, axis=0)
model.data_params['center_std'] = 1.0
# Hack to preconvert mean / std to 0-1 for old models
model._fix_center_mean_std()
else:
ut.delete_dict_keys(model.data_params, ['center_mean', 'center_std'])
if model.hyperparams['class_weight'] == 'balanced':
logger.info('Balancing class weights')
import sklearn.utils
unique_classes = np.array(sorted(ut.unique(y_learn)))
class_to_weight = sklearn.utils.compute_class_weight(
'balanced',
classes=unique_classes,
y=y_learn,
)
model.data_params['class_to_weight'] = class_to_weight
else:
ut.delete_dict_keys(model.data_params, ['class_to_weight'])
if model.hyperparams['label_encode_on']:
if getattr(model, 'encoder', None) is None:
if hasattr(model, 'init_encoder'):
model.init_encoder(y_learn)
def _rename_old_sessions(model):
import re
dpath_list = ut.glob(model.saved_session_dpath, '*', with_files=False)
for dpath in dpath_list:
if True or not re.match('^.*_nEpochs_[0-9]*$', dpath):
report_fpath = join(dpath, 'era_history.json')
if exists(report_fpath):
report_dict = ut.load_data(report_fpath)
# TODO: try to read from history report
nEpochs = report_dict['nEpochs']
else:
nEpochs = len(ut.glob(join(dpath, 'history'), 'loss_*'))
# Add suffix to session to indicate what happened?
# Maybe this should be done via symlink?
dpath_new = dpath + '_nEpochs_%04d' % (nEpochs,)
ut.move(dpath, dpath_new)
model.saved_session_dpath
pass
def _new_fit_session(model):
"""
Starts a model training session
"""
logger.info('Starting new fit session')
model._fit_session = {
'start_time': ut.get_timestamp(),
'max_era_size': model.hyperparams['era_size'],
'era_epoch_num': 0,
}
# TODO: ensure this somewhere else?
model._rng = ut.ensure_rng(model.hyperparams['random_seed'])
if model.monitor_config['monitor']:
ut.ensuredir(model.arch_dpath)
# Rename old sessions to distinguish this one
# TODO: put a lock file on any existing sessions
model._rename_old_sessions()
# Create a directory for this training session with a timestamp
session_dname = 'fit_session_' + model._fit_session['start_time']
session_dpath = join(model.saved_session_dpath, session_dname)
session_dpath = ut.get_nonconflicting_path(
session_dpath, offset=1, suffix='_conflict%d'
)
prog_dirs = {
'dream': join(session_dpath, 'dream'),
'loss': join(session_dpath, 'history'),
'weights': join(session_dpath, 'weights'),
}
model._fit_session.update(
**{
'prog_dirs': prog_dirs,
}
)
# for dpath in prog_dirs.values():
# ut.ensuredir(dpath)
ut.ensuredir(session_dpath)
model._fit_session['session_dpath'] = session_dpath
if ut.get_argflag('--vd'):
# Open session in file explorer
ut.view_directory(session_dpath)
# Make a symlink to the latest session
session_link = join(model.arch_dpath, 'latest_session')
ut.symlink(session_dpath, session_link, overwrite=True)
# Write backprop arch info to arch root
back_archinfo_fpath = join(session_dpath, 'arch_info_fit.json')
back_archinfo_json = model.make_arch_json(with_noise=True)
ut.writeto(back_archinfo_fpath, back_archinfo_json, verbose=True)
# Write feed-forward arch info to arch root
pred_archinfo_fpath = join(session_dpath, 'arch_info_predict.json')
pred_archinfo_json = model.make_arch_json(with_noise=False)
ut.writeto(pred_archinfo_fpath, pred_archinfo_json, verbose=False)
# Write arch graph to root
try:
back_archimg_fpath = join(session_dpath, 'arch_graph_fit.jpg')
model.imwrite_arch(fpath=back_archimg_fpath, fullinfo=False)
model._overwrite_latest_image(back_archimg_fpath, 'arch_graph')
except Exception as ex:
ut.printex(ex, iswarning=True)
# Write initial states of the weights
try:
ut.ensuredir(prog_dirs['weights'])
fig = model.show_weights_image(fnum=2)
fpath = join(
prog_dirs['weights'], 'weights_' + model.history.hist_id + '.png'
)
fig.savefig(fpath)
model._overwrite_latest_image(fpath, 'weights')
except Exception as ex:
ut.printex(ex, iswarning=True)
def _overwrite_latest_image(model, fpath, new_name):
"""
copies the new image to a path to be overwritten so new updates are
shown
"""
import shutil
dpath, fname = split(fpath)
ext = splitext(fpath)[1]
session_dpath = model._fit_session['session_dpath']
if session_dpath != dirname(fpath):
# Copy latest image to the session dir if it isn't there
shutil.copy(fpath, join(session_dpath, 'latest_' + new_name + ext))
# Copy latest image to the main arch dir
shutil.copy(fpath, join(model.arch_dpath, 'latest_' + new_name + ext))
def _dump_case_monitor(model, X_learn, y_learn, X_valid, y_valid):
prog_dirs = model._fit_session['prog_dirs']
try:
model.dump_cases(X_learn, y_learn, 'learn')
except Exception:
logger.info('WARNING: DUMP CASES HAS FAILED')
pass
try:
model.dump_cases(X_valid, y_valid, 'valid')
except Exception:
logger.info('WARNING: DUMP CASES HAS FAILED')
pass
if False:
try:
# Save class dreams
ut.ensuredir(prog_dirs['dream'])
fpath = join(
prog_dirs['dream'], 'class_dream_' + model.history.hist_id + '.png'
)
fig = model.show_class_dream(fnum=4)
fig.savefig(fpath, dpi=180)
model._overwrite_latest_image(fpath, 'class_dream')
except Exception as ex:
ut.printex(ex, 'failed to dump dream', iswarning=True)
def _dump_weight_monitor(model):
prog_dirs = model._fit_session['prog_dirs']
try:
# Save weights images
ut.ensuredir(prog_dirs['weights'])
fpath = join(
prog_dirs['weights'], 'weights_' + model.history.hist_id + '.png'
)
fig = model.show_weights_image(fnum=2)
fig.savefig(fpath, dpi=180)
model._overwrite_latest_image(fpath, 'weights')
except Exception as ex:
ut.printex(ex, 'failed to dump weights', iswarning=True)
def get_report_json(model):
report_dict = {}
report_dict['best'] = ut.delete_keys(model.best_results.copy(), ['weights'])
for key in report_dict['best'].keys():
if hasattr(report_dict['best'][key], 'tolist'):
report_dict['best'][key] = report_dict['best'][key].tolist()
if len(model.history) > 0:
report_dict['num_learn'] = model.history.era_list[-1]['num_learn']
report_dict['num_valid'] = model.history.era_list[-1]['num_valid']
report_dict['hyperparams'] = model.hyperparams
report_dict['arch_hashid'] = model.get_arch_hashid()
report_dict['model_name'] = model.name
report_json = ut.repr2_json(report_dict, nl=2, precision=4)
return report_json
def _dump_best_monitor(model):
session_dpath = model._fit_session['session_dpath']
# Save text best info
report_fpath = join(session_dpath, 'best_report.json')
report_json = model.get_report_json()
ut.write_to(report_fpath, report_json, verbose=False)
def _dump_epoch_monitor(model):
prog_dirs = model._fit_session['prog_dirs']
session_dpath = model._fit_session['session_dpath']
# Save text history info
text_fpath = join(session_dpath, 'era_history.txt')
history_text = model.history.to_json()
ut.write_to(text_fpath, history_text, verbose=False)
# Save loss graphs
try:
ut.ensuredir(prog_dirs['loss'])
fpath = join(prog_dirs['loss'], 'loss_' + model.history.hist_id + '.png')
fig = model.show_loss_history(fnum=1)
fig.savefig(fpath, dpi=180)
model._overwrite_latest_image(fpath, 'loss')
except Exception as ex:
ut.printex(ex, 'failed to dump loss', iswarning=True)
raise
try:
ut.ensuredir(prog_dirs['loss'])
fpath = join(prog_dirs['loss'], 'pr_' + model.history.hist_id + '.png')
fig = model.show_pr_history(fnum=4)
fig.savefig(fpath, dpi=180)
model._overwrite_latest_image(fpath, 'pr')
except Exception as ex:
ut.printex(ex, 'failed to dump pr', iswarning=True)
raise
# Save weight updates
try:
ut.ensuredir(prog_dirs['loss'])
fpath = join(
prog_dirs['loss'], 'update_mag_' + model.history.hist_id + '.png'
)
fig = model.show_update_mag_history(fnum=3)
fig.savefig(fpath, dpi=180)
model._overwrite_latest_image(fpath, 'update_mag')
except Exception as ex:
ut.printex(ex, 'failed to dump update mags ', iswarning=True)
def _epoch_learn(model, theano_backprop, X_learn, y_learn, w_learn, epoch):
"""
Backwards propagate -- run the learning set through the backwards pass
Ignore:
>>> from wbia_cnn.models.abstract_models import * # NOQA
>>> from wbia_cnn.models import mnist
>>> import theano
>>> model, dataset = mnist.testdata_mnist(dropout=.5)
>>> model.monitor_config['monitor'] = False
>>> model.monitor_config['showprog'] = True
>>> model._behavior['buffered'] = False
>>> model.init_arch()
>>> model.learn_state.init()
>>> batch_size = 16
>>> X_learn, y_learn = dataset.subset('test')
>>> model.ensure_data_params(X_learn, y_learn)
>>> class_to_weight = model.data_params['class_to_weight']
>>> class_to_weight.take(y_learn)
>>> w_learn = class_to_weight.take(y_learn).astype(np.float32)
>>> model._new_fit_session()
>>> theano_backprop = model.build_backprop_func()
"""
buffered = model._behavior['buffered']
augment_on = model.hyperparams.get('augment_on', True)
if epoch <= model.hyperparams['augment_delay']:
            # Don't augment in the first few epochs so the model can start to
            # make progress without augmentation. This will hopefully help
            # training initialize faster.
augment_on = False
learn_outputs = model.process_batch(
theano_backprop,
X_learn,
y_learn,
w_learn,
shuffle=True,
augment_on=augment_on,
buffered=buffered,
)
# average loss over all learning batches
learn_info = {}
learn_info['learn_loss'] = learn_outputs['loss'].mean()
learn_info['learn_loss_std'] = learn_outputs['loss'].std()
if 'loss_reg' in learn_outputs:
# Regularization information
learn_info['learn_loss_reg'] = learn_outputs['loss_reg']
reg_amount = learn_outputs['loss_reg'] - learn_outputs['loss']
reg_ratio = reg_amount / learn_outputs['loss']
reg_percent = reg_amount / learn_outputs['loss_reg']
if 'accuracy' in learn_outputs:
learn_info['learn_acc'] = learn_outputs['accuracy'].mean()
learn_info['learn_acc_std'] = learn_outputs['accuracy'].std()
if 'predictions' in learn_outputs:
try:
p, r, f, s = sklearn.metrics.precision_recall_fscore_support(
y_true=learn_outputs['auglbl_list'],
y_pred=learn_outputs['predictions'],
)
except ValueError:
p, r, f, s = 0.0, 0.0, 0.0, 0.0
# report = sklearn.metrics.classification_report(
# y_true=learn_outputs['auglbl_list'], y_pred=learn_outputs['predictions']
# )
learn_info['learn_precision'] = p
learn_info['learn_recall'] = r
learn_info['learn_fscore'] = f
learn_info['learn_support'] = s
learn_info['reg_percent'] = reg_percent
learn_info['reg_ratio'] = reg_ratio
param_update_mags = {}
for key, val in learn_outputs.items():
if key.startswith('param_update_magnitude_'):
key_ = key.replace('param_update_magnitude_', '')
param_update_mags[key_] = (val.mean(), val.std())
if param_update_mags:
learn_info['param_update_mags'] = param_update_mags
# If the training loss is nan, the training has diverged
if np.isnan(learn_info['learn_loss']):
logger.info('\n[train] train loss is Nan. training diverged\n')
logger.info('learn_outputs = %r' % (learn_outputs,))
logger.info('\n[train] train loss is Nan. training diverged\n')
"""
from wbia_cnn import draw_net
draw_net.imwrite_theano_symbolic_graph(theano_backprop)
"""
# imwrite_theano_symbolic_graph(thean_expr):
learn_info['diverged'] = True
return learn_info
def _epoch_validate_learn(model, theano_forward, X_learn, y_learn, w_learn):
"""
Forwards propagate -- Run validation set through the forwards pass
"""
augment_on = model.hyperparams.get('augment_on_validate', False)
learn_outputs = model.process_batch(
theano_forward, X_learn, y_learn, w_learn, augment_on=augment_on
)
# average loss over all learning batches
learn_info = {}
learn_info['learn_loss'] = learn_outputs['loss_determ'].mean()
learn_info['learn_loss_std'] = learn_outputs['loss_determ'].std()
if 'loss_reg' in learn_outputs:
# Regularization information
learn_info['learn_loss_reg'] = learn_outputs['loss_reg']
reg_amount = learn_outputs['loss_reg'] - learn_outputs['loss']
reg_ratio = reg_amount / learn_outputs['loss']
reg_percent = reg_amount / learn_outputs['loss_reg']
if 'accuracy' in learn_outputs:
learn_info['learn_acc'] = learn_outputs['accuracy'].mean()
learn_info['learn_acc_std'] = learn_outputs['accuracy'].std()
if 'predictions' in learn_outputs:
try:
p, r, f, s = sklearn.metrics.precision_recall_fscore_support(
y_true=learn_outputs['auglbl_list'],
y_pred=learn_outputs['predictions'],
)
except ValueError:
p, r, f, s = 0.0, 0.0, 0.0, 0.0
# report = sklearn.metrics.classification_report(
# y_true=learn_outputs['auglbl_list'], y_pred=learn_outputs['predictions']
# )
learn_info['learn_precision'] = p
learn_info['learn_recall'] = r
learn_info['learn_fscore'] = f
learn_info['learn_support'] = s
learn_info['reg_percent'] = reg_percent
learn_info['reg_ratio'] = reg_ratio
param_update_mags = {}
for key, val in learn_outputs.items():
if key.startswith('param_update_magnitude_'):
key_ = key.replace('param_update_magnitude_', '')
param_update_mags[key_] = (val.mean(), val.std())
if param_update_mags:
learn_info['param_update_mags'] = param_update_mags
return learn_info
def _epoch_validate(model, theano_forward, X_valid, y_valid, w_valid):
"""
Forwards propagate -- Run validation set through the forwards pass
"""
augment_on = model.hyperparams.get('augment_on_validate', False)
valid_outputs = model.process_batch(
theano_forward, X_valid, y_valid, w_valid, augment_on=augment_on
)
valid_info = {}
valid_info['valid_loss'] = valid_outputs['loss_determ'].mean()
valid_info['valid_loss_std'] = valid_outputs['loss_determ'].std()
if 'valid_acc' in model.requested_headers:
valid_info['valid_acc'] = valid_outputs['accuracy'].mean()
valid_info['valid_acc_std'] = valid_outputs['accuracy'].std()
if 'predictions' in valid_outputs:
try:
p, r, f, s = sklearn.metrics.precision_recall_fscore_support(
y_true=valid_outputs['auglbl_list'],
y_pred=valid_outputs['predictions'],
)
except ValueError:
p, r, f, s = 0.0, 0.0, 0.0, 0.0
valid_info['valid_precision'] = p
valid_info['valid_recall'] = r
valid_info['valid_fscore'] = f
valid_info['valid_support'] = s
return valid_info
def _epoch_clean(
model, theano_forward, X_general, y_general, w_general, conf_thresh=0.95
):
"""
        Forwards propagate -- Run the set through the forwards pass and clean
"""
augment_on = model.hyperparams.get('augment_on_validate', False)
valid_outputs = model.process_batch(
theano_forward, X_general, y_general, w_general, augment_on=augment_on
)
predictions = valid_outputs['predictions']
confidences = valid_outputs['confidences']
y_cleaned = np.array(
[
pred if y != pred and conf > conf_thresh else y
for y, pred, conf in zip(y_general, predictions, confidences)
]
)
num_cleaned = len(np.nonzero(y_general != y_cleaned)[0])
logger.info('Cleaned %d instances' % (num_cleaned,))
return y_cleaned
def dump_cases(model, X, y, subset_id='unknown', dpath=None):
"""
For each class find:
* the most-hard failures
* the mid-level failures
* the critical cases (least-hard failures / most-hard successes)
* the mid-level successes
            * the least-hard successes
"""
import vtool as vt
import pandas as pd
logger.info('Dumping %s cases' % (subset_id,))
# pd.set_option("display.max_rows", 20)
# pd.set_option("display.precision", 2)
# pd.set_option('expand_frame_repr', False)
# pd.set_option('display.float_format', lambda x: '%.2f' % x)
if dpath is None:
dpath = model._fit_session['session_dpath']
case_dpath = ut.ensuredir((dpath, 'cases', model.history.hist_id, subset_id))
y_true = y
netout = model._predict(X)
y_conf = netout['network_output_determ']
data_idx = np.arange(len(y))
y_pred = y_conf.argmax(axis=1)
if getattr(model, 'encoder', None):
class_idxs = model.encoder.transform(model.encoder.classes_)
class_lbls = model.encoder.classes_
else:
class_idxs = list(range(model.output_dims))
class_lbls = list(range(model.output_dims))
target_classes = ut.take(class_lbls, class_idxs)
        index = pd.Series(data_idx, name='data_idx')
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 13 22:45:00 2018
@author: benmo
"""
import pandas as pd, numpy as np, dask.dataframe as ddf
import quandl
import sys, os, socket
import pickle
from dask import delayed
from difflib import SequenceMatcher
from matplotlib.dates import bytespdate2num, num2date
from matplotlib.ticker import Formatter
import re
from itertools import permutations, product, chain
from functools import reduce
import struct
similar = lambda a, b: SequenceMatcher(None, a, b).ratio()
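# Example (illustrative values): similar('unemployment rate', 'unemployment')
# returns ~0.83; identical strings score 1.0 and unrelated strings score near 0.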
crs4326 = {'init': 'epsg:4326'}
def mostSimilar(x,term):
temp = np.array([x,list(map(lambda x: similar(x,term), x))]).T
return pd.DataFrame(temp,
columns=['Name','Score']).sort_values('Score',ascending=False)
def getEconVars():
varz = pd.read_csv("/home/benmo/Data/Econ/Indicators/indicators.csv")
fedData = pickleLib.fedData()
econDict = {}
for col in varz.columns:
temp = varz[col].dropna()
econDict[col] = {}
for var in temp:
econDict[col][var] = mostSimilar(fedData.item, var).iloc[:5].set_index(
'Name').to_dict()
return econDict
#"/home/benmo/Data/PyObjects/commodities.pkl"
def get_commodities():
oil = quandl.get('CHRIS/CME_WS1', authtoken="<KEY>")
natgas = quandl.get('CHRIS/CME_NG1', authtoken="<KEY>")
gold = quandl.get('CHRIS/CME_GC1', authtoken="<KEY>")
rice = quandl.get('CHRIS/ODE_TR1', authtoken="<KEY>")
grain = quandl.get('CHRIS/EUREX_FCGR1', authtoken="<KEY>")
lumber = quandl.get('CHRIS/CME_LB1', authtoken="<KEY>")
steelCHN = quandl.get('CHRIS/SHFE_WR1', authtoken="<KEY>")
steelUSA = quandl.get('CHRIS/CME_HR1', authtoken="<KEY>")
coal = quandl.get('CHRIS/SGX_CFF1', authtoken="<KEY>")
df = pd.DataFrame([])
for (key, temp) in zip(['Oil', 'Natural Gas', 'Gold', 'Rice', 'Grain',
'Lumber', 'SteelCHN', 'SteelUSA', 'Coal'], [oil, natgas, gold, rice,
grain, lumber, steelCHN,
steelUSA, coal]):
temp['Commodity'] = key
df = df.append(temp)
return df
def get_etfs():
oil = quandl.get('CHRIS/CME_WS1', authtoken="<KEY>")
natgas = quandl.get('CHRIS/CME_NG1', authtoken="<KEY>")
gold = quandl.get('CHRIS/CME_GC1', authtoken="<KEY>")
rice = quandl.get('CHRIS/ODE_TR1', authtoken="<KEY>")
grain = quandl.get('CHRIS/EUREX_FCGR1', authtoken="<KEY>")
lumber = quandl.get('CHRIS/CME_LB1', authtoken="<KEY>")
steelCHN = quandl.get('CHRIS/SHFE_WR1', authtoken="<KEY>")
steelUSA = quandl.get('CHRIS/CME_HR1', authtoken="<KEY>")
coal = quandl.get('CHRIS/SGX_CFF1', authtoken="<KEY>")
df = pd.DataFrame([])
for (key, temp) in zip(['Oil', 'Natural Gas', 'Gold', 'Rice', 'Grain',
'Lumber', 'SteelCHN', 'SteelUSA', 'Coal'], [oil, natgas, gold, rice,
grain, lumber, steelCHN,
steelUSA, coal]):
temp['Commodity'] = key
df = df.append(temp)
return df
def print_lines(fn, N, out=None):
    fout = open(out, 'w+') if out is not None else None
    f = open(fn)
    for i in range(N):
        line = f.readline()
        print(line) if out is None else fout.write(line)
    f.close()
    fout.close() if out is not None else print('no file written')
tuple2str = lambda name: name if isinstance(name, tuple) ==False else reduce(lambda x, y: str(x)
.replace('.0','') + '_' + str(y).replace('.0',''), list(map(lambda xi: str(xi), name)))
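# Example: flatten MultiIndex-style column names into plain strings, e.g.
# tuple2str(('Price', 2015.0)) -> 'Price_2015' and tuple2str('Price') -> 'Price'.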
def search_str(regx, string):
return True if re.search(regx, string) else False
def returnFiltered(term, data):
temp = list(filter(lambda x: term
in x.lower(), data['item']))
return data[data.isin(temp).item==True]
def egen(data, f, applyto, groupby, column_filt, newcol):
tmp = data[column_filt]
tmp[newcol] = tmp.groupby(groupby).apply(f)
tmp['index'] = tmp.index
return pd.merge(data, tmp, how='inner', left_on=column_filt, right_on =applyto + ['index'])
def read_idx(filename):
with open(filename, 'rb') as f:
zero, data_type, dims = struct.unpack('>HBB', f.read(4))
shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))
        return np.frombuffer(f.read(), dtype=np.uint8).reshape(shape)
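# Illustrative use (hypothetical file name): read an MNIST-style IDX file into
# a numpy array, e.g. images = read_idx('train-images-idx3-ubyte') yields an
# array of shape (60000, 28, 28) for the standard MNIST training images.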
class MyComp():
cName = socket.gethostname()
if sys.platform == 'linux':
ffProfilePath = "/home/benmo/.mozilla/firefox/w55ako72.dev-edition-default"
picklePath = "/home/benmo/Data/PyObjects"
else:
if cName == 'DESKTOP-HOKP1GT':
ffProfilePath = "C:/Users/benmo/AppData/Roaming/Mozilla/Firefox/Profiles/it0uu1ch.default"
uofcPath = "D:/OneDrive - University of Calgary"
financePath = "C:/users/benmo/OneDrive/2016& 2017Classes/Financial Econ"
picklePath = "D:/data/pyobjects"
classesPath = "C:/users/benmo/OneDrive/2016& 2017Classes"
else:
ffProfilePath = "C:/Users/benmo/AppData/Roaming/Mozilla/Firefox/Profiles/vpv78y9i.default"
uofcPath = "D:/benmo/OneDrive - University of Calgary"
financePath = "D:/benmo/OneDrive/2016& 2017Classes/Financial Econ"
picklePath = "D:/data/pyobjects"
classesPath = "D:/benmo/OneDrive/2016& 2017Classes"
def mySAS():
bob = pd.read_sas("D:/data/Personal Research/pcg15Public/pcg15Public/epcg15.sas7bdat")
return bob
def collect_csv(path, na_val='NA',skiprows=0, dtype_map=None):
try:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, skiprows=skiprows, dtype=dtype_map)))
except:
try:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, low_memory=False, skiprows=skiprows, dtype=dtype_map)))
except:
try:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, low_memory=False, dtype=str,
skiprows=skiprows)))
except:
return list(map(lambda x: [x, x.compute()], ddf.read_csv(
path, low_memory=False, dtype=str,
na_values=na_val, skiprows=skiprows)))
'''example:
bob = ddf.read_csv('Z:/Electricity/*.csv',skiprows=2,dtype={'Date': str,
'HE': str,
'Import/Export': str,
'Asset Id': str,
'Block Number': str,
'Price': 'float64',
'From': 'int64',
'To': 'int64',
'Size': 'int64',
'Available': 'int64',
'Dispatched': str,
'Dispatched MW': 'int64',
'Flexible': str,
'Offer Control': str})
bob=bob.drop('Effective Date/Time',axis=1)
bob.compute().to_csv('Z:/Electricity/Combined.csv',index=False)
'''
def nestmap(outer, outerf, innerf, mapping=list):
return map(lambda x: outerf(mapping(map(lambda inner: innerf(inner), x))), outer)
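# Example: apply innerf to every element of each inner iterable, then outerf to
# each transformed inner list, e.g.
# list(nestmap([[1, 2], [3, 4]], sum, lambda v: v * 2)) -> [6, 14]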
def test():
bob = pd.read_csv("C:/users/benmo/desktop/fedReserve.csv")
list(filter(lambda x: 'utl' in x.lower(), bob['item']))
data = quandl.get('FED/DTCOLRHTS_N_M', authtoken="<KEY>")
class pickleLib:
picklePath = MyComp.picklePath
pathStates = picklePath + '/usaStates.pkl'
pathCensus = "D:/Data/Personal Research/usadata.dta"
states = lambda pth=pathStates: pd.read_pickle(pth)
priceData = lambda pth=picklePath + "/priceData.pkl": pd.read_pickle(pth)
fedData = lambda pth=picklePath + "/fedData.pkl": pd.read_pickle(pth)
futuresData = lambda pth=picklePath + "/futuresData.pkl": pd.read_pickle(pth)
treasuryData = lambda pth=picklePath + "/treasuryData.pkl": pd.read_pickle(pth)
    globalYieldData = lambda pth=picklePath + "/globalYield.pkl": pd.read_pickle(pth)
from pandas import DataFrame
from requests.models import HTTPError
import pandas as pd
import tmdbsimple as tmdb
import json
flatten = lambda l: [item for sublist in l for item in sublist]
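# e.g. flatten([[1, 2], [3]]) -> [1, 2, 3]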
def create_actors_dataframe(credits_df, save_path=None, actor_id=None):
"""Create the dataframe of actors present in the tmdb dataset.
Parameters
----------
credits_df : pandas.DataFrame
dataframe from the file tmdb_5000_credits.csv
save_path : str or None
        Save the dataframe to the given path if not None
    actor_id : int or None
        If given, resume fetching actor details starting from this tmdb id
        (useful for restarting an interrupted download)
    Returns
    -------
pandas.DataFrame
DataFrame which contains information about actors in the tmdb dataset
"""
columns_to_drop = ['also_known_as']
actors = flatten([json.loads(item) for index, item in credits_df.cast.iteritems()])
if actor_id is not None:
list_of_id = list(set([actor['id'] for actor in actors]))
recover_index = list_of_id.index(actor_id)
list_of_id = list_of_id[recover_index:]
else:
list_of_id = set([actor['id'] for actor in actors])
actors.clear()
for state, id in enumerate(list_of_id):
try:
actor = tmdb.People(id).info()
except HTTPError:
print(f'id {id} not found')
else:
actors.append(actor)
if save_path is not None and state % 500 == 0:
            actors_df = pd.DataFrame(actors)
import cv2
import pickle
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from skimage.feature import hog
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC
# This defines if we're building the classifer with Edge Features, or not.
EDGE_FEATURES = True
# Predicts the accuracy of the model
PREDICTION = True
# Number of Principal components to reduce the feature space to
PCA_NO = 2
# Float reprecenting how much of the data to use to build the model
TRAINING_PERCENTAGE = 0.5
CurPath = os.path.dirname(__file__)
# Assumes the Dataset directory is in the same directory as this script
DSPath = os.path.join(CurPath,"Dataset/")
# Returns a numpy array / OpenCV image given the name it appears in, in the data.csv.
def getImage(imageIndex):
filename = "{}.jpg".format(imageIndex)
filepath = DSPath + filename
return cv2.imread(filepath, 0)
# Returns a copy of the input image post FFT returns the array as float
def applyFFT(image):
f = np.fft.fft2(image)
fshift = np.fft.fftshift(f)
fft = 20*np.log(np.abs(fshift))
# Return as array on np.uint8, as otherwise, it's an array of float, which is not right
return fft.astype(dtype=np.uint8)
# Returns a copy of the input image post Gabor Filter
def applyGabor(image):
g_kernel = cv2.getGaborKernel((11, 11), 8.0, np.pi/4, 10.0, 0.5, 0, ktype=cv2.CV_32F)
filtered_img = cv2.filter2D(image, cv2.CV_8UC3, g_kernel)
return filtered_img
# Returns a copy of the input image post Edge Detection
def applyEdgeDetection(image):
return cv2.Canny(image,75,200)
# Returns the features of the HoG image
def applyHoG(image):
_, hog_image = hog(image,
visualize=True,
block_norm='L2-Hys',
pixels_per_cell=(16, 16))
return hog_image
# Responsible for finding all features of a given image and returning as a 1D array.
def getFeatures(image, edge_check=EDGE_FEATURES):
# Every Descriptor returns a 130,360 array, therefore flatten each to (130*360,)
f_FFT = applyFFT(image).flatten()
f_Gabor = applyGabor(image).flatten()
f_HoG = applyHoG(image).flatten()
# Get as one big vector
image_features = np.hstack((f_FFT,f_Gabor,f_HoG))
if edge_check:
# Add edge detection to vector, if we want it on there
f_Edge = applyEdgeDetection(image).flatten()
image_features = np.hstack((image_features,f_Edge))
# When using Images of size: (360,130)
# Without Edge Features, vector is size: (140400,)
# With Edge Features, vector is size: (187200,)
return image_features
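# Illustrative (hypothetical names) usage: stack the per-image feature vectors
# into a matrix for the SVM, e.g.
# X = np.array([getFeatures(getImage(i)) for i in image_indices])
# where image_indices would come from the data.csv index loaded in run().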
# Saves the SVM model for later use
def saveSVM(svm):
# save the model to disk
if EDGE_FEATURES:
filename = CurPath + '/SVM_model_EDGES.sav'
else:
filename = CurPath + '/SVM_model.sav'
pickle.dump(svm, open(filename, 'wb'))
# Not used due to the way we have to predict stuff
def applyPCA(feature_matrix):
if feature_matrix.ndim == 1:
feature_matrix = feature_matrix.reshape(1,-1)
print(feature_matrix.shape)
ss = StandardScaler()
standard_fm = ss.fit_transform(feature_matrix)
print(PCA_NO)
pca = PCA(1)
standard_fm = pca.fit_transform(standard_fm)
return standard_fm
def run():
# load in the data.csv and use it as a label process
    labels = pd.read_csv(DSPath + "data.csv", index_col=0)
import numpy as np
import pandas as pd
import allel
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
vcf = allel.read_vcf("../../data/raw/1349 sample and all 253k unfiltered SNPs.vcf", )
variants = np.char.array(vcf["variants/CHROM"].astype(str)) + ":" + np.char.array(vcf["variants/POS"].astype(str))
vcf_arr = vcf["calldata/GT"].astype("float")
vcf_arr[vcf_arr == -1] = np.nan
mutations = vcf_arr
# mutations = np.abs(mutations)
mutations = mutations.sum(axis=2)
mutations = mutations.T
mutations_df = pd.DataFrame(data=mutations, index=vcf["samples"], columns=variants)
mutations_df.dropna(axis=1, how="any", thresh=800, inplace=True)
mutations_df.dropna(axis=0, how="any", thresh=200000, inplace=True)
mutations_df.fillna(value=0, inplace=True)
# Subset patients
samples_phenotypes = pd.read_table("../../data/raw/Sample metadata.csv", sep=",")
samples_phenotypes.set_index("ID", inplace=True)
good_samples = pd.read_table("../../data/interim/samples_metadata.csv", sep=",")
good_samples.set_index("ID", inplace=True)
good_samples = good_samples[good_samples["SRC"] != "LGS"]
good_samples = good_samples[good_samples["SRC"] != "D2"]
good_samples = good_samples[good_samples["SRC"] != "U2"]
SLE_samples = good_samples[good_samples["SLE"] == 1]
hla_protein_samples = pd.Index(['55062', '56104', '34903', '16820', '41060', '54687', '44119', '48523',
                                '33287', '14947', '21560', '87483', '42335', '30146', '28289', '40007'])
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
    'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files (w/o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923: the Census Region each US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA.Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids, thousands of cubic
# feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
    # Utilities must come after plants: plant location needs to be
    # removed before the utility locations are compiled.
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems",)
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and
dictionaries mapping each table's original column names to their replacement
names (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the glue tables linking plants and utilities across FERC
Form 1 and EIA that can be pulled into PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) mapping the
name of each partition type (sub-key) to the working partitions (sub-value),
e.g. the tuples of years for each data source that can be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the lists of associated
tables from each data source that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing table names (keys) and tuples of column names
(values) for integer-type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys)
and their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
        'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # Where is this used? It was removed from the DG table because it is not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
# we should transition these into readable codes, not a one letter thing
'ownership_code': pd.StringDtype(),
'phone_extension_1': pd.StringDtype(),
'phone_extension_2': pd.StringDtype(),
'phone_number_1': pd.StringDtype(),
'phone_number_2': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_epa': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'point_source_unit_id_epa': pd.StringDtype(),
'potential_peak_demand_savings_mw': float,
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'price_responsive_programes': pd.BooleanDtype(),
'price_responsiveness_customers': pd.Int64Dtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'pv_current_flow_type': pd.CategoricalDtype(categories=['AC', 'DC']),
'reactive_power_output_mvar': float,
'real_time_pricing_program': pd.BooleanDtype(),
'rec_revenue': float,
'rec_sales_mwh': float,
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'reported_as_another_company': pd.StringDtype(),
'retail_marketing_activity': pd.BooleanDtype(),
'retail_sales': float,
'retail_sales_mwh': float,
'retirement_date': 'datetime64[ns]',
'revenue_class': pd.CategoricalDtype(categories=REVENUE_CLASSES),
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'rtos_of_operation': pd.StringDtype(),
'saidi_w_major_event_dats_minus_loss_of_service_minutes': float,
'saidi_w_major_event_days_minutes': float,
'saidi_wo_major_event_days_minutes': float,
'saifi_w_major_event_days_customers': float,
'saifi_w_major_event_days_minus_loss_of_service_customers': float,
'saifi_wo_major_event_days_customers': float,
'sales_for_resale': float,
'sales_for_resale_mwh': float,
'sales_mwh': float,
'sales_revenue': float,
'sales_to_ultimate_consumers_mwh': float,
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': pd.StringDtype(),
'service_area': pd.StringDtype(),
'service_type': pd.CategoricalDtype(categories=[
"bundled", "energy", "delivery",
]),
'short_form': pd.BooleanDtype(),
'sold_to_utility_mwh': float,
'solid_fuel_gasification': pd.BooleanDtype(),
'data_source': pd.StringDtype(),
'standard': pd.CategoricalDtype(categories=RELIABILITY_STANDARDS),
        'startup_source_code_1': pd.StringDtype(),
# Quantile utilities for processing MERRA/AIRS data
import numpy
import numpy.ma as ma
import calculate_VPD
import netCDF4
from netCDF4 import Dataset
from numpy import random, linalg
import datetime
import pandas
import os, sys
from scipy import stats
import h5py
def quantile_cloud_locmask(airsdr, mtdr, indr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, msk):
    """Construct cloud variable quantiles and z-scores, with a possibly
    irregular location mask.
    """
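    # Argument names are terse; as best as can be inferred from their use in
    # this function: airsdr, mtdr and indr are the AIRS, MERRA and
    # interpolated-input data directories (dtdr is presumably an output data
    # directory), yrlst is a list of years, mnst and mnfn are the start and
    # end months, hrchc is the UTC hour choice, rgchc is apparently a region
    # choice, and msk is the name of the location-mask variable in the MERRA
    # files.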
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (airsdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f[msk][:,:]
latmet = f['plat'][:]
lonmet = f['plon'][:]
f.close()
mask[mask <= 0] = 0
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
    # Subset to the bounding box of the nonzero mask region
lnsm = numpy.sum(mask,axis=0)
print(lnsq.shape)
print(lnsm.shape)
print(lnsm)
ltsm = numpy.sum(mask,axis=1)
print(ltsq.shape)
print(ltsm.shape)
print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
#latflt = latin.flatten()
#lonflt = lonin.flatten()
#mskflt = mask.flatten()
#lcsq = numpy.arange(mskflt.shape[0])
#lcsb = lcsq[mskflt > 0]
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mask[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
dyinit = datetime.date(yrlst[k],6,1)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
print(jdsq)
tmhld = numpy.repeat(jdsq,nx*ny)
print(tmhld.shape)
print(numpy.amin(tmhld))
print(numpy.amax(tmhld))
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing_IncludesCloudParams.h5' % (indr,yrlst[k],hrchc)
f = h5py.File(fnm,'r')
ctyp1 = f['/ctype'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
ctyp2 = f['/ctype2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt1 = f['/cprtop'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt2 = f['/cprtop2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb1 = f['/cprbot'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb2 = f['/cprbot2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc1 = f['/cfrac'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc2 = f['/cfrac2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc12 = f['/cfrac12'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt1 = f['/cngwat'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt2 = f['/cngwat2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp1 = f['/cstemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp2 = f['/cstemp2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(mtnm,'r')
psfc = f.variables['spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
nt = ctyp1.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
lthld = numpy.tile(ltrp,nt)
lnhld = numpy.tile(lnrp,nt)
ctyp1 = ctyp1.flatten()
ctyp2 = ctyp2.flatten()
cfrc1 = cfrc1.flatten()
cfrc2 = cfrc2.flatten()
cfrc12 = cfrc12.flatten()
cngwt1 = cngwt1.flatten()
cngwt2 = cngwt2.flatten()
cttp1 = cttp1.flatten()
cttp2 = cttp2.flatten()
psfc = psfc.flatten()
# Number of slabs
nslbtmp = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
nslbtmp[(ctyp1 > 100) & (ctyp2 > 100)] = 2
nslbtmp[(ctyp1 > 100) & (ctyp2 < 100)] = 1
if tsmp == 0:
nslabout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
nslabout[:] = nslbtmp[msksb]
else:
nslabout = numpy.append(nslabout,nslbtmp[msksb])
flsq = numpy.arange(ctyp1.shape[0])
# For two slabs, slab 1 must have highest cloud bottom pressure
cprt1 = cprt1.flatten()
cprt2 = cprt2.flatten()
cprb1 = cprb1.flatten()
cprb2 = cprb2.flatten()
slabswap = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
swpsq = flsq[(nslbtmp == 2) & (cprb1 < cprb2)]
slabswap[swpsq] = 1
print(numpy.mean(slabswap))
# Cloud Pressure variables
pbttmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp1[nslbtmp >= 1] = cprb1[nslbtmp >= 1]
pbttmp1[swpsq] = cprb2[swpsq]
ptptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp1[nslbtmp >= 1] = cprt1[nslbtmp >= 1]
ptptmp1[swpsq] = cprt2[swpsq]
pbttmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp2[nslbtmp == 2] = cprb2[nslbtmp == 2]
pbttmp2[swpsq] = cprb1[swpsq]
ptptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp2[nslbtmp == 2] = cprt2[nslbtmp == 2]
ptptmp2[swpsq] = cprt1[swpsq]
# DP Cloud transformation
dptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp1[nslbtmp >= 1] = pbttmp1[nslbtmp >= 1] - ptptmp1[nslbtmp >= 1]
dpslbtmp = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dpslbtmp[nslbtmp == 2] = ptptmp1[nslbtmp == 2] - pbttmp2[nslbtmp == 2]
dptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp2[nslbtmp == 2] = pbttmp2[nslbtmp == 2] - ptptmp2[nslbtmp == 2]
# Adjust negative DPSlab values
dpnsq = flsq[(nslbtmp == 2) & (dpslbtmp < 0.0) & (dpslbtmp > -1000.0)]
dpadj = numpy.zeros((ctyp1.shape[0],))
dpadj[dpnsq] = numpy.absolute(dpslbtmp[dpnsq])
dpslbtmp[dpnsq] = 1.0
dptmp1[dpnsq] = dptmp1[dpnsq] / 2.0
dptmp2[dpnsq] = dptmp2[dpnsq] / 2.0
# Sigma / Logit Adjustments
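        # The cloud layer boundaries are re-expressed as fractions of surface
        # pressure (a composition summing to 1: below-cloud, slab 1 thickness,
        # between-slab gap, slab 2 thickness, remainder) and then mapped to
        # unbounded z-scores with calculate_VPD.lgtzs, which appears to apply
        # a logit-style (log-ratio) transform.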
zpbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp1tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdslbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp2tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
ncldct = 0
for t in range(psfc.shape[0]):
if ( (pbttmp1[t] >= 0.0) and (dpslbtmp[t] >= 0.0) ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], dpslbtmp[t] / psfc[t], \
dptmp2[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
prptmp[4] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2] - prptmp[3]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = ztmp[2]
zdp2tmp[t] = ztmp[3]
elif ( pbttmp1[t] >= 0.0 ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
else:
zpbtmp[t] = -9999.0
zdp1tmp[t] = -9999.0
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
str1 = 'Cloud Bot Pres Below Sfc: %d ' % (ncldct)
print(str1)
if tsmp == 0:
psfcout = numpy.zeros((msksb.shape[0],)) - 9999.0
psfcout[:] = psfc[msksb]
prsbot1out = numpy.zeros((msksb.shape[0],)) - 9999.0
prsbot1out[:] = zpbtmp[msksb]
dpcld1out = numpy.zeros((msksb.shape[0],)) - 9999.0
dpcld1out[:] = zdp1tmp[msksb]
dpslbout = numpy.zeros((msksb.shape[0],)) - 9999.0
dpslbout[:] = zdslbtmp[msksb]
dpcld2out = numpy.zeros((msksb.shape[0],)) - 9999.0
dpcld2out[:] = zdp2tmp[msksb]
else:
psfcout = numpy.append(psfcout,psfc[msksb])
prsbot1out = numpy.append(prsbot1out,zpbtmp[msksb])
dpcld1out = numpy.append(dpcld1out,zdp1tmp[msksb])
dpslbout = numpy.append(dpslbout,zdslbtmp[msksb])
dpcld2out = numpy.append(dpcld2out,zdp2tmp[msksb])
# Slab Types: 101.0 = Liquid, 201.0 = Ice, None else
# Output: 0 = Liquid, 1 = Ice
typtmp1 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp1[nslbtmp >= 1] = (ctyp1[nslbtmp >= 1] - 1.0) / 100.0 - 1.0
typtmp1[swpsq] = (ctyp2[swpsq] - 1.0) / 100.0 - 1.0
typtmp2 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp2[nslbtmp == 2] = (ctyp2[nslbtmp == 2] - 1.0) / 100.0 - 1.0
typtmp2[swpsq] = (ctyp1[swpsq] - 1.0) / 100.0 - 1.0
if tsmp == 0:
slbtyp1out = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
slbtyp1out[:] = typtmp1[msksb]
slbtyp2out = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
slbtyp2out[:] = typtmp2[msksb]
else:
slbtyp1out = numpy.append(slbtyp1out,typtmp1[msksb])
slbtyp2out = numpy.append(slbtyp2out,typtmp2[msksb])
# Cloud Fraction Logit, still account for swapping
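        # cfrc12 appears to be the overlapping fraction of the two cloud
        # slabs, so the composition passed to the logit transform is
        # (slab 1 only, slab 2 only, overlap, clear).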
z1tmp = numpy.zeros((cfrc1.shape[0],)) - 9999.0
z2tmp = numpy.zeros((cfrc1.shape[0],)) - 9999.0
z12tmp = numpy.zeros((cfrc1.shape[0],)) - 9999.0
for t in range(z1tmp.shape[0]):
if ( (cfrc1[t] > 0.0) and (cfrc2[t] > 0.0) and (cfrc12[t] > 0.0) ):
# Must adjust amounts
if (slabswap[t] == 0):
prptmp = numpy.array( [cfrc1[t]-cfrc12[t], cfrc2[t]-cfrc12[t], cfrc12[t], 0.0] )
else:
prptmp = numpy.array( [cfrc2[t]-cfrc12[t], cfrc1[t]-cfrc12[t], cfrc12[t], 0.0] )
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t] = ztmp[0]
z2tmp[t] = ztmp[1]
z12tmp[t] = ztmp[2]
elif ( (cfrc1[t] > 0.0) and (cfrc2[t] > 0.0) ):
if (slabswap[t] == 0):
prptmp = numpy.array( [cfrc1[t], cfrc2[t], 0.0] )
else:
prptmp = numpy.array( [cfrc2[t], cfrc1[t], 0.0] )
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t] = ztmp[0]
z2tmp[t] = ztmp[1]
z12tmp[t] = -9999.0
elif ( cfrc1[t] > 0.0 ):
prptmp = numpy.array( [cfrc1[t], 1.0 - cfrc1[t] ] )
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t] = ztmp[0]
z2tmp[t] = -9999.0
z12tmp[t] = -9999.0
else:
z1tmp[t] = -9999.0
z2tmp[t] = -9999.0
z12tmp[t] = -9999.0
if tsmp == 0:
cfclgt1out = numpy.zeros((msksb.shape[0],)) - 9999.0
cfclgt1out[:] = z1tmp[msksb]
cfclgt2out = numpy.zeros((msksb.shape[0],)) - 9999.0
cfclgt2out[:] = z2tmp[msksb]
cfclgt12out = numpy.zeros((msksb.shape[0],)) - 9999.0
cfclgt12out[:] = z12tmp[msksb]
else:
cfclgt1out = numpy.append(cfclgt1out,z1tmp[msksb])
cfclgt2out = numpy.append(cfclgt2out,z2tmp[msksb])
cfclgt12out = numpy.append(cfclgt12out,z12tmp[msksb])
# Cloud Non-Gas Water
ngwttmp1 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp1[nslbtmp >= 1] = cngwt1[nslbtmp >= 1]
ngwttmp1[swpsq] = cngwt2[swpsq]
ngwttmp2 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp2[nslbtmp == 2] = cngwt2[nslbtmp == 2]
ngwttmp2[swpsq] = cngwt1[swpsq]
if tsmp == 0:
ngwt1out = numpy.zeros((msksb.shape[0],)) - 9999.0
ngwt1out[:] = ngwttmp1[msksb]
ngwt2out = numpy.zeros((msksb.shape[0],)) - 9999.0
ngwt2out[:] = ngwttmp2[msksb]
else:
ngwt1out = numpy.append(ngwt1out,ngwttmp1[msksb])
ngwt2out = numpy.append(ngwt2out,ngwttmp2[msksb])
# Cloud Top Temperature
cttptmp1 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp1[nslbtmp >= 1] = cttp1[nslbtmp >= 1]
cttptmp1[swpsq] = cttp2[swpsq]
cttptmp2 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp2[nslbtmp == 2] = cttp2[nslbtmp == 2]
cttptmp2[swpsq] = cttp1[swpsq]
if tsmp == 0:
cttp1out = numpy.zeros((msksb.shape[0],)) - 9999.0
cttp1out[:] = cttptmp1[msksb]
cttp2out = numpy.zeros((msksb.shape[0],)) - 9999.0
cttp2out[:] = cttptmp2[msksb]
else:
cttp1out = numpy.append(cttp1out,cttptmp1[msksb])
cttp2out = numpy.append(cttp2out,cttptmp2[msksb])
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((msksb.shape[0],)) - 9999.0
latout[:] = lthld[msksb]
lonout = numpy.zeros((msksb.shape[0],)) - 9999.0
lonout[:] = lnhld[msksb]
yrout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
jdyout[:] = tmhld[msksb]
else:
latout = numpy.append(latout,lthld[msksb])
lonout = numpy.append(lonout,lnhld[msksb])
yrtmp = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[msksb])
tsmp = tsmp + msksb.shape[0]
# Process quantiles
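# The quantile_msgdat* helpers are assumed to compute empirical quantiles at the
# break points in prbs while skipping missing values (-9999 / -99); the _discrete
# variant returns integer quantiles for count- and type-valued variables.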
nslbqs = calculate_VPD.quantile_msgdat_discrete(nslabout,prbs)
str1 = '%.2f Number Slab Quantile: %d' % (prbs[53],nslbqs[53])
print(str1)
print(nslbqs)
psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
str1 = '%.2f Surface Pressure Quantile: %.3f' % (prbs[53],psfcqs[53])
print(str1)
prsbt1qs = calculate_VPD.quantile_msgdat(prsbot1out,prbs)
str1 = '%.2f CldBot1 Pressure Quantile: %.3f' % (prbs[53],prsbt1qs[53])
print(str1)
dpcld1qs = calculate_VPD.quantile_msgdat(dpcld1out,prbs)
str1 = '%.2f DPCloud1 Quantile: %.3f' % (prbs[53],dpcld1qs[53])
print(str1)
dpslbqs = calculate_VPD.quantile_msgdat(dpslbout,prbs)
str1 = '%.2f DPSlab Quantile: %.3f' % (prbs[53],dpslbqs[53])
print(str1)
dpcld2qs = calculate_VPD.quantile_msgdat(dpcld2out,prbs)
str1 = '%.2f DPCloud2 Quantile: %.3f' % (prbs[53],dpcld2qs[53])
print(str1)
slb1qs = calculate_VPD.quantile_msgdat_discrete(slbtyp1out,prbs)
str1 = '%.2f Type1 Quantile: %d' % (prbs[53],slb1qs[53])
print(str1)
slb2qs = calculate_VPD.quantile_msgdat_discrete(slbtyp2out,prbs)
str1 = '%.2f Type2 Quantile: %d' % (prbs[53],slb2qs[53])
print(str1)
lgt1qs = calculate_VPD.quantile_msgdat(cfclgt1out,prbs)
str1 = '%.2f Logit 1 Quantile: %.3f' % (prbs[53],lgt1qs[53])
print(str1)
lgt2qs = calculate_VPD.quantile_msgdat(cfclgt2out,prbs)
str1 = '%.2f Logit 2 Quantile: %.3f' % (prbs[53],lgt2qs[53])
print(str1)
lgt12qs = calculate_VPD.quantile_msgdat(cfclgt12out,prbs)
str1 = '%.2f Logit 1/2 Quantile: %.3f' % (prbs[53],lgt12qs[53])
print(str1)
ngwt1qs = calculate_VPD.quantile_msgdat(ngwt1out,prbs)
str1 = '%.2f NGWater1 Quantile: %.3f' % (prbs[53],ngwt1qs[53])
print(str1)
ngwt2qs = calculate_VPD.quantile_msgdat(ngwt2out,prbs)
str1 = '%.2f NGWater2 Quantile: %.3f' % (prbs[53],ngwt2qs[53])
print(str1)
cttp1qs = calculate_VPD.quantile_msgdat(cttp1out,prbs)
str1 = '%.2f CTTemp1 Quantile: %.3f' % (prbs[53],cttp1qs[53])
print(str1)
cttp2qs = calculate_VPD.quantile_msgdat(cttp2out,prbs)
str1 = '%.2f CTTemp2 Quantile: %.3f' % (prbs[53],cttp2qs[53])
print(str1)
# Should be no missing for number of slabs
print('Slab summary')
print(numpy.amin(nslabout))
print(numpy.amax(nslabout))
print(tsmp)
# Output Quantiles
mstr = dyst.strftime('%b')
qfnm = '%s/%s_US_JJA_%02dUTC_%04d_Cloud_Quantile.nc' % (dtdr,rgchc,hrchc,yrlst[k])
qout = Dataset(qfnm,'w')
dimp = qout.createDimension('probability',nprb)
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
varnslb = qout.createVariable('NumberSlab_quantile','i2',['probability'], fill_value = -99)
varnslb[:] = nslbqs
varnslb.long_name = 'Number of cloud slabs quantiles'
varnslb.units = 'Count'
varnslb.missing_value = -99
varcbprs = qout.createVariable('CloudBot1Logit_quantile','f4',['probability'], fill_value = -9999)
varcbprs[:] = prsbt1qs
varcbprs.long_name = 'Slab 1 cloud bottom pressure logit quantiles'
varcbprs.units = 'hPa'
varcbprs.missing_value = -9999
vardpc1 = qout.createVariable('DPCloud1Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc1[:] = dpcld1qs
vardpc1.long_name = 'Slab 1 cloud pressure depth logit quantiles'
vardpc1.units = 'hPa'
vardpc1.missing_value = -9999
vardpslb = qout.createVariable('DPSlabLogit_quantile','f4',['probability'], fill_value = -9999)
vardpslb[:] = dpslbqs
vardpslb.long_name = 'Two-slab vertical separation logit quantiles'
vardpslb.units = 'hPa'
vardpslb.missing_value = -9999
vardpc2 = qout.createVariable('DPCloud2Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc2[:] = dpcld2qs
vardpc2.long_name = 'Slab 2 cloud pressure depth logit quantiles'
vardpc2.units = 'hPa'
vardpc2.missing_value = -9999
vartyp1 = qout.createVariable('CType1_quantile','i2',['probability'], fill_value = -99)
vartyp1[:] = slb1qs
vartyp1.long_name = 'Slab 1 cloud type quantiles'
vartyp1.units = 'None'
vartyp1.missing_value = -99
vartyp1.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
vartyp2 = qout.createVariable('CType2_quantile','i2',['probability'], fill_value = -99)
vartyp2[:] = slb2qs
vartyp2.long_name = 'Slab 2 cloud type quantiles'
vartyp2.units = 'None'
vartyp2.missing_value = -99
vartyp2.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
varlgt1 = qout.createVariable('CFrcLogit1_quantile','f4',['probability'], fill_value = -9999)
varlgt1[:] = lgt1qs
varlgt1.long_name = 'Slab 1 cloud fraction (cfrac1x) logit quantiles'
varlgt1.units = 'None'
varlgt1.missing_value = -9999
varlgt2 = qout.createVariable('CFrcLogit2_quantile','f4',['probability'], fill_value = -9999)
varlgt2[:] = lgt2qs
varlgt2.long_name = 'Slab 2 cloud fraction (cfrac2x) logit quantiles'
varlgt2.units = 'None'
varlgt2.missing_value = -9999
varlgt12 = qout.createVariable('CFrcLogit12_quantile','f4',['probability'], fill_value = -9999)
varlgt12[:] = lgt12qs
varlgt12.long_name = 'Slab 1/2 overlap fraction (cfrac12) logit quantiles'
varlgt12.units = 'None'
varlgt12.missing_value = -9999
varngwt1 = qout.createVariable('NGWater1_quantile','f4',['probability'], fill_value = -9999)
varngwt1[:] = ngwt1qs
varngwt1.long_name = 'Slab 1 cloud non-gas water quantiles'
varngwt1.units = 'g m^-2'
varngwt1.missing_value = -9999
varngwt2 = qout.createVariable('NGWater2_quantile','f4',['probability'], fill_value = -9999)
varngwt2[:] = ngwt2qs
varngwt2.long_name = 'Slab 2 cloud non-gas water quantiles'
varngwt2.units = 'g m^-2'
varngwt2.missing_value = -9999
varcttp1 = qout.createVariable('CTTemp1_quantile','f4',['probability'], fill_value = -9999)
varcttp1[:] = cttp1qs
varcttp1.long_name = 'Slab 1 cloud top temperature quantiles'
varcttp1.units = 'K'
varcttp1.missing_value = -9999
varcttp2 = qout.createVariable('CTTemp2_quantile','f4',['probability'], fill_value = -9999)
varcttp2[:] = cttp2qs
varcttp2.long_name = 'Slab 2 cloud top temperature quantiles'
varcttp2.units = 'K'
varcttp2.missing_value = -9999
qout.close()
# Set up transformations
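# These calls are assumed to apply a probability integral transform: each sample is
# located on its empirical quantile curve (the *qs arrays written above) and mapped
# to the matching standard normal deviate, so the outputs are approximately N(0,1).
# The *_fill_msg variants are assumed to also assign a transformed value to missing
# inputs (e.g., by drawing a quantile at random) instead of propagating -9999.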
znslb = calculate_VPD.std_norm_quantile_from_obs(nslabout, nslbqs, prbs, msgval=-99)
zpsfc = calculate_VPD.std_norm_quantile_from_obs(psfcout, psfcqs, prbs, msgval=-9999.)
zprsbt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(prsbot1out, prsbt1qs, prbs, msgval=-9999.)
zdpcld1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld1out, dpcld1qs, prbs, msgval=-9999.)
zdpslb = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpslbout, dpslbqs, prbs, msgval=-9999.)
zdpcld2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld2out, dpcld2qs, prbs, msgval=-9999.)
zctyp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp1out, slb1qs, prbs, msgval=-99)
zctyp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp2out, slb2qs, prbs, msgval=-99)
zlgt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt1out, lgt1qs, prbs, msgval=-9999.)
zlgt2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt2out, lgt2qs, prbs, msgval=-9999.)
zlgt12 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt12out, lgt12qs, prbs, msgval=-9999.)
zngwt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt1out, ngwt1qs, prbs, msgval=-9999.)
zngwt2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt2out, ngwt2qs, prbs, msgval=-9999.)
zcttp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp1out, cttp1qs, prbs, msgval=-9999.)
zcttp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp2out, cttp2qs, prbs, msgval=-9999.)
# Output transformed quantile samples
zfnm = '%s/%s_US_JJA_%02dUTC_%04d_Cloud_StdGausTrans.nc' % (dtdr,rgchc,hrchc,yrlst[k])
zout = Dataset(zfnm,'w')
dimsmp = zout.createDimension('sample',tsmp)
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varnslb = zout.createVariable('NumberSlab_StdGaus','f4',['sample'], fill_value = -9999)
varnslb[:] = znslb
varnslb.long_name = 'Quantile transformed number of cloud slabs'
varnslb.units = 'None'
varnslb.missing_value = -9999.
varcbprs = zout.createVariable('CloudBot1Logit_StdGaus','f4',['sample'], fill_value = -9999)
varcbprs[:] = zprsbt1
varcbprs.long_name = 'Quantile transformed slab 1 cloud bottom pressure logit'
varcbprs.units = 'None'
varcbprs.missing_value = -9999.
vardpc1 = zout.createVariable('DPCloud1Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc1[:] = zdpcld1
vardpc1.long_name = 'Quantile transformed slab 1 cloud pressure depth logit'
vardpc1.units = 'None'
vardpc1.missing_value = -9999.
vardpslb = zout.createVariable('DPSlabLogit_StdGaus','f4',['sample'], fill_value = -9999)
vardpslb[:] = zdpslb
vardpslb.long_name = 'Quantile transformed two-slab vertical separation logit'
vardpslb.units = 'None'
vardpslb.missing_value = -9999.
vardpc2 = zout.createVariable('DPCloud2Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc2[:] = zdpcld2
vardpc2.long_name = 'Quantile transformed slab 2 cloud pressure depth logit'
vardpc2.units = 'None'
vardpc2.missing_value = -9999.
vartyp1 = zout.createVariable('CType1_StdGaus','f4',['sample'], fill_value = -9999)
vartyp1[:] = zctyp1
vartyp1.long_name = 'Quantile transformed slab 1 cloud type'
vartyp1.units = 'None'
vartyp1.missing_value = -9999.
vartyp2 = zout.createVariable('CType2_StdGaus','f4',['sample'], fill_value = -9999)
vartyp2[:] = zctyp2
vartyp2.long_name = 'Quantile transformed slab 2 cloud type'
vartyp2.units = 'None'
vartyp2.missing_value = -9999.
varlgt1 = zout.createVariable('CFrcLogit1_StdGaus','f4',['sample'], fill_value = -9999)
varlgt1[:] = zlgt1
varlgt1.long_name = 'Quantile transformed slab 1 cloud fraction logit'
varlgt1.units = 'None'
varlgt1.missing_value = -9999.
varlgt2 = zout.createVariable('CFrcLogit2_StdGaus','f4',['sample'], fill_value = -9999)
varlgt2[:] = zlgt2
varlgt2.long_name = 'Quantile transformed slab 2 cloud fraction logit'
varlgt2.units = 'None'
varlgt2.missing_value = -9999.
varlgt12 = zout.createVariable('CFrcLogit12_StdGaus','f4',['sample'], fill_value = -9999)
varlgt12[:] = zlgt12
varlgt12.long_name = 'Quantile transformed slab 1/2 overlap fraction logit'
varlgt12.units = 'None'
varlgt12.missing_value = -9999.
varngwt1 = zout.createVariable('NGWater1_StdGaus','f4',['sample'], fill_value = -9999)
varngwt1[:] = zngwt1
varngwt1.long_name = 'Quantile transformed slab 1 non-gas water'
varngwt1.units = 'None'
varngwt1.missing_value = -9999.
varngwt2 = zout.createVariable('NGWater2_StdGaus','f4',['sample'], fill_value = -9999)
varngwt2[:] = zngwt2
varngwt2.long_name = 'Quantile transformed slab 2 non-gas water'
varngwt2.units = 'None'
varngwt2.missing_value = -9999.
varcttp1 = zout.createVariable('CTTemp1_StdGaus','f4',['sample'], fill_value = -9999)
varcttp1[:] = zcttp1
varcttp1.long_name = 'Quantile transformed slab 1 cloud top temperature'
varcttp1.units = 'None'
varcttp1.missing_value = -9999.
varcttp2 = zout.createVariable('CTTemp2_StdGaus','f4',['sample'], fill_value = -9999)
varcttp2[:] = zcttp2
varcttp2.long_name = 'Quantile transformed slab 2 cloud top temperature'
varcttp2.units = 'None'
varcttp2.missing_value = -9999.
zout.close()
return
# Temp/RH Quantiles
def quantile_profile_locmask(airsdr, mtdr, indr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, msk):
# Construct profile/sfc variable quantiles and z-scores, with a possibly irregular location mask
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (airsdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
nzout = 101
tmpqout = numpy.zeros((nzout,nprb)) - 9999.
rhqout = numpy.zeros((nzout,nprb)) - 9999.
sftmpqs = numpy.zeros((nprb,)) - 9999.
sfaltqs = numpy.zeros((nprb,)) - 9999.
psfcqs = numpy.zeros((nprb,)) - 9999.
altmed = numpy.zeros((nzout,)) - 9999.
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f[msk][:,:]
latmet = f['plat'][:]
lonmet = f['plon'][:]
f.close()
mask[mask <= 0] = 0
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset a bit
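# Collapse the mask along each axis to find the smallest lon/lat bounding box that
# contains every masked grid cell; later reads are restricted to this box to keep
# the arrays small.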
lnsm = numpy.sum(mask,axis=0)
print(lnsq.shape)
print(lnsm.shape)
print(lnsm)
ltsm = numpy.sum(mask,axis=1)
print(ltsq.shape)
print(ltsm.shape)
print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mask[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
dyinit = datetime.date(yrlst[k],6,1)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
tmhld = numpy.repeat(jdsq,nx*ny)
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = h5py.File(mtnm,'r')
stparr = f['/stemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
psfarr = f['/spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
salarr = f['/salti'][ltmn:ltmx,lnmn:lnmx]
f.close()
nt = psfarr.shape[0]
msksq1 = numpy.arange(mskflt.shape[0])
msksb1 = msksq1[mskflt > 0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
lthld = numpy.tile(ltrp,nt)
lnhld = numpy.tile(lnrp,nt)
stparr = stparr.flatten()
psfarr = psfarr.flatten()
salarr = salarr.flatten()
if tsmp == 0:
sftmpout = numpy.zeros((msksb.shape[0],)) - 9999.0
sftmpout[:] = stparr[msksb]
psfcout = numpy.zeros((msksb.shape[0],)) - 9999.0
psfcout[:] = psfarr[msksb]
sfaltout = numpy.zeros((msksb.shape[0],)) - 9999.0
sfaltout[:] = numpy.tile(salarr[msksb1],nt)
else:
sftmpout = numpy.append(sftmpout,stparr[msksb])
psfcout = numpy.append(psfcout,psfarr[msksb])
sfaltout = numpy.append(sfaltout,numpy.tile(salarr[msksb1],nt))
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((msksb.shape[0],)) - 9999.0
latout[:] = lthld[msksb]
lonout = numpy.zeros((msksb.shape[0],)) - 9999.0
lonout[:] = lnhld[msksb]
yrout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
jdyout[:] = tmhld[msksb]
else:
latout = numpy.append(latout,lthld[msksb])
lonout = numpy.append(lonout,lnhld[msksb])
yrtmp = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[msksb])
tsmp = tsmp + msksb.shape[0]
# Vertical profiles
tmpmerout = numpy.zeros((tsmp,nzout)) - 9999.
h2omerout = numpy.zeros((tsmp,nzout)) - 9999.
altout = numpy.zeros((tsmp,nzout)) - 9999.
sidx = 0
for k in range(nyr):
dyinit = datetime.date(yrlst[k],6,1)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
tmhld = numpy.repeat(jdsq,nx*ny)
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = h5py.File(mtnm,'r')
tmparr = f['/ptemp'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
h2oarr = f['/rh'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
altarr = f['/palts'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
f.close()
nt = tmparr.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
lthld = numpy.tile(ltrp,nt)
lnhld = numpy.tile(lnrp,nt)
fidx = sidx + msksb.shape[0]
for j in range(nzout):
tmpvec = tmparr[:,j,:,:].flatten()
tmpvec[tmpvec > 1e30] = -9999.
tmpmerout[sidx:fidx,j] = tmpvec[msksb]
altvec = altarr[:,j,:,:].flatten()
altout[sidx:fidx,j] = altvec[msksb]
h2ovec = h2oarr[:,j,:,:].flatten()
h2ovec[h2ovec > 1e30] = -9999.
h2omerout[sidx:fidx,j] = h2ovec[msksb]
sidx = sidx + msksb.shape[0]
# Quantiles
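# Per-level loop: temperature, altitude and RH quantiles are computed level by level,
# and each level's samples are then transformed to standard normal scores against
# that level's own quantile curve.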
ztmpout = numpy.zeros((tsmp,nzout)) - 9999.
zrhout = numpy.zeros((tsmp,nzout)) - 9999.
zsftmpout = numpy.zeros((tsmp,)) - 9999.
zsfaltout = numpy.zeros((tsmp,)) - 9999.
zpsfcout = numpy.zeros((tsmp,)) - 9999.
for j in range(nzout):
tmptmp = calculate_VPD.quantile_msgdat(tmpmerout[:,j],prbs)
tmpqout[j,:] = tmptmp[:]
str1 = 'Plev %.2f, %.2f Temp Quantile: %.3f' % (plev[j],prbs[103],tmptmp[103])
print(str1)
# Transform
ztmp = calculate_VPD.std_norm_quantile_from_obs(tmpmerout[:,j], tmptmp, prbs, msgval=-9999.)
ztmpout[:,j] = ztmp[:]
alttmp = calculate_VPD.quantile_msgdat(altout[:,j],prbs)
altmed[j] = alttmp[103]
str1 = 'Plev %.2f, %.2f Alt Quantile: %.3f' % (plev[j],prbs[103],alttmp[103])
print(str1)
# Cap RH above 1.0 (RH is stored as a fraction, so 1.0 corresponds to 100%)
rhadj = h2omerout[:,j]
rhadj[rhadj > 1.0] = 1.0
rhqtmp = calculate_VPD.quantile_msgdat(rhadj,prbs)
rhqout[j,:] = rhqtmp[:]
str1 = 'Plev %.2f, %.2f RH Quantile: %.4f' % (plev[j],prbs[103],rhqtmp[103])
print(str1)
zrh = calculate_VPD.std_norm_quantile_from_obs(rhadj, rhqtmp, prbs, msgval=-9999.)
zrhout[:,j] = zrh[:]
psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
str1 = '%.2f PSfc Quantile: %.2f' % (prbs[103],psfcqs[103])
print(str1)
zpsfcout = calculate_VPD.std_norm_quantile_from_obs(psfcout, psfcqs, prbs, msgval=-9999.)
sftpqs = calculate_VPD.quantile_msgdat(sftmpout,prbs)
str1 = '%.2f SfcTmp Quantile: %.2f' % (prbs[103],sftpqs[103])
print(str1)
zsftmpout = calculate_VPD.std_norm_quantile_from_obs(sftmpout, sftpqs, prbs, msgval=-9999.)
sfalqs = calculate_VPD.quantile_msgdat(sfaltout,prbs)
str1 = '%.2f SfcAlt Quantile: %.2f' % (prbs[103],sfalqs[103])
print(str1)
zsfaltout = calculate_VPD.std_norm_quantile_from_obs(sfaltout, sfalqs, prbs, msgval=-9999.)
# Output Quantiles
mstr = dyst.strftime('%b')
qfnm = '%s/%s_US_JJA_%02dUTC_%04d_TempRHSfc_Quantile.nc' % (dtdr,rgchc,hrchc,yrlst[k])
qout = Dataset(qfnm,'w')
dimz = qout.createDimension('level',nzout)
dimp = qout.createDimension('probability',nprb)
varlvl = qout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
# Altitude grid
varalt = qout.createVariable('Altitude_median', 'f4', ['level'], fill_value = -9999)
varalt[:] = altmed
varalt.long_name = 'Altitude median value'
varalt.units = 'm'
varalt.missing_value = -9999
vartmp = qout.createVariable('Temperature_quantile', 'f4', ['level','probability'], fill_value = -9999)
vartmp[:] = tmpqout
vartmp.long_name = 'Temperature quantiles'
vartmp.units = 'K'
vartmp.missing_value = -9999.
varrh = qout.createVariable('RH_quantile', 'f4', ['level','probability'], fill_value = -9999)
varrh[:] = rhqout
varrh.long_name = 'Relative humidity quantiles'
varrh.units = 'Unitless'
varrh.missing_value = -9999.
varstmp = qout.createVariable('SfcTemp_quantile', 'f4', ['probability'], fill_value = -9999)
varstmp[:] = sftpqs
varstmp.long_name = 'Surface temperature quantiles'
varstmp.units = 'K'
varstmp.missing_value = -9999.
varpsfc = qout.createVariable('SfcPres_quantile', 'f4', ['probability'], fill_value = -9999)
varpsfc[:] = psfcqs
varpsfc.long_name = 'Surface pressure quantiles'
varpsfc.units = 'hPa'
varpsfc.missing_value = -9999.
varsalt = qout.createVariable('SfcAlt_quantile', 'f4', ['probability'], fill_value = -9999)
varsalt[:] = sfalqs
varsalt.long_name = 'Surface altitude quantiles'
varsalt.units = 'm'
varsalt.missing_value = -9999.
qout.close()
# Output transformed quantile samples
zfnm = '%s/%s_US_JJA_%02dUTC_%04d_TempRHSfc_StdGausTrans.nc' % (dtdr,rgchc,hrchc,yrlst[k])
zout = Dataset(zfnm,'w')
dimz = zout.createDimension('level',nzout)
dimsmp = zout.createDimension('sample',tsmp)
varlvl = zout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varsrt3 = zout.createVariable('Temperature_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt3[:] = ztmpout
varsrt3.long_name = 'Quantile transformed temperature'
varsrt3.units = 'None'
varsrt3.missing_value = -9999.
varsrt4 = zout.createVariable('RH_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt4[:] = zrhout
varsrt4.long_name = 'Quantile transformed relative humidity'
varsrt4.units = 'None'
varsrt4.missing_value = -9999.
varsrts1 = zout.createVariable('SfcTemp_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts1[:] = zsftmpout
varsrts1.long_name = 'Quantile transformed surface temperature'
varsrts1.units = 'None'
varsrts1.missing_value = -9999.
varsrts2 = zout.createVariable('SfcPres_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts2[:] = zpsfcout
varsrts2.long_name = 'Quantile transformed surface pressure'
varsrts2.units = 'None'
varsrts2.missing_value = -9999.
varsrts3 = zout.createVariable('SfcAlt_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts3[:] = zsfaltout
varsrts3.long_name = 'Quantile transformed surface altitude'
varsrts3.units = 'None'
varsrts3.missing_value = -9999.
zout.close()
return
def expt_near_sfc_summary(inpdr, outdr, expfl, qclrfl, outfnm):
# Produce experiment near-surface summaries
# inpdr: Name of input directory
# outdr: Name of output directory
# expfl: Name of file with experiment results
# qclrfl: Input quantile file
# outfnm: Output file name
nzairs = 100
nzsrt = 101
# Read simulation results
f = h5py.File(expfl,'r')
tmprtr = f['airs_ptemp'][:,:]
h2ortr = f['airs_h2o'][:,:]
tqflg = f['airs_ptemp_qc'][:,:]
hqflg = f['airs_h2o_qc'][:,:]
tmpsrt = f['ptemp'][:,1:nzsrt]
h2osrt = f['gas_1'][:,1:nzsrt]
psfc = f['spres'][:]
lvs = f['level'][1:nzsrt]
f.close()
nszout = tmprtr.shape[0]
tqflg = tqflg.astype(numpy.int16)
hqflg = hqflg.astype(numpy.int16)
# Altitude info
qin = Dataset(qclrfl,'r')
alts = qin['Altitude_median'][:]
qin.close()
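# Build a per-sample altitude grid from the median altitude profile: where consecutive
# medians decrease with level index they are used directly; otherwise the last valid
# layer thickness is doubled at each step as a rough extrapolation, and level 97 is
# finally reset to 0.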
alth2o = numpy.zeros((nszout,nzsrt))
alth2o[:,nzsrt-4] = alts[nzsrt-4]
curdlt = 0.0
for j in range(nzsrt-5,-1,-1):
#str1 = 'Level %d: %.4f' % (j,curdlt)
#print(str1)
if (alts[j] > alts[j+1]):
curdlt = alts[j] - alts[j+1]
alth2o[:,j] = alts[j]
else:
alth2o[:,j] = alts[j+1] + curdlt * 2.0
curdlt = curdlt * 2.0
alth2o[:,97] = 0.0
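# The calculate_VPD helpers used below are assumed to extract the value at the level
# nearest the surface pressure (near_sfc_temp, near_sfc_qv_rh) and to derive specific
# humidity, relative humidity and vapor pressure deficit from the water vapor and
# temperature profiles (calculate_QV_and_VPD); retrieval quality flags are carried
# through to the near-surface fields when passqual is True.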
tsfcsrt = calculate_VPD.near_sfc_temp(tmpsrt, lvs, psfc, passqual = False, qual = None)
print(tsfcsrt[0:10])
tsfcrtr, tqflgsfc = calculate_VPD.near_sfc_temp(tmprtr, lvs, psfc, passqual = True, qual = tqflg)
print(tsfcrtr[0:10])
print(tqflgsfc[0:10])
qvsrt, rhsrt, vpdsrt = calculate_VPD.calculate_QV_and_VPD(h2osrt,tmpsrt,lvs,alth2o[:,1:nzsrt])
qvrtr, rhrtr, vpdrtr = calculate_VPD.calculate_QV_and_VPD(h2ortr,tmprtr,lvs,alth2o[:,1:nzsrt])
qsfsrt, rhsfsrt = calculate_VPD.near_sfc_qv_rh(qvsrt, tsfcsrt, lvs, psfc, passqual = False, qual = None)
qsfrtr, rhsfrtr, qflgsfc = calculate_VPD.near_sfc_qv_rh(qvrtr, tsfcrtr, lvs, psfc, passqual = True, qual = hqflg)
print(tqflgsfc.dtype)
print(qflgsfc.dtype)
# Output: Sfc Temp and qflg, SfC QV, RH and qflg
fldbl = numpy.array([-9999.],dtype=numpy.float64)
flflt = numpy.array([-9999.],dtype=numpy.float32)
flshrt = numpy.array([-99],dtype=numpy.int16)
#outfnm = '%s/MAGIC_%s_%s_%02dUTC_SR%02d_Sfc_UQ_Output.h5' % (outdr,rgchc,mnchc,hrchc,scnrw)
f = h5py.File(outfnm,'w')
dft1 = f.create_dataset('TSfcAir_True',data=tsfcsrt)
dft1.attrs['missing_value'] = fldbl
dft1.attrs['_FillValue'] = fldbl
dft2 = f.create_dataset('TSfcAir_Retrieved',data=tsfcrtr)
dft2.attrs['missing_value'] = fldbl
dft2.attrs['_FillValue'] = fldbl
dft3 = f.create_dataset('TSfcAir_QC',data=tqflgsfc)
dfq1 = f.create_dataset('QVSfcAir_True',data=qsfsrt)
dfq1.attrs['missing_value'] = fldbl
dfq1.attrs['_FillValue'] = fldbl
dfq2 = f.create_dataset('QVSfcAir_Retrieved',data=qsfrtr)
dfq2.attrs['missing_value'] = fldbl
dfq2.attrs['_FillValue'] = fldbl
dfq3 = f.create_dataset('RHSfcAir_True',data=rhsfsrt)
dfq3.attrs['missing_value'] = fldbl
dfq3.attrs['_FillValue'] = fldbl
dfq4 = f.create_dataset('RHSfcAir_Retrieved',data=rhsfrtr)
dfq4.attrs['missing_value'] = fldbl
dfq4.attrs['_FillValue'] = fldbl
dfq5 = f.create_dataset('RHSfcAir_QC',data=qflgsfc)
dfp1 = f.create_dataset('SfcPres',data=psfc)
dfp1.attrs['missing_value'] = fldbl
dfp1.attrs['_FillValue'] = fldbl
f.close()
return
def quantile_cfrac_locmask_conus(rfdr, mtdr, csdr, airdr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, mskvr, mskvl):
# Construct cloud variable quantiles and z-scores, with a possibly irregular location mask
# rfdr: Directory for reference data (Levels/Quantiles)
# mtdr: Directory for MERRA data
# csdr: Directory for cloud slab data
# airdr: Directory for AIRS cloud fraction
# dtdr: Output directory
# yrlst: List of years to process
# mnst: Starting Month
# mnfn: Ending Month
# hrchc: Template Hour Choice
# rgchc: Template Region Choice
# mskvr: Name of region mask variable
# mskvl: Value of region mask for Region Choice
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (rfdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
# RN generator
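# 'random' is assumed to be numpy.random (note the size= keyword used below), so this
# seed makes the uniform overlap draws reproducible for a given year/hour combination.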
sdchc = 542354 + yrlst[0] + hrchc
random.seed(sdchc)
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f.variables[mskvr][:,:]
latmet = f.variables['plat'][:]
lonmet = f.variables['plon'][:]
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
mskind = numpy.zeros((mask.shape),dtype=mask.dtype)
print(mskvl)
mskind[mask == mskvl] = 1
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset a bit
lnsm = numpy.sum(mskind,axis=0)
#print(lnsq.shape)
#print(lnsm.shape)
#print(lnsm)
ltsm = numpy.sum(mskind,axis=1)
#print(ltsq.shape)
#print(ltsm.shape)
#print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mskind[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(fnm,'r')
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
tmunit = tmunit.replace("days since ","")
dybs = datetime.datetime.strptime(tmunit,"%Y-%m-%d %H:%M:%S")
print(dybs)
dy0 = dybs + datetime.timedelta(days=tminf[0])
dyinit = datetime.date(dy0.year,dy0.month,dy0.day)
print(dyinit)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
print(jdsq)
tmhld = numpy.repeat(jdsq,nx*ny)
#print(tmhld.shape)
#print(numpy.amin(tmhld))
#print(numpy.amax(tmhld))
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing_IncludesCloudParams.h5' % (csdr,yrlst[k],hrchc)
f = h5py.File(fnm,'r')
tms = f['/time'][:,dystidx:dyfnidx]
ctyp1 = f['/ctype'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
ctyp2 = f['/ctype2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt1 = f['/cprtop'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt2 = f['/cprtop2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb1 = f['/cprbot'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb2 = f['/cprbot2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc1 = f['/cfrac'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc2 = f['/cfrac2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc12 = f['/cfrac12'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt1 = f['/cngwat'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt2 = f['/cngwat2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp1 = f['/cstemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp2 = f['/cstemp2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
tmflt = tms.flatten()
nt = tmflt.shape[0]
lnhld = numpy.tile(lnrp,nt)
lthld = numpy.tile(ltrp,nt)
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(mtnm,'r')
psfc = f.variables['spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
nt = ctyp1.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
# lthld = numpy.tile(ltrp,nt)
# lnhld = numpy.tile(lnrp,nt)
nslbtmp = numpy.zeros((ctyp1.shape),dtype=numpy.int16)
nslbtmp[(ctyp1 > 100) & (ctyp2 > 100)] = 2
nslbtmp[(ctyp1 > 100) & (ctyp2 < 100)] = 1
# AIRS clouds
anm = '%s/CONUS_AIRS_CldFrc_Match_JJA_%d_%02d_UTC.nc' % (airdr,yrlst[k],hrchc)
f = Dataset(anm,'r')
arsfrc1 = f.variables['AIRS_CldFrac_1'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
arsfrc2 = f.variables['AIRS_CldFrac_2'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
# Sum
frctot = arsfrc1 + arsfrc2
# Construct Clr/PC/Ovc indicator for AIRS total cloud frac
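# Indicator coding: -1 = clear (total fraction exactly 0.0), 1 = overcast (exactly 1.0),
# 0 = partly cloudy otherwise; the AIRS fill mask is retained so unmatched footprints
# stay masked.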
totclr = numpy.zeros(frctot.shape,dtype=numpy.int16)
totclr[frctot == 0.0] = -1
totclr[frctot == 1.0] = 1
totclr = ma.masked_array(totclr, mask = frctot.mask)
frc0 = frctot[0,:,:,:]
frc0 = frc0.flatten()
frcsq = numpy.arange(tmhld.shape[0])
# Subset by AIRS matchup and location masks
frcsb = frcsq[(numpy.logical_not(frc0.mask)) & (mskall > 0)]
nairs = frcsb.shape[0]
print(tmhld.shape)
print(frcsb.shape)
ctyp1 = ctyp1.flatten()
ctyp2 = ctyp2.flatten()
nslbtmp = nslbtmp.flatten()
cngwt1 = cngwt1.flatten()
cngwt2 = cngwt2.flatten()
cttp1 = cttp1.flatten()
cttp2 = cttp2.flatten()
psfc = psfc.flatten()
# Number of slabs
if tsmp == 0:
nslabout = numpy.zeros((nairs,),dtype=numpy.int16)
nslabout[:] = nslbtmp[frcsb]
else:
nslabout = numpy.append(nslabout,nslbtmp[frcsb])
# For two slabs, slab 1 must have highest cloud bottom pressure
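# slabswap flags profiles where the SARTA slab ordering must be reversed so that
# "slab 1" always denotes the slab with the larger cloud bottom pressure; the swap is
# applied consistently to the pressures, types, water amounts and temperatures below.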
cprt1 = cprt1.flatten()
cprt2 = cprt2.flatten()
cprb1 = cprb1.flatten()
cprb2 = cprb2.flatten()
slabswap = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
swpsq = frcsq[(nslbtmp == 2) & (cprb1 < cprb2)]
slabswap[swpsq] = 1
# Cloud Pressure variables
pbttmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp1[nslbtmp >= 1] = cprb1[nslbtmp >= 1]
pbttmp1[swpsq] = cprb2[swpsq]
ptptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp1[nslbtmp >= 1] = cprt1[nslbtmp >= 1]
ptptmp1[swpsq] = cprt2[swpsq]
pbttmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp2[nslbtmp == 2] = cprb2[nslbtmp == 2]
pbttmp2[swpsq] = cprb1[swpsq]
ptptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp2[nslbtmp == 2] = cprt2[nslbtmp == 2]
ptptmp2[swpsq] = cprt1[swpsq]
# DP Cloud transformation
dptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp1[nslbtmp >= 1] = pbttmp1[nslbtmp >= 1] - ptptmp1[nslbtmp >= 1]
dpslbtmp = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dpslbtmp[nslbtmp == 2] = ptptmp1[nslbtmp == 2] - pbttmp2[nslbtmp == 2]
dptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp2[nslbtmp == 2] = pbttmp2[nslbtmp == 2] - ptptmp2[nslbtmp == 2]
# Adjust negative DPSlab values
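# A negative slab separation means the two slabs overlap slightly; the separation is
# forced to 1 hPa and, as a rough ad hoc compensation, both slab depths are halved.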
dpnsq = frcsq[(nslbtmp == 2) & (dpslbtmp < 0.0) & (dpslbtmp > -1000.0)]
dpadj = numpy.zeros((ctyp1.shape[0],))
dpadj[dpnsq] = numpy.absolute(dpslbtmp[dpnsq])
dpslbtmp[dpnsq] = 1.0
dptmp1[dpnsq] = dptmp1[dpnsq] / 2.0
dptmp2[dpnsq] = dptmp2[dpnsq] / 2.0
# Sigma / Logit Adjustments
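# The cloud pressure geometry is re-expressed as a composition of surface-pressure
# fractions before the logit transform. For a two-slab profile the vector is
# [(psfc - pbot1)/psfc, dp1/psfc, dpslab/psfc, dp2/psfc, remainder]; e.g. with
# psfc = 1000 hPa, pbot1 = 850, dp1 = 100, dpslab = 200, dp2 = 50 this gives
# [0.15, 0.10, 0.20, 0.05, 0.50]. calculate_VPD.lgtzs is assumed to map such a
# composition into log-ratio space, one value per cloud component.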
zpbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp1tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdslbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp2tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
ncldct = 0
for t in range(psfc.shape[0]):
if ( (pbttmp1[t] >= 0.0) and (dpslbtmp[t] >= 0.0) ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], dpslbtmp[t] / psfc[t], \
dptmp2[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
prptmp[4] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2] - prptmp[3]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = ztmp[2]
zdp2tmp[t] = ztmp[3]
elif ( pbttmp1[t] >= 0.0 ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
else:
zpbtmp[t] = -9999.0
zdp1tmp[t] = -9999.0
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
str1 = 'Cloud Bot Pres Below Sfc: %d ' % (ncldct)
print(str1)
if tsmp == 0:
psfcout = numpy.zeros((frcsb.shape[0],)) - 9999.0
psfcout[:] = psfc[frcsb]
prsbot1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
prsbot1out[:] = zpbtmp[frcsb]
dpcld1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpcld1out[:] = zdp1tmp[frcsb]
dpslbout = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpslbout[:] = zdslbtmp[frcsb]
dpcld2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpcld2out[:] = zdp2tmp[frcsb]
else:
psfcout = numpy.append(psfcout,psfc[frcsb])
prsbot1out = numpy.append(prsbot1out,zpbtmp[frcsb])
dpcld1out = numpy.append(dpcld1out,zdp1tmp[frcsb])
dpslbout = numpy.append(dpslbout,zdslbtmp[frcsb])
dpcld2out = numpy.append(dpcld2out,zdp2tmp[frcsb])
# Slab Types: 101.0 = Liquid, 201.0 = Ice, None else
# Output: 0 = Liquid, 1 = Ice
typtmp1 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp1[nslbtmp >= 1] = (ctyp1[nslbtmp >= 1] - 1.0) / 100.0 - 1.0
typtmp1[swpsq] = (ctyp2[swpsq] - 1.0) / 100.0 - 1.0
typtmp2 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp2[nslbtmp == 2] = (ctyp2[nslbtmp == 2] - 1.0) / 100.0 - 1.0
typtmp2[swpsq] = (ctyp1[swpsq] - 1.0) / 100.0 - 1.0
if tsmp == 0:
slbtyp1out = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
slbtyp1out[:] = typtmp1[frcsb]
slbtyp2out = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
slbtyp2out[:] = typtmp2[frcsb]
else:
slbtyp1out = numpy.append(slbtyp1out,typtmp1[frcsb])
slbtyp2out = numpy.append(slbtyp2out,typtmp2[frcsb])
# Cloud Cover Indicators
totclrtmp = numpy.zeros((frcsb.shape[0],3,3),dtype=numpy.int16)
cctr = 0
for frw in range(3):
for fcl in range(3):
clrvec = totclr[cctr,:,:,:].flatten()
totclrtmp[:,frw,fcl] = clrvec[frcsb]
cctr = cctr + 1
if tsmp == 0:
totclrout = numpy.zeros(totclrtmp.shape,dtype=numpy.int16)
totclrout[:,:,:] = totclrtmp
else:
totclrout = numpy.append(totclrout,totclrtmp,axis=0)
# Cloud Fraction Logit, still account for swapping
z1tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
z2tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
z12tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
# Cloud Fraction
cctr = 0
for frw in range(3):
for fcl in range(3):
frcvect = frctot[cctr,:,:,:].flatten()
frcvec1 = arsfrc1[cctr,:,:,:].flatten()
frcvec2 = arsfrc2[cctr,:,:,:].flatten()
# Quick fix for totals over 1.0: renormalize both layer fractions by the combined
# total (assumed intent), keeping the total just below 1.0 so the logits below stay finite
fvsq = numpy.arange(frcvect.shape[0])
fvsq2 = fvsq[frcvect > 1.0]
frcvec1[fvsq2] = 0.999 * frcvec1[fvsq2] / frcvect[fvsq2]
frcvec2[fvsq2] = 0.999 * frcvec2[fvsq2] / frcvect[fvsq2]
frcvect[fvsq2] = 0.999
for t in range(nairs):
crslb = nslbtmp[frcsb[t]]
crclr = totclrtmp[t,frw,fcl]
if ( (crslb == 0) or (crclr == -1) ):
z1tmp[t,frw,fcl] = -9999.0
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
elif ( (crslb == 1) and (crclr == 1) ):
z1tmp[t,frw,fcl] = -9999.0
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
elif ( (crslb == 1) and (crclr == 0) ):
prptmp = numpy.array( [frcvect[frcsb[t]], 1.0 - frcvect[frcsb[t]] ] )
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
# For 2 slabs, recall AIRS cloud layers go upper/lower, ours is opposite
# Also apply random overlap adjust AIRS zero values
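# Overlap sketch: with lower/upper fractions f1, f2 and a uniform draw
# c ~ U(0, min(f1, f2)), the composition passed to lgtzs is
# [f1 - c*f2, f2 - c*f1, c, clear remainder], which keeps every entry non-negative
# while assigning a random overlap where AIRS itself reports none.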
elif ( (crslb == 2) and (crclr == 0) ):
frcs = numpy.array([frcvec2[frcsb[t]],frcvec1[frcsb[t]]])
if (numpy.sum(frcs) < 0.01):
frcs[0] = 0.005
frcs[1] = 0.005
elif frcs[0] < 0.005:
frcs[0] = 0.005
frcs[1] = frcs[1] - 0.005
elif frcs[1] < 0.005:
frcs[1] = 0.005
frcs[0] = frcs[0] - 0.005
mnfrc = numpy.amin(frcs)
c12tmp = random.uniform(0.0,mnfrc,size=1)
prptmp = numpy.array( [frcs[0] - c12tmp[0]*frcs[1], \
frcs[1] - c12tmp[0]*frcs[0], c12tmp[0], 0.0])
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = ztmp[1]
z12tmp[t,frw,fcl] = ztmp[2]
elif ( (crslb == 2) and (crclr == 1) ):
frcs = numpy.array([frcvec2[frcsb[t]],frcvec1[frcsb[t]]])
if frcs[0] < 0.005:
frcs[0] = 0.005
frcs[1] = frcs[1] - 0.005
elif frcs[1] < 0.005:
frcs[1] = 0.005
frcs[0] = frcs[0] - 0.005
mnfrc = numpy.amin(frcs)
c12tmp = random.uniform(0.0,mnfrc,size=1)
prptmp = numpy.array( [0.999 * (frcs[0] - c12tmp[0]*frcs[1]), \
0.999 * (frcs[1] - c12tmp[0]*frcs[0]), 0.999 * c12tmp[0], 0.001])
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = ztmp[1]
z12tmp[t,frw,fcl] = ztmp[2]
cctr = cctr + 1
if tsmp == 0:
cfclgt1out = numpy.zeros(z1tmp.shape)
cfclgt1out[:,:,:] = z1tmp
cfclgt2out = numpy.zeros(z2tmp.shape)
cfclgt2out[:,:,:] = z2tmp
cfclgt12out = numpy.zeros(z12tmp.shape)
cfclgt12out[:,:,:] = z12tmp
else:
cfclgt1out = numpy.append(cfclgt1out,z1tmp,axis=0)
cfclgt2out = numpy.append(cfclgt2out,z2tmp,axis=0)
cfclgt12out = numpy.append(cfclgt12out,z12tmp,axis=0)
# Cloud Non-Gas Water
ngwttmp1 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp1[nslbtmp >= 1] = cngwt1[nslbtmp >= 1]
ngwttmp1[swpsq] = cngwt2[swpsq]
ngwttmp2 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp2[nslbtmp == 2] = cngwt2[nslbtmp == 2]
ngwttmp2[swpsq] = cngwt1[swpsq]
if tsmp == 0:
ngwt1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
ngwt1out[:] = ngwttmp1[frcsb]
ngwt2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
ngwt2out[:] = ngwttmp2[frcsb]
else:
ngwt1out = numpy.append(ngwt1out,ngwttmp1[frcsb])
ngwt2out = numpy.append(ngwt2out,ngwttmp2[frcsb])
# Cloud Top Temperature
cttptmp1 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp1[nslbtmp >= 1] = cttp1[nslbtmp >= 1]
cttptmp1[swpsq] = cttp2[swpsq]
cttptmp2 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp2[nslbtmp == 2] = cttp2[nslbtmp == 2]
cttptmp2[swpsq] = cttp1[swpsq]
if tsmp == 0:
cttp1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
cttp1out[:] = cttptmp1[frcsb]
cttp2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
cttp2out[:] = cttptmp2[frcsb]
else:
cttp1out = numpy.append(cttp1out,cttptmp1[frcsb])
cttp2out = numpy.append(cttp2out,cttptmp2[frcsb])
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((frcsb.shape[0],)) - 9999.0
latout[:] = lthld[frcsb]
lonout = numpy.zeros((frcsb.shape[0],)) - 9999.0
lonout[:] = lnhld[frcsb]
yrout = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
jdyout[:] = tmhld[frcsb]
else:
latout = numpy.append(latout,lthld[frcsb])
lonout = numpy.append(lonout,lnhld[frcsb])
yrtmp = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[frcsb])
tsmp = tsmp + nairs
# Process quantiles
nslbqs = calculate_VPD.quantile_msgdat_discrete(nslabout,prbs)
str1 = '%.2f Number Slab Quantile: %d' % (prbs[103],nslbqs[103])
print(str1)
print(nslbqs)
# psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
# str1 = '%.2f Surface Pressure Quantile: %.3f' % (prbs[53],psfcqs[53])
# print(str1)
prsbt1qs = calculate_VPD.quantile_msgdat(prsbot1out,prbs)
str1 = '%.2f CldBot1 Pressure Quantile: %.3f' % (prbs[103],prsbt1qs[103])
print(str1)
dpcld1qs = calculate_VPD.quantile_msgdat(dpcld1out,prbs)
str1 = '%.2f DPCloud1 Quantile: %.3f' % (prbs[103],dpcld1qs[103])
print(str1)
dpslbqs = calculate_VPD.quantile_msgdat(dpslbout,prbs)
str1 = '%.2f DPSlab Quantile: %.3f' % (prbs[103],dpslbqs[103])
print(str1)
dpcld2qs = calculate_VPD.quantile_msgdat(dpcld2out,prbs)
str1 = '%.2f DPCloud2 Quantile: %.3f' % (prbs[103],dpcld2qs[103])
print(str1)
slb1qs = calculate_VPD.quantile_msgdat_discrete(slbtyp1out,prbs)
str1 = '%.2f Type1 Quantile: %d' % (prbs[103],slb1qs[103])
print(str1)
slb2qs = calculate_VPD.quantile_msgdat_discrete(slbtyp2out,prbs)
str1 = '%.2f Type2 Quantile: %d' % (prbs[103],slb2qs[103])
print(str1)
# Indicators
totclrqout = numpy.zeros((3,3,nprb)) - 99
lgt1qs = numpy.zeros((3,3,nprb)) - 9999.0
lgt2qs = numpy.zeros((3,3,nprb)) - 9999.0
lgt12qs = numpy.zeros((3,3,nprb)) - 9999.0
for frw in range(3):
for fcl in range(3):
tmpclr = calculate_VPD.quantile_msgdat_discrete(totclrout[:,frw,fcl],prbs)
totclrqout[frw,fcl,:] = tmpclr[:]
str1 = 'Clr/Ovc Indicator %d, %d %.2f Quantile: %d' % (frw,fcl,prbs[103],tmpclr[103])
print(str1)
tmplgtq = calculate_VPD.quantile_msgdat(cfclgt1out[:,frw,fcl],prbs)
lgt1qs[frw,fcl,:] = tmplgtq[:]
tmplgtq = calculate_VPD.quantile_msgdat(cfclgt2out[:,frw,fcl],prbs)
lgt2qs[frw,fcl,:] = tmplgtq[:]
tmplgtq = calculate_VPD.quantile_msgdat(cfclgt12out[:,frw,fcl],prbs)
lgt12qs[frw,fcl,:] = tmplgtq[:]
str1 = 'CFrac Logit %d, %d %.2f Quantile: %.3f, %.3f, %.3f' % (frw,fcl,prbs[103], \
lgt1qs[frw,fcl,103],lgt2qs[frw,fcl,103],lgt12qs[frw,fcl,103])
print(str1)
ngwt1qs = calculate_VPD.quantile_msgdat(ngwt1out,prbs)
str1 = '%.2f NGWater1 Quantile: %.3f' % (prbs[103],ngwt1qs[103])
print(str1)
ngwt2qs = calculate_VPD.quantile_msgdat(ngwt2out,prbs)
str1 = '%.2f NGWater2 Quantile: %.3f' % (prbs[103],ngwt2qs[103])
print(str1)
cttp1qs = calculate_VPD.quantile_msgdat(cttp1out,prbs)
str1 = '%.2f CTTemp1 Quantile: %.3f' % (prbs[103],cttp1qs[103])
print(str1)
cttp2qs = calculate_VPD.quantile_msgdat(cttp2out,prbs)
str1 = '%.2f CTTemp2 Quantile: %.3f' % (prbs[103],cttp2qs[103])
print(str1)
# Output Quantiles
qfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_Cloud_Quantile.nc' % (dtdr,yrlst[k],hrchc,rgchc)
qout = Dataset(qfnm,'w')
dimp = qout.createDimension('probability',nprb)
dimfov1 = qout.createDimension('fovrow',3)
dimfov2 = qout.createDimension('fovcol',3)
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
varnslb = qout.createVariable('NumberSlab_quantile','i2',['probability'], fill_value = -99)
varnslb[:] = nslbqs
varnslb.long_name = 'Number of cloud slabs quantiles'
varnslb.units = 'Count'
varnslb.missing_value = -99
varcbprs = qout.createVariable('CloudBot1Logit_quantile','f4',['probability'], fill_value = -9999)
varcbprs[:] = prsbt1qs
varcbprs.long_name = 'Slab 1 cloud bottom pressure logit quantiles'
varcbprs.units = 'hPa'
varcbprs.missing_value = -9999
vardpc1 = qout.createVariable('DPCloud1Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc1[:] = dpcld1qs
vardpc1.long_name = 'Slab 1 cloud pressure depth logit quantiles'
vardpc1.units = 'hPa'
vardpc1.missing_value = -9999
vardpslb = qout.createVariable('DPSlabLogit_quantile','f4',['probability'], fill_value = -9999)
vardpslb[:] = dpslbqs
vardpslb.long_name = 'Two-slab vertical separation logit quantiles'
vardpslb.units = 'hPa'
vardpslb.missing_value = -9999
vardpc2 = qout.createVariable('DPCloud2Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc2[:] = dpcld2qs
vardpc2.long_name = 'Slab 2 cloud pressure depth logit quantiles'
vardpc2.units = 'hPa'
vardpc2.missing_value = -9999
vartyp1 = qout.createVariable('CType1_quantile','i2',['probability'], fill_value = -99)
vartyp1[:] = slb1qs
vartyp1.long_name = 'Slab 1 cloud type quantiles'
vartyp1.units = 'None'
vartyp1.missing_value = -99
vartyp1.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
vartyp2 = qout.createVariable('CType2_quantile','i2',['probability'], fill_value = -99)
vartyp2[:] = slb2qs
vartyp2.long_name = 'Slab 2 cloud type quantiles'
vartyp2.units = 'None'
vartyp2.missing_value = -99
vartyp2.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
varcvr = qout.createVariable('CCoverInd_quantile','i2',['fovrow','fovcol','probability'], fill_value = -99)
varcvr[:] = totclrqout
varcvr.long_name = 'Cloud cover indicator quantiles'
varcvr.units = 'None'
varcvr.missing_value = -99
varcvr.comment = 'Cloud cover indicators: -1=Clear, 0=Partly cloudy, 1=Overcast'
varlgt1 = qout.createVariable('CFrcLogit1_quantile','f4',['fovrow','fovcol','probability'], fill_value = -9999)
varlgt1[:] = lgt1qs
varlgt1.long_name = 'Slab 1 cloud fraction (cfrac1x) logit quantiles'
varlgt1.units = 'None'
varlgt1.missing_value = -9999
varlgt2 = qout.createVariable('CFrcLogit2_quantile','f4',['fovrow','fovcol','probability'], fill_value = -9999)
varlgt2[:] = lgt2qs
varlgt2.long_name = 'Slab 2 cloud fraction (cfrac2x) logit quantiles'
varlgt2.units = 'None'
varlgt2.missing_value = -9999
varlgt12 = qout.createVariable('CFrcLogit12_quantile','f4',['fovrow','fovcol','probability'], fill_value = -9999)
varlgt12[:] = lgt12qs
varlgt12.long_name = 'Slab 1/2 overlap fraction (cfrac12) logit quantiles'
varlgt12.units = 'None'
varlgt12.missing_value = -9999
varngwt1 = qout.createVariable('NGWater1_quantile','f4',['probability'], fill_value = -9999)
varngwt1[:] = ngwt1qs
varngwt1.long_name = 'Slab 1 cloud non-gas water quantiles'
varngwt1.units = 'g m^-2'
varngwt1.missing_value = -9999
varngwt2 = qout.createVariable('NGWater2_quantile','f4',['probability'], fill_value = -9999)
varngwt2[:] = ngwt2qs
varngwt2.long_name = 'Slab 2 cloud non-gas water quantiles'
varngwt2.units = 'g m^-2'
varngwt2.missing_value = -9999
varcttp1 = qout.createVariable('CTTemp1_quantile','f4',['probability'], fill_value = -9999)
varcttp1[:] = cttp1qs
varcttp1.long_name = 'Slab 1 cloud top temperature quantiles'
varcttp1.units = 'K'
varcttp1.missing_value = -9999
varcttp2 = qout.createVariable('CTTemp2_quantile','f4',['probability'], fill_value = -9999)
varcttp2[:] = cttp2qs
varcttp2.long_name = 'Slab 2 cloud top temperature quantiles'
varcttp2.units = 'K'
varcttp2.missing_value = -9999
qout.close()
# Set up transformations
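# As with the earlier transformations, the *_StdGaus arrays are assumed to hold
# probability-integral-transformed (approximately standard normal) versions of each
# variable; the per-FOV indicator and logit fields are transformed cell by cell
# against their own quantile curves.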
zccvout = numpy.zeros((tsmp,3,3,)) - 9999.
zlgt1 = numpy.zeros((tsmp,3,3)) - 9999.
zlgt2 = numpy.zeros((tsmp,3,3)) - 9999.
zlgt12 = numpy.zeros((tsmp,3,3)) - 9999.
znslb = calculate_VPD.std_norm_quantile_from_obs(nslabout, nslbqs, prbs, msgval=-99)
zprsbt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(prsbot1out, prsbt1qs, prbs, msgval=-9999.)
zdpcld1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld1out, dpcld1qs, prbs, msgval=-9999.)
zdpslb = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpslbout, dpslbqs, prbs, msgval=-9999.)
zdpcld2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld2out, dpcld2qs, prbs, msgval=-9999.)
zctyp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp1out, slb1qs, prbs, msgval=-99)
zctyp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp2out, slb2qs, prbs, msgval=-99)
for frw in range(3):
for fcl in range(3):
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(totclrout[:,frw,fcl], totclrqout[frw,fcl,:], \
prbs, msgval=-99)
zccvout[:,frw,fcl] = ztmp[:]
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt1out[:,frw,fcl], lgt1qs[frw,fcl,:], \
prbs, msgval=-9999.)
zlgt1[:,frw,fcl] = ztmp[:]
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt2out[:,frw,fcl], lgt2qs[frw,fcl,:], \
prbs, msgval=-9999.)
zlgt2[:,frw,fcl] = ztmp[:]
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt12out[:,frw,fcl], lgt12qs[frw,fcl,:], \
prbs, msgval=-9999.)
zlgt12[:,frw,fcl] = ztmp[:]
zngwt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt1out, ngwt1qs, prbs, msgval=-9999.)
zngwt2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt2out, ngwt2qs, prbs, msgval=-9999.)
zcttp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp1out, cttp1qs, prbs, msgval=-9999.)
zcttp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp2out, cttp2qs, prbs, msgval=-9999.)
# Output transformed quantile samples
zfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_Cloud_StdGausTrans.nc' % (dtdr,yrlst[k],hrchc,rgchc)
zout = Dataset(zfnm,'w')
dimsmp = zout.createDimension('sample',tsmp)
dimfov1 = zout.createDimension('fovrow',3)
dimfov2 = zout.createDimension('fovcol',3)
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varnslb = zout.createVariable('NumberSlab_StdGaus','f4',['sample'], fill_value = -9999)
varnslb[:] = znslb
varnslb.long_name = 'Quantile transformed number of cloud slabs'
varnslb.units = 'None'
varnslb.missing_value = -9999.
varcbprs = zout.createVariable('CloudBot1Logit_StdGaus','f4',['sample'], fill_value = -9999)
varcbprs[:] = zprsbt1
varcbprs.long_name = 'Quantile transformed slab 1 cloud bottom pressure logit'
varcbprs.units = 'None'
varcbprs.missing_value = -9999.
vardpc1 = zout.createVariable('DPCloud1Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc1[:] = zdpcld1
vardpc1.long_name = 'Quantile transformed slab 1 cloud pressure depth logit'
vardpc1.units = 'None'
vardpc1.missing_value = -9999.
vardpslb = zout.createVariable('DPSlabLogit_StdGaus','f4',['sample'], fill_value = -9999)
vardpslb[:] = zdpslb
vardpslb.long_name = 'Quantile transformed two-slab vertical separation logit'
vardpslb.units = 'None'
vardpslb.missing_value = -9999.
vardpc2 = zout.createVariable('DPCloud2Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc2[:] = zdpcld2
vardpc2.long_name = 'Quantile transformed slab 2 cloud pressure depth logit'
vardpc2.units = 'None'
vardpc2.missing_value = -9999.
vartyp1 = zout.createVariable('CType1_StdGaus','f4',['sample'], fill_value = -9999)
vartyp1[:] = zctyp1
vartyp1.long_name = 'Quantile transformed slab 1 cloud type'
vartyp1.units = 'None'
vartyp1.missing_value = -9999.
vartyp2 = zout.createVariable('CType2_StdGaus','f4',['sample'], fill_value = -9999)
vartyp2[:] = zctyp2
vartyp2.long_name = 'Quantile transformed slab 2 cloud type'
vartyp2.units = 'None'
vartyp2.missing_value = -9999.
varcov = zout.createVariable('CCoverInd_StdGaus','f4',['sample','fovrow','fovcol'], fill_value= -9999)
varcov[:] = zccvout
varcov.long_name = 'Quantile transformed cloud cover indicator'
varcov.units = 'None'
varcov.missing_value = -9999.
varlgt1 = zout.createVariable('CFrcLogit1_StdGaus','f4',['sample','fovrow','fovcol'], fill_value = -9999)
varlgt1[:] = zlgt1
varlgt1.long_name = 'Quantile transformed slab 1 cloud fraction logit'
varlgt1.units = 'None'
varlgt1.missing_value = -9999.
varlgt2 = zout.createVariable('CFrcLogit2_StdGaus','f4',['sample','fovrow','fovcol'], fill_value = -9999)
varlgt2[:] = zlgt2
varlgt2.long_name = 'Quantile transformed slab 2 cloud fraction logit'
varlgt2.units = 'None'
varlgt2.missing_value = -9999.
varlgt12 = zout.createVariable('CFrcLogit12_StdGaus','f4',['sample','fovrow','fovcol'], fill_value = -9999)
varlgt12[:] = zlgt12
varlgt12.long_name = 'Quantile transformed slab 1/2 overlap fraction logit'
varlgt12.units = 'None'
varlgt12.missing_value = -9999.
varngwt1 = zout.createVariable('NGWater1_StdGaus','f4',['sample'], fill_value = -9999)
varngwt1[:] = zngwt1
varngwt1.long_name = 'Quantile transformed slab 1 non-gas water'
varngwt1.units = 'None'
varngwt1.missing_value = -9999.
varngwt2 = zout.createVariable('NGWater2_StdGaus','f4',['sample'], fill_value = -9999)
varngwt2[:] = zngwt2
varngwt2.long_name = 'Quantile transformed slab 2 non-gas water'
varngwt2.units = 'None'
varngwt2.missing_value = -9999.
varcttp1 = zout.createVariable('CTTemp1_StdGaus','f4',['sample'], fill_value = -9999)
varcttp1[:] = zcttp1
varcttp1.long_name = 'Quantile transformed slab 1 cloud top temperature'
varcttp1.units = 'None'
varcttp1.missing_value = -9999.
varcttp2 = zout.createVariable('CTTemp2_StdGaus','f4',['sample'], fill_value = -9999)
varcttp2[:] = zcttp2
varcttp2.long_name = 'Quantile transformed slab 2 cloud top temperature'
varcttp2.units = 'None'
varcttp2.missing_value = -9999.
zout.close()
return
def quantile_profile_locmask_conus(rfdr, mtdr, csdr, airdr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, mskvr, mskvl):
# Construct profile/sfc variable quantiles and z-scores, with a possibly irregular location mask
# rfdr: Directory for reference data (Levels/Quantiles)
# mtdr: Directory for MERRA data
# csdr: Directory for cloud slab data
# airdr: Directory for AIRS cloud fraction
# dtdr: Output directory
# yrlst: List of years to process
# mnst: Starting Month
# mnfn: Ending Month
# hrchc: Template Hour Choice
# rgchc: Template Region Choice
# mskvr: Name of region mask variable
# mskvl: Value of region mask for Region Choice
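# --- Usage sketch (illustration only; every path and value below is hypothetical,
# --- not taken from the original code) ---
# quantile_profile_locmask_conus(rfdr='/data/ref', mtdr='/data/merra2', csdr='/data/cloudslab',
#                                airdr='/data/airs', dtdr='/data/out',
#                                yrlst=[2015, 2016], mnst=6, mnfn=8, hrchc=0,
#                                rgchc='Southwest', mskvr='NCA_mask', mskvl=4)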
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (rfdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
nzout = 101
tmpqout = numpy.zeros((nzout,nprb)) - 9999.
rhqout = numpy.zeros((nzout,nprb)) - 9999.
sftmpqs = numpy.zeros((nprb,)) - 9999.
sfaltqs = numpy.zeros((nprb,)) - 9999.
psfcqs = numpy.zeros((nprb,)) - 9999.
altmed = numpy.zeros((nzout,)) - 9999.
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f.variables[mskvr][:,:]
latmet = f.variables['plat'][:]
lonmet = f.variables['plon'][:]
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
mskind = numpy.zeros((mask.shape),dtype=mask.dtype)
print(mskvl)
mskind[mask == mskvl] = 1
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset to the bounding box of the region mask
lnsm = numpy.sum(mskind,axis=0)
#print(lnsq.shape)
#print(lnsm.shape)
#print(lnsm)
ltsm = numpy.sum(mskind,axis=1)
#print(ltsq.shape)
#print(ltsm.shape)
#print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mskind[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
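# Toy illustration of the bounding-box step above (hypothetical 4x5 mask, not real data):
# summing the mask along each axis shows which longitude/latitude indices contain any
# masked points, and amin/amax of those indices give the slicing ranges.
# toy = numpy.zeros((4,5)); toy[1:3,2:4] = 1
# numpy.sum(toy,axis=0) -> [0,0,2,2,0]  => lon indices 2..3, so lnmn=2, lnmx=4 (exclusive)
# numpy.sum(toy,axis=1) -> [0,2,2,0]    => lat indices 1..2, so ltmn=1, ltmx=3 (exclusive)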
tsmp = 0
for k in range(nyr):
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(fnm,'r')
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
tmunit = tmunit.replace("days since ","")
dybs = datetime.datetime.strptime(tmunit,"%Y-%m-%d %H:%M:%S")
print(dybs)
dy0 = dybs + datetime.timedelta(days=tminf[0])
dyinit = datetime.date(dy0.year,dy0.month,dy0.day)
print(dyinit)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
print(jdsq)
tmhld = numpy.repeat(jdsq,nx*ny)
#print(tmhld.shape)
#print(numpy.amin(tmhld))
#print(numpy.amax(tmhld))
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
# MERRA variables
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(fnm,'r')
tms = f.variables['time'][:]
stparr = f['/stemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
psfarr = f['/spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
salarr = f['/salti'][ltmn:ltmx,lnmn:lnmx]
tmparr = f['/ptemp'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
h2oarr = f['/rh'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
altarr = f['/palts'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
f.close()
tmflt = tms.flatten()
nt = tmflt.shape[0]
lnhld = numpy.tile(lnrp,nt)
lthld = numpy.tile(ltrp,nt)
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
# AIRS Clouds
anm = '%s/CONUS_AIRS_CldFrc_Match_JJA_%d_%02d_UTC.nc' % (airdr,yrlst[k],hrchc)
f = Dataset(anm,'r')
arsfrc1 = f.variables['AIRS_CldFrac_1'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
arsfrc2 = f.variables['AIRS_CldFrac_2'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
# Sum
frctot = arsfrc1 + arsfrc2
frc0 = frctot[0,:,:,:]
frc0 = frc0.flatten()
frcsq = numpy.arange(tmhld.shape[0])
# Subset by AIRS matchup and location masks
frcsb = frcsq[(numpy.logical_not(frc0.mask)) & (mskall > 0)]
nairs = frcsb.shape[0]
print(tmhld.shape)
print(frcsb.shape)
tmptmp = numpy.zeros((nairs,nzout))
h2otmp = numpy.zeros((nairs,nzout))
alttmp = numpy.zeros((nairs,nzout))
for j in range(nzout):
tmpvec = tmparr[:,j,:,:].flatten()
tmpvec[tmpvec > 1e30] = -9999.
tmptmp[:,j] = tmpvec[frcsb]
altvec = altarr[:,j,:,:].flatten()
alttmp[:,j] = altvec[frcsb]
h2ovec = h2oarr[:,j,:,:].flatten()
h2ovec[h2ovec > 1e30] = -9999.
h2otmp[:,j] = h2ovec[frcsb]
if tsmp == 0:
tmpmerout = numpy.zeros(tmptmp.shape)
tmpmerout[:,:] = tmptmp
h2omerout = numpy.zeros(h2otmp.shape)
h2omerout[:,:] = h2otmp
altout = numpy.zeros(alttmp.shape)
altout[:,:] = alttmp
else:
tmpmerout = numpy.append(tmpmerout,tmptmp,axis=0)
h2omerout = numpy.append(h2omerout,h2otmp,axis=0)
altout = numpy.append(altout,alttmp,axis=0)
stparr = stparr.flatten()
psfarr = psfarr.flatten()
salarr = salarr.flatten()
salfl = numpy.tile(salarr[:],nt)
if tsmp == 0:
sftmpout = numpy.zeros((nairs,)) - 9999.0
sftmpout[:] = stparr[frcsb]
psfcout = numpy.zeros((nairs,)) - 9999.0
psfcout[:] = psfarr[frcsb]
sfaltout = numpy.zeros((nairs,)) - 9999.0
sfaltout[:] = salfl[frcsb]
else:
sftmpout = numpy.append(sftmpout,stparr[frcsb])
psfcout = numpy.append(psfcout,psfarr[frcsb])
sfaltout = numpy.append(sfaltout,salfl[frcsb])
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((nairs,)) - 9999.0
latout[:] = lthld[frcsb]
lonout = numpy.zeros((nairs,)) - 9999.0
lonout[:] = lnhld[frcsb]
yrout = numpy.zeros((nairs,),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((nairs,),dtype=numpy.int16)
jdyout[:] = tmhld[frcsb]
else:
latout = numpy.append(latout,lthld[frcsb])
lonout = numpy.append(lonout,lnhld[frcsb])
yrtmp = numpy.zeros((nairs,),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[frcsb])
tsmp = tsmp + nairs
# Quantiles
tmpqout = numpy.zeros((nzout,nprb)) - 9999.
rhqout = numpy.zeros((nzout,nprb)) - 9999.
sftmpqs = numpy.zeros((nprb,)) - 9999.
sfaltqs = numpy.zeros((nprb,)) - 9999.
psfcqs = numpy.zeros((nprb,)) - 9999.
altmed = numpy.zeros((nzout,)) - 9999.
ztmpout = numpy.zeros((tsmp,nzout)) - 9999.
zrhout = numpy.zeros((tsmp,nzout)) - 9999.
zsftmpout = numpy.zeros((tsmp,)) - 9999.
zsfaltout = numpy.zeros((tsmp,)) - 9999.
zpsfcout = numpy.zeros((tsmp,)) - 9999.
# Quantiles
for j in range(nzout):
tmptmp = calculate_VPD.quantile_msgdat(tmpmerout[:,j],prbs)
tmpqout[j,:] = tmptmp[:]
str1 = 'Plev %.2f, %.2f Temp Quantile: %.3f' % (plev[j],prbs[103],tmptmp[103])
print(str1)
# Transform
ztmp = calculate_VPD.std_norm_quantile_from_obs(tmpmerout[:,j], tmptmp, prbs, msgval=-9999.)
ztmpout[:,j] = ztmp[:]
alttmp = calculate_VPD.quantile_msgdat(altout[:,j],prbs)
altmed[j] = alttmp[103]
str1 = 'Plev %.2f, %.2f Alt Quantile: %.3f' % (plev[j],prbs[103],alttmp[103])
print(str1)
# Cap RH at 1.0 (values above 100%)
rhadj = h2omerout[:,j]
rhadj[rhadj > 1.0] = 1.0
rhqtmp = calculate_VPD.quantile_msgdat(rhadj,prbs)
rhqout[j,:] = rhqtmp[:]
str1 = 'Plev %.2f, %.2f RH Quantile: %.4f' % (plev[j],prbs[103],rhqtmp[103])
print(str1)
zrh = calculate_VPD.std_norm_quantile_from_obs(rhadj, rhqtmp, prbs, msgval=-9999.)
zrhout[:,j] = zrh[:]
psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
str1 = '%.2f PSfc Quantile: %.2f' % (prbs[103],psfcqs[103])
print(str1)
zpsfcout = calculate_VPD.std_norm_quantile_from_obs(psfcout, psfcqs, prbs, msgval=-9999.)
sftpqs = calculate_VPD.quantile_msgdat(sftmpout,prbs)
str1 = '%.2f SfcTmp Quantile: %.2f' % (prbs[103],sftpqs[103])
print(str1)
zsftmpout = calculate_VPD.std_norm_quantile_from_obs(sftmpout, sftpqs, prbs, msgval=-9999.)
sfalqs = calculate_VPD.quantile_msgdat(sfaltout,prbs)
str1 = '%.2f SfcAlt Quantile: %.2f' % (prbs[103],sfalqs[103])
print(str1)
zsfaltout = calculate_VPD.std_norm_quantile_from_obs(sfaltout, sfalqs, prbs, msgval=-9999.)
# Output Quantiles
qfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_TempRHSfc_Quantile.nc' % (dtdr,yrlst[k],hrchc,rgchc)
qout = Dataset(qfnm,'w')
dimz = qout.createDimension('level',nzout)
dimp = qout.createDimension('probability',nprb)
varlvl = qout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
# Altitude grid
varalt = qout.createVariable('Altitude_median', 'f4', ['level'], fill_value = -9999)
varalt[:] = altmed
varalt.long_name = 'Altitude median value'
varalt.units = 'm'
varalt.missing_value = -9999
vartmp = qout.createVariable('Temperature_quantile', 'f4', ['level','probability'], fill_value = -9999)
vartmp[:] = tmpqout
vartmp.long_name = 'Temperature quantiles'
vartmp.units = 'K'
vartmp.missing_value = -9999.
varrh = qout.createVariable('RH_quantile', 'f4', ['level','probability'], fill_value = -9999)
varrh[:] = rhqout
varrh.long_name = 'Relative humidity quantiles'
varrh.units = 'Unitless'
varrh.missing_value = -9999.
varstmp = qout.createVariable('SfcTemp_quantile', 'f4', ['probability'], fill_value = -9999)
varstmp[:] = sftpqs
varstmp.long_name = 'Surface temperature quantiles'
varstmp.units = 'K'
varstmp.missing_value = -9999.
varpsfc = qout.createVariable('SfcPres_quantile', 'f4', ['probability'], fill_value = -9999)
varpsfc[:] = psfcqs
varpsfc.long_name = 'Surface pressure quantiles'
varpsfc.units = 'hPa'
varpsfc.missing_value = -9999.
varsalt = qout.createVariable('SfcAlt_quantile', 'f4', ['probability'], fill_value = -9999)
varsalt[:] = sfalqs
varsalt.long_name = 'Surface altitude quantiles'
varsalt.units = 'm'
varsalt.missing_value = -9999.
qout.close()
# Output transformed quantile samples
zfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_TempRHSfc_StdGausTrans.nc' % (dtdr,yrlst[k],hrchc,rgchc)
zout = Dataset(zfnm,'w')
dimz = zout.createDimension('level',nzout)
dimsmp = zout.createDimension('sample',tsmp)
varlvl = zout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varsrt3 = zout.createVariable('Temperature_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt3[:] = ztmpout
varsrt3.long_name = 'Quantile transformed temperature'
varsrt3.units = 'None'
varsrt3.missing_value = -9999.
varsrt4 = zout.createVariable('RH_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt4[:] = zrhout
varsrt4.long_name = 'Quantile transformed relative humidity'
varsrt4.units = 'None'
varsrt4.missing_value = -9999.
varsrts1 = zout.createVariable('SfcTemp_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts1[:] = zsftmpout
varsrts1.long_name = 'Quantile transformed surface temperature'
varsrts1.units = 'None'
varsrts1.missing_value = -9999.
varsrts2 = zout.createVariable('SfcPres_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts2[:] = zpsfcout
varsrts2.long_name = 'Quantile transformed surface pressure'
varsrts2.units = 'None'
varsrts2.missing_value = -9999.
varsrts3 = zout.createVariable('SfcAlt_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts3[:] = zsfaltout
varsrts3.long_name = 'Quantile transformed surface altitude'
varsrts3.units = 'None'
varsrts3.missing_value = -9999.
zout.close()
return
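# --- Hedged sketch (an assumption about what calculate_VPD.std_norm_quantile_from_obs does,
# --- not its actual implementation): each observation is mapped to an empirical probability
# --- via the precomputed quantile table, then through the standard normal inverse CDF.
# --- The helper name is new, and scipy is assumed to be available.
def _std_gaus_from_quantiles_sketch(obs, qs, prbs, msgval=-9999.):
    from scipy.stats import norm
    obs = numpy.asarray(obs, dtype=numpy.float64)
    zout = numpy.full(obs.shape, msgval, dtype=numpy.float64)
    ok = obs > msgval
    # interpolate each valid observation onto the probability grid of its quantiles
    pobs = numpy.interp(obs[ok], qs, prbs)
    # keep probabilities strictly inside (0,1) so the inverse CDF stays finite
    pobs = numpy.clip(pobs, 1.0e-6, 1.0 - 1.0e-6)
    zout[ok] = norm.ppf(pobs)
    return zout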
def airscld_invtransf_mix_cloud9_conus_nosfc(rfdr, dtdr, yrchc, hrchc, rgchc, rfmn, rfdy, rfgrn, scnrw, nrep = 10, \
l2dir = '/archive/AIRSOps/airs/gdaac/v6'):
# Read in mixture model parameters, draw random samples and set up SARTA input files
# Use AIRS FOV cloud fraction information
# Use designated AIRS reference granule, and pull surface pressure temperature from there
# rfdr: Directory for reference data (Levels/Quantiles)
# dtdr: Output directory
# yrchc: Template Year Choice
# hrchc: Template Hour Choice
# rgchc: Template Region Choice
# rfmn: Month for reference granule
# rfdy: Day for reference granule
# rfgrn: Reference granule number
# scnrw: Scan row for experiment
# nrep: Number of replicate granules
# l2dir: Local AIRS Level 2 directory (to retrieve reference info)
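# --- Usage sketch (illustration only; the directory, date, granule, and scan-row values
# --- below are hypothetical) ---
# airscld_invtransf_mix_cloud9_conus_nosfc(rfdr='/data/ref', dtdr='/data/out',
#                                          yrchc=2015, hrchc=0, rgchc='Southwest',
#                                          rfmn=7, rfdy=15, rfgrn=120, scnrw=23, nrep=10)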
# RN Generator
sdchc = 165434 + yrchc + hrchc
random.seed(sdchc)
cldprt = numpy.array([0.4,0.2,0.08])
nszout = 45 * 30 * nrep
sfrps = 45 * nrep
nlvsrt = 98
msgdbl = -9999.0
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (rfdr)
f = Dataset(rnm,'r')
airs_sarta_levs = f['level'][:]
f.close()
# Get reference granule info
airsdr = '%s/%04d/%02d/%02d/airs2sup' % (l2dir,yrchc,rfmn,rfdy)
if (os.path.exists(airsdr)):
fllst = os.listdir(airsdr)
l2str = 'AIRS.%04d.%02d.%02d.%03d' % (yrchc,rfmn,rfdy,rfgrn)
rffd = -1
j = 0
while ( (j < len(fllst)) and (rffd < 0) ):
lncr = len(fllst[j])
l4 = lncr - 4
if ( (fllst[j][l4:lncr] == '.hdf') and (l2str in fllst[j])):
l2fl = '%s/%s' % (airsdr,fllst[j])
ncl2 = Dataset(l2fl)
psfc = ncl2.variables['PSurfStd'][:,:]
topg = ncl2.variables['topog'][:,:]
ncl2.close()
rffd = j
j = j + 1
else:
print('L2 directory not found')
# Surface replicates
psfcvc = psfc[scnrw-1,:]
topgvc = topg[scnrw-1,:]
spres = numpy.tile(psfcvc,(sfrps,))
salti = numpy.tile(topgvc,(sfrps,))
# Variable list
clrlst = ['Temperature','RH','SfcTemp']
clrst = [1,64,0]
clrct = [98,35,1]
cldlst = ['NumberSlab','CloudBot1Logit','DPCloud1Logit','DPSlabLogit','DPCloud2Logit', \
'CType1','CType2','CCoverInd','CFrcLogit1','CFrcLogit2','CFrcLogit12', \
'NGWater1','NGWater2','CTTemp1','CTTemp2']
cldst = [0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0]
cldct = [1,1,1,1,1, 1,1,9,9,9,9, 1,1,1,1]
nvar = 0
for q in range(len(clrct)):
nvar = nvar + clrct[q]
nclr = nvar
for q in range(len(cldlst)):
nvar = nvar + cldct[q]
ncld = nvar - nclr
# Discrete/Continuous Indicator
typind = []
for q in range(len(clrct)):
for p in range(clrct[q]):
typind.append('Continuous')
cldtypind = ['Discrete','Continuous','Continuous','Continuous','Continuous', \
'Discrete','Discrete','Discrete','Continuous','Continuous','Continuous', \
'Continuous','Continuous','Continuous','Continuous']
for q in range(len(cldct)):
for p in range(cldct[q]):
typind.append(cldtypind[q])
# Quantile files
qclrnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_TempRHSfc_Quantile.nc' % (dtdr,yrchc,hrchc,rgchc)
qcldnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_Cloud_Quantile.nc' % (dtdr,yrchc,hrchc,rgchc)
qin = Dataset(qclrnm,'r')
prbs = qin.variables['probability'][:]
nprb = prbs.shape[0]
qsclr = numpy.zeros((nclr,nprb))
lvs = qin.variables['level'][:]
alts = qin.variables['Altitude_median'][:]
rhmd = qin.variables['RH_quantile'][:,103]
nlvl = lvs.shape[0]
cctr = 0
for j in range(len(clrlst)):
print(clrlst[j])
if clrst[j] == 0:
vr1 = '%s_quantile' % (clrlst[j])
qsclr[cctr,:] = qin.variables[vr1][:]
else:
inst = clrst[j] - 1
infn = inst + clrct[j]
otst = cctr
otfn = cctr + clrct[j]
vr1 = '%s_quantile' % (clrlst[j])
qsclr[otst:otfn,:] = qin.variables[vr1][inst:infn,:]
cctr = cctr + clrct[j]
qin.close()
print('Clear medians')
print(qsclr[:,103])
cldnmout = []
qin = Dataset(qcldnm,'r')
qscld = numpy.zeros((ncld,nprb))
dctr = 0
for j in range(len(cldlst)):
print(cldlst[j])
vr1 = '%s_quantile' % (cldlst[j])
vrinf = qin.variables[vr1]
if cldct[j] == 1:
qscld[dctr,:] = qin.variables[vr1][:]
dctr = dctr + 1
cldnmout.append(cldlst[j])
elif (len(vrinf.shape) == 2):
inst = cldst[j]
infn = inst + cldct[j]
for n2 in range(inst,infn):
clnm = '%s_%d' % (cldlst[j],n2)
cldnmout.append(clnm)
otst = dctr
otfn = dctr + cldct[j]
vr1 = '%s_quantile' % (cldlst[j])
qscld[otst:otfn,:] = qin.variables[vr1][inst:infn,:]
dctr = dctr + cldct[j]
elif (len(vrinf.shape) == 3):
for cl0 in range(vrinf.shape[0]):
for rw0 in range(vrinf.shape[1]):
otst = dctr
otfn = dctr + 1
qscld[otst:otfn,:] = qin.variables[vr1][cl0,rw0,:]
clnm = '%s_%d_%d' % (cldlst[j],cl0,rw0)
cldnmout.append(clnm)
dctr = dctr + 1
qin.close()
print('Cloud medians')
print(qscld[:,103])
# Read GMM Results
gmmnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_GMM_parameters.nc' % (dtdr,yrchc,hrchc,rgchc)
gmin = Dataset(gmmnm,'r')
gmnms = gmin['State_Vector_Names'][:,:]
gmmean = gmin['Mean'][:,:]
gmpkcv = gmin['Packed_Covariance'][:,:]
gmprps = gmin['Mixture_Proportion'][:]
gmin.close()
nmclps = gmnms.tolist()
strvrs = list(map(calculate_VPD.clean_byte_list,nmclps))
if sys.version_info[0] < 3:
print('Version 2')
strvrs = map(str,strvrs)
nmix = gmmean.shape[0]
nmxvar = gmmean.shape[1]
mrgcv = numpy.zeros((nmix,nmxvar,nmxvar),dtype=numpy.float64)
for j in range(nmix):
mrgcv[j,:,:] = calculate_VPD.unpackcov(gmpkcv[j,:], nelm=nmxvar)
# Component sizes
dtall = numpy.zeros((nszout,nmxvar),dtype=numpy.float64)
cmpidx = numpy.zeros((nszout,),dtype=numpy.int16)
csmp = random.multinomial(nszout,pvals=gmprps)
cmsz = 0
for j in range(nmix):
cvfl = mrgcv[j,:,:]
s1 = numpy.sqrt(numpy.diagonal(cvfl))
crmt = calculate_VPD.cov2cor(cvfl)
sdmt = numpy.diag(numpy.sqrt(cvfl.diagonal()))
w, v = linalg.eig(crmt)
print(numpy.amin(w))
sdfn = cmsz + csmp[j]
dtz = random.multivariate_normal(numpy.zeros((nmxvar,)),crmt,size=csmp[j])
dttmp = numpy.tile(gmmean[j,:],(csmp[j],1)) + numpy.dot(dtz,sdmt)
dtall[cmsz:sdfn,:] = dttmp[:,:]
cmpidx[cmsz:sdfn] = j + 1
cmsz = cmsz + csmp[j]
# Re-shuffle
ssq = numpy.arange(nszout)
sqsmp = random.choice(ssq,size=nszout,replace=False)
csmpshf = cmpidx[sqsmp]
dtshf = dtall[sqsmp,:]
print(dtshf.shape)
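# Sampling note (a summary of the block above, no new behavior): component counts are drawn
# once from a multinomial over the mixture proportions, each component's draws are built as
# mean + (standard-normal draws correlated by crmt) scaled by the per-variable standard
# deviations in sdmt, and the final shuffle removes the component ordering from the samples.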
### Inverse Transform
qout = numpy.zeros(dtshf.shape)
for j in range(nclr):
if typind[j] == 'Discrete':
qout[:,j] = calculate_VPD.data_quantile_from_std_norm_discrete(dtshf[:,j],qsclr[j,:],prbs,minval=qsclr[j,0],maxval=qsclr[j,nprb-1])
else:
qout[:,j] = calculate_VPD.data_quantile_from_std_norm(dtshf[:,j],qsclr[j,:],prbs,minval=qsclr[j,0],maxval=qsclr[j,nprb-1])
for j in range(nclr,nvar):
if typind[j] == 'Discrete':
qout[:,j] = calculate_VPD.data_quantile_from_std_norm_discrete(dtshf[:,j],qscld[j-nclr,:],prbs,minval=qscld[j-nclr,0],maxval=qscld[j-nclr,nprb-1])
else:
qout[:,j] = calculate_VPD.data_quantile_from_std_norm(dtshf[:,j],qscld[j-nclr,:],prbs,minval=qscld[j-nclr,0],maxval=qscld[j-nclr,nprb-1])
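# Inverse-transform note (an assumption about calculate_VPD.data_quantile_from_std_norm, not
# verified against its source): a standard-normal draw z is converted to a probability with
# the normal CDF and interpolated back through the stored quantile table, roughly
# numpy.interp(norm.cdf(z), prbs, qs); the discrete variant snaps the result to the nearest
# admissible category, and both variants clamp the output to [minval, maxval].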
### Prepare for SARTA
varlstout = ['cngwat','cngwat2','cprbot','cprbot2','cprtop','cprtop2', \
'cpsize','cpsize2','cstemp','cstemp2','ctype','ctype2','salti','spres','stemp']
# Adjust altitudes
alth2o = numpy.zeros((nszout,nlvsrt+3))
alth2o[:,nlvsrt-1] = alts[nlvsrt-1]
curdlt = 0.0
for j in range(nlvsrt-2,-1,-1):
str1 = 'Level %d: %.4f' % (j,curdlt)
print(str1)
if (alts[j] > alts[j+1]):
curdlt = alts[j] - alts[j+1]
alth2o[:,j] = alts[j]
else:
alth2o[:,j] = alts[j+1] + curdlt * 2.0
curdlt = curdlt * 2.0
alth2o[:,97] = 0.0
# Convert cloud items to data frame
smpfrm = pandas.DataFrame(data=qout[:,nclr:nvar],columns=cldnmout)
dtout = numpy.zeros((nszout,len(varlstout)), dtype=numpy.float64)
frmout = | pandas.DataFrame(data=dtout,columns=varlstout) | pandas.DataFrame |
import pandas as pd
import numpy as np
import nltk
import multiprocessing
import difflib
import time
import gc
import xgboost as xgb
import category_encoders as ce
import itertools
from collections import Counter
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
def labelcount_encode(df, cols):
categorical_features = cols
new_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
#plt.rcParams['animation.ffmpeg_path'] = '/Users/alejandrosusillo/opt/anaconda3/lib/python3.7/site-packages/ffmpeg'
import seaborn as sns
import numpy as np
import io
import matplotlib.animation as animation
from pandas.plotting import register_matplotlib_converters
from sklearn.datasets import load_breast_cancer
from matplotlib.animation import FuncAnimation
def makePlot():
# Loading
data = load_breast_cancer()
breast_cancer_df = pd.DataFrame(data['data'])
breast_cancer_df.columns = data['feature_names']
breast_cancer_df['target'] = data['target']
breast_cancer_df['diagnosis'] = [data['target_names'][x] for x in data['target']]
feature_names= data['feature_names']
corr = breast_cancer_df[list(feature_names)].corr(method='pearson')
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
# The trick: save the figure into an in-memory bytes buffer so it can afterwards be exposed via Flask
bytes_image = io.BytesIO()
plt.savefig(bytes_image, format='png')
bytes_image.seek(0)
return bytes_image
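# --- Hedged sketch: how the BytesIO returned by makePlot() could be served from a Flask app.
# --- The route path, the helper name, and the idea that the caller passes in its own Flask
# --- app object are assumptions for illustration, not part of the original code.
def registerCorrelationPlotRoute(app):
    from flask import send_file

    @app.route('/plots/correlation.png')
    def correlation_plot():
        # makePlot() returns a PNG in an in-memory buffer positioned at byte 0
        return send_file(makePlot(), mimetype='image/png')

    return app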
def linePlot(x, y, fileName, todoNaN):
#warehouse/serie_historica_acumulados.csv
# be aware of the array fileName in case we have more than one file
data_spain_ccaa = pd.read_csv('/Users/alejandrosusillo/Downloads/serie_historica_acumulados.csv', sep=',')
# ASK THE USER WHAT TO DO WITH CERTAIN RECORDS!!!!!!!!!!!!!!!!!!!!!!!!!!
data_spain_ccaa = data_spain_ccaa.drop(len(data_spain_ccaa)-1)
# ASK THE USER WHAT TO DO WITH THE NAN VALUES
data_spain_ccaa['Casos '] = data_spain_ccaa['Casos '].fillna(0)
# get andalucia cases
aux = data_spain_ccaa[data_spain_ccaa['CCAA Codigo ISO'].str.contains('AN')]
# get those days that are even
even_days = ( | pd.to_datetime(aux['Fecha']) | pandas.to_datetime |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# TODO: reversed comparisons (scalar on the left) are not supported yet:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left <= right)
tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left <= right)
tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
# an invalid axis should raise
msg = 'No axis named 1 for object type'
for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
| assert_series_equal(res, expected) | pandas.util.testing.assert_series_equal |
import pandas as pd
from typing import Union, Any, Tuple
import os
import subprocess
import zarr
import xarray as xr
import numpy as np
from satpy import Scene
from pathlib import Path
import datetime
from satip.geospatial import lat_lon_to_osgb, GEOGRAPHIC_BOUNDS
from satip.compression import Compressor, is_dataset_clean
import warnings
warnings.filterwarnings("ignore", message="divide by zero encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in sin")
warnings.filterwarnings("ignore", message="invalid value encountered in cos")
warnings.filterwarnings(
"ignore",
message="You will likely lose important projection information when converting to a PROJ string from another format. See: https://proj.org/faq.html#what-is-the-best-format-for-describing-coordinate-reference-systems",
)
def decompress(full_bzip_filename: Path, temp_pth: Path) -> str:
"""
Decompresses .bz2 file and returns the non-compressed filename
Args:
full_bzip_filename: Full compressed filename
temp_pth: Temporary path to save the native file
Returns:
The full native filename to the decompressed file
"""
base_bzip_filename = os.path.basename(full_bzip_filename)
base_nat_filename = os.path.splitext(base_bzip_filename)[0]
full_nat_filename = os.path.join(temp_pth, base_nat_filename)
if os.path.exists(full_nat_filename):
os.remove(full_nat_filename)
with open(full_nat_filename, "wb") as nat_file_handler:
process = subprocess.run(
["pbzip2", "--decompress", "--keep", "--stdout", full_bzip_filename],
stdout=nat_file_handler,
)
process.check_returncode()
return full_nat_filename
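# Hedged usage sketch (added for illustration, not part of the original module): the
# .bz2 path below is hypothetical, and pbzip2 must be available on PATH for the call
# to succeed.
def _example_decompress_usage() -> str:
    example_bzip = Path("/tmp/MSG3-SEVI-MSG15-0100-NA.nat.bz2")  # hypothetical input file
    return decompress(example_bzip, Path("/tmp"))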
def load_native_to_dataset(filename: Path, area: str) -> Union[Tuple[xr.DataArray, xr.DataArray], Tuple[None, None]]:
"""
    Load compressed native files into an Xarray dataset, resampling to the same grid
    for the HRV channel, replacing small chunks of NaNs with interpolated values, and
    adding a time coordinate
Args:
filename: The filename of the compressed native file to load
area: Name of the geographic area to use, such as 'UK'
Returns:
        A tuple of (non-HRV, HRV) Xarray DataArrays if loading worked, else (None, None)
"""
hrv_compressor = Compressor(variable_order=["HRV"], maxs=np.array([103.90016]), mins=np.array([-1.2278595]))
compressor = Compressor(mins=np.array(
[
-2.5118103,
-64.83977,
63.404694,
2.844452,
199.10002,
-17.254883,
-26.29155,
-1.1009827,
-2.4184198,
199.57048,
198.95093,
]
),
maxs=np.array(
[
69.60857,
339.15588,
340.26526,
317.86752,
313.2767,
315.99194,
274.82297,
93.786545,
101.34922,
249.91806,
286.96323,
]
),
variable_order=[
"IR_016",
"IR_039",
"IR_087",
"IR_097",
"IR_108",
"IR_120",
"IR_134",
"VIS006",
"VIS008",
"WV_062",
"WV_073",
],
)
temp_directory = filename.parent
try:
        # If decompression fails, return (None, None)
decompressed_filename: str = decompress(filename, temp_directory)
except subprocess.CalledProcessError:
return None, None
scene = Scene(filenames={"seviri_l1b_native": [decompressed_filename]})
hrv_scene = Scene(filenames={"seviri_l1b_native": [decompressed_filename]})
hrv_scene.load(
[
"HRV",
]
)
scene.load(
[
"IR_016",
"IR_039",
"IR_087",
"IR_097",
"IR_108",
"IR_120",
"IR_134",
"VIS006",
"VIS008",
"WV_062",
"WV_073",
]
)
# HRV covers a smaller portion of the disk than other bands, so use that as the bounds
    # Bounds selected empirically to have no NaN values from the off-disk image, while covering the UK + a bit
scene = scene.crop(ll_bbox=GEOGRAPHIC_BOUNDS[area])
hrv_scene = hrv_scene.crop(ll_bbox=GEOGRAPHIC_BOUNDS[area])
dataarray: xr.DataArray = convert_scene_to_dataarray(scene, band="IR_016", area=area)
hrv_dataarray: xr.DataArray = convert_scene_to_dataarray(hrv_scene, band="HRV", area=area)
# Delete file off disk
os.remove(decompressed_filename)
# If any NaNs still exist, then don't return it
if is_dataset_clean(dataarray) and is_dataset_clean(hrv_dataarray):
# Compress and return
dataarray = compressor.compress(dataarray)
hrv_dataarray = hrv_compressor.compress(hrv_dataarray)
return dataarray, hrv_dataarray
else:
return None, None
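# Hedged usage sketch (added for illustration; the path is hypothetical): load one
# compressed native file cropped to the UK and inspect the compressed DataArrays.
def _example_load_native_usage() -> None:
    dataarray, hrv_dataarray = load_native_to_dataset(
        Path("/tmp/MSG3-SEVI-MSG15-0100-NA.nat.bz2"), area="UK"
    )
    if dataarray is not None:
        print(dataarray.dims, hrv_dataarray.dims)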
def convert_scene_to_dataarray(scene: Scene, band: str, area: str) -> xr.DataArray:
scene = scene.crop(ll_bbox=GEOGRAPHIC_BOUNDS[area])
# Lat and Lon are the same for all the channels now
lon, lat = scene[band].attrs["area"].get_lonlats()
osgb_x, osgb_y = lat_lon_to_osgb(lat, lon)
dataset: xr.Dataset = scene.to_xarray_dataset()
    # Add coordinate arrays; since x and y change for each pixel, the dataset x,y coords cannot be replaced with these directly
dataset.attrs["osgb_x_coords"] = osgb_x
dataset.attrs["osgb_y_coords"] = osgb_y
# Round to the nearest 5 minutes
dataset.attrs["end_time"] = | pd.Timestamp(dataset.attrs["end_time"]) | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 7 16:54:46 2018
@author: xiaoyang
"""
import requests
from bs4 import BeautifulSoup
import pandas as pd
from ast import literal_eval
class PriceOfCrudeOil(object):
    # Get the latest crude oil spot price
    def get_latest_data(self):
        df = self.get_latest_month_data()
        # The first cell of the scraped table holds the latest closing price
        return float(df.iloc[0][0])
    # Crude oil spot price
    # Scraped data is stored in df; prices are in US dollars
def get_latest_month_data(self):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'}
r = requests.get('https://cn.investing.com/currencies/wti-usd-historical-data', headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
find1 = soup.find_all('table', {'class': 'genTbl closedTbl historicalTbl', 'id': 'curr_table'})
find2 = find1[0].find_all('tr')
df = | pd.DataFrame(columns=['收盘', '开盘', '高', '低', '涨跌']) | pandas.DataFrame |
import pandas as pd
from pandas import Timestamp
import numpy as np
import pytest
import niimpy
from niimpy.util import TZ
df11 = pd.DataFrame(
{"user": ['wAzQNrdKZZax']*3 + ['Afxzi7oI0yyp']*3 + ['lb983ODxEFUD']*3,
"device": ['iMTB2alwYk1B']*3 + ['3Zkk0bhWmyny']*3 + ['n8rndM6J5_4B']*3,
"time": [1547709614.05, 1547709686.036, 1547709722.06, 1547710540.99, 1547710688.469, 1547711339.439, 1547711831.275, 1547711952.182, 1547712028.281 ],
"battery_level": [96, 96, 95, 95, 94, 93, 94, 94, 94],
"battery_status": ['3']*5 + ['2', '2', '3', '3'],
"battery_health": ['2']*9,
"battery_adaptor": ['0']*5+['1', '1', '0', '0'],
"datetime": ['2019-01-17 09:20:14.049999872+02:00', '2019-01-17 09:21:26.036000+02:00', '2019-01-17 09:22:02.060000+02:00',
'2019-01-17 09:35:40.990000128+02:00', '2019-01-17 09:38:08.469000192+02:00', '2019-01-17 09:48:59.438999808+02:00',
'2019-01-17 09:57:11.275000064+02:00', '2019-01-17 09:59:12.181999872+02:00', '2019-01-17 10:00:28.280999936+02:00']
})
df11['datetime'] = pd.to_datetime(df11['datetime'])
df11 = df11.set_index('datetime', drop=False)
def test_get_battery_data():
df=df11.copy()
battery = niimpy.battery.get_battery_data(df)
assert battery.loc[Timestamp('2019-01-17 09:20:14.049999872+02:00'), 'battery_level'] == 96
assert battery.loc[Timestamp('2019-01-17 09:21:26.036000+02:00'), 'battery_health'] == '2'
assert battery.loc[Timestamp('2019-01-17 09:48:59.438999808+02:00'), 'battery_status'] == '2'
assert battery.loc[Timestamp('2019-01-17 09:57:11.275000064+02:00'), 'battery_adaptor'] == '1'
def test_battery_occurrences():
df=df11.copy()
occurances = niimpy.battery.battery_occurrences(df, hours=0, minutes=10)
assert occurances.loc[ | Timestamp('2019-01-17 09:20:14.049999872+02:00') | pandas.Timestamp |
import re
import numpy as np
import pytest
from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series
import pandas._testing as tm
from pandas.core.arrays.categorical import recode_for_categories
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalAPI:
def test_ordered_api(self):
# GH 9347
cat1 = Categorical(list("acb"), ordered=False)
tm.assert_index_equal(cat1.categories, Index(["a", "b", "c"]))
assert not cat1.ordered
cat2 = Categorical(list("acb"), categories=list("bca"), ordered=False)
tm.assert_index_equal(cat2.categories, Index(["b", "c", "a"]))
assert not cat2.ordered
cat3 = Categorical(list("acb"), ordered=True)
tm.assert_index_equal(cat3.categories, Index(["a", "b", "c"]))
assert cat3.ordered
cat4 = Categorical(list("acb"), categories=list("bca"), ordered=True)
tm.assert_index_equal(cat4.categories, Index(["b", "c", "a"]))
assert cat4.ordered
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
assert not cat2.ordered
cat2 = cat.as_ordered()
assert cat2.ordered
cat2.as_unordered(inplace=True)
assert not cat2.ordered
cat2.as_ordered(inplace=True)
assert cat2.ordered
assert cat2.set_ordered(True).ordered
assert not cat2.set_ordered(False).ordered
cat2.set_ordered(True, inplace=True)
assert cat2.ordered
cat2.set_ordered(False, inplace=True)
assert not cat2.ordered
# removed in 0.19.0
msg = "can't set attribute"
with pytest.raises(AttributeError, match=msg):
cat.ordered = True
with pytest.raises(AttributeError, match=msg):
cat.ordered = False
def test_rename_categories(self):
cat = Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
tm.assert_numpy_array_equal(
res.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(res.categories, Index([1, 2, 3]))
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
exp_cat = Index(["a", "b", "c"])
tm.assert_index_equal(cat.categories, exp_cat)
# GH18862 (let rename_categories take callables)
result = cat.rename_categories(lambda x: x.upper())
expected = Categorical(["A", "B", "C", "A"])
tm.assert_categorical_equal(result, expected)
# and now inplace
res = cat.rename_categories([1, 2, 3], inplace=True)
assert res is None
tm.assert_numpy_array_equal(
cat.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_rename_categories_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
msg = (
"new categories need to have the same number of items as the "
"old categories!"
)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(new_categories)
def test_rename_categories_series(self):
# https://github.com/pandas-dev/pandas/issues/17981
c = Categorical(["a", "b"])
result = c.rename_categories(Series([0, 1], index=["a", "b"]))
expected = Categorical([0, 1])
tm.assert_categorical_equal(result, expected)
def test_rename_categories_dict(self):
# GH 17336
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1})
expected = | Index([4, 3, 2, 1]) | pandas.Index |
import os
from posixpath import join
import re
import math
import random
import pickle
from typing import ByteString
from pandas.core import base
import librosa
import numpy as np
import pandas as pd
from tqdm import tqdm
import soundfile as sf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from convert_aud_to_token import EmotionDataPreprocessing
# Part 1: Extract Audio Labels
def extract_audio_labels(base_path,labels_path):
if os.path.exists(labels_path):
return
info_line = re.compile(r'\[.+\]\n', re.IGNORECASE)
start_times, end_times, wav_file_names, emotions, vals, acts, doms = \
[], [], [], [], [], [], []
for sess in range(1, 6):
emo_evaluation_dir = \
base_path+'/Session{}/dialog/EmoEvaluation/'.format(sess)
evaluation_files = [l for l in os.listdir(emo_evaluation_dir) if 'Ses' in l]
for file in evaluation_files:
if file.startswith("."):
continue
with open(emo_evaluation_dir + file,encoding="utf-8") as f:
content = f.read()
info_lines = re.findall(info_line, content)
for line in info_lines[1:]: # the first line is a header
start_end_time, wav_file_name, emotion, val_act_dom = \
line.strip().split('\t')
start_time, end_time = start_end_time[1:-1].split('-')
val, act, dom = val_act_dom[1:-1].split(',')
val, act, dom = float(val), float(act), float(dom)
start_time, end_time = float(start_time), float(end_time)
start_times.append(start_time)
end_times.append(end_time)
wav_file_names.append(wav_file_name)
emotions.append(emotion)
vals.append(val)
acts.append(act)
doms.append(dom)
df_iemocap = pd.DataFrame(columns=['start_time', 'end_time', 'wav_file',
'emotion', 'val', 'act', 'dom'])
df_iemocap['start_time'] = start_times
df_iemocap['end_time'] = end_times
df_iemocap['wav_file'] = wav_file_names
df_iemocap['emotion'] = emotions
df_iemocap['val'] = vals
df_iemocap['act'] = acts
df_iemocap['dom'] = doms
os.makedirs(os.path.dirname(labels_path),exist_ok=True)
df_iemocap.to_csv(labels_path, index=False)
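# Illustrative sketch (added; the sample line is hypothetical but follows the
# EmoEvaluation layout the parser above assumes): show how one info line is split.
def _example_parse_info_line() -> None:
    line = "[6.2901 - 8.2357]\tSes01F_impro01_F000\tneu\t[2.5000, 2.5000, 2.5000]\n"
    start_end_time, wav_file_name, emotion, val_act_dom = line.strip().split('\t')
    start_time, end_time = start_end_time[1:-1].split('-')
    val, act, dom = val_act_dom[1:-1].split(',')
    print(wav_file_name, emotion, float(start_time), float(end_time), float(val), float(act), float(dom))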
# Part 2: Build Audio Vectors
def build_audio_vectors(base_path,labels_path,data_dir,sr):
labels_df = | pd.read_csv(labels_path) | pandas.read_csv |
import pytest
import pandas as pd
import numpy as np
@pytest.fixture(scope="function")
def set_helpers(request):
rand = np.random.RandomState(1337)
request.cls.ser_length = 120
request.cls.window = 12
request.cls.returns = pd.Series(
rand.randn(1, 120)[0] / 100.0,
index=pd.date_range("2000-1-30", periods=120, freq="M"),
)
request.cls.factor_returns = pd.Series(
rand.randn(1, 120)[0] / 100.0,
index=pd.date_range("2000-1-30", periods=120, freq="M"),
)
@pytest.fixture(scope="session")
def input_data():
simple_benchmark = pd.Series(
np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
)
rand = np.random.RandomState(1337)
noise = pd.Series(
rand.normal(0, 0.001, 1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
)
inv_noise = noise.multiply(-1)
noise_uniform = pd.Series(
rand.uniform(-0.01, 0.01, 1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
)
random_100k = pd.Series(rand.randn(100_000))
mixed_returns = pd.Series(
np.array([np.nan, 1.0, 10.0, -4.0, 2.0, 3.0, 2.0, 1.0, -10.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
)
one = [
-0.00171614,
0.01322056,
0.03063862,
-0.01422057,
-0.00489779,
0.01268925,
-0.03357711,
0.01797036,
]
two = [
0.01846232,
0.00793951,
-0.01448395,
0.00422537,
-0.00339611,
0.03756813,
0.0151531,
0.03549769,
]
# Sparse noise, same as noise but with np.nan sprinkled in
replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
sparse_noise = noise.replace(replace_nan, np.nan)
# Flat line tz
flat_line_1_tz = pd.Series(
np.linspace(0.01, 0.01, num=1000),
index=pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC"),
)
# Sparse flat line at 0.01
# replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
sparse_flat_line_1_tz = flat_line_1_tz.replace(replace_nan, np.nan)
df_index_simple = pd.date_range("2000-1-30", periods=8, freq="D")
df_index_week = pd.date_range("2000-1-30", periods=8, freq="W")
df_index_month = pd.date_range("2000-1-30", periods=8, freq="M")
df_week = pd.DataFrame(
{
"one": pd.Series(one, index=df_index_week),
"two": pd.Series(two, index=df_index_week),
}
)
df_month = pd.DataFrame(
{
"one": pd.Series(one, index=df_index_month),
"two": pd.Series(two, index=df_index_month),
}
)
df_simple = pd.DataFrame(
{
"one": pd.Series(one, index=df_index_simple),
"two": pd.Series(two, index=df_index_simple),
}
)
input_one = [
np.nan,
0.01322056,
0.03063862,
-0.01422057,
-0.00489779,
0.01268925,
-0.03357711,
0.01797036,
]
input_two = [
0.01846232,
0.00793951,
-0.01448395,
0.00422537,
-0.00339611,
0.03756813,
0.0151531,
np.nan,
]
df_index = pd.date_range("2000-1-30", periods=8, freq="D")
return {
# Simple benchmark, no drawdown
"simple_benchmark": simple_benchmark,
"simple_benchmark_w_noise": simple_benchmark
+ rand.normal(0, 0.001, len(simple_benchmark)),
"simple_benchmark_df": simple_benchmark.rename("returns").to_frame(),
# All positive returns, small variance
"positive_returns": pd.Series(
np.array([1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
),
# All negative returns
"negative_returns": pd.Series(
np.array([0.0, -6.0, -7.0, -1.0, -9.0, -2.0, -6.0, -8.0, -5.0])
/ 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
),
# All negative returns
"all_negative_returns": pd.Series(
np.array([-2.0, -6.0, -7.0, -1.0, -9.0, -2.0, -6.0, -8.0, -5.0])
/ 100,
index=pd.date_range("2000-1-30", periods=9, freq="D"),
),
# Positive and negative returns with max drawdown
"mixed_returns": mixed_returns,
# Weekly returns
"weekly_returns": pd.Series(
np.array([0.0, 1.0, 10.0, -4.0, 2.0, 3.0, 2.0, 1.0, -10.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="W"),
),
# Monthly returns
"monthly_returns": pd.Series(
np.array([0.0, 1.0, 10.0, -4.0, 2.0, 3.0, 2.0, 1.0, -10.0]) / 100,
index=pd.date_range("2000-1-30", periods=9, freq="M"),
),
# Series of length 1
"one_return": pd.Series(
np.array([1.0]) / 100,
index=pd.date_range("2000-1-30", periods=1, freq="D"),
),
"udu_returns": pd.Series(
np.array([10, -10, 10]) / 100,
index=pd.date_range("2000-1-30", periods=3, freq="D"),
),
# Empty series
"empty_returns": pd.Series(
np.array([]) / 100,
index=pd.date_range("2000-1-30", periods=0, freq="D"),
),
# Random noise
"noise": noise,
"noise_uniform": noise_uniform,
"random_100k": random_100k,
# Random noise inv
"inv_noise": inv_noise,
# Flat line
"flat_line_0": pd.Series(
np.linspace(0, 0, num=1000),
index= | pd.date_range("2000-1-30", periods=1000, freq="D", tz="UTC") | pandas.date_range |
from unittest.mock import MagicMock
import pandas as pd
import pytest
from click.testing import CliRunner
from pytest_mock import MockerFixture
from nlpland import cli
df_filtered = pd.DataFrame({"AA url": ["a"]})
df_full = pd.DataFrame({"AA url": ["a", "b"]})
@pytest.fixture
def filtered(mocker: MockerFixture) -> MagicMock:
yield mocker.patch("nlpland.data.filter.get_filtered_df", return_value=df_filtered)
@pytest.fixture
def load(mocker: MockerFixture) -> MagicMock:
yield mocker.patch("nlpland.data.dataset.load_dataset", return_value=df_full)
def test_download(mocker: MockerFixture, filtered: MagicMock) -> None:
download = mocker.patch("nlpland.data.dataset.download_papers")
runner = CliRunner()
result = runner.invoke(cli.download, args=["--venues", "ACL"], catch_exceptions=False)
assert result.exit_code == 0
filtered.assert_called_once()
assert filtered.call_args[0][0]["venues"] == "ACL"
download.assert_called_once_with(df_filtered)
def test_extract(mocker: MockerFixture, filtered: MagicMock) -> None:
load = mocker.patch("nlpland.data.dataset.load_dataset", return_value=df_full)
rule = mocker.patch("nlpland.data.dataset.extract_abstracts_rulebased")
anth = mocker.patch("nlpland.data.dataset.extract_abstracts_anthology")
runner = CliRunner()
result = runner.invoke(cli.extract, args=["test", "--venues", "ACL"], catch_exceptions=False)
assert result.exit_code == 0
assert "Unsupported mode 'test'" in result.output
result = runner.invoke(cli.extract, args=["rule", "--venues", "ACL"], catch_exceptions=False)
assert result.exit_code == 0
filtered.assert_called_once()
assert filtered.call_args[0][0]["venues"] == "ACL"
rule.assert_called_once_with(df_filtered, df_full, overwrite_rule=False)
result = runner.invoke(cli.extract, args=["anth"], catch_exceptions=False)
assert result.exit_code == 0
anth.assert_called_once_with(df_full)
assert load.call_count == 3
def test_checkencode(mocker: MockerFixture, load: MagicMock) -> None:
check = mocker.patch("nlpland.data.check.check_encoding_issues")
runner = CliRunner()
result = runner.invoke(cli.checkencode, catch_exceptions=False)
assert result.exit_code == 0
load.assert_called_once_with(False)
check.assert_called_once_with(df_full)
def test_checkdataset(mocker: MockerFixture, load: MagicMock) -> None:
check = mocker.patch("nlpland.data.check.check_dataset")
runner = CliRunner()
result = runner.invoke(cli.checkdataset, catch_exceptions=False)
assert result.exit_code == 0
load.assert_called_once_with(False)
check.assert_called_once_with(df_full)
def test_checkpaper(mocker: MockerFixture) -> None:
paper_path = "path"
check = mocker.patch("nlpland.data.check.check_paper_parsing")
runner = CliRunner()
result = runner.invoke(cli.checkpaper, args=[paper_path], catch_exceptions=False)
assert result.exit_code == 0
check.assert_called_once_with(paper_path)
def test_countabstractsanth(mocker: MockerFixture) -> None:
check = mocker.patch("nlpland.data.check.count_anthology_abstracts")
runner = CliRunner()
result = runner.invoke(cli.countabstractsanth, catch_exceptions=False)
assert result.exit_code == 0
check.assert_called_once()
def test_count(mocker: MockerFixture, filtered: MagicMock) -> None:
count = mocker.patch("nlpland.modules.count.top_k_tokens", return_value=(3, 4))
runner = CliRunner()
result = runner.invoke(cli.count, args=["5", "--venues", "ACL"], catch_exceptions=False)
assert result.exit_code == 0
filtered.assert_called_once()
assert filtered.call_args[0][0]["venues"] == "ACL"
count.assert_called_once_with(5, df_filtered, "1")
def test_counts_time(mocker: MockerFixture, filtered: MagicMock) -> None:
count = mocker.patch("nlpland.modules.count.counts_over_time")
runner = CliRunner()
result = runner.invoke(cli.counts_time, args=["5", "--venues", "ACL"], catch_exceptions=False)
assert result.exit_code == 0
filtered.assert_called_once()
assert filtered.call_args[0][0]["venues"] == "ACL"
count.assert_called_once_with(df_filtered, 5, "1", None, False, filtered.call_args[0][0])
def test_scatter(mocker: MockerFixture, filtered: MagicMock) -> None:
df_y = | pd.DataFrame({"AA url": ["b"]}) | pandas.DataFrame |
import click
import sys
import pandas as pd
import math
import subprocess
import logging
import os
import re
from datetime import datetime
from distutils.dir_util import copy_tree
##########################################################################
#
# Initialize globals
#
##########################################################################
startTime = datetime.now()
tmp_dir = '.tmp/'
log_dir = 'logs/'
chain_dir = 'chains/'
examples_dir = 'examples/'
# assume liftOver is in sys PATH
liftover_path = 'liftOver'
#chain_dir = os.path.abspath('./chains')
# stores remapped positions for fast re-access
# key = chro_pos, value = [chro, pos, flat=mapped/unmapped]
remapped_list = {}
# stores processed files, used for restore progress
file_list = []
# stores failed files
failed_files = []
# Valid chromosome names
#valid_chro_names = ['chr'+str(i) for i in range(1,23)]
#valid_chro_names.append('chrX')
#valid_chro_names.append('chrY')
# the distance to the next remap position
step_size = 500
# the number of positions to search
steps = 4000
# global beta
beta = 2
# if the header line is written
unmapped_logger_header = False
################### stat counters ###################
total_seg = 0
lifted_seg = 0
remapped_seg =0
rejected_seg = 0
unmapped_seg = 0
total_pro = 0
lifted_pro = 0
remapped_pro = 0
rejected_pro = 0
unmapped_pro = 0
# file = '/Volumes/arraymapMirror/arraymap/hg18/19197950/19197950_MB66_6332/segments.tab'
# ch = 'hg18ToHg19.over.chain'
# input_dir = '/Volumes/arraymapMirror/arraymap/hg18/GSE49'
# input_dir = '/Volumes/arraymapMirror/arraymap/hg18/GSE1755'
# output_dir = '/Users/bogao/DataFiles/hg19'
# segments_files = []
# probes_files = []
##########################################################################
#
# Utility functions
#
##########################################################################
# Map the unmapped positions to their nearest mappable positions
#
# Param:
# fin: path of the unmapped file generated by liftover
# chain: path of the chain file, should be same as used by liftover
# remap: the remapped_list
#
# Use global params:
# steps, step_size
# the searching range is 100(bps)/step_stize * steps in both direction.
#
# Return:
# a list of lists with chro, new_pos, name
# -1 in exception
#
# Note: unmappable positions will be returned with value 0
def solveUnmappables(fin, chain, remap):
try:
logger = logging.getLogger('liftover')
# read in unmapped file
df = pd.read_table(fin, sep='\t', comment='#', header=None, names=['chro','start','end','name'])
df.loc[df.chro == 'chr23', 'chro'] = 'chrX'
df.loc[df.chro == 'chr24', 'chro'] = 'chrY'
# keep new coordinates
positions = []
# number of items
num_pos = df.shape[0]
counter = 0
# cmd = [liftover_path, './tmp/remap.bed', chain, './tmp/remap_new.bed',
# './tmp/remap.unmapped']
cmd = [liftover_path, os.path.join(tmp_dir, 'remap.bed'), chain,
os.path.join(tmp_dir,'remap_new.bed'), os.path.join(tmp_dir,'remap.unmapped')]
        # For each unmapped position,
        # if it is in the remapped_list, get the new position from the list;
        # otherwise, gradually search along both sides of the chromosome
        # until a mappable position is found.
        # If nothing is mappable within step_size * steps bases in either direction,
        # assume the position is unmappable.
for i in range(num_pos):
chro = df.iloc[i,0]
start = df.iloc[i,1]
name = df.iloc[i,3]
new_pos = -1
new_chro = 'NA'
key = '{}_{}'.format(chro, start)
# use buffered mapping if possible
if key in remap:
new_chro = remap[key][0]
new_pos = remap[key][1]
flag = remap[key][2]
if flag == 'mapped':
counter += 1
else:
logger.warning('Failed to convert (cached): ' + str([chro, start, name]))
# do a stepwise mapping
else:
with open(os.path.join(tmp_dir, 'remap.bed'), 'w') as f:
for i in range(1,steps):
print('{}\t{}\t{}\t{}'.format(chro, start+i*step_size, start+i*step_size+1, name), file=f)
print('{}\t{}\t{}\t{}'.format(chro, start-i*step_size, start-i*step_size+1, name), file=f)
return_info = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# check running result
if return_info.returncode != 0 :
logger.warning('Approximate conversion failed, cmd error: ' + str([chro, start, name]))
elif os.path.getsize(os.path.join(tmp_dir,'remap_new.bed')) == 0 :
logger.warning('Failed to convert (new): ' + str([chro, start, name]))
remap[key] = [new_chro, new_pos, 'unmapped']
# use the first mapping result
else:
with open(os.path.join(tmp_dir, 'remap_new.bed'), 'r') as f:
next(f)
for line in f:
line = line.split('\t')
if len(line) > 1:
new_chro = line[0]
new_pos = int(line[1])
if new_chro == chro:
remap[key] = [new_chro, new_pos, 'mapped']
counter += 1
break
# while True:
# line = f.readline()
# line = line.split('\t')
# if len(line) > 1:
# new_chro = line[0]
# new_pos = int(line[1])
# if new_chro == chro:
# remap[key] = [new_chro, new_pos, 'mapped']
# counter += 1
# break
positions.append([new_chro, new_pos, name])
logger.info('Approximate conversion: %i/%i positions.', counter, num_pos)
return positions
except Exception as e:
logger.exception('Failure in approximate conversion: %s', fin)
return -1
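# Illustrative sketch (added, never called by the pipeline): the candidate positions
# probed around an unmapped coordinate follow the same pattern written to remap.bed
# above, i.e. step_size-spaced points on both sides of the original start.
def _example_probe_positions(chro='chr1', start=1000000, n=3):
    probes = []
    for i in range(1, n):
        probes.append((chro, start + i * step_size, start + i * step_size + 1))
        probes.append((chro, start - i * step_size, start - i * step_size + 1))
    return probes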
# Convert the genome coordinates in segments.tab to the specified edition
# according to the provided chain file.
#
# Params:
# fin: the path of the input file
# chain: the path of the chain file
# remap: the remapped_list
#
# Return:
# 0 or -1
#
def convertSegments(fin, fo, chain, remap, remap_flag=True, new_colnames = []):
logger = logging.getLogger('liftover')
logger.info('Processing segment:\t%s', fin)
try:
df = pd.read_table(fin, sep='\t', low_memory=False, keep_default_na=False)
# save original column name
original_colnames = df.columns.values.tolist()
#Rename columns for processing
df.rename(columns={df.columns[0]:'sample_id', df.columns[1]:'chromosome', df.columns[2]:'start',
df.columns[3]:'stop'}, inplace=True)
#Save column names for order restore after processing.
col_names = df.columns
#Drop NA
df = df.dropna(axis=0, how='any', subset=['chromosome', 'start', 'stop'])
chro_name = str( df.loc[0,'chromosome'] )
if 'chr' not in chro_name:
df['chr'] = 'chr' + df['chromosome'].astype(str)
else:
df['chr'] = df['chromosome'].astype(str)
#Force positions to be integer
df.start = df.start.astype(int)
df.stop = df.stop.astype(int)
#Generate new columns for processing
df['name'] = df.index
# update global counter
global total_seg
this_total = df.shape[0]
total_seg += this_total
#Create a file of start coordinates
df_starts = df.loc[:,['chr','start','stop','name']]
df_starts['stop'] = df_starts.start + 1
df_starts.to_csv(os.path.join(tmp_dir,'starts.bed'), sep=' ', index=False, header=False)
#Create a file of end coordinates
df_ends = df.loc[:,['chr','start','stop','name']]
df_ends['start'] = df_ends.stop - 1
df_ends.to_csv(os.path.join(tmp_dir, 'ends.bed'), sep=' ', index=False, header=False)
#Convert the start coordinates
# cmd = [liftover_path , './tmp/starts.bed' , chain , './tmp/starts_new.bed' ,
# './tmp/starts.unmapped']
cmd = [liftover_path , os.path.join(tmp_dir, 'starts.bed'), chain,
os.path.join(tmp_dir, 'starts_new.bed') , os.path.join(tmp_dir, 'starts.unmapped')]
return_info = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if return_info.returncode != 0 :
logger.error('sh: %s', cmd)
raise RuntimeError(cmd)
#Read in the new start positions from a file
starts_new = pd.read_table(os.path.join(tmp_dir, 'starts_new.bed'), sep='\t', names=df_starts.columns)
del starts_new['stop']
# update counter
# lifted_start = starts_new.shape[0]
# remapped_start = 0
#Remap unmapped start positions
if (remap_flag == True) and (os.path.getsize(os.path.join(tmp_dir, 'starts.unmapped')) >0):
starts_remap = solveUnmappables(os.path.join(tmp_dir, 'starts.unmapped'), chain, remap)
starts_remap = pd.DataFrame(starts_remap, columns=starts_new.columns)
# update counter
# remapped_start = starts_remap.shape[0]
#Merge start positions
starts_new = starts_new.append(starts_remap)
else:
starts_remap = pd.DataFrame(columns=starts_new.columns)
#Convert the end coordinates
# cmd = [liftover_path , './tmp/ends.bed' , chain , './tmp/ends_new.bed' ,
# './tmp/ends.unmapped' ]
cmd = [liftover_path , os.path.join(tmp_dir, 'ends.bed') , chain ,
os.path.join(tmp_dir, 'ends_new.bed') , os.path.join(tmp_dir, 'ends.unmapped' )]
return_info = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if return_info.returncode != 0 :
logger.error('sh: %s', cmd)
raise RuntimeError(cmd)
#Read in the new end positions from a file
ends_new = pd.read_table(os.path.join(tmp_dir,'ends_new.bed'), sep='\t', names=df_ends.columns)
del ends_new['start']
# update counter
# lifted_end = ends_new.shape[0]
# remapped_end = 0
#ends_new.rename(columns={'start':'stop'}, inplace=True)
#Remap unmapped end positions
if (remap_flag == True) and (os.path.getsize(os.path.join(tmp_dir, 'ends.unmapped')) >0):
ends_remap = solveUnmappables(os.path.join(tmp_dir, 'ends.unmapped'), chain, remap)
ends_remap = pd.DataFrame(ends_remap, columns=ends_new.columns)
# update counter
# remapped_end = ends_remap.shape[0]
#Merge end positions
ends_new = ends_new.append(ends_remap)
else:
ends_remap = pd.DataFrame(columns=ends_new.columns)
#Merge new positions with original data
dd = | pd.merge(starts_new,ends_new,how='inner', on=['name'], suffixes=['_s', '_e']) | pandas.merge |
# Quantify the dots to select the best quantification
# Will take the mid pixel, mid 9 pixels and mid 25 pixels and divide them by the corners.
# bsub -q short -W 4:00 -R "rusage[mem=50000]" -oo multiple_dot_lists_quantify_corners_HFF_mean_density.out -eo multiple_dot_lists_quantify_corners_HFF_mean_density.err 'python multiple_dot_lists_quantify_corners_HFF_mean_density.py'
# %matplotlib inline
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
mpl.style.use('seaborn-white')
import multiprocess as mp
import numpy as np
import pandas as pd
import bioframe
import cooltools
import cooler
#import bbi
from cooltools import snipping
import sys
import seaborn as sns
import csv
def pileup_multiple_dot_lists(cool_file,dot_file_list, exp_cool,resolution,flank,anchor_dist,anchor_flank,plot_name):
i=0
filename1=cool_file[0].split("/")[-2].split("_hg38")[0]
filename2=cool_file[1].split("/")[-2].split("_hg38")[0]
filename3=cool_file[2].split("/")[-2].split("_hg38")[0]
cool = [filename1,filename2,filename3]
exp_cool = [exp_cool[0], exp_cool[1], exp_cool[2]]
conditions = ['HiC-FA-DpnII', 'HiC-DSG-DpnII','MicroC-DSG-MNase']
print(filename1)
print(filename2)
print(filename3)
resolution=resolution
flank = flank
#resolution=sys.argv[4]
hg38 = bioframe.fetch_chromsizes('hg38')
chromsizes = bioframe.fetch_chromsizes('hg38')
chromosomes = list(chromsizes.index)
binsize = resolution
cooler_paths = {
'HiC-FA-DpnII' : cool_file[0],
'HiC-DSG-DpnII' : cool_file[1],
'MicroC-DSG-MNase' : cool_file[2],
}
exp_paths = {
'HiC-FA-DpnII' : exp_cool[0],
'HiC-DSG-DpnII' : exp_cool[1],
'MicroC-DSG-MNase' : exp_cool[2],
}
long_names = {
'HiC-FA-DpnII': 'HiC-FA-DpnII',
'HiC-DSG-DpnII': 'HiC-DSG-DpnII',
'MicroC-DSG-MNase': 'MicroC-DSG-MNase',
}
pal = sns.color_palette('colorblind')
colors = {
filename1: pal[0],
filename2 : '#333333',
filename3: pal[2],
}
clrs = {
cond: cooler.Cooler(cooler_paths[cond]) for cond in conditions
}
anchor_dist = anchor_dist
anchor_flank = flank
# dot file list
gs = plt.GridSpec(nrows=len(conditions), ncols=len(dot_file_list) + 1)
plt.figure(figsize=(6 * len(conditions)+1, 7))
mean_list={}
for dot_file in dot_file_list:
print(dot_file)
sites = pd.read_table(dot_file)
mid1=(sites['start1']+sites['end1'])/2
mid2=(sites['start2']+sites['end2'])/2
new_file=pd.DataFrame()
new_file = pd.concat([sites['chrom1'],mid1,sites['chrom2'],mid2],axis=1)
# "convergent" orientation of paired CTCF motifs
# sites = sites[(sites['strand1'] == '+') & (sites['strand2'] == '-')] ## not working
new_file.columns=['chrom1','mid1','chrom2','mid2']
#print(len(new_file))
new_file.head()
supports = [(chrom, 0, chromsizes[chrom]) for chrom in chromosomes]
snippet_flank = flank
windows1 = snipping.make_bin_aligned_windows(
binsize,
new_file['chrom1'],
new_file['mid1'],
flank_bp=snippet_flank)
# windows1['strand'] = sites['strand1']
windows2 = snipping.make_bin_aligned_windows(
binsize,
new_file['chrom2'],
new_file['mid2'],
flank_bp=snippet_flank)
windows = pd.merge(windows1, windows2, left_index=True, right_index=True, suffixes=('1', '2'))
windows = snipping.assign_regions(windows, supports)
windows = windows.dropna()
windows.head()
stacks = {}
piles = {}
# mid point distplot
k=0
r_list=[]
mean_1=[]
for cond in conditions:
expected = | pd.read_table(exp_paths[cond]) | pandas.read_table |
__description__ = \
"""
Helper code for converting raw values from plate reader into fluorescence
anisotropy binding experiments.
"""
__author__ = "<NAME>"
__date__ = "2020-09-01"
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize
import re
def calculate_r(vv,vh,G=1.0):
"""
Calculate anisotropy from vertical (vv), horizontal (vh), and empirical
G-factor (G).
"""
return (vv - G*vh)/(vv + 2*G*vh)
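# Quick sanity check (added for illustration): with equal vertical and horizontal
# intensities and G = 1, the anisotropy is 0 by construction.
def _example_calculate_r() -> float:
    return calculate_r(vv=1000.0, vh=1000.0, G=1.0)  # expected to return 0.0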
def species(Mt,Xt,Kd):
"""
Return species of M, X, and MX for a single-site binding model given
total protein concentration (Mt), total ligand concentration (Xt),
and dissociation constant (Kd).
"""
a = 1
b = -(Xt + Mt + Kd)
c = Xt*Mt
roots = [(-b + np.sqrt(b**2 - 4*a*c))/(2*a),
(-b - np.sqrt(b**2 - 4*a*c))/(2*a)]
allowed_roots = []
for r in roots:
if np.isreal(r):
if r >= 0 and r <= Mt and r <= Xt:
allowed_roots.append(r)
if len(allowed_roots) == 0:
err = "no root found!\n"
raise RuntimeError(err)
if len(allowed_roots) > 1:
err = "no unique root found!\n"
raise RuntimeError(err)
MX = allowed_roots[0]
X = Xt - MX
M = Mt - MX
return M,X,MX
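# Illustrative check (added; the concentrations are arbitrary example values in molar):
# with protein well above both the probe concentration and Kd, most probe ends up bound.
def _example_species_usage() -> None:
    M, X, MX = species(Mt=1e-6, Xt=1e-9, Kd=50e-9)
    print(M, X, MX)  # MX should come out close to Xt, with X close to 0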
def get_peptide_Kd_scalar(Kd,Mt,Xt):
"""
returns factor to multiply Kd_app by to obtain actual Kd.
Kd: probe Kd
Mt: total protein conc
Xt: total probe conc
Follows Eq. 7 in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3000649/
"""
M, X, MX = species(Mt,Xt,Kd)
delta_0 = MX/Xt
peptide_Kd_scalar = 1 + Xt*(1-delta_0/2)/Kd+delta_0/(1-delta_0)
return peptide_Kd_scalar
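# Hedged sketch (added; the probe Kd and concentrations below are hypothetical):
# compute the correction factor for an apparent peptide Kd, applied by
# multiplication as described in the docstring above.
def _example_peptide_kd_correction(Kd_app: float) -> float:
    scalar = get_peptide_Kd_scalar(Kd=50e-9, Mt=1e-6, Xt=5e-9)
    return Kd_app * scalar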
def read_plate_layout(plate_layout):
"""
Read a plate layout. See the read_file doc string for details of the
format.
Returns a list, where each element is a plate, that has the form:
[{"protein":{(row_name,column_number):protein,
"peptide":{(row_name,column_number):peptide,
"value":{(row_name,column_number):value},
...,]
If the contents of a cell can be coerced into a float, value will be a
float. If the contents of a cell is a string that starts with na (case-
insensitive), the cell will be np.nan. Otherwise, value will be a string.
"""
plates = [{"protein":{},"peptide":{},"conc":{}}]
plate_counter = 0
df = pd.read_excel(plate_layout,header=None)
for i in range(len(df)):
row = np.array(df.iloc[i])
if row[0] == "protein":
column_protein = row[:]
continue
elif row[0] == "peptide":
column_peptide = row[:]
continue
else:
try:
np.isnan(row[0])
plate_counter += 1
plates.append({"protein":{},"peptide":{},"conc":{}})
continue
except TypeError:
row_name = row[0]
for j in range(1,len(row)):
plates[-1]["protein"][(row_name,j)] = column_protein[j]
plates[-1]["peptide"][(row_name,j)] = column_peptide[j]
try:
plates[-1]["conc"][(row_name,j)] = np.float(row[j])
except ValueError:
plates[-1]["conc"][(row_name,j)] = row[j]
return plates
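# Hedged usage sketch (added; "layout.xlsx" is a hypothetical file laid out as the
# read_file docstring below describes): each plate maps (row_letter, column_number)
# to the protein, peptide and concentration of that well.
def _example_read_plate_layout() -> None:
    plates = read_plate_layout("layout.xlsx")
    first = plates[0]
    print(first["protein"][("A", 1)], first["peptide"][("A", 1)], first["conc"][("A", 1)])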
def read_file(plate_file,plate_layout,min_value=5e4,G=1.0):
"""
plate_file: output from the plate reader
plate_layout: excel file denoting plate layout
min_value: if not none, make any value below this NaN. only applies to
values in the main plate, not any named data.
G: G-factor for anisotropy
Inputs
------
Expects plate_layout to be a pandas readable spreadsheet with the form that
follows. For a plate with "M" rows and "N" columns:
protein,prot1,prot2,prot3,...,protN
peptide,pep1,pep2,pep3,...,pepN
A,concA1,concA2,concA3,...,concAN
B,concB1,concB2,concB3,...,concBN
...
M,concM1,concM2,concM3,...,concMN
where prot1 is the protein in column 1, pep3 is the peptide in column 3,
etc. This assumes a column only has one protein/peptide pair present.
It is also possible to have named data. If you were to, for example, put
the string "blank" in a cell, this will be read out as named_data. (see
below).
If the contents of a cell (in plate_layout) can be coerced into a float, the
cell will be read as a concentration. If it is a string that starts with
na (case-insensitive) it will be read as a concentration of value np.nan
(probably an empty cell). Otherwise, the cell will be interpreted as a key
for some named data.
Returns
-------
A data frame and a dictionary.
Data frame holds:
plate, plate_row, plate_column, c1, c2, protein, peptide, and concentration.
Dictionary holds any named cells in a plate. For example, if plate 0 had two
cells named blank with c1 and c2 of [10,11] and [44,55], this would return:
{
0:{
"blank":{
"c1":np.array([10,11]),
"c2":np.array([44,55]),
}
}
}
"""
# Read the plate layout file
plates = read_plate_layout(plate_layout)
# Read plate_file dumped by plate reader
f = open(plate_file,'rb')
data = f.read().decode("utf16")
f.close()
# Create out dictionary for main data
out_dict = {"plate":[],
"plate_row":[],
"plate_column":[],
"c1":[],
"c2":[]}
# Dictionary to hold named data
named_data = {}
num_plates = 0
plate_counter = 0
next_row = "header"
for l in data.split("\r\n"):
columns = l.split("\t")
# Get number of plates if no plates have been read
if num_plates == 0:
if len(columns) > 0 and columns[0].startswith("##BLOCKS"):
num_plates = int(columns[0].split("=")[1])
continue
# Read a plate
if num_plates > 0 and len(columns) > 1 and columns[0] == "":
# real_columns holds columns with data
real_columns = columns[2:-1]
# Parse header
if real_columns[0] == "A1":
# Sanity check to make sure this row looks like it should
if next_row != "header":
err = f"expecting {next_row} row, but got header row.\n"
raise ValueError(err)
plate_counter += 1
out_dict["plate"].extend([int(plate_counter)
for _ in range(len(real_columns))])
out_dict["plate_row"].extend([r[0] for r in real_columns])
out_dict["plate_column"].extend([int(r[1:]) for r in real_columns])
next_row = "c1"
else:
# Sanity check to make sure this row looks like it should
if next_row not in ["c1","c2"]:
err = f"expecting {next_row} row, but got data row.\n"
raise ValueError(err)
# Parse columns
for c in real_columns:
try:
value = float(c)
except ValueError:
value = np.nan
out_dict[next_row].append(value)
# Update next row counter
if next_row == "c1":
next_row = "c2"
else:
next_row = "header"
# Sanity check for plates
if num_plates != len(plates):
err = "number of plates different in data file and plate layout.\n"
raise ValueError(err)
# Make data frame from output
df = | pd.DataFrame(out_dict) | pandas.DataFrame |
import pandas as pd
def find_timegaps(series, gap, gap_comparison='higher', divergent_only=False):
"""
Find time gaps in the datetime series in input according to the gap size
checked using the operator specified.
The type of comparison along with the gap size define what gaps will be
flagged. If the variable divergent_only is True, only the gaps that does
not satisfy the comparison are returned. The returned column mask shows
which timestamps satisfied the comparison. Each gap returned in the column
delta is relative to the gap between the current timestamp and the previous
one.
Parameters
----------
series : pandas.Series of dtype=datetime
Series that represents a dataframe index, populated by timestamps.
gap : string to be parsed to pandas.Timedelta:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Timedelta.html
The time delta used to find time gaps higher, equal or lower with
respect to this delta.
gap_comparison : {'higher', 'equal', 'lower'}
The operator used in the comparison.
divergent_only : bool
        Select if only the timestamps that do not satisfy the condition are
returned.
Returns
-------
DataFrame
Dataframe with integer indexes and timestamps of the input series as
rows, the comparison result and the time gap as columns.
"""
delta = pd.Series(series).diff() # [1:]
if gap_comparison == 'higher':
mask = delta > pd.Timedelta(gap)
elif gap_comparison == 'lower':
mask = delta < pd.Timedelta(gap)
elif gap_comparison == 'equal':
mask = delta == | pd.Timedelta(gap) | pandas.Timedelta |
from datetime import datetime
import pytest
from pytz import utc
import pandas._testing as tm
from pandas.tseries.holiday import (
MO,
SA,
AbstractHolidayCalendar,
DateOffset,
EasterMonday,
GoodFriday,
Holiday,
HolidayCalendarFactory,
Timestamp,
USColumbusDay,
USLaborDay,
USMartinLutherKingJr,
USMemorialDay,
USPresidentsDay,
USThanksgivingDay,
get_calendar,
next_monday,
)
def _check_holiday_results(holiday, start, end, expected):
"""
Check that the dates for a given holiday match in date and timezone.
Parameters
----------
holiday : Holiday
The holiday to check.
start : datetime-like
The start date of range in which to collect dates for a given holiday.
end : datetime-like
The end date of range in which to collect dates for a given holiday.
expected : list
The list of dates we expect to get.
"""
assert list(holiday.dates(start, end)) == expected
# Verify that timezone info is preserved.
assert list(
holiday.dates(utc.localize(Timestamp(start)), utc.localize(Timestamp(end)))
) == [utc.localize(dt) for dt in expected]
@pytest.mark.parametrize(
"holiday,start_date,end_date,expected",
[
(
USMemorialDay,
datetime(2011, 1, 1),
datetime(2020, 12, 31),
[
datetime(2011, 5, 30),
datetime(2012, 5, 28),
datetime(2013, 5, 27),
datetime(2014, 5, 26),
datetime(2015, 5, 25),
datetime(2016, 5, 30),
datetime(2017, 5, 29),
datetime(2018, 5, 28),
datetime(2019, 5, 27),
datetime(2020, 5, 25),
],
),
(
Holiday("July 4th Eve", month=7, day=3),
"2001-01-01",
"2003-03-03",
[Timestamp("2001-07-03 00:00:00"), Timestamp("2002-07-03 00:00:00")],
),
(
Holiday("July 4th Eve", month=7, day=3, days_of_week=(0, 1, 2, 3)),
"2001-01-01",
"2008-03-03",
[
| Timestamp("2001-07-03 00:00:00") | pandas.tseries.holiday.Timestamp |
# -*- coding: utf-8 -*-
from typing import Optional, Union
import pandas as pd
import typic
from standard_precip.spi import SPI
from tstoolbox import tsutils
def _nlarge_nsmall(pe_data, nlargest, nsmallest, groupby):
if nlargest is None and nsmallest is None:
return pe_data
nlarge = pd.Series()
nsmall = pd.Series()
if nlargest is not None:
nlarge = pe_data.resample(groupby).apply(
lambda x: x.nlargest(int(nlargest), x.columns[0])
)
nlarge = nlarge.droplevel(0)
nlarge.sort_index(inplace=True)
nlarge = nlarge.reindex(
| pd.date_range(start=nlarge.index[0], end=nlarge.index[-1], freq="D") | pandas.date_range |
import numpy as np
import re
import csv  # needed by blankspace2csv below
from scipy import stats
import gnc
import netCDF4 as nc
import copy as pcopy
import pdb
import pb
import pandas as pa
def Dic_DataFrame_to_Excel(excel_file,dic_df,multisheet=False,keyname=True,na_rep='', cols=None, header=True, index=True, index_label=None):
"""
Write a dictionary of pandas.DataFrame data to an excel file with keys as excel sheets.
"""
def _df_member(obj):
"""
check if this obj is DataFrame obj.
"""
if isinstance(obj,pa.core.frame.DataFrame):
return True
else:
raise TypeError('this is not a DataFrame object')
for key,dfm in dic_df.items():
_df_member(dfm)
excel_wob=pa.ExcelWriter(excel_file)
if multisheet==True:
for key,dfm in dic_df.items():
dfm.to_excel(excel_wob,sheet_name=key,na_rep=na_rep,cols=cols,header=header,index=index,index_label=index_label)
elif multisheet==False:
for key,dfm in dic_df.items():
if keyname==True:
excel_wob.writerow([],sheet_name='sheet1')
excel_wob.writerow([key],sheet_name='sheet1')
dfm.to_excel(excel_wob,sheet_name='sheet1',na_rep=na_rep,cols=cols,header=header,index=index,index_label=index_label)
else:
pass
excel_wob.save()
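# Hedged usage sketch (added; "example.xlsx" is a hypothetical output path, and the
# to_excel call above uses an older pandas keyword (cols), so this may need a current
# pandas equivalent): write two DataFrames into one workbook, one sheet per key.
def _example_dic_to_excel():
    dic = {'first': pa.DataFrame({'a': [1, 2]}), 'second': pa.DataFrame({'b': [3.0, 4.0]})}
    Dic_DataFrame_to_Excel('example.xlsx', dic, multisheet=True)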
def DataFrame_to_Excel(excel_file,DataFrame,na_rep='', cols=None, header=True, index=True, index_label=None):
"""
Write a pandas DataFrame to excel file
"""
tempdic={}
tempdic['data']=DataFrame
Dic_DataFrame_to_Excel(excel_file,tempdic,multisheet=False,keyname=False,na_rep=na_rep, cols=cols, header=header, index=index, index_label=index_label)
def blankspace2csv(blank_sep_file,csvfile):
"""
Purpose: change a blankspace separated file to a comma separated file. Different columns in the blank space delimited file can have arbitrary number of blank spaces.
"""
csv.register_dialect('whitespace',delimiter=' ',skipinitialspace=True)
fob=open(blank_sep_file)
csvob=csv.reader(fob,dialect='whitespace')
fob2=file(csvfile,'w')
csv_writer = csv.writer(fob2)
for d in csvob:
csv_writer.writerow(d)
fob2.close()
def DataFrame_from_Dic_Key_as_Index(data,columns=None):
"""
Purpose: to convert dictionary to pandas dataframe using dic.keys() as index and impose column names by setting columns=['var1','var2','var3']
Note:
1. data is a dictionary, columns is list of strings representing new dataframe column names.
2. The original keys of input dictionary serve as index for new dataframe; len(columns) must be equal to len(data[key])
3*. currently (2012/05/14) data[key]=(1D np.array -- or list -- or only one number) has been tested.
Example:
In [41]: print p2s2.keys()
[1952, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006]
In [42]: p2s2[1952]
Out[42]: array([ 16.57142857, 2. , 42. ])
In [51]: df=mathex.DataFrame_fromdic(p2s2,columns=['mean','min','max'])
In [52]: df.head()
Out[52]:
max mean min
1952 42 16.571429 2
1955 55 23.400000 5
1956 35 16.714286 4
1957 37 23.600000 11
1958 71 39.666667 11
In [53]: df.index
Out[53]:
Int64Index([1952, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964,
1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975,
1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1987,
1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006], dtype=int64)
"""
temp=dict()
if columns == None:
raise ValueError('columns names must be provided!')
else:
for colname in columns:
temp[colname]=[]
for i in data.keys():
if len(columns)==1:
temp[colname].append(data[i])
else:
for j,colname in enumerate(columns):
temp[colname].append(data[i][j])
tempdf=pa.DataFrame(temp,index=data.keys())
return tempdf
def csv_concat(outfile,infile_list,index_col=None):
"""
Purpose: Used to concat several csv files with the same column names.
"""
if not isinstance(infile_list,list):
raise TypeError("infile_list must be provided as a list")
else:
df_list=[]
for filename in infile_list:
dft = pa.read_csv(filename,index_col=index_col)
df_list.append(dft)
df_all = | pa.concat(df_list) | pandas.concat |
from Kernel import Kernel
from agent.ExchangeAgent import ExchangeAgent
from agent.HeuristicBeliefLearningAgent import HeuristicBeliefLearningAgent
from agent.examples.ImpactAgent import ImpactAgent
from agent.ZeroIntelligenceAgent import ZeroIntelligenceAgent
from util.order import LimitOrder
from util.oracle.MeanRevertingOracle import MeanRevertingOracle
from util import util
import numpy as np
import pandas as pd
import sys
DATA_DIR = "~/data"
# Some config files require additional command line parameters to easily
# control agent or simulation hyperparameters during coarse parallelization.
import argparse
parser = argparse.ArgumentParser(description='Detailed options for momentum config.')
parser.add_argument('-b', '--book_freq', default=None,
help='Frequency at which to archive order book for visualization')
parser.add_argument('-c', '--config', required=True,
help='Name of config file to execute')
parser.add_argument('-g', '--greed', type=float, default=0.25,
help='Impact agent greed')
parser.add_argument('-i', '--impact', action='store_false',
help='Do not actually fire an impact trade.')
parser.add_argument('-l', '--log_dir', default=None,
help='Log directory name (default: unix timestamp at program start)')
parser.add_argument('-n', '--obs_noise', type=float, default=1000000,
help='Observation noise variance for zero intelligence agents (sigma^2_n)')
parser.add_argument('-r', '--shock_variance', type=float, default=500000,
help='Shock variance for mean reversion process (sigma^2_s)')
parser.add_argument('-o', '--log_orders', action='store_true',
help='Log every order-related action by every agent.')
parser.add_argument('-s', '--seed', type=int, default=None,
help='numpy.random.seed() for simulation')
parser.add_argument('-v', '--verbose', action='store_true',
help='Maximum verbosity!')
parser.add_argument('--config_help', action='store_true',
help='Print argument options for this config file')
args, remaining_args = parser.parse_known_args()
if args.config_help:
parser.print_help()
sys.exit()
# Historical date to simulate. Required even if not relevant.
historical_date = pd.to_datetime('2014-01-28')
# Requested log directory.
log_dir = args.log_dir
# Requested order book snapshot archive frequency.
book_freq = args.book_freq
# Observation noise variance for zero intelligence agents.
sigma_n = args.obs_noise
# Shock variance of mean reversion process.
sigma_s = args.shock_variance
# Impact agent greed.
greed = args.greed
# Should the impact agent actually trade?
impact = args.impact
# Random seed specification on the command line. Default: None (by clock).
# If none, we select one via a specific random method and pass it to seed()
# so we can record it for future use. (You cannot reasonably obtain the
# automatically generated seed when seed() is called without a parameter.)
# Note that this seed is used to (1) make any random decisions within this
# config file itself and (2) to generate random number seeds for the
# (separate) Random objects given to each agent. This ensure that when
# the agent population is appended, prior agents will continue to behave
# in the same manner save for influences by the new agents. (i.e. all prior
# agents still have their own separate PRNG sequence, and it is the same as
# before)
seed = args.seed
if not seed: seed = int(pd.Timestamp.now().timestamp() * 1000000) % (2**32 - 1)
np.random.seed(seed)
# Config parameter that causes util.util.print to suppress most output.
# Also suppresses formatting of limit orders (which is time consuming).
util.silent_mode = not args.verbose
LimitOrder.silent_mode = not args.verbose
# Config parameter that causes every order-related action to be logged by
# every agent. Activate only when really needed as there is a significant
# time penalty to all that object serialization!
log_orders = args.log_orders
print ("Silent mode: {}".format(util.silent_mode))
print ("Logging orders: {}".format(log_orders))
print ("Book freq: {}".format(book_freq))
print ("ZeroIntelligenceAgent noise: {:0.4f}".format(sigma_n))
print ("ImpactAgent greed: {:0.2f}".format(greed))
print ("ImpactAgent firing: {}".format(impact))
print ("Shock variance: {:0.4f}".format(sigma_s))
print ("Configuration seed: {}\n".format(seed))
# Since the simulator often pulls historical data, we use a real-world
# nanosecond timestamp (pandas.Timestamp) for our discrete time "steps",
# which are considered to be nanoseconds. For other (or abstract) time
# units, one can either configure the Timestamp interval, or simply
# interpret the nanoseconds as something else.
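# Illustrative example (added): a wakeup 350 nanoseconds after the 9:30 AM open on the
# simulated date is plain Timestamp arithmetic, e.g.:
_example_wakeup_time = historical_date + pd.to_timedelta('09:30:00') + pd.Timedelta(350, unit='ns')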
# What is the earliest available time for an agent to act during the
# simulation?
midnight = historical_date
kernelStartTime = midnight
# When should the Kernel shut down? (This should be after market close.)
# Here we go for 5 PM the same day.
kernelStopTime = midnight + pd.to_timedelta('17:00:00')
# This will configure the kernel with a default computation delay
# (time penalty) for each agent's wakeup and recvMsg. An agent
# can change this at any time for itself. (nanoseconds)
defaultComputationDelay = 0 # no delay for this config
# IMPORTANT NOTE CONCERNING AGENT IDS: the id passed to each agent must:
# 1. be unique
# 2. equal its index in the agents list
# This is to avoid having to call an extra getAgentListIndexByID()
# in the kernel every single time an agent must be referenced.
# This is a list of symbols the exchange should trade. It can handle any number.
# It keeps a separate order book for each symbol. The example data includes
# only IBM. This config uses generated data, so the symbol doesn't really matter.
# If shock variance must differ for each traded symbol, it can be overridden here.
symbols = { 'IBM' : { 'r_bar' : 100000, 'kappa' : 0.05, 'sigma_s' : sigma_s } }
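# Hypothetical example (the second symbol and its values are illustrative only):
# a per-symbol shock variance override would simply use a different 'sigma_s', e.g.
# symbols['MSFT'] = { 'r_bar' : 100000, 'kappa' : 0.05, 'sigma_s' : sigma_s * 2 }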
### Configure the Kernel.
kernel = Kernel("Base Kernel", random_state = np.random.RandomState(seed=np.random.randint(low=0,high=2**32)))
### Configure the agents. When conducting "agent of change" experiments, the
### new agents should be added at the END only.
agent_count = 0
agents = []
agent_types = []
### Configure an exchange agent.
# Let's open the exchange at 9:30 AM.
mkt_open = midnight + | pd.to_timedelta('09:30:00') | pandas.to_timedelta |
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period("2011-01-01", freq="D")
result = idx.take([5])
assert result == pd.Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([3, 2, 5])
expected = PeriodIndex(
["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([-3, 2, 5])
expected = PeriodIndex(
["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_take_misc(self):
index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
expected = PeriodIndex(
[
datetime(2010, 1, 6),
datetime(2010, 1, 7),
datetime(2010, 1, 9),
datetime(2010, 1, 13),
],
freq="D",
name="idx",
)
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
assert idx_dec0.is_monotonic_increasing is False
assert idx_dec1.is_monotonic_increasing is False
assert idx.is_monotonic_increasing is False
def test_is_monotonic_decreasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_decreasing is False
assert idx_inc1.is_monotonic_decreasing is False
assert idx_dec0.is_monotonic_decreasing is True
assert idx_dec1.is_monotonic_decreasing is True
assert idx.is_monotonic_decreasing is False
def test_contains(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
p3 = pd.Period("2017-09-04")
ps0 = [p0, p1, p2]
idx0 = pd.PeriodIndex(ps0)
for p in ps0:
assert p in idx0
assert str(p) in idx0
assert "2017-09-01 00:00:01" in idx0
assert "2017-09" in idx0
assert p3 not in idx0
def test_get_value(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx0 = pd.PeriodIndex([p0, p1, p2])
input0 = np.array([1, 2, 3])
expected0 = 2
result0 = idx0.get_value(input0, p1)
assert result0 == expected0
idx1 = pd.PeriodIndex([p1, p1, p2])
input1 = np.array([1, 2, 3])
expected1 = np.array([1, 2])
result1 = idx1.get_value(input1, p1)
tm.assert_numpy_array_equal(result1, expected1)
idx2 = | pd.PeriodIndex([p1, p2, p1]) | pandas.PeriodIndex |
import pandas as pd
from flask import current_app
def venn_diagram_join(df1, df2):
# Calculates the join between two dataframes like a Venn diagram
#
# Join criteria is all columns in common between them.
    # Returns which rows are only present in the left, which overlap,
# and which are only on the right table.
#
# An evolution of earlier work from match_data.join_on_all_columns
venn_indicator = '_merge' # temporary column name for designating the match type
join_results = df1.merge(df2, how='outer', indicator=venn_indicator)
return (
join_results[join_results[venn_indicator] == 'left_only'].drop(columns=venn_indicator),
join_results[join_results[venn_indicator] == 'both'].drop(columns=venn_indicator),
join_results[join_results[venn_indicator] == 'right_only'].drop(columns=venn_indicator)
)
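# Usage sketch for venn_diagram_join (variable names are illustrative, not from this module):
# left_only, both, right_only = venn_diagram_join(existing_df, incoming_df)
# left_only -> rows only in existing_df, both -> rows shared by the two frames,
# right_only -> rows only in incoming_df.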
def filter_rows_by_ids(df, ids):
assert "source_id" in ids and "source_type" in ids and len(ids.columns) == 2
return df.merge(ids, how="inner") # rows in df with the expected id
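# Note: filter_rows_by_ids requires `ids` to have exactly the two columns
# ["source_id", "source_type"], so the inner merge keeps only the rows of `df`
# whose (source_id, source_type) pair appears in `ids`.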
def start(pdp_contacts_df, normalized_data):
current_app.logger.info("Starting classification of rows")
current_app.logger.info(" - {} rows in incoming data and {} in existing pdp_contacts".format(
normalized_data.shape[0], pdp_contacts_df.shape[0]
))
result = {
"new": pd.DataFrame(columns=pdp_contacts_df.columns),
"updated": | pd.DataFrame(columns=pdp_contacts_df.columns) | pandas.DataFrame |
import numpy as np
import pandas as pd
import tqdm
from . import algorithm
from . import loss
from . import utils
try:
import tensorflow as tf
except Exception:
import warnings
warnings.warn("`import tensorflow as tf` returns an error: gradient.py won't work.")
class GradientAlgorithm(algorithm.Algorithm):
def __init__(
self,
explainer,
variable,
constant=None,
n_grid_points=21,
**kwargs
):
super().__init__(
explainer=explainer,
variable=variable,
constant=constant,
n_grid_points=n_grid_points
)
params = dict(
epsilon=1e-5,
stop_iter=10,
learning_rate=1e-2,
optimizer=utils.AdamOptimizer()
)
for k, v in kwargs.items():
params[k] = v
self.params = params
def fool(
self,
grid=None,
max_iter=50,
random_state=None,
save_iter=False,
verbose=True,
aim=False,
center=None
):
self._aim = aim
self._center = not aim if center is None else center
if aim is False:
super().fool(grid=grid, random_state=random_state)
# init algorithm
self._initialize()
self.result_explanation['changed'] = self.explainer.pd(
self._X_changed,
self._idv,
self.result_explanation['grid']
)
self.append_losses(i=0)
if save_iter:
self.append_explanations(i=0)
pbar = tqdm.tqdm(range(1, max_iter + 1), disable=not verbose)
for i in pbar:
# gradient of output w.r.t input
_ = self._calculate_gradient(self._X_changed)
d_output_input_long = self._calculate_gradient_long(self._X_changed)
self.result_explanation['changed'] = self.explainer.pd(
self._X_changed,
self._idv,
self.result_explanation['grid']
)
d_loss = self._calculate_gradient_loss(d_output_input_long)
step = self.params['optimizer'].calculate_step(d_loss)
self._X_changed -= self.params['learning_rate'] * step
self.append_losses(i=i)
if save_iter:
self.append_explanations(i=i)
pbar.set_description("Iter: %s || Loss: %s" % (i, self.iter_losses['loss'][-1]))
if utils.check_early_stopping(self.iter_losses, self.params['epsilon'], self.params['stop_iter']):
break
self.result_explanation['changed'] = self.explainer.pd(
X=self._X_changed,
idv=self._idv,
grid=self.result_explanation['grid']
)
_data_changed = pd.DataFrame(self._X_changed, columns=self.explainer.data.columns)
self.result_data = | pd.concat((self.explainer.data, _data_changed)) | pandas.concat |
from __future__ import division
from functools import partial
import gc
from multiprocessing import Pool
from operator import attrgetter
from typing import List, Optional, Tuple
from functional import pipe
import numpy as np
import pandas as pd
from sklearn.base import clone
from spdivik.distance import DistanceMetric, make_distance
from spdivik.score._picker import Picker
from spdivik.types import Centroids, Data, IntLabels, SegmentationMethod
from spdivik.seeding import seeded
KMeans = 'spdivik.kmeans._core.KMeans'
# TODO: Deduplicate this function. It was in `KMeans`.
def _normalize_rows(data: Data) -> Data:
normalized = data - data.mean(axis=1)[:, np.newaxis]
norms = np.sum(np.abs(normalized) ** 2, axis=-1, keepdims=True)**(1./2)
normalized /= norms
return normalized
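# Worked illustration of _normalize_rows (values chosen arbitrarily): a row [1, 2, 3]
# has its mean (2) subtracted, giving [-1, 0, 1], and is then divided by its L2 norm
# sqrt(2), so every returned row is zero-mean with unit Euclidean length.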
def _dispersion(data: Data, labels: IntLabels, centroids: Centroids,
distance: DistanceMetric, normalize_rows: bool=False) -> float:
if normalize_rows:
data = _normalize_rows(data)
clusters = | pd.DataFrame(data) | pandas.DataFrame |
import os
import pandas as pd
from sklearn import metrics, svm
from sklearn.model_selection import train_test_split
# read data and create dataframes
length = 3100
coord_list = ['all', 'x', 'y', 'z']
# create global variables to store x,y,z and xyz data
for i in range(4):
globals()[f'df_UR5_{coord_list[i]}'] = pd.DataFrame()
home = "data/Kernels/5_7_2022"
for folder in os.listdir(home):
# if "_ex" in folder:
if os.path.isdir(f"{home}/{folder}"):
for file in os.listdir(f"{home}/{folder}"):
if '.csv' in file:
df = | pd.read_csv(f"{home}/{folder}/{file}") | pandas.read_csv |
"""
Generate a report that specifies the number of contents created in the last week and overall across Live, Review and Draft.
"""
import sys, time
import os
import requests
import pandas as pd
from datetime import date, timedelta, datetime
from pathlib import Path
from string import Template
from time import sleep
from dataproducts.util.utils import get_tenant_info, create_json, post_data_to_blob, push_metric_event
class ContentCreation:
def __init__(self, data_store_location, content_search, execution_date, org_search):
self.data_store_location = Path(data_store_location)
self.content_search = content_search
self.execution_date = execution_date
self.org_search = org_search
def weekly_creation(self, result_loc_, content_search_, date_):
"""
Calculate number of contents created in the last week.
:param result_loc_: pathlib.Path object to store the resultant csv at.
:param content_search_: ip and port for service hosting content search API
:param date_: datetime object to use in query as well as path
:return: None
"""
url = "{}v3/search".format(content_search_)
payload_template = Template("""{
"request": {
"filters":{
"status": ["Live","Draft","Review"],
"contentType": ["Resource","Collection"],
"createdFor": "$tenant",
"createdOn": {">=":"$start_datetime", "<":"$end_datetime"}
},
"fields" :["name","createdBy","contentType","resourceType","mimeType","createdOn","createdFor"],
"limit":10000,
"sortBy": {"createdOn":"desc"},
"exists": "createdFor",
"facets":["status"]
}
}""")
headers = {
'content-type': "application/json; charset=utf-8",
'cache-control': "no-cache"
}
final_dict = {'review': 0, 'draft': 0, 'live': 0}
tenant_list = []
tenant_name_mapping = pd.read_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'tenant_info.csv'))
for tenant in tenant_name_mapping.id.unique():
retry_count = 0
while retry_count < 5:
retry_count += 1
try:
payload = payload_template.substitute(tenant=tenant,
start_datetime=(date_ - timedelta(days=7)).strftime(
'%Y-%m-%dT00:00:00.000+0000'),
end_datetime=date_.strftime('%Y-%m-%dT00:00:00.000+0000'))
response = requests.request("POST", url, data=payload, headers=headers)
list_of_dicts = response.json()['result']['facets'][0]['values']
tenant_wise = {'tenant': tenant}
for single_val in list_of_dicts:
final_dict[single_val['name']] = final_dict[single_val['name']] + single_val['count']
tenant_wise[single_val['name']] = single_val['count']
tenant_list.append(tenant_wise)
break
except requests.exceptions.ConnectionError:
print(tenant, "Connection error.. retrying.")
sleep(10)
except KeyError:
with open(result_loc_.joinpath('content_creation_week_errors.log'), 'a') as f:
f.write('KeyError: Response value erroneous for {}\n'.format(tenant))
break
else:
with open(result_loc_.joinpath('content_creation_week_errors.log'), 'a') as f:
f.write('ConnectionError: Max retries reached for {}\n'.format(tenant))
result = pd.DataFrame(tenant_list).fillna(0)
result_loc_.joinpath(date_.strftime('%Y-%m-%d')).mkdir(exist_ok=True)
result.to_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'week.csv'), header=True, index=False)
def overall_creation(self, result_loc_, content_search_, date_):
"""
Calculate number of contents created since beginning.
:param result_loc_: pathlib.Path object to store the resultant csv at.
:param content_search_: ip and port for service hosting content search API
:param date_: datetime object to use in query as well as path
:return: None
"""
url = "{}v3/search".format(content_search_)
payload_template = Template("""{
"request": {
"filters":{
"status": ["Live","Draft","Review"],
"contentType": ["Resource","Collection"],
"createdFor": "$tenant",
"createdOn": {"<":"$end_datetime"}
},
"fields" :["name","createdBy","contentType","resourceType","mimeType","createdOn","createdFor"],
"limit":10000,
"sortBy": {"createdOn":"desc"},
"exists": "createdFor",
"facets":["status"]
}
}""")
headers = {
'content-type': "application/json; charset=utf-8",
'cache-control': "no-cache",
'postman-token': "<PASSWORD>"
}
final_dict = {'review': 0, 'draft': 0, 'live': 0}
tenant_list = []
tenant_name_mapping = pd.read_csv(result_loc_.joinpath(date_.strftime('%Y-%m-%d'), 'tenant_info.csv'))
for tenant in tenant_name_mapping.id.unique():
retry_count = 0
while retry_count < 5:
retry_count += 1
try:
payload = payload_template.substitute(tenant=tenant,
end_datetime=date_.strftime('%Y-%m-%dT00:00:00.000+0000'))
response = requests.request("POST", url, data=payload, headers=headers)
list_of_dicts = response.json()['result']['facets'][0]['values']
tenant_wise = {'tenant': tenant}
for single_val in list_of_dicts:
final_dict[single_val['name']] = final_dict[single_val['name']] + single_val['count']
tenant_wise[single_val['name']] = single_val['count']
tenant_list.append(tenant_wise)
break
except requests.exceptions.ConnectionError:
print(tenant, "Connection error.. retrying.")
sleep(10)
except KeyError:
with open(result_loc_.joinpath('content_creation_overall_errors.log'), 'a') as f:
f.write('KeyError: Response erroneous for {}\n'.format(tenant))
break
else:
with open(result_loc_.joinpath('content_creation_overall_errors.log'), 'a') as f:
f.write('ConnectionError: Max retries reached for {}\n'.format(tenant))
result = | pd.DataFrame(tenant_list) | pandas.DataFrame |
# -*- coding: utf-8 -*-
'''
:author <NAME>
:licence MIT
'''
import pandas as pd
import time
def raw2meta_extract(fn):
"""
    Reads raw2 files including GPS and engineering information
Parameters
----------
fn : string
Path and filenmae of *.raw2 file
Returns
-------
data : pandas DataFrame
CTD (Salinity, Temperature, Fluorescence, Pressure), Pitch and Roll, Compass information
gps : pandas DataFrame
GPS position information
zoog : pandas DataFrame
Zoocam grayscale values
"""
pgain = 0.04
poff = -10
tgain = 0.001
toff = -5
sgain = 0.001
soff = -1
delta_t = 8
#get file index
print(time.ctime() + ": Processing "+fn)
print(time.ctime() + ": Generating file index...")
with open(fn) as f:
list2 = [row.split()[0] for row in f]
##########################################
#read files
##########################################
f = open(fn)
raw2 = f.readlines()
f.close()
print(time.ctime() + ": Loading CF_DIVE")
##########################################
# CF_DIVE 0F
##########################################
'''
This packet marks the present:
Nsurf = Dive-Set Number
Ncyc = Cycle Number
Npro = the profile number
uxti0 = the UNIX time that the Dive-Set
uxti1 = The Unix time this specific cycle began
For the 0901 code, the Dive-Set Number is only incremented after
surface communications (GPS and SBD) are attempted (multiple cycles
between surface communications will not increment the Dive-Set
Number, but will increment the Cycle Number).
This packet should be used to set Nsurf, Ncyc, Npro for all
proceeding packets, until the next CF_DIVE packet is encountered.
'''
cf_dive_idx = [i for i, j in enumerate(list2) if j == '0f']
cf_dive_raw = [raw2[i].split() for i in cf_dive_idx]
cf_dive = pd.DataFrame(cf_dive_raw)
cf_dive = cf_dive.iloc[:,1:]
cf_dive.columns = ['Nsurf','Ncyc','Npro','uxti0','uxti1','Dow','Month',
'day','Time','Year']
cf_dive = cf_dive.astype(dtype = {'Nsurf':'int64','Ncyc':'int64',
'Npro':'int64','uxti0':'int64',
'uxti1':'int64'})
##########################################
# CF_PDAT 11
##########################################
print(time.ctime() + ": Loading CF_PDAT")
edat_idx = [i for i, j in enumerate(list2) if j == '11']
edat_raw = [raw2[i].split() for i in edat_idx]
edat = pd.DataFrame(edat_raw)
edat = edat.iloc[:,1:9]
edat.columns = ['Nsurf','Ncyc','Npro','time','pressure','temperature',
'salinity','fluorescence']
edat = edat.astype(dtype = {'Nsurf':'int64','Ncyc': 'int64','Npro': 'int64',
'time':'float','pressure':'float',
'temperature':'float','salinity':'float',
'fluorescence':'float'} )
    edat['pressure'] = edat['pressure'] * pgain + poff  # pressure as a double; step 1 of conversion
#still need to find pmin and do p=p-pmin to convert to dBar
sal_cond = edat['salinity'] > 0
edat.loc[sal_cond, 'salinity'] = edat.loc[sal_cond,'salinity'] * sgain + soff
sal_cond = edat['temperature'] > 0
edat.loc[sal_cond, 'temperature'] = edat.loc[sal_cond,'temperature'] * tgain + toff
for var in ['salinity','temperature','fluorescence']:
cond = edat[var] <= 0
edat.loc[cond, var] = float('nan')
edat = pd.merge(edat,cf_dive)
edat['Dive_start_time'] = pd.to_datetime(edat.uxti0, unit='s')
edat['Dive_start_time'] = edat['Dive_start_time'].dt.tz_localize('UTC')
#add time_of_measure
edat['time_of_measure'] = edat['Dive_start_time'] + pd.to_timedelta(edat['time'].astype('str') + 'seconds')
#edat.time_of_measure = edat.time_of_measure.dt.tz_localize('UTC')
edat['time_of_measure_PDT'] = edat.time_of_measure - pd.to_timedelta(delta_t, unit='hours') #transform to local time as defined -8 hours not ST
#correct pressure
edat['pressure'] = edat.pressure - edat.pressure.min() #Correct pressure
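    # This completes the conversion flagged above: subtracting the minimum raw
    # reading (pmin) is the p = p - pmin step that puts pressure in dBar.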
##########################################
#CF_EDAT 21
##########################################
pr_idx = [i for i, j in enumerate(list2) if j == '21']
pr_raw = [raw2[i].split() for i in pr_idx]
pr = pd.DataFrame(pr_raw)
pr = pr.iloc[:,1:7]
pr.columns = ['Nsurf','Ncyc','Npro','compass','pitch','roll']
pr = pr.astype(dtype = {'Nsurf':'int64','Ncyc': 'int64',
'Npro': 'int64','compass':'float',
'pitch':'float','roll':'float'})
pr.loc[:,['compass','pitch', 'roll']] /= 10
print(time.ctime() + "Loading CF_GPS1")
##########################################
#CF_GPS1--start of dive-set 01
##########################################
gps1_idx = [i for i, j in enumerate(list2) if j == '01']
gps1_raw = [raw2[i].split() for i in gps1_idx]
gps1 = | pd.DataFrame(gps1_raw) | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Importing individual user data
user_1 = pd.read_csv('User_1.csv')
user_2 = | pd.read_csv('User_2.csv') | pandas.read_csv |
import h5py
import scipy.stats as st
from collections import defaultdict
import numpy as np
import pandas as pd
import copy
import lasagne
filename_whas = "data/whas/whas_train_test.h5"
filename_metabric = "data/metabric/metabric_IHC4_clinical_train_test.h5"
filename = filename_whas
datasets = defaultdict(dict)
with h5py.File(filename, 'r') as fp:
for ds in fp:
for array in fp[ds]:
datasets[ds][array] = fp[ds][array][:]
dataset = datasets['train']
covariates = pd.DataFrame(dataset['x'])
time = pd.DataFrame(dataset['t'], columns=['time'])
status = pd.DataFrame(dataset['e'], columns=['status'])
df = | pd.concat([time, status, covariates], axis=1) | pandas.concat |
import copy
import logging
import os
import sys
from pathlib import Path
from typing import Tuple
import pandas as pd
import pytest
from simod.common_routines import compute_sequence_flow_frequencies, mine_gateway_probabilities_alternative, \
mine_gateway_probabilities_alternative_with_gateway_management
from simod.configuration import Configuration, GateManagement
from simod.readers.log_reader import LogReader
from simod.readers.log_splitter import LogSplitter
from simod.replayer_datatypes import BPMNGraph
from simod.structure_optimizer import StructureOptimizer
@pytest.fixture
def args(entry_point):
args = [
{'model_path': Path(os.path.join(entry_point, 'PurchasingExample.bpmn')),
'log_path': Path(os.path.join(entry_point, 'PurchasingExample.xes'))},
]
return args
def setup_data(model_path: Path, log_path: Path):
settings = Configuration(model_path=Path(model_path), log_path=Path(log_path))
settings.fill_in_derived_fields()
log = LogReader(log_path, settings.read_options)
graph = BPMNGraph.from_bpmn_path(model_path)
return graph, log, settings
def split_log_buckets(log: LogReader, size: float, one_ts: bool) -> Tuple[pd.DataFrame, LogReader]:
# Split log data
splitter = LogSplitter(pd.DataFrame(log.data))
train, test = splitter.split_log('timeline_contained', size, one_ts)
total_events = len(log.data)
    # Check size and change time splitting method if necessary
if len(test) < int(total_events * 0.1):
train, test = splitter.split_log('timeline_trace', size, one_ts)
# Set splits
key = 'end_timestamp' if one_ts else 'start_timestamp'
test = pd.DataFrame(test)
train = | pd.DataFrame(train) | pandas.DataFrame |
__author__ = '<NAME>'
from opengrid_dev.config import Config
config = Config()
import os
import sys
import json
import jsonpickle
import datetime as dt
import pandas as pd
from requests.exceptions import HTTPError
import warnings
from tqdm import tqdm
# compatibility with py3
if sys.version_info.major >= 3:
import pickle
else:
import cPickle as pickle
import tmpo
# compatibility with py3
if sys.version_info.major >= 3:
from .site import Site
from .device import Device, Fluksometer
from .sensor import Sensor, Fluksosensor
else:
from site import Site
from device import Device, Fluksometer
from sensor import Sensor, Fluksosensor
"""
The Houseprint is a Singleton object which contains all metadata for sites, devices and sensors.
It can be pickled, saved and passed around
"""
class Houseprint(object):
def __init__(self,
gjson=None,
spreadsheet="Opengrid houseprint (Responses)",
empty_init=False
):
"""
Parameters
---------
gjson: Path to authentication json
spreadsheet: String, name of the spreadsheet containing the metadata
"""
self.sites = []
self.timestamp = dt.datetime.utcnow() # Add a timestamp upon creation
if not empty_init:
if gjson is None:
gjson = config.get('houseprint', 'json')
self.gjson = gjson
self.spreadsheet = spreadsheet
self._parse_sheet()
def reset(self):
"""
Connect to the Google Spreadsheet again and re-parse the data
"""
self.__init__(gjson=self.gjson, spreadsheet=self.spreadsheet)
if hasattr(self, '_tmpos'):
self._add_sensors_to_tmpos()
def __repr__(self):
return """
Houseprint
Created on {} (UTC)
{} sites
{} devices
{} sensors
""".format(self.timestamp,
len(self.sites),
sum([len(site.devices) for site in self.sites]),
sum([len(site.sensors) for site in self.sites])
)
def _parse_sheet(self):
"""
Connects to Google, fetches the spreadsheet and parses the content
"""
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
print('Opening connection to Houseprint sheet')
# fetch credentials
json_key = json.load(open(self.gjson))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(
json_key['client_email'],
json_key['private_key'].encode('ascii'),
scope
)
# authorize and login
gc = gspread.authorize(credentials)
gc.login()
# open sheets
print("Opening spreadsheets")
sheet = gc.open(self.spreadsheet)
sites_sheet = sheet.worksheet('Accounts')
devices_sheet = sheet.worksheet('Devices')
sensors_sheet = sheet.worksheet('Sensors')
print('Parsing spreadsheets')
# 3 sub-methods that parse the different sheets
self._parse_sites(sites_sheet)
self._parse_devices(devices_sheet)
self._parse_sensors(sensors_sheet)
print('Houseprint parsing complete')
def _parse_sites(self, sheet):
"""
Sub method of _parse_sheet() that parses only the 'sites' sheet
Parameters
----------
sheet: GSpread worksheet
sheet containing metadata about sites
"""
records = sheet.get_all_records()
for r in records:
if r['Key'] == '':
continue
new_site = Site(hp=self,
key=r['Key'],
size=r['House size'],
inhabitants=r['Number of inhabitants'],
postcode=r['postcode'],
construction_year=r['construction year'],
k_level=r['K-level'],
e_level=r['E-level'],
epc_cert=r['EPC certificate'])
self.sites.append(new_site)
print('{} Sites created'.format(len(self.sites)))
def _parse_devices(self, sheet):
"""
Sub method of _parse_sheet() that parses only the 'devices' sheet
Parameters
----------
sheet: GSpread worksheet
sheet containing metadata about devices
"""
records = sheet.get_all_records()
for r in records:
if r['Key'] == '':
continue
# find parent site and check if it exists
site = self.find_site(r['Parent site'])
if site is None:
raise ValueError('Device {} was given an invalid site key {}'.format(r['Key'], r['Parent site']))
# create a new device according to its manufacturer
if r['manufacturer'] == 'Flukso':
new_device = Fluksometer(site=site, key=r['Key'])
else:
raise NotImplementedError('Devices from {} are not supported'.format(r['manufacturer']))
# add new device to parent site
site.devices.append(new_device)
print('{} Devices created'.format(sum([len(site.devices) for site in self.sites])))
def _parse_sensors(self, sheet):
"""
Sub method of _parse_sheet() that parses only the 'sensors' sheet
Parameters
----------
sheet: GSpread worksheet
sheet containing metadata about sensors
"""
records = sheet.get_all_records()
for r in records:
if r['Sensor_id'] == '': continue
# find parent. If a parent device is specified, us that, otherwise use a parent site directly
if r['parent device'] != '':
device = self.find_device(r['parent device'])
if device is None:
raise ValueError(
'Sensor {} was given an invalid device key {}. \
Leave the device field empty if you want to add a sensor without a device'.format(
r['Sensor_id'], r['parent device']))
else:
site = self.find_site(r['parent site'])
if site is None:
raise ValueError(
'Sensor {} was given an invalid site key {}'.format(r['Sensor_id'], r['parent site']))
# create new sensor according to its manufacturer
if r['manufacturer'] == 'Flukso':
new_sensor = Fluksosensor(
device=device,
key=r['Sensor_id'],
token=r['token'],
type=r['sensor type'],
description=r['name by user'],
system=r['system'],
quantity=r['quantity'],
unit=r['unit'],
direction=r['direction'],
tariff=r['tariff'],
cumulative=None # will be determined based on type
)
else:
raise NotImplementedError('Sensors from {} are not supported'.format(r['manufacturer']))
new_sensor.device.sensors.append(new_sensor)
print('{} sensors created'.format(sum([len(site.sensors) for site in self.sites])))
def get_sensors(self, sensortype=None):
"""
Return a list with all sensors
Parameters
----------
sensortype: gas, water, electricity: optional
Returns
-------
list of sensors
"""
res = []
for site in self.sites:
for sensor in site.get_sensors(sensortype=sensortype):
res.append(sensor)
return res
def get_fluksosensors(self, **kwargs):
"""
Same thing as get_sensors, but only for fluksosensors
Parameters
----------
kwargs
Returns
-------
[Fluksosensor]
"""
return [sensor for sensor in self.get_sensors(**kwargs) if isinstance(
sensor, Fluksosensor)]
def get_devices(self):
"""
Return a list with all devices
Returns
-------
list of devices
"""
res = []
for site in self.sites:
for device in site.devices:
res.append(device)
return res
def search_sites(self, **kwargs):
"""
Parameters
----------
kwargs: any keyword argument, like key=mykey
Returns
-------
List of sites satisfying the search criterion or empty list if no
variable found.
"""
result = []
for site in self.sites:
for keyword, value in kwargs.items():
if getattr(site, keyword) == value:
continue
else:
break
else:
result.append(site)
return result
def search_sensors(self, **kwargs):
"""
Parameters
----------
kwargs: any keyword argument, like key=mykey
Returns
-------
List of sensors satisfying the search criterion or empty list if no
variable found.
"""
result = []
for sensor in self.get_sensors():
for keyword, value in kwargs.items():
if value in getattr(sensor, keyword):
continue
else:
break
else:
result.append(sensor)
return result
def find_site(self, key):
"""
Parameters
----------
key: string
Returns
-------
Site
"""
for site in self.sites:
if site.key == key:
return site
return None
def find_device(self, key):
"""
Parameters
----------
key: string
Returns
-------
Device
"""
for device in self.get_devices():
if device.key.lower() == key.lower():
return device
return None
def find_sensor(self, key):
"""
Parameters
----------
key: string
Returns
-------
Sensor
"""
for sensor in self.get_sensors():
if sensor.key.lower() == key.lower():
return sensor
return None
def save(self, filename, pickle_format='jsonpickle'):
"""
Save the houseprint object
Parameters
----------
* filename : str
Filename, if relative path or just filename, it is appended to the
current working directory
pickle_format : str
'jsonpickle' or 'pickle'
pickle may be more robust, but jsonpickle should be compatible
across python versions
"""
# temporarily delete tmpo session
try:
tmpos_tmp = self._tmpos
delattr(self, '_tmpos')
except:
pass
abspath = os.path.join(os.getcwd(), filename)
if pickle_format == 'jsonpickle':
with open(abspath, 'w') as f:
frozen = jsonpickle.encode(self)
f.write(frozen)
elif pickle_format == 'pickle':
with open(abspath, 'wb') as f:
pickle.dump(self, file=f)
else:
raise NotImplementedError("Pickle format '{}' is not supported".format(pickle_format))
print("Saved houseprint to {}".format(abspath))
# restore tmposession if needed
try:
setattr(self, '_tmpos', tmpos_tmp)
except:
pass
def init_tmpo(self, tmpos=None, path_to_tmpo_data=None):
"""
Flukso sensors need a tmpo session to obtain data.
It is overkill to have each flukso sensor make its own session, syncing would
take too long and be overly redundant.
Passing a tmpo session to the get_data function is also bad form because
we might add new types of sensors that don't use tmpo in the future.
This is why the session is initialised here.
A tmpo session as parameter is optional. If passed, no additional sensors are added.
If no session is passed, a new one will be created using the location in the config file.
It will then be populated with the flukso sensors known to the houseprint object
Parameters
----------
tmpos : tmpo session
path_to_tmpo_data : str
"""
if tmpos is not None:
self._tmpos = tmpos
else:
try:
path_to_tmpo_data = config.get('tmpo', 'data')
except:
path_to_tmpo_data = None
self._tmpos = tmpo.Session(path_to_tmpo_data)
self._add_sensors_to_tmpos()
print("Using tmpo database from {}".format(self._tmpos.db))
def _add_sensors_to_tmpos(self):
"""
Add all flukso sensors in the houseprint to the tmpo session
"""
for sensor in self.get_fluksosensors():
self._tmpos.add(sensor.key, sensor.token)
def get_tmpos(self):
"""
Returns
-------
TMPO session
"""
if hasattr(self, '_tmpos'):
return self._tmpos
else:
self.init_tmpo()
return self._tmpos
@property
def tmpos(self):
return self.get_tmpos()
def sync_tmpos(self, http_errors='warn'):
"""
Add all Flukso sensors to the TMPO session and sync
Parameters
----------
http_errors : 'raise' | 'warn' | 'ignore'
default 'warn'
define what should be done with TMPO Http-errors
"""
tmpos = self.get_tmpos()
for sensor in tqdm(self.get_fluksosensors()):
try:
warnings.simplefilter('ignore')
tmpos.sync(sensor.key)
warnings.simplefilter('default')
except HTTPError as e:
warnings.simplefilter('default')
if http_errors == 'ignore':
continue
elif http_errors == 'warn':
warnings.warn(message='Error for SensorID: ' + sensor.key
+ str(e))
else:
print('Error for SensorID: ' + sensor.key)
raise e
def get_data(self, sensors=None, sensortype=None, head=None, tail=None, diff='default', resample='min',
unit='default'):
"""
Return a Pandas Dataframe with joined data for the given sensors
Parameters
----------
sensors : list of Sensor objects
If None, use sensortype to make a selection
sensortype : string (optional)
gas, water, electricity. If None, and Sensors = None,
all available sensors in the houseprint are fetched
head, tail: timestamps,
diff : bool or 'default'
If True, the original data will be differentiated
If 'default', the sensor will decide: if it has the attribute
cumulative==True, the data will be differentiated.
resample : str (default='min')
Sampling rate, if any. Use 'raw' if no resampling.
unit : str , default='default'
String representation of the target unit, eg m**3/h, kW, ...
"""
if sensors is None:
sensors = self.get_sensors(sensortype)
series = [sensor.get_data(head=head, tail=tail, diff=diff, resample=resample, unit=unit) for sensor in sensors]
# workaround for https://github.com/pandas-dev/pandas/issues/12985
series = [s for s in series if not s.empty]
if series:
df = | pd.concat(series, axis=1) | pandas.concat |
import os
import logging
import time
from pathlib import Path
import requests
import re
from concurrent import futures
import pandas as pd
from tqdm import tqdm
from build_eodhd_map import MAP_YAHOO, MAP_EODHD
_logger = logging.getLogger(__name__)
# Read eodhistoricaldata.com token fron environment -- or insert into code
EODHD_TOKEN = os.getenv("NUMERAI_EODHD_TOKEN", "your_eodhd_api_key")
DB_FOLDER = Path("./db/")
DATA_FOLDER = Path("./data/")
MAP_FILE = DB_FOLDER / "eodhd-map.csv"
QUOTE_FOLDER = Path(DATA_FOLDER / "ticker_bin")
_RETRY_COUNT = 3
_RETRY_WAIT = 25
_MAX_WORKERS = 10
def yahoo_download_one(signals_ticker: str) -> pd.DataFrame:
start_epoch = int(946684800) # 2000-01-01
end_epoch = int(time.time())
quotes = None
quotes = (
pd.read_csv(
f"https://query1.finance.yahoo.com/v7/finance/download/{signals_ticker}?period1={start_epoch}&period2={end_epoch}&interval=1d&events=history&includeAdjustedClose=true"
)
.dropna()
.set_index("Date")
)
if quotes is not None and len(quotes) > 1:
quotes["date64"] = pd.to_datetime(quotes.index, format="%Y-%m-%d")
quotes = quotes.reset_index(drop=True).set_index("date64").sort_index()
quotes.index.name = "date"
quotes.columns = [
"open",
"high",
"low",
"close",
"adjusted_close",
"volume",
]
return quotes
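# Usage sketch for yahoo_download_one (ticker chosen purely for illustration;
# requires network access):
# quotes = yahoo_download_one("AAPL")
# normally yields a date-indexed DataFrame with columns
# open/high/low/close/adjusted_close/volume.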
def eodhd_download_one(signals_ticker: str) -> pd.DataFrame:
start_date = "2000-01-01"
quotes = None
r = requests.get(
f"https://eodhistoricaldata.com/api/eod/{signals_ticker}?from={start_date}&fmt=json&api_token={EODHD_TOKEN}"
)
if r.status_code == requests.codes.ok:
if len(r.json()) > 0:
quotes = pd.DataFrame(r.json()).set_index("date")
quotes["date64"] = pd.to_datetime(quotes.index, format="%Y-%m-%d")
quotes = quotes.reset_index(drop=True).set_index("date64").sort_index()
quotes.index.name = "date"
quotes.columns = [
"open",
"high",
"low",
"close",
"adjusted_close",
"volume",
]
return quotes
def download_one(bloomberg_ticker: str, map: pd.DataFrame):
yahoo_ticker = map.loc[bloomberg_ticker, "yahoo"]
signals_ticker = map.loc[bloomberg_ticker, "signals_ticker"]
data_provider = map.loc[bloomberg_ticker, "data_provider"]
if pd.isnull(signals_ticker):
return bloomberg_ticker, None
quotes = None
for _ in range(_RETRY_COUNT):
try:
if data_provider == MAP_EODHD:
quotes = eodhd_download_one(signals_ticker)
elif data_provider == MAP_YAHOO:
quotes = yahoo_download_one(signals_ticker)
break
except Exception as ex:
_logger.warning(f"download_one, ticker:{bloomberg_ticker}, exception:{ex}")
time.sleep(_RETRY_WAIT)
return bloomberg_ticker, quotes
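# Note: download_one retries each download up to _RETRY_COUNT times, sleeping
# _RETRY_WAIT seconds after a failure, and returns (bloomberg_ticker, None) when
# no signals ticker is mapped or every attempt fails.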
def make_filename_safe(bloomberg_ticker):
return re.sub(r"[^\w-]", "_", bloomberg_ticker.lower()) + ".pkl"
def download_save_all(ticker_map):
# Shuffle the download order to balance load and wait times with data providers
tickers = pd.Series(ticker_map.index).sample(frac=1).unique().tolist()
with futures.ThreadPoolExecutor(_MAX_WORKERS) as executor:
_futures = []
for ticker in tickers:
_futures.append(
executor.submit(
download_one,
bloomberg_ticker=ticker,
map=ticker_map,
)
)
for future in tqdm(futures.as_completed(_futures), total=len(tickers)):
bloomberg_ticker, quotes = future.result()
if quotes is not None:
quotes.to_pickle(QUOTE_FOLDER / make_filename_safe(bloomberg_ticker))
def read_quotes(bloomberg_ticker):
filename = Path(QUOTE_FOLDER / make_filename_safe(bloomberg_ticker))
if filename.exists():
quotes = | pd.read_pickle(filename) | pandas.read_pickle |
"""Scraping and parsing amazon"""
__author__ = 'thor'
import os
from ut.util.importing import get_environment_variable
import ut as ms
import ut.dacc.mong.util
import pandas as pd
import numpy as np
import requests
import re
from BeautifulSoup import BeautifulSoup as bs3_BeautifulSoup
from datetime import timedelta
from datetime import datetime
from pymongo import MongoClient
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as dates
from ut.serialize.s3 import S3
import tempfile
from ut.viz.util import insert_nans_in_x_and_y_when_there_is_a_gap_in_x
import pylab
class Amazon(object):
url_template = dict()
url_template['product_page'] = 'http://www.amazon.{country}/dp/{asin}/'
url_template['product_reviews'] = 'http://www.amazon.{country}/product-reviews/{asin}/'
regexp = dict()
regexp['nreviews_re'] = {'com': re.compile('\d[\d,]*(?= customer review)'),
'co.uk': re.compile('\d[\d,]*(?= customer review)'),
'in': re.compile('\d[\d,]*(?= customer review)'),
'de': re.compile('\d[\d\.]*(?= Kundenrezens\w\w)')}
regexp['no_reviews_re'] = {'com': re.compile('no customer reviews'),
'co.uk': re.compile('no customer reviews'),
'in': re.compile('no customer reviews'),
'de': re.compile('Noch keine Kundenrezensionen')}
# regexp['average_rating_re'] = {'com': re.compile('')}
default = dict()
default['country'] = 'com'
# default['requests_kwargs'] = {}
default['requests_kwargs'] = {
'proxies': {'http': 'http://us.proxymesh.com:31280'},
'auth': requests.auth.HTTPProxyAuth(get_environment_variable('PROXYMESH_USER'),
get_environment_variable('PROXYMESH_PASS'))
}
@classmethod
def url(cls, what='product_page', **kwargs):
kwargs = dict(Amazon.default, **kwargs)
        return cls.url_template[what].format(**kwargs)
@classmethod
def slurp(cls, what='product_page', **kwargs):
kwargs = dict(Amazon.default, **kwargs)
r = requests.get(Amazon.url(what=what, **kwargs), **Amazon.default['requests_kwargs'])
if r.status_code == 200:
return r.text
else: # try again and return no matter what
r = requests.get(Amazon.url(what=what, **kwargs), **Amazon.default['requests_kwargs'])
return r.text
# @classmethod
# def get_dynamic_book_info(cls, asin, **kwargs):
# html = Amazon.slurp(what='product_page', **kwargs)
# b = bs3_BeautifulSoup(b)
@classmethod
def get_info(cls, asin, country='co.uk', **kwargs):
info = {'date': datetime.now()}
info = dict(info, **{'sales_ranks': cls.get_sales_rank(asin, country='co.uk', **kwargs)})
# info = dict(info, **{'num_of_reviews': cls.get_number_of_reviews(asin, country='co.uk', **kwargs)})
return info
@classmethod
def get_sales_rank(cls, **kwargs):
html = Amazon.slurp(what='product_page', **kwargs)
sales_rank = [Amazon.parse_sales_rank(html, **kwargs)]
sales_rank += Amazon.parse_sales_sub_rank(html, **kwargs)
return sales_rank
    @classmethod
    def parse_product_title(cls, b, **kwargs):  # NOTE: shadowed by the second parse_product_title definition further below
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
return b.find('span', attrs={'id': 'productTitle'}).text
@classmethod
def parse_sales_rank(cls, b, **kwargs):
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
t = b.find('li', attrs={'id': re.compile('SalesRank')})
sales_rank_re = re.compile('(\d[\d,]+) in ([\w\ ]+)')
tt = sales_rank_re.findall(t.text)
return {'sales_rank': int(re.compile('\D').sub('', tt[0][0])),
'sales_rank_category': tt[0][1].strip(' ')}
@classmethod
def parse_sales_sub_rank(cls, b, **kwargs):
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
t = b.find('li', attrs={'id': re.compile('SalesRank')})
tt = t.findAll('li', 'zg_hrsr_item')
sales_sub_rank = list()
for tti in tt:
d = dict()
d['sales_rank'] = int(re.compile('\D').sub('', tti.find('span', 'zg_hrsr_rank').text))
ttt = tti.find('span', 'zg_hrsr_ladder')
ttt = ttt.text.split(' ')[1]
d['sales_rank_category'] = ttt.split('>')
sales_sub_rank.append(d)
return sales_sub_rank
@classmethod
def parse_avg_rating(cls, b, **kwargs):
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
t = b.find('span', 'reviewCountTextLinkedHistogram')
return float(re.compile('[\d\.]+').findall(t['title'])[0])
@classmethod
def parse_product_title(cls, b, **kwargs):
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
t = b.find('div', attrs={'id': 'title'})
return t.find('span', attrs={'id': 'productTitle'}).text
@staticmethod
def test_rating_scrape_with_vanessas_book():
html = Amazon.slurp(what='product_page', country_ext='.co.uk', asin='1857886127')
@staticmethod
def get_number_of_reviews(asin, country, **kwargs):
url = 'http://www.amazon.{country}/product-reviews/{asin}'.format(country=country, asin=asin)
html = requests.get(url).text
try:
return int(re.compile('\D').sub('', Amazon.regexp['nreviews_re'][country].search(html).group(0)))
except Exception:
if Amazon.regexp['no_reviews_re'][country].search(html):
return 0
else:
return None # to distinguish from 0, and handle more cases if necessary
class AmazonBookWatch(object):
default = dict()
default['product_list'] = [
{'title': 'The Nanologues', 'asin': '9350095173'},
{'title': 'Never mind the bullocks', 'asin': '1857886127'},
{'title': 'The Other Side of Paradise', 'asin': '1580055311'}
]
default['watch_list'] = [
{'title': 'The Nanologues', 'asin': '9350095173', 'country': 'in'},
{'title': 'The Nanologues', 'asin': '9350095173', 'country': 'co.uk'},
{'title': 'The Nanologues', 'asin': '9350095173', 'country': 'com'},
{'title': 'Never mind the bullocks', 'asin': '1857886127', 'country': 'in'},
{'title': 'Never mind the bullocks', 'asin': '1857886127', 'country': 'co.uk'},
{'title': 'Never mind the bullocks', 'asin': '1857886127', 'country': 'com'},
{'title': 'The Other Side of Paradise', 'asin': '1580055311', 'country': 'com'},
{'title': 'The Other Side of Paradise', 'asin': '1580055311', 'country': 'co.uk'},
{'title': "Heaven's Harlots (Paperback)", 'asin': '0688170129', 'country': 'com'},
{'title': "Heaven's Harlots (Hardcover)", 'asin': '0688155049', 'country': 'com'},
{'title': "Women on Ice", 'asin': '0813554594', 'country': 'com'}
]
default['frequency_in_hours'] = 1
default['max_date_ticks'] = 200
default['stats_num_of_days'] = 1
default['figheight'] = 3
default['figwidth'] = 14
default['linewidth'] = 3
default['tick_font_size'] = 13
default['label_fontsize'] = 13
default['title_fontsize'] = 15
default['line_style'] = '-bo'
default['facecolor'] = 'blue'
default['save_format'] = 'png'
default['dpi'] = 40
default['book_info_html_template'] = '''<hr>
<h3>{book_title} - {country} - {num_of_reviews} reviews </h3>
'''
default['category_html'] = '<img style="box-shadow: 3px 3px 5px 6px #ccc;" src={image_url}>'
db = MongoClient()['misc']['book_watch']
def __init__(self, **kwargs):
self.s3 = S3(bucket_name='public-ut-images', access_key='ut')
attribute_name = 'product_list'
setattr(self, attribute_name, kwargs.get(attribute_name, None) or AmazonBookWatch.default[attribute_name])
attribute_name = 'watch_list'
setattr(self, attribute_name, kwargs.get(attribute_name, None) or AmazonBookWatch.default[attribute_name])
def asin_of_title(self, title):
the_map = {k: v for k, v in zip([x['title'] for x in self.product_list], [x['asin'] for x in self.product_list])}
return the_map[title]
def get_book_statuses(self):
now = datetime.now()
info_list = list()
for book in self.watch_list:
try:
info = dict({'date': now}, **book)
info = dict(info, **{'sale_ranks': Amazon.get_sales_rank(**book)})
info = dict(info, **{'num_of_reviews': Amazon.get_number_of_reviews(**book)})
info_list.append(info)
except Exception:
continue
return info_list
@staticmethod
def cursor_to_df(cursor):
d = ms.dacc.mong.util.to_df(cursor, 'sale_ranks')
d = process_sales_rank_category(d)
return d
@staticmethod
def get_min_max_sales_rank_dates(book_info):
cumul = list()
for x in list(book_info['sales_rank'].values()):
try:
cumul += x['data']['date'].tolist()
except Exception:
raise
return [np.min(cumul), np.max(cumul)]
def mk_book_info(self, title, country, **kwargs):
book_info = dict()
kwargs = dict(kwargs, **self.default)
d = AmazonBookWatch.cursor_to_df(self.db.find(spec={'title': title, 'country': country})
.sort([('_id', -1)]).limit(kwargs['max_date_ticks']))
book_info['num_reviews'] = np.max(d['num_of_reviews'])
book_info['sales_rank'] = dict()
d = d[['date', 'sales_rank_category', 'sales_rank_subcategory', 'sales_rank']]
categories = np.unique(d['sales_rank_category'])
for c in categories:
dd = d[d['sales_rank_category'] == c].sort('date', ascending=True)
book_info['sales_rank'][c] = dict()
book_info['sales_rank'][c]['sales_rank_subcategory'] = dd['sales_rank_subcategory'].iloc[0]
dd = dd[['date', 'sales_rank']]
book_info['sales_rank'][c]['data'] = dd
ddd = dd[dd['date'] > datetime.now() - timedelta(days=kwargs['stats_num_of_days'])]
book_info['sales_rank'][c]['rank_stats'] = pd.DataFrame([{
'hi_rank': np.min(ddd['sales_rank']),
'mean_rank': np.round(np.mean(ddd['sales_rank'])),
'lo_rank': np.max(ddd['sales_rank'])
}])
book_info['sales_rank'][c]['rank_stats'] = \
book_info['sales_rank'][c]['rank_stats'][['hi_rank', 'mean_rank', 'lo_rank']]
book_info['commun_date_range'] = self.get_min_max_sales_rank_dates(book_info)
return book_info
def mk_sales_rank_plot(self, d, category='', save_filename=True, **kwargs):
kwargs = dict(kwargs, **self.default)
if isinstance(d, dict):
if 'sales_rank' in list(d.keys()):
d = d['sales_rank'][category]['data']
elif category in list(d.keys()):
d = d[category]['data']
elif 'data' in list(d.keys()):
d = d['data']
else:
raise ValueError('Your dict must have a "data" key or a %s key' % category)
d = d.sort('date')
x = [xx.to_datetime() for xx in d['date']]
y = list(d['sales_rank'])
gap_thresh = timedelta(seconds=kwargs['frequency_in_hours'] * 4.1 * 3600)
x, y = insert_nans_in_x_and_y_when_there_is_a_gap_in_x(x, y, gap_thresh=gap_thresh)
fig, ax = plt.subplots(1)
fig.set_figheight(kwargs['figheight'])
fig.set_figwidth(kwargs['figwidth'])
ax.plot(x, y, kwargs['line_style'], linewidth=kwargs['linewidth'])
commun_date_range = kwargs.get('commun_date_range', None)
if commun_date_range:
pylab.xlim(kwargs['commun_date_range'])
ax.fill_between(x, y, max(y), facecolor=kwargs['facecolor'], alpha=0.5)
# plt.ylabel('Amazon (%s) Sales Rank' % category, fontsize=kwargs['label_fontsize'])
plot_title = kwargs.get('plot_title', 'Amazon (%s) Sales Rank' % category)
plt.title(plot_title, fontsize=kwargs['title_fontsize'])
plt.tick_params(axis='y', which='major', labelsize=kwargs['tick_font_size'])
# plt.tick_params(axis='x', which='major', labelsize=kwargs['tick_font_size'])
plt.tick_params(axis='x', which='minor', labelsize=kwargs['tick_font_size'])
plt.gca().invert_yaxis()
# ax.xaxis.set_minor_locator(dates.WeekdayLocator(byweekday=(1), interval=1))
ax.xaxis.set_minor_locator(dates.DayLocator(interval=1))
ax.xaxis.set_minor_formatter(dates.DateFormatter('%a\n%d %b'))
ax.xaxis.grid(True, which="minor")
ax.yaxis.grid()
ax.xaxis.set_major_locator(dates.MonthLocator())
# ax.xaxis.set_major_formatter(dates.DateFormatter('\n\n\n%b\n%Y'))
plt.tight_layout()
if save_filename:
if isinstance(save_filename, str):
save_filename = save_filename + '.' + kwargs['save_format']
else: # save to temp file
save_filename = tempfile.NamedTemporaryFile().name
plt.savefig(save_filename, format=kwargs['save_format'], dpi=kwargs['dpi'])
return save_filename
else:
return None
def mk_book_info_html(self, title, country, **kwargs):
kwargs = dict(kwargs, **self.default)
book_info = self.mk_book_info(title, country, **kwargs)
html = kwargs['book_info_html_template'].format(
book_title=title,
country=country,
num_of_reviews=book_info['num_reviews']
)
html = html + "<br>\n"
for category in list(book_info['sales_rank'].keys()):
# make and save a graph, send to s3, and return a url for it
file_name = self.mk_sales_rank_plot(
d=book_info['sales_rank'],
category=category, save_filename=True,
commun_date_range=book_info['commun_date_range'],
plot_title='Amazon.%s (%s) Sales Rank' % (
country, book_info['sales_rank'][category]['sales_rank_subcategory']),
**kwargs
)
s3_key_name = '{title} - {country} - {category} - {date}.png'.format(
title=title,
country=country,
category=category,
date=datetime.now().strftime('%Y%m%d')
)
self.s3.dumpf(file_name, s3_key_name)
image_url = self.s3.get_http_for_key(s3_key_name)
html = html + kwargs['category_html'].format(
image_url=image_url
) + "<br>\n"
# html = html + "\n<br>"
return html
def mk_html_report(self, title_country_list=None):
title_country_list = title_country_list or [
{'title': 'Never mind the bullocks', 'country': 'co.uk'},
{'title': 'Never mind the bullocks', 'country': 'com'},
{'title': 'The Nanologues', 'country': 'in'}
]
html = ''
html += 'Stats of the last 24 hours:<br>'
d = | pd.DataFrame() | pandas.DataFrame |
import sys
import click
import pandas as pd
import numpy as np
def make_df(file, label):
df = | pd.read_csv(file) | pandas.read_csv |
import pandas as pd
from ..utils import constants, plot, utils
import numpy as np
from warnings import warn
from shapely.geometry import Polygon, Point
import geopandas as gpd
from .flowdataframe import FlowDataFrame
from skmob.preprocessing import routing
class TrajSeries(pd.Series):
@property
def _constructor(self):
return TrajSeries
@property
def _constructor_expanddim(self):
return TrajDataFrame
class TrajDataFrame(pd.DataFrame):
"""TrajDataFrame.
A TrajDataFrame object is a pandas.DataFrame that has three columns latitude, longitude and datetime. TrajDataFrame accepts the following keyword arguments:
Parameters
----------
data : list or dict or pandas DataFrame
the data that must be embedded into a TrajDataFrame.
latitude : int or str, optional
the position or the name of the column in `data` containing the latitude. The default is `constants.LATITUDE`.
longitude : int or str, optional
the position or the name of the column in `data` containing the longitude. The default is `constants.LONGITUDE`.
datetime : int or str, optional
the position or the name of the column in `data` containing the datetime. The default is `constants.DATETIME`.
user_id : int or str, optional
the position or the name of the column in `data`containing the user identifier. The default is `constants.UID`.
trajectory_id : int or str, optional
the position or the name of the column in `data` containing the trajectory identifier. The default is `constants.TID`.
timestamp : boolean, optional
it True, the datetime is a timestamp. The default is `False`.
crs : dict, optional
the coordinate reference system of the geographic points. The default is `{"init": "epsg:4326"}`.
parameters : dict, optional
parameters to add to the TrajDataFrame. The default is `{}` (no parameters).
Examples
--------
>>> import skmob
>>> # create a TrajDataFrame from a list
>>> data_list = [[1, 39.984094, 116.319236, '2008-10-23 13:53:05'], [1, 39.984198, 116.319322, '2008-10-23 13:53:06'], [1, 39.984224, 116.319402, '2008-10-23 13:53:11'], [1, 39.984211, 116.319389, '2008-10-23 13:53:16']]
>>> tdf = skmob.TrajDataFrame(data_list, latitude=1, longitude=2, datetime=3)
>>> print(tdf.head())
0 lat lng datetime
0 1 39.984094 116.319236 2008-10-23 13:53:05
1 1 39.984198 116.319322 2008-10-23 13:53:06
2 1 39.984224 116.319402 2008-10-23 13:53:11
3 1 39.984211 116.319389 2008-10-23 13:53:16
>>> print(type(tdf))
<class 'skmob.core.trajectorydataframe.TrajDataFrame'>
>>>
>>> # create a TrajDataFrame from a pandas DataFrame
>>> import pandas as pd
>>> # create a DataFrame from the previous list
>>> data_df = pd.DataFrame(data_list, columns=['user', 'latitude', 'lng', 'hour'])
>>> print(type(data_df))
<class 'pandas.core.frame.DataFrame'>
>>> tdf = skmob.TrajDataFrame(data_df, latitude='latitude', datetime='hour', user_id='user')
>>> print(type(tdf))
<class 'skmob.core.trajectorydataframe.TrajDataFrame'>
>>> print(tdf.head())
uid lat lng datetime
0 1 39.984094 116.319236 2008-10-23 13:53:05
1 1 39.984198 116.319322 2008-10-23 13:53:06
2 1 39.984224 116.319402 2008-10-23 13:53:11
3 1 39.984211 116.319389 2008-10-23 13:53:16
"""
_metadata = ['_parameters', '_crs'] # All the metadata that should be accessible must be also in the metadata method
def __init__(self, data, latitude=constants.LATITUDE, longitude=constants.LONGITUDE, datetime=constants.DATETIME,
user_id=constants.UID, trajectory_id=constants.TID,
timestamp=False, crs={"init": "epsg:4326"}, parameters={}):
original2default = {latitude: constants.LATITUDE,
longitude: constants.LONGITUDE,
datetime: constants.DATETIME,
user_id: constants.UID,
trajectory_id: constants.TID}
columns = None
if isinstance(data, pd.DataFrame):
tdf = data.rename(columns=original2default)
columns = tdf.columns
# Dictionary
elif isinstance(data, dict):
tdf = pd.DataFrame.from_dict(data).rename(columns=original2default)
columns = tdf.columns
# List
elif isinstance(data, list) or isinstance(data, np.ndarray):
tdf = data
columns = []
num_columns = len(data[0])
for i in range(num_columns):
try:
columns += [original2default[i]]
except KeyError:
columns += [i]
elif isinstance(data, pd.core.internals.BlockManager):
tdf = data
else:
raise TypeError('DataFrame constructor called with incompatible data and dtype: {e}'.format(e=type(data)))
super(TrajDataFrame, self).__init__(tdf, columns=columns)
# Check crs consistency
        if crs is None:
            warn("crs will be set to the default crs WGS84 (EPSG:4326).")
            crs = {"init": "epsg:4326"}
        if not isinstance(crs, dict):
            raise TypeError('crs must be a dict type.')
self._crs = crs
if not isinstance(parameters, dict):
raise AttributeError("parameters must be a dictionary.")
self._parameters = parameters
if self._has_traj_columns():
self._set_traj(timestamp=timestamp, inplace=True)
def _has_traj_columns(self):
if (constants.DATETIME in self) and (constants.LATITUDE in self) and (constants.LONGITUDE in self):
return True
return False
def _is_trajdataframe(self):
if ((constants.DATETIME in self) and pd.core.dtypes.common.is_datetime64_any_dtype(self[constants.DATETIME]))\
and ((constants.LONGITUDE in self) and pd.core.dtypes.common.is_float_dtype(self[constants.LONGITUDE])) \
and ((constants.LATITUDE in self) and pd.core.dtypes.common.is_float_dtype(self[constants.LATITUDE])):
return True
return False
def _set_traj(self, timestamp=False, inplace=False):
if not inplace:
frame = self.copy()
else:
frame = self
if timestamp:
frame[constants.DATETIME] = pd.to_datetime(frame[constants.DATETIME], unit='s')
if not pd.core.dtypes.common.is_datetime64_any_dtype(frame[constants.DATETIME].dtype):
frame[constants.DATETIME] = pd.to_datetime(frame[constants.DATETIME])
if not | pd.core.dtypes.common.is_float_dtype(frame[constants.LONGITUDE].dtype) | pandas.core.dtypes.common.is_float_dtype |
import requests
import json
import pandas as pd
import numpy as np
import datetime as dt
import psycopg2 as pg
import pymongo as pm
import os
import eod_api
api = eod_api.api
e_date = (dt.datetime.now() - dt.timedelta(1)).strftime('%Y-%m-%d')
error_log = []
def get_db_exchanges():
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
command = 'SELECT * FROM exchange'
cur.execute(command)
data = cur.fetchall()
db_exchanges_df = pd.DataFrame(
data,
columns = [
'id',
'code',
'name',
'short_name',
'country',
'currency',
'created_date',
'last_updated_date',
'last_price_update_date',
'last_fundamental_update_date'
]
)
db_exchanges_df.set_index('id', inplace = True)
cur.close()
con.close()
return db_exchanges_df
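# A minimal usage sketch for the exchange helper above. It assumes a local PostgreSQL
# server exposing the 'securities_master' database used throughout this module; the
# 'US' code filter is purely illustrative.
def _demo_db_exchanges():
    exchanges = get_db_exchanges()
    us_rows = exchanges[exchanges['code'] == 'US']
    return us_rows[['code', 'name', 'country', 'currency']]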
def get_db_fundamentals(instrument_id):
pass
def get_db_fund_watchlist():
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
command = 'SELECT * FROM fund_watchlist'
cur.execute(command)
data = cur.fetchall()
db_fund_watchlist_df = pd.DataFrame(
data,
columns = ['id', 'instrument_id', 'created_date', 'last_updated_date']
)
db_fund_watchlist_df.set_index('id', inplace = True)
cur.close()
con.close()
return db_fund_watchlist_df
def get_db_indices():
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
command = 'SELECT * FROM benchmark_index'
cur.execute(command)
data = cur.fetchall()
db_indices_df = pd.DataFrame(
data,
columns = [
'id',
'short_name',
'name',
'city',
'country',
'timezone_offset',
'created_date',
'last_updated_date'
]
)
db_indices_df.set_index('id', inplace = True)
cur.close()
con.close()
return db_indices_df
def get_db_instruments(exchange_id = None):
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
    if exchange_id is None:
command = 'SELECT * FROM instrument'
else:
command = f'SELECT * FROM instrument WHERE exchange_id = {exchange_id}'
# command = ('SELECT sym.id, sym.index_id, sym.ticker, bm.id, bm.short_name FROM symbol AS sym'
# 'JOIN benchmark_index AS bm ON (sym.index_id = bm.id')
cur.execute(command)
data = cur.fetchall()
cols = [
'id',
'exchange_id',
'ticker',
'instrument_type',
'name',
'currency',
'created_date',
'last_updated_date'
]
db_instruments_df = pd.DataFrame(
data,
columns = cols
)
db_instruments_df.set_index('id', inplace = True)
cur.close()
con.close()
return db_instruments_df
def get_db_price(instrument_id = None, price_date = None, include_ticker = False):
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
cols = [
'id',
'data_vendor_id',
'instrument_id',
'price_date',
'created_date',
'last_updated_date',
'open_price',
'high_price',
'low_price',
'close_price',
'adj_close_price',
'volume'
]
if include_ticker:
ticker_join = ' JOIN instrument ON (daily_price.instrument_id = instrument.id)'
instr_cols = [
'inst_id',
'exchange_id',
'ticker',
'instrument_type',
'name',
'currency',
'inst_created_date',
'inst_last_update_date'
]
cols.extend(instr_cols)
else:
ticker_join = ''
    if (instrument_id is None) and (price_date is None):
        command = f'SELECT * FROM daily_price{ticker_join}'
    elif (instrument_id is not None) and (price_date is None):
        command = f'SELECT * FROM daily_price{ticker_join} WHERE instrument_id = {instrument_id}'
    elif (instrument_id is None) and (price_date is not None):
        command = f'SELECT * FROM daily_price{ticker_join} WHERE price_date = \'{price_date}\''
    else:
        command = (f'SELECT * FROM daily_price{ticker_join} '
                   f'WHERE instrument_id = {instrument_id} AND price_date = \'{price_date}\'')
cur.execute(command)
data = cur.fetchall()
db_prices_df = pd.DataFrame(
data,
columns = cols
)
db_prices_df.set_index('id', inplace = True)
if include_ticker:
drop_cols = [
'inst_id',
'exchange_id',
# 'ticker',
'instrument_type',
'name',
'currency',
'inst_created_date',
'inst_last_update_date'
]
db_prices_df.drop(drop_cols, axis = 1, inplace = True)
cur.close()
con.close()
return db_prices_df
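# Hedged example for the price query helper above: pull every stored price row for one
# instrument (the id value 42 is illustrative) and turn it into a date-indexed series
# of adjusted closes.
def _demo_db_price_history(instrument_id=42):
    prices = get_db_price(instrument_id=instrument_id, include_ticker=True)
    prices = prices.sort_values('price_date')
    return prices.set_index('price_date')['adj_close_price']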
def get_eod_bulk_price(ex, e_date = e_date):
'''
Parameters
----------
ex : string : exchange (eg. US)
Returns
-------
df : pandas dataframe
'''
url = (
f'http://eodhistoricaldata.com/api/eod-bulk-last-day/{ex}'
f'?api_token={api}&fmt=json&date={e_date}'
)
response = requests.get(url)
data = response.text
bulk_data = pd.read_json(data)
# bulk_data = json.loads(data)
return bulk_data
def get_eod_constituents(index, s_date = '1990-01-01'):
url = (f'https://eodhistoricaldata.com/api/fundamentals/{index}.INDX?'
f'api_token={api}&historical=1&from={s_date}&to={e_date}')
response = requests.get(url)
data = response.text
df = pd.read_json(data)
general_info = df['General'].dropna()
constituents = df['Components'].dropna()
if constituents.shape[0] > 0:
constituent_keys = list(constituents[0].keys())
constituent_values = [list(i.values()) for i in constituents]
constituents = pd.DataFrame.from_records(constituent_values, columns = constituent_keys)
return constituents, general_info
def get_eod_corp_act(sec, ex, corp_act_type, s_date = '1900-01-01'):
'''
Parameters
----------
sec : string : security (eg. AAPL)
ex : string : exchange (eg. US)
corp_act_type: type of corporate action ('div', 'splits', 'shorts')
s_date : string : 'yyyy-mm-dd' format
Returns
-------
df : pandas dataframe
'''
valid_types = ['div', 'splits', 'shorts']
if corp_act_type in valid_types:
url = (f'https://eodhistoricaldata.com/api/{corp_act_type}/'
f'{sec}.{ex}?api_token={api}&from={s_date}&fmt=json')
response = requests.get(url)
data = response.text
df = pd.read_json(data).T
df.set_index('date', inplace = True)
return df
else:
print('Not a valid corporate action type.')
def get_eod_etf(sec, ex, s_date = '1900-01-01'):
'''
Parameters
----------
sec : string : security (eg. AAPL)
ex : string : exchange (eg. US)
s_date : string : 'yyyy-mm-dd' format
Returns
-------
df : pandas dataframe
'''
url = (f'https://eodhistoricaldata.com/api/fundamentals/{sec}.{ex}?'
f'api_token={api}&historical=1&from={s_date}&to={e_date}')
response = requests.get(url)
data = response.text
df = pd.read_json(data)
return df
def get_eod_exchanges(format = 'df'):
valid_formats = ['json', 'df']
if format in valid_formats:
url = f'https://eodhistoricaldata.com/api/exchanges-list/?api_token={api}&fmt=json'
response = requests.get(url)
data = response.text
if format == 'json':
exchanges = json.loads(data)
elif format == 'df':
exchanges = pd.read_json(data)
return exchanges
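# Small sketch tying two of the EOD helpers together: list the available exchanges and
# then pull the symbol list for one of them. A valid token in eod_api and network access
# are assumed; the 'US' code is only an example.
def _demo_eod_symbols(exchange_code='US'):
    exchanges = get_eod_exchanges(format='df')
    symbols = get_eod_instruments(exchange=exchange_code, format='df')
    return exchanges, symbols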
def get_eod_fundamentals(sec, ex, s_date = '1900-01-01'):
'''
Parameters
----------
sec : string : security (eg. AAPL)
ex : string : exchange (eg. US)
s_date : string : 'yyyy-mm-dd' format
Returns
-------
fundamentals : dictionary object
'''
url = (f'https://eodhistoricaldata.com/api/fundamentals/{sec}.{ex}?from={s_date}&to={e_date}'
f'&api_token={api}&period=d&fmt=json')
response = requests.get(url)
data = response.text
fundamentals = json.loads(data)
return fundamentals
def get_eod_instruments(exchange = 'INDX', format = 'df'):
valid_formats = ['json', 'df']
if format in valid_formats:
url = (
f'https://eodhistoricaldata.com/api/exchange-symbol-list/{exchange}'
f'?api_token={api}&fmt=json'
)
response = requests.get(url)
data = response.text
if format == 'json':
instruments = json.loads(data)
elif format == 'df':
instruments = pd.read_json(data)
return instruments
def get_eod_price(sec, ex, s_date = '1900-01-01'):
'''
Parameters
----------
sec : string : security (eg. AAPL)
ex : string : exchange (eg. US)
s_date : string : 'yyyy-mm-dd' format
Returns
-------
df : pandas dataframe
'''
url = (f'https://eodhistoricaldata.com/api/eod/{sec}.{ex}?from={s_date}&to={e_date}'
f'&api_token={api}&period=d&fmt=json')
try:
response = requests.get(url)
data = response.text
df = pd.read_json(data)
if df.shape[0] > 0:
df.set_index('date', inplace = True)
return df
except:
error_log.append(['Error: get_eod_price', url])
return | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import sys
import glob
import os
import re
import numpy as np
import logging
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
#inside pathx (MD)
def time_freq_filter(filex,complexName,per):
pathx = os.getcwd()
file = os.path.basename(filex)
fName = complexName
bondtype = file.split(".csv")[0].split("_merged_")[1]
first = pd.read_csv(filex)
os.chdir(pathx)
if not os.path.exists(f'{complexName}/04_time_freq_filter'):
os.makedirs(f'{complexName}/04_time_freq_filter', exist_ok=True)
pathxx=f'{pathx}/{complexName}/04_time_freq_filter'
os.chdir(pathxx)
pathy=pathxx+"/"+str(per)+"_freq_filtered"
if not os.path.exists(str(per)+"_freq_filtered"):
os.makedirs(str(per)+"_freq_filtered", exist_ok=True)
os.chdir(pathy)
if first.empty:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy = pd.DataFrame(columns=["donor_acceptor","NumSpp","total","percentage"])
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
os.chdir("..")
if not os.path.exists(str(per)+"_freq_perres"):
os.makedirs(str(per)+"_freq_perres")
pathq=pathy+"/"+str(per)+"_freq_perres"
os.chdir(pathq)
first_perres=pd.DataFrame(columns=['itype', 'donor_chain', 'acceptor_chain', 'donor_resnm', 'acceptor_resnm',
'donor_resid','acceptor_resid', 'donor_atom', 'acceptor_atom','chain_type',
"prot_or_dna",'specificity',"time"])
first_perres.to_csv (pathq+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq_perres.csv", index=None)
else:
        # First
logging.info('Finding percentages: {}'.format(fName))
firstx = []
for adx in first.donor_acceptor.unique () :
bbx = first[first["donor_acceptor"] == adx]
firstx.append([adx,
bbx.time.unique().size/first.time.unique().size*100])
firstxy = pd.DataFrame(firstx)
firstxy.columns = ["donor_acceptor","percentage"]
logging.info('Writing to file percentage: {}'.format(fName))
morefirstxy = firstxy[firstxy.percentage > float(per)]
if len(morefirstxy.donor_acceptor) == 0:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy = pd.DataFrame(columns=firstxy.columns)
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
os.chdir("..")
if not os.path.exists(str(per) + "_freq_perres"):
os.makedirs(str(per) + "_freq_perres")
pathq = pathy + "/" + str(per) + "_freq_perres"
os.chdir(pathq)
first_perres= pd.DataFrame(columns=first.columns)
first_perres.to_csv(pathq + "/" + fName + "_" + bondtype + "_" + str(per) + "_freq_perres.csv", index=None)
else:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
logging.info('Writing to file list: {}'.format(fName))
            first_perres = pd.concat(
                [first[first.donor_acceptor == da] for da in morefirstxy.donor_acceptor.unique()]
            )
            first_perres.sort_values(by="time", inplace=True)
            first_perres = first_perres.reset_index(drop=True)
os.chdir("..")
if not os.path.exists(str(per)+"_freq_perres"):
os.makedirs(str(per)+"_freq_perres")
pathq=pathy+"/"+str(per)+"_freq_perres"
os.chdir(pathq)
first_perres.to_csv (pathq+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq_perres.csv", index=None)
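# Hedged usage sketch for time_freq_filter: it expects a merged bond CSV whose filename
# contains '_merged_' (the bond type is parsed from the part after it), the complex name,
# and a persistence threshold in percent. The filename below is illustrative and the
# function is not actually called at import time.
def _demo_time_freq_filter():
    time_freq_filter('complexA_merged_hbond.csv', 'complexA', per=25)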
def make_freq_folders(pathy,per):
"""
Creates folders to write and read common and complex-specific bonds within 05_compare_cx_spp folder
:param pathy: path to 05_compare_cx_spp
:param per: time percentage
"""
import os
os.chdir(pathy)
pathz=pathy+"/"+str(per)+"_freq_filtered"
if not os.path.exists(str(per)+"_freq_filtered"):
os.makedirs(str(per)+"_freq_filtered",exist_ok=True)
for fold in ["_freq","_freq_perres"]:
os.chdir(pathz)
#to add freq
pathq=pathz+"/"+str(per)+fold
if not os.path.exists(str(per)+fold):
os.makedirs(str(per)+fold,exist_ok=True)
os.chdir(pathq)
pathq_common=pathq+"/common"
if not os.path.exists("common"):
os.makedirs("common",exist_ok=True)
os.chdir(pathq)
pathq_spp=pathq+"/complex_specific"
if not os.path.exists("complex_specific"):
os.makedirs("complex_specific",exist_ok=True)
def get_paths(pathy,per,fold,com_spp):
import os
os.chdir(pathy)
PathToWrite = pathy + "/" + per + "_" + "freq_filtered/" + per + fold + "/" + com_spp
return PathToWrite
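# make_freq_folders and get_paths are meant to be used together: the first lays out the
# '<per>_freq_filtered/<per>_freq[_perres]/{common,complex_specific}' tree under a given
# path, the second rebuilds one of those leaf paths as a string. A small sketch with an
# illustrative base path:
def _demo_freq_folder_paths(base_path='/tmp/complexA/05_compare_cx_spp', per=25):
    make_freq_folders(base_path, per)
    return get_paths(base_path, str(per), '_freq', 'common')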
def compare_bonds(complexName,per):
pathx = os.getcwd()
fName = complexName[0]
sName = complexName[1]
file_lists_freq_fName = glob.glob(f'{pathx}/{fName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}_freq/*csv')
file_lists_freq_sName = glob.glob(f'{pathx}/{sName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}_freq/*csv')
file_lists_freq = file_lists_freq_fName + file_lists_freq_sName
ToCompare = {}
for filex in file_lists_freq:
file = os.path.basename(filex)
if fName in filex:
Name = fName
else:
Name = sName
bondtype = file.split(f'{Name}_')[1].split("_")[0]
if bondtype == "ring":
bondtype = "ring_stacking"
first = | pd.read_csv(filex) | pandas.read_csv |
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
import numpy as np
from sklearn import metrics
pd.set_option('display.max_columns', None)
train = | pd.read_csv('./data/train.csv') | pandas.read_csv |
from numpy.fft import fft
import pickle_compat
pickle_compat.patch()
import pandas as pd
from sklearn import metrics
import pickle
import numpy as np
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from datetime import timedelta as td
Raw_CGMData1=pd.read_csv('CGMData.csv', low_memory=False)
RawInsulinData1=pd.read_csv('InsulinData.csv', low_memory=False)
Raw_CGMData2=pd.read_csv('CGM_patient2.csv', low_memory=False)
RawInsulinData2= | pd.read_csv('Insulin_patient2.csv', low_memory=False) | pandas.read_csv |
import os
from pathlib import Path
import dotenv
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
project_dir = Path(__file__).resolve().parents[2]
dotenv_path = project_dir / ".env"
dotenv.load_dotenv(str(dotenv_path))
path_clinical_info = Path(os.environ["CLINIC_INFO_PATH"])
# model = "model_1-00_alpha_1-00_wplc_1-00_wt_1-00_wl_40-00_splc_early_stop"
# model = "model_0-75_alpha_1-00_wplc_1-00_wt_1-00_wl_40-00_splc_early_stop"
model = "model_gtvl_gtvt"
# model = "model_0-75_alpha_1-00_wplc_0-00_wt_0-00_wl_40-00_splc_early_stop"
path_volume_csv = project_dir / f"data/plc_volume/{model}.csv"
def main():
volume_df = pd.read_csv(path_volume_csv).set_index("patient_id")
clinical_df = pd.read_csv(path_clinical_info).set_index("patient_id")
df = | pd.concat([clinical_df, volume_df], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import io
import os
import sys
import copy
import json
import pickle
import random
import hashlib
import warnings
import threading
import concurrent.futures
import numpy as np
import pandas as pd
import plyvel
import requests
from PIL import Image
from tqdm import tqdm
from loguru import logger
from dynaconf import settings
from tenacity import *
from script.decider import Decider
from script.feature import Feature
from script.utility import Utility
from script.attacker import Attacker
from script.deserialize import Deserialize
if not sys.warnoptions:
warnings.simplefilter("ignore")
class AntiGPS:
def __init__(self):
self.attacker = Attacker("./results/pano_text.json")
self.attacker_test = Attacker("./results/pano_text_pit.json")
self.feature = Feature()
self.decider = Decider()
self.lock = threading.Lock()
@retry(stop=stop_after_attempt(3), wait=wait_fixed(120))
def get_poi_azure(self, credentials: dict, radius=50) -> dict:
logger.debug(f"Getting Azure POIs around {credentials['lat']}, {credentials['lng']}")
credentials["radius"] = radius
credentials["subscription_key"] = settings.AZUREAPI.subscription_key
headers = {"x-ms-client-id": settings.AZUREAPI.client_id}
url = "https://atlas.microsoft.com/search/nearby/json?subscription-key={subscription_key}&api-version=1.0&lat={lat}&lon={lng}&radius={radius}".format(
**credentials
)
r = requests.get(url, headers=headers)
if r.status_code != 200:
logger.warning(f"{r.json}")
raise ValueError("No POIs around available")
return r.json()
def get_poi(self, credentials: dict, radius=50) -> dict:
key = (str(credentials["lat"]) + "_" + str(credentials["lng"])).encode()
pois = self.db_poi.get(key)
if pois == None:
pois = self.get_poi_azure(credentials, radius)
self.db_poi.put(key, json.dumps(pois).encode())
else:
pois = json.loads(pois)
return pois
@retry(stop=stop_after_attempt(3), wait=wait_fixed(120))
def get_streetview(self, credentials):
logger.debug(f"Getting Street View with heading {credentials['heading']}")
credentials["apikey"] = settings.GOOGLEAPI
url = "https://maps.googleapis.com/maps/api/streetview?size=1280x640&location={lat},{lng}&heading={heading}&pitch={pitch}&fov=120&source=outdoor&key={apikey}".format(
**credentials
)
image = requests.get(url)
if image.status_code != 200:
logger.warning(f"{image.json}")
raise ValueError("No Google Street View Available")
return image.content
def get_pano_google(self, credentials):
logger.debug(
f"Getting Google Street View Pano of {credentials['lat']}, {credentials['lng']}"
)
filename = f"./results/google_img/{hashlib.md5((str(credentials['lat']) + '_' + str(credentials['lng']) + '_' + str(credentials['heading']) + '_' + str(credentials['pitch'])).encode()).hexdigest()}.jpg"
## Firstly check if local pano exists
if os.path.exists(filename):
logger.warning(f"Image {filename} is existing")
image = Image.open(filename)
else:
## requests Google API
pano_120 = []
with open("./results/google_img.csv", "a") as fin:
for _, degree in enumerate([-120, 0, 120]):
cred = copy.deepcopy(credentials)
cred["heading"] += degree
img = self.get_streetview(cred)
pano_120.append(img)
image, img1, img2 = [Image.open(io.BytesIO(pano)) for pano in pano_120]
image = Utility.concat_images_h_resize(image, img1)
image = Utility.concat_images_h_resize(image, img2)
Utility.image_save(image, filename, google=True)
fin.write(
f"{filename},{credentials['lat']},{credentials['lng']},{credentials['heading']},{credentials['pitch']}"
+ "\n"
)
return image, filename
def deblur(self, pano):
url = settings.URL_DEBLUR
headers = {"Content-Type": "application/octet-stream"}
r = requests.post(url, data=pano.compressed_image, headers=headers)
img = Image.open(io.BytesIO(r.content))
filename = f"{settings.IMAGE_FOLDER}/{pano.id}.jpg"
img.save(filename, format="JPEG")
return filename
# TODO: two service: text detection and text recognition
def ocr(self, image_path: str) -> dict:
url = "http://localhost:8301/ocr"
data = {"image_path": os.path.abspath(image_path)}
headers = {"Content-Type": "application/json"}
r = requests.post(url, json=data, headers=headers)
return r.json()
def load_database(self, databaseDir):
self.database = Deserialize(databaseDir)
def extract_text(self, filename):
"""This func would be only used for extracting streetlearn dataset
manhattan: "./results/pano_text.json"
pittsburgh: "./results/pano_text_pit.json"
"""
## Donelist
donelist = set()
if os.path.exists(filename):
with open(filename, "r") as fin:
for line in fin.readlines():
data = json.loads(line)
donelist.add(data["id"])
## write into json file
with open(filename, "a") as fout:
for pid, pano in tqdm(self.database.pano.items()):
if pid.decode("utf8") in donelist:
logger.warning(f"{pid} already processed")
continue
# image_path = self.deblur(pano)
## No debluring
try:
img = Image.open(io.BytesIO(pano.compressed_image))
except:
logger.warning(f"pano {pid} cannot identify image")
continue
image_path = f"./results/images/{pid}.jpg"
img.save(image_path, format="JPEG")
info_text = self.ocr(image_path)
info_all = {
"id": pano.id,
"lat": pano.coords.lat,
"lng": pano.coords.lng,
"heading": pano.heading_deg,
"pitch": pano.pitch_deg,
"neighbor": [x.id for x in pano.neighbor],
"pano_date": pano.pano_date,
**info_text,
}
fout.write(json.dumps(info_all) + "\n")
os.remove(image_path)
def defense(self, pano_attack: dict):
"""Discarded
"""
logger.debug(f"Starting defense {pano_attack['id']}")
if "lat_attack" in pano_attack:
lat, lng = pano_attack["lat_attack"], pano_attack["lng_attack"]
else:
lat, lng = pano_attack["lat"], pano_attack["lng"]
_, img_path = self.get_pano_google(pano_attack)
text_defense_list = []
for i_path in img_path:
info_ocr = self.ocr(i_path)
text_defense_list.extend(info_ocr["text"])
text_attack = " ".join(
[x["predicted_labels"] for x in pano_attack["text_ocr"] if x["confidence_score"] > 0.95]
)
text_defense = " ".join(
[x["predicted_labels"] for x in text_defense_list if x["confidence_score"] > 0.95]
)
ratio = self.decider.similarity_text(text_attack, text_defense)
result = {
"similarity_text_ratio": [ratio],
"id": [pano_attack["id"]],
"lat": [pano_attack["lat"]],
"lng": [pano_attack["lng"]],
"lat_attack": [lat],
"lng_attack": [lng],
"text_attack": [text_attack],
"text_defense": [text_defense],
}
resultDF = pd.DataFrame(result)
resultDF.to_csv("./results/defense_result.csv", mode="a", index=False, header=False)
logger.info(f"Defensed {pano_attack['id']}")
def init_leveldb(self):
"""
Initialize training data levelDB database
"""
logger.info("Initializing levelDB ...")
self.db_feature = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/features/", create_if_missing=True
)
self.db_attack = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/train_data_attack/", create_if_missing=True
)
self.db_noattack = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/train_data_noattack/", create_if_missing=True
)
self.db_attack_test = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/test_data_attack/", create_if_missing=True
)
self.db_noattack_test = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/test_data_noattack/", create_if_missing=True
)
self.db_poi = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/azure_poi/", create_if_missing=True
)
self.db_attack_poi = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/train_data_attack_poi/",
create_if_missing=True,
)
self.db_noattack_poi = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/train_data_noattack_poi/",
create_if_missing=True,
)
self.db_attack_test_poi = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/test_data_attack_poi/",
create_if_missing=True,
)
self.db_noattack_test_poi = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/test_data_noattack_poi/",
create_if_missing=True,
)
self.db_partial_attack = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/test_data_partial_attack/",
create_if_missing=True,
)
self.db_partial_attack_google = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/test_data_partial_attack_google/",
create_if_missing=True,
)
self.db_partial_attack_poi = plyvel.DB(
"/home/bourne/Workstation/AntiGPS/results/test_data_partial_attack_poi/",
create_if_missing=True,
)
def close_leveldb(self):
logger.info("Closing levelDB ...")
self.db_feature.close()
self.db_attack.close()
self.db_noattack.close()
self.db_attack_test.close()
self.db_noattack_test.close()
self.db_poi.close()
self.db_attack_poi.close()
self.db_noattack_poi.close()
self.db_attack_test_poi.close()
self.db_noattack_test_poi.close()
self.db_partial_attack.close()
self.db_partial_attack_google.close()
self.db_partial_attack_poi.close()
# TODO: Real system get only one pano from car cam with GPS info
def generate_feature_vector(self, pano: bytes):
pass
# TODO: For real system, input should be two pano bytes or image objects
def generate_feature_vector_local(self, pano_id, pano_id_attack, valid="default", test=False):
"""Locally generate feature vectors with three validation methods:
1. local database (default);
2. Google Street View APIs (google);
3. Azure POIs API (poi)
Arguments:
pano_id {str} -- real pano id
pano_id_attack {str} -- attack pano id
Keyword Arguments:
valid {str} -- validation methods for attack pano (default: {"default"}, "google", "poi")
Returns:
list -- feature vector
"""
if test:
attacker = self.attacker_test
else:
attacker = self.attacker
key = pano_id.encode("utf-8")
if self.db_feature.get(key):
feature_vector = pickle.loads(self.db_feature.get(key))
else:
feature_vector = []
# feature_vector.extend(
# self.feature.textbox_position(attacker.dataset[pano_id], height=408, width=1632)
# )
feature_vector.extend(self.feature.textbox_position(attacker.dataset[pano_id]))
feature_vector.extend(self.feature.sentence_vector(attacker.dataset[pano_id]))
self.lock.acquire()
self.db_feature.put(key, pickle.dumps(feature_vector))
self.lock.release()
## google means get attack pano from google API. Otherwise get attack pano from local database
if valid == "default":
key = pano_id_attack.encode("utf-8")
if self.db_feature.get(key):
feature_vector.extend(pickle.loads(self.db_feature.get(key)))
else:
feature_vector_attack = []
feature_vector_attack.extend(
# self.feature.textbox_position(
# attacker.dataset[pano_id_attack], height=408, width=1632
# )
self.feature.textbox_position(attacker.dataset[pano_id_attack])
)
feature_vector_attack.extend(
self.feature.sentence_vector(attacker.dataset[pano_id_attack])
)
feature_vector.extend(feature_vector_attack)
self.lock.acquire()
self.db_feature.put(key, pickle.dumps(feature_vector_attack))
self.lock.release()
elif valid == "google":
## requests Google Street View and do OCR
key = (pano_id_attack + "_google").encode("utf-8")
if self.db_feature.get(key):
feature_vector.extend(pickle.loads(self.db_feature.get(key)))
else:
image, image_path = self.get_pano_google(attacker.dataset[pano_id_attack])
ocr_results = self.ocr(image_path)
feature_vector_attack = self.feature.textbox_position(ocr_results)
feature_vector_attack.extend(self.feature.sentence_vector(ocr_results))
feature_vector.extend(feature_vector_attack)
self.lock.acquire()
self.db_feature.put(key, pickle.dumps(feature_vector_attack))
self.lock.release()
elif valid == "poi":
## requests Azure POIs
key = (pano_id_attack + "_poi").encode("utf-8")
if self.db_feature.get(key):
feature_vector.extend(pickle.loads(self.db_feature.get(key)))
else:
pois = self.get_poi(attacker.dataset[pano_id_attack])
feature_vector_attack = self.feature.poi_vector(pois)
feature_vector.extend(feature_vector_attack)
self.lock.acquire()
self.db_feature.put(key, pickle.dumps(feature_vector_attack))
self.lock.release()
else:
raise ValueError(f"Invalid valid param: {valid}")
return feature_vector
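    # Usage sketch (comments only, since it needs the local levelDBs plus the OCR and
    # feature services to be running): the `valid` argument above picks where the attack
    # half of the feature vector comes from --
    #   valid='default' -> text features of the attack pano from the local StreetLearn data
    #   valid='google'  -> a fresh Google Street View pano is fetched and OCR'd
    #   valid='poi'     -> Azure POIs around the attack location are embedded instead of text
    # e.g.  vec = antigps.generate_feature_vector_local(pid, pid_attack, valid='poi')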
def get_route_todo(self, routes_ids: list, db, thread=1) -> list:
if thread < 2:
return [routes_ids]
routes_done = {k.decode() for k, v in db}
route_todo = list(set([str(x) for x in routes_ids]) - routes_done)
logger.debug(f"done {len(routes_done)}, todo {len(route_todo)}")
return np.array_split(route_todo, thread)
# TODO: Some issues with multiple threads, need to fix
def generate_train_data(
self, filename, attack=True, noattack=True, thread=5, overwrite=False, test=False
):
logger.info(
"Generating {} data with Google Street Views".format("testing" if test else "training")
)
routesDF = | pd.read_csv(filename, index_col=["route_id"]) | pandas.read_csv |
import numpy as np
import keras
import tensorflow as tf
from matplotlib import pyplot as plt
import pandas as pd
from keras.layers.embeddings import Embedding
from keras.layers import concatenate, Lambda
import os, sys
from weather_model import Seq2Seq_MVE_subnets_swish, weather_conv1D, CausalCNN, RNN_builder, Seq2Seq, Seq2Seq_MVE, Seq2Seq_MVE_subnets
from keras.models import load_model, model_from_json
#from utils import random_sine, plot_prediction
#learning_rate = 0.01
#decay = 0 # Learning rate decay
model_save_path = '../models/'
#loss = "mse" # Other loss functions are possible, see Keras documentation.
# Regularisation isn't really needed for this application
#lambda_regulariser = None #0.000001 # Will not be used if regulariser is None
#regulariser = None # Possible regulariser: keras.regularizers.l2(lambda_regulariser)
#steps_per_epoch = 200 # batch_size * steps_per_epoch = total number of training examples
#num_signals = 2 # The number of random sine waves the compose the signal. The more sine waves, the harder the problem.
def crop(dimension, start, end):
# Crops (or slices) a Tensor on a given dimension from start to end
# example : to crop tensor x[:, :, 5:10]
# call slice(2, 5, 10) as you want to crop on the second dimension
def func(x):
if dimension == 0:
return x[start: end]
if dimension == 1:
return x[:, start: end]
if dimension == 2:
return x[:, :, start: end]
if dimension == 3:
return x[:, :, :, start: end]
if dimension == 4:
return x[:, :, :, :, start: end]
return Lambda(func)
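# Quick illustration of the crop() helper above: it wraps the slicing in a Keras Lambda
# layer so it can be used inside a model graph. The shapes are arbitrary example values,
# mirroring how the MVE loss later splits a 6-feature output into means and variances.
def _demo_crop_layer():
    inputs = keras.layers.Input(shape=(37, 6))   # (batch, timesteps, features)
    mean_part = crop(2, 0, 3)(inputs)            # keep features 0..2
    var_part = crop(2, 3, 6)(inputs)             # keep features 3..5
    return keras.models.Model(inputs, [mean_part, var_part])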
class WeatherConv1D:
def __init__(self, regulariser=None,
lr=0.001, decay=0, loss="mse",
layers=[35, 35], batch_size=256,
input_len=37, input_features=29,
strides_len=3, kernel_size=5):
self.regulariser = regulariser
self.layers = layers
self.lr = lr
self.decay = decay
self.loss = loss
self.pred_result = None
self.batch_size = batch_size
self.input_len = input_len
self.input_features = input_features
self.kernel_strides = strides_len
self.kernel_size = kernel_size
print('Initialized!')
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = weather_conv1D(self.layers, self.lr,
self.decay, self.loss, self.input_len,
self.input_features, self.kernel_strides, self.kernel_size)
print(self.model.summary())
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
if certain_id == None and certain_feature == None:
id_ = np.random.randint(max_j, size=batch_size)
i = np.random.randint(max_i, size=batch_size)
batch_ouputs = ground_truth[i,:,id_,:]
batch_ruitu = ruitu_inputs[i,:,id_,:]
elif certain_id != None:
pass
return batch_ruitu, batch_ouputs
def order_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
pass #TODO:
def fit(self, train_input_ruitu, train_labels,
val_input_ruitu, val_labels, batch_size,
iterations=300, validation=True):
self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
self.model.compile(optimizer = self.optimizer, loss=self.loss)
print('Train batch size: {}'.format(batch_size))
print('Validation on data size of {};'.format(val_input_ruitu.shape[0]))
for i in range(iterations):
batch_ruitu, batch_labels = self.sample_batch(train_input_ruitu, train_labels,
train_input_ruitu, batch_size=batch_size)
loss_ = self.model.train_on_batch(x=[batch_ruitu],
y=[batch_labels])
if (i+1)%50 == 0:
print('Iteration:{}/{}. Training batch loss:{}'.
format(i+1, iterations, loss_))
if validation :
self.evaluate(val_input_ruitu, val_labels, each_station_display=False)
print('###'*10)
print('Train finish! Total validation loss:')
self.evaluate(val_input_ruitu, val_labels, each_station_display=True)
def evaluate(self, data_input_ruitu, data_labels, each_station_display=False):
all_loss=[]
for i in range(10): # iterate for each station. (sample_ind, timestep, staionID, features)
val_loss= self.model.evaluate(x=[data_input_ruitu[:,:,i,:]],
y=[data_labels[:,:,i,:]], verbose=False)
all_loss.append(val_loss)
if each_station_display:
print('\tFor station 9000{}, evaluated loss: {}'.format(i+1, val_loss))
print('Mean evaluated loss on all stations:', np.mean(all_loss))
#return np.mean(all_loss)
def predict(self, batch_ruitu):
pred_result_list = []
for i in range(10):
#print('Predict for station: 9000{}'.format(i+1))
result = self.model.predict(x=[batch_ruitu[:,:,i,:]])
result = np.squeeze(result, axis=0)
#all_pred[i] = result
pred_result_list.append(result)
#pass
pred_result = np.stack(pred_result_list, axis=0)
#return all_pred, pred_result
print('Predict shape (10,37,3) means (stationID, timestep, features). Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
return pred_result
def renorm_for_submit(self, pred_mean, pred_var):
if self.pred_result is None:
print('You must run self.predict(batch_inputs, batch_ruitu) firstly!!')
else:
df_empty = pd.DataFrame(columns=['FORE_data', 't2m', 'rh2m', 'w10m'])
            self.target_list = ['t2m', 'rh2m', 'w10m']
self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
'rh2m':[0.0,100.0],
'w10m':[0.0, 30.0]}
for j, target_v in enumerate(self.target_list):
series_ids = pd.Series()
series_targets = pd.Series()
renorm_value = renorm(self.pred_result[:,:,j], self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
for i in range(10):
if i != 9:
id_num = '0'+str(i+1)
else:
id_num = str(10)
sta_name_time = '900'+id_num+'_'
time_str_list=[]
for t in range(37):
if t < 10:
time_str= sta_name_time + '0'+ str(t)
else:
time_str = sta_name_time + str(t)
time_str_list.append(time_str)
series_id = pd.Series(time_str_list)
series_target = pd.Series(renorm_value[i])
series_ids = pd.concat([series_ids, series_id])
series_targets = pd.concat([series_targets, series_target])
df_empty['FORE_data'] = series_ids
df_empty[target_v] = series_targets
return df_empty
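# The submission frame produced by renorm_for_submit above is long-format, one row per
# station per forecast step (values below are illustrative):
#
#   FORE_data    t2m   rh2m   w10m
#   90001_00    12.3   45.0    3.1
#   90001_01    12.1   46.2    2.9
#   ...
#   90010_36     8.7   60.5    4.4
#
# i.e. 10 stations x 37 steps = 370 rows, with each target renormalised from [0, 1] back
# to its physical range via obs_range_dic.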
class CausalCNN_Class(WeatherConv1D):
def __init__(self, regulariser,lr, decay, loss,
n_filters, strides_len, kernel_size, seq_len,
input_features, output_features, dilation_rates):
self.regulariser=regulariser
self.n_filters=n_filters
self.lr=lr
self.decay=decay
self.loss=loss
self.seq_len=seq_len
self.input_features=input_features
self.output_features = output_features
self.strides_len=strides_len
self.kernel_size=kernel_size
self.dilation_rates=dilation_rates
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = CausalCNN(self.n_filters, self.lr,
self.decay, self.loss,
self.seq_len, self.input_features,
self.strides_len, self.kernel_size,
self.dilation_rates)
print(self.model.summary())
class FNN(WeatherConv1D):
def __init__(self, regulariser,lr, decay, loss,
layers, batch_size, seq_len, input_features, output_features):
self.regulariser=regulariser
self.layers=layers
self.lr=lr
self.decay=decay
self.loss=loss
self.seq_len=seq_len
self.input_features=input_features
self.output_features = output_features
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = weather_fnn(self.layers, self.lr,
self.decay, self.loss, self.seq_len,
self.input_features, self.output_features)
print(self.model.summary())
class Enc_Dec:
def __init__(self, num_input_features, num_output_features, num_decoder_features,
input_sequence_length, target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=0.001, decay=0, loss = "mse",
layers=[35, 35]):
self.num_input_features = num_input_features
self.num_output_features = num_output_features
self.num_decoder_features = num_decoder_features
self.input_sequence_length = input_sequence_length
self.target_sequence_length = target_sequence_length
self.num_steps_to_predict = num_steps_to_predict
self.regulariser = regulariser
self.layers = layers
self.lr = lr
self.decay = decay
self.loss = loss
self.pred_result = None
self.train_loss=[]
self.target_list=['t2m','rh2m','w10m']
self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
'rh2m':[0.0,100.0],
'w10m':[0.0, 30.0]}
print('Initialized!')
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.optimiser = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
# Define an input sequence.
encoder_inputs = keras.layers.Input(shape=(None, self.num_input_features), name='encoder_inputs')
# Create a list of RNN Cells, these are then concatenated into a single layer
# with the RNN layer.
encoder_cells = []
for hidden_neurons in self.layers:
encoder_cells.append(keras.layers.GRUCell(hidden_neurons,
kernel_regularizer = self.regulariser,
recurrent_regularizer = self.regulariser,
bias_regularizer = self.regulariser))
encoder = keras.layers.RNN(encoder_cells, return_state=True)
encoder_outputs_and_states = encoder(encoder_inputs)
# Discard encoder outputs and only keep the states.
encoder_states = encoder_outputs_and_states[1:]
# Define a decoder sequence.
decoder_inputs = keras.layers.Input(shape=(None, self.num_decoder_features), name='decoder_inputs')
decoder_cells = []
for hidden_neurons in self.layers:
decoder_cells.append(keras.layers.GRUCell(hidden_neurons,
kernel_regularizer = self.regulariser,
recurrent_regularizer = self.regulariser,
bias_regularizer = self.regulariser))
decoder = keras.layers.RNN(decoder_cells, return_sequences=True, return_state=True)
# Set the initial state of the decoder to be the ouput state of the encoder.
decoder_outputs_and_states = decoder(decoder_inputs, initial_state=encoder_states)
# Only select the output of the decoder (not the states)
decoder_outputs = decoder_outputs_and_states[0]
# Apply a dense layer with linear activation to set output to correct dimension
# and scale (tanh is default activation for GRU in Keras, our output sine function can be larger then 1)
decoder_dense1 = keras.layers.Dense(units=64,
activation='tanh',
kernel_regularizer = self.regulariser,
bias_regularizer = self.regulariser, name='dense_tanh')
output_dense = keras.layers.Dense(self.num_output_features,
activation='sigmoid',
kernel_regularizer = self.regulariser,
bias_regularizer = self.regulariser, name='output_sig')
#densen1=decoder_dense1(decoder_outputs)
decoder_outputs = output_dense(decoder_outputs)
# Create a model using the functional API provided by Keras.
self.model = keras.models.Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_outputs)
print(self.model.summary())
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
if certain_id == None and certain_feature == None:
id_ = np.random.randint(max_j, size=batch_size)
i = np.random.randint(max_i, size=batch_size)
batch_inputs = data_inputs[i,:,id_,:]
batch_ouputs = ground_truth[i,:,id_,:]
batch_ruitu = ruitu_inputs[i,:,id_,:]
elif certain_id != None:
pass
return batch_inputs, batch_ruitu, batch_ouputs
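    # Shape sketch for sample_batch (comments only): with data_inputs of shape
    # (n_samples, input_seq_len, 10, 9) and batch_size B it draws B random
    # (sample, station) pairs and returns
    #   batch_inputs : (B, input_seq_len, 9)  past observation features of the sampled station
    #   batch_ruitu  : (B, 37, 29)            Ruitu NWP features for the same station
    #   batch_ouputs : (B, 37, 3)             targets t2m / rh2m / w10m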
def fit(self, train_input_obs, train_input_ruitu, train_labels,
val_input_obs, val_input_ruitu, val_labels, batch_size,
iterations=300, validation=True):
self.model.compile(optimizer = self.optimiser, loss=self.loss)
print('Train batch size: {}'.format(batch_size))
print('Validation on data size of {};'.format(val_input_obs.shape[0]))
for i in range(iterations):
batch_inputs, batch_ruitu, batch_labels = self.sample_batch(train_input_obs, train_labels,
train_input_ruitu, batch_size=batch_size)
loss_ = self.model.train_on_batch(x=[batch_inputs, batch_ruitu],
y=[batch_labels])
if (i+1)%50 == 0:
print('Iteration:{}/{}. Training batch loss:{}'.
format(i+1, iterations, loss_))
if validation :
self.evaluate(val_input_obs, val_input_ruitu, val_labels, each_station_display=False)
print('###'*10)
print('Train finish! Total validation loss:')
self.evaluate(val_input_obs, val_input_ruitu, val_labels, each_station_display=True)
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, each_station_display=False):
assert data_input_ruitu.shape[0] == data_input_obs.shape[0] == data_labels.shape[0], 'Shape Error'
#assert data_input_obs.shape[1] == 28 and data_input_obs.shape[2] == 10 and data_input_obs.shape[3] == 9, 'Error! Obs input shape must be (None, 28,10,9)'
assert data_input_ruitu.shape[1] == 37 and data_input_ruitu.shape[2] == 10 and data_input_ruitu.shape[3] == 29, 'Error! Ruitu input shape must be (None, 37,10,29)'
        assert data_labels.shape[1] == 37 and data_labels.shape[2] == 10 and data_labels.shape[3] == 3, 'Error! Labels shape must be (None, 37, 10, 3)'
all_loss=[]
for i in range(10): # iterate for each station. (sample_ind, timestep, staionID, features)
val_loss= self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:]],
y=[data_labels[:,:,i,:]], verbose=False)
all_loss.append(val_loss)
if each_station_display:
print('\tFor station 9000{}, evaluated loss: {}'.format(i+1, val_loss))
print('Mean evaluated loss on all stations:', np.mean(all_loss))
def predict(self, batch_inputs, batch_ruitu):
assert batch_ruitu.shape[0] == batch_inputs.shape[0], 'Shape Error'
assert batch_inputs.shape[1] == 28 and batch_inputs.shape[2] == 10 and batch_inputs.shape[3] == 9, 'Error! Obs input shape must be (None, 28,10,9)'
assert batch_ruitu.shape[1] == 37 and batch_ruitu.shape[2] == 10 and batch_ruitu.shape[3] == 29, 'Error! Ruitu input shape must be (None, 37,10, 29)'
#all_pred={}
pred_result_list = []
for i in range(10):
#print('Predict for station: 9000{}'.format(i+1))
result = self.model.predict(x=[batch_inputs[:,:,i,:], batch_ruitu[:,:,i,:]])
result = np.squeeze(result, axis=0)
#all_pred[i] = result
pred_result_list.append(result)
#pass
pred_result = np.stack(pred_result_list, axis=0)
#return all_pred, pred_result
print('Predict shape (10,37,3) means (stationID, timestep, features). Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
return pred_result
def renorm_for_submit(self, pred_mean, pred_var=None):
'''
# TODO: Add three strategies for output
'''
assert self.pred_result is not None, 'You must run self.predict(batch_inputs, batch_ruitu) firstly!!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
df_empty = pd.DataFrame(columns=['FORE_data', 't2m', 'rh2m', 'w10m'])
for j, target_v in enumerate(self.target_list):
series_ids = pd.Series()
series_targets = pd.Series()
renorm_value = renorm(pred_mean[:,:,j], self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
for i in range(10):
if i != 9:
id_num = '0'+str(i+1)
else:
id_num = str(10)
sta_name_time = '900'+id_num+'_'
time_str_list=[]
for t in range(37):
if t < 10:
time_str= sta_name_time + '0'+ str(t)
else:
time_str = sta_name_time + str(t)
time_str_list.append(time_str)
series_id = pd.Series(time_str_list)
series_target = pd.Series(renorm_value[i])
series_ids = pd.concat([series_ids, series_id])
series_targets = pd.concat([series_targets, series_target])
df_empty['FORE_data'] = series_ids
df_empty[target_v] = series_targets
return df_empty
#pass
def plot_prediction(self, x, y_true, y_pred, input_ruitu=None):
"""Plots the predictions.
Arguments
---------
x: Input sequence of shape (input_sequence_length,
dimension_of_signal)
y_true: True output sequence of shape (input_sequence_length,
dimension_of_signal)
y_pred: Predicted output sequence (input_sequence_length,
dimension_of_signal)
input_ruitu: Ruitu output sequence
"""
plt.figure(figsize=(12, 3))
output_dim = x.shape[-1]# feature dimension
for j in range(output_dim):
past = x[:, j]
true = y_true[:, j]
pred = y_pred[:, j]
if input_ruitu is not None:
ruitu = input_ruitu[:, j]
label1 = "Seen (past) values" if j==0 else "_nolegend_"
label2 = "True future values" if j==0 else "_nolegend_"
label3 = "Predictions" if j==0 else "_nolegend_"
label4 = "Ruitu values" if j==0 else "_nolegend_"
plt.plot(range(len(past)), past, "o-g",
label=label1)
plt.plot(range(len(past),
len(true)+len(past)), true, "x--g", label=label2)
plt.plot(range(len(past), len(pred)+len(past)), pred, "o--y",
label=label3)
if input_ruitu is not None:
plt.plot(range(len(past), len(ruitu)+len(past)), ruitu, "o--r",
label=label4)
plt.legend(loc='best')
plt.title("Predictions v.s. true values v.s. Ruitu")
plt.show()
class RNN_Class(WeatherConv1D):
def __init__(self, num_output_features, num_decoder_features,
target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=0.001, decay=0, loss = "mse",
layers=[35, 35]):
self.num_output_features = num_output_features
self.num_decoder_features = num_decoder_features
self.target_sequence_length = target_sequence_length
self.num_steps_to_predict = num_steps_to_predict
self.regulariser = regulariser
self.layers = layers
self.lr = lr
self.decay = decay
self.loss = loss
self.pred_result = None
#self.batch_size = batch_size
print('Initialized!')
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = RNN_builder(self.num_output_features, self.num_decoder_features,
self.target_sequence_length,
self.num_steps_to_predict, self.regulariser,
self.lr, self.decay, self.loss, self.layers)
print(self.model.summary())
class Seq2Seq_Class(Enc_Dec):
def __init__(self, id_embd, time_embd,
num_input_features, num_output_features, num_decoder_features,
input_sequence_length, target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=0.001, decay=0, loss = "mse",
layers=[35, 35], model_save_path='../models',
model_structure_name='seq2seq_model.json', model_weights_name='seq2seq_model_weights.h5'):
super().__init__(num_input_features, num_output_features, num_decoder_features,
input_sequence_length, target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=lr, decay=decay, loss = loss,
layers=layers)
self.id_embd = id_embd
self.time_embd = time_embd
self.val_loss_list=[]
self.train_loss_list=[]
self.current_mean_val_loss = None
self.early_stop_limit = 10 # with the unit of Iteration Display
self.EARLY_STOP=False
self.pred_var_result = []
self.pi_dic={0.95:1.96, 0.9:1.645, 0.8:1.28, 0.68:1.}
self.target_list=['t2m','rh2m','w10m']
self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
'rh2m':[0.0,100.0],
'w10m':[0.0, 30.0]}
self.obs_and_output_feature_index_map = {'t2m':0,'rh2m':1,'w10m':2}
self.ruitu_feature_index_map = {'t2m':1,'rh2m':3,'w10m':4}
self.model_save_path = model_save_path
self.model_structure_name=model_structure_name
self.model_weights_name=model_weights_name
def build_graph(self):
#keras.backend.clear_session() # clear session/graph
self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
lr=self.lr, decay=self.decay,
num_input_features=self.num_input_features, num_output_features=self.num_output_features,
num_decoder_features=self.num_decoder_features, layers=self.layers,
loss=self.loss, regulariser=self.regulariser)
def _mve_loss(y_true, y_pred):
pred_u = crop(2,0,3)(y_pred)
pred_sig = crop(2,3,6)(y_pred)
print(pred_sig)
#exp_sig = tf.exp(pred_sig) # avoid pred_sig is too small such as zero
#precision = 1./exp_sig
precision = 1./pred_sig
#log_loss= 0.5*tf.log(exp_sig)+0.5*precision*((pred_u-y_true)**2)
log_loss= 0.5*tf.log(pred_sig)+0.5*precision*((pred_u-y_true)**2)
log_loss=tf.reduce_mean(log_loss)
return log_loss
print(self.model.summary())
self.model.compile(optimizer = self.optimizer, loss=_mve_loss)
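    # Note on _mve_loss above (comments only): the decoder emits 6 values per step, the
    # first 3 being the predictive means mu and the last 3 the predictive variances
    # sigma^2 for t2m / rh2m / w10m. The loss is the Gaussian negative log-likelihood,
    # dropping the constant 0.5*log(2*pi):
    #   NLL = 0.5 * log(sigma^2) + 0.5 * (y - mu)^2 / sigma^2
    # averaged over the batch, which is exactly what the two crop() slices and the
    # 0.5*tf.log(pred_sig) + 0.5*precision*(pred_u - y_true)**2 line compute.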
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
id_ = np.random.randint(max_j, size=batch_size)
i = np.random.randint(max_i, size=batch_size)
batch_inputs = data_inputs[i,:,id_,:]
batch_ouputs = ground_truth[i,:,id_,:]
batch_ruitu = ruitu_inputs[i,:,id_,:]
# id used for embedding
if self.id_embd and (not self.time_embd):
expd_id = np.expand_dims(id_,axis=1)
batch_ids = np.tile(expd_id,(1,37))
return batch_inputs, batch_ruitu, batch_ouputs, batch_ids
elif (not self.id_embd) and (self.time_embd):
time_range = np.array(range(37))
batch_time = np.tile(time_range,(batch_size,1))
#batch_time = np.expand_dims(batch_time, axis=-1)
return batch_inputs, batch_ruitu, batch_ouputs, batch_time
elif (self.id_embd) and (self.time_embd):
expd_id = np.expand_dims(id_,axis=1)
batch_ids = np.tile(expd_id,(1,37))
time_range = np.array(range(37))
batch_time = np.tile(time_range,(batch_size,1))
#batch_time = np.expand_dims(batch_time, axis=-1)
return batch_inputs, batch_ruitu, batch_ouputs, batch_ids, batch_time
elif (not self.id_embd) and (not self.time_embd):
return batch_inputs, batch_ruitu, batch_ouputs
def fit(self, train_input_obs, train_input_ruitu, train_labels,
val_input_obs, val_input_ruitu, val_labels, val_ids, val_times, batch_size,
iterations=300, validation=True):
print('Train batch size: {}'.format(batch_size))
print('Validation on data size of {};'.format(val_input_obs.shape[0]))
early_stop_count = 0
for i in range(iterations):
batch_inputs, batch_ruitu, batch_labels, batch_ids, batch_time = self.sample_batch(train_input_obs, train_labels,
train_input_ruitu, batch_size=batch_size)
#batch_placeholders = np.zeros_like(batch_labels)
loss_ = self.model.train_on_batch(x=[batch_inputs, batch_ruitu, batch_ids, batch_time],
y=[batch_labels])
if (i+1)%50 == 0:
print('Iteration:{}/{}. Training batch MLE loss:{}'.
format(i+1, iterations, loss_))
if validation :
self.evaluate(val_input_obs, val_input_ruitu, val_labels, val_ids, val_times, each_station_display=False)
if len(self.val_loss_list) >0: # Early stopping
if(self.current_mean_val_loss) <= min(self.val_loss_list): # compare with the last early_stop_limit values except SELF
early_stop_count = 0
model_json = self.model.to_json()
with open(self.model_save_path+self.model_structure_name, "w") as json_file:
json_file.write(model_json)
self.model.save_weights(self.model_save_path+self.model_weights_name)
else:
early_stop_count +=1
print('Early-stop counter:', early_stop_count)
if early_stop_count == self.early_stop_limit:
self.EARLY_STOP=True
break
print('###'*10)
if self.EARLY_STOP:
print('Loading the best model before early-stop ...')
self.model.load_weights(self.model_save_path+self.model_weights_name)
print('Training finished! Detailed val MLE loss:')
self.evaluate(val_input_obs, val_input_ruitu, val_labels, val_ids, val_times, each_station_display=True)
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
all_loss=[]
        for i in range(10): # iterate over each station. (sample_ind, timestep, stationID, features)
#batch_placeholders = np.zeros_like(data_labels[:,:,i,:])
val_loss= self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:], data_ids[:,:,i], data_time],
y=[data_labels[:,:,i,:]], verbose=False)
all_loss.append(val_loss)
if each_station_display:
                print('\tFor station {}, val MLE loss: {}'.format(90001 + i, val_loss))
self.current_mean_val_loss = np.mean(all_loss)
print('Mean val MLE loss:', self.current_mean_val_loss)
self.val_loss_list.append(self.current_mean_val_loss)
def predict(self, batch_inputs, batch_ruitu, batch_ids, batch_times):
'''
        Input: batch_inputs, batch_ruitu, batch_ids, batch_times covering all 10 stations
        Output:
pred_result (mean value) : (None, 10,37,3). i.e., (sample_nums, stationID, timestep, features)
pred_var_result (var value) : (None, 10,37,3)
'''
pred_result_list = []
pred_var_list = []
#pred_std_list =[]
for i in range(10):
result = self.model.predict(x=[batch_inputs[:,:,i,:], batch_ruitu[:,:,i,:], batch_ids[:,:,i], batch_times])
var_result = result[:,:,3:6] # Variance
result = result[:,:,0:3] # Mean
#result = np.squeeze(result, axis=0)
pred_result_list.append(result)
#var_result = np.squeeze(var_result, axis=0)
pred_var_list.append(var_result)
pred_result = np.stack(pred_result_list, axis=1)
pred_var_result = np.stack(pred_var_list, axis=1)
print('Predictive shape (None, 10,37,3) means (sample_nums, stationID, timestep, features). \
Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
self.pred_var_result = pred_var_result
#self.pred_std_result = np.sqrt(np.exp(self.pred_var_result[:,:,i,j])) # Calculate standard deviation
return pred_result, pred_var_result
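    # --- Editor's sketch (assumption-labelled, not called anywhere): turning a
    # predicted standard deviation into a central prediction interval with the
    # z-multipliers stored in self.pi_dic (e.g. 1.96 for a 95% interval). Note
    # the class is not fully consistent about whether the second output head is
    # a variance or a log-variance (calc_uncertainty_info applies np.exp first),
    # so the caller must pass whichever std it trusts.
    def _prediction_interval_sketch(mean, std, z=1.96):
        lower = mean - z * std
        upper = mean + z * std
        return lower, upper
    # e.g. _prediction_interval_sketch(np.array([20.0]), np.array([2.0]))
    # gives (array([16.08]), array([23.92])).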
def renorm_for_visualization(self, obs_inputs, ruitu_inputs, pred_mean_result, pred_var_result, ground_truth=None):
'''
obs_inputs: (None, 28, 10, 9)
ruitu_inputs: (None, 37, 10, 29)
pred_mean_result: (None, 10, 37, 3)
pred_var_result: (None, 10, 37, 3)
ground_truth: (None, 37, 10, 3)
#self.target_list=['t2m','rh2m','w10m']
#self.obs_range_dic={'t2m':[-30,42],
# 'rh2m':[0.0,100.0],
# 'w10m':[0.0, 30.0]}
#self.obs_and_output_feature_index_map = {'t2m':0,'rh2m':1,'w10m':2}
#self.ruitu_feature_index_map = {'t2m':1,'rh2m':3,'w10m':4}
#TODO:
'''
for target_v in self.target_list:
temp1 = obs_inputs[:,:,:,self.obs_and_output_feature_index_map[target_v]]
temp2 = ruitu_inputs[:,:,:,self.ruitu_feature_index_map[target_v]]
temp3 = pred_mean_result[:,:,:,self.obs_and_output_feature_index_map[target_v]]
#temp4 = pred_var_result[:,:,:,self.obs_and_output_feature_index_map[target_v]]
obs_inputs[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp1, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
ruitu_inputs[:,:,:,self.ruitu_feature_index_map[target_v]] = renorm(temp2, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
pred_mean_result[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp3, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
#pred_var_result[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp4, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
if ground_truth is not None:
temp5 = ground_truth[:,:,:,self.obs_and_output_feature_index_map[target_v]]
ground_truth[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp5, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
if ground_truth is not None:
return obs_inputs, ruitu_inputs, pred_mean_result, pred_var_result, ground_truth
else:
return obs_inputs, ruitu_inputs, pred_mean_result, pred_var_result
def calc_uncertainty_info(self, verbose=False):
'''
Verbose: Display uncertainty for each feature i.e., (t2m, rh2m, w10m)
#TODO: Refactor the double 'for' part.
'''
assert len(self.pred_var_result)>0, 'Error! You must run predict() before running calc_uncertainty_info()'
        print('The uncertainty info is calculated on {} predicted samples with shape {}'
.format(len(self.pred_var_result), self.pred_var_result.shape))
#
if verbose:
assert self.target_list == ['t2m','rh2m','w10m'], 'ERROR, list changed!'
for j, target_v in enumerate(['t2m','rh2m','w10m']):
print('For feature {}:'.format(target_v))
for i in range(37):
                    unctt_var = np.exp(self.pred_var_result[:,:,i,j])
                    unctt_std = np.sqrt(unctt_var)
unctt_mean_std = np.mean(unctt_std)
unctt_mean_var = np.mean(unctt_var)
#renorm_unctt_mean_std = renorm(unctt_mean_std, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
print('\tTime:{}-Variance:{:.4f}; Std:{:.4f};'.
format(i+1, unctt_mean_var, unctt_mean_std))
else:
for i in range(37):
unctt_var = np.exp(self.pred_var_result[:,:,i,:])
unctt_std = np.sqrt(unctt_var)
unctt_mean_std = np.mean(unctt_std)
unctt_mean_var = np.mean(unctt_var)
#renorm_unctt_mean_std = 0
print('Time:{}-Variance:{:.4f}; Std:{:.4f};'.
format(i+1, unctt_mean_var, unctt_mean_std))
def minus_plus_std_strategy(self, pred_mean, pred_var, feature_name,\
timestep_to_ensemble=21, alpha=0):
'''
        This strategy shifts the predicted mean, from a chosen timestep (timestep_to_ensemble) onwards, by a multiple of the predicted standard deviation:
            pred_mean = pred_mean + alpha * pred_std
pred_mean: (10, 37, 3)
pred_var: (10, 37, 3)
timestep_to_ensemble: int32 (From 0 to 36)
'''
        print('Using minus_plus_std_strategy with alpha {}'.format(alpha))
assert 0<=timestep_to_ensemble<=36 , 'Please ensure 0<=timestep_to_ensemble<=36!'
assert -0.3<= alpha <=0.3, '-0.3<= alpha <=0.3!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for \
        one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
pred_std = np.sqrt(np.exp(pred_var))
print('alpha:',alpha)
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
alpha * pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]
return pred_mean
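    # --- Editor's worked example (not in the original code): the strategy above
    # leaves timesteps before the cutoff untouched and shifts the later ones by
    # alpha standard deviations.
    def _shift_by_std_demo():
        mean = np.zeros((1, 5))            # toy case: 1 station, 5 timesteps
        std = np.full((1, 5), 2.0)
        alpha, cutoff = 0.1, 3
        mean[:, cutoff:] = mean[:, cutoff:] + alpha * std[:, cutoff:]
        return mean                        # [[0. 0. 0. 0.2 0.2]]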
def linear_ensemble_strategy(self, pred_mean, pred_var, ruitu_inputs, feature_name,\
timestep_to_ensemble=21, alpha=1):
'''
        This strategy calculates a linear weighting, at a specific timestep (timestep_to_ensemble) onwards, between the prediction and ruitu, as the formula:
(alpha)*pred_mean + (1-alpha)*ruitu_inputs
pred_mean: (10, 37, 3)
pred_var: (10, 37, 3)
        ruitu_inputs: (37,10,29). Needs to be swapped to (10,37,29) FIRST!
timestep_to_ensemble: int32 (From 0 to 36)
'''
assert 0<= alpha <=1, 'Please ensure 0<= alpha <=1 !'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for \
        one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
#pred_std = np.sqrt(np.exp(pred_var))
ruitu_inputs = np.swapaxes(ruitu_inputs,0,1)
print('alpha:',alpha)
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
(alpha)*pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
(1-alpha)*ruitu_inputs[:,timestep_to_ensemble:, self.ruitu_feature_index_map[feature_name]]
print('Corrected pred_mean shape:', pred_mean.shape)
return pred_mean
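    # --- Editor's sketch of the linear blend used above (alpha=1 keeps the
    # model forecast, alpha=0 keeps the ruitu/NWP forecast); shapes reduced to
    # 1-D for clarity.
    def _linear_blend_demo(alpha=0.7):
        model_pred = np.array([10.0, 12.0, 14.0])
        ruitu_pred = np.array([20.0, 20.0, 20.0])
        blended = alpha * model_pred + (1 - alpha) * ruitu_pred
        return blended                     # alpha=0.7 -> [13. , 14.4, 15.8]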
def fuzzy_ensemble_strategy(self, pred_mean, pred_var, feature_name,\
timestep_to_ensemble=21, alpha=0):
'''
        This strategy shifts the predicted mean, from a chosen timestep (timestep_to_ensemble) onwards, by alpha standard deviations weighted by a triangular fuzzy membership of the normalised standard deviation:
            pred_mean = pred_mean + fuzzy_degree * alpha * pred_std
pred_mean: (10, 37, 3)
pred_var: (10, 37, 3)
timestep_to_ensemble: int32 (From 0 to 36)
'''
print('Using fuzzy_ensemble_strategy with alpha {}'.format(alpha))
assert 0<=timestep_to_ensemble<=36 , 'Please ensure 0<=timestep_to_ensemble<=36!'
assert -0.4<= alpha <=0.4, 'Please ensure -0.4<= alpha <=0.4 !'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for \
        one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
pred_std = np.sqrt(np.exp(pred_var))
#print('normalizing for Std. after timestep:', timestep_to_ensemble)
temp_std = pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]
norm_std = temp_std / np.max(temp_std)
#print('norm_std shape', norm_std.shape)
dim_0, dim_1 = norm_std.shape
reshaped_std = norm_std.reshape(-1)
from skfuzzy import trimf
fuzzy_degree = trimf(reshaped_std, [0., 1, 1.2])
fuzzy_degree = fuzzy_degree.reshape(dim_0, dim_1)
#print('fuzzy_degree shape:',fuzzy_degree.shape)
#print('temp_std shape:',temp_std.shape)
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
fuzzy_degree*alpha*temp_std
#pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
#alpha * pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]
#print('pred_mean.shape',pred_mean.shape)
return pred_mean
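    # --- Editor's sketch (assumes scikit-fuzzy is installed, as the method
    # above already imports it): trimf is a triangular membership function over
    # the normalised std, so timesteps whose uncertainty is near the maximum
    # receive the largest correction and low-uncertainty timesteps almost none.
    def _trimf_membership_demo():
        from skfuzzy import trimf
        norm_std = np.array([0.0, 0.5, 1.0])
        membership = trimf(norm_std, [0.0, 1.0, 1.2])
        return membership                  # roughly [0. , 0.5, 1. ]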
def renorm_for_submit(self, pred_mean, pred_var, ruitu_inputs, timestep_to_ensemble=21, alpha=1):
'''
        Override for the Seq2Seq_MVE class
pred_mean: shape of (10, 37, 3)
pred_var: shape of (10, 37, 3)
ruitu_inputs: shape of (10, 37, 3)
timestep_to_ensemble: int32 (From 0 to 36)
# TODO: Add three strategies for output
'''
assert self.pred_result is not None, 'You must run self.predict(batch_inputs, batch_ruitu) firstly!!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
df_empty = pd.DataFrame(columns=['FORE_data', 't2m', 'rh2m', 'w10m'])
for j, target_v in enumerate(self.target_list):
series_ids = pd.Series()
series_targets = pd.Series()
#print('Feature {}, timestep_to_ensemble: {}, weighted alpha: {}'.
# format(target_v, timestep_to_ensemble, alpha))
#pred_mean = self.linear_ensemble_strategy(pred_mean, pred_var,
# ruitu_inputs, target_v, timestep_to_ensemble, alpha)
#pred_mean =self.minus_plus_std_strategy(pred_mean, pred_var, target_v,\
# timestep_to_ensemble, alpha)
#pred_mean = self.fuzzy_ensemble_strategy(pred_mean, pred_var, target_v,\
# timestep_to_ensemble, alpha=0.)
renorm_value = renorm(pred_mean[:,:,j], self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
for i in range(10):
if i != 9:
id_num = '0'+str(i+1)
else:
id_num = str(10)
sta_name_time = '900'+id_num+'_'
time_str_list=[]
for t in range(37):
if t < 10:
time_str= sta_name_time + '0'+ str(t)
else:
time_str = sta_name_time + str(t)
time_str_list.append(time_str)
series_id = pd.Series(time_str_list)
series_target = pd.Series(renorm_value[i])
series_ids = pd.concat([series_ids, series_id])
series_targets = | pd.concat([series_targets, series_target]) | pandas.concat |
import pandas as pd
import pytest
import numpy as np
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq
from dask.utils import ignoring
def mad(x):
return np.fabs(x - x.mean()).mean()
def rolling_functions_tests(p, d):
# Old-fashioned rolling API
assert_eq(pd.rolling_count(p, 3), dd.rolling_count(d, 3))
assert_eq(pd.rolling_sum(p, 3), dd.rolling_sum(d, 3))
assert_eq(pd.rolling_mean(p, 3), dd.rolling_mean(d, 3))
assert_eq(pd.rolling_median(p, 3), dd.rolling_median(d, 3))
assert_eq(pd.rolling_min(p, 3), dd.rolling_min(d, 3))
assert_eq(pd.rolling_max(p, 3), dd.rolling_max(d, 3))
assert_eq(pd.rolling_std(p, 3), dd.rolling_std(d, 3))
assert_eq(pd.rolling_var(p, 3), dd.rolling_var(d, 3))
# see note around test_rolling_dataframe for logic concerning precision
assert_eq(pd.rolling_skew(p, 3),
dd.rolling_skew(d, 3), check_less_precise=True)
assert_eq(pd.rolling_kurt(p, 3),
dd.rolling_kurt(d, 3), check_less_precise=True)
assert_eq(pd.rolling_quantile(p, 3, 0.5), dd.rolling_quantile(d, 3, 0.5))
assert_eq(pd.rolling_apply(p, 3, mad), dd.rolling_apply(d, 3, mad))
with ignoring(ImportError):
assert_eq( | pd.rolling_window(p, 3, 'boxcar') | pandas.rolling_window |
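# Editor's addition (assumption: pandas >= 0.23 and a current dask, where the
# top-level pd.rolling_* helpers above no longer exist). The method-based API
# covers the same checks; a sketch of the equivalents:
def rolling_methods_tests(p, d):
    assert_eq(p.rolling(3).count(), d.rolling(3).count())
    assert_eq(p.rolling(3).mean(), d.rolling(3).mean())
    assert_eq(p.rolling(3).std(), d.rolling(3).std())
    assert_eq(p.rolling(3).quantile(0.5), d.rolling(3).quantile(0.5))
    assert_eq(p.rolling(3).apply(mad, raw=True), d.rolling(3).apply(mad, raw=True))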
import bz2
import gzip
import lzma
import os
import re
import numpy as np
import pandas as pd
import xarray as xr
def untransform_varnames(varnames):
"""Map transformed variable names back to their originals.
Mainly useful for dealing with PyMC3 traces.
Example
-------
untransform_varnames(['eta__0', 'eta__1', 'theta', 'theta_log__'])
    {'eta': {'eta__0', 'eta__1'}, 'theta': {'theta'}}, {'theta': {'theta_log__'}}
Parameters
----------
varnames : iterable of strings
All the varnames from a trace
Returns
-------
(dict, dict)
A dictionary of names to vector names, and names to transformed names
"""
# Captures tau_log____0 or tau_log__, but not tau__0
transformed_vec_ptrn = re.compile(r'^(.*)__(?:__\d+)$')
# Captures tau__0 and tau_log____0, so use after the above
vec_ptrn = re.compile(r'^(.*)__\d+$')
varname_map = {}
transformed = {}
for varname in varnames:
has_match = False
for ptrn, mapper in ((transformed_vec_ptrn, transformed), (vec_ptrn, varname_map)):
match = ptrn.match(varname)
if match:
base_name = match.group(1)
if base_name not in mapper:
mapper[base_name] = set()
mapper[base_name].add(varname)
has_match = True
if not has_match:
if varname not in varname_map:
varname_map[varname] = set()
varname_map[varname].add(varname)
return varname_map, transformed
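# Editor's usage sketch (not part of the original module): vector components
# such as 'eta__0'/'eta__1' collapse onto their base name, while names matching
# neither pattern map to themselves.
def _untransform_varnames_demo():
    varname_map, transformed = untransform_varnames(['eta__0', 'eta__1', 'theta'])
    assert varname_map['eta'] == {'eta__0', 'eta__1'}
    assert varname_map['theta'] == {'theta'}
    return varname_map, transformed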
def expand_variable_names(trace, varnames):
"""
Expand the name of variables to include multidimensional variables
"""
tmp = []
for vtrace in pd.unique(trace.columns):
for varname in varnames:
if vtrace == varname or vtrace.startswith('{}__'.format(varname)):
tmp.append(vtrace)
return np.unique(tmp)
def get_stats(trace, stat=None, combined=True):
"""
get sampling statistics from trace
Parameters
----------
trace : Posterior sample
Pandas DataFrame or PyMC3 trace
    stat : string
Statistics
combined : Bool
If True multiple statistics from different chains will be combined together.
Returns
----------
    stat: array with the chosen statistic
"""
if type(trace).__name__ == 'MultiTrace':
try:
return trace.get_sampler_stats(stat, combine=combined)
except KeyError:
print('There is no {} information in the passed trace.'.format(stat))
elif isinstance(trace, pd.DataFrame):
try:
return trace[stat].values
except KeyError:
print('There is no {} information in the passed trace.'.format(stat))
else:
raise ValueError('The trace should be a DataFrame or a trace from PyMC3')
def get_varnames(trace, varnames):
if varnames is None:
return np.unique(trace.columns)
else:
return expand_variable_names(trace, varnames)
def log_post_trace(trace, model):
"""
Calculate the elementwise log-posterior for the sampled trace.
Currently only supports trace and models from PyMC3.
Parameters
----------
trace : trace object
Posterior samples
model : PyMC Model
Returns
-------
logp : array of shape (n_samples, n_observations)
The contribution of the observations to the logp of the whole model.
"""
tr_t = type(trace).__name__
mo_t = type(model).__name__
if tr_t == 'MultiTrace' and mo_t == 'Model':
cached = [(var, var.logp_elemwise) for var in model.observed_RVs]
def logp_vals_point(point):
if len(model.observed_RVs) == 0:
raise ValueError('The model does not contain observed values.')
logp_vals = []
for var, logp in cached:
logp = logp(point)
if var.missing_values:
logp = logp[~var.observations.mask]
logp_vals.append(logp.ravel())
return np.concatenate(logp_vals)
points = trace.points()
logp = (logp_vals_point(point) for point in points)
return np.stack(logp)
else:
raise ValueError('Currently only supports trace and models from PyMC3.')
def trace_to_dataframe(trace, combined=True):
"""Convert trace to Pandas DataFrame.
Parameters
----------
trace : trace
PyMC3's trace or Pandas DataFrame
combined : Bool
If True multiple chains will be combined together in the same columns. Otherwise they will
be assigned to separate columns.
"""
if type(trace).__name__ == 'MultiTrace':
var_shapes = trace._straces[0].var_shapes # pylint: disable=protected-access
varnames = [var for var in var_shapes.keys() if not _is_transformed_name(str(var))]
flat_names = {v: _create_flat_names(v, var_shapes[v]) for v in varnames}
var_dfs = []
for varname in varnames:
vals = trace.get_values(varname, combine=combined)
if isinstance(vals, list):
for val in vals:
flat_vals = val.reshape(val.shape[0], -1)
var_dfs.append(pd.DataFrame(flat_vals, columns=flat_names[varname]))
else:
flat_vals = vals.reshape(vals.shape[0], -1)
var_dfs.append(pd.DataFrame(flat_vals, columns=flat_names[varname]))
elif isinstance(trace, pd.DataFrame):
if combined:
varnames = get_varnames(trace, trace.columns)
trace = pd.DataFrame({v: trace[v].values.ravel() for v in varnames})
return trace
else:
raise ValueError('The trace should be a DataFrame or a trace from PyMC3')
return pd.concat(var_dfs, axis=1)
def _create_flat_names(varname, shape):
"""Return flat variable names for `varname` of `shape`.
Examples
--------
    >>> _create_flat_names('x', (5,))
['x__0', 'x__1', 'x__2', 'x__3', 'x__4']
    >>> _create_flat_names('x', (2, 2))
['x__0_0', 'x__0_1', 'x__1_0', 'x__1_1']
"""
if not shape:
return [varname]
labels = (np.ravel(xs).tolist() for xs in np.indices(shape))
labels = (map(str, xs) for xs in labels)
return ['{}__{}'.format(varname, '_'.join(idxs)) for idxs in zip(*labels)]
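# Editor's sketch of how the np.indices-based labels above come together: for
# shape (2, 2) the two index grids ravel to [0, 0, 1, 1] and [0, 1, 0, 1],
# which zip into the '0_0', '0_1', '1_0', '1_1' suffixes.
def _flat_names_demo():
    assert _create_flat_names('x', ()) == ['x']
    assert _create_flat_names('x', (2, 2)) == ['x__0_0', 'x__0_1', 'x__1_0', 'x__1_1']
    return _create_flat_names('x', (3,))   # ['x__0', 'x__1', 'x__2']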
def _is_transformed_name(name):
"""
Quickly check if a name was transformed with `get_transformed_name`
Parameters
----------
name : str
Name to check
Returns
-------
bool
Boolean, whether the string could have been produced by `get_transformed_name`
"""
return name.endswith('__') and name.count('_') >= 3
def save_trace(trace, filename='trace.gzip', compression='gzip', combined=False):
"""
Save trace to a csv file. Duplicated columns names will be preserved, if any.
Parameters
----------
trace : trace
PyMC3's trace or Pandas DataFrame
    filename : str
name or path of the file to save trace
compression : str, optional
String representing the compression to use in the output file, allowed values are
'gzip' (default), 'bz2' and 'xz'.
combined : Bool
If True multiple chains will be combined together in the same columns. Otherwise they will
be assigned to separate columns. Defaults to False
"""
trace = trace_to_dataframe(trace, combined=combined)
trace.to_csv(filename, compression=compression)
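# Editor's usage sketch (hypothetical file name): any DataFrame works here
# because trace_to_dataframe passes DataFrames through, and the compression
# string is handed to pandas' to_csv.
def _save_trace_usage_demo(df):
    save_trace(df, filename='trace.gzip', compression='gzip', combined=True)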
def load_trace(filepath, combined=False):
"""
Load csv file into a DataFrame. Duplicated columns names will be preserved, if any.
Parameters
----------
filepath : str
name or path of the file to save trace
combined : Bool
If True multiple chains will be combined together in the same columns. Otherwise they will
be assigned to separate columns. Defaults to False
"""
ext = os.path.splitext(filepath)[1][1:]
df = | pd.read_csv(filepath, index_col=0, compression=ext) | pandas.read_csv |
"""Download the network from Netzschleuder:
https://networks.skewed.de
"""
import sys
import graph_tool.all as gt
import numpy as np
# %%
import pandas as pd
from scipy import sparse
from scipy.sparse.csgraph import connected_components
if "snakemake" in sys.modules:
net_name = snakemake.params["net_name"]
output_file = snakemake.output["output_file"]
else:
net_name = "dblp_cite"
output_file = "../data/"
#
# Load
#
g = gt.collection.ns[net_name]
A = gt.adjacency(g).T
#
# Get the largest component
#
_components, labels = connected_components(
csgraph=A, directed=False, return_labels=True
)
lab, sz = np.unique(labels, return_counts=True)
inLargestComp = np.where(lab[np.argmax(sz)] == labels)[0]
A = A[inLargestComp, :][:, inLargestComp]
#
# Save
#
r, c, _ = sparse.find(A)
df = | pd.DataFrame({"src": r, "trg": c}) | pandas.DataFrame |
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
from sklearn.inspection import partial_dependence
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from sklearn import svm
from sklearn.datasets import load_boston
from articles.pd.support import load_rent, load_bulldozer, load_flights, \
toy_weather_data, toy_weight_data, \
df_cat_to_catcode, df_split_dates, \
df_string_to_cat, synthetic_interaction_data
from stratx import plot_stratpd, plot_catstratpd, \
plot_stratpd_gridsearch, plot_catstratpd_gridsearch
from stratx.partdep import partial_dependence
from stratx.plot import marginal_plot_, plot_ice, plot_catice
from stratx.ice import predict_ice, predict_catice, friedman_partial_dependence
import inspect
import matplotlib.patches as mpatches
from collections import OrderedDict
import matplotlib.pyplot as plt
import os
import shap
import xgboost as xgb
from colour import rgb2hex, Color
from dtreeviz.trees import tree, ShadowDecTree
figsize = (2.5, 2)
figsize2 = (3.8, 3.2)
GREY = '#444443'
# This genfigs.py code is just demonstration code to generate figures for the paper.
# There are lots of programming sins committed here; do not take this to be
# our idea of good code. ;)
# For data sources, please see notebooks/examples.ipynb
def addnoise(df, n=1, c=0.5, prefix=''):
if n == 1:
df[f'{prefix}noise'] = np.random.random(len(df)) * c
return
for i in range(n):
df[f'{prefix}noise{i + 1}'] = np.random.random(len(df)) * c
def fix_missing_num(df, colname):
df[colname + '_na'] = pd.isnull(df[colname])
df[colname].fillna(df[colname].median(), inplace=True)
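# Editor's sketch (toy DataFrame, not from the paper's data): fix_missing_num
# adds a boolean "<col>_na" indicator and median-fills the original column, so
# the model can still see where values were imputed.
def _fix_missing_num_demo():
    toy = pd.DataFrame({'YearMade': [1995.0, np.nan, 2005.0]})
    fix_missing_num(toy, 'YearMade')
    # toy['YearMade'] -> [1995.0, 2000.0, 2005.0]; toy['YearMade_na'] -> [False, True, False]
    return toy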
def savefig(filename, pad=0):
plt.tight_layout(pad=pad, w_pad=0, h_pad=0)
plt.savefig(f"images/{filename}.pdf", bbox_inches="tight", pad_inches=0)
# plt.savefig(f"images/{filename}.png", dpi=150)
plt.tight_layout()
plt.show()
plt.close()
def rent():
print(f"----------- {inspect.stack()[0][3]} -----------")
np.random.seed(1) # pick seed for reproducible article images
X,y = load_rent(n=10_000)
df_rent = X.copy()
df_rent['price'] = y
colname = 'bedrooms'
colname = 'bathrooms'
TUNE_RF = False
TUNE_XGB = False
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
if TUNE_RF:
rf, bestparams = tune_RF(X, y) # does CV on entire data set to tune
# bedrooms
# RF best: {'max_features': 0.3, 'min_samples_leaf': 1, 'n_estimators': 125}
# validation R^2 0.7873724127323822
# bathrooms
# RF best: {'max_features': 0.3, 'min_samples_leaf': 1, 'n_estimators': 200}
# validation R^2 0.8066593395345907
else:
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1, max_features=.3,
oob_score=True, n_jobs=-1)
rf.fit(X_train, y_train) # Use training set for plotting
print("RF OOB R^2", rf.oob_score_)
rf_score = rf.score(X_test, y_test)
print("RF validation R^2", rf_score)
if TUNE_XGB:
tuned_parameters = {'n_estimators': [400, 450, 500, 600, 1000],
'learning_rate': [0.008, 0.01, 0.02, 0.05, 0.08, 0.1, 0.11],
'max_depth': [3, 4, 5, 6, 7, 8, 9]}
grid = GridSearchCV(
xgb.XGBRegressor(), tuned_parameters, scoring='r2',
cv=5,
n_jobs=-1,
verbose=2
)
grid.fit(X, y) # does CV on entire data set to tune
print("XGB best:", grid.best_params_)
b = grid.best_estimator_
# bedrooms
# XGB best: {'max_depth': 7, 'n_estimators': 250}
# XGB validation R^2 0.7945797751555217
# bathrooms
# XGB best: {'learning_rate': 0.11, 'max_depth': 6, 'n_estimators': 1000}
# XGB train R^2 0.9834399795800324
# XGB validation R^2 0.8244958014380593
else:
b = xgb.XGBRegressor(n_estimators=1000,
max_depth=6,
learning_rate=.11,
verbose=2,
n_jobs=8)
b.fit(X_train, y_train)
xgb_score = b.score(X_test, y_test)
print("XGB validation R^2", xgb_score)
lm = LinearRegression()
lm.fit(X_train, y_train)
lm_score = lm.score(X_test, y_test)
print("OLS validation R^2", lm_score)
lm.fit(X, y)
model, r2_keras = rent_deep_learning_model(X_train, y_train, X_test, y_test)
fig, axes = plt.subplots(1, 6, figsize=(10, 1.8),
gridspec_kw = {'wspace':0.15})
for i in range(len(axes)):
axes[i].set_xlim(0-.3,4+.3)
axes[i].set_xticks([0,1,2,3,4])
axes[i].set_ylim(1800, 9000)
axes[i].set_yticks([2000,4000,6000,8000])
axes[1].get_yaxis().set_visible(False)
axes[2].get_yaxis().set_visible(False)
axes[3].get_yaxis().set_visible(False)
axes[4].get_yaxis().set_visible(False)
axes[0].set_title("(a) Marginal", fontsize=10)
axes[1].set_title("(b) RF", fontsize=10)
axes[1].text(2,8000, f"$R^2=${rf_score:.3f}", horizontalalignment='center', fontsize=9)
axes[2].set_title("(c) XGBoost", fontsize=10)
axes[2].text(2,8000, f"$R^2=${xgb_score:.3f}", horizontalalignment='center', fontsize=9)
axes[3].set_title("(d) OLS", fontsize=10)
axes[3].text(2,8000, f"$R^2=${lm_score:.3f}", horizontalalignment='center', fontsize=9)
axes[4].set_title("(e) Keras", fontsize=10)
axes[4].text(2,8000, f"$R^2=${r2_keras:.3f}", horizontalalignment='center', fontsize=9)
axes[5].set_title("(f) StratPD", fontsize=10)
avg_per_baths = df_rent.groupby(colname).mean()['price']
axes[0].scatter(df_rent[colname], df_rent['price'], alpha=0.07, s=5)
axes[0].scatter(np.unique(df_rent[colname]), avg_per_baths, s=6, c='black',
label="average price/{colname}")
axes[0].set_ylabel("price") # , fontsize=12)
axes[0].set_xlabel("bathrooms")
axes[0].spines['right'].set_visible(False)
axes[0].spines['top'].set_visible(False)
ice = predict_ice(rf, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[1], show_xlabel=True,
show_ylabel=False)
ice = predict_ice(b, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[2], show_ylabel=False)
ice = predict_ice(lm, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[3], show_ylabel=False)
scaler = StandardScaler()
X_train_ = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns)
# y_pred = model.predict(X_)
# print("Keras training R^2", r2_score(y, y_pred)) # y_test in y
ice = predict_ice(model, X_train_, colname, 'price', numx=30, nlines=100)
# replace normalized unique X with unnormalized
ice.iloc[0, :] = np.linspace(np.min(X_train[colname]), np.max(X_train[colname]), 30, endpoint=True)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[4], show_ylabel=True)
pdpx, pdpy, ignored = \
plot_stratpd(X, y, colname, 'price', ax=axes[5],
pdp_marker_size=6,
show_x_counts=False,
hide_top_right_axes=False,
show_xlabel=True, show_ylabel=False)
print(f"StratPD ignored {ignored} records")
axes[5].yaxis.tick_right()
axes[5].yaxis.set_label_position('right')
axes[5].set_ylim(-250,2250)
axes[5].set_yticks([0,1000,2000])
axes[5].set_ylabel("price")
savefig(f"{colname}_vs_price")
def tune_RF(X, y, verbose=2):
tuned_parameters = {'n_estimators': [50, 100, 125, 150, 200],
'min_samples_leaf': [1, 3, 5, 7],
'max_features': [.1, .3, .5, .7, .9]}
grid = GridSearchCV(
RandomForestRegressor(), tuned_parameters, scoring='r2',
cv=5,
n_jobs=-1,
verbose=verbose
)
grid.fit(X, y) # does CV on entire data set
rf = grid.best_estimator_
print("RF best:", grid.best_params_)
#
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# rf.fit(X_train, y_train)
# print("validation R^2", rf.score(X_test, y_test))
return rf, grid.best_params_
def plot_with_noise_col(df, colname):
features = ['bedrooms', 'bathrooms', 'latitude', 'longitude']
features_with_noise = ['bedrooms', 'bathrooms', 'latitude', 'longitude',
colname + '_noise']
type = "noise"
fig, axes = plt.subplots(2, 2, figsize=(5, 5), sharey=True, sharex=True)
df = df.copy()
addnoise(df, n=1, c=50, prefix=colname + '_')
X = df[features]
y = df['price']
# STRATPD ON ROW 1
X = df[features]
y = df['price']
plot_stratpd(X, y, colname, 'price', ax=axes[0, 0], slope_line_alpha=.15, show_xlabel=True,
show_ylabel=False)
axes[0, 0].set_ylim(-1000, 5000)
axes[0, 0].set_title(f"StratPD")
X = df[features_with_noise]
y = df['price']
plot_stratpd(X, y, colname, 'price', ax=axes[0, 1], slope_line_alpha=.15,
show_ylabel=False)
axes[0, 1].set_ylim(-1000, 5000)
axes[0, 1].set_title(f"StratPD w/{type} col")
# ICE ON ROW 2
X = df[features]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
# do it w/o dup'd column
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
uniq_x, pdp_curve = \
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 0], show_xlabel=True)
axes[1, 0].set_ylim(-1000, 5000)
axes[1, 0].set_title(f"FPD/ICE")
for i in range(2):
for j in range(2):
axes[i, j].set_xlim(0, 6)
X = df[features_with_noise]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
uniq_x_, pdp_curve_ = \
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 1], show_xlabel=True,
show_ylabel=False)
axes[1, 1].set_ylim(-1000, 5000)
axes[1, 1].set_title(f"FPD/ICE w/{type} col")
# print(f"max ICE curve {np.max(pdp_curve):.0f}, max curve with dup {np.max(pdp_curve_):.0f}")
axes[0, 0].get_xaxis().set_visible(False)
axes[0, 1].get_xaxis().set_visible(False)
def plot_with_dup_col(df, colname, min_samples_leaf):
features = ['bedrooms', 'bathrooms', 'latitude', 'longitude']
features_with_dup = ['bedrooms', 'bathrooms', 'latitude', 'longitude',
colname + '_dup']
fig, axes = plt.subplots(2, 3, figsize=(7.5, 5), sharey=True, sharex=True)
type = "dup"
verbose = False
df = df.copy()
df[colname + '_dup'] = df[colname]
# df_rent[colname+'_dupdup'] = df_rent[colname]
# STRATPD ON ROW 1
X = df[features]
y = df['price']
print(f"shape is {X.shape}")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 0], slope_line_alpha=.15,
show_xlabel=True,
min_samples_leaf=min_samples_leaf,
show_ylabel=True,
verbose=verbose)
axes[0, 0].set_ylim(-1000, 5000)
axes[0, 0].set_title(f"StratPD")
X = df[features_with_dup]
y = df['price']
print(f"shape with dup is {X.shape}")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 1], slope_line_alpha=.15, show_ylabel=False,
min_samples_leaf=min_samples_leaf,
verbose=verbose)
axes[0, 1].set_ylim(-1000, 5000)
axes[0, 1].set_title(f"StratPD w/{type} col")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 2], slope_line_alpha=.15, show_xlabel=True,
min_samples_leaf=min_samples_leaf,
show_ylabel=False,
n_trees=15,
max_features=1,
bootstrap=False,
verbose=verbose
)
axes[0, 2].set_ylim(-1000, 5000)
axes[0, 2].set_title(f"StratPD w/{type} col")
axes[0, 2].text(.2, 4000, "ntrees=15")
axes[0, 2].text(.2, 3500, "max features per split=1")
# ICE ON ROW 2
X = df[features]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
# do it w/o dup'd column
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 0], show_xlabel=True)
axes[1, 0].set_ylim(-1000, 5000)
axes[1, 0].set_title(f"FPD/ICE")
for i in range(2):
for j in range(3):
axes[i, j].set_xlim(0, 6)
# with dup'd column
X = df[features_with_dup]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 1], show_xlabel=True, show_ylabel=False)
axes[1, 1].set_ylim(-1000, 5000)
axes[1, 1].set_title(f"FPD/ICE w/{type} col")
# print(f"max ICE curve {np.max(pdp_curve):.0f}, max curve with dup {np.max(pdp_curve_):.0f}")
axes[1, 2].set_title(f"FPD/ICE w/{type} col")
axes[1, 2].text(.2, 4000, "Cannot compensate")
axes[1, 2].set_xlabel(colname)
# print(f"max curve {np.max(curve):.0f}, max curve with dup {np.max(curve_):.0f}")
axes[0, 0].get_xaxis().set_visible(False)
axes[0, 1].get_xaxis().set_visible(False)
def rent_ntrees():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y = load_rent(n=10_000)
trees = [1, 5, 10, 30]
supervised = True
def onevar(colname, row, yrange=None):
alphas = [.1,.08,.05,.04]
for i, t in enumerate(trees):
plot_stratpd(X, y, colname, 'price', ax=axes[row, i], slope_line_alpha=alphas[i],
# min_samples_leaf=20,
yrange=yrange,
supervised=supervised,
show_ylabel=t == 1,
pdp_marker_size=2 if row==2 else 8,
n_trees=t,
max_features='auto',
bootstrap=True,
verbose=False)
fig, axes = plt.subplots(3, 4, figsize=(8, 6), sharey=True)
for i in range(1, 4):
axes[0, i].get_yaxis().set_visible(False)
axes[1, i].get_yaxis().set_visible(False)
axes[2, i].get_yaxis().set_visible(False)
for i in range(0, 4):
axes[0, i].set_title(f"{trees[i]} trees")
onevar('bedrooms', row=0, yrange=(-500, 4000))
onevar('bathrooms', row=1, yrange=(-500, 4000))
onevar('latitude', row=2, yrange=(-500, 4000))
savefig(f"rent_ntrees")
plt.close()
def meta_boston():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
boston = load_boston()
print(len(boston.data))
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
X = df.drop('MEDV', axis=1)
y = df['MEDV']
plot_stratpd_gridsearch(X, y, 'AGE', 'MEDV',
show_slope_lines=True,
min_samples_leaf_values=[2,5,10,20,30],
yrange=(-10,10))
# yranges = [(-30, 0), (0, 30), (-8, 8), (-11, 0)]
# for nbins in range(6):
# plot_meta_multivar(X, y, colnames=['LSTAT', 'RM', 'CRIM', 'DIS'], targetname='MEDV',
# nbins=nbins,
# yranges=yranges)
savefig(f"meta_boston_age_medv")
def plot_meta_multivar(X, y, colnames, targetname, nbins, yranges=None):
np.random.seed(1) # pick seed for reproducible article images
min_samples_leaf_values = [2, 5, 10, 30, 50, 100, 200]
nrows = len(colnames)
ncols = len(min_samples_leaf_values)
fig, axes = plt.subplots(nrows, ncols + 2, figsize=((ncols + 2) * 2.5, nrows * 2.5))
if yranges is None:
yranges = [None] * len(colnames)
row = 0
for i, colname in enumerate(colnames):
marginal_plot_(X, y, colname, targetname, ax=axes[row, 0])
col = 2
for msl in min_samples_leaf_values:
print(
f"---------- min_samples_leaf={msl}, nbins={nbins:.2f} ----------- ")
plot_stratpd(X, y, colname, targetname, ax=axes[row, col],
min_samples_leaf=msl,
yrange=yranges[i],
n_trees=1)
axes[row, col].set_title(
f"leafsz={msl}, nbins={nbins:.2f}",
fontsize=9)
col += 1
row += 1
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
row = 0
for i, colname in enumerate(colnames):
ice = predict_ice(rf, X, colname, targetname)
plot_ice(ice, colname, targetname, ax=axes[row, 1])
row += 1
def unsup_rent():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y = load_rent(n=10_000)
fig, axes = plt.subplots(4, 2, figsize=(4, 8))
plot_stratpd(X, y, 'bedrooms', 'price', ax=axes[0, 0], yrange=(-500,4000),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'bedrooms', 'price', ax=axes[0, 1], yrange=(-500,4000),
slope_line_alpha=.2, supervised=True)
plot_stratpd(X, y, 'bathrooms', 'price', ax=axes[1, 0], yrange=(-500,4000),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'bathrooms', 'price', ax=axes[1, 1], yrange=(-500,4000),
slope_line_alpha=.2, supervised=True)
plot_stratpd(X, y, 'latitude', 'price', ax=axes[2, 0], yrange=(-500,2000),
slope_line_alpha=.2, supervised=False, verbose=True)
plot_stratpd(X, y, 'latitude', 'price', ax=axes[2, 1], yrange=(-500,2000),
slope_line_alpha=.2, supervised=True, verbose=True)
plot_stratpd(X, y, 'longitude', 'price', ax=axes[3, 0], yrange=(-500,500),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'longitude', 'price', ax=axes[3, 1], yrange=(-500,500),
slope_line_alpha=.2, supervised=True)
axes[0, 0].set_title("Unsupervised")
axes[0, 1].set_title("Supervised")
for i in range(3):
axes[i, 1].get_yaxis().set_visible(False)
savefig(f"rent_unsup")
plt.close()
def weather():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
TUNE_RF = False
df_raw = toy_weather_data()
df = df_raw.copy()
df_string_to_cat(df)
names = np.unique(df['state'])
catnames = OrderedDict()
for i,v in enumerate(names):
catnames[i+1] = v
df_cat_to_catcode(df)
X = df.drop('temperature', axis=1)
y = df['temperature']
# cats = catencoders['state'].values
# cats = np.insert(cats, 0, None) # prepend a None for catcode 0
if TUNE_RF:
rf, bestparams = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 5, 'n_estimators': 150}
# validation R^2 0.9500072628270099
else:
rf = RandomForestRegressor(n_estimators=150, min_samples_leaf=5, max_features=0.9, oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
fig, ax = plt.subplots(1, 1, figsize=figsize)
df = df_raw.copy()
avgtmp = df.groupby(['state', 'dayofyear'])[['temperature']].mean()
avgtmp = avgtmp.reset_index()
ca = avgtmp.query('state=="CA"')
co = avgtmp.query('state=="CO"')
az = avgtmp.query('state=="AZ"')
wa = avgtmp.query('state=="WA"')
nv = avgtmp.query('state=="NV"')
ax.plot(ca['dayofyear'], ca['temperature'], lw=.5, c='#fdae61', label="CA")
ax.plot(co['dayofyear'], co['temperature'], lw=.5, c='#225ea8', label="CO")
ax.plot(az['dayofyear'], az['temperature'], lw=.5, c='#41b6c4', label="AZ")
ax.plot(wa['dayofyear'], wa['temperature'], lw=.5, c='#a1dab4', label="WA")
ax.plot(nv['dayofyear'], nv['temperature'], lw=.5, c='#a1dab4', label="NV")
ax.legend(loc='upper left', borderpad=0, labelspacing=0)
ax.set_xlabel("dayofyear")
ax.set_ylabel("temperature")
ax.set_title("(a) State/day vs temp")
savefig(f"dayofyear_vs_temp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'dayofyear', 'temperature', ax=ax,
show_x_counts=False,
yrange=(-10, 10),
pdp_marker_size=2, slope_line_alpha=.5, n_trials=1)
ax.set_title("(b) StratPD")
savefig(f"dayofyear_vs_temp_stratpd")
plt.close()
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_catstratpd(X, y, 'state', 'temperature', catnames=catnames,
show_x_counts=False,
# min_samples_leaf=30,
min_y_shifted_to_zero=True,
# alpha=.3,
ax=ax,
yrange=(-1, 55))
ax.set_yticks([0,10,20,30,40,50])
ax.set_title("(d) CatStratPD")
savefig(f"state_vs_temp_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_ice(rf, X, 'dayofyear', 'temperature')
plot_ice(ice, 'dayofyear', 'temperature', ax=ax)
ax.set_title("(c) FPD/ICE")
savefig(f"dayofyear_vs_temp_pdp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_catice(rf, X, 'state', 'temperature')
plot_catice(ice, 'state', 'temperature', catnames=catnames, ax=ax,
pdp_marker_size=15,
min_y_shifted_to_zero = True,
yrange=(-2, 50)
)
ax.set_yticks([0,10,20,30,40,50])
ax.set_title("(b) FPD/ICE")
savefig(f"state_vs_temp_pdp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.scatter(X['state'], y, alpha=.05, s=15)
ax.set_xticks(range(1,len(catnames)+1))
ax.set_xticklabels(catnames.values())
ax.set_xlabel("state")
ax.set_ylabel("temperature")
ax.set_title("(a) Marginal")
savefig(f"state_vs_temp")
plt.close()
def meta_weather():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
# np.random.seed(66)
nyears = 5
years = []
for y in range(1980, 1980 + nyears):
df_ = toy_weather_data()
df_['year'] = y
years.append(df_)
df_raw = pd.concat(years, axis=0)
# df_raw.drop('year', axis=1, inplace=True)
df = df_raw.copy()
print(df.head(5))
names = {'CO': 5, 'CA': 10, 'AZ': 15, 'WA': 20}
df['state'] = df['state'].map(names)
catnames = {v:k for k,v in names.items()}
X = df.drop('temperature', axis=1)
y = df['temperature']
plot_catstratpd_gridsearch(X, y, 'state', 'temp',
min_samples_leaf_values=[2, 5, 20, 40, 60],
catnames=catnames,
yrange=(-5,60),
cellwidth=2
)
savefig(f"state_temp_meta")
plot_stratpd_gridsearch(X, y, 'dayofyear', 'temp',
show_slope_lines=True,
min_samples_leaf_values=[2,5,10,20,30],
yrange=(-10,10),
slope_line_alpha=.15)
savefig(f"dayofyear_temp_meta")
def weight():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y, df_raw, eqn = toy_weight_data(2000)
TUNE_RF = False
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'education', 'weight', ax=ax,
show_x_counts=False,
pdp_marker_size=5,
yrange=(-12, 0.05), slope_line_alpha=.1, show_ylabel=True)
# ax.get_yaxis().set_visible(False)
ax.set_title("StratPD", fontsize=10)
ax.set_xlim(10,18)
ax.set_xticks([10,12,14,16,18])
savefig(f"education_vs_weight_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'height', 'weight', ax=ax,
pdp_marker_size=.2,
show_x_counts=False,
yrange=(0, 160), show_ylabel=False)
# ax.get_yaxis().set_visible(False)
ax.set_title("StratPD", fontsize=10)
ax.set_xticks([60,65,70,75])
savefig(f"height_vs_weight_stratpd")
fig, ax = plt.subplots(1, 1, figsize=(1.3,2))
plot_catstratpd(X, y, 'sex', 'weight', ax=ax,
show_x_counts=False,
catnames={0:'M',1:'F'},
yrange=(-1, 35),
)
ax.set_title("CatStratPD", fontsize=10)
savefig(f"sex_vs_weight_stratpd")
fig, ax = plt.subplots(1, 1, figsize=(1.5,1.8))
plot_catstratpd(X, y, 'pregnant', 'weight', ax=ax,
show_x_counts=False,
catnames={0:False, 1:True},
yrange=(-1, 45),
)
ax.set_title("CatStratPD", fontsize=10)
savefig(f"pregnant_vs_weight_stratpd")
if TUNE_RF:
rf, bestparams = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 200}
# validation R^2 0.9996343699640691
else:
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1, max_features=0.9, oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
# show pregnant female at max range drops going taller
X_test = np.array([[1, 1, 70, 10]])
y_pred = rf.predict(X_test)
print("pregnant female at max range", X_test, "predicts", y_pred)
X_test = np.array([[1, 1, 72, 10]]) # make them taller
y_pred = rf.predict(X_test)
print("pregnant female in male height range", X_test, "predicts", y_pred)
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_ice(rf, X, 'education', 'weight')
plot_ice(ice, 'education', 'weight', ax=ax, yrange=(-12, 0), min_y_shifted_to_zero=True)
ax.set_xlim(10,18)
ax.set_xticks([10,12,14,16,18])
ax.set_title("FPD/ICE", fontsize=10)
savefig(f"education_vs_weight_pdp")
fig, ax = plt.subplots(1, 1, figsize=(2.4, 2.2))
ice = predict_ice(rf, X, 'height', 'weight')
plot_ice(ice, 'height', 'weight', ax=ax, pdp_linewidth=2, yrange=(100, 250),
min_y_shifted_to_zero=False)
ax.set_xlabel("height\n(a)", fontsize=10)
ax.set_ylabel("weight", fontsize=10)
ax.set_title("FPD/ICE", fontsize=10)
ax.set_xticks([60,65,70,75])
savefig(f"height_vs_weight_pdp")
fig, ax = plt.subplots(1, 1, figsize=(1.3,2))
ice = predict_catice(rf, X, 'sex', 'weight')
plot_catice(ice, 'sex', 'weight', catnames={0:'M',1:'F'}, ax=ax, yrange=(0, 35),
pdp_marker_size=15)
ax.set_title("FPD/ICE", fontsize=10)
savefig(f"sex_vs_weight_pdp")
fig, ax = plt.subplots(1, 1, figsize=(1.3,1.8))
ice = predict_catice(rf, X, 'pregnant', 'weight', cats=df_raw['pregnant'].unique())
    plot_catice(ice, 'pregnant', 'weight', catnames={0:False,1:True}, ax=ax,
min_y_shifted_to_zero=True,
yrange=(-5, 45), pdp_marker_size=20)
ax.set_title("FPD/ICE", fontsize=10)
savefig(f"pregnant_vs_weight_pdp")
def shap_pregnant():
np.random.seed(1) # pick seed for reproducible article images
n = 2000
shap_test_size = 300
X, y, df_raw, eqn = toy_weight_data(n=n)
df = df_raw.copy()
df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
# parameters from tune_RF() called in weight()
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1,
max_features=0.9,
oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
GREY = '#444443'
fig, ax = plt.subplots(1, 1, figsize=(1.3,1.8))
preg_shap_values = shap_values[:, 1]
avg_not_preg_weight = np.mean(preg_shap_values[np.where(shap_sample['pregnant']==0)])
avg_preg_weight = np.mean(preg_shap_values[np.where(shap_sample['pregnant']==1)])
ax.bar([0, 1], [avg_not_preg_weight-avg_not_preg_weight, avg_preg_weight-avg_not_preg_weight],
color='#1E88E5')
ax.set_title("SHAP", fontsize=10)
ax.set_xlabel("pregnant")
ax.set_xticks([0,1])
ax.set_xticklabels(['False','True'])
ax.set_ylabel("weight")
ax.set_ylim(-1,45)
ax.set_yticks([0,10,20,30,40])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
savefig('pregnant_vs_weight_shap')
def shap_weight(feature_perturbation, twin=False):
np.random.seed(1) # pick seed for reproducible article images
n = 2000
shap_test_size = 2000
X, y, df_raw, eqn = toy_weight_data(n=n)
df = df_raw.copy()
df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
# parameters from tune_RF() called in weight()
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1,
max_features=0.9,
oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
if feature_perturbation=='interventional':
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100), feature_perturbation='interventional')
xlabel = "height\n(c)"
ylabel = None
yticks = []
figsize = (2.2, 2.2)
else:
explainer = shap.TreeExplainer(rf, feature_perturbation='tree_path_dependent')
xlabel = "height\n(b)"
ylabel = "SHAP height"
yticks = [-75, -60, -40, -20, 0, 20, 40, 60, 75]
figsize = (2.6, 2.2)
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
df_shap = pd.DataFrame()
df_shap['weight'] = shap_values[:, 2]
df_shap['height'] = shap_sample.iloc[:, 2]
# pdpy = df_shap.groupby('height').mean().reset_index()
# print("len pdpy", len(pdpy))
GREY = '#444443'
fig, ax = plt.subplots(1, 1, figsize=figsize)
shap.dependence_plot("height", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=1)
# ax.plot(pdpy['height'], pdpy['weight'], '.', c='k', markersize=.5, alpha=.5)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['right'].set_linewidth(.5)
ax.spines['top'].set_linewidth(.5)
ax.set_ylabel(ylabel, fontsize=10, labelpad=0)
ax.set_xlabel(xlabel, fontsize=10)
ax.tick_params(axis='both', which='major', labelsize=10)
ax.plot([70,70], [-75,75], '--', lw=.6, color=GREY)
ax.text(69.8,60, "Max female", horizontalalignment='right',
fontsize=9)
leaf_xranges, leaf_slopes, slope_counts_at_x, dx, slope_at_x, pdpx, pdpy, ignored = \
partial_dependence(X=X, y=y, colname='height')
ax.set_ylim(-77,75)
# ax.set_xlim(min(pdpx), max(pdpx))
ax.set_xticks([60,65,70,75])
ax.set_yticks(yticks)
ax.set_title(f"SHAP {feature_perturbation}", fontsize=10)
# ax.set_ylim(-40,70)
print(min(pdpx), max(pdpx))
print(min(pdpy), max(pdpy))
rise = max(pdpy) - min(pdpy)
run = max(pdpx) - min(pdpx)
slope = rise/run
print(slope)
# ax.plot([min(pdpx),max(pdpyX['height'])], [0,]
if twin:
ax2 = ax.twinx()
# ax2.set_xlim(min(pdpx), max(pdpx))
ax2.set_ylim(min(pdpy)-5, max(pdpy)+5)
ax2.set_xticks([60,65,70,75])
ax2.set_yticks([0,20,40,60,80,100,120,140,150])
# ax2.set_ylabel("weight", fontsize=12)
ax2.plot(pdpx, pdpy, '.', markersize=1, c='k')
# ax2.text(65,25, f"StratPD slope = {slope:.1f}")
ax2.annotate(f"StratPD", (64.65,39), xytext=(66,18),
horizontalalignment='left',
arrowprops=dict(facecolor='black', width=.5, headwidth=5, headlength=5),
fontsize=9)
savefig(f"weight_{feature_perturbation}_shap")
def saledayofweek():
np.random.seed(1) # pick seed for reproducible article images
n = 10_000
shap_test_size = 1000
TUNE_RF = False
X, y = load_bulldozer(n=n)
avgprice = pd.concat([X,y], axis=1).groupby('saledayofweek')[['SalePrice']].mean()
avgprice = avgprice.reset_index()['SalePrice']
print(avgprice)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(range(0,7), avgprice, s=20, c='k')
ax.scatter(X['saledayofweek'], y, s=3, alpha=.1, c='#1E88E5')
# ax.set_xlim(1960,2010)
ax.set_xlabel("saledayofweek\n(a)", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_saledayofweek_marginal")
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("saledayofweek", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("SHAP", fontsize=13)
ax.set_ylabel("Impact on SalePrice\n(saledayofweek SHAP)", fontsize=11)
ax.set_xlabel("saledayofweek\n(b)", fontsize=11)
# ax.set_xlim(1960, 2010)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_saledayofweek_shap")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_catstratpd(X, y, colname='saledayofweek', targetname='SalePrice',
catnames={0:'M',1:'T',2:'W',3:'R',4:'F',5:'S',6:'S'},
n_trials=1,
bootstrap=True,
show_x_counts=True,
show_xlabel=False,
show_impact=False,
pdp_marker_size=4,
pdp_marker_alpha=1,
ax=ax
)
ax.set_title("StratPD", fontsize=13)
ax.set_xlabel("saledayofweek\n(d)", fontsize=11)
# ax.set_xlim(1960,2010)
# ax.set_ylim(-10000,30_000)
savefig(f"bulldozer_saledayofweek_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "saledayofweek", 'SalePrice', numx=30, nlines=100)
plot_ice(ice, "saledayofweek", 'SalePrice', alpha=.3, ax=ax, show_ylabel=True,
# yrange=(-10000,30_000),
min_y_shifted_to_zero=True)
# ax.set_xlim(1960, 2010)
savefig(f"bulldozer_saledayofweek_pdp")
def productsize():
np.random.seed(1) # pick seed for reproducible article images
shap_test_size = 1000
TUNE_RF = False
# reuse same data generated by gencsv.py for bulldozer to
# make same comparison.
df = pd.read_csv("bulldozer20k.csv")
X = df.drop('SalePrice', axis=1)
y = df['SalePrice']
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(X['ProductSize'], y, s=3, alpha=.1, c='#1E88E5')
# ax.set_xlim(1960,2010)
ax.set_xlabel("ProductSize\n(a)", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_ProductSize_marginal")
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
# SHAP
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("ProductSize", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("(b) SHAP", fontsize=13)
ax.set_ylabel("Impact on SalePrice\n(ProductSize SHAP)", fontsize=11)
ax.set_xlabel("ProductSize", fontsize=11)
# ax.set_xlim(1960, 2010)
ax.set_ylim(-15000,40_000)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_ProductSize_shap")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='ProductSize', targetname='SalePrice',
n_trials=10,
bootstrap=True,
show_slope_lines=False,
show_x_counts=False,
show_xlabel=False,
show_impact=False,
show_all_pdp=False,
pdp_marker_size=10,
pdp_marker_alpha=1,
ax=ax
)
ax.set_title("(d) StratPD", fontsize=13)
ax.set_xlabel("ProductSize", fontsize=11)
ax.set_xlim(0, 5)
ax.set_ylim(-15000,40_000)
savefig(f"bulldozer_ProductSize_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "ProductSize", 'SalePrice', numx=30, nlines=100)
plot_ice(ice, "ProductSize", 'SalePrice', alpha=.3, ax=ax, show_ylabel=True,
# yrange=(-10000,30_000),
min_y_shifted_to_zero=True)
# ax.set_xlim(1960, 2010)
ax.set_ylim(-15000,40_000)
ax.set_title("(a) FPD/ICE plot", fontsize=13)
savefig(f"bulldozer_ProductSize_pdp")
def saledayofyear():
np.random.seed(1) # pick seed for reproducible article images
n = 10_000
shap_test_size = 1000
TUNE_RF = False
X, y = load_bulldozer(n=n)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(X['saledayofyear'], y, s=3, alpha=.1, c='#1E88E5')
# ax.set_xlim(1960,2010)
ax.set_xlabel("saledayofyear\n(a)", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_saledayofyear_marginal")
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("saledayofyear", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("SHAP", fontsize=13)
ax.set_ylabel("Impact on SalePrice\n(saledayofyear SHAP)", fontsize=11)
ax.set_xlabel("saledayofyear\n(b)", fontsize=11)
# ax.set_xlim(1960, 2010)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_saledayofyear_shap")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='saledayofyear', targetname='SalePrice',
n_trials=10,
bootstrap=True,
show_all_pdp=False,
show_slope_lines=False,
show_x_counts=True,
show_xlabel=False,
show_impact=False,
pdp_marker_size=4,
pdp_marker_alpha=1,
ax=ax
)
ax.set_title("StratPD", fontsize=13)
ax.set_xlabel("saledayofyear\n(d)", fontsize=11)
# ax.set_xlim(1960,2010)
# ax.set_ylim(-10000,30_000)
savefig(f"bulldozer_saledayofyear_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "saledayofyear", 'SalePrice', numx=30, nlines=100)
plot_ice(ice, "saledayofyear", 'SalePrice', alpha=.3, ax=ax, show_ylabel=True,
# yrange=(-10000,30_000),
min_y_shifted_to_zero=True)
# ax.set_xlim(1960, 2010)
savefig(f"bulldozer_saledayofyear_pdp")
def yearmade():
np.random.seed(1) # pick seed for reproducible article images
n = 20_000
shap_test_size = 1000
TUNE_RF = False
# X, y = load_bulldozer(n=n)
# reuse same data generated by gencsv.py for bulldozer to
# make same comparison.
df = pd.read_csv("bulldozer20k.csv")
X = df.drop('SalePrice', axis=1)
y = df['SalePrice']
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(X['YearMade'], y, s=3, alpha=.1, c='#1E88E5')
ax.set_xlim(1960,2010)
ax.set_xlabel("YearMade", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("(a) Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_YearMade_marginal")
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("YearMade", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.yaxis.label.set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("(b) SHAP", fontsize=13)
ax.set_ylabel("Impact on SalePrice\n(YearMade SHAP)", fontsize=11)
ax.set_xlabel("YearMade", fontsize=11)
ax.set_xlim(1960, 2010)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_YearMade_shap")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='YearMade', targetname='SalePrice',
n_trials=10,
bootstrap=True,
show_slope_lines=False,
show_x_counts=True,
show_ylabel=False,
show_xlabel=False,
show_impact=False,
pdp_marker_size=4,
pdp_marker_alpha=1,
ax=ax
)
ax.set_title("(d) StratPD", fontsize=13)
ax.set_xlabel("YearMade", fontsize=11)
ax.set_xlim(1960,2010)
ax.set_ylim(-5000,30_000)
savefig(f"bulldozer_YearMade_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "YearMade", 'SalePrice', numx=30, nlines=100)
plot_ice(ice, "YearMade", 'SalePrice', alpha=.3, ax=ax, show_ylabel=True,
yrange=(20_000,55_000))
ax.set_xlabel("YearMade", fontsize=11)
ax.set_xlim(1960, 2010)
ax.set_title("(a) FPD/ICE plot", fontsize=13)
savefig(f"bulldozer_YearMade_pdp")
def MachineHours():
np.random.seed(1) # pick seed for reproducible article images
shap_test_size = 1000
TUNE_RF = False
# reuse same data generated by gencsv.py for bulldozer to
# make same comparison.
df = pd.read_csv("bulldozer20k.csv")
# DROP RECORDS WITH MISSING MachineHours VALUES
# df = df[df['MachineHours']!=3138]
X = df.drop('SalePrice', axis=1)
y = df['SalePrice']
if TUNE_RF:
rf, _ = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 150}
# validation R^2 0.8001628465688546
else:
rf = RandomForestRegressor(n_estimators=150, n_jobs=-1,
max_features=0.9,
min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print("RF OOB R^2", rf.oob_score_)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ax.scatter(X['MachineHours'], y, s=3, alpha=.1, c='#1E88E5')
ax.set_xlim(0,30_000)
ax.set_xlabel("MachineHours\n(a)", fontsize=11)
ax.set_ylabel("SalePrice ($)", fontsize=11)
ax.set_title("Marginal plot", fontsize=13)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
savefig(f"bulldozer_MachineHours_marginal")
# SHAP
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
shap.dependence_plot("MachineHours", shap_values, shap_sample,
interaction_index=None, ax=ax, dot_size=5,
show=False, alpha=.5)
ax.yaxis.label.set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.set_title("SHAP", fontsize=13)
ax.set_ylabel("SHAP MachineHours)", fontsize=11)
ax.set_xlabel("MachineHours\n(b)", fontsize=11)
ax.set_xlim(0,30_000)
ax.set_ylim(-3000,5000)
ax.tick_params(axis='both', which='major', labelsize=10)
savefig(f"bulldozer_MachineHours_shap")
# STRATPD
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='MachineHours', targetname='SalePrice',
n_trials=10,
bootstrap=True,
show_all_pdp=False,
show_slope_lines=False,
show_x_counts=True,
barchar_alpha=1.0,
barchar_color='k',
show_ylabel=False,
show_xlabel=False,
show_impact=False,
pdp_marker_size=1,
pdp_marker_alpha=.3,
ax=ax
)
# ax.annotate("Imputed median value", xytext=(10000,-5300),
# xy=(3138,-5200), fontsize=9,
# arrowprops={'arrowstyle':"->"})
ax.yaxis.label.set_visible(False)
ax.set_title("StratPD", fontsize=13)
ax.set_xlim(0,30_000)
ax.set_xlabel("MachineHours\n(d)", fontsize=11)
ax.set_ylim(-6500,2_000)
savefig(f"bulldozer_MachineHours_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize2)
ice = predict_ice(rf, X, "MachineHours", 'SalePrice', numx=300, nlines=200)
plot_ice(ice, "MachineHours", 'SalePrice', alpha=.5, ax=ax,
show_ylabel=True,
yrange=(33_000,38_000)
)
ax.set_xlabel("MachineHours\n(a)", fontsize=11)
ax.set_title("FPD/ICE plot", fontsize=13)
ax.set_xlim(0,30_000)
savefig(f"bulldozer_MachineHours_pdp")
def unsup_yearmade():
np.random.seed(1) # pick seed for reproducible article images
n = 10_000
X, y = load_bulldozer(n=n)
fig, ax = plt.subplots(1, 1, figsize=figsize2)
plot_stratpd(X, y, colname='YearMade', targetname='SalePrice',
n_trials=1,
bootstrap=True,
show_slope_lines=False,
show_x_counts=True,
show_xlabel=False,
show_impact=False,
pdp_marker_size=4,
pdp_marker_alpha=1,
ax=ax,
supervised=False
)
ax.set_title("Unsupervised StratPD", fontsize=13)
ax.set_xlabel("YearMade", fontsize=11)
ax.set_xlim(1960,2010)
ax.set_ylim(-10000,30_000)
savefig(f"bulldozer_YearMade_stratpd_unsup")
def unsup_weight():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y, df_raw, eqn = toy_weight_data(2000)
df = df_raw.copy()
catencoders = df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
fig, axes = plt.subplots(2, 2, figsize=(4, 4))
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 0],
show_x_counts=False,
yrange=(-13, 0), slope_line_alpha=.1, supervised=False)
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 1],
show_x_counts=False,
yrange=(-13, 0), slope_line_alpha=.1, supervised=True)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 0],
show_x_counts=False,
catnames=df_raw['pregnant'].unique(),
yrange=(-5, 45))
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 1],
show_x_counts=False,
catnames=df_raw['pregnant'].unique(),
yrange=(-5, 45))
axes[0, 0].set_title("Unsupervised")
axes[0, 1].set_title("Supervised")
axes[0, 1].get_yaxis().set_visible(False)
axes[1, 1].get_yaxis().set_visible(False)
savefig(f"weight_unsup")
plt.close()
def weight_ntrees():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y, df_raw, eqn = toy_weight_data(1000)
df = df_raw.copy()
catencoders = df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
trees = [1, 5, 10, 30]
fig, axes = plt.subplots(2, 4, figsize=(8, 4))
for i in range(1, 4):
axes[0, i].get_yaxis().set_visible(False)
axes[1, i].get_yaxis().set_visible(False)
for i in range(0, 4):
axes[0, i].set_title(f"{trees[i]} trees")
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 0],
min_samples_leaf=5,
yrange=(-12, 0), slope_line_alpha=.1, pdp_marker_size=10, show_ylabel=True,
n_trees=1, max_features=1.0, bootstrap=False)
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 1],
min_samples_leaf=5,
yrange=(-12, 0), slope_line_alpha=.1, pdp_marker_size=10, show_ylabel=False,
n_trees=5, max_features='auto', bootstrap=True)
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 2],
min_samples_leaf=5,
yrange=(-12, 0), slope_line_alpha=.08, pdp_marker_size=10, show_ylabel=False,
n_trees=10, max_features='auto', bootstrap=True)
plot_stratpd(X, y, 'education', 'weight', ax=axes[0, 3],
min_samples_leaf=5,
yrange=(-12, 0), slope_line_alpha=.05, pdp_marker_size=10, show_ylabel=False,
n_trees=30, max_features='auto', bootstrap=True)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 0],
catnames={0:False, 1:True}, show_ylabel=True,
yrange=(0, 35),
n_trees=1, max_features=1.0, bootstrap=False)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 1],
catnames={0:False, 1:True}, show_ylabel=False,
yrange=(0, 35),
n_trees=5, max_features='auto', bootstrap=True)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 2],
catnames={0:False, 1:True}, show_ylabel=False,
yrange=(0, 35),
n_trees=10, max_features='auto', bootstrap=True)
plot_catstratpd(X, y, 'pregnant', 'weight', ax=axes[1, 3],
catnames={0:False, 1:True}, show_ylabel=False,
yrange=(0, 35),
n_trees=30, max_features='auto', bootstrap=True)
savefig(f"education_pregnant_vs_weight_ntrees")
plt.close()
def meta_weight():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y, df_raw, eqn = toy_weight_data(1000)
df = df_raw.copy()
catencoders = df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
plot_stratpd_gridsearch(X, y, colname='education', targetname='weight',
show_slope_lines=True,
xrange=(10,18),
yrange=(-12,0))
savefig("education_weight_meta")
plot_stratpd_gridsearch(X, y, colname='height', targetname='weight', yrange=(0,150),
show_slope_lines=True)
savefig("height_weight_meta")
def noisy_poly_data(n, sd=1.0):
x1 = np.random.uniform(-2, 2, size=n)
x2 = np.random.uniform(-2, 2, size=n)
y = x1 ** 2 + x2 + 10 + np.random.normal(0, sd, size=n)
df = pd.DataFrame()
df['x1'] = x1
df['x2'] = x2
df['y'] = y
return df
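# For the synthetic data built by noisy_poly_data() above, scikit-learn's generic
# partial-dependence tooling gives a quick baseline to compare against StratPD.
# This is a hedged sketch that swaps in sklearn.inspection rather than the
# project's own predict_ice/plot_ice helpers; names ending in _demo are invented.
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import PartialDependenceDisplay

def pdp_baseline_example():
    df_demo = noisy_poly_data(n=1000, sd=1.0)
    X_demo, y_demo = df_demo.drop('y', axis=1), df_demo['y']
    rf_demo = RandomForestRegressor(n_estimators=100, n_jobs=-1).fit(X_demo, y_demo)
    PartialDependenceDisplay.from_estimator(rf_demo, X_demo, features=['x1', 'x2'])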
def noise():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
n = 1000
fig, axes = plt.subplots(1, 4, figsize=(8, 2), sharey=True)
sds = [0,.5,1,2]
for i,sd in enumerate(sds):
df = noisy_poly_data(n=n, sd=sd)
X = df.drop('y', axis=1)
y = df['y']
plot_stratpd(X, y, 'x1', 'y',
show_ylabel=False,
pdp_marker_size=1,
show_x_counts=False,
ax=axes[i], yrange=(-4, .5))
axes[0].set_ylabel("y", fontsize=12)
for i,(ax,which) in enumerate(zip(axes,['(a)','(b)','(c)','(d)'])):
ax.text(0, -1, f"{which}\n$\sigma = {sds[i]}$", horizontalalignment='center')
ax.set_xlabel('$x_1$', fontsize=12)
ax.set_xticks([-2,-1,0,1,2])
savefig(f"noise")
def meta_noise():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
n = 1000
noises = [0, .5, .8, 1.0]
sizes = [2, 10, 30, 50]
fig, axes = plt.subplots(len(noises), len(sizes), figsize=(7, 6), sharey=True,
sharex=True)
row = 0
for sd in noises:
df = noisy_poly_data(n=n, sd=sd)
X = df.drop('y', axis=1)
y = df['y']
col = 0
for s in sizes:
if row == 3:
show_xlabel = True
else:
show_xlabel = False
print(f"------------------- noise {sd}, SIZE {s} --------------------")
if col > 1: axes[row, col].get_yaxis().set_visible(False)
plot_stratpd(X, y, 'x1', 'y', ax=axes[row, col],
show_x_counts=False,
min_samples_leaf=s,
yrange=(-3.5, .5),
pdp_marker_size=1,
show_ylabel=False,
show_xlabel=show_xlabel)
if col == 0:
axes[row, col].set_ylabel(f'$y, \\epsilon \\sim N(0,{sd:.2f})$')
if row == 0:
axes[row, col].set_title("Min $x_{\\overline{c}}$ leaf " + f"{s}",
fontsize=12)
col += 1
row += 1
lastrow = len(noises)
# row = 0
# for sd in noises:
# axes[row, 0].scatter(X['x1'], y, slope_line_alpha=.12, label=None)
# axes[row, 0].set_xlabel("x1")
# axes[row, 0].set_ylabel("y")
# axes[row, 0].set_ylim(-5, 5)
# axes[row, 0].set_title(f"$y = x_1^2 + x_2 + \epsilon$, $\epsilon \sim N(0,{sd:.2f})$")
# row += 1
# axes[lastrow, 0].set_ylabel(f'$y$ vs $x_c$ partition')
# col = 0
# for s in sizes:
# rtreeviz_univar(axes[lastrow, col],
# X['x2'], y,
# min_samples_leaf=s,
# feature_name='x2',
# target_name='y',
# fontsize=10, show={'splits'},
# split_linewidth=.5,
# markersize=5)
# axes[lastrow, col].set_xlabel("x2")
# col += 1
savefig(f"meta_additivity_noise")
def bigX_data(n):
x1 = np.random.uniform(-1, 1, size=n)
x2 = np.random.uniform(-1, 1, size=n)
x3 = np.random.uniform(-1, 1, size=n)
y = 0.2 * x1 - 5 * x2 + 10 * x2 * np.where(x3 >= 0, 1, 0) + np.random.normal(0, 1,
size=n)
df = pd.DataFrame()
df['x1'] = x1
df['x2'] = x2
df['x3'] = x3
df['y'] = y
return df
def bigX():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
n = 1000
df = bigX_data(n=n)
X = df.drop('y', axis=1)
y = df['y']
# plot_stratpd_gridsearch(X, y, 'x2', 'y',
# min_samples_leaf_values=[2,5,10,20,30],
# # nbins_values=[1,3,5,6,10],
# yrange=(-4,4))
#
# plt.tight_layout()
# plt.show()
# return
# Partial deriv is just 0.2 so this is correct. flat deriv curve, net effect line at slope .2
# ICE is way too shallow and not line at n=1000 even
fig, axes = plt.subplots(2, 2, figsize=(4, 4), sharey=True)
# Partial deriv wrt x2 is -5 plus 10 about half the time so about 0
# Should not expect a criss-cross like ICE since deriv of 1_x3>=0 is 0 everywhere
# wrt to any x, even x3. x2 *is* affecting y BUT the net effect at any spot
# is what we care about and that's 0. Just because marginal x2 vs y shows non-
# random plot doesn't mean that x2's net effect is nonzero. We are trying to
# strip away x1/x3's effect upon y. When we do, x2 has no effect on y.
# Ask what is net effect at every x2? 0.
plot_stratpd(X, y, 'x2', 'y', ax=axes[0, 0], yrange=(-4, 4),
min_samples_leaf=5,
pdp_marker_size=2)
# Partial deriv wrt x3 of 1_x3>=0 is 0 everywhere so result must be 0
plot_stratpd(X, y, 'x3', 'y', ax=axes[1, 0], yrange=(-4, 4),
min_samples_leaf=5,
pdp_marker_size=2)
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
print(f"RF OOB {rf.oob_score_}")
ice = predict_ice(rf, X, 'x2', 'y', numx=100)
plot_ice(ice, 'x2', 'y', ax=axes[0, 1], yrange=(-4, 4))
ice = predict_ice(rf, X, 'x3', 'y', numx=100)
plot_ice(ice, 'x3', 'y', ax=axes[1, 1], yrange=(-4, 4))
axes[0, 1].get_yaxis().set_visible(False)
axes[1, 1].get_yaxis().set_visible(False)
axes[0, 0].set_title("StratPD", fontsize=10)
axes[0, 1].set_title("FPD/ICE", fontsize=10)
savefig(f"bigx")
plt.close()
def unsup_boston():
np.random.seed(1) # pick seed for reproducible article images
# np.random.seed(42)
print(f"----------- {inspect.stack()[0][3]} -----------")
boston = load_boston()
print(len(boston.data))
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
X = df.drop('MEDV', axis=1)
y = df['MEDV']
fig, axes = plt.subplots(1, 4, figsize=(9, 2))
axes[0].scatter(df['AGE'], y, s=5, alpha=.7)
axes[0].set_ylabel('MEDV')
axes[0].set_xlabel('AGE')
axes[0].set_title("Marginal")
axes[1].set_title("Unsupervised StratPD")
axes[2].set_title("Supervised StratPD")
axes[3].set_title("FPD/ICE")
plot_stratpd(X, y, 'AGE', 'MEDV', ax=axes[1], yrange=(-20, 20),
n_trees=20,
bootstrap=True,
# min_samples_leaf=10,
max_features='auto',
supervised=False, show_ylabel=False,
verbose=True,
slope_line_alpha=.1)
plot_stratpd(X, y, 'AGE', 'MEDV', ax=axes[2], yrange=(-20, 20),
min_samples_leaf=5,
n_trees=1,
supervised=True, show_ylabel=False)
axes[1].text(5, 15, f"20 trees, bootstrap")
axes[2].text(5, 15, f"1 tree, no bootstrap")
rf = RandomForestRegressor(n_estimators=100, oob_score=True)
rf.fit(X, y)
print(f"RF OOB {rf.oob_score_}")
ice = predict_ice(rf, X, 'AGE', 'MEDV', numx=10)
plot_ice(ice, 'AGE', 'MEDV', ax=axes[3], yrange=(-20, 20), show_ylabel=False)
# axes[0,1].get_yaxis().set_visible(False)
# axes[1,1].get_yaxis().set_visible(False)
savefig(f"boston_unsup")
# plt.tight_layout()
# plt.show()
def lm_plot(X, y, colname, targetname, ax=None):
ax.scatter(X[colname], y, alpha=.12, label=None)
ax.set_xlabel(colname)
ax.set_ylabel(targetname)
col = X[colname]
# y_pred_hp = r_col.predict(col.values.reshape(-1, 1))
# ax.plot(col, y_pred_hp, ":", linewidth=1, c='red', label='y ~ horsepower')
r = LinearRegression()
r.fit(X[['horsepower', 'weight']], y)
xcol = np.linspace(np.min(col), np.max(col), num=100)
ci = 0 if colname == 'horsepower' else 1
# use beta from y ~ hp + weight
# ax.plot(xcol, xcol * r.coef_[ci] + r.intercept_, linewidth=1, c='orange')
# ax.text(min(xcol)*1.02, max(y)*.95, f"$\\beta_{{{colname}}}$={r.coef_[ci]:.3f}")
# r = LinearRegression()
# r.fit(X[['horsepower','weight']], y)
# xcol = np.linspace(np.min(col), np.max(col), num=100)
# ci = X.columns.get_loc(colname)
# # ax.plot(xcol, xcol * r.coef_[ci] + r_col.intercept_, linewidth=1, c='orange', label=f"$\\beta_{{{colname}}}$")
# left40 = xcol[int(len(xcol) * .4)]
# ax.text(min(xcol), max(y)*.94, f"$\hat{{y}} = \\beta_0 + \\beta_1 x_{{horsepower}} + \\beta_2 x_{{weight}}$")
# i = 1 if colname=='horsepower' else 2
# # ax.text(left40, left40*r.coef_[ci] + r_col.intercept_, f"$\\beta_{i}$={r.coef_[ci]:.3f}")
def cars():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
df_cars = | pd.read_csv("../notebooks/data/auto-mpg.csv") | pandas.read_csv |
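# The code above repeatedly uses the same SHAP recipe: an interventional
# TreeExplainer over a small background sample, then a single-feature dependence
# plot. A minimal, self-contained sketch of that recipe on synthetic data follows;
# the column names and sizes are illustrative assumptions, not the bulldozer data.
import numpy as np
import pandas as pd
import shap
from sklearn.ensemble import RandomForestRegressor

def shap_dependence_example():
    rng = np.random.default_rng(1)
    X_demo = pd.DataFrame({"size": rng.integers(0, 6, 500).astype(float),
                           "age": rng.uniform(0, 30, 500)})
    y_demo = 5000 * X_demo["size"] - 100 * X_demo["age"] + rng.normal(0, 500, 500)
    model = RandomForestRegressor(n_estimators=100, n_jobs=-1).fit(X_demo, y_demo)
    explainer = shap.TreeExplainer(model, data=shap.sample(X_demo, 100),
                                   feature_perturbation='interventional')
    sample = X_demo.sample(200, replace=False)
    shap_values = explainer.shap_values(sample, check_additivity=False)
    shap.dependence_plot("size", shap_values, sample,
                         interaction_index=None, show=False)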
from pandas import read_csv,pivot_table,Series,to_numeric,concat
import sys
import json
import math
import os
import csv
import numpy as np
save_path = './'
class gen():
def __init__(self,uploads):
self.do(uploads)
def do(self,uploads):
self.Das=datas()
for upload in uploads:
upload.save(save_path,overwrite=True)
Da=data(upload.filename)
self.Das.addda(Da)
self.Das.makemean()
def dcm_gen(self,qlist):
myDCM=DCM(self.Das.meanda.df,qlist)
return myDCM.dcm
def json_gen(self,qlist):
'''prepare dict for return value json'''
ret={}
ret['status']='OK'
ret['files']=self.Das.len
ret['plots']={}
for i in range(self.Das.len):
ret['plots'][self.Das.das[i].name]=json.loads(self.Das.das[i].df.to_json())
if(self.Das.len>1):
ret['plots']['mean']=json.loads(self.Das.meanda.df.to_json())
ret['DCM']=self.dcm_gen(qlist).to_html(float_format=lambda x: "{0:.2f}".format(x))
return json.dumps(ret)
#return str(ret)
# except:
# return "nok"
class DCM():
def __init__(self,mean,qlist=None):
self.mean=mean
self.qlist=(np.trim_zeros(qlist, 'b') if qlist is not None else []) or [0,0.5,0.75,1.,2.,3.,4.,6.,8.,10.,20.,40.,60.,80.,100.,120.]
self.process()
def process(self):
df=self.mean
qlist=self.qlist
df=df.rolling(window=5,center=False).mean()
df.loc[10000]=df.apply(self.ext)
t=[]
t.append(df.apply(self.convert))
df1=concat(t,axis=1)
ext=Series(2*df1.loc[:,250000]-df1.loc[:,300000],index=qlist,name='100000')
df1=concat([ext,df1],axis=1)
df1.loc[df1.index==0]=0
for row in range(len(df1.index)):
for col in range(len(df1.columns)-1)[::-1]:
if df1.iloc[row,col]<df1.iloc[row,col+1]:
df1.iloc[row,col]=df1.iloc[row,col+1]
self.dcm=df1
def ext(self,col):
c=len(col)
last=col.iat[-1]
lastkey=col.index[-1]
mid=math.ceil(c*0.8)
start=col.iat[mid]
startkey=col.index[mid]
g=(last-start)/(lastkey-startkey)
return last+g*(10000-lastkey)
def convert(self,col):
col=col.sort_index(ascending=False)
res=[]
#print(col.name)
for q in self.qlist:
temp=None #store k temperary
ET=None
for k,v in col.iteritems():
#k=float(k)
if q==v:
ET=k
break
elif q<v:
temp=v
key=k
continue
elif temp!=None:
g=(temp-v)/(key-k)
intq=(g*(key)-temp+q)/g
ET=intq
break
res.append(4000 if ET==None else ET)
return Series(res,index=self.qlist,name=col.name)
class datas():
def __init__(self):
self.das=[]
self.len=0
self.meanda=None
def addda(self,da):
self.das.append(da)
self.len += 1
def makemean(self):
if(self.len>1):
meandf=self.das[0].df.copy(deep=True)
for i in range(1,self.len):
meandf=meandf.add(self.das[i].df,fill_value=0)
#meandf.apply(lambda x: x / self.len)
self.meanda=data("mean",meandf / len(self.das))
#此函数接受一个参数并返回轨压列表
def genrailp(count):
#如果一共设定了n次轨压,最高轨压即为n*100bar,其他轨压顺延,最后一个轨压是250bar
r=[i*100000 for i in range(count+1,2,-1)]
r+=[250000]
return r
def railpgen(se,cnt):
railp_=genrailp(cnt)
match=0
for k,v in se.iteritems():
if(v==1):
se[k]=railp_[match]
match+=1
class data():
def __init__(self,filename,df=None):
self.name=filename
if(df is None):
self.df=self.fileTodf(self.name)
os.remove(filename)
else:
self.df=df
def fileTodf(self,filename):
sep=self.getsep(filename)
m= | read_csv(filename,sep=sep,header=None,encoding='latin_1',skiprows=3,skipinitialspace=True) | pandas.read_csv |
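# The heart of DCM.convert() above is a linear interpolation that finds where a
# decreasing trace crosses a target value q. A standalone, hedged sketch of that
# idea (function and variable names are illustrative, not from the original):
import pandas as pd

def crossing_point(series: pd.Series, q: float, default=4000):
    s = series.sort_index(ascending=False)
    prev_key = prev_val = None
    for k, v in s.items():
        if v == q:
            return k
        if v > q:                       # still above the target: remember this point
            prev_key, prev_val = k, v
            continue
        if prev_val is not None:        # first point below q: interpolate between the two
            g = (prev_val - v) / (prev_key - k)
            return prev_key + (q - prev_val) / g
    return default                      # never crossed: fall back, as DCM does with 4000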
import re
import numpy as np
import pytest
from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series
import pandas._testing as tm
from pandas.core.arrays.categorical import recode_for_categories
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalAPI:
def test_ordered_api(self):
# GH 9347
cat1 = Categorical(list("acb"), ordered=False)
tm.assert_index_equal(cat1.categories, Index(["a", "b", "c"]))
assert not cat1.ordered
cat2 = Categorical(list("acb"), categories=list("bca"), ordered=False)
tm.assert_index_equal(cat2.categories, Index(["b", "c", "a"]))
assert not cat2.ordered
cat3 = Categorical(list("acb"), ordered=True)
tm.assert_index_equal(cat3.categories, Index(["a", "b", "c"]))
assert cat3.ordered
cat4 = Categorical(list("acb"), categories=list("bca"), ordered=True)
tm.assert_index_equal(cat4.categories, Index(["b", "c", "a"]))
assert cat4.ordered
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
assert not cat2.ordered
cat2 = cat.as_ordered()
assert cat2.ordered
cat2.as_unordered(inplace=True)
assert not cat2.ordered
cat2.as_ordered(inplace=True)
assert cat2.ordered
assert cat2.set_ordered(True).ordered
assert not cat2.set_ordered(False).ordered
cat2.set_ordered(True, inplace=True)
assert cat2.ordered
cat2.set_ordered(False, inplace=True)
assert not cat2.ordered
# removed in 0.19.0
msg = "can't set attribute"
with pytest.raises(AttributeError, match=msg):
cat.ordered = True
with pytest.raises(AttributeError, match=msg):
cat.ordered = False
def test_rename_categories(self):
cat = Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
tm.assert_numpy_array_equal(
res.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(res.categories, Index([1, 2, 3]))
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
exp_cat = Index(["a", "b", "c"])
tm.assert_index_equal(cat.categories, exp_cat)
# GH18862 (let rename_categories take callables)
result = cat.rename_categories(lambda x: x.upper())
expected = Categorical(["A", "B", "C", "A"])
tm.assert_categorical_equal(result, expected)
# and now inplace
res = cat.rename_categories([1, 2, 3], inplace=True)
assert res is None
tm.assert_numpy_array_equal(
cat.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_rename_categories_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
msg = (
"new categories need to have the same number of items as the "
"old categories!"
)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(new_categories)
def test_rename_categories_series(self):
# https://github.com/pandas-dev/pandas/issues/17981
c = Categorical(["a", "b"])
result = c.rename_categories(Series([0, 1], index=["a", "b"]))
expected = Categorical([0, 1])
tm.assert_categorical_equal(result, expected)
def test_rename_categories_dict(self):
# GH 17336
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1})
expected = Index([4, 3, 2, 1])
tm.assert_index_equal(res.categories, expected)
# Test for inplace
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1}, inplace=True)
assert res is None
tm.assert_index_equal(cat.categories, expected)
# Test for dicts of smaller length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "c": 3})
expected = Index([1, "b", 3, "d"])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with bigger length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6})
expected = Index([1, 2, 3, 4])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with no items from old categories
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"f": 1, "g": 3})
expected = Index(["a", "b", "c", "d"])
tm.assert_index_equal(res.categories, expected)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["c", "b", "a"], ordered=True
)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
tm.assert_categorical_equal(cat, old)
# only res is changed
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
assert res is None
tm.assert_categorical_equal(cat, new)
@pytest.mark.parametrize(
"new_categories",
[
["a"], # not all "old" included in "new"
["a", "b", "d"], # still not all "old" in "new"
["a", "b", "c", "d"], # all "old" included in "new", but too long
],
)
def test_reorder_categories_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
msg = "items in new_categories are not the same as in old categories"
with pytest.raises(ValueError, match=msg):
cat.reorder_categories(new_categories)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["a", "b", "c", "d"], ordered=True
)
# first inplace == False
res = cat.add_categories("d")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
tm.assert_categorical_equal(res, expected)
def test_add_categories_existing_raises(self):
# new is in old categories
cat = Categorical(["a", "b", "c", "d"], ordered=True)
msg = re.escape("new categories must not include old categories: {'d'}")
with pytest.raises(ValueError, match=msg):
cat.add_categories(["d"])
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
res = cat.set_categories(["c", "b", "a"], inplace=True)
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
assert res is None
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = Index(["a", "b", "c"])
tm.assert_index_equal(res.categories, exp_categories_back)
tm.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0], dtype=np.int8))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0], dtype=np.int8))
tm.assert_index_equal(res.categories, Index(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_index_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0], dtype=np.int8))
tm.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
# all "pointers" to '4' must be changed from 3 to 0,...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3], dtype=np.int8))
# categories are now in new order
tm.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
# output is the same
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
assert c.min() == 4
assert c.max() == 1
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
def test_to_dense_deprecated(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
with tm.assert_produces_warning(FutureWarning):
cat.to_dense()
@pytest.mark.parametrize(
"values, categories, new_categories",
[
# No NaNs, same cats, same order
(["a", "b", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["a", "b", "a"], ["a", "b"], ["b", "a"]),
# Same, unsorted
(["b", "a", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["b", "a", "a"], ["a", "b"], ["b", "a"]),
# NaNs
(["a", "b", "c"], ["a", "b"], ["a", "b"]),
(["a", "b", "c"], ["a", "b"], ["b", "a"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
# Introduce NaNs
(["a", "b", "c"], ["a", "b"], ["a"]),
(["a", "b", "c"], ["a", "b"], ["b"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
# No overlap
(["a", "b", "c"], ["a", "b"], ["d", "e"]),
],
)
@pytest.mark.parametrize("ordered", [True, False])
def test_set_categories_many(self, values, categories, new_categories, ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c.set_categories(new_categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_set_categories_rename_less(self):
# GH 24675
cat = Categorical(["A", "B"])
result = cat.set_categories(["A"], rename=True)
expected = Categorical(["A", np.nan])
tm.assert_categorical_equal(result, expected)
def test_set_categories_private(self):
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"])
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
# fastpath
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"], fastpath=True)
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"], ordered=True)
# first inplace == False
res = cat.remove_categories("c")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
@pytest.mark.parametrize("removals", [["c"], ["c", np.nan], "c", ["c", "c"]])
def test_remove_categories_raises(self, removals):
cat = Categorical(["a", "b", "a"])
message = re.escape("removals must all be in old categories: {'c'}")
with pytest.raises(ValueError, match=message):
cat.remove_categories(removals)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"], categories=["a", "b", "c", "d", "e"])
exp_categories_all = Index(["a", "b", "c", "d", "e"])
exp_categories_dropped = | Index(["a", "b", "c", "d"]) | pandas.Index |
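# A compact usage sketch of the Categorical API the tests above exercise; the
# inline comments reflect expected behavior on a recent pandas release.
import pandas as pd

def categorical_api_example():
    cat = pd.Categorical(["a", "b", "c", "a"], ordered=True)
    cat = cat.rename_categories({"a": "A"})       # values become ['A', 'b', 'c', 'A']
    cat = cat.add_categories(["d"])               # categories: ['A', 'b', 'c', 'd']
    cat = cat.remove_unused_categories()          # 'd' is unused, so it is dropped again
    return cat.set_categories(["c", "b", "A"])    # reorders the remaining categories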
#!/usr/bin/env python
"""Tests for `openml_speed_dating_pipeline_steps` package."""
import unittest
from sklearn import datasets
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from openml_speed_dating_pipeline_steps import (
openml_speed_dating_pipeline_steps
as pipeline_steps
)
class TestOpenml_speed_dating_pipeline_steps(unittest.TestCase):
"""Tests for `openml_speed_dating_pipeline_steps` package."""
def setUp(self):
"""Set up test fixtures, if any."""
iris = datasets.load_iris()
self.data = pd.DataFrame(data=iris.data, columns=iris.feature_names)
self.range_col = iris.feature_names[0] + 'range'
self.range_orig = iris.feature_names[0]
self.data[self.range_col] = self.data[iris.feature_names[0]].apply(
lambda x: '[{}-{}]'.format(x, x+1)
)
self.numeric_difference = pipeline_steps.NumericDifferenceTransformer()
self.range_transformer = pipeline_steps.RangeTransformer()
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_numeric_difference_columns(self):
"""Test that numeric differences returns the
right number of columns."""
assert(len(
self.numeric_difference.fit_transform(self.data).columns
) == 6)
def test_001_numeric_difference_coltypes(self):
transformed = self.numeric_difference.fit_transform(self.data)
for col in transformed.columns:
assert | is_numeric_dtype(transformed[col]) | pandas.api.types.is_numeric_dtype |
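# A minimal sketch of the dtype check that test_001 relies on:
import pandas as pd
from pandas.api.types import is_numeric_dtype

def numeric_columns(df: pd.DataFrame):
    return [col for col in df.columns if is_numeric_dtype(df[col])]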
import time
import datetime
# import geojson
# import eventlet
import pandas as pd
import geopandas as gpd
from shapely.geometry import LineString
from typing import Optional
from pathlib import Path
from src.python_plots.plot_classes import PyPlot
from multiprocessing import Manager
from abc import ABCMeta, abstractmethod
from src.FleetSimulationBase import build_operator_attribute_dicts
from src.misc.globals import *
PORT = 4200
EPSG_WGS = 4326
""" Eventlet should be put to sleep for around 2s. """ # should be reduced for a smoother visual experience
FRAMERATE_PER_SECOND = 0.5
PYPLOT_FRAMERATE = 30
AVAILABLE_LAYERS = ["vehicles"] # add layers here when they are implemented
def interpolate_coordinates(row, nodes_gdf):
"""This function approximates the coordinates of a vehicle position.
For simplicity and computational effort, it is assumed that street sections are small compared to the earth radius
and linear interpolation has sufficient accuraccy.
:param pos: (o_node, d_node, rel_pos) of an edge | d_node, rel_pos can be None (if vehicle is idle in o_node)
:param nodes_gdf: GeoDataFrame with node geometry in WGS84 coordinates
:return: lon, lat
"""
pos = row["nw_pos"]
p0 = nodes_gdf.loc[pos[0], "geometry"]
p0_lon = p0.x
p0_lat = p0.y
if pos[1] == -1:
return p0_lon, p0_lat
else:
p1 = nodes_gdf.loc[pos[1], "geometry"]
p1_lon = p1.x
p1_lat = p1.y
return p0_lon + (p1_lon - p0_lon)*pos[2], p0_lat + (p1_lat - p0_lat)*pos[2]
#return (p0_lon + p1_lon) / 2, (p0_lat + p1_lat) / 2
def interpolate_coordinates_with_edges(row, nodes_gdf, edges_gdf):
"""This function approximates the coordinates of a vehicle position.
For simplicity and computational effort, it is assumed that street sections are small compared to the earth radius
and linear interpolation has sufficient accuraccy.
This function also interpolates on the edge geometries.
:param pos: (o_node, d_node, rel_pos) of an edge | d_node, rel_pos can be None (if vehicle is idle in o_node)
:param nodes_gdf: GeoDataFrame with node geometry in WGS84 coordinates
:return: lon, lat
"""
pos = row["nw_pos"]
p0 = nodes_gdf.loc[pos[0], "geometry"]
p0_lon = p0.x
p0_lat = p0.y
if pos[1] == -1:
return p0_lon, p0_lat
else:
edge_geometry = edges_gdf.loc[(pos[0], pos[1]), "geometry"]
full_length = edge_geometry.length
next_length = 0
current_index = 0
current_part_len = None
while current_index < len(edge_geometry.coords) - 1:
prev_p = edge_geometry.coords[current_index]
next_p = edge_geometry.coords[current_index + 1]
current_part_len = LineString([prev_p, next_p]).length
next_length += current_part_len
if next_length/full_length > pos[2]:
break
current_index += 1
p0_lon, p0_lat = edge_geometry.coords[current_index]
p1_lon, p1_lat = edge_geometry.coords[current_index + 1]
frac_on_part = 1.0 - ((next_length - pos[2] * full_length) / current_part_len)
return p0_lon + (p1_lon - p0_lon)*frac_on_part, p0_lat + (p1_lat - p0_lat)*frac_on_part
def point_str_to_pos(p_str):
p_info = p_str.split(";")
return int(p_info[0]), int(p_info[1]), float(p_info[2])
def prep_nw_output(nw_row):
geo = nw_row["geometry"]
return {"type":"Point", "coordinates": [geo.x, geo.y]}
def prep_output(gdf_row):
# TODO # change moving to new status classification ("idle","in-service","charging")
geo = gdf_row["geometry"]
if gdf_row["moving"]:
moving = 1
else:
moving = 0
return {"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [geo.x, geo.y]
},
"properties": {
"id": gdf_row["vid"],
"occupancy": gdf_row["pax"],
"soc": gdf_row["soc"],
"moving": moving
}}
class State:
def __init__(self, vid_str, time, pos, end_time, end_pos, soc, pax, moving, status, trajectory_str = None):
self.vid_str = vid_str
self.time = time
if type(pos) == str:
self.pos = point_str_to_pos(pos)
else:
self.pos = pos
self.soc = soc
self.pax = pax
self.moving = moving
self.status = status
self.end_time = end_time
if type(end_pos) == str:
self.end_pos = point_str_to_pos(end_pos)
else:
self.end_pos = end_pos
self.trajectory = []
if trajectory_str is not None and not pd.isnull(trajectory_str):
for tmp_str in trajectory_str.split(";"):
tmp_str2 = tmp_str.split(":")
self.trajectory.append((int(tmp_str2[0]), float(tmp_str2[1])))
def to_dict(self):
# TODO # make definition of transferred attributes here
return {"vid": self.vid_str, "nw_pos": self.pos, "soc": self.soc, "pax": self.pax, "moving": self.moving,
"status": self.status}
def return_state(self, replay_time):
"""This method allows a standardized way to prepare the output.
:param replay_time: current replay time
:return: state-dict or empty dict
"""
if replay_time > self.end_time:
self.pos = self.end_pos
return None
if not self.moving:
return self.to_dict()
else:
while self.time < replay_time:
if len(self.trajectory) != 0:
if self.trajectory[0][1] <= replay_time:
self.time = self.trajectory[0][1]
self.pos = (self.trajectory[0][0], -1, -1)
self.trajectory = self.trajectory[1:]
continue
else:
target = self.trajectory[0][0]
target_time = self.trajectory[0][1]
if self.pos[2] < 0:
cur_pos = 0.0
else:
cur_pos = self.pos[2]
delta_pos = (1.0 - cur_pos)/(target_time - self.time)*(replay_time - self.time)
self.pos = (self.pos[0], target, cur_pos + delta_pos)
self.time = replay_time
else:
target = self.end_pos[0]
target_time = self.end_time
target_pos = self.end_pos[2]
if target_pos is None:
print("is this possible??")
if self.pos[2] < 0:
cur_pos = 0.0
else:
cur_pos = self.pos[2]
delta_pos = (target_pos - cur_pos)/(target_time - self.time)*(replay_time - self.time)
self.pos = (self.pos[0], target, cur_pos + delta_pos)
self.time = replay_time
return self.to_dict()
# if replay_time == self.time:
# return self.to_dict()
# else:
# return None
class ReplayVehicle:
def __init__(self, op_id, vid, veh_df, start_time, end_time):
self.op_id = op_id
self.vid = vid
self.vid_df = veh_df.reset_index()
self.start_time = start_time
self.end_time = end_time
#
self.active_row = 0
self.init_pax = 0
self.init_pos = self.vid_df.loc[0, G_VR_LEG_START_POS]
try:
self.init_soc = self.vid_df.loc[0, G_VR_LEG_START_SOC]
except:
self.init_soc = 1.0
#
self.last_state = State(str(self), self.start_time, self.init_pos, self.start_time, self.init_pos, self.init_soc, self.init_pax, False, "idle")
def __str__(self):
return f"{self.op_id}-{self.vid}"
def get_veh_state(self, replay_time):
"""This method returns the current vehicle state.
:param replay_time: current simulation replay time
:return: json with state information of this vehicle
"""
# TODO # adopt for smooth soc
same_state = self.last_state.return_state(replay_time)
if same_state is not None:
return same_state
# first check active row if simulation time is still within its boundaries
if replay_time < self.vid_df.loc[self.active_row, G_VR_LEG_START_TIME] or\
replay_time > self.vid_df.loc[self.active_row, G_VR_LEG_END_TIME]:
self.vid_df["started"] = self.vid_df[G_VR_LEG_START_TIME] <= replay_time
self.active_row = self.vid_df["started"].sum() - 1
if self.active_row == -1:
self.active_row = 0
end_time = self.vid_df.loc[self.active_row, G_VR_LEG_START_TIME]
self.last_state = State(str(self), replay_time, self.init_pos, end_time, self.init_pos, self.init_soc, self.init_pax, False, "idle")
else:
pax = self.vid_df.loc[self.active_row, G_VR_NR_PAX]
route_start_time = self.vid_df.loc[self.active_row, G_VR_LEG_START_TIME]
route_end_time = self.vid_df.loc[self.active_row, G_VR_LEG_END_TIME]
# check status
if route_end_time > replay_time:
status = self.vid_df.loc[self.active_row, G_VR_STATUS]
end_pos = self.vid_df.loc[self.active_row, G_VR_LEG_END_POS]
# TODO # change status to "idle", "in-service", "charging" instead of "moving"
if status in ["route", "reposition", "to_charge", "to_depot"]:
moving = True
trajectory_str = self.vid_df.loc[self.active_row, G_VR_REPLAY_ROUTE]
else:
moving = False
trajectory_str = None
# TODO # soc!
start_soc = self.vid_df.loc[self.active_row, G_VR_LEG_START_SOC]
self.last_state = State(str(self), replay_time, self.last_state.pos, route_end_time, end_pos, start_soc, pax, moving, status, trajectory_str=trajectory_str)
else:
status = "idle"
moving = False
end_time = self.end_time
if self.active_row + 1 < self.vid_df.shape[0]:
end_time = self.vid_df.loc[self.active_row + 1, G_VR_LEG_START_TIME]
self.last_state = State(str(self), replay_time, self.last_state.pos, end_time, self.last_state.pos, self.last_state.soc, 0, moving, status)
#
return_state = self.last_state.return_state(replay_time)
return return_state
def get_current_vehicle_trajectory(self, replay_time):
"""This method returns the current vehicle trajectory as geojson collection containing
- polyline with currently assigned route
- stops for requests
:param replay_time: current replay time
:return: geojson feature collection
"""
# TODO # get_current_vehicle_trajectory()
# TODO # use request (rq_time, pu/do) information to decide whether
# - to include a stop to a route or
# - to mark it with 'currently_unknown' flag (next stop)
# - to mark whether it is a drop-off/pick-up/pu&do stop
pass
class Singleton(ABCMeta):
_instance = None
def __call__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instance
# TODO # rename base class (more layers than just vehicles)
class VehicleMovementSimulation(metaclass=Singleton):
"""
Base class for all the simulations. Must be a singleton.
"""
@abstractmethod
def start(self, socket_io):
pass
@abstractmethod
def step(self):
pass
@property
@abstractmethod
def started(self):
pass
class Replay(VehicleMovementSimulation):
def __init__(self):
self._socket_io = None
self._started = False
self._sc_loaded = False
self._inv_frame_rate = 1 / FRAMERATE_PER_SECOND
self._time_step = None
self._paused = False
self._act_layer = "vehicles"
self._layer_changed = False
self._last_veh_state = None
self._last_replay_time = None
self._current_kpis = {}
#
self.sim_start_time = None
self.sim_end_time = None
self.replay_time = None
self.nw_dir = None
self.node_gdf = None
self.edge_gdf = None
self.n_op = None
self.list_op_dicts = None
self.poss_veh_states = []
#
self.steps_per_real_sec = 1
self.replay_vehicles = {} # (op_id, vid) -> ReplayVehicle
self.operator_vehicles = {} # op_id -> list_vehicles
def load_scenario(self, output_dir, start_time_in_seconds = None, end_time_in_seconds = None):
"""This method has to be called to load the scenario data.
:param output_dir: scenario output dir to be processed
:param start_time_in_seconds: determines when replay is started in simulation time (None : starts with simulation start time)
:return: None
"""
print(f"Running replay for simulation {output_dir} ...")
# # connection to server
# # --------------------
# print(f"... connecting to MobiVi server")
# self.s = socket.socket()
# try:
# self.s.connect(("localhost", PORT))
# except:
# raise AssertionError("Please start visualization server by calling 'ng serve' in mobivi-front directory."
# f" Check that the server is running on 'http://localhost:{PORT}/'!")
# general settings
# ----------------
print(f"... loading scenario information")
scenario_parameters, list_operator_attributes, _ = load_scenario_inputs(output_dir)
dir_names = get_directory_dict(scenario_parameters)
replay_mode = scenario_parameters[G_SIM_REPLAY_FLAG]
if not replay_mode:
raise AssertionError("Original simulation was not saved in replay mode!")
if start_time_in_seconds is None:
self.sim_start_time = scenario_parameters[G_SIM_START_TIME]
else:
self.sim_start_time = start_time_in_seconds
self.sim_end_time = scenario_parameters[G_SIM_END_TIME]
if end_time_in_seconds is not None and end_time_in_seconds < self.sim_end_time:
self.sim_end_time = end_time_in_seconds
#
self.replay_time = self.sim_start_time
self._time_step = self.steps_per_real_sec * self._inv_frame_rate
print(f"Time step: {self._time_step}")
# load network, compute border node positions, emit network information
# --------------------------------------------------------------
print(f"... loading network information")
self.nw_dir = dir_names[G_DIR_NETWORK]
nw_base_dir = os.path.join(dir_names[G_DIR_NETWORK], "base")
crs_f = os.path.join(nw_base_dir, "crs.info")
node_all_info_f = os.path.join(nw_base_dir, "nodes_all_infos.geojson")
if os.path.isfile(crs_f):
with open(crs_f) as fh_in:
n_crs = {"init":fh_in.read().strip()}
n_epsg = int(n_crs["init"][5:])
if not os.path.isfile(node_all_info_f):
node_f = os.path.join(nw_base_dir, "nodes.csv")
node_df = pd.read_csv(node_f, index_col=0)
self.node_gdf = gpd.GeoDataFrame(node_df, geometry=gpd.points_from_xy(node_df["pos_x"], node_df["pos_y"]),
crs=n_crs)
else:
self.node_gdf = gpd.read_file(node_all_info_f)
self.node_gdf.crs = n_crs
if n_epsg != EPSG_WGS:
self.node_gdf = self.node_gdf.to_crs({"init":f"epsg:{EPSG_WGS}"})
elif os.path.isfile(node_all_info_f):
self.node_gdf = gpd.read_file(node_all_info_f)
if self.node_gdf.crs != f"epsg:{EPSG_WGS}":
self.node_gdf = self.node_gdf.to_crs({"init":f"epsg:{EPSG_WGS}"})
# check that units are correct
if self.node_gdf["geometry"].x.max() > 180 or self.node_gdf["geometry"].x.max() < -180:
raise AssertionError("GeoJSON format assumes WGS input format!")
if self.node_gdf["geometry"].y.max() > 90 or self.node_gdf["geometry"].y.max() < -90:
raise AssertionError("GeoJSON format assumes WGS input format!")
else:
raise AssertionError(f"Neither {crs_f} or {node_all_info_f} were found! -> Insufficient GIS information.")
# load zone system if available
# -----------------------------
# TODO # get spatial information of zone system if available (and transform to WGS coordinates)
# load vehicle trajectories, prepare replay mode
# ----------------------------------------------
print("... processing vehicle data")
self.n_op = scenario_parameters[G_NR_OPERATORS]
self.list_op_dicts = build_operator_attribute_dicts(scenario_parameters, self.n_op, prefix="op_")
for op_id in range(self.n_op):
fleet_stat_f = os.path.join(output_dir, f"2-{op_id}_op-stats.csv")
fleet_stat_df = | pd.read_csv(fleet_stat_f) | pandas.read_csv |
import os
from datetime import datetime
import pandas as pd
def import_raw(start_date,end_date,data_dir="../../../bleed-orange-measure-purple/data/raw/purpleair/",**kwargs):
"""
Imports the raw data from each device
Inputs:
- start_date: datetime corresponding to the file of interest
- end_date: dateimte corresponding to the file of interest
- data_dir: location of the raw data files
Returns a dataframe with data from all devices
"""
dataset = | pd.DataFrame() | pandas.DataFrame |
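# import_raw() presumably stacks one CSV per device into a single frame. A hedged
# sketch of that pattern follows; the glob pattern is an assumption for
# illustration, not taken from the project.
import glob
import pandas as pd

def load_raw_csvs(data_dir="../../../bleed-orange-measure-purple/data/raw/purpleair/"):
    frames = [pd.read_csv(path) for path in sorted(glob.glob(data_dir + "*.csv"))]
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()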
import datetime
import numpy as np
import pandas as pd
from src.utils import update_dati,\
convert_date
lista_inquinanti = ['BENZENE', 'CO', 'NO2', 'NOX', 'NO', 'O3', 'PM10', 'PM2.5', 'SO2']
base_url_anno_corrente = 'http://www.arpalazio.net/main/aria/sci/annoincorso/chimici/RM/DatiOrari/RM_'
df_final = update_dati(lista_inquinanti, base_url_anno_corrente)
# Aggiungo una nuova colonna data_ora che normalizza anno, giorno e ora
df_final['data_ora'] = np.vectorize(convert_date)(df_final['Anno'], df_final['Giorno_giuliano'], df_final['Ora'])
# Carico il vecchio tsv
old_df = pd.read_csv('data/air_pollution_pregressa.tsv', sep='\t')
# Faccio la union con il nuovo
df_update = | pd.concat([old_df, df_final]) | pandas.concat |
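# A hedged sketch of the append-and-save step implied above; only the data_ora
# column name is taken from the script, the rest is illustrative.
import pandas as pd

def append_and_save(old_df: pd.DataFrame, new_df: pd.DataFrame, path: str):
    merged = (pd.concat([old_df, new_df], ignore_index=True)
                .drop_duplicates()
                .sort_values("data_ora"))
    merged.to_csv(path, sep="\t", index=False)
    return merged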
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
import numpy as np
import pandas as pd
import random
import math
from scipy.spatial.distance import cdist
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from progress.bar import Bar
# read hillslopes computation time
def read_data(path):
df_HS_runtime = pd.read_csv(path)
df_HS_runtime = df_HS_runtime.set_index(['HS_Name'])
return df_HS_runtime
# apply K-means
def apply_kmeans(df_AS, df_Feat_Norm, df_HS_runtime):
df_rmse = | pd.DataFrame() | pandas.DataFrame |
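# A hedged sketch of the clustering step apply_kmeans() presumably performs on the
# normalized features; the feature names below are invented for illustration.
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans

def kmeans_example(n_clusters=5):
    rng = np.random.default_rng(0)
    feats = pd.DataFrame(rng.random((200, 3)), columns=["slope", "relief", "area"])
    km = KMeans(n_clusters=n_clusters, n_init=10, random_state=0).fit(feats)
    feats["cluster"] = km.labels_
    centroids = pd.DataFrame(km.cluster_centers_, columns=["slope", "relief", "area"])
    return feats, centroids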
# EIA_CBECS_Land.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
2012 Commercial Buildings Energy Consumption Survey (CBECS)
https://www.eia.gov/consumption/commercial/reports/2012/energyusage/index.php
Last updated: Monday, August 17, 2020
"""
import io
import pandas as pd
import numpy as np
from flowsa.location import US_FIPS, get_region_and_division_codes
from flowsa.common import WITHDRAWN_KEYWORD, \
clean_str_and_capitalize, fba_mapped_default_grouping_fields
from flowsa.settings import vLogDetailed
from flowsa.flowbyfunctions import assign_fips_location_system, aggregator
from flowsa.literature_values import \
get_commercial_and_manufacturing_floorspace_to_land_area_ratio
from flowsa.validation import calculate_flowamount_diff_between_dfs
def eia_cbecs_land_URL_helper(*, build_url, config, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:return: list, urls to call, concat, parse, format into
Flow-By-Activity format
"""
# initiate url list for coa cropland data
urls = []
# replace "__xlsx_name__" in build_url to create three urls
for x in config['xlsx']:
url = build_url
url = url.replace("__xlsx__", x)
urls.append(url)
return urls
def eia_cbecs_land_call(*, resp, url, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param url: string, url
:return: pandas dataframe of original source data
"""
# Convert response to dataframe
df_raw_data = pd.read_excel(io.BytesIO(resp.content),
sheet_name='data')
df_raw_rse = pd.read_excel(io.BytesIO(resp.content),
sheet_name='rse')
if "b5.xlsx" in url:
# skip rows and remove extra rows at end of dataframe
df_data = pd.DataFrame(df_raw_data.loc[15:32]).reindex()
df_rse = pd.DataFrame(df_raw_rse.loc[15:32]).reindex()
df_data.columns = ["Name", "All buildings", "New England",
"Middle Atlantic", "East North Central",
"West North Central", "South Atlantic",
"East South Central", "West South Central",
"Mountain", "Pacific"]
df_rse.columns = ["Name", "All buildings", "New England",
"Middle Atlantic", "East North Central",
"West North Central", "South Atlantic",
"East South Central", "West South Central",
"Mountain", "Pacific"]
df_rse = df_rse.melt(id_vars=["Name"],
var_name="Location",
value_name="Spread")
df_data = df_data.melt(id_vars=["Name"],
var_name="Location",
value_name="FlowAmount")
if "b12.xlsx" in url:
# skip rows and remove extra rows at end of dataframe
df_data1 = | pd.DataFrame(df_raw_data[4:5]) | pandas.DataFrame |
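# A small sketch of the read-then-melt pattern used in eia_cbecs_land_call();
# the sheet layout here is invented for illustration.
import pandas as pd

def wide_to_long_example():
    wide = pd.DataFrame({"Name": ["Office", "Retail"],
                         "New England": [1.0, 2.0],
                         "Pacific": [3.0, 4.0]})
    return wide.melt(id_vars=["Name"], var_name="Location", value_name="FlowAmount")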
#!/usr/bin/env python3
"""
Authors: <NAME>, <NAME>
Functionality implemented:
- Generates and aggregates polarities across headlines and conversations
"""
# Libraries and Dependencies
import os
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import pandas as pd
from nltk.stem import WordNetLemmatizer
import tweepy
# Global Variables
sia = SentimentIntensityAnalyzer()
lemmatizer = WordNetLemmatizer()
conversations_map = {}
headlines_map = {}
def update_stock_terminology():
"""
Creates dictionary with updated terminologies for SentimentIntensityAnalyzer. Includes positive and negative words,
along with polarized words with weights. Used to improve VADER accuracy.
"""
stock_lexicon = {}
csv_df = | pd.read_csv('setup_csvs/polarized_stock_lex.csv') | pandas.read_csv |
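# update_stock_terminology() appears to feed domain-specific words into VADER. One
# common way to do that is to update the analyzer's lexicon in place; the words and
# weights below are placeholders, not values from the project's CSV.
from nltk.sentiment.vader import SentimentIntensityAnalyzer

def extend_vader_lexicon():
    analyzer = SentimentIntensityAnalyzer()
    analyzer.lexicon.update({"bullish": 2.5, "bearish": -2.5, "undervalued": 1.5})
    return analyzer.polarity_scores("Analysts turned bullish on the undervalued stock")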
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
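    # Sanity check on these expected numbers (not part of the original tests): within the
    # [1, 3] window only the period-3 row carries a compounded value, e.g.
    # 1.0506 = 1.02 * 1.03, 1.1342 = 1.06 * 1.07 and 1.221 = 1.10 * 1.11; every other row
    # keeps its own single-period RET.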
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
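    # Sanity check (not part of the original tests): under the 'first' method cum_RET
    # appears to be the running product of RET from the second period onward within each
    # (PERMNO, byvar) group, e.g. 1.092624 = 1.02 * 1.03 * 1.04 and
    # 1.36752 = 1.10 * 1.11 * 1.12.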
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
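# Illustrative sketch, not part of the original tests: based on the expectations above,
# dero.pandas.groupby_merge(df, byvars, 'max', subset='RET') behaves roughly like
# broadcasting a groupwise aggregate back onto the rows with a plain-pandas transform.
def _plain_pandas_groupby_merge_sketch(df, byvars, func='max', subset='RET'):
    out = df.copy()
    # e.g. func='max' adds a 'RET_max' column holding each group's maximum RET
    out[subset + '_' + func] = out.groupby(byvars)[subset].transform(func)
    return out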
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
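# Illustrative sketch, not part of the original tests: with colindex='Date' the expected
# wide frame looks like a pivot of RET on Date within each (PERMNO, byvar) group, roughly
#   wide = df.pivot_table(index=['PERMNO', 'byvar'], columns='Date', values='RET')
# with the column labels prefixed by 'RET'; without a colindex the values are simply
# numbered RET0, RET1, ... in row order within each group.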
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
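# Sanity check on the weighted expectations above: for (portfolio 2, byvar 'a') the only
# row of df_weight with weight 1 is PERMNO 10517 on 1/4/2000 (RET 1.12), so RET_wavg is
# 1.12, while the unweighted mean of that cell is (1.09 + 1.10 + 1.11 + 1.12) / 4 = 1.105.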
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-23 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
( | Timestamp('2000-01-25 00:00:00') | pandas.Timestamp |
#
# Collective Knowledge ()
#
#
#
#
# Developer:
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
import os
import sys
import time
import pandas as pd
import numpy as np
#default_repo_uoa = ''
#default_repo_uoa = 'local'
default_repo_uoa = 'ck-quantum-hackathon-20190315'
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# get raw data for repo-widget
def get_raw_data(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
selected_repo_uoa = i.get('repo_uoa', default_repo_uoa)
def get_experimental_results(repo_uoa=selected_repo_uoa, tags='qck,hackathon-20190315', module_uoa='experiment'):
r = ck.access({'action':'search', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'tags':tags})
if r['return']>0:
print('Error: %s' % r['error'])
exit(1)
experiments = r['lst']
index = [
'team', 'problem_index'
]
dfs = []
for experiment in experiments:
data_uoa = experiment['data_uoa']
r = ck.access({'action':'list_points', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'data_uoa':data_uoa})
if r['return']>0:
print('Error: %s' % r['error'])
exit(1)
# Get all the parameters from meta.json -> "meta"
mmeta = r['dict']['meta']
team = mmeta.get('team', 'UNKNOWN_TEAM')
experiment_entry_path = r['path']
entry_modification_epoch_secs = int( os.path.getmtime(experiment_entry_path) )
entry_modification_utc_human = time.asctime(time.gmtime( entry_modification_epoch_secs ))
point_ids = r['points']
for point_id in point_ids:
load_point_adict = { 'action': 'load_point',
'module_uoa': module_uoa,
'data_uoa': data_uoa,
'point': point_id,
}
r=ck.access( load_point_adict )
if r['return']>0: return r
if i.get('out')=='con':
ck.out( "Loading {}:experiment:{} point_id={} (recorded {})".format(repo_uoa, data_uoa, point_id, entry_modification_utc_human) )
point_data_raw = r['dict']['0001']
choices = point_data_raw['choices']
characteristics_list = point_data_raw['characteristics_list']
num_repetitions = len(characteristics_list)
data = [
{
# statistical repetition
'repetition_id': repetition_id,
# runtime characteristics
'problem_name': characteristics['run'].get('problem_name','problem_x'),
'problem_index': characteristics['run'].get('problem_index',-1),
'training_accuracy': np.float64(characteristics['run'].get('training_accuracy',1e6)),
'training_time': np.float64(characteristics['run'].get('training_time',0.0)),
'training_vectors_limit': np.int64(characteristics['run'].get('training_vectors_limit') or -1),
'solution_function_name': characteristics['run'].get('solution_function_name',''),
'source_code': characteristics['run'].get('source_code',''),
'circuit_str': characteristics['run'].get('circuit_str',''),
'test_accuracy': np.float64(characteristics['run'].get('test_accuracy',0.0)),
'team': team,
'timestamp_epoch_secs': entry_modification_epoch_secs,
'timestamp_utc_human': entry_modification_utc_human,
'success?': characteristics['run'].get('run_success','N/A'),
}
for (repetition_id, characteristics) in zip(range(num_repetitions), characteristics_list)
if len(characteristics['run']) > 0
]
# Construct a DataFrame.
df = pd.DataFrame(data)
df = df.set_index(index, drop=False)
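            # 'index' is ['team', 'problem_index'], so each per-experiment frame carries a
            # MultiIndex that survives the concatenation of all teams' results below.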
# Append to the list of similarly constructed DataFrames.
dfs.append(df)
if dfs:
# Concatenate all thus constructed DataFrames (i.e. stack on top of each other).
result = | pd.concat(dfs) | pandas.concat |
import pandas as pd
import toml
from bs4 import BeautifulSoup
import requests
# from rich import print_json
from pathlib import Path, PosixPath
from openpyxl import load_workbook
import xlrd
import zipfile
import itertools
import os
from loguru import logger
OTHER_VARS_TO_STORE = ["long_name", "code", "short_name", "measure"]
# Read local `config.toml` file.
config = toml.load("config.toml")
# print_json(data=config)
def get_sheetnames_xlsx(filepath: PosixPath):
wb = load_workbook(filepath, read_only=True, keep_links=False)
return wb.sheetnames
def get_sheetnames_xls(filepath: PosixPath):
xls = xlrd.open_workbook(filepath, on_demand=True)
return xls.sheet_names()
def remove_bad_sheets(series: pd.Series):
return series.apply(lambda x: [el for el in x if "triangle" in el.lower()])
def find_files(url: str):
soup = BeautifulSoup(requests.get(url).text, features="html5lib")
hrefs = [a["href"] for a in soup.find_all("a")]
hrefs = [a for a in hrefs if len(a.split(".")) > 1]
hrefs = [
a
for a in hrefs
if (
a.split(".")[1] == "xlsx"
or a.split(".")[1] == "xls"
or a.split(".")[1] == "zip"
or a.split(".")[1] == "xlsm"
)
]
return hrefs
def download_zip_file(file_url: str, in_file_name: str, short_name: str, code: str):
"""Downloads a zip file from given url.
    :param file_url: url
    :type file_url: str
    :param in_file_name: zip file to download
    :type in_file_name: str
    :param short_name: short series name used to disambiguate combined zip files
    :type short_name: str
    :param code: series code used to pick the relevant workbook inside the zip
    :type code: str
    :return: Name of the file actually extracted from the zip
    :rtype: str
"""
_ = download_and_save_file(file_url, in_file_name)
names_to_keep = ["quarterly", "m on m", "1 month", code]
file_location = Path("scratch") / in_file_name
zip_object = zipfile.ZipFile(file_location)
    # Work-around: matching on the series code alone can pick up a file twice (e.g. the IOS
    # code appears in both the '3m on 3m' and 'm on m' workbooks), so drop the '3m on 3m' variants
names = [name for name in zip_object.namelist() if "3m on 3m" not in name.lower()]
files_to_extract = [[x for x in names if y in x.lower()] for y in names_to_keep]
files_to_extract = list(set(itertools.chain(*files_to_extract)))
# This picks out production or manufacturing which are combined, for some reason,
# in the Index of Production zip file
if len(files_to_extract) > 1:
files_to_extract = [x for x in files_to_extract if short_name in x.lower()]
for file in files_to_extract:
zip_object.extract(file, path=Path("scratch"))
assert len(files_to_extract) == 1
# Tidy up by removing the zip
os.remove(file_location)
return files_to_extract[0]
def download_and_save_file(file_url: str, file_name: str):
# Download the file from the top of the list
file_location = Path("scratch") / file_name
if file_location.is_file():
logger.info(f"Skipping download of {file_name}; file already exists")
else:
r = requests.get("https://www.ons.gov.uk" + file_url, stream=True)
with open(Path("scratch") / file_name, "wb") as f:
f.write(r.content)
logger.info(f"Success: file download of {file_name} complete")
return file_name
def convert_yyyy_qn_to_datetime(series: pd.Series):
return (
pd.to_datetime(series.apply(lambda x: x[:4] + "-" + str(int(x[-1]) * 3)))
+ pd.offsets.QuarterEnd()
)
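# Worked example: '2021Q2' -> '2021-6' -> 2021-06-01, which QuarterEnd() then rolls to the
# quarter-end date 2021-06-30.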
def find_vintage_from_pub_datetime(df_in: pd.DataFrame):
offsets = {
"1st": pd.offsets.MonthEnd(),
"M2": pd.offsets.MonthEnd(2), # 2nd estimate (month 2)
"QNA": pd.offsets.QuarterEnd(),
"M3": pd.offsets.MonthEnd(3),
}
df_in["vintage"] = df_in.apply(
lambda x: offsets[x["estimate"]] + x["pub_datetime"], axis=1
)
return df_in
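# Worked example (illustrative dates): a row with estimate 'M2' published on 2021-01-15
# gets vintage MonthEnd(2) + 2021-01-15 = 2021-02-28, while a '1st' estimate published
# the same day gets 2021-01-31.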
def combined_df_urls(config):
df_urls = pd.DataFrame()
frequencies = ["Q", "M"]
for freq in frequencies:
df_urls = pd.concat(
[df_urls, populate_dataframe_of_data_urls(config, freq)], axis=0
)
for key, value in config[freq][0].items():
df_urls[key] = ""
for freq in frequencies:
for key, value in config[freq][0].items():
if key != "urls":
for inner_key, inner_val in value.items():
df_urls.loc[inner_key, key] = inner_val
return df_urls
def populate_dataframe_of_data_urls(config, freq):
dict_of_urls = config[freq][0]["urls"]
dict_of_files = {k: find_files(v) for k, v in dict_of_urls.items()}
# restrict to only first file found on each page
for key, value in dict_of_files.items():
dict_of_files[key] = value[0]
# turn this into a dataframe
df_urls = pd.DataFrame(dict_of_files, index=["url"]).T
df_urls["file_name"] = df_urls["url"].apply(lambda x: x.split("/")[-1])
df_urls[["url", "file_name"]].set_index("url").to_dict()
df_urls["freq"] = freq
df_urls["extension"] = df_urls["file_name"].str.split(".").str[1]
return df_urls
def download_all_files(df_urls):
df_urls["dl_filename"] = ""
# Download non-zips
query = df_urls["extension"] != "zip"
df_urls.loc[query, "dl_filename"] = df_urls.loc[query, :].apply(
lambda x: download_and_save_file(x["url"], x["file_name"]), axis=1
)
# Download zips
df_urls.loc[~query, "dl_filename"] = df_urls.loc[~query, :].apply(
lambda x: download_zip_file(x["url"], x["file_name"], x["short_name"], x["code"]), axis=1
)
df_urls["dl_fn_extension"] = df_urls["dl_filename"].str.split(".").str[1]
return df_urls
def nominate_sheets_from_ss(df_urls):
# Add sheet names
df_urls["sheet_names"] = "None"
df_urls.loc[df_urls["dl_fn_extension"] == "xlsx", "sheet_names"] = df_urls.loc[
df_urls["dl_fn_extension"] == "xlsx", :
].apply(lambda x: get_sheetnames_xlsx(Path("scratch") / x["dl_filename"]), axis=1)
if "xlsm" in df_urls["dl_fn_extension"].unique():
df_urls.loc[df_urls["dl_fn_extension"] == "xlsm", "sheet_names"] = df_urls.loc[
df_urls["dl_fn_extension"] == "xlsm", :
].apply(
lambda x: get_sheetnames_xlsx(Path("scratch") / x["dl_filename"]), axis=1
)
if "xls" in df_urls["dl_fn_extension"].unique():
df_urls.loc[df_urls["dl_fn_extension"] == "xls", "sheet_names"] = df_urls.loc[
df_urls["dl_fn_extension"] == "xls", :
].apply(
lambda x: get_sheetnames_xls(Path("scratch") / x["dl_filename"]), axis=1
)
df_urls["sheet_names"] = remove_bad_sheets(df_urls["sheet_names"])
# stick only to the first sheet
df_urls["sheet_names"] = df_urls["sheet_names"].apply(lambda x: x[0])
return df_urls
def enforce_types(df):
# Ensure the correct types are enforced
type_dict = {
"long_name": "category",
"code": "category",
"short_name": "category",
"measure": "category",
}
for key, value in type_dict.items():
df[key] = df[key].astype(value)
return df
def process_triangle_file(df_urls_row):
logger.info(f"Processing {df_urls_row.name}")
file_name, sheet_name = df_urls_row["dl_filename"], df_urls_row["sheet_names"]
df = pd.read_excel(Path("scratch") / file_name, sheet_name=sheet_name)
    # Remove all of the guff (empty rows/columns and other header clutter)
search_text = "Relating to Period"
alt_search_text = search_text + " (three months ending)"
alt_alt_search_text = "Relating to period"
df = df.dropna(how="all", axis=1).dropna(how="all", axis=0)
# work around for variations on 'relating to period'
dates_row = (
df[(df == search_text) | (df == alt_search_text) | (df == alt_alt_search_text)]
.dropna(how="all", axis=1)
.dropna(how="all", axis=0)
.index.values
)
df = df.rename(columns=dict(zip(df.columns, df.loc[dates_row, :].values[0])))
# remove any lingering first cols
if search_text in list(df.columns):
srch_txt_ix = list(df.columns).index(search_text)
elif alt_search_text in list(df.columns):
srch_txt_ix = list(df.columns).index(alt_search_text)
df = df.rename(columns={df.columns[srch_txt_ix]: search_text})
elif alt_alt_search_text in list(df.columns):
srch_txt_ix = list(df.columns).index(alt_alt_search_text)
df = df.rename(columns={df.columns[srch_txt_ix]: search_text})
else:
raise ValueError("None of the names associated with dates can be found in the spreadsheet")
if srch_txt_ix != 0:
df = df[df.columns[srch_txt_ix:]].copy()
format_datetime = "%Y-%m-%d"
if(any([x in df_urls_row["code"] for x in ["abjr", "npqt", "ihyq"]])):
format_datetime = "%b-%y"
df[df.columns[0]] = pd.to_datetime(df[df.columns[0]], errors="coerce", format=format_datetime)
first_datetime_row = (
pd.to_datetime(df[df.columns[0]], errors="coerce", format=format_datetime).dropna().index.min()
)
df = df.loc[first_datetime_row:, :]
# fill in the "latest estimate" entry with a datetime
df = df[~pd.isna(df[search_text])].copy()
time_series_down = pd.to_datetime(df[search_text], errors="coerce")
time_series_down.iloc[-1] = time_series_down.iloc[-2] + pd.DateOffset(months=3)
df[search_text] = time_series_down
df = pd.melt(df, id_vars=search_text, var_name="datetime")
df = df.rename(columns={search_text: "vintage"})
df["value"] = pd.to_numeric(df["value"], errors="coerce")
if "Q" in str(df["datetime"].iloc[0]):
df["datetime"] = convert_yyyy_qn_to_datetime(df["datetime"].str.strip())
df = df.dropna(subset=["value"])
for var in OTHER_VARS_TO_STORE:
df[var] = df_urls_row[var]
return enforce_types(df)
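# The frame returned above is in long format: one row per (vintage, datetime) pair with a
# numeric 'value', plus the metadata columns listed in OTHER_VARS_TO_STORE ('long_name',
# 'code', 'short_name', 'measure') copied over from df_urls_row.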
def get_ons_series(dataset, code):
url = f"https://api.ons.gov.uk/timeseries/{code}/dataset/{dataset}/data"
# Get the data from the ONS API:
json_data = requests.get(url).json()
# Prep the data for a quick plot
title = json_data["description"]["title"]
df = (
pd.DataFrame(pd.json_normalize(json_data["months"]))
.assign(
date=lambda x: pd.to_datetime(x["date"]),
value=lambda x: | pd.to_numeric(x["value"]) | pandas.to_numeric |
# -*- coding: utf-8 -*-
import datetime as dt, IPython, pandas as pd, pyarrow as pa, pytest, requests, unittest
from builtins import object
from common import NoAuthTestCase
import graphistry
from mock import patch
triangleEdges = pd.DataFrame({'src': ['a', 'b', 'c'], 'dst': ['b', 'c', 'a']})
triangleNodes = pd.DataFrame({'id': ['a', 'b', 'c'], 'a1': [1, 2, 3], 'a2': ['red', 'blue', 'green']})
triangleNodesRich = pd.DataFrame({
'id': ['a', 'b', 'c'],
'a1': [1, 2, 3],
'a2': ['red', 'blue', 'green'],
'a3': [True, False, False],
'a4': [0.5, 1.5, 1000.3],
'a5': [dt.datetime.fromtimestamp(x) for x in [1440643875, 1440644191, 1440645638]],
'a6': [u'æski ēˈmōjē', u'😋', 's']
})
squareEvil = pd.DataFrame({
'src': [0,1,2,3],
'dst': [1,2,3,0],
'colors': [1, 1, 2, 2],
'list_int': [ [1], [2, 3], [4], []],
'list_str': [ ['x'], ['1', '2'], ['y'], []],
'list_bool': [ [True], [True, False], [False], []],
'list_date_str': [ ['2018-01-01 00:00:00'], ['2018-01-02 00:00:00', '2018-01-03 00:00:00'], ['2018-01-05 00:00:00'], []],
'list_date': [ [pd.Timestamp('2018-01-05')], [pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05')], [], []],
'list_mixed': [ [1], ['1', '2'], [False, None], []],
'bool': [True, False, True, True],
'char': ['a', 'b', 'c', 'd'],
'str': ['a', 'b', 'c', 'd'],
'ustr': [u'a', u'b', u'c', u'd'],
'emoji': ['😋', '😋😋', '😋', '😋'],
'int': [0, 1, 2, 3],
'num': [0.5, 1.5, 2.5, 3.5],
'date_str': ['2018-01-01 00:00:00', '2018-01-02 00:00:00', '2018-01-03 00:00:00', '2018-01-05 00:00:00'],
## API 1 BUG: Try with https://github.com/graphistry/pygraphistry/pull/126
'date': [dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1)],
'time': [pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05')],
## API 2 BUG: Need timedelta in https://github.com/graphistry/pygraphistry/blob/master/graphistry/vgraph.py#L108
'delta': [pd.Timedelta('1 day'), pd.Timedelta('1 day'), pd.Timedelta('1 day'), pd.Timedelta('1 day')]
})
for c in squareEvil.columns:
try:
squareEvil[c + '_cat'] = squareEvil[c].astype('category')
    except:
        # lists aren't categorical
        # print('could not make categorical', c)
        pass
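# After this loop, squareEvil also carries a '<col>_cat' categorical copy of every column
# that could be cast, so the upload paths are exercised with category dtypes as well.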
class Fake_Response(object):
def raise_for_status(self):
pass
def json(self):
return {'success': True, 'dataset': 'fakedatasetname', 'viztoken': '<PASSWORD>'}
def assertFrameEqual(df1, df2, **kwds ):
""" Assert that two dataframes are equal, ignoring ordering of columns"""
from pandas.util.testing import assert_frame_equal
return assert_frame_equal(df1.sort_index(axis=1), df2.sort_index(axis=1), check_names=True, **kwds)
@patch('webbrowser.open')
@patch.object(graphistry.util, 'warn')
@patch.object(graphistry.pygraphistry.PyGraphistry, '_etl1')
class TestPlotterBindings_API_1(NoAuthTestCase):
@classmethod
def setUpClass(cls):
graphistry.pygraphistry.PyGraphistry._is_authenticated = True
graphistry.register(api=1)
def test_no_src_dst(self, mock_etl, mock_warn, mock_open):
with self.assertRaises(ValueError):
graphistry.bind().plot(triangleEdges)
with self.assertRaises(ValueError):
graphistry.bind(source='src').plot(triangleEdges)
with self.assertRaises(ValueError):
graphistry.bind(destination='dst').plot(triangleEdges)
with self.assertRaises(ValueError):
graphistry.bind(source='doesnotexist', destination='dst').plot(triangleEdges)
def test_no_nodeid(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst')
with self.assertRaises(ValueError):
plotter.plot(triangleEdges, triangleNodes)
def test_triangle_edges(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst')
plotter.plot(triangleEdges)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_edges(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', edge_title='src')
plotter.plot(triangleEdges)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_nodes(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', node='id', point_title='a2')
plotter.plot(triangleEdges, triangleNodes)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_nodes_rich(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', node='id', point_title='a2')
plotter.plot(triangleEdges, triangleNodesRich)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_edges_rich_2(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst')
plotter.plot(squareEvil)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_unknown_col_edges(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', edge_title='doesnotexist')
plotter.plot(triangleEdges)
self.assertTrue(mock_etl.called)
self.assertTrue(mock_warn.called)
def test_unknown_col_nodes(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', node='id', point_title='doesnotexist')
plotter.plot(triangleEdges, triangleNodes)
self.assertTrue(mock_etl.called)
self.assertTrue(mock_warn.called)
@patch.object(graphistry.util, 'error')
def test_empty_graph(self, mock_error, mock_etl, mock_warn, mock_open):
mock_error.side_effect = ValueError('error')
plotter = graphistry.bind(source='src', destination='dst')
with self.assertRaises(ValueError):
plotter.plot( | pd.DataFrame([]) | pandas.DataFrame |
import logging, matplotlib, os, sys, glob
import scanpy as sc
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import colors
import pandas as pd
from glbase3 import genelist
plt.rcParams['figure.figsize']=(8,8)
sc.settings.verbosity = 3
sc.set_figure_params(dpi=200, dpi_save=200)
matplotlib.rcParams['pdf.fonttype']=42
matplotlib.rcParams['font.size']=10
from glbase3 import genelist, glload
sc.settings.figdir = 'diffexp'
[os.remove(f) for f in glob.glob('{}/*.pdf'.format(sc.settings.figdir))]
[os.remove(f) for f in glob.glob('gls/*.glb')]
[os.remove(f) for f in glob.glob('gls/*.tsv')]
transcript_id = glload('../../transcript_assembly/packed/all_genes.glb')
transcript_id = {i['transcript_id']: i for i in transcript_id}
de_leiden = 'de_clusters' # If you merge clusters;
#de_leiden = 'leiden_r1.00'
adata = sc.read('./de.h5ad')
sc.pl.rank_genes_groups(adata, n_genes=25, sharey=True, show=False, save='genes-top25.pdf')
sc.pl.rank_genes_groups(adata, key='rank_genes_groups', show=False, save='genes.pdf')
sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups', show=False, save='genes-top25.pdf')
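# Note: adata.uns['rank_genes_groups']['names'] is a structured (record) array with one
# field per cluster, so wrapping it in a DataFrame below gives the ranked gene names, one
# column per cluster.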
topall = | pd.DataFrame(adata.uns['rank_genes_groups']['names']) | pandas.DataFrame |
import copy
import logging
import math
import random
import warnings
from typing import List, Tuple
import numpy as np
import pandas as pd
import torch
logger = logging.getLogger()
EXPL_LENGTHS_TRAIN_20 = [(1, 208),
(2, 299),
(3, 354),
(4, 375),
(5, 298),
(6, 298),
(7, 224),
(8, 160),
(9, 140),
(10, 99),
(11, 61),
(12, 56),
(13, 45),
(14, 22),
(15, 21),
(16, 15),
(17, 13),
(18, 9),
(19, 5),
(20, 3),
(21, 9),
(22, 1)]
FREQS = np.array([c for i, c in EXPL_LENGTHS_TRAIN_20])
EXPL_LENGTH_FREQS = FREQS / sum(FREQS)
def gold_facts_in_n_closest_all(dataset, nearest_k):
results = {}
for i, row in dataset.qa_feats.iterrows():
gold_to_find = set(copy.deepcopy(row.gold_facts))
visible_facts = set(row.closest[:nearest_k])
lens_visible = []
gold_found = set()
while len(gold_to_find) > 0:
lens_visible.append(len(visible_facts))
found = set([fact for fact in gold_to_find if fact in visible_facts])
if len(found) == 0:
break
gold_found = gold_found.union(found)
gold_to_find -= found
for fact in found:
visible_from_fact = set(dataset.fact_feats.iloc[fact].closest[:nearest_k])
visible_facts = visible_facts.union(visible_from_fact)
results[i] = {
'all': set(copy.deepcopy(row.gold_facts)),
'found': gold_found,
'not_found': gold_to_find,
'mean_len_visible': np.mean(lens_visible)
}
return results
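# Both searches start from the nearest_k facts closest to the question. The *_all variant
# above adds the nearest_k neighbours of every gold fact found in a round to the visible
# set and iterates until no new gold facts become visible; the *_cur variant below follows
# a single found fact at a time, replacing the visible set with that fact's own nearest_k
# neighbours.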
def gold_facts_in_n_closest_cur(dataset, nearest_k):
results = {}
for i, row in dataset.qa_feats.iterrows():
gold_to_find = set(copy.deepcopy(row.gold_facts))
visible_facts = set(row.closest[:nearest_k])
lens_visible = []
gold_found = set()
while len(gold_to_find) > 0:
lens_visible.append(len(visible_facts))
facts = [fact for fact in gold_to_find if fact in visible_facts]
if len(facts) == 0:
break
selected = facts[0]
gold_found.add(selected)
gold_to_find -= {selected}
visible_facts = dataset.fact_feats.iloc[selected].closest[:nearest_k]
results[i] = {
'all': set(copy.deepcopy(row.gold_facts)),
'found': gold_found,
'not_found': gold_to_find,
'mean_len_visible': np.mean(lens_visible)
}
return results
def find_nearest_k_for_rate(dataset, target_rate, func=gold_facts_in_n_closest_all, start_at=0):
k = start_at
results = func(dataset, k)
nb_all = sum([len(res['all']) for res in results.values()])
while True:
nb_found = sum([len(res['found']) for res in results.values()])
mean_len_visible = np.mean([res['mean_len_visible'] for res in results.values()])
rate = nb_found / nb_all
if rate > target_rate:
break
k += 10
print('Trying k = %s, rate was %s' % (k, rate))
results = func(dataset, k)
return k, rate, mean_len_visible
def nCr(n, r):
f = math.factorial
return f(n) // f(r) // f(n - r)
def nb_combinations(dataset):
def q_nb_combinations(nb_facts):
return sum([dataset.nCr(nb_facts, i) for i in range(1, nb_facts)])
return sum([q_nb_combinations(len(gf)) for gf in dataset.qa_feats.gold_facts])
def nb_samples(dataset):
combs = [(i, sum([dataset.nCr(i, j) for j in range(0, i + 1)]))
for i in range(1, 23)]
lens = [(i, len([row for _, row in dataset.qa_feats.iterrows() if len(row.gold_facts) == i]))
for i in range(1, 23)]
tot = [(combs[i][0], combs[i][1] * lens[i][1]) for i in range(22)]
cum_tot = np.cumsum([count for _, count in tot])
real_counts = [(i + 1, c + sum(combs[i][1] * lens[j][1] for j in range(i + 1, 22)))
for i, c in enumerate(cum_tot)]
return combs, lens, tot, real_counts
def max_length_of_explanation(dataset):
"""
    Make sure the longest explanation fits in the language model (max seq length /
    max_position_embeddings). Returns (max_length, mean_length, max_nb_facts,
    worst-case length of the longest question plus the max_nb_facts longest facts).
    >> 19: (344, 91.55, 21, 734)
"""
max_length = 0
lengths = []
for i, row in dataset.qa_feats.iterrows():
qa_tok = row.tokenized
facts = list(dataset.fact_feats.iloc[list(row.gold_facts)].tokenized)
encoded = qa_tok + [t for fact in facts for t in fact]
length = len(encoded)
if length > max_length:
max_length = length
lengths.append(length)
longest_qa = sorted(list(dataset.qa_feats.tokenized),
key=lambda x: len(x), reverse=True)[0]
max_nb_facts = max([len(gf) for gf in dataset.qa_feats.gold_facts])
longest_facts = sorted(list(dataset.fact_feats.tokenized),
key=lambda x: len(x), reverse=True)[:max_nb_facts]
flattened_longest_facts = [t for fact in longest_facts for t in fact]
return (max_length, sum(lengths) / len(lengths), max_nb_facts,
len(longest_qa) + len(flattened_longest_facts))
POINTWISE_LOSSES = ['xent', 'mse', 'xent-2']
BATCHWISE_LOSSES = ['fisher']
NOISE_CONTRASTIVE_LOSSES: List[str] = ['nce', 'ranking-nce', 'binary-nce']
CONTRASTIVE_LOSSES = ['ranknet', 'lambdaloss', 'margin-pairs'] + NOISE_CONTRASTIVE_LOSSES
def should_recompute_lengths(args, dataset, train, valid, infer):
return (
(train and (not hasattr(dataset, 'max_length_in_batches_single_q')
and not hasattr(dataset, 'max_length_in_batches')))
or args.nearest_k_visible != dataset.nearest_k_visible
or (train and (dataset.batch_size != args.train_batch_size or
dataset.tokens_per_batch != args.train_tokens_per_batch))
or ((valid or infer) and (dataset.batch_size != args.eval_batch_size or
dataset.tokens_per_batch != args.eval_tokens_per_batch))
)
def read_explanations(path: str) -> List[Tuple[str, str]]:
header = []
uid = None
df = pd.read_csv(path, sep='\t', dtype=str)
for name in df.columns:
if name.startswith('[SKIP]'):
if 'UID' in name and not uid:
uid = name
else:
header.append(name)
if not uid or len(df) == 0:
warnings.warn('Possibly misformatted file: ' + path)
return []
return df.apply(lambda r: (r[uid], ' '.join(str(s) for s in list(r[header]) if not pd.isna(s))), 1).tolist()
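# Example of the column handling above (hypothetical headers): for columns
# ['[SKIP] UID', '[SKIP] Category', 'word 1', 'word 2'] the uid comes from '[SKIP] UID'
# and the explanation text is the whitespace join of the non-[SKIP] columns, with NaN
# cells dropped.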
def read_explanations_wo_fill(path: str) -> List[Tuple[str, str]]:
header = []
uid = None
df = | pd.read_csv(path, sep='\t', dtype=str) | pandas.read_csv |
import os
import time
import json
import numpy as np
import pandas as pd
import torch
from hydroDL import kPath
from hydroDL.app import waterQuality
from hydroDL.model import rnn, crit
caseName = 'refBasins'
ratioTrain = 0.8
rho = 365
batchSize = 100
nEpoch = 100
hiddenSize = 64
modelFolder = os.path.join(kPath.dirWQ, 'modelA', caseName)
if not os.path.exists(modelFolder):
os.mkdir(modelFolder)
# predict - point-by-point
modelFile = os.path.join(modelFolder, 'modelSeq_Ep' + str(nEpoch) + '.pt')
model = torch.load(modelFile)
nt = dictData['rho']
nd, ny = y.shape
batchSize = 1000
iS = np.arange(0, nd, batchSize)
iE = np.append(iS[1:], nd)
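# iS/iE hold the [start, end) row ranges of each mini-batch: blocks of `batchSize` rows,
# with the final block truncated at nd.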
yOutLst = list()
xNorm = (xNorm - np.tile(statDict['xMean'], [nt, nd, 1])) / np.tile(
statDict['xStd'], [nt, nd, 1])
cNorm = (c - np.tile(statDict['cMean'], [nd, 1])) / np.tile(
statDict['cStd'], [nd, 1])
for k in range(len(iS)):
print('batch: '+str(k))
xT = torch.from_numpy(np.concatenate(
[xNorm[:, iS[k]:iE[k], :], np.tile(cNorm[iS[k]:iE[k], :], [nt, 1, 1])], axis=-1)).float()
if torch.cuda.is_available():
xT = xT.cuda()
model = model.cuda()
yT = model(xT)[-1, :, :]
yOutLst.append(yT.detach().cpu().numpy())
yOut = np.concatenate(yOutLst, axis=0)
yOut = yOut * np.tile(statDict['yStd'], [nd, 1]) +\
np.tile(statDict['yMean'], [nd, 1])
# save output
dfOut = info
dfOut['train'] = np.nan
dfOut['train'][indTrain] = 1
dfOut['train'][indTest] = 0
varC = dictData['varC']
targetFile = os.path.join(modelFolder, 'target.csv')
if not os.path.exists(targetFile):
targetDf = pd.merge(dfOut, | pd.DataFrame(data=y, columns=varC) | pandas.DataFrame |
#!/usr/bin/env python
"""
Compute CCF of a list of observed spectra with a weighted binary mask.
"""
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import textwrap
import ipdb
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from raccoon import ccf as ccflib
from raccoon import carmenesutils
from raccoon import expresutils
from raccoon import peakutils
from raccoon import plotutils
from raccoon import pyutils
from raccoon import spectrographutils
from raccoon import spectrumutils
from raccoon import telluricutils
# Plots
mpl.rcdefaults()
plotutils.mpl_custom_basic()
plotutils.mpl_size_same(font_size=18)
# Constants
C_MS = 2.99792458*1.e8 # Light speed [m/s]
C_KMS = 2.99792458*1.e5 # Light speed [km/s]
###############################################################################
def parse_args():
parser = argparse.ArgumentParser(
description=textwrap.dedent('''
`ccf_compute.py`
Compute CCF of a list of observed spectra with a weighted binary mask.
Arguments
---------
'''),
epilog=textwrap.dedent('''
'''),
formatter_class=pyutils.CustomFormatter)
# Spectra
parser.add_argument('fil_or_list_spec', help='File with the names of the reduced FITS spectra or directly the file names (names must include the absolute path to the files). The file with the list cannot end in `.fits`.', nargs='+', type=str)
parser.add_argument('inst', choices=['HARPS', 'HARPN', 'CARM_VIS', 'CARM_NIR', 'EXPRES'], help='Instrument.')
parser.add_argument('--filobs2blaze', help='List of blaze file corresponding to each observation. Format: Column 1) filspec (e2ds), Column 2) filblaze. Full paths. For HARPS/N data. Needed if do not want to use the default from the header. If None, get file names from each observation header.', type=str, default=None)
# parser.add_argument('--dirblaze', help='Directory containing blaze files. For HARPS/N data.', type=str, default=None)
parser.add_argument('--expresw', choices=['wavelength', 'bary_wavelength', 'excalibur', 'bary_excalibur'], help='EXPRES wavelength.', default='bary_excalibur')
# Mask
parser.add_argument('filmask', help='Path to custom mask file (file with extension `.mas`), or mask ID to use one of the default masks, or (CARM GTO) path to "mask selection file" (file with any other extension that specifies the masks available to choose from). Mask file format: Columns: 0) w (wavelengths), 1) f (weights), separated by whitespaces. Mask-selection file format: Columns: 0) object used to make the mask `objmask`, 1) spectral type of `objmask`, 2) `vsini` of `objmask`, 3) path to mask file (`.mas` extension). There can only be one mask file for each combination of spt-vsini. TODO: Only spt is used to select the mask (not the vsini).', type=str)
parser.add_argument('--maskformatharp', help='If mask format is w1, w2, f and wavelengths are in air -> it is transformed into w, f and vacuum.', action='store_true')
parser.add_argument('--maskair', help='If mask wavelengths in air, tranform to vacuum. Not needed if `maskformatharp` is True.', action='store_true')
parser.add_argument('--objmask', help='Overwrites values from `filmask`.', type=str, default=None)
parser.add_argument('--sptmask', help='Overwrites values from `filmask`.', type=str, default=None)
parser.add_argument('--vsinimask', help='Overwrites values from `filmask`.', type=float, default=None)
# parser.add_argument('--filmaskabserr')
# Target info
parser.add_argument('--obj', help='CARMENES ID.', type=str)
parser.add_argument('--targ', help='Target name (SIMBAD).', type=str)
parser.add_argument('--spt', help='Target spectral type. Choices: A) Spectral type with the format `M3.5` (letter and number with 1 decimal), B) `carmencita` to look for the spectral type in the database. Used to select a mask if none is specified. If input not valid, use the default value.', type=str, default='M3.5')
parser.add_argument('--vsini', help='Target projected rotational velocity [km/s]. Choices: A) float in km/s, e.g. `2.5`, B) `carmencita` to look for the vsini in the database. Used to estimate the CCF RV range. If input not valid or None (default), compute the test CCF and get its width.', type=str, default=None)
parser.add_argument('--rvabs', help='Absolute RV of the star [km/s]. Used to estimate the centre of the CCF RV array and remove tellurics. If None (default), compute the test CCF and get the RVabs from its minimum. Choices: A) float, B) `carmencita`', type=str, default=None)
parser.add_argument('--bervmax', help='Maximum BERV to consider when removing mask lines not always visible. Options: A) `obsall` (default): get maximum absolute BERV of all observations available, B) float [m/s]', type=str, default='obsall')
# CCF computation
parser.add_argument('--ords_use', nargs='+', help='Sectral orders to consider for the CCF (the orders not listed here will not be used). The orders are counted from 0 (bluest order) to N-1 (reddest order), where N is the number of orders in the template file - so these are not the real orders. Orders in instruments: CARM_VIS: 0-61, CARM_NIR:0-27 (use half order), HARPS: 0-71, HARPN: 0-71. If None (default), all orders are used.', default=None)
parser.add_argument('--pmin', help='Minimum pixel of each order to use. If None (default) all the pixels are used. Pixels: CARMENES 0-4095, HARP 0-4095, EXPRES 0-7919.', type=int, default=None)
parser.add_argument('--pmax', help='Maximum pixel of each order to use. If None (default) all the pixels are used.', type=int, default=None)
parser.add_argument('--wrange', nargs='+', help='Wavelength range to use (2 values), e.g. `--wrange 6000 6500`, [A]. Overwrites ORDS_USE. If None (default), use range defined by orders in ORDS_USE.', type=float, default=None)
parser.add_argument('--nlinmin', help="Minimum number of usable mask lines per order. Orders with less lines won't be used to compute the CCF.", type=int, default=0)
# Observations
parser.add_argument('--obsservalrvc', help='Compute CCFs only of observations in rvc serval files. Must provide SERVAL data.', action='store_true')
# Extra data
parser.add_argument('--dirserval', help='Directory containing SERVAL outputs for the observations to analyse. Used to get RV corrections and precise BJD.', default=None)
# Precise BJD
parser.add_argument('--bjd', help='BJD source. If `serval`, must provide `dirserval`', choices=['header', 'serval'], default='header')
# RV corrections
parser.add_argument('--rvshift', help='Correct spectra RV for BERV, secular acceleration, instrumental drift or other drifts. Options: A) `header`: get corrections (BERV and drift) from FITS header. Secular acceleration and other drift is 0. If nan, use 0. B) `serval`: get corrections (BERV, drift and sa) from SERVAL outputs of the input observations. Must provide SERVAL output directory with `dirserval` (must have run SERVAL previously). C) `serval_header`: get corrections from SERVAL outputs, and if not or nan, get them from header. D) `pathtofile`: File containing the corrections for each obs. Columns: 0) observation name, rest of columns: BERV, drift, sa, other... with header indicating which column is each correction. Can only have a single column with all the corrections already put together. E) `none` (default) then no correction is applied. All in [km/s]', type=str, default=None)
# Flux correction
parser.add_argument('--fcorrorders', help='Correct order flux so that all observations have the same SED', choices=['obshighsnr'], default=None)
# Telluric mask
parser.add_argument('--filtell', help='File containing a telluric mask, or `default` to use the default file. If None, no tellurics are removed', type=str, default=None)
parser.add_argument('--tellbroadendv', help='Velocity by which to broaden the telluric lines to be removed. Options: A) `obsall` (default): get maximum absolute BERV of all observations available, B) float [m/s].', type=str, default='obsall') # nargs='+',
# Extra data for CARMENES GTO
parser.add_argument('--dircarmencita', help='Absolute path.', type=str, default=None)
parser.add_argument('--carmencitaversion', help='', default=None)
# CCF test
parser.add_argument('--ccftestrvcen', help='Central velocity of the RV array of the test CCF [km/s].', type=float, default=0.)
parser.add_argument('--ccftestrvrng', help='Half of the velocity range of the RV array of the test CCF [km/s].', type=float, default=200.)
parser.add_argument('--ccftestrvstp', help='Step of the RV array of the test CCF [km/s].', type=float, default=1.)
parser.add_argument('--ccftesto', help='Order to use to compute the CCF test. If None (default) a specific order depending on the instrument is used: CARM_VIS 36, CARM_NIR X, HARPS and HARPN X.', type=int, default=None)
parser.add_argument('--ccftestdmin', help='', type=float, default=2.)
# CCF
parser.add_argument('--rvcen', help='Central velocity of the RV array of the definitive CCF [km/s] (i.e. absolute RV of the target). If None (default), a test CCF is computed over a broad RV range to find the minimum.', type=float, default=None)
parser.add_argument('--rvrng', help='Half of the velocity range of the RV array of the definitive CCF [km/s]. If None (default), a test CCF is computed and the range is taken from the test CCF width.', type=float, default=None)
parser.add_argument('--rvstp', help='Step of the RV array of the definitive CCF [km/s]. This should be smaller than the "real" step RVSTPREAL in order to properly compute the bisector.', type=float, default=0.25)
parser.add_argument('--rvstpreal', help='Step of the RV array according to the instrument resolution. Needed in order to correctly compute the parameter errors. If None, the default values are used. Better not change this.', type=float, default=None)
# CCF fit
parser.add_argument('--fitfunc', help='', type=str, default='gaussian')
parser.add_argument('--fitrng', help='Range of CCF used to fit the function `fitfunc`. Options: A) float indicating half of the fit range from the CCF minimum [km/s] (e.g. [rvmin - rng, rvmin + rng], B) `maxabs` fit the region between the 2 absolute maxima (default), C) `maxcl` fit the region between the 2 maxima closest to the CCF, D) `all` fit all the CCF range.', type=str, default='maxabs')
parser.add_argument('--fitrngeach', help='By default (i.e. if this option is not present), if `fitrng` is `maxabs` or `maxcl`, define fit range for all observation using the first one. If `fitrngeach` is present, the maxima are selected for each observation, which may slightly change the fit.', action='store_true')
# Bisector
parser.add_argument('--bisectorn', help='', type=int, default=100)
parser.add_argument('--bisbotmin', help='BIS (bisector inverse slope) bottom region minimum [percent]', type=float, default=10.)
parser.add_argument('--bisbotmax', help='BIS bottom region maximum [percent]', type=float, default=40.)
parser.add_argument('--bistopmin', help='BIS top region minimum [percent]', type=float, default=60.)
parser.add_argument('--bistopmax', help='BIS top region maximum [percent]', type=float, default=90.)
# Output
parser.add_argument('--dirout', help='Output directory.', default='./ccf_output/', type=str)
parser.add_argument('--plot_sv', help='Make and save plots.', action='store_true')
parser.add_argument('--plot_sh', help='Show all plots.', action='store_true')
parser.add_argument('--plot_spec', action='store_true')
# parser.add_argument('--plot_ccfproc', help='CCF process', action='store_true')
parser.add_argument('--plottest_sh', help='Show test plots to check progress.', action='store_true')
parser.add_argument('--plottest_sv', help='Save test plots to check progress.', action='store_true')
parser.add_argument('--plot_ext', nargs='+', help='Extensions of the plots to be saved (e.g. `--plot_ext pdf png`)', default=['pdf'])
parser.add_argument('--verbose', help='', action='store_true')
parser.add_argument('--testnobs', help='Testing. Number of observations (>0).', type=int, default=0)
args = parser.parse_args()
return args
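# Illustrative invocation (file and mask names are hypothetical, not from the original
# source):
#   python ccf_compute.py obslist.txt CARM_VIS mymask.mas --obj J00000+000 \
#       --rvshift serval --dirserval ./serval_out/ --filtell default --dirout ./ccf_output/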
def main():
args = parse_args()
# Verbosity
verboseprint = print if args.verbose else lambda *a, **k: None
verboseprint('\n')
verboseprint('#'*40)
verboseprint('\nCompute CCF\n')
verboseprint('#'*40)
verboseprint('\n')
# Expand directories and files
if isinstance(args.fil_or_list_spec, str): args.fil_or_list_spec = os.path.expanduser(args.fil_or_list_spec)
if isinstance(args.filobs2blaze, str): args.filobs2blaze = os.path.expanduser(args.filobs2blaze)
if isinstance(args.filmask, str): args.filmask = os.path.expanduser(args.filmask)
if isinstance(args.dirout, str): args.dirout = os.path.expanduser(args.dirout)
if isinstance(args.filtell, str): args.filtell = os.path.expanduser(args.filtell)
if isinstance(args.dirserval, str): args.dirserval = os.path.expanduser(args.dirserval)
if isinstance(args.dircarmencita, str): args.dircarmencita = os.path.expanduser(args.dircarmencita)
# Outputs
if not os.path.exists(args.dirout): os.makedirs(args.dirout)
# Orders
carmnirordssplit = True
nord = spectrographutils.inst_nord(args.inst, carmnirsplit=carmnirordssplit, notfound=None, verb=True)
ords = np.arange(0, nord, 1)
    # Make sure ords_use is a list-like of ints
if args.ords_use is not None:
if not isinstance(args.ords_use, (list, tuple, np.ndarray)): args.ords_use = [args.ords_use]
if not isinstance(args.ords_use[0], int): args.ords_use = [int(o) for o in args.ords_use]
else:
args.ords_use = ords
args.ords_use = np.sort(args.ords_use)
# Pixels per order
if 'CARM' in args.inst or 'HARP' in args.inst:
# pixmin = 0
# pixmax = 4096
npix = 4096
elif args.inst == 'EXPRES':
npix = 7920
pix = np.arange(0, npix, 1)
    # Make sure the extreme pixels to remove (pmin, pmax) lie inside the detector limits
if args.pmin is not None:
if args.pmin < pix[0] or args.pmin > pix[-1]:
verboseprint('Minimum pixel per order not correct {}. Setting it to {}'.format(args.pmin, pix[0]))
args.pmin = pix[0]
else:
args.pmin = pix[0]
if args.pmax is not None:
if args.pmax < pix[0] or args.pmax > pix[-1]:
verboseprint('Maximum pixel per order not correct {}. Setting it to {}'.format(args.pmax, pix[-1]))
args.pmax = pix[-1]
else:
args.pmax = pix[-1]
# To plot or not to plot
doplot = args.plot_sh or args.plot_sv
doplottest = args.plottest_sh or args.plottest_sv
    # Make sure the figure extensions argument is a list
if not isinstance(args.plot_ext, list): args.plot_ext = [args.plot_ext]
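    # Save the exact command-line call and the parsed arguments in the output
    # directory for reproducibility (only when run as a script, not when imported)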
if __name__ == "__main__":
pyutils.save_command_current_hist_args(args.dirout, sys.argv, args)
###########################################################################
# Get reduced spectra
# -------------------
# Check if input is file with list or directly the filenames
# - more than 1 FITS filename in input
if len(args.fil_or_list_spec) > 1:
lisfilobs = args.fil_or_list_spec
# - single FITS filename in input
elif os.path.splitext(args.fil_or_list_spec[0])[1] == '.fits':
lisfilobs = args.fil_or_list_spec
# - file with list in input
else:
# Read names of the files
args.fil_or_list_spec = os.path.expanduser(args.fil_or_list_spec[0])
lisfilobs = np.loadtxt(args.fil_or_list_spec, dtype='str', usecols=[0])
# Expand user
lisfilobs = np.sort([os.path.expanduser(i) for i in lisfilobs])
if args.testnobs > 0:
lisfilobs = lisfilobs[:args.testnobs]
if args.obsservalrvc and args.dirserval:
# Read SERVAL data
dataserval = carmenesutils.serval_get(args.dirserval, obj=args.obj, lisdataid=['rvc', 'info'], inst=args.inst)
# Get observations with non-nan rvc
mask = np.isfinite(dataserval['servalrvc'])
dataserval = dataserval[mask]
# Clean observations
lisfilobs_new = []
for i, filobs in enumerate(lisfilobs):
obs = os.path.basename(filobs)
if obs in dataserval['obs'].values:
lisfilobs_new.append(filobs)
lisfilobs = lisfilobs_new
# Number of observations
nobs = len(lisfilobs)
if nobs == 0:
sys.exit('No observations found! Exit.')
# Time ids
# lisfiltimeid = [i.replace('_A', '') for i in lisfilobs]
listimeid = [os.path.basename(i).replace('_A.fits', '.fits') for i in lisfilobs]
listimeid = | pd.DataFrame({'timeid': listimeid}, index=lisfilobs) | pandas.DataFrame |
import pandas as pd
from pandas.io.json import json_normalize
import requests
import backoff
ticker_df = | pd.read_csv('djia_symbols.csv') | pandas.read_csv |
import pdb
import unittest
import torch
import pandas as pd
import numpy as np
from agents.SACAgent import SACAgent
from cobs.model import Model
from test.test_config import state_name, sac_network_map, eplus_naming_dict, eplus_var_types, \
SatAction, BlindActionSingleZone, ThermActionSingleZone, BlindActionMultiZone,\
ThermActionMultiZone
from utils.rewards import ViolationPActionReward
# SatAction = ActionCreator("Schedule:Constant", "Schedule Value", "SAT_SP")
# BlindActionSingle = ActionCreator("Schedule:Constant", "Schedule Value", "WF-1_shading_schedule")
# ThermActionSingle = ActionCreator("Zone Temperature Control", "Heating Setpoint", "SPACE1-1")
class SACTest(unittest.TestCase):
agent_params = {
"policy_type": "Gaussian",
"gamma": 0.99,
"tau": 0.005,
"lr": 0.0003,
"batch_size": 2,
"hidden_size": 2,
"updates_per_step": 1,
"target_update_interval": 1,
"replay_size": 200,
"cuda": False,
"step": 300 * 3,
"start_steps": 5,
"alpha": 0.2,
"automatic_entropy_tuning": False,
"num_inputs": len(state_name),
"min_sat_action": -20,
"max_sat_action": 20,
"seed": 42
}
eplus_path = '/Applications/EnergyPlus-9-' \
'3-0-bugfix/'
# idf_path = 'test/eplus_files/test_control.idf'
# idf_path = 'test/eplus_files/5Zone_Control_SAT.idf'
epw_path = 'test/eplus_files/test.epw'
Model.set_energyplus_folder(eplus_path)
def test_sac_sat(self):
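        # Single supply-air-temperature (SAT) setpoint action; no thermostat or blind actions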
self.agent_params["num_sat_actions"] = 1
self.agent_params["num_blind_actions"] = 0
self.agent_params["num_therm_actions"] = 0
network = sac_network_map['leaky']
agent = SACAgent(self.agent_params, network, chkpt_dir='test/agent_tests/test_results')
ep_model = self.setup_env('test/eplus_files/test_control.idf')
observations, actions, agent = self.run_episode(ep_model, agent, "SAT_SP")
obs_test = pd.DataFrame.from_dict(observations)
sat_actions, therm_actions, blind_actions = actions
# pdb.set_trace()
obs_test['actions'] = [a1 for a1, _ in sat_actions]
obs_test['sat_stpts'] = [a2.item() for _, a2 in sat_actions]
# obs_test['blind_actions'] = blind_actions
float_cols = [
'Outdoor Temp.',
'Diff. Solar Rad.',
'Direct Solar Rad.',
'Indoor Temp.',
'Indoor Temp. Setpoint',
'PPD',
'Occupancy Flag',
'Heat Coil Power',
'HVAC Power',
'Sys Out Temp.',
'MA Temp.',
'actions',
'sat_stpts'
]
obs_true = | pd.read_csv('test/agent_tests/saved_results/sac_no_blinds_obs.csv') | pandas.read_csv |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3,4,7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEquals(self.ts.ix[d1], self.ts[d1])
self.assertEquals(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][::-1]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assert_((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assert_((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s[::-1]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_ix_setitem(self):
inds = self.series.index[[3,4,7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3,4,7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEquals(self.series[d1], 4)
self.assertEquals(self.series[d2], 6)
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.order()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# tuple name, e.g. from hierarchical index
self.series.name = ('foo', 'bar', 'baz')
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
def test_to_string(self):
from cStringIO import StringIO
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
self.assert_(retval is None)
self.assertEqual(buf.getvalue().strip(), s)
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')]
expected = [format(x) for x in self.ts]
self.assertEqual(result, expected)
# empty string
result = self.ts[:0].to_string()
self.assertEqual(result, '')
result = self.ts[:0].to_string(length=0)
self.assertEqual(result, '')
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True)
last_line = result.split('\n')[-1].strip()
self.assertEqual(last_line, "Name: foo, Length: %d" % len(cp))
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 -1.23\n'
'3 4.56')
self.assertEqual(result, expected)
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 5\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = ('0 NaN\n'
'1 1.568\n'
'2 NaN\n'
'3 -3.000\n'
'4 NaN')
self.assertEqual(result, expected)
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assert_(getkeys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert_almost_equal(s.sum(), s2.sum())
import pandas.core.nanops as nanops
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
res = nanops.nansum(arr, axis=1)
expected = nanops._nansum(arr, axis=1)
assert_almost_equal(res, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = TimeSeries(np.ones(10, dtype=int), index=range(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_var(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_skew(self):
from scipy.stats import skew
        alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_argsort(self):
self._check_accum_op('argsort')
argsorted = self.ts.argsort()
self.assert_(issubclass(argsorted.dtype.type, np.integer))
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def _check_stat_op(self, name, alternate, check_objects=False):
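        # Exercise the reducer against `alternate`: with NaNs injected and skipna
        # toggled, on an all-NaN series, on object dtype containing None, on
        # DateRange data when check_objects is set, and with bottleneck on/off.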
from pandas import DateRange
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assert_(notnull(f(self.series)))
self.assert_(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona))
allna = self.series * nan
self.assert_(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# check DateRange
if check_objects:
s = Series(DateRange('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
testit()
try:
import bottleneck as bn
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name):
func = getattr(np, name)
self.assert_(np.array_equal(func(self.ts), func(np.array(self.ts))))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_(np.array_equal(result, expected))
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
result = np.round(self.ts, 2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index)
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.] , index=range(3))
result = s.prod()
self.assert_(not isinstance(result, Series))
def test_quantile(self):
from scipy.stats import scoreatpercentile
q = self.ts.quantile(0.1)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 90))
def test_describe(self):
_ = self.series.describe()
_ = self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count' : 7, 'unique' : 4,
'top' : 'a', 'freq' : 3}, index=result.index)
assert_series_equal(result, expected)
def test_append(self):
appendedSeries = self.series.append(self.ts)
for idx, value in appendedSeries.iteritems():
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.ts.index:
self.assertEqual(value, self.ts[idx])
else:
self.fail("orphaned index!")
self.assertRaises(Exception, self.ts.append, self.ts)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_all_any(self):
np.random.seed(12345)
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assert_(not bool_series.all())
self.assert_(bool_series.any())
def test_operators(self):
series = self.ts
other = self.ts[::2]
def _check_op(other, op, pos_only=False):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_almost_equal(cython_or_numpy, python)
def check(other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv',
'gt', 'ge', 'lt', 'le']
for opname in simple_ops:
_check_op(other, getattr(operator, opname))
_check_op(other, operator.pow, pos_only=True)
_check_op(other, lambda x, y: operator.add(y, x))
_check_op(other, lambda x, y: operator.sub(y, x))
_check_op(other, lambda x, y: operator.truediv(y, x))
_check_op(other, lambda x, y: operator.floordiv(y, x))
_check_op(other, lambda x, y: operator.mul(y, x))
_check_op(other, lambda x, y: operator.pow(y, x),
pos_only=True)
check(self.ts * 2)
check(self.ts * 0)
check(self.ts[::2])
check(5)
def check_comparators(other):
_check_op(other, operator.gt)
_check_op(other, operator.ge)
_check_op(other, operator.eq)
_check_op(other, operator.lt)
_check_op(other, operator.le)
check_comparators(5)
check_comparators(self.ts + 1)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x' : 0.})
# it works!
_ = s1 * s2
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_idxmin(self):
# test idxmin
# _check_stat_op approach can not be used here because of isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assert_(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmin()))
def test_idxmax(self):
# test idxmax
# _check_stat_op approach can not be used here because of isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assert_(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmax()))
def test_operators_date(self):
result = self.objSeries + timedelta(1)
result = self.objSeries - timedelta(1)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assert_(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assert_(len(result) == 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_(np.array_equal(added[:-5], expected))
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10),
dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
from pandas.util.testing import rands
import operator
# GH 353
vals = Series([rands(5) for _ in xrange(10)])
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals' : vals})
result = 'foo_' + frame
expected = DataFrame({'vals' : vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A' : self.ts})
tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
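            # Build the expected Series element-wise: where exactly one side is NaN,
            # substitute fill_value before applying op; where both sides are NaN the
            # result stays NaN.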
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
ops = [Series.add, Series.sub, Series.mul, Series.div]
equivs = [operator.add, operator.sub, operator.mul]
if py3compat.PY3:
equivs.append(operator.truediv)
else:
equivs.append(operator.div)
fillvals = [0, 0, 1, 1]
for op, equiv_op, fv in zip(ops, equivs, fillvals):
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
self.assert_(np.array_equal(combined, series))
# Holes filled from input
combined = series_copy.combine_first(series)
self.assert_(np.isfinite(combined).all())
self.assert_(np.array_equal(combined[::2], series[::2]))
self.assert_(np.array_equal(combined[1::2], series_copy[1::2]))
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_dict_equal(strings, combined, compare_keys=False)
tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_corr(self):
import scipy.stats as stats
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
# No overlap
self.assert_(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assert_(isnull(cp.corr(cp)))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
self.assertAlmostEqual(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
if int(scipy.__version__.split('.')[1]) < 9:
raise nose.SkipTest
# results from R
A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
0.76910310, -0.06430576, -2.09704447, 0.40660407,
-0.89926396, 0.94209606])
B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,
-0.01680292, 1.17258718, -1.06009347, -0.10222060,
-0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std()**2)
# partial overlap
self.assertAlmostEqual(self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std()**2)
# No overlap
self.assert_(np.isnan(self.ts[::2].cov(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assert_(isnull(cp.cov(cp)))
def test_copy(self):
ts = self.ts.copy()
ts[::2] = np.NaN
# Did not modify original Series
self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
def test_value_counts_nunique(self):
s = Series(['a', 'b', 'b', 'b', 'b', 'a', 'c', 'd', 'd', 'a'])
hist = s.value_counts()
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
assert_series_equal(hist, expected)
self.assertEquals(s.nunique(), 4)
# handle NA's properly
s[5:7] = np.nan
hist = s.value_counts()
expected = s.dropna().value_counts()
assert_series_equal(hist, expected)
s = Series({})
hist = s.value_counts()
expected = Series([])
| assert_series_equal(hist, expected) | pandas.util.testing.assert_series_equal |
# Author: <NAME>
import itertools, glob, re
from functools import reduce
from operator import add
import pandas as pd
shared = '/home/berkeleylab/Model/storage'
# TODO: maintain total_iters.txt to store the iteration count -- though it may not be needed, since the iteration count is already encoded in the file names.
def initializers(what_type, which_iter, which_stage):
    df = pd.read_csv(glob.glob(shared+'/1_*.csv')[0])
num_neg_directionality = (df.loc['Positive_Directionality']==0).astype(int).sum()
    # TODO corner cases: num_neg_directionality == 0 and == 8
def compute_intercept(prior_df, post_df):
intercept_prior = prior_df.iloc[0]
L1_prior = prior_df.iloc[3]
intercept_post = post_df.iloc[0]
L1_post = post_df.iloc[3]
v_dIntercept_prior = prior_df.iloc[6]
dIntercept = (L1_prior[1:]-L1_post[1:])/(intercept_prior[1:]-intercept_post[1:])
# Old implementation
#m = (modeshare_prior[1:]-modeshare_post[1:])/(intercept_prior[1:]-intercept_post[1:])
#del_nudges = (post_df.iloc[3][1:])/m
# Parameters:
beta = 0.9
alpha = 0.01 # optimization step size
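        # Momentum-style update implemented below:
        #   v_new = beta * v_old + (1 - beta) * dIntercept
        #   intercept_new = intercept_post - alpha * v_new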
updated_v_dIntercept = list(map(add, list(map(lambda x: x * beta, v_dIntercept_prior)), list(map(lambda x: x * (1 - beta), dIntercept))))
        input_vector = intercept_post[1:] - list(map(lambda x: x * alpha, updated_v_dIntercept)) # analyse whether addition or subtraction is more suitable here
return updated_v_dIntercept, input_vector
if what_type == 'base_nudge':
# returns updated nudge with 0.5 increment or decrement
params = []
for i in range(1,9):
params.append(reduce(lambda x, k: x+"b" if k%2 else x+"a", range(i), ""))
# a, b = 5, 15
neg_identifier, pos_identifier = params[num_neg_directionality-1], params[7-num_neg_directionality]
p_permuts = set([''.join(a) for a in itertools.permutations(pos_identifier)])
n_permuts = set([''.join(a) for a in itertools.permutations(neg_identifier)])
total_combos = list(itertools.product(p_permuts, n_permuts)) #(p_permuts,n_permuts)
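        # Each letter maps to a base nudge magnitude below: 'a' -> 5, 'b' -> 15,
        # negated for the negative-directionality modes.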
ip_vec_grp = []
for i in range(len(total_combos)):
single_ip_vec=[]
for j in range(len(total_combos[i][0])):
if total_combos[i][0][j] == 'a':
single_ip_vec.append(5)
elif total_combos[i][0][j]== 'b':
single_ip_vec.append(15)
for k in range(len(total_combos[i][1])):
if total_combos[i][1][k]== 'a':
single_ip_vec.append(-5)
elif total_combos[i][1][k]== 'b':
single_ip_vec.append(-15)
ip_vec_grp.append(single_ip_vec)
next_ip_vec_grp = []
for i in range(len(ip_vec_grp)):
single_ip_vec=[]
for j in range(len(ip_vec_grp[i])):
if ip_vec_grp[i][j] > 0:
single_ip_vec.append(ip_vec_grp[i][j]+0.5)
else:
single_ip_vec.append(ip_vec_grp[i][j]-0.5)
next_ip_vec_grp.append(single_ip_vec)
input_vector_group = ip_vec_grp + next_ip_vec_grp
input_vector = input_vector_group[int(which_iter)]
elif what_type == 'linear_inter_extra_polate':
# returns updated nudge based on first order extrapolation
# required args: what_type, which_iter, which_stage
# Arg file info:
#iterations bike car cav drive_transit ride_hail ride_hail_pooled ride_hail_transit walk walk_transit
#intercepts_now 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
#benchmark 0.000000 64.856712 0.000000 5.982906 5.228758 5.027652 5.982906 6.938160 5.982906
#modeshare_now 23.567151 56.159110 5.132592 1.796407 0.171086 8.383234 0.000000 0.598802 4.191617
#L1 -23.567151 8.697602 -5.132592 4.186499 5.057672 -3.355581 5.982906 6.339357 1.791289
#L1_rank 1.000000 2.000000 5.000000 7.000000 6.000000 8.000000 4.000000 3.000000 9.000000
#Positive_Directionality 0.000000 1.000000 0.000000 1.000000 1.000000 0.000000 1.000000 1.000000 1.000000
#v_dIntercept 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
# Name: shared+'/5_60.csv'
# 2 stage implementation only
if num_neg_directionality == 1 or num_neg_directionality == 5:
last_results = 70 # no of iterations:from 35*2 to 70th -> 18 (88th) -> 18 (106th) -> 9 (115th) -> 9 (124th)
last_results_stage_1 = 18
elif num_neg_directionality == 2 or num_neg_directionality == 6:
last_results = 80 # no of iterations:from 40*2 to 20 -> 20 -> 10 -> 10
last_results_stage_1 = 20
elif num_neg_directionality == 3 or num_neg_directionality == 7:
last_results = 60 # no of iterations:from 30*2 to 16 -> 16 -> 8 -> 8
last_results_stage_1 = 16
elif num_neg_directionality == 4:
last_results = 72 # no of iterations:from 36*2 to 18 -> 18 -> 9 -> 9
last_results_stage_1 = 18
if which_stage == '1_1':
prior, post = ([] for i in range(2))
            for i in range(1,(last_results//2)+1):
prior.append(glob.glob(shared+'/'+str(i)+'_*.csv'))
            for i in range((last_results//2)+1,last_results+1):
post.append(glob.glob(shared+'/'+str(i)+'_*.csv'))
merged = list(itertools.chain(*post))
best_results_ranked = sorted(merged, key=lambda merged: re.split('_|.csv',merged)[2], reverse=False)
iter_rank_nums_1_1 = []
for i in range(len(best_results_ranked)):
iter_rank_nums_1_1.append(int(re.split('/|_',best_results_ranked[i])[6]))
iter_rank_index = [x - 1 for x in iter_rank_nums_1_1]
stage_1_1_results = []
if len(prior) == len(post):
for i in range(len(prior)):
prior_df = pd.read_csv(prior[i][0])
post_df = pd.read_csv(post[i][0])
updated_v_dIntercept, input_vector = compute_intercept(prior_df, post_df)
data = pd.concat([prior_df, post_df], ignore_index=True)
updated_v_dIntercept.insert(0, "updated_v_dIntercept")
data.loc[12] = updated_v_dIntercept
data.loc[13] = input_vector
data.at[13, 'iterations'] = 'input_vector'
stage_1_1_results.append(data)
else:
raise ValueError('Collected output files in prior and post are not equal!')
input_vector = [stage_1_1_results[i] for i in iter_rank_index][:last_results_stage_1] # 18 top computed second order optimized input vectors
# list with two dfs, v_dI, and input_vector, handle carefully!
elif which_stage == '1_2':
prior, post = ([] for i in range(2))
for i in range(len(iter_rank_nums_1_1)):
prior.append(glob.glob(shared+'/'+str(iter_rank_nums_1_1[i])+'_*.csv'))
for i in range(last_results+1,last_results+last_results_stage_1+1):
post.append(glob.glob(shared+'/'+str(i)+'_*.csv'))
merged = list(itertools.chain(*post))
best_results_ranked = sorted(merged, key=lambda merged: re.split('_|.csv',merged)[2], reverse=False)
iter_rank_nums_1_2 = []
for i in range(len(best_results_ranked)):
iter_rank_nums_1_2.append(int(re.split('/|_',best_results_ranked[i])[6]))
iter_rank_index = [x - (last_results + 1) for x in iter_rank_nums_1_2]
stage_1_2_results = []
if len(prior) == len(post):
for i in range(len(prior)):
prior_df = pd.read_csv(prior[i][0])
post_df = pd.read_csv(post[i][0])
updated_v_dIntercept, input_vector = compute_intercept(prior_df, post_df)
data = pd.concat([prior_df, post_df], ignore_index=True)
updated_v_dIntercept.insert(0, "updated_v_dIntercept")
data.loc[12] = updated_v_dIntercept
data.loc[13] = input_vector
data.at[13, 'iterations'] = 'input_vector'
stage_1_2_results.append(data)
else:
raise ValueError('Collected output files in prior and post are not equal!')
input_vector = [stage_1_2_results[i] for i in iter_rank_index] # 18 top computed second order optimized input vectors
# list with two dfs, v_dI, and input_vector, handle carefully!
elif which_stage == '2_1':
prior, post = ([] for i in range(2))
for i in range(last_results+1,last_results+last_results_stage_1+1):
prior.append(glob.glob(shared+'/'+str(i)+'_*.csv'))
for i in range(last_results+last_results_stage_1+1, last_results+(2 * last_results_stage_1)+1):
post.append(glob.glob(shared+'/'+str(i)+'_*.csv'))
merged = list(itertools.chain(*post))
best_results_ranked = sorted(merged, key=lambda merged: re.split('_|.csv',merged)[2], reverse=False)
iter_rank_nums_2_1 = []
for i in range(len(best_results_ranked)):
iter_rank_nums_2_1.append(int(re.split('/|_',best_results_ranked[i])[6]))
iter_rank_index = [x - (last_results+last_results_stage_1+1) for x in iter_rank_nums_2_1]
stage_2_1_results = []
if len(prior) == len(post):
for i in range(len(prior)):
prior_df = pd.read_csv(prior[i][0])
post_df = pd.read_csv(post[i][0])
updated_v_dIntercept, input_vector = compute_intercept(prior_df, post_df)
data = | pd.concat([prior_df, post_df], ignore_index=True) | pandas.concat |
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
import plotly.graph_objects as go
import plotly.express as px
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
# ----------------------------------------------------------------------------------------------------- Level-1 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Get the data for the first level-1 overview figure
def get_first_lev_first_fig_date(engine):
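    """Per business table, count the records whose timestamps are missing and
    return a DataFrame with columns ['业务类型', '问题数', '总数', '问题数量占比']."""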
res_数据时间缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '问题数量占比'])
    # Problem category, problematic-record count, total-record count
bus_dic = {
# '患者基本信息': ['select count(distinct caseid) as num from overall where in_time is null or out_time is null','select count(distinct caseid) as num from overall'],
'入院时间': ['select count(distinct caseid) as num from overall where in_time is null ',
'select count(distinct caseid) as num from overall'],
'出院时间': ['select count(distinct caseid) as num from overall where out_time is null',
'select count(distinct caseid) as num from overall'],
'手术': ['select count(1) as num from oper2 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from oper2 '],
'给药': ['select count(1) as num from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ','select count(1) as num from ANTIBIOTICS '],
'入出转': ['select count(1) as num from DEPARTMENT where BEGINTIME is null or ENDTIME is null ','select count(1) as num from DEPARTMENT '],
'菌检出': ['select count(1) as num from BACTERIA where REQUESTTIME is null ','select count(1) as num from BACTERIA '],
'体温': ['select count(1) as num from TEMPERATURE where RECORDDATE is null ','select count(1) as num from TEMPERATURE '],
'药敏': ['select count(1) as num from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from DRUGSUSCEPTIBILITY '],
'检查': ['select count(1) as num from EXAM where EXAM_DATE is null ','select count(1) as num from EXAM '],
'生化': ['select count(1) as num from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from ROUTINE2 '],
'三管': ['select count(1) as num from TREATMENT1 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from TREATMENT1 '],
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0],con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1],con=engine)['num'][0]
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,count_时间为空,count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,-1,-1,-1]
print('一级图一',bus)
return res_数据时间缺失及汇总
# Update level-1 figure 1
@app.callback(
Output('first_level_first_fig','figure'),
Output('general_situation_first_level_first_fig_data','data'),
Input('general_situation_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(general_situation_first_level_first_fig_data,db_con_url,count_time):
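    """Build the level-1 figure of missing timestamps per business type (bar chart
    of counts plus a line of percentages); the underlying query result is cached
    in the dcc.Store, keyed by hospital name."""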
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
if general_situation_first_level_first_fig_data is None:
general_situation_first_level_first_fig_data = {}
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
general_situation_first_level_first_fig_data = json.loads(general_situation_first_level_first_fig_data)
if db_con_url['hosname'] != general_situation_first_level_first_fig_data['hosname']:
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
first_level_first_fig_data = pd.read_json(general_situation_first_level_first_fig_data['first_level_first_fig_data'], orient='split')
general_situation_first_level_first_fig_data = dash.no_update
#
fig_概览一级_时间缺失 = make_subplots(specs=[[{"secondary_y": True}]])
res_数据时间缺失及汇总 = first_level_first_fig_data.sort_values(['问题数'], ascending=False)
        # Missing-record count per business type -- bar chart
fig_概览一级_时间缺失.add_trace(
go.Bar(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数'], name="问题数量",
marker_color=px.colors.qualitative.Dark24, ),
secondary_y=False,
)
        # Missing-record percentage per business type -- line chart
fig_概览一级_时间缺失.add_trace(
go.Scatter(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数量占比'], name="问题数量占比", ),
secondary_y=True,
)
        # Set the X-axis title
fig_概览一级_时间缺失.update_xaxes(tickangle=45,title_text="业务指标")
        # Set the Y-axis titles
fig_概览一级_时间缺失.update_yaxes(title_text="缺失数量", secondary_y=False)
fig_概览一级_时间缺失.update_yaxes(title_text="缺失占比(%)", secondary_y=True)
        # Set a horizontal legend and its position
fig_概览一级_时间缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
        # Set the figure margins
fig_概览一级_时间缺失.update_layout(margin=dict(l=20, r=20, t=20, b=20), )
return fig_概览一级_时间缺失,general_situation_first_level_first_fig_data
# Download the detail data behind level-1 figure 1
@app.callback(
Output('first_level_first_fig_data_detail', 'data'),
Input('first_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
prevent_initial_call=True,
)
def download_first_level_first_fig_data_detail(n_clicks,db_con_url):
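    """Export, per business table, the records with missing timestamps to a
    multi-sheet Excel file and send it to the browser."""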
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
engine = create_engine(db_con_url['db'])
bus_dic = {
'入院时间': 'select * from overall where in_time is null ',
'出院时间': 'select * from overall where out_time is null',
'手术': 'select * from oper2 where BEGINTIME is null or ENDTIME is null ',
'给药': 'select * from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ',
'入出转': 'select * from DEPARTMENT where BEGINTIME is null or ENDTIME is null ',
'菌检出': 'select * from BACTERIA where REQUESTTIME is null ',
'药敏': 'select * from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ',
'检查': 'select * from EXAM where EXAM_DATE is null',
'生化': 'select * from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ',
'三管': 'select * from TREATMENT1 where BEGINTIME is null or ENDTIME is null ',
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key],con=engine)
if temp.shape[0]>0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'],columns=[key])
error_df.to_excel(writer, sheet_name = key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}各业务时间缺失数量占比.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- Level-1 figure 2 ----------------------------------------------------------------------------------------------------------------------
# Get the data for the second level-1 overview figure
def get_first_lev_second_fig_date(engine,btime,etime):
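    """Per business table, count the records within [btime, etime] whose key fields
    (medication goal, susceptibility result, operation name, wound grade,
    departments) are missing, and return a DataFrame with columns
    ['业务类型', '问题数', '总数', '关键字缺失占比']."""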
res_数据关键字缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '关键字缺失占比'])
bus_dic = {'用药目的': [f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and (GOAL is null or replace(GOAL,' ','') is null)",
f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'药敏结果': [f"select count(1) as num from drugsusceptibility where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and SUSCEPTIBILITY is null or replace(SUSCEPTIBILITY,' ','') is null",
f"select count(1) as num from drugsusceptibility where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'手术名称': [f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and (OPER_NAME is null or replace(OPER_NAME,' ','') is null)",
f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'手术切口等级': [f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ( WOUND_GRADE is null or replace(WOUND_GRADE,' ','') is null)",
f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'出入院科室': [f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' and ( IN_DEPT is null or replace(IN_DEPT,' ','') is null or OUT_DEPT is null or replace(OUT_DEPT,' ','') is null )",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'入出转科室': [f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ( DEPT is null or replace(DEPT,' ','') is null)",
f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "]
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0],con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1],con=engine)['num'][0]
res_数据关键字缺失及汇总.loc[res_数据关键字缺失及汇总.shape[0]] = [bus,count_时间为空,count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据关键字缺失及汇总.loc[res_数据关键字缺失及汇总.shape[0]] = [bus,-1,-1,-1]
print('一级图二', bus)
return res_数据关键字缺失及汇总
# Update level-1 figure 2
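# Store-based caching used by the figure callbacks: the per-figure dcc.Store keeps the last
# query result together with the hosname/btime/etime it was computed for. The database is hit
# again only when the hospital or the month window changes; otherwise the cached JSON is reused
# and dash.no_update is returned for the store output so it is not rewritten.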
@app.callback(
Output('first_level_second_fig','figure'),
Output('general_situation_first_level_second_fig_data','data'),
Input('general_situation_first_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_second_fig(general_situation_first_level_second_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_first_level_second_fig_data is None:
general_situation_first_level_second_fig_data = {}
first_level_second_fig_data = get_first_lev_second_fig_date(engine,btime,etime)
general_situation_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_second_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps(general_situation_first_level_second_fig_data)
else:
general_situation_first_level_second_fig_data = json.loads(general_situation_first_level_second_fig_data)
if db_con_url['hosname'] != general_situation_first_level_second_fig_data['hosname']:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
general_situation_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_second_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps( general_situation_first_level_second_fig_data)
else:
if general_situation_first_level_second_fig_data['btime'] != btime or general_situation_first_level_second_fig_data['etime'] != etime:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
general_situation_first_level_second_fig_data[ 'first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps(general_situation_first_level_second_fig_data)
else:
first_level_second_fig_data = pd.read_json(general_situation_first_level_second_fig_data['first_level_second_fig_data'], orient='split')
general_situation_first_level_second_fig_data = dash.no_update
print("一级第二张图数据:")
print(first_level_second_fig_data)
fig_概览一级_关键字缺失 = make_subplots()
res_数据关键字缺失及汇总 = first_level_second_fig_data.sort_values(['关键字缺失占比'], ascending=False)
fig_概览一级_关键字缺失.add_trace(
go.Bar(x=res_数据关键字缺失及汇总['业务类型'], y=res_数据关键字缺失及汇总['关键字缺失占比'], marker_color=px.colors.qualitative.Dark24, )
)
fig_概览一级_关键字缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
#title=f"{btime}--{etime}",
)
fig_概览一级_关键字缺失.update_yaxes(title_text="关键字缺失占比(%)")
fig_概览一级_关键字缺失.update_xaxes(title_text="业务指标")
return fig_概览一级_关键字缺失,general_situation_first_level_second_fig_data
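# Optional refactoring sketch (not used by the callbacks; the name refresh_cached_fig_data is
# illustrative): the cache-or-requery branching repeated in the figure callbacks could be
# expressed once as a helper. It assumes the module-level json / pandas / dash imports.
def refresh_cached_fig_data(cached_json, hosname, btime, etime, fetch_func, data_key):
    """Return (DataFrame, store_value); store_value is dash.no_update when the cache is reused."""
    if cached_json is not None:
        cache = json.loads(cached_json)
        if cache.get('hosname') == hosname and cache.get('btime') == btime and cache.get('etime') == etime:
            return pd.read_json(cache[data_key], orient='split'), dash.no_update
    df = fetch_func()
    cache = {data_key: df.to_json(orient='split', date_format='iso'),
             'hosname': hosname, 'btime': btime, 'etime': etime}
    return df, json.dumps(cache)
# Example use:
#     first_level_second_fig_data, store_value = refresh_cached_fig_data(
#         general_situation_first_level_second_fig_data, db_con_url['hosname'], btime, etime,
#         lambda: get_first_lev_second_fig_date(engine, btime, etime), 'first_level_second_fig_data')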
# Download detail data for level-1 figure 2
@app.callback(
Output('first_level_second_fig_data_detail', 'data'),
Input('first_level_second_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_second_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime']
etime = count_time['etime']
bus_dic = {
'用药目的': f"select * from ANTIBIOTICS where (GOAL is null or replace(GOAL,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
'药敏结果': f"select * from drugsusceptibility where (SUSCEPTIBILITY is null or replace(SUSCEPTIBILITY,' ','') is null) and REQUESTTIME is not null and substr(REQUESTTIME,1,10)>='{btime}' and substr(REQUESTTIME,1,10)<='{etime}' ",
'手术名称': f"select * from oper2 where (OPER_NAME is null or replace(OPER_NAME,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}'",
'手术切口等级': f"select * from oper2 where (WOUND_GRADE is null or replace(WOUND_GRADE,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
'出入院科室': f" select * from overall where (IN_DEPT is null or replace(IN_DEPT,' ','') is null or OUT_DEPT is null or replace(OUT_DEPT,' ','') is null) and in_time is not null and substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}' ",
'入出转科室': f"select * from department where (DEPT is null or replace(DEPT,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}各业务关键字缺失数量占比.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- Level-1 figure 3 ----------------------------------------------------------------------------------------------------------------------
# Fetch data for overview level-1 figure 3
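# Department-mapping check: for each business table the first SQL counts rows whose dept code has
# no match in the S_DEPARTMENTS dictionary (NOT EXISTS), the second counts all rows in the month
# range; the ratio is reported as 科室信息映射问题占比 (%).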
def get_first_lev_third_fig_date(engine,btime,etime):
res_数据科室信息缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '科室信息映射问题占比'])
bus_dic = {'入院科室': [f" select count(1) as num from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.in_dept = t2.code) and t1.in_dept is not null and (substr(t1.IN_TIME,1,7)>='{btime}' and substr(t1.IN_TIME,1,7)<='{etime}') ",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'出院科室': [
f" select count(1) as num from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.out_dept = t2.code) and t1.out_dept is not null and (substr(t1.IN_TIME,1,7)>='{btime}' and substr(t1.IN_TIME,1,7)<='{etime}') ",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'入出转科室': [
f" select count(1) as num from department t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'抗菌药物医嘱科室': [
f" select count(1) as num from ANTIBIOTICS t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'手术科室': [
f" select count(1) as num from OPER2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from OPER2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'菌检出送检科室': [
f" select count(1) as num from BACTERIA t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') ",
f"select count(1) as num from BACTERIA where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'药敏送检科室': [
f" select count(1) as num from DRUGSUSCEPTIBILITY t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') ",
f"select count(1) as num from DRUGSUSCEPTIBILITY where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'体温科室': [
f" select count(1) as num from TEMPERATURE t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}') ",
f"select count(1) as num from TEMPERATURE where substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' "],
'治疗科室': [
f" select count(1) as num from TREATMENT1 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
f"select count(1) as num from TREATMENT1 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'常规科室': [
f" select count(1) as num from ROUTINE2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') ",
f"select count(1) as num from ROUTINE2 where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0], con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1], con=engine)['num'][0]
res_数据科室信息缺失及汇总.loc[res_数据科室信息缺失及汇总.shape[0]] = [bus, count_时间为空, count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据科室信息缺失及汇总.loc[res_数据科室信息缺失及汇总.shape[0]] = [bus, -1, -1, -1]
return res_数据科室信息缺失及汇总
# Update level-1 figure 3
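# Same dcc.Store cache-or-requery branching as update_first_level_second_fig above.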
@app.callback(
Output('first_level_third_fig','figure'),
Output('general_situation_first_level_third_fig_data','data'),
Input('general_situation_first_level_third_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_third_fig(general_situation_first_level_third_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_first_level_third_fig_data is None:
first_level_third_fig_data = get_first_lev_third_fig_date(engine, btime, etime)
general_situation_first_level_third_fig_data={}
general_situation_first_level_third_fig_data['first_level_third_fig_data'] = first_level_third_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_third_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_third_fig_data['btime'] = btime
general_situation_first_level_third_fig_data['etime'] = etime
general_situation_first_level_third_fig_data = json.dumps(general_situation_first_level_third_fig_data)
else:
general_situation_first_level_third_fig_data = json.loads(general_situation_first_level_third_fig_data)
if db_con_url['hosname'] != general_situation_first_level_third_fig_data['hosname']:
first_level_third_fig_data = get_first_lev_third_fig_date(engine, btime, etime)
general_situation_first_level_third_fig_data['first_level_third_fig_data'] = first_level_third_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_third_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_third_fig_data['btime'] = btime
general_situation_first_level_third_fig_data['etime'] = etime
general_situation_first_level_third_fig_data = json.dumps(general_situation_first_level_third_fig_data)
else:
if general_situation_first_level_third_fig_data['btime'] != btime or general_situation_first_level_third_fig_data['etime'] != etime:
first_level_third_fig_data = get_first_lev_third_fig_date(engine, btime, etime)
general_situation_first_level_third_fig_data['first_level_third_fig_data'] = first_level_third_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_third_fig_data['btime'] = btime
general_situation_first_level_third_fig_data['etime'] = etime
general_situation_first_level_third_fig_data = json.dumps(general_situation_first_level_third_fig_data)
else:
first_level_third_fig_data = pd.read_json(general_situation_first_level_third_fig_data['first_level_third_fig_data'], orient='split')
general_situation_first_level_third_fig_data = dash.no_update
fig_概览一级_科室映射缺失 = go.Figure()
res_数据科室信息缺失及汇总 = first_level_third_fig_data.sort_values(['科室信息映射问题占比'], ascending=False)
fig_概览一级_科室映射缺失.add_trace(
go.Bar(x=res_数据科室信息缺失及汇总['业务类型'], y=res_数据科室信息缺失及汇总['科室信息映射问题占比'], marker_color=px.colors.qualitative.Dark24 )
)
fig_概览一级_科室映射缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
)
fig_概览一级_科室映射缺失.update_yaxes(title_text="科室信息映射问题占比(%)")
fig_概览一级_科室映射缺失.update_xaxes(title_text="业务指标")
return fig_概览一级_科室映射缺失,general_situation_first_level_third_fig_data
# Download detail data for level-1 figure 3
@app.callback(
Output('first_level_third_fig_data_detail', 'data'),
Input('first_level_third_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime']
etime = count_time['etime']
bus_dic = {
'入院科室': f" select * from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.in_dept = t2.code) and t1.in_dept is not null and substr(t1.IN_TIME,1,10)>='{btime}' and substr(t1.IN_TIME,1,10)<='{etime}' ",
'出院科室': f" select * from OVERALL t1 where not exists (select 1 from S_DEPARTMENTS t2 where t1.out_dept = t2.code) and t1.out_dept is not null and substr(t1.IN_TIME,1,10)>='{btime}' and substr(t1.IN_TIME,1,10)<='{etime}' ",
'入出转科室': f" select * from department t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and substr(t1.BEGINTIME,1,10) >='{btime}' and substr(t1.BEGINTIME,1,10) <='{etime}' ",
'抗菌药物医嘱科室': f" select * from ANTIBIOTICS t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}') ",
'手术科室': f" select * from OPER2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}') ",
'菌检出送检科室': f" select * from BACTERIA t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}') ",
'药敏送检科室': f" select * from DRUGSUSCEPTIBILITY t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}') ",
'体温科室': " select * from TEMPERATURE t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.RECORDDATE,1,10)>='{btime}' and substr(t1.RECORDDATE,1,10)<='{etime}') ",
'治疗科室': f" select * from TREATMENT1 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}') ",
'常规科室': f" select * from ROUTINE2 t1 where t1.dept is not null and not exists (select 1 from s_departments t2 where t1.dept = t2.code) and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}') ",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}科室映射缺失数量占比.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- Level-2 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch overview level-2 data: business-logic problem counts
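# Each entry counts rows in the month window that violate one consistency rule (discharge earlier
# than admission, events timestamped outside the admission episode, begin time not before end time,
# duplicated OPERID, out-of-range or missing temperature values, duplicated transfer-in times).
# A failed query is recorded as -1.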
def get_second_level_fig_date(engine,btime,etime):
res_业务逻辑问题数据汇总 = pd.DataFrame(columns=['问题数据数量', '问题'])
ques_dic = {
'出院时间小于等于入院时间' : f""" select count(1) from overall where in_time is not null and out_time is not null and in_time >= out_time and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}')""",
'存在测试患者数据' : f""" select count(1) from overall where (pname like '%测试%' or pname like '%test%') and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}') """,
'存在住院时长超四个月患者' : f""" select count(1) from overall where (((out_time is null or out_time='9999') and ( trunc(sysdate)-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)
or (out_time is not null and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)) and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}')
""",
'存在住院天数不足一天患者' : f""" select count(1) from overall where (out_time is not null and out_time <> '9999' and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )< 1 ) and (substr(in_time,1,7)>='{btime}' and substr(in_time,1,7)<='{etime}') """,
'转科时间在出入院时间之外' : f""" select count(1) from department t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'转入时间大于等于转出时间' : f""" select count(1) from department where BEGINTIME is not null and ENDTIME is not null and BEGINTIME >= ENDTIME and (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') """,
'治疗开始时间大于等于结束时间' : f""" select count(1) from TREATMENT1 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME>= ENDTIME and (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') """,
'治疗时间在出入院时间之外' : f""" select count(1) from TREATMENT1 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'医嘱开始时间大于结束时间' : f""" select count(1) from ANTIBIOTICS where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}')""",
'医嘱时间在出入院时间之外' : f""" select count(1) from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'送检时间大于等于报告时间' : f""" select count(1) from BACTERIA where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}')""",
'送检时间在出入院时间之外' : f""" select count(1) from BACTERIA t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
'药敏送检时间大于等于报告时间' : f""" select count(1) from DRUGSUSCEPTIBILITY where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and ( substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' )""",
'药敏送检时间在出入院时间之外' : f""" select count(1) from DRUGSUSCEPTIBILITY t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
'手术开始时间大于结束时间' : f""" select count(1) from OPER2 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and ( substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' )""",
'手术时间在出入院时间之外' : f""" select count(1) from OPER2 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
'OPERID重复' : f""" select count(1) from oper2 where operid in (select operid from oper2 group by operid having count(operid)>1) and ( substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' ) order by operid """,
'体温值异常' : f""" select count(1) from TEMPERATURE where (VALUE > 46 or VALUE < 34 or VALUE is null) and ( substr(RECORDDATE,1,7) >='{btime}' and substr(RECORDDATE,1,7) <='{etime}') """,
'体温测量时间在出入院时间之外' : f""" select count(1) from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and ( substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}')
""",
'入出转入科时间重复': f""" select count(1) from department t1,
(select caseid ,begintime from department where substr(begintime,1,7)>='{btime}' and substr(begintime,1,7)<='{etime}' group by caseid ,begintime having count(1)>1) t2
where t1.caseid=t2.caseid and t1.begintime = t2.begintime
""",
}
for ques in ques_dic:
try:
ques_df = pd.read_sql(ques_dic[ques], con=engine)
ques_df.columns = ['问题数据数量']
ques_df['问题'] = ques
res_业务逻辑问题数据汇总 = res_业务逻辑问题数据汇总.append( ques_df )
except:
res_业务逻辑问题数据汇总.loc[res_业务逻辑问题数据汇总.shape[0]] = [ -1 , ques ]
print('二级图 ' , ques)
return res_业务逻辑问题数据汇总
# def get_second_level_fig_date(engine,btime,etime):
# res_业务逻辑问题数据汇总 = pd.DataFrame(columns=['问题数据数量', '问题','month'])
# ques_dic = {
# '出院时间小于等于入院时间' : f""" select count(1) as 问题数据数量, '出院时间小于等于入院时间' as 问题, substr(in_time,1,7) as month from overall where in_time is not null and out_time is not null and in_time >= out_time group by substr(in_time,1,7) """,
# '存在测试患者数据' : f""" select count(1) as 问题数据数量, '存在测试患者数据' as 问题, substr(in_time,1,7) as month from overall where (pname like '%测试%' or pname like '%test%') group by substr(in_time,1,7) """,
# '存在住院时长超四个月患者' : f""" select count(1) as 问题数据数量, '存在住院时长超四个月患者' as 问题, substr(in_time,1,7) as month from overall where
# (((out_time is null or out_time='9999') and ( trunc(sysdate)-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)
# or (out_time is not null and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120))
# group by substr(in_time,1,7) )
# """,
# '存在住院天数不足一天患者' : f""" select count(1) as 问题数据数量, '存在住院天数不足一天患者' as 问题, substr(in_time,1,7) as month from overall where
# (out_time is not null and out_time <> '9999' and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )< 1 )
# group by substr(in_time,1,7) """,
# '转科时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '转科时间在出入院时间之外' as 问题, substr(t1.BEGINTIME,1,7) as month from department t1,overall t2 where
# ( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
# group by substr(t1.BEGINTIME,1,7)
# """,
# '转入时间大于等于转出时间' : f""" select count(1) as 问题数据数量, '转入时间大于等于转出时间' as 问题, substr(t1.BEGINTIME,1,7) as month from department where
# BEGINTIME is not null and ENDTIME is not null and BEGINTIME >= ENDTIME
# group by substr( BEGINTIME,1,7)
# """,
#
# '治疗开始时间大于等于结束时间' : f""" select count(1) as 问题数据数量, '治疗开始时间大于等于结束时间' as 问题, substr(BEGINTIME,1,7) as month from TREATMENT1 where
# BEGINTIME is not null and ENDTIME is not null and BEGINTIME>= ENDTIME
# group by substr(BEGINTIME,1,7)
# """,
# '治疗时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '治疗时间在出入院时间之外' as 问题, substr(t1.BEGINTIME,1,7) as month from TREATMENT1 t1,overall t2 where
# ( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
# group by substr(t1.BEGINTIME,1,7)
# """,
# '医嘱开始时间大于结束时间' : f""" select count(1) as 问题数据数量, '医嘱开始时间大于结束时间' as 问题, substr(BEGINTIME,1,7) as month from ANTIBIOTICS where
# BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME
# group by substr( BEGINTIME,1,7)
# """,
# '医嘱时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '医嘱时间在出入院时间之外' as 问题, substr(t1.BEGINTIME,1,7) as month from ANTIBIOTICS t1,overall t2 where
# ( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
# group by substr(t1.BEGINTIME,1,7)
# """,
# '送检时间大于等于报告时间' : f""" select count(1) as 问题数据数量, '送检时间大于等于报告时间' as 问题, substr(REQUESTTIME,1,7) as month from BACTERIA where
# REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME
# group by substr( REQUESTTIME,1,7)
# """,
# '送检时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '送检时间在出入院时间之外' as 问题, substr(t1.REQUESTTIME,1,7) as month from BACTERIA t1,overall t2 where
# ( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
# group by substr(t1.REQUESTTIME,1,7)
# """,
# '药敏送检时间大于等于报告时间' : f""" select count(1) as 问题数据数量, '药敏送检时间大于等于报告时间' as 问题, substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY where
# REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME
# group by substr( REQUESTTIME,1,7)
# """,
# '药敏送检时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '药敏送检时间在出入院时间之外' as 问题, substr( t1.REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY t1,overall t2 where
# ( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
# group by substr(t1.REQUESTTIME,1,7)
# """,
# '手术开始时间大于结束时间' : f""" select count(1) as 问题数据数量, '手术开始时间大于结束时间' as 问题, substr(BEGINTIME,1,7) as month from OPER2 where
# BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME
# group by substr( BEGINTIME,1,7)
# """,
# '手术时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '手术时间在出入院时间之外' as 问题, substr( t1.BEGINTIME,1,7) as month from OPER2 t1,overall t2 where
# ( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
# group by substr(t1.BEGINTIME,1,7)
# """,
# 'OPERID重复' : f""" select count(1) as 问题数据数量, 'OPERID重复' as 问题, substr(BEGINTIME,1,7) as month from oper2 where
# operid in (select operid from oper2 group by operid having count(operid)>1)
# group by substr( BEGINTIME,1,7)
# """,
# '体温值异常' : f""" select count(1) as 问题数据数量, '体温值异常' as 问题, substr(RECORDDATE,1,7) as month from TEMPERATURE where
# (VALUE > 46 or VALUE < 34 or VALUE is null) group by substr( RECORDDATE,1,7) """,
# '体温测量时间在出入院时间之外' : f""" select count(1) as 问题数据数量, '体温测量时间在出入院时间之外' as 问题, substr(t1.RECORDDATE,1,7) as month from TEMPERATURE t1,overall t2 where
# ( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
# and t1.caseid = t2.caseid
# and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
# group by substr( t1.RECORDDATE,1,7)
# """,
# }
#
# for ques in ques_dic:
# try:
# # ques_df = pd.read_sql(ques_dic[ques], con=engine)
# # ques_df.columns = ['问题数据数量']
# # ques_df['问题'] = ques
# # res_业务逻辑问题数据汇总 = res_业务逻辑问题数据汇总.append( ques_df )
# res_业务逻辑问题数据汇总 = res_业务逻辑问题数据汇总.append(pd.read_sql(ques_dic[ques], con=engine) )
# except:
# res_业务逻辑问题数据汇总.loc[res_业务逻辑问题数据汇总.shape[0]] = [ -1 , ques ,]
# print('二级图 ' , ques)
# return res_业务逻辑问题数据汇总
# Detail rows for the level-2 business-logic problems are queried directly in the download callback below
# Update the level-2 figure
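# Same cache-or-requery branching as the level-1 callbacks; the counts are drawn as a bar chart
# sorted by 问题数据数量 in descending order.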
@app.callback(
Output('second_level_fig','figure'),
Output('general_situation_secod_level_fig_data','data'),
Input('general_situation_secod_level_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_second_level_fig(general_situation_secod_level_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_secod_level_fig_data is None:
general_situation_secod_level_fig_data = {}
second_level_fig_date = get_second_level_fig_date(engine, btime, etime)
general_situation_secod_level_fig_data['second_level_fig_date'] = second_level_fig_date.to_json(orient='split', date_format='iso')
general_situation_secod_level_fig_data['hosname'] = db_con_url['hosname']
general_situation_secod_level_fig_data['btime'] = btime
general_situation_secod_level_fig_data['etime'] = etime
general_situation_secod_level_fig_data = json.dumps(general_situation_secod_level_fig_data)
else:
general_situation_secod_level_fig_data = json.loads(general_situation_secod_level_fig_data)
if db_con_url['hosname'] != general_situation_secod_level_fig_data['hosname']:
second_level_fig_date = get_second_level_fig_date(engine, btime, etime)
general_situation_secod_level_fig_data['second_level_fig_date'] = second_level_fig_date.to_json(orient='split',date_format='iso')
general_situation_secod_level_fig_data['hosname'] = db_con_url['hosname']
general_situation_secod_level_fig_data['btime'] = btime
general_situation_secod_level_fig_data['etime'] = etime
general_situation_secod_level_fig_data = json.dumps(general_situation_secod_level_fig_data)
else:
if general_situation_secod_level_fig_data['btime'] != btime or general_situation_secod_level_fig_data['etime'] != etime:
second_level_fig_date = get_second_level_fig_date(engine, btime, etime)
general_situation_secod_level_fig_data['second_level_fig_date'] = second_level_fig_date.to_json(orient='split',date_format='iso')
general_situation_secod_level_fig_data['btime'] = btime
general_situation_secod_level_fig_data['etime'] = etime
general_situation_secod_level_fig_data = json.dumps(general_situation_secod_level_fig_data)
else:
second_level_fig_date = pd.read_json(general_situation_secod_level_fig_data['second_level_fig_date'], orient='split')
general_situation_secod_level_fig_data = dash.no_update
print('二级图数据:')
print(second_level_fig_date)
fig_概览二级 = second_level_fig_date
fig_概览二级_业务逻辑问题 = make_subplots()
fig_概览二级 = fig_概览二级.sort_values(['问题数据数量'],ascending=False)
fig_概览二级_业务逻辑问题.add_trace(
go.Bar(x=fig_概览二级['问题'], y=fig_概览二级['问题数据数量'], marker_color=px.colors.qualitative.Dark24, )
)
fig_概览二级_业务逻辑问题.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
#title=f"{btime}--{etime}",
)
fig_概览二级_业务逻辑问题.update_yaxes(title_text="问题数据数量", )
fig_概览二级_业务逻辑问题.update_xaxes(title_text="业务问题", )
return fig_概览二级_业务逻辑问题,general_situation_secod_level_fig_data
# Download detail data for the level-2 figure
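# The detail queries mirror the count queries above but filter on full dates
# (substr(...,1,10) against the raw btime/etime) and export the offending rows themselves.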
@app.callback(
Output('second_level_fig_date_detail','data'),
Input('second_level_fig_data_detail_down', 'n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_second_level_fig(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime']
etime = count_time['etime']
ques_dic = {
'出院时间小于等于入院时间': f""" select * from overall where in_time is not null and out_time is not null and in_time >= out_time and (substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}')""",
'存在测试患者数据': f""" select * from overall where (pname like '%测试%' or pname like '%test%') and (substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}') """,
'存在住院时长超四个月患者': f""" select * from overall where (((out_time is null or out_time='9999') and ( trunc(sysdate)-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)
or (out_time is not null and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )> 120)) and (substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}')
""",
'存在住院天数不足一天患者': f""" select * from overall where (out_time is not null and out_time <> '9999' and ( to_date(substr(out_time,0,10),'yyyy-mm-dd')-to_date(substr(in_time,0,10),'yyyy-mm-dd') )< 1 ) and (substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}') """,
'转科时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from department t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}')
""",
'转入时间大于等于转出时间': f""" select * from department where BEGINTIME is not null and ENDTIME is not null and BEGINTIME >= ENDTIME and (substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}') """,
'治疗开始时间大于等于结束时间': f""" select * from TREATMENT1 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME>= ENDTIME and (substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}') """,
'治疗时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from TREATMENT1 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}')
""",
'医嘱开始时间大于结束时间': f""" select * from ANTIBIOTICS where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and (substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}')""",
'医嘱时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}')
""",
'送检时间大于等于报告时间': f""" select * from BACTERIA where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and (substr(REQUESTTIME,1,10)>='{btime}' and substr(REQUESTTIME,1,10)<='{etime}')""",
'送检时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from BACTERIA t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}')
""",
'药敏送检时间大于等于报告时间': f""" select * from DRUGSUSCEPTIBILITY where REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>= REPORTTIME and ( substr(REQUESTTIME,1,10)>='{btime}' and substr(REQUESTTIME,1,10)<='{etime}' )""",
'药敏送检时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from DRUGSUSCEPTIBILITY t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,10)>='{btime}' and substr(t1.REQUESTTIME,1,10)<='{etime}')
""",
'手术开始时间大于结束时间': f""" select * from OPER2 where BEGINTIME is not null and ENDTIME is not null and BEGINTIME> ENDTIME and ( substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' )""",
'手术时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from OPER2 t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,10)>='{btime}' and substr(t1.BEGINTIME,1,10)<='{etime}')
""",
'OPERID重复': f""" select * from oper2 where operid in (select operid from oper2 group by operid having count(operid)>1) and ( substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ) order by operid """,
'体温值异常': f""" select * from TEMPERATURE where (VALUE > 46 or VALUE < 34 or VALUE is null) and ( substr(RECORDDATE,1,10) >='{btime}' and substr(RECORDDATE,1,10) <='{etime}') """,
'体温测量时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and ( substr(t1.RECORDDATE,1,10)>='{btime}' and substr(t1.RECORDDATE,1,10)<='{etime}')
""",
'体温测量时间在出入院时间之外': f""" select t1.* from department t1,
(select caseid ,begintime from department where substr(begintime,1,10)>='{btime}' and substr(begintime,1,10)<='{etime}' group by caseid ,begintime having count(1)>1) t2
where t1.caseid=t2.caseid and t1.begintime = t2.begintime
""",
'入出转入科时间重复': f""" select t1.* from department t1,
(select caseid ,begintime from department where substr(begintime,1,7)>='{btime}' and substr(begintime,1,7)<='{etime}' group by caseid ,begintime having count(1)>1) t2
where t1.caseid=t2.caseid and t1.begintime = t2.begintime
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in ques_dic.keys():
try:
temp = pd.read_sql(ques_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}全院数据逻辑问题明细.xlsx')
else:
return dash.no_update
# ----------------------------------------------------------------------------------------------------- Level-3 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch data for overview level-3 figure 1
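# Monthly record counts per business type (admissions, discharges, antibiotic orders, operations,
# culture and susceptibility results, temperatures, transfers, routine/exam results, treatments
# and catheter/ventilator records) from 1990-01 up to the current month, consumed by the
# level-3 figure-1 subplots (布林图) below.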
def get_third_level_first_fig_date(engine):
res_全业务 = pd.DataFrame(columns=['num', 'month', '业务类型'])
bus_dic = {
'入院人数':"select count(distinct caseid) as num ,substr(in_time,1,7) as month,'入院人数' as 业务类型 from overall where in_time is not null group by substr(in_time,1,7) having substr(in_time,1,7) <= to_char(sysdate,'yyyy-mm') and substr(in_time,1,7) >= '1990-01' order by substr(in_time,1,7)",
'出院人数':"select count(distinct caseid) as num ,substr(out_time,1,7) as month,'出院人数' as 业务类型 from overall where in_time is not null and out_time is not null group by substr(out_time,1,7) having substr(out_time,1,7) <= to_char(sysdate,'yyyy-mm') and substr(out_time,1,7) >= '1990-01' order by substr(out_time,1,7)",
'抗菌药物医嘱数':"select count( distinct CASEID||ORDERNO||ANAME ) as num ,substr(BEGINTIME,1,7) as month ,'抗菌药物医嘱数' as 业务类型 from antibiotics where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
'手术台数':"select count( distinct CASEID||OPERID ) as num ,substr(BEGINTIME,1,7) as month,'手术台数' as 业务类型 from oper2 where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
'菌检出结果记录数':"select count( distinct CASEID||TESTNO||BACTERIA ) as num ,substr(REQUESTTIME,1,7) as month ,'菌检出结果记录数' as 业务类型 from bacteria where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
'药敏结果记录数':"select count( distinct CASEID||TESTNO||BACTERIA||ANTIBIOTICS ) as num ,substr(REQUESTTIME,1,7) as month ,'药敏结果记录数' as 业务类型 from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
'体温测量数':"select count( distinct CASEID||RECORDDATE ) as num ,substr(RECORDDATE,1,7) as month ,'体温测量数' as 业务类型 from TEMPERATURE where RECORDDATE is not null group by substr(RECORDDATE,1,7) having substr(RECORDDATE,1,7) <= to_char(sysdate,'yyyy-mm') and substr(RECORDDATE,1,7) >= '1990-01' order by substr(RECORDDATE,1,7)",
'入出转记录数':"select count( distinct CASEID||BEGINTIME||DEPT ) as num ,substr(BEGINTIME,1,7) as month ,'入出转记录数' as 业务类型 from department where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
'常规结果记录数':"select count( distinct CASEID||TESTNO||RINDEX ) as num ,substr(REQUESTTIME,1,7) as month ,'常规结果记录数' as 业务类型 from ROUTINE2 where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
'影像检查记录数':"select count( distinct CASEID||EXAM_NO ) as num ,substr(EXAM_DATE,1,7) as month ,'影像检查记录数' as 业务类型 from EXAM where EXAM_DATE is not null group by substr(EXAM_DATE,1,7) having substr(EXAM_DATE,1,7) <= to_char(sysdate,'yyyy-mm') and substr(EXAM_DATE,1,7) >= '1990-01' order by substr(EXAM_DATE,1,7)",
'治疗记录数':"select count( distinct CASEID||TNO||TTYPE||DEPT ) as num ,substr(BEGINTIME,1,7) as month ,'治疗记录数' as 业务类型 from TREATMENT1 where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
'中心静脉插管记录数':"select count(1) as num ,substr(BEGINTIME,1,7) as month,'中心静脉插管记录数' as 业务类型 from treatment1 where TTYPE like '%中心%静脉%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
'呼吸机记录数':"select count(1) as num ,substr(BEGINTIME,1,7) as month,'呼吸机记录数' as 业务类型 from treatment1 where TTYPE like '%呼吸机%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
'泌尿道插管记录数':"select count(1) as num ,substr(BEGINTIME,1,7) as month,'泌尿道插管记录数' as 业务类型 from treatment1 where TTYPE like '%泌尿道%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
}
for bus in bus_dic:
res_全业务 = res_全业务.append(pd.read_sql(bus_dic[bus],con=engine))
return res_全业务
# Alternative (commented-out) version: fetch level-3 figure-1 data grouped by month or ISO week
# def get_third_level_first_fig_date(engine,date_type):
# res_全业务 = pd.DataFrame(columns=['num', 'month', '业务类型'])
# if date_type == 'month':
# bus_dic = {
# '入院人数': "select count(distinct caseid) as num ,substr(in_time,1,7) as month,'入院人数' as 业务类型 from overall where in_time is not null group by substr(in_time,1,7) having substr(in_time,1,7) <= to_char(sysdate,'yyyy-mm') and substr(in_time,1,7) >= '1990-01' order by substr(in_time,1,7)",
# '出院人数': "select count(distinct caseid) as num ,substr(out_time,1,7) as month,'出院人数' as 业务类型 from overall where in_time is not null and out_time is not null group by substr(out_time,1,7) having substr(out_time,1,7) <= to_char(sysdate,'yyyy-mm') and substr(out_time,1,7) >= '1990-01' order by substr(out_time,1,7)",
# '抗菌药物医嘱数': "select count( distinct CASEID||ORDERNO||ANAME ) as num ,substr(BEGINTIME,1,7) as month ,'抗菌药物医嘱数' as 业务类型 from antibiotics where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
# '手术台数': "select count( distinct CASEID||OPERID ) as num ,substr(BEGINTIME,1,7) as month,'手术台数' as 业务类型 from oper2 where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
# '菌检出结果记录数': "select count( distinct CASEID||TESTNO||BACTERIA ) as num ,substr(REQUESTTIME,1,7) as month ,'菌检出结果记录数' as 业务类型 from bacteria where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
# '药敏结果记录数': "select count( distinct CASEID||TESTNO||BACTERIA||ANTIBIOTICS ) as num ,substr(REQUESTTIME,1,7) as month ,'药敏结果记录数' as 业务类型 from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
# '体温测量数': "select count( distinct CASEID||RECORDDATE ) as num ,substr(RECORDDATE,1,7) as month ,'体温测量数' as 业务类型 from TEMPERATURE where RECORDDATE is not null group by substr(RECORDDATE,1,7) having substr(RECORDDATE,1,7) <= to_char(sysdate,'yyyy-mm') and substr(RECORDDATE,1,7) >= '1990-01' order by substr(RECORDDATE,1,7)",
# '入出转记录数': "select count( distinct CASEID||BEGINTIME||DEPT ) as num ,substr(BEGINTIME,1,7) as month ,'入出转记录数' as 业务类型 from department where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
# '常规结果记录数': "select count( distinct CASEID||TESTNO||RINDEX ) as num ,substr(REQUESTTIME,1,7) as month ,'常规结果记录数' as 业务类型 from ROUTINE2 where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) having substr(REQUESTTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(REQUESTTIME,1,7) >= '1990-01' order by substr(REQUESTTIME,1,7)",
# '影像检查记录数': "select count( distinct CASEID||EXAM_NO ) as num ,substr(EXAM_DATE,1,7) as month ,'影像检查记录数' as 业务类型 from EXAM where EXAM_DATE is not null group by substr(EXAM_DATE,1,7) having substr(EXAM_DATE,1,7) <= to_char(sysdate,'yyyy-mm') and substr(EXAM_DATE,1,7) >= '1990-01' order by substr(EXAM_DATE,1,7)",
# '治疗记录数': "select count( distinct CASEID||TNO||TTYPE||DEPT ) as num ,substr(BEGINTIME,1,7) as month ,'治疗记录数' as 业务类型 from TREATMENT1 where BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' order by substr(BEGINTIME,1,7)",
# '中心静脉插管记录数': "select count(1) as num ,substr(BEGINTIME,1,7) as month,'中心静脉插管记录数' as 业务类型 from treatment1 where TTYPE like '%中心%静脉%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
# '呼吸机记录数': "select count(1) as num ,substr(BEGINTIME,1,7) as month,'呼吸机记录数' as 业务类型 from treatment1 where TTYPE like '%呼吸机%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
# '泌尿道插管记录数': "select count(1) as num ,substr(BEGINTIME,1,7) as month,'泌尿道插管记录数' as 业务类型 from treatment1 where TTYPE like '%泌尿道%' and BEGINTIME is not null group by substr(BEGINTIME,1,7) having substr(BEGINTIME,1,7) <= to_char(sysdate,'yyyy-mm') and substr(BEGINTIME,1,7) >= '1990-01' ",
# }
# for bus in bus_dic:
# temp = pd.read_sql(bus_dic[bus], con=engine)
# res_全业务 = res_全业务.append(temp)
# return res_全业务
# else:
# bus_dic = {
# '入院人数': "select count(distinct caseid) as num ,to_char(to_date(substr(in_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'入院人数' as 业务类型 from overall where in_time is not null group by to_char(to_date(substr(in_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(in_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(in_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '出院人数': "select count(distinct caseid) as num ,to_char(to_date(substr(out_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'出院人数' as 业务类型 from overall where in_time is not null and out_time is not null group by to_char(to_date(substr(out_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(out_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(out_time,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '抗菌药物医嘱数': "select count( distinct CASEID||ORDERNO||ANAME ) as num , to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'抗菌药物医嘱数' as 业务类型 from antibiotics where BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '手术台数': "select count( distinct CASEID||OPERID ) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'手术台数' as 业务类型 from oper2 where BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '菌检出结果记录数': "select count( distinct CASEID||TESTNO||BACTERIA ) as num , to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'菌检出结果记录数' as 业务类型 from bacteria where REQUESTTIME is not null group by to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '药敏结果记录数': "select count( distinct CASEID||TESTNO||BACTERIA||ANTIBIOTICS ) as num ,to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'药敏结果记录数' as 业务类型 from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '体温测量数': "select count( distinct CASEID||RECORDDATE ) as num ,to_char(to_date(substr(RECORDDATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'体温测量数' as 业务类型 from TEMPERATURE where RECORDDATE is not null group by to_char(to_date(substr(RECORDDATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(RECORDDATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(RECORDDATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '入出转记录数': "select count( distinct CASEID||BEGINTIME||DEPT ) as num , to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'入出转记录数' as 业务类型 from department where BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '常规结果记录数': "select count( distinct CASEID||TESTNO||RINDEX ) as num ,to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'常规结果记录数' as 业务类型 from ROUTINE2 where REQUESTTIME is not null group by to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(REQUESTTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '影像检查记录数': "select count( distinct CASEID||EXAM_NO ) as num ,to_char(to_date(substr(EXAM_DATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'影像检查记录数' as 业务类型 from EXAM where EXAM_DATE is not null group by to_char(to_date(substr(EXAM_DATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(EXAM_DATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(EXAM_DATE,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# '治疗记录数': "select count( distinct CASEID||TNO||TTYPE||DEPT ) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month ,'治疗记录数' as 业务类型 from TREATMENT1 where BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '中心静脉插管记录数': "select count(1) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'中心静脉插管记录数' as 业务类型 from treatment1 where TTYPE like '%中心%静脉%' and BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '呼吸机记录数': "select count(1) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'呼吸机记录数' as 业务类型 from treatment1 where TTYPE like '%呼吸机%' and BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01' ",
# '泌尿道插管记录数': "select count(1) as num ,to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') as month,'泌尿道插管记录数' as 业务类型 from treatment1 where TTYPE like '%泌尿道%' and BEGINTIME is not null group by to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') having to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') <= to_char(sysdate,'iyyy-iw') and to_char(to_date(substr(BEGINTIME,1,10),'yyyy-mm-dd'), 'iyyy-iw') >= '1990-01'",
# }
#
# for bus in bus_dic:
# temp = pd.read_sql(bus_dic[bus],con=engine)
# temp['month'] = temp['month'].str.replace('-','年') +'周'
# res_全业务 = res_全业务.append(temp)
# return res_全业务
# Update of the first third-level figure
@app.callback(
Output('third_level_first_fig','figure'),
Output('general_situation_third_level_first_fig_data', 'data'),
Input('general_situation_third_level_first_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
Input('third_level_first_window_choice', 'value'),
# Input('third_level_first_date_type_choice', 'value'),
# prevent_initial_call=True,
)
# def update_third_level_first_fig(general_situation_third_level_first_fig_data,db_con_url,count_time,window,date_type):
def update_third_level_first_fig(general_situation_third_level_first_fig_data,db_con_url,count_time,window):
# print(date_type)
if db_con_url is None:
        return dash.no_update, dash.no_update  # this callback has two outputs, so return one no_update per output
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_third_level_first_fig_data is None:
general_situation_third_level_first_fig_data = {}
# third_level_first_fig_date = get_third_level_first_fig_date(engine, 'week')
# general_situation_third_level_first_fig_data['week'] = third_level_first_fig_date.to_json(orient='split', date_format='iso')
# third_level_first_fig_date = get_third_level_first_fig_date(engine,'month')
# general_situation_third_level_first_fig_data['month'] = third_level_first_fig_date.to_json(orient='split',date_format='iso')
third_level_first_fig_date = get_third_level_first_fig_date(engine)
general_situation_third_level_first_fig_data['third_level_first_fig_date'] = third_level_first_fig_date.to_json(orient='split',date_format='iso')
general_situation_third_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_third_level_first_fig_data = json.dumps(general_situation_third_level_first_fig_data)
else:
general_situation_third_level_first_fig_data = json.loads(general_situation_third_level_first_fig_data)
if db_con_url['hosname'] != general_situation_third_level_first_fig_data['hosname']:
# third_level_first_fig_date = get_third_level_first_fig_date(engine, 'week')
# general_situation_third_level_first_fig_data['week'] = third_level_first_fig_date.to_json( orient='split', date_format='iso')
# third_level_first_fig_date = get_third_level_first_fig_date(engine, 'month')
# general_situation_third_level_first_fig_data['month'] = third_level_first_fig_date.to_json( orient='split', date_format='iso')
third_level_first_fig_date = get_third_level_first_fig_date(engine)
general_situation_third_level_first_fig_data[ 'third_level_first_fig_date'] = third_level_first_fig_date.to_json(orient='split', date_format='iso')
general_situation_third_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_third_level_first_fig_data = json.dumps( general_situation_third_level_first_fig_data)
else:
third_level_first_fig_date = pd.read_json(general_situation_third_level_first_fig_data['third_level_first_fig_date'],orient='split')
general_situation_third_level_first_fig_data = dash.no_update
        # Subplot order of the business types in the Bollinger-band charts
# bus = ['入院人数', '入出转记录数', '抗菌药物医嘱数', '手术台数', '菌检出结果记录数', '药敏结果记录数', '体温测量数', '常规结果记录数', '影像检查记录数', '治疗记录数']
bus = [ '抗菌药物医嘱数', '手术台数', '菌检出结果记录数', '药敏结果记录数', '体温测量数', '常规结果记录数', '影像检查记录数', '治疗记录数','中心静脉插管记录数','呼吸机记录数','出院人数','泌尿道插管记录数','入出转记录数','入院人数']
# print(third_level_first_fig_date)
fig = make_subplots(rows= 7 , cols=2, shared_xaxes=True)
# btime = pd.read_sql(f"select to_char(to_date('{btime}-01','yyyy-mm-dd'),'iyyy-iw') as week from dual",con=engine)['week'][0].replace('-','年')+'周' if date_type == 'week' else btime
# etime = pd.read_sql(f"select to_char(to_date('{etime}-01','yyyy-mm-dd'),'iyyy-iw') as week from dual",con=engine)['week'][0].replace('-','年')+'周' if date_type == 'week' else etime
for i in range(1, 8):
temp1 = bus[(i - 1) * 2]
temp2 = bus[i * 2 - 1]
df1 = third_level_first_fig_date[third_level_first_fig_date['业务类型'] == temp1]
df1 = df1[ (df1['month']>=btime) & (df1['month']<=etime) ]
df1 = df1.sort_values(['month'])
df2 = third_level_first_fig_date[third_level_first_fig_date['业务类型'] == temp2]
df2 = df2[ (df2['month'] >= btime) & (df2['month'] <= etime)]
df2 = df2.sort_values(['month'])
print(df1, df2)
fig.add_trace(
go.Scatter(x=df1['month'], y=df1['num'], name=bus[(i - 1) * 2]),
row=i, col=1
)
data = df1[['month', 'num']]
            mean_data = np.array([data[i: i + window]['num'].mean() for i in range(len(data) - window + 1)])  # rolling mean; kept as an ndarray so the upper/lower bands are easier to compute
            std_data = np.array([data[i: i + window]['num'].std() for i in range(len(data) - window + 1)])  # rolling standard deviation
            up_line = pd.DataFrame(mean_data + 2 * std_data, columns=['num'])
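            # Assumed continuation (illustrative sketch, not taken from the source): the lower
            # band mirrors the upper band and both are typically drawn on the same subplot:
            #   down_line = pd.DataFrame(mean_data - 2 * std_data, columns=['num'])
            #   fig.add_trace(go.Scatter(x=df1['month'][window - 1:], y=up_line['num'], name=temp1 + '上轨'), row=i, col=1)
            #   fig.add_trace(go.Scatter(x=df1['month'][window - 1:], y=down_line['num'], name=temp1 + '下轨'), row=i, col=1)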
import pandas as pd
import re
from scipy.sparse import csr_matrix
ratings = pd.read_csv("./data/ml-latest-small/ratings.csv")
movies = pd.read_csv("./data/ml-latest-small/movies.csv")
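# Illustrative sketch (assumption, not part of the original snippet): with the standard
# MovieLens columns (userId, movieId, rating), the imported csr_matrix is typically used
# to build a sparse user-item matrix from the ratings table.
def build_user_item_matrix(ratings_df):
    """Return a sparse (n_users x n_movies) rating matrix plus the user/movie category labels."""
    user_cat = ratings_df["userId"].astype("category")
    movie_cat = ratings_df["movieId"].astype("category")
    matrix = csr_matrix(
        (ratings_df["rating"].to_numpy(), (user_cat.cat.codes, movie_cat.cat.codes))
    )
    return matrix, user_cat.cat.categories, movie_cat.cat.categories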
import csv
import logging
import os
import tempfile
import time
from hashlib import sha256
from ipaddress import IPv4Address, ip_address
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from imblearn.under_sampling import RandomUnderSampler
from pandas.api.types import is_bool_dtype as is_bool
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import (
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_string_dtype,
)
from pandas.core.dtypes.common import is_period_dtype
from upgini.errors import ValidationError
from upgini.http import UPGINI_API_KEY, get_rest_client
from upgini.metadata import (
EVAL_SET_INDEX,
SYSTEM_RECORD_ID,
DataType,
FeaturesFilter,
FileColumnMeaningType,
FileColumnMetadata,
FileMetadata,
FileMetrics,
ModelTaskType,
NumericInterval,
RuntimeParameters,
SearchCustomization,
)
from upgini.normalizer.phone_normalizer import phone_to_int
from upgini.search_task import SearchTask
class Dataset(pd.DataFrame):
MIN_ROWS_COUNT = 100
MAX_ROWS_REGISTERED = 299_999
MAX_ROWS_UNREGISTERED = 149_999
FIT_SAMPLE_ROWS = 100_000
FIT_SAMPLE_THRESHOLD = FIT_SAMPLE_ROWS * 3
IMBALANCE_THESHOLD = 0.4
MIN_TARGET_CLASS_COUNT = 100
MAX_MULTICLASS_CLASS_COUNT = 100
MIN_SUPPORTED_DATE_TS = 1114992000000 # 2005-05-02
_metadata = [
"dataset_name",
"description",
"meaning_types",
"search_keys",
"ignore_columns",
"hierarchical_group_keys",
"hierarchical_subgroup_keys",
"date_format",
"random_state",
"task_type",
"initial_data",
"file_upload_id",
"etalon_def",
"endpoint",
"api_key",
"columns_renaming",
"sampled",
]
def __init__(
self,
dataset_name: str,
description: Optional[str] = None,
df: Optional[pd.DataFrame] = None,
path: Optional[str] = None,
meaning_types: Optional[Dict[str, FileColumnMeaningType]] = None,
search_keys: Optional[List[Tuple[str, ...]]] = None,
model_task_type: Optional[ModelTaskType] = None,
date_format: Optional[str] = None,
random_state: Optional[int] = None,
endpoint: Optional[str] = None,
api_key: Optional[str] = None,
**kwargs,
):
if df is not None:
data = df.copy()
elif path is not None:
if "sep" in kwargs:
data = pd.read_csv(path, **kwargs)
else:
# try different separators: , ; \t ...
with open(path, mode="r") as csvfile:
sep = csv.Sniffer().sniff(csvfile.read(2048)).delimiter
kwargs["sep"] = sep
data = pd.read_csv(path, **kwargs)
else:
raise ValueError("DataFrame or path to file should be passed.")
if isinstance(data, pd.DataFrame):
super(Dataset, self).__init__(data) # type: ignore
else:
raise ValueError("Iteration is not supported. Remove `iterator` and `chunksize` arguments and try again.")
self.dataset_name = dataset_name
self.task_type = model_task_type
self.description = description
self.meaning_types = meaning_types
self.search_keys = search_keys
self.ignore_columns = []
self.hierarchical_group_keys = []
self.hierarchical_subgroup_keys = []
self.date_format = date_format
self.initial_data = data.copy()
self.file_upload_id: Optional[str] = None
self.etalon_def: Optional[Dict[str, str]] = None
self.endpoint = endpoint
self.api_key = api_key
self.random_state = random_state
self.columns_renaming: Dict[str, str] = {}
self.sampled: bool = False
@property
def meaning_types_checked(self) -> Dict[str, FileColumnMeaningType]:
if self.meaning_types is None:
raise ValueError("meaning_types is empty.")
else:
return self.meaning_types
@property
def search_keys_checked(self) -> List[Tuple[str, ...]]:
if self.search_keys is None:
raise ValueError("search_keys is empty.")
else:
return self.search_keys
@property
def etalon_def_checked(self) -> Dict[str, str]:
if self.etalon_def is None:
self.etalon_def = {
v.value: k for k, v in self.meaning_types_checked.items() if v != FileColumnMeaningType.FEATURE
}
return self.etalon_def
def __validate_min_rows_count(self):
if self.shape[0] < self.MIN_ROWS_COUNT:
raise ValueError(f"X should contain at least {self.MIN_ROWS_COUNT} valid distinct rows.")
def __validate_max_row_count(self):
api_key = self.api_key or os.environ.get(UPGINI_API_KEY)
is_registered = api_key is not None and api_key != ""
if is_registered:
if len(self) > self.MAX_ROWS_REGISTERED:
raise ValueError(
f"Total X + eval_set rows count limit is {self.MAX_ROWS_REGISTERED}. "
"Please sample X and eval_set"
)
else:
if len(self) > self.MAX_ROWS_UNREGISTERED:
raise ValueError(
f"For unregistered users total rows count limit for X + eval_set is {self.MAX_ROWS_UNREGISTERED}. "
"Please register to increase the limit"
)
def __rename_columns(self):
# logging.info("Replace restricted symbols in column names")
for column in self.columns:
if len(column) == 0:
raise ValueError("Some of column names are empty. Fill them and try again, please.")
new_column = str(column).lower()
if ord(new_column[0]) not in range(ord("a"), ord("z")):
new_column = "a" + new_column
for idx, c in enumerate(new_column):
if ord(c) not in range(ord("a"), ord("z")) and ord(c) not in range(ord("0"), ord("9")):
new_column = new_column[:idx] + "_" + new_column[idx + 1 :]
self.rename(columns={column: new_column}, inplace=True)
self.meaning_types = {
(new_column if key == str(column) else key): value for key, value in self.meaning_types_checked.items()
}
self.search_keys = [
tuple(new_column if key == str(column) else key for key in keys) for keys in self.search_keys_checked
]
self.columns_renaming[new_column] = str(column)
def __validate_too_long_string_values(self):
"""Check that string values less than 400 characters"""
# logging.info("Validate too long string values")
for col in self.columns:
if is_string_dtype(self[col]):
max_length: int = self[col].astype("str").str.len().max()
if max_length > 400:
raise ValueError(
f"Some of column {col} values are too long: {max_length} characters. "
"Remove this column or trim values to 50 characters."
)
def __clean_duplicates(self):
"""Clean DataSet from full duplicates."""
# logging.info("Clean full duplicates")
nrows = len(self)
unique_columns = self.columns.tolist()
logging.info(f"Dataset shape before clean duplicates: {self.shape}")
self.drop_duplicates(subset=unique_columns, inplace=True)
logging.info(f"Dataset shape after clean duplicates: {self.shape}")
nrows_after_full_dedup = len(self)
share_full_dedup = 100 * (1 - nrows_after_full_dedup / nrows)
if share_full_dedup > 0:
print(f"{share_full_dedup:.5f}% of the rows are fully duplicated")
target_column = self.etalon_def_checked.get(FileColumnMeaningType.TARGET.value)
if target_column is not None:
unique_columns.remove(target_column)
unique_columns.remove(SYSTEM_RECORD_ID)
self.drop_duplicates(subset=unique_columns, inplace=True)
nrows_after_tgt_dedup = len(self)
share_tgt_dedup = 100 * (1 - nrows_after_tgt_dedup / nrows_after_full_dedup)
if nrows_after_tgt_dedup < nrows_after_full_dedup:
msg = (
f"{share_tgt_dedup:.5f}% of rows in X are duplicates with different y values. "
"Please check the dataframe and restart fit"
)
logging.error(msg)
raise ValueError(msg)
def __convert_bools(self):
"""Convert bool columns True -> 1, False -> 0"""
# logging.info("Converting bool to int")
for col in self.columns:
if is_bool(self[col]):
self[col] = self[col].astype("Int64")
def __convert_float16(self):
"""Convert float16 to float"""
# logging.info("Converting float16 to float")
for col in self.columns:
if is_float_dtype(self[col]):
self[col] = self[col].astype("float64")
def __correct_decimal_comma(self):
"""Check DataSet for decimal commas and fix them"""
# logging.info("Correct decimal commas")
tmp = self.head(10)
# all columns with sep="," will have dtype == 'object', i.e string
# sep="." will be casted to numeric automatically
cls_to_check = [i for i in tmp.columns if is_string_dtype(tmp[i])]
for col in cls_to_check:
if tmp[col].astype(str).str.match("^[0-9]+,[0-9]*$").any():
self[col] = self[col].astype(str).str.replace(",", ".").astype(np.float64)
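        # e.g. a column read as ["1,5", "2,25"] (object dtype) ends up as float64 values
        # [1.5, 2.25] after the comma-to-dot replacement above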
def __to_millis(self):
"""Parse date column and transform it to millis"""
date = self.etalon_def_checked.get(FileColumnMeaningType.DATE.value) or self.etalon_def_checked.get(
FileColumnMeaningType.DATETIME.value
)
def intToOpt(i: int) -> Optional[int]:
if i == -9223372036855:
return None
else:
return i
if date is not None and date in self.columns:
# logging.info("Transform date column to millis")
if is_string_dtype(self[date]):
self[date] = (
pd.to_datetime(self[date], format=self.date_format).dt.floor("D").view(np.int64) // 1_000_000
)
elif is_datetime(self[date]):
self[date] = self[date].dt.floor("D").view(np.int64) // 1_000_000
elif is_period_dtype(self[date]):
self[date] = pd.to_datetime(self[date].astype("string")).dt.floor("D").view(np.int64) // 1_000_000
elif is_numeric_dtype(self[date]):
msg = f"Unsupported type of date column {date}. Convert to datetime manually please."
logging.error(msg)
raise Exception(msg)
self[date] = self[date].apply(lambda x: intToOpt(x)).astype("Int64")
@staticmethod
def __email_to_hem(email: str) -> Optional[str]:
if email is None or not isinstance(email, str) or email == "":
return None
else:
return sha256(email.lower().encode("utf-8")).hexdigest()
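    # Note: __email_to_hem lowercases the address before hashing, so e.g. "User@Example.com"
    # and "user@example.com" produce the same HEM value.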
def __hash_email(self):
"""Add column with HEM if email presented in search keys"""
email = self.etalon_def_checked.get(FileColumnMeaningType.EMAIL.value)
if email is not None and email in self.columns:
# logging.info("Hashing email")
generated_hem_name = "generated_hem"
self[generated_hem_name] = self[email].apply(self.__email_to_hem)
self.meaning_types_checked[generated_hem_name] = FileColumnMeaningType.HEM
self.meaning_types_checked.pop(email)
self.etalon_def_checked[FileColumnMeaningType.HEM.value] = generated_hem_name
del self.etalon_def_checked[FileColumnMeaningType.EMAIL.value]
self.search_keys = [
tuple(key if key != email else generated_hem_name for key in search_group)
for search_group in self.search_keys_checked
]
self["email_domain"] = self[email].str.split("@").str[1]
self.drop(columns=email, inplace=True)
@staticmethod
def __ip_to_int(ip: Union[str, int, IPv4Address]) -> Optional[int]:
try:
return int(ip_address(ip))
except Exception:
return None
def __convert_ip(self):
"""Convert ip address to int"""
ip = self.etalon_def_checked.get(FileColumnMeaningType.IP_ADDRESS.value)
if ip is not None and ip in self.columns:
# logging.info("Convert ip address to int")
self[ip] = self[ip].apply(self.__ip_to_int).astype("Int64")
def __normalize_iso_code(self):
iso_code = self.etalon_def_checked.get(FileColumnMeaningType.COUNTRY.value)
if iso_code is not None and iso_code in self.columns:
# logging.info("Normalize iso code column")
self[iso_code] = (
self[iso_code]
.astype(str)
.str.upper()
.str.replace(r"[^A-Z]", "", regex=True)
.str.replace("UK", "GB", regex=False)
)
def __normalize_postal_code(self):
postal_code = self.etalon_def_checked.get(FileColumnMeaningType.POSTAL_CODE.value)
if postal_code is not None and postal_code in self.columns:
# logging.info("Normalize postal code")
self[postal_code] = (
self[postal_code]
.astype(str)
.str.upper()
.str.replace(r"[^0-9A-Z]", "", regex=True)
.str.replace(r"^0+\B", "", regex=True)
)
def __remove_old_dates(self):
date_column = self.etalon_def_checked.get(FileColumnMeaningType.DATE.value) or self.etalon_def_checked.get(
FileColumnMeaningType.DATETIME.value
)
if date_column is not None:
old_subset = self[self[date_column] < self.MIN_SUPPORTED_DATE_TS]
if len(old_subset) > 0:
logging.info(f"df before dropping old rows: {self.shape}")
self.drop(index=old_subset.index, inplace=True)
logging.info(f"df after dropping old rows: {self.shape}")
msg = "We don't have data before '2000-01-01' and removed all earlier records from the search dataset"
logging.warning(msg)
print("WARN: ", msg)
def __drop_ignore_columns(self):
"""Drop ignore columns"""
columns_to_drop = list(set(self.columns) & set(self.ignore_columns))
if len(columns_to_drop) > 0:
# logging.info(f"Dropping ignore columns: {self.ignore_columns}")
self.drop(columns_to_drop, axis=1, inplace=True)
def __target_value(self) -> pd.Series:
target_column = self.etalon_def_checked.get(FileColumnMeaningType.TARGET.value, "")
target: pd.Series = self[target_column]
# clean target from nulls
target.dropna(inplace=True)
if is_numeric_dtype(target):
target = target.loc[np.isfinite(target)] # type: ignore
else:
target = target.loc[target != ""]
return target
def __validate_target(self):
# logging.info("Validating target")
target_column = self.etalon_def_checked.get(FileColumnMeaningType.TARGET.value, "")
target = self[target_column]
if self.task_type == ModelTaskType.BINARY:
if not is_integer_dtype(target):
try:
self[target_column] = self[target_column].astype("int")
except ValueError:
logging.exception("Failed to cast target to integer for binary task type")
raise ValidationError(
f"Unexpected dtype of target for binary task type: {target.dtype}." " Expected int or bool"
)
target_classes_count = target.nunique()
if target_classes_count != 2:
msg = f"Binary task type should contain only 2 target values, but {target_classes_count} presented"
logging.error(msg)
raise ValidationError(msg)
elif self.task_type == ModelTaskType.MULTICLASS:
if not is_integer_dtype(target) and not is_string_dtype(target):
if is_numeric_dtype(target):
try:
self[target_column] = self[target_column].astype("int")
except ValueError:
logging.exception("Failed to cast target to integer for multiclass task type")
raise ValidationError(
f"Unexpected dtype of target for multiclass task type: {target.dtype}."
"Expected int or str"
)
else:
msg = f"Unexpected dtype of target for multiclass task type: {target.dtype}. Expected int or str"
logging.exception(msg)
raise ValidationError(msg)
elif self.task_type == ModelTaskType.REGRESSION:
if not is_float_dtype(target):
try:
self[target_column] = self[target_column].astype("float")
except ValueError:
logging.exception("Failed to cast target to float for regression task type")
raise ValidationError(
f"Unexpected dtype of target for regression task type: {target.dtype}. Expected float"
)
elif self.task_type == ModelTaskType.TIMESERIES:
if not is_float_dtype(target):
try:
self[target_column] = self[target_column].astype("float")
except ValueError:
logging.exception("Failed to cast target to float for timeseries task type")
raise ValidationError(
f"Unexpected dtype of target for timeseries task type: {target.dtype}. Expected float"
)
def __resample(self):
# logging.info("Resampling etalon")
# Resample imbalanced target. Only train segment (without eval_set)
if self.task_type in [ModelTaskType.BINARY, ModelTaskType.MULTICLASS]:
if EVAL_SET_INDEX in self.columns:
train_segment = self[self[EVAL_SET_INDEX] == 0]
validation_segment = self[self[EVAL_SET_INDEX] != 0]
else:
train_segment = self
validation_segment = None
count = len(train_segment)
min_class_count = count
min_class_value = None
target_column = self.etalon_def_checked.get(FileColumnMeaningType.TARGET.value, "")
target = train_segment[target_column]
target_classes_count = target.nunique()
if target_classes_count > self.MAX_MULTICLASS_CLASS_COUNT:
msg = (
f"The number of target classes {target_classes_count} exceeds the allowed threshold: "
f"{self.MAX_MULTICLASS_CLASS_COUNT}. Please, correct your data and try again"
)
logging.error(msg)
raise ValidationError(msg)
unique_target = target.unique()
for v in list(unique_target): # type: ignore
current_class_count = len(train_segment.loc[target == v])
if current_class_count < min_class_count:
min_class_count = current_class_count
min_class_value = v
if min_class_count < self.MIN_TARGET_CLASS_COUNT:
msg = (
f"The rarest class `{min_class_value}` occurs {min_class_count}. "
"The minimum number of observations for each class in a train dataset must be "
f"grater than {self.MIN_TARGET_CLASS_COUNT}. Please, correct your data and try again"
)
logging.error(msg)
raise ValidationError(msg)
min_class_percent = self.IMBALANCE_THESHOLD / target_classes_count
min_class_threshold = min_class_percent * count
if min_class_count < min_class_threshold:
logging.info(
f"Target is imbalanced. The rarest class `{min_class_value}` occurs {min_class_count} times. "
"The minimum number of observations for each class in a train dataset must be "
f"grater than or equal to {min_class_threshold} ({min_class_percent * 100} %). "
"It will be undersampled"
)
if is_string_dtype(target):
target_replacement = {v: i for i, v in enumerate(unique_target)} # type: ignore
prepared_target = target.replace(target_replacement)
else:
prepared_target = target
sampler = RandomUnderSampler(random_state=self.random_state)
X = train_segment[SYSTEM_RECORD_ID]
X = X.to_frame(SYSTEM_RECORD_ID)
new_x, _ = sampler.fit_resample(X, prepared_target) # type: ignore
resampled_data = train_segment[train_segment[SYSTEM_RECORD_ID].isin(new_x[SYSTEM_RECORD_ID])]
if validation_segment is not None:
resampled_data = pd.concat([resampled_data, validation_segment], ignore_index=True)
self._update_inplace(resampled_data)
logging.info(f"Shape after resampling: {self.shape}")
self.sampled = True
# Resample over fit threshold
if EVAL_SET_INDEX in self.columns:
train_segment = self[self[EVAL_SET_INDEX] == 0]
validation_segment = self[self[EVAL_SET_INDEX] != 0]
else:
train_segment = self
validation_segment = None
if len(train_segment) > self.FIT_SAMPLE_THRESHOLD:
logging.info(
f"Etalon has size {len(train_segment)} more than threshold {self.FIT_SAMPLE_THRESHOLD} "
f"and will be downsampled to {self.FIT_SAMPLE_ROWS}"
)
resampled_data = train_segment.sample(n=self.FIT_SAMPLE_ROWS, random_state=self.random_state)
if validation_segment is not None:
resampled_data = pd.concat([resampled_data, validation_segment], ignore_index=True)
self._update_inplace(resampled_data)
logging.info(f"Shape after resampling: {self.shape}")
self.sampled = True
def __convert_phone(self):
"""Convert phone/msisdn to int"""
# logging.info("Convert phone to int")
msisdn_column = self.etalon_def_checked.get(FileColumnMeaningType.MSISDN.value)
if msisdn_column is not None and msisdn_column in self.columns:
# logging.info(f"going to apply phone_to_int for column {msisdn_column}")
phone_to_int(self, msisdn_column)
self[msisdn_column] = self[msisdn_column].astype("Int64")
def __features(self):
return [
f for f, meaning_type in self.meaning_types_checked.items() if meaning_type == FileColumnMeaningType.FEATURE
]
def __remove_dates_from_features(self):
# logging.info("Remove date columns from features")
for f in self.__features():
if is_datetime(self[f]) or is_period_dtype(self[f]):
logging.warning(f"Column {f} has datetime or period type but is feature and will be dropped from tds")
self.drop(columns=f, inplace=True)
del self.meaning_types_checked[f]
def __remove_empty_and_constant_features(self):
# logging.info("Remove almost constant and almost empty columns")
for f in self.__features():
value_counts = self[f].value_counts(dropna=False, normalize=True)
# most_frequent_value = value_counts.index[0]
most_frequent_percent = value_counts.iloc[0]
if most_frequent_percent >= 0.99:
# logging.warning(
# f"Column {f} has value {most_frequent_value} with {most_frequent_percent * 100}% > 99% "
# " and will be dropped from tds"
# )
self.drop(columns=f, inplace=True)
del self.meaning_types_checked[f]
def __remove_high_cardinality_features(self):
# logging.info("Remove columns with high cardinality")
count = len(self)
for f in self.__features():
if (is_string_dtype(self[f]) or is_integer_dtype(self[f])) and self[f].nunique() / count >= 0.9:
# logging.warning(
# f"Column {f} has high cardinality (more than 90% uniques and string or integer type) "
# "and will be droped from tds"
# )
self.drop(columns=f, inplace=True)
del self.meaning_types_checked[f]
def __convert_features_types(self):
# logging.info("Convert features to supported data types")
for f in self.__features():
if self[f].dtype == object:
self[f] = self[f].astype(str)
elif not is_numeric_dtype(self[f].dtype):
self[f] = self[f].astype(str)
def __validate_dataset(self, validate_target: bool, silent_mode: bool):
"""Validate DataSet"""
# logging.info("validating etalon")
date_millis = self.etalon_def_checked.get(FileColumnMeaningType.DATE.value) or self.etalon_def_checked.get(
FileColumnMeaningType.DATETIME.value
)
target = self.etalon_def_checked.get(FileColumnMeaningType.TARGET.value)
score = self.etalon_def_checked.get(FileColumnMeaningType.SCORE.value)
if validate_target:
if target is None:
raise ValidationError("Target column is absent in meaning_types.")
if self.task_type != ModelTaskType.MULTICLASS:
target_value = self.__target_value()
target_items = target_value.nunique()
if target_items == 1:
raise ValidationError("Target contains only one distinct value.")
elif target_items == 0:
raise ValidationError("Target contains only NaN or incorrect values.")
self[target] = self[target].apply(pd.to_numeric, errors="coerce")
keys_to_validate = [key for search_group in self.search_keys_checked for key in search_group]
mandatory_columns = [date_millis, target, score]
columns_to_validate = mandatory_columns.copy()
columns_to_validate.extend(keys_to_validate)
columns_to_validate = set([i for i in columns_to_validate if i is not None])
nrows = len(self)
validation_stats = {}
self["valid_keys"] = 0
self["valid_mandatory"] = True
for col in columns_to_validate:
self[f"{col}_is_valid"] = ~self[col].isnull()
if validate_target and target is not None and col == target:
self.loc[self[target] == np.Inf, f"{col}_is_valid"] = False
if col in mandatory_columns:
self["valid_mandatory"] = self["valid_mandatory"] & self[f"{col}_is_valid"]
invalid_values = list(self.loc[self[f"{col}_is_valid"] == 0, col].head().values) # type: ignore
valid_share = self[f"{col}_is_valid"].sum() / nrows
validation_stats[col] = {}
optional_drop_message = "Invalid rows will be dropped. " if col in mandatory_columns else ""
if valid_share == 1:
valid_status = "All valid"
valid_message = "All values in this column are good to go"
elif 0 < valid_share < 1:
valid_status = "Some invalid"
valid_message = (
f"{100 * (1 - valid_share):.5f}% of the values of this column failed validation. "
f"{optional_drop_message}"
f"Some examples of invalid values: {invalid_values}"
)
else:
valid_status = "All invalid"
valid_message = (
f"{100 * (1 - valid_share):.5f}% of the values of this column failed validation. "
f"{optional_drop_message}"
f"Some examples of invalid values: {invalid_values}"
)
validation_stats[col]["valid_status"] = valid_status
validation_stats[col]["valid_message"] = valid_message
if col in keys_to_validate:
self["valid_keys"] = self["valid_keys"] + self[f"{col}_is_valid"]
self.drop(columns=f"{col}_is_valid", inplace=True)
self["is_valid"] = self["valid_keys"] > 0
self["is_valid"] = self["is_valid"] & self["valid_mandatory"]
self.drop(columns=["valid_keys", "valid_mandatory"], inplace=True)
drop_idx = self[self["is_valid"] != 1].index # type: ignore
self.drop(drop_idx, inplace=True)
self.drop(columns=["is_valid"], inplace=True)
if not silent_mode:
df_stats = pd.DataFrame.from_dict(validation_stats, orient="index")
df_stats.reset_index(inplace=True)
df_stats.columns = ["Column name", "Status", "Description"]
try:
import html
from IPython.display import HTML, display # type: ignore
def map_color(text):
colormap = {"All valid": "#DAF7A6", "Some invalid": "#FFC300", "All invalid": "#FF5733"}
return (
f"<td style='background-color:{colormap[text]};color:black'>{text}</td>"
if text in colormap
else f"<td>{text}</td>"
)
df_stats["Description"] = df_stats["Description"].apply(lambda x: html.escape(x))
html_stats = (
"<table>"
+ "<tr>"
+ "".join(f"<th style='font-weight:bold'>{col}</th>" for col in df_stats.columns)
+ "</tr>"
+ "".join("<tr>" + "".join(map(map_color, row[1:])) + "</tr>" for row in df_stats.itertuples())
+ "</table>"
)
display(HTML(html_stats))
except ImportError:
print(df_stats)
def __validate_meaning_types(self, validate_target: bool):
# logging.info("Validating meaning types")
if self.meaning_types is None or len(self.meaning_types) == 0:
raise ValueError("Please pass the `meaning_types` argument before validation.")
if SYSTEM_RECORD_ID not in self.columns:
self[SYSTEM_RECORD_ID] = self.apply(lambda row: hash(tuple(row)), axis=1)
self.meaning_types[SYSTEM_RECORD_ID] = FileColumnMeaningType.SYSTEM_RECORD_ID
for column in self.meaning_types:
if column not in self.columns:
raise ValueError(f"Meaning column {column} doesn't exist in dataframe columns: {self.columns}.")
if validate_target and FileColumnMeaningType.TARGET not in self.meaning_types.values():
raise ValueError("Target column is not presented in meaning types. Specify it, please.")
def __validate_search_keys(self):
# logging.info("Validating search keys")
if self.search_keys is None or len(self.search_keys) == 0:
raise ValueError("Please pass `search_keys` argument before validation.")
for keys_group in self.search_keys:
for key in keys_group:
if key not in self.columns:
raise ValueError(f"Search key {key} doesn't exist in dataframe columns: {self.columns}.")
def validate(self, validate_target: bool = True, silent_mode: bool = False):
# logging.info("Validating dataset")
self.__rename_columns()
self.__validate_meaning_types(validate_target=validate_target)
self.__validate_search_keys()
self.__drop_ignore_columns()
self.__validate_too_long_string_values()
self.__clean_duplicates()
self.__convert_bools()
self.__convert_float16()
self.__correct_decimal_comma()
self.__to_millis()
self.__remove_old_dates()
self.__hash_email()
self.__convert_ip()
self.__convert_phone()
self.__normalize_iso_code()
self.__normalize_postal_code()
self.__remove_dates_from_features()
self.__remove_empty_and_constant_features()
self.__remove_high_cardinality_features()
self.__convert_features_types()
self.__validate_dataset(validate_target, silent_mode)
if validate_target:
self.__validate_target()
self.__resample()
self.__validate_min_rows_count()
self.__validate_max_row_count()
def __construct_metadata(self) -> FileMetadata:
# logging.info("Constructing dataset metadata")
columns = []
for index, (column_name, column_type) in enumerate(zip(self.columns, self.dtypes)):
if column_name not in self.ignore_columns:
if column_name in self.meaning_types_checked:
meaning_type = self.meaning_types_checked[column_name]
# Temporary workaround while backend doesn't support datetime
if meaning_type == FileColumnMeaningType.DATETIME:
meaning_type = FileColumnMeaningType.DATE
else:
meaning_type = FileColumnMeaningType.FEATURE
if meaning_type in {
FileColumnMeaningType.DATE,
FileColumnMeaningType.DATETIME,
FileColumnMeaningType.IP_ADDRESS,
}:
min_max_values = NumericInterval(
minValue=self[column_name].astype("Int64").min(),
maxValue=self[column_name].astype("Int64").max(),
)
else:
min_max_values = None
column_meta = FileColumnMetadata(
index=index,
name=column_name,
originalName=self.columns_renaming.get(column_name) or column_name,
dataType=self.__get_data_type(column_type, column_name),
meaningType=meaning_type,
minMaxValues=min_max_values,
)
columns.append(column_meta)
return FileMetadata(
name=self.dataset_name,
description=self.description,
columns=columns,
searchKeys=self.search_keys,
hierarchicalGroupKeys=self.hierarchical_group_keys,
hierarchicalSubgroupKeys=self.hierarchical_subgroup_keys,
taskType=self.task_type,
)
def __get_data_type(self, pandas_data_type, column_name) -> DataType:
if is_integer_dtype(pandas_data_type):
return DataType.INT
elif is_float_dtype(pandas_data_type):
return DataType.DECIMAL
        elif is_string_dtype(pandas_data_type):
            return DataType.STRING  # assumed: remaining string-typed columns map to the string data type
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'AttributeError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'AttributeError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'AttributeError not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
    g = move_df.to_grid(8)
    assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
assert_frame_equal(move_df, expected)
def test_generate_weekend_features():
move_df = _default_move_df()
new_move_df = move_df.generate_weekend_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 0],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
],
columns=['lat', 'lon', 'datetime', 'id', 'weekend'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert WEEK_END not in move_df
move_df.generate_weekend_features()
assert_frame_equal(move_df, expected)
def test_generate_time_of_day_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_of_day_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Early morning',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'period'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert PERIOD not in move_df
move_df.generate_time_of_day_features()
assert_frame_equal(move_df, expected)
def test_generate_datetime_in_format_cyclical():
move_df = _default_move_df()
new_move_df = move_df.generate_datetime_in_format_cyclical(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour_sin', 'hour_cos'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR_SIN not in move_df
move_df.generate_datetime_in_format_cyclical()
assert_frame_equal(move_df, expected)
def test_generate_dist_time_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_time_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153134343689,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'time_to_prev',
'speed_to_prev',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_TO_PREV not in move_df
move_df.generate_dist_time_speed_features()
assert_frame_equal(move_df, expected)
def test_generate_dist_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'dist_to_next',
'dist_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_PREV_TO_NEXT not in move_df
move_df.generate_dist_features()
assert_frame_equal(move_df, expected)
def test_generate_time_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
1.0,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1.0,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
                Timestamp('2008-10-23 05:53:11'),
import sys
import boto3
import re
from uuid import UUID
import pandas as pd
from datetime import date, timedelta
from tabulate import tabulate
def is_email_address(string):
return re.match(r"[^@]+@[^@]+\.[^@]+", string)
def is_uuid(uuid_to_test, version=4):
try:
uuid_obj = UUID(uuid_to_test, version=version)
except ValueError:
return False
return str(uuid_obj) == uuid_to_test
def find_filter_method(string):
if is_email_address(string):
return 'email'
elif is_uuid(string, version=4):
return 'uuid'
def get_cognito_id(user_cognito_data):
for record in user_cognito_data['Attributes']:
if record['Name'] == "sub":
return record['Value']
def get_cognito_user(user_list, requested_user):
"""
:param user_list: result of get_cognito_user_list
:param by: 'email' or 'uuid'
:return:
"""
user_list_dict = build_cognito_user_dict(user_list, by=find_filter_method(requested_user))
try:
user_data = user_list_dict[requested_user]
return user_data
except KeyError:
print("User not found. Exiting")
sys.exit(1)
def get_cognito_users_dataframe(user_list, requested_users_list):
_df = None
for user in requested_users_list:
user_cognito_data = get_cognito_user(user_list, user)
if _df is None:
_df = user_data_to_dataframe(user_cognito_data)
else:
_df = _df.append(user_data_to_dataframe(user_cognito_data))
return _df
def build_cognito_user_dict(user_list, by):
"""
:param user_list: result of get_cognito_user_list
:param by: 'email' or 'uuid'
:return:
"""
if by == 'email':
user_list_dict = {}
for user in user_list:
user_list_dict[user['Username']] = user
return user_list_dict
elif by == 'uuid':
user_list_dict = {}
for user in user_list:
for attribute in user['Attributes']:
if attribute['Name'] == 'sub':
user_list_dict[attribute['Value']] = user
break
return user_list_dict
else:
raise NotImplementedError
def get_cognito_user_list(region_name,pool_name):
client = boto3.client('cognito-idp',region_name=region_name)
pool = get_pool_id(region_name,pool_name)
if not pool:
print("No participant User Pool found. Speak to one of the Rorys")
print("Exiting!")
sys.exit(1)
response = client.list_users(UserPoolId=pool)
user_list = response.get("Users")
page_token = response.get("PaginationToken")
while page_token:
response = client.list_users(
UserPoolId=pool,
PaginationToken=page_token
)
user_list.extend(response.get("Users"))
page_token = response.get("PaginationToken")
return user_list
def get_pool_id(region_name,pool_name):
client = boto3.client('cognito-idp',region_name=region_name)
cognito_details = client.list_user_pools(MaxResults=60)
for user_pool in cognito_details['UserPools']:
if user_pool['Name'] == pool_name:
user_pool_id = user_pool['Id']
return user_pool_id
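# Note: get_pool_id falls through and returns None when no pool with the given name exists;
# get_cognito_user_list checks for that and exits with a message.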
def get_office_user_list(region_name,pool_name):
user_list = get_cognito_user_list(region_name,pool_name)
office_user_list = {}
for user in user_list:
for att in user['Attributes']:
if att['Name'] == "sub":
cog_id = att['Value']
for att in user['Attributes']:
if att['Name'] == "custom:arup_office":
if att['Value'] not in office_user_list:
office_user_list[att['Value']] = []
office_user_list[att['Value']].append(cog_id)
    output = []
    for office, members in office_user_list.items():
        output.append({"office": office, "members": members})
return output
def get_study_stats(region_name,user_stats,pool_name):
user_list = get_cognito_user_list(region_name,pool_name)
office_user_list = get_office_user_list(region_name,pool_name)
user_count = len(user_list)
cog_ids = []
users_data = []
offices = []
planners = 0
for user in user_list:
cog_id = user['Attributes'][0]['Value']
user_data = {"user":user['Username'],"signup":user['UserCreateDate']}
for att in user['Attributes']:
if att['Name'] == "custom:arup_office":
offices.append({"office" : att['Value']})
user_data["office"] = att['Value']
if att['Name'] == "custom:is_transport_planner" and att['Value'] == "true":
planners = planners + 1
users_data.append(user_data)
global_new_user_count = 0
for user in user_stats:
for office in office_user_list:
if user['user'] in office['members']:
if "data" not in office:
office['data'] = []
office['data'].append(user)
for office in office_user_list:
if "data" in office:
record_count = 0
trip_count = 0
for record in office['data']:
trip_count = trip_count + record['trip_count']
record_count = record_count + record['total_records']
office.pop('data')
else:
trip_count = 0
record_count = 0
office['trip_count'] = trip_count
office['record_count'] = record_count
yesterday = (date.today() - timedelta(1)).timetuple()
if "new_users_24hr" not in office:
office['new_users_24hr'] = 0
for user in user_list:
creation_date = user['UserCreateDate'].timetuple()
if creation_date > yesterday:
for record in user['Attributes']:
if record['Name'] == "sub":
cog_id = record['Value']
if cog_id in office['members']:
office['new_users_24hr'] = office['new_users_24hr'] + 1
global_new_user_count = global_new_user_count + 1
for office in office_user_list:
office['User'] = len(office["members"])
office.pop("members")
for office in office_user_list:
if office['office'] == "-1":
office['office'] = "Unkown office (intrigue)"
top_office = sorted(office_user_list, key=lambda k: k['new_users_24hr'],reverse=True)
growth = int(float(global_new_user_count) / len(user_list) * 100.0)
print("{} new users since yesterday").format(global_new_user_count)
summary_stats_df = pd.DataFrame(office_user_list)
summary_stats_df['New users'] = summary_stats_df['new_users_24hr']
summary_stats_df['Points'] = summary_stats_df['record_count']
summary_stats_df['Trips'] = summary_stats_df['trip_count']
output = summary_stats_df.drop(columns=["new_users_24hr","record_count","trip_count"])
output = output[['office',"Trips","New users"]]
output = output.sort_values("Trips",ascending=False)
overall_stats = "```" + tabulate(output, tablefmt="simple", headers="keys",showindex=False) + "```"
return user_count, global_new_user_count, growth, top_office, overall_stats
def find_new_users_since_yesterday(user_list):
yesterday = (date.today() - timedelta(1)).timetuple()
new_user_count = 0
offices = []
for user in user_list:
creation_date = user['UserCreateDate'].timetuple()
if creation_date > yesterday:
new_user_count = new_user_count + 1
for att in user['Attributes']:
if att['Name'] == "custom:arup_office":
offices.append(att['Value'])
return new_user_count, offices
def find_percentage_of_verified_users(region_name, pool_name):
# this is a dummy method to test the integration with AWS Cognito
# email_verified is an attribute that should exist in all user pools
user_list = get_cognito_user_list(region_name,pool_name)
user_count = len(user_list)
verified_user_count = 0
for user in user_list:
for att in user['Attributes']:
if att['Name'] == "email_verified":
if att['Value'] == "true":
verified_user_count += 1
    # share of users whose email address is verified (verified / total)
    verified_user_percentage = (verified_user_count / float(user_count)) * 100
return user_count, verified_user_percentage
def user_data_to_dataframe(user_cognito_data):
flat_user_cognito_data = {}
for key, value in user_cognito_data.items():
if isinstance(value, list):
for attribute in value:
flat_user_cognito_data[attribute['Name']] = attribute['Value']
else:
flat_user_cognito_data[key] = value
return | pd.DataFrame(flat_user_cognito_data, index=[0]) | pandas.DataFrame |
"""
@brief test log(time=6s)
"""
import sys
import unittest
from logging import getLogger
import numpy
import pandas
from pyquickhelper.pycode import ExtTestCase, skipif_circleci, ignore_warnings
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from skl2onnx.common.data_types import (
StringTensorType, FloatTensorType, Int64TensorType,
BooleanTensorType)
from mlprodict.onnxrt import OnnxInference
from mlprodict.onnx_conv import register_converters, to_onnx
from mlprodict.tools.asv_options_helper import get_ir_version_from_onnx
class TestOnnxrtRuntimeLightGbm(ExtTestCase):
def setUp(self):
logger = getLogger('skl2onnx')
logger.disabled = True
register_converters()
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
def test_missing(self):
from mlprodict.onnx_conv.parsers.parse_lightgbm import WrappedLightGbmBooster
r = WrappedLightGbmBooster._generate_classes( # pylint: disable=W0212
dict(num_class=1))
self.assertEqual(r.tolist(), [0, 1])
r = WrappedLightGbmBooster._generate_classes( # pylint: disable=W0212
dict(num_class=3))
self.assertEqual(r.tolist(), [0, 1, 2])
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical(self):
from lightgbm import LGBMClassifier
X = pandas.DataFrame({"A": numpy.random.permutation(['a', 'b', 'c', 'd'] * 75), # str
# int
"B": numpy.random.permutation([1, 2, 3] * 100),
# float
"C": numpy.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),
# bool
"D": numpy.random.permutation([True, False] * 150),
"E": pandas.Categorical(numpy.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
ordered=True)}) # str and ordered categorical
y = numpy.random.permutation([0, 1] * 150)
X_test = pandas.DataFrame({"A": numpy.random.permutation(['a', 'b', 'e'] * 20), # unseen category
"B": numpy.random.permutation([1, 3] * 30),
"C": numpy.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
"D": numpy.random.permutation([True, False] * 30),
"E": pandas.Categorical(numpy.random.permutation(['z', 'y'] * 30),
ordered=True)})
cat_cols_actual = ["A", "B", "C", "D"]
X[cat_cols_actual] = X[cat_cols_actual].astype('category')
X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
gbm0 = LGBMClassifier().fit(X, y)
exp = gbm0.predict(X_test, raw_scores=False)
self.assertNotEmpty(exp)
init_types = [('A', StringTensorType()),
('B', Int64TensorType()),
('C', FloatTensorType()),
('D', BooleanTensorType()),
('E', StringTensorType())]
self.assertRaise(lambda: to_onnx(gbm0, initial_types=init_types), RuntimeError,
"at most 1 input(s) is(are) supported")
X = X[['C']].values.astype(numpy.float32)
X_test = X_test[['C']].values.astype(numpy.float32)
gbm0 = LGBMClassifier().fit(X, y, categorical_feature=[0])
exp = gbm0.predict_proba(X_test, raw_scores=False)
model_def = to_onnx(gbm0, X)
self.assertIn('ZipMap', str(model_def))
oinf = OnnxInference(model_def)
y = oinf.run({'X': X_test})
self.assertEqual(list(sorted(y)),
['output_label', 'output_probability'])
df = pandas.DataFrame(y['output_probability'])
self.assertEqual(df.shape, (X_test.shape[0], 2))
self.assertEqual(exp.shape, (X_test.shape[0], 2))
# self.assertEqualArray(exp, df.values, decimal=6)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical3(self):
from lightgbm import LGBMClassifier
X = pandas.DataFrame({"A": numpy.random.permutation(['a', 'b', 'c', 'd'] * 75), # str
# int
"B": numpy.random.permutation([1, 2, 3] * 100),
# float
"C": numpy.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),
# bool
"D": numpy.random.permutation([True, False] * 150),
"E": pandas.Categorical(numpy.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
ordered=True)}) # str and ordered categorical
y = numpy.random.permutation([0, 1, 2] * 100)
X_test = pandas.DataFrame({"A": numpy.random.permutation(['a', 'b', 'e'] * 20), # unseen category
"B": numpy.random.permutation([1, 3] * 30),
"C": numpy.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
"D": numpy.random.permutation([True, False] * 30),
"E": pandas.Categorical(numpy.random.permutation(['z', 'y'] * 30),
ordered=True)})
cat_cols_actual = ["A", "B", "C", "D"]
X[cat_cols_actual] = X[cat_cols_actual].astype('category')
X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
gbm0 = LGBMClassifier().fit(X, y)
exp = gbm0.predict(X_test, raw_scores=False)
self.assertNotEmpty(exp)
init_types = [('A', StringTensorType()),
('B', Int64TensorType()),
('C', FloatTensorType()),
('D', BooleanTensorType()),
('E', StringTensorType())]
self.assertRaise(lambda: to_onnx(gbm0, initial_types=init_types), RuntimeError,
"at most 1 input(s) is(are) supported")
X = X[['C']].values.astype(numpy.float32)
X_test = X_test[['C']].values.astype(numpy.float32)
gbm0 = LGBMClassifier().fit(X, y, categorical_feature=[0])
exp = gbm0.predict_proba(X_test, raw_scores=False)
model_def = to_onnx(gbm0, X)
self.assertIn('ZipMap', str(model_def))
oinf = OnnxInference(model_def)
y = oinf.run({'X': X_test})
self.assertEqual(list(sorted(y)),
['output_label', 'output_probability'])
df = pandas.DataFrame(y['output_probability'])
self.assertEqual(df.shape, (X_test.shape[0], 3))
self.assertEqual(exp.shape, (X_test.shape[0], 3))
# self.assertEqualArray(exp, df.values, decimal=6)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical_iris(self):
from lightgbm import LGBMClassifier, Dataset, train as lgb_train
iris = load_iris()
X, y = iris.data, iris.target
X = (X * 10).astype(numpy.int32)
X_train, X_test, y_train, _ = train_test_split(
X, y, random_state=11)
other_x = numpy.random.randint(
0, high=10, size=(1500, X_train.shape[1]))
X_train = numpy.vstack([X_train, other_x]).astype(dtype=numpy.int32)
y_train = numpy.hstack(
[y_train, numpy.zeros(500) + 3, numpy.zeros(500) + 4,
numpy.zeros(500) + 5]).astype(dtype=numpy.int32)
self.assertEqual(y_train.shape, (X_train.shape[0], ))
y_train = y_train % 2
# Classic
gbm = LGBMClassifier()
gbm.fit(X_train, y_train)
exp = gbm.predict_proba(X_test)
onx = to_onnx(gbm, initial_types=[
('X', Int64TensorType([None, X_train.shape[1]]))])
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run({'X': X_test})
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values, decimal=5)
# categorical_feature=[0, 1]
train_data = Dataset(
X_train, label=y_train,
feature_name=['c1', 'c2', 'c3', 'c4'],
categorical_feature=['c1', 'c2'])
params = {
"boosting_type": "gbdt",
"learning_rate": 0.05,
"n_estimators": 2,
"objective": "binary",
"max_bin": 5,
"min_child_samples": 100,
'verbose': -1,
}
booster = lgb_train(params, train_data)
exp = booster.predict(X_test)
onx = to_onnx(booster, initial_types=[
('X', Int64TensorType([None, X_train.shape[1]]))])
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run({'X': X_test})
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values[:, 1], decimal=5)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical_iris_booster3(self):
from lightgbm import LGBMClassifier, Dataset, train as lgb_train
iris = load_iris()
X, y = iris.data, iris.target
X = (X * 10).astype(numpy.int32)
X_train, X_test, y_train, _ = train_test_split(
X, y, random_state=11)
other_x = numpy.random.randint(
0, high=10, size=(1500, X_train.shape[1]))
X_train = numpy.vstack([X_train, other_x]).astype(dtype=numpy.int32)
y_train = numpy.hstack(
[y_train, numpy.zeros(500) + 3, numpy.zeros(500) + 4,
numpy.zeros(500) + 5]).astype(dtype=numpy.int32)
self.assertEqual(y_train.shape, (X_train.shape[0], ))
# Classic
gbm = LGBMClassifier()
gbm.fit(X_train, y_train)
exp = gbm.predict_proba(X_test)
onx = to_onnx(gbm, initial_types=[
('X', Int64TensorType([None, X_train.shape[1]]))])
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run({'X': X_test})
values = | pandas.DataFrame(got['output_probability']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: Eastmoney (东方财富网) - quotes home page - Shanghai/Shenzhen/Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
    Eastmoney - Shanghai/Shenzhen/Beijing A shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
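# Usage sketch (illustrative only): pull the real-time A-share snapshot and show the
# five largest gainers; the column names are the Chinese headers assigned above.
def _demo_stock_zh_a_spot_em() -> pd.DataFrame:  # pragma: no cover
    spot_df = stock_zh_a_spot_em()
    if not spot_df.empty:
        print(spot_df.sort_values("涨跌幅", ascending=False).head(5)[["代码", "名称", "最新价", "涨跌幅"]])
    return spot_df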
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
    Eastmoney - B shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
    Eastmoney - mapping from stock code to market code
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: mapping from stock code to market code
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
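# Note: the mapping above resolves each ticker to Eastmoney's market prefix
# (1 for Shanghai, 0 for Shenzhen and Beijing), which the history functions below
# combine into a "secid" such as "0.000001". Usage sketch (the ticker is an example):
def _demo_code_id_map_em() -> None:  # pragma: no cover
    code_id_dict = code_id_map_em()
    symbol = "000001"  # example ticker
    if symbol in code_id_dict:
        print(f"secid for {symbol}: {code_id_dict[symbol]}.{symbol}")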
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney - quotes home page - Shanghai/Shenzhen/Beijing A shares - daily quotes
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": "forward adjusted", "hfq": "backward adjusted", "": "unadjusted"}
    :type adjust: str
    :return: daily quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
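# Usage sketch (the symbol and date range are example values only): fetch one year
# of forward-adjusted daily bars and report the close-to-close change.
def _demo_stock_zh_a_hist() -> pd.DataFrame:  # pragma: no cover
    hist_df = stock_zh_a_hist(symbol="600519", period="daily",
                              start_date="20230101", end_date="20231231", adjust="qfq")
    if not hist_df.empty:
        change = (hist_df["收盘"].iloc[-1] / hist_df["收盘"].iloc[0] - 1) * 100
        print(f"close-to-close change over the period: {change:.2f}%")
    return hist_df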
def stock_zh_a_hist_min_em(
symbol: str = "000001",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
period: str = "5",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney - quotes home page - Shanghai/Shenzhen/Beijing A shares - intraday (minute-level) quotes
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param period: choice of {'1', '5', '15', '30', '60'}
    :type period: str
    :param adjust: choice of {'', 'qfq', 'hfq'}
    :type adjust: str
    :return: intraday (minute-level) quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "https://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"ndays": "5",
"iscr": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
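# Usage sketch (the symbol and bar size are example values only): request 5-minute
# bars and summarise the turnover column returned by the non-1-minute branch above.
def _demo_stock_zh_a_hist_min_em() -> pd.DataFrame:  # pragma: no cover
    min_df = stock_zh_a_hist_min_em(symbol="000001", period="5")
    if not min_df.empty:
        print(f"{len(min_df)} bars, total turnover {min_df['成交额'].sum():.0f}")
    return min_df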
def stock_zh_a_hist_pre_min_em(
symbol: str = "000001",
start_time: str = "09:00:00",
end_time: str = "15:50:00",
) -> pd.DataFrame:
"""
    Eastmoney - quotes home page - Shanghai/Shenzhen/Beijing A shares - intraday quotes including pre-market data
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_time: start time
    :type start_time: str
    :param end_time: end time
    :type end_time: str
    :return: intraday quotes including pre-market data
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "https://push2.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"ndays": "1",
"iscr": "1",
"iscca": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
date_format = temp_df.index[0].date().isoformat()
temp_df = temp_df[
date_format + " " + start_time : date_format + " " + end_time
]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
def stock_hk_spot_em() -> pd.DataFrame:
"""
    Eastmoney - Hong Kong stocks - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hk_stocks
    :return: Hong Kong stocks real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:128 t:3,m:128 t:4,m:128 t:1,m:128 t:2",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"今开",
"最高",
"最低",
"昨收",
"成交量",
"成交额",
]
]
temp_df["序号"] = pd.to_numeric(temp_df["序号"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
return temp_df
def stock_hk_hist(
symbol: str = "40224",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "22220101",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney - quotes - Hong Kong stocks - daily quotes
    http://quote.eastmoney.com/hk/08367.html
    :param symbol: Hong Kong stock code
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": "forward adjusted", "hfq": "backward adjusted", "": "unadjusted"}
    :type adjust: str
    :return: daily quotes
:rtype: pandas.DataFrame
"""
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://33.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"116.{symbol}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"end": "20500000",
"lmt": "1000000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
if temp_df.empty:
return pd.DataFrame()
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df = temp_df[start_date:end_date]
if temp_df.empty:
return pd.DataFrame()
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
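# Usage sketch ("00700" and the dates are example values only): unadjusted daily
# bars for a Hong Kong ticker, printing the most recent rows.
def _demo_stock_hk_hist() -> pd.DataFrame:  # pragma: no cover
    hk_df = stock_hk_hist(symbol="00700", period="daily",
                          start_date="20230101", end_date="20231231", adjust="")
    if not hk_df.empty:
        print(hk_df.tail())
    return hk_df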
def stock_hk_hist_min_em(
symbol: str = "01611",
period: str = "1",
adjust: str = "",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
) -> pd.DataFrame:
"""
    Eastmoney - quotes - Hong Kong stocks - intraday (minute-level) quotes
    http://quote.eastmoney.com/hk/00948.html
    :param symbol: stock code
    :type symbol: str
    :param period: choice of {'1', '5', '15', '30', '60'}
    :type period: str
    :param adjust: choice of {'', 'qfq', 'hfq'}
    :type adjust: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: intraday (minute-level) quotes
:rtype: pandas.DataFrame
"""
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "http://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"116.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"116.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
def stock_us_spot_em() -> pd.DataFrame:
"""
    Eastmoney - US stocks - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#us_stocks
    :return: US stocks real-time quotes; data delayed by 15 min
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "20000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:105,m:106,m:107",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"_",
"_",
"_",
"简称",
"编码",
"名称",
"最高价",
"最低价",
"开盘价",
"昨收价",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"市盈率",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df["代码"] = temp_df["编码"].astype(str) + "." + temp_df["简称"]
temp_df = temp_df[
[
"序号",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"开盘价",
"最高价",
"最低价",
"昨收价",
"总市值",
"市盈率",
"成交量",
"成交额",
"振幅",
"换手率",
"代码",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["昨收价"] = pd.to_ | numeric(temp_df["昨收价"], errors="coerce") | pandas.to_numeric |
import os
import sys
import re
import json
import yaml
import pandas as pd
import numpy as np
sys.path.append('../')
from load_paths import load_box_paths
# fall back to a default Location when the calling script has not defined one
try:
    print(Location)
except NameError:
    if os.name == "posix":
        Location = "NUCLUSTER"
    else:
        Location = "Local"
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)
class covidModel:
def __init__(self,subgroups, expandModel, observeLevel='primary', add_interventions='baseline',
change_testDelay=False, intervention_config='intervention_emodl_config.yaml',
add_migration=False, fit_params=None,emodl_name=None, git_dir=git_dir):
self.model = 'locale'
self.grpList = subgroups
self.expandModel = expandModel
self.add_migration = add_migration
self.observeLevel = observeLevel
self.add_interventions = add_interventions
self.change_testDelay = change_testDelay
self.intervention_config = intervention_config
self.emodl_name = emodl_name
self.startdate = pd.Timestamp('2020-02-13')
self.emodl_dir = os.path.join(git_dir, 'emodl')
self.fit_param = fit_params # Currenly support single parameter only
def get_configs(key, config_file='intervention_emodl_config.yaml'):
yaml_file = open(os.path.join('./experiment_configs', config_file))
config_dic = yaml.safe_load(yaml_file)
config_dic = config_dic[key]
return config_dic
## For postprocessing that splits by '_', it is easier if EMS are names EMS-1 not EMS_1
## This might change depending on the postprocessing
def sub(x):
xout = re.sub('_', '-', str(x), count=1)
return xout
def DateToTimestep(date, startdate):
datediff = date - startdate
timestep = datediff.days
return timestep
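    # Worked example: with the startdate used by this class, pd.Timestamp('2020-02-13'),
    # DateToTimestep(pd.Timestamp('2020-03-01'), startdate) returns 17, i.e. the
    # simulation timestep that falls 17 days after the model start.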
def get_trigger(grp, channel):
grp_nr = grp.replace('EMS_','')
file_path = os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'hospital_capacity_thresholds')
files = os.listdir(file_path)
files = [name for name in files if not 'extra_thresholds' in name]
filedates = [item.replace('capacity_weekday_average_', '') for item in files]
filedates = [item.replace('.csv', '') for item in filedates]
latest_filedate = max([int(x) for x in filedates])
fname = 'capacity_weekday_average_' + str(latest_filedate) + '.csv'
ems_fname = os.path.join(datapath, 'covid_IDPH/Corona virus reports/hospital_capacity_thresholds/', fname)
df = pd.read_csv(ems_fname)
df = df.drop_duplicates()
df = df[df['geography_modeled'] == f'covidregion_{grp_nr}']
df = df[df['overflow_threshold_percent'] == 1]
df['ems'] = df['geography_modeled']
df['ems'] = df['geography_modeled'].replace("covidregion_", "", regex=True)
df = df[['ems', 'resource_type', 'avg_resource_available']]
df = df.drop_duplicates()
## if conflicting numbers, take the lower ones!
        dups = df.groupby(["ems", "resource_type"])["avg_resource_available"].nunique()
        if (dups > 1).any():
            print(f'{ems_fname} contains multiple capacity values, selecting the lower ones.')
        # when a region/resource pair reports conflicting capacities, keep the lower value
        df = df.loc[df.groupby(["ems", "resource_type"])["avg_resource_available"].idxmin()]
df = df.pivot(index='ems', columns='resource_type', values='avg_resource_available')
df.index.name = 'ems'
df.reset_index(inplace=True)
        # the available hospital-bed capacity feeds both the 'hosp_det' and the
        # 'total_hosp_census' trigger channels
        df = df.rename(columns={'hb_availforcovid': 'hosp_det',
                                'icu_availforcovid': 'crit_det',
                                'vent_availforcovid': 'ventilators'})
        df['total_hosp_census'] = df['hosp_det']
return int(df[channel])
def get_species(self):
state_SE = ['S', 'E']
state_nosymptoms = ['As', 'As_det1', 'P', 'P_det']
state_symptoms = ['Sym', 'Sym_det2', 'Sys', 'Sys_det3']
# state_hospitalized = ['H1', 'H2', 'H3', 'H1_det3', 'H2_det3', 'H3_det3']
state_hospitalized = ['H1', 'H2pre', 'H2post', 'H3', 'H1_det3', 'H2pre_det3', 'H2post_det3', 'H3_det3']
state_critical = ['C2', 'C3', 'C2_det3', 'C3_det3']
state_deaths = ['D3', 'D3_det3']
state_recoveries = ['RAs', 'RSym', 'RH1', 'RC2', 'RAs_det1', 'RSym_det2', 'RH1_det3', 'RC2_det3']
state_testDelay_SymSys = ['Sym_preD', 'Sys_preD']
state_testDelay_AsSymSys = ['As_preD', 'Sym_preD', 'Sym_det2a', 'Sym_det2b', 'Sys_preD', 'Sys_det3a', 'Sys_det3b']
state_variables = state_SE + state_nosymptoms + state_symptoms + state_hospitalized + state_critical + state_deaths + state_recoveries
if self.expandModel == "SymSys" or self.expandModel == "uniform":
state_variables = state_variables + state_testDelay_SymSys
if self.expandModel == "AsSymSys":
state_variables = state_variables + state_testDelay_AsSymSys
if 'vaccine' in self.add_interventions:
state_variables_vaccine = [f'{state}_V' for state in state_variables ]
state_variables = state_variables + state_variables_vaccine
return state_variables
def write_species(self, grp):
state_variables = covidModel.get_species(self)
def write_species_emodl():
grp_suffix = "::{grp}"
grp_suffix2 = "_{grp}"
species_emodl = ""
for state in state_variables:
if state == "S":
species_emodl = species_emodl + f'(species {state}{grp_suffix} @speciesS{grp_suffix2}@)\n'
else:
species_emodl = species_emodl + f'(species {state}{grp_suffix} 0)\n'
return species_emodl
def write_species_str(species_emodl, grp):
grp = str(grp)
species_str = species_emodl.format(grp=grp)
return species_str
species_emodl = write_species_emodl()
species_str = write_species_str(species_emodl, grp)
return species_str
def get_channels(self):
"""Channels to exclude from final list"""
channels_not_observe = ['presymp_det','presymp_cumul','presymp_det_cumul']
"""Define channels to observe """
primary_channels_notdet = ['susceptible','infected','recovered','symp_mild','symp_severe','hosp','crit','deaths']
secondary_channels_notdet = ['exposed','asymp','presymp','detected']
tertiary_channels = ['infectious_undet', 'infectious_det', 'infectious_det_symp', 'infectious_det_AsP']
channels_notdet = primary_channels_notdet
if self.observeLevel != 'primary':
channels_notdet = channels_notdet + secondary_channels_notdet
channels_det = [channel + '_det' for channel in channels_notdet if channel not in ['susceptible', 'exposed','detected']]
channels_cumul = [channel + '_cumul' for channel in channels_notdet + channels_det
if channel not in ['susceptible','exposed', 'recovered', 'deaths', 'recovered_det']]
channels = channels_notdet + channels_det + channels_cumul
if self.observeLevel == 'tertiary':
channels = channels + tertiary_channels
channels = [channel for channel in channels if channel not in channels_not_observe]
channels = list(set(channels))
if 'vaccine' in self.add_interventions:
channels_vaccine = [f'{channel}_V' for channel in channels]
channels = channels + channels_vaccine
return channels
def write_observe(self, grp):
grp = str(grp)
grpout = covidModel.sub(grp)
def write_observe_emodl():
#grp_suffix = "::{grp}"
#grp_suffix2 = "_{grp}"
if 'vaccine' in self.add_interventions:
channels = covidModel.get_channels(self)
channels = channels[int(len(channels) / 2):]
observe_emodl = f"(observe vaccinated_cumul_{grpout} vaccinated_cumul_{grp})\n"
for channel in channels:
if channel == 'crit_V':
channel = 'critical_V'
if channel == 'hosp_V':
channel = 'hospitalized_V'
if channel == "susceptible_V":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} S_V::{grp})\n'
elif channel == "exposed_V":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} E_V::{grp})\n'
elif channel == "deaths_det_V":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} D3_det3_V::{grp})\n'
else:
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} {channel}_{grp})\n'
channels = covidModel.get_channels(self)
channels = channels[:int(len(channels) / 2)]
for channel in channels:
if channel == 'crit':
channel = 'critical'
if channel == 'hosp':
channel = 'hospitalized'
if channel == "susceptible":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} (+ S::{grp} S_V::{grp}))\n'
elif channel == "exposed":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} (+ E::{grp} E_V::{grp}))\n'
elif channel == "deaths_det":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} (+ D3_det3::{grp} D3_det3_V::{grp}))\n'
else:
observe_emodl= observe_emodl + f'(observe {channel}_{grpout} (+ {channel}_{grp} {channel}_V_{grp}))\n'
else:
channels = covidModel.get_channels(self)
observe_emodl = ""
for channel in channels:
if channel == 'crit':
channel = 'critical'
if channel == 'hosp':
channel = 'hospitalized'
if channel == "susceptible":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} S::{grp})\n'
elif channel == "exposed":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} E::{grp})\n'
elif channel == "deaths_det":
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} D3_det3::{grp})\n'
else:
observe_emodl = observe_emodl + f'(observe {channel}_{grpout} {channel}_{grp})\n'
"""Observe all state variables over time"""
if self.observeLevel=='all':
state_variables = covidModel.get_species(self)
for state in state_variables:
observe_emodl = observe_emodl + f'(observe {state}_{grp} {state}::{grp})\n'
return observe_emodl
def write_observe_str(observe_emodl, grp):
grp = str(grp)
observe_str = observe_emodl.format(grp=grp)
return observe_str
observe_emodl = write_observe_emodl()
observe_str = write_observe_str(observe_emodl, grp)
return observe_str
def write_functions(self, grp):
grp = str(grp)
func_dic = {'presymp_{grp}': ['P::{grp}', 'P_det::{grp}'],
'hospitalized_{grp}': ['H1::{grp}', 'H2pre::{grp}', 'H2post::{grp}', 'H3::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}'],
'hosp_det_{grp}': ['H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}'],
'critical_{grp}': ['C2::{grp}', 'C3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'crit_det_{grp}': ['C2_det3::{grp}', 'C3_det3::{grp}'],
'deaths_{grp}': ['D3::{grp}', 'D3_det3::{grp}'],
'recovered_{grp}': ['RAs::{grp}', 'RSym::{grp}', 'RH1::{grp}', 'RC2::{grp}', 'RAs_det1::{grp}', 'RSym_det2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}'],
'recovered_det_{grp}': ['RAs_det1::{grp}', 'RSym_det2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}'],
'asymp_cumul_{grp}': ['asymp_{grp}', 'RAs::{grp}', 'RAs_det1::{grp}'],
'asymp_det_cumul_{grp}': ['As_det1::{grp}', 'RAs_det1::{grp}'],
'symp_mild_cumul_{grp}': ['symp_mild_{grp}', 'RSym::{grp}', 'RSym_det2::{grp}'],
'symp_mild_det_cumul_{grp}': ['symp_mild_det_{grp}', 'RSym_det2::{grp}'],
'symp_severe_cumul_{grp}': ['symp_severe_{grp}', 'hospitalized_{grp}', 'critical_{grp}', 'deaths_{grp}', 'RH1::{grp}', 'RC2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}'],
'symp_severe_det_cumul_{grp}': ['symp_severe_det_{grp}', 'hosp_det_{grp}', 'crit_det_{grp}', 'D3_det3::{grp}', ' RH1_det3::{grp}', 'RC2_det3::{grp}'],
'hosp_cumul_{grp}': ['hospitalized_{grp}', 'critical_{grp}', 'deaths_{grp}', 'RH1::{grp}', 'RC2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}'],
'hosp_det_cumul_{grp}': ['H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', ' H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}', 'D3_det3::{grp}', ' RH1_det3::{grp}', ' RC2_det3::{grp}'],
'crit_cumul_{grp}': ['deaths_{grp}', 'critical_{grp}', 'RC2::{grp}', 'RC2_det3::{grp}'],
'crit_det_cumul_{grp}': ['C2_det3::{grp}', 'C3_det3::{grp}', 'D3_det3::{grp}', 'RC2_det3::{grp}'],
'detected_cumul_{grp}': ['As_det1::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', ' H2post_det3::{grp}', ' C2_det3::{grp}', 'C3_det3::{grp}', 'RAs_det1::{grp}', 'RSym_det2::{grp}', 'RH1_det3::{grp}', 'RC2_det3::{grp}', 'D3_det3::{grp}'],
'infected_{grp}': ['infectious_det_{grp}', 'infectious_undet_{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infected_det_{grp}': ['infectious_det_{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infected_cumul_{grp}': ['infected_{grp}', 'recovered_{grp}', 'deaths_{grp}'],
'infected_det_cumul_{grp}': ['infected_det_{grp}', 'recovered_det_{grp}', 'D3_det3::{grp}']
}
func_dic_base = {'asymp_{grp}': ['As::{grp}', 'As_det1::{grp}'],
'symp_mild_{grp}': ['Sym::{grp}', 'Sym_det2::{grp}'],
'symp_severe_{grp}': ['Sys::{grp}', 'Sys_det3::{grp}'],
'detected_{grp}': ['As_det1::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', ' H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infectious_undet_{grp}': ['As::{grp}', 'P::{grp}', 'Sym::{grp}', 'Sys::{grp}', 'H1::{grp}', 'H2pre::{grp}', 'H2post::{grp}', ' H3::{grp}', 'C2::{grp}', 'C3::{grp}'],
'infectious_det_{grp}': ['As_det1::{grp}', 'P_det::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}'],
'infectious_det_symp_{grp}': ['Sym_det2::{grp}', 'Sys_det3::{grp}'],
'infectious_det_AsP_{grp}': ['As_det1::{grp}', 'P_det::{grp}']
}
func_dic_SymSys = {'asymp_{grp}': ['As::{grp}', 'As_det1::{grp}'],
'symp_mild_{grp}': ['Sym::{grp}', 'Sym_preD::{grp}', 'Sym_det2::{grp}'],
'symp_mild_det_{grp}': ['Sym_preD::{grp}', 'Sym_det2::{grp}'],
'symp_severe_{grp}': ['Sys::{grp}', 'Sys_preD::{grp}', 'Sys_det3::{grp}'],
'symp_severe_det_{grp}': ['Sys_preD::{grp}', 'Sys_det3::{grp}'],
'detected_{grp}': ['As_det1::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', ' H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infectious_undet_{grp}': ['As::{grp}', 'P::{grp}', 'Sym_preD::{grp}', 'Sym::{grp}', 'Sys_preD::{grp}', 'Sys::{grp}', 'H1::{grp}', 'H2pre::{grp}', ' H2post::{grp}', ' H3::{grp}', 'C2::{grp}', 'C3::{grp}'],
'infectious_det_{grp}': ['As_det1::{grp}', 'P_det::{grp}', 'Sym_det2::{grp}', 'Sys_det3::{grp}'],
'infectious_det_symp_{grp}': ['Sym_det2::{grp}', 'Sys_det3::{grp}'],
'infectious_det_AsP_{grp}': ['As_det1::{grp}', 'P_det::{grp}']
}
func_dic_AsSymSys = {'asymp_{grp}': ['As_preD::{grp}', 'As::{grp}', 'As_det1::{grp}'],
'symp_mild_{grp}': ['Sym::{grp}', 'Sym_preD::{grp}', 'Sym_det2a::{grp}', 'Sym_det2b::{grp}'],
'symp_mild_det_{grp}': ['Sym_preD::{grp}', 'Sym_det2a::{grp}', 'Sym_det2b::{grp}'],
'symp_severe_{grp}': ['Sys::{grp}', 'Sys_preD::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}'],
'symp_severe_det_{grp}': ['Sys_preD::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}'],
'detected_{grp}': ['As_det1::{grp}', 'Sym_det2a::{grp}', 'Sym_det2b::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}', 'H1_det3::{grp}', 'H2pre_det3::{grp}', 'H2post_det3::{grp}', 'H3_det3::{grp}', 'C2_det3::{grp}', 'C3_det3::{grp}'],
'infectious_undet_{grp}': ['As_preD::{grp}', 'As::{grp}', 'P::{grp}', 'Sym::{grp}', 'Sym_preD::{grp}', 'Sys::{grp}', 'Sys_preD::{grp}', 'H1::{grp}', 'H2pre::{grp}', 'H2post::{grp}', 'H3::{grp}', 'C2::{grp}', 'C3::{grp}'],
'infectious_det_{grp}': ['As_det1::{grp}', 'P_det::{grp}', 'Sym_det2a::{grp}', 'Sym_det2b::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}'],
'infectious_undet_symp_{grp}': ['P::{grp}', 'Sym::{grp}', 'Sym_preD::{grp}', 'Sys::{grp}', 'Sys_preD::{grp}', 'H1::{grp}', 'H2pre::{grp}', 'H2post::{grp}', 'H3::{grp}', 'C2::{grp}', 'C3::{grp}'],
'infectious_undet_As_{grp}': ['As_preD::{grp}', 'As::{grp}'],
'infectious_det_symp_{grp}': ['Sym_det2a::{grp}', 'Sym_det2b::{grp}', 'Sys_det3a::{grp}', 'Sys_det3b::{grp}'],
'infectious_det_AsP_{grp}': ['As_det1::{grp}', 'P_det::{grp}']
}
func_str = f'(func deaths_det_cumul_{grp} D3_det3::{grp})\n(func asymp_det_{grp} As_det1::{grp})\n'
if self.expandModel == "SymSys" or self.expandModel == "uniform":
func_dic_SymSys.update(func_dic)
func_dic_all = func_dic_SymSys
elif self.expandModel == "AsSymSys":
func_dic_AsSymSys.update(func_dic)
func_dic_all = func_dic_AsSymSys
else:
func_str = func_str + f'(func symp_mild_det_{grp} Sym_det2::{grp})\n' \
f'(func symp_severe_det_{grp} Sys_det3::{grp})\n'
func_dic_base.update(func_dic)
func_dic_all = func_dic_base
if 'vaccine' in self.add_interventions:
vacc_cumul = f'(func vaccinated_cumul_{grp} (+ S_V::{grp} infected_V_{grp} recovered_V_{grp} deaths_V_{grp} ))\n'
func_str_V = func_str.replace(f'_{grp}',f'_V_{grp}')
func_str_V = func_str_V.replace(f'::{grp}',f'_V::{grp}')
func_str = func_str + func_str_V
func_dic_all_V = {}
for key, value in func_dic_all.items():
key_V = key.replace('_{grp}','_V_{grp}')
func_dic_all_V[key_V] = [item.replace('_{grp}','_V_{grp}') if '_{grp}' in item
else item.replace('::{grp}','_V::{grp}') for item in func_dic_all[key]]
func_dic_all.update(func_dic_all_V)
for key in func_dic_all.keys():
func_str = func_str + f"(func {key} (+ {' '.join(func_dic_all[key])}))\n".format(grp=grp)
if 'vaccine' in self.add_interventions:
func_str = func_str + vacc_cumul
return func_str
def write_params(self):
yaml_sampled_param = list(covidModel.get_configs(key ='sampled_parameters', config_file='extendedcobey_200428.yaml').keys())
yaml_sampled_param_str = ''.join([f'(param {param} @{param}@)\n' for param in yaml_sampled_param])
"""calculated parameters"""
param_dic = {'fraction_hospitalized' : '(- 1 (+ fraction_critical fraction_dead))',
'Kr_a' : '(/ 1 recovery_time_asymp)',
'Kr_m' : '(/ 1 recovery_time_mild)',
'Kl' : '(/ (- 1 fraction_symptomatic ) time_to_infectious)',
'Ks' :'(/ fraction_symptomatic time_to_infectious)',
'Ksys' :'(* fraction_severe (/ 1 time_to_symptoms))',
'Ksym' :'(* (- 1 fraction_severe) (/ 1 time_to_symptoms))',
'Km' :'(/ 1 time_to_death)',
'Kc' :'(/ 1 time_to_critical)',
'Kr_hc' :'(/ 1 recovery_time_postcrit)',
'Kr_h' :'(/ 1 recovery_time_hosp)',
'Kr_c' :'(/ 1 recovery_time_crit)'
}
param_dic_base = {'Kh1':'(/ fraction_hospitalized time_to_hospitalization)',
'Kh2':'(/ fraction_critical time_to_hospitalization )',
'Kh3':'(/ fraction_dead time_to_hospitalization)'
}
param_dic_uniform = {'time_D':'@time_to_detection@',
'Ksym_D':'(/ 1 time_D)',
'Ksys_D':'(/ 1 time_D)',
'Kh1':'(/ fraction_hospitalized time_to_hospitalization)',
'Kh2':'(/ fraction_critical time_to_hospitalization )',
'Kh3':'(/ fraction_dead time_to_hospitalization)',
'Kh1_D':'(/ fraction_hospitalized (- time_to_hospitalization time_D))',
'Kh2_D':'(/ fraction_critical (- time_to_hospitalization time_D) )',
'Kh3_D':'(/ fraction_dead (- time_to_hospitalization time_D))',
'Kr_m_D':'(/ 1 (- recovery_time_mild time_D ))'
}
param_dic_SymSys = {'time_D_Sym':'@time_to_detection_Sym@',
'time_D_Sys':'@time_to_detection_Sys@',
'Ksym_D':'(/ 1 time_D_Sym)',
'Ksys_D':'(/ 1 time_D_Sys)',
'Kh1':'(/ fraction_hospitalized time_to_hospitalization)',
'Kh2':'(/ fraction_critical time_to_hospitalization )',
'Kh3':'(/ fraction_dead time_to_hospitalization)',
'Kh1_D':'(/ fraction_hospitalized (- time_to_hospitalization time_D_Sys))',
'Kh2_D':'(/ fraction_critical (- time_to_hospitalization time_D_Sys) )',
'Kh3_D':'(/ fraction_dead (- time_to_hospitalization time_D_Sys))',
'Kr_m_D':'(/ 1 (- recovery_time_mild time_D_Sym ))'
}
param_dic_AsSymSys = {'Kh1':'(/ fraction_hospitalized time_to_hospitalization)',
'Kh2':'(/ fraction_critical time_to_hospitalization )',
'Kh3':'(/ fraction_dead time_to_hospitalization)',
'time_D_Sys':'@time_to_detection_Sys@',
'Ksys_D':'(/ 1 time_D_Sys)',
'Kh1_D':'(/ fraction_hospitalized (- time_to_hospitalization time_D_Sys))',
'Kh2_D':'(/ fraction_critical (- time_to_hospitalization time_D_Sys) )',
'Kh3_D':'(/ fraction_dead (- time_to_hospitalization time_D_Sys))',
'time_D_Sym':'@time_to_detection_Sym@',
'Ksym_D':'(/ 1 time_D_Sym)',
'Kr_m_D':'(/ 1 (- recovery_time_mild time_D_Sym ))',
'time_D_As':'@time_to_detection_As@',
'Kl_D':'(/ 1 time_D_As)',
'Kr_a_D':'(/ 1 (- recovery_time_asymp time_D_As ))'
}
if self.expandModel == "SymSys":
param_dic_expand = param_dic_SymSys
elif self.expandModel == "uniform":
param_dic_expand = param_dic_uniform
elif self.expandModel == "AsSymSys":
param_dic_expand = param_dic_AsSymSys
else:
param_dic_expand = param_dic_base
calculated_params_str = ''.join([f'(param {key} {param_dic[key]})\n' for key in list(param_dic.keys())])
calculated_params_expand_str = ''.join([f'(param {key} {param_dic_expand[key]})\n' for key in list(param_dic_expand.keys())])
params_str = yaml_sampled_param_str + calculated_params_str + calculated_params_expand_str
if 'vaccine' in self.add_interventions:
#custom_param_vacc = ['fraction_symptomatic_V', 'fraction_severe_V']
custom_param_vacc_str = '(param fraction_symptomatic_V (* fraction_symptomatic @reduced_fraction_Sym@))\n' \
'(param fraction_severe_V (* fraction_severe @reduced_fraction_Sys@))\n'
param_symptoms_dic_V = {'KlV ': '(/ (- 1 fraction_symptomatic_V ) time_to_infectious)',
'KsV ': '(/ fraction_symptomatic_V time_to_infectious)',
'KsysV ': '(* fraction_severe_V (/ 1 time_to_symptoms))',
'KsymV ': '(* (- 1 fraction_severe_V ) (/ 1 time_to_symptoms))'
}
param_symptoms_str_V = ''.join([f'(param {key} {param_symptoms_dic_V[key]})\n' for key in list(param_symptoms_dic_V.keys())])
params_str = params_str + custom_param_vacc_str + param_symptoms_str_V
return params_str
def write_migration_param(self):
x1 = range(1, len(self.grpList) + 1)
x2 = range(1, len(self.grpList) + 1)
param_str = ""
for x1_i in x1:
param_str = param_str + "\n"
for x2_i in x2:
# x1_i=1
param_str = param_str + f'(param toEMS_{x1_i}_from_EMS_{x2_i} @toEMS_{x1_i}_from_EMS_{x2_i}@)\n'
return param_str
def write_travel_reaction(grp, travelspeciesList=None):
x1_i = int(grp.split("_")[1])
x2 = list(range(1, 12))
x2 = [i for i in x2 if i != x1_i]
reaction_str = ""
if travelspeciesList == None:
travelspeciesList = ["S", "E", "As", "P"]
for travelspecies in travelspeciesList:
reaction_str = reaction_str + "\n"
for x2_i in x2:
# x1_i=1
reaction_str = reaction_str + f'\n(reaction {travelspecies}_travel_EMS_{x2_i}to{x1_i} ' \
f'({travelspecies}::EMS_{x2_i}) ({travelspecies}::EMS_{x1_i}) ' \
f'(* {travelspecies}::EMS_{x2_i} toEMS_{x1_i}_from_EMS_{x2_i} ' \
f'(/ N_EMS_{x2_i} ' \
f'(+ S::EMS_{x2_i} E::EMS_{x2_i} As::EMS_{x2_i} P::EMS_{x2_i} recovered_EMS_{x2_i})' \
f')))\n'
return reaction_str
def write_Ki_timevents(grp):
grp = str(grp)
grpout = covidModel.sub(grp)
params_str = f'(param Ki_{grp} @Ki_{grp}@)\n' \
f'(observe Ki_t_{grpout} Ki_{grp})\n' \
f'(time-event time_infection_import @time_infection_import_{grp}@ ' \
f'(' \
f'(As::{grp} @initialAs_{grp}@) ' \
f'(S::{grp} (- S::{grp} @initialAs_{grp}@))' \
f')' \
f')\n'
return params_str
def write_N_population(self):
stringAll = ""
for grp in self.grpList:
string1 = f'(param N_{grp} (+ @speciesS_{grp}@ @initialAs_{grp}@))\n'
stringAll = stringAll + string1
string2 = f'(param N_All (+ {covidModel.repeat_string_by_grp("N_", self.grpList)}))\n'
string3 = '(observe N_All N_All)\n'
stringAll = stringAll + string2 + string3
return stringAll
def repeat_string_by_grp(fixedstring, grpList):
stringAll = ""
for grp in grpList:
temp_string = " " + fixedstring + grp
stringAll = stringAll + temp_string
return stringAll
def write_observe_All(self):
grpList = self.grpList
if "vaccine" in self.add_interventions:
observe_channels_All_str = f"(observe vaccinated_cumul_All (+ " + covidModel.repeat_string_by_grp('vaccinated_cumul_',grpList) + "))\n"
channels = covidModel.get_channels(self)
channels = channels[:int(len(channels) / 2)]
for channel in channels:
if channel == 'crit':
channel = 'critical'
if channel == 'hosp':
channel = 'hospitalized'
if channel == "susceptible":
temp_str = f"(observe {channel}_All " \
f"(+ " +\
covidModel.repeat_string_by_grp('S::', grpList) + \
covidModel.repeat_string_by_grp('S_V::', grpList) + \
"))\n"
elif channel == "deaths_det":
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp('D3_det3::', grpList) + \
covidModel.repeat_string_by_grp('D3_det3_V::', grpList) + \
"))\n"
elif channel == "exposed":
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp('E::', grpList) + \
covidModel.repeat_string_by_grp('E_V::', grpList) + \
"))\n"
elif channel == "asymp_det":
temp_str = f"(observe {channel}_All (+ " +\
covidModel.repeat_string_by_grp('As_det1::', grpList) + \
covidModel.repeat_string_by_grp('As_det1_V::', grpList) + \
"))\n"
elif channel == "presymp":
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp('P::', grpList) + \
covidModel.repeat_string_by_grp('P_V::', grpList) + \
"))\n"
elif channel == "presymp_det":
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp('P_det::',grpList) + \
covidModel.repeat_string_by_grp('P_det_V::', grpList) + \
"))\n"
else:
temp_str = f"(observe {channel}_All (+ " + \
covidModel.repeat_string_by_grp(f'{channel}_', grpList) + \
covidModel.repeat_string_by_grp(f'{channel}_V_', grpList) + \
"))\n"
observe_channels_All_str = observe_channels_All_str + temp_str
del temp_str
channels = covidModel.get_channels(self)
channels = channels[int(len(channels) / 2):]
for channel in channels:
if channel == 'crit_V':
channel = 'critical_V'
if channel == 'hosp_V':
channel = 'hospitalized_V'
if channel == "susceptible_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('S_V::',grpList) + "))\n"
elif channel == "deaths_det_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('D3_det3_V::', grpList) + "))\n"
elif channel == "exposed_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('E_V::',grpList) + "))\n"
elif channel == "asymp_det_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('As_det1_V::', grpList) + "))\n"
elif channel == "presymp_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('P_V::', grpList) + "))\n"
elif channel == "presymp_det_V":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('P_det_V::', grpList) + "))\n"
else:
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp(f'{channel}_', grpList) + "))\n"
observe_channels_All_str = observe_channels_All_str + temp_str
del temp_str
else:
observe_channels_All_str = ""
channels = covidModel.get_channels(self)
for channel in channels :
if channel == 'crit':
channel = 'critical'
if channel == 'hosp':
channel = 'hospitalized'
if channel == "susceptible":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('S::', grpList) + "))\n"
elif channel == "deaths_det":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('D3_det3::', grpList) + "))\n"
elif channel == "exposed":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('E::', grpList) + "))\n"
elif channel == "asymp_det":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('As_det1::', grpList) + "))\n"
elif channel == "presymp":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('P::',grpList) + "))\n"
elif channel == "presymp_det":
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp('P_det::', grpList) + "))\n"
else:
temp_str = f"(observe {channel}_All (+ " + covidModel.repeat_string_by_grp(f'{channel}_', grpList) + "))\n"
observe_channels_All_str = observe_channels_All_str + temp_str
del temp_str
return observe_channels_All_str
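    # Example of one line write_observe_All emits in the non-vaccine branch
    # (group names assumed): for grpList = ['EMS_1', 'EMS_2'] and channel 'exposed',
    #   (observe exposed_All (+  E::EMS_1 E::EMS_2))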
def write_reactions(self, grp):
grp = str(grp)
reaction_str_I = f'\n(reaction exposure_{grp} ' \
f'(S::{grp}) (E::{grp}) ' \
f'(* Ki_{grp} S::{grp} ' \
f'(/ ' \
f'(+ infectious_undet_symp_{grp} ' \
f'(* infectious_undet_As_{grp} reduced_infectious_As ) ' \
f'(* infectious_det_symp_{grp} reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_{grp} reduced_inf_of_det_cases)' \
f') N_{grp} )' \
f'))\n'
reaction_str_Ia = f'\n(reaction exposure_{grp} ' \
f'(S::{grp}) (E::{grp}) ' \
f'(* Ki_{grp} S::{grp} ' \
f'(/ ' \
f'(+ infectious_undet_symp_{grp}' \
f'(* (+ infectious_undet_symp_V_{grp} infectious_undet_As_V_{grp} ) reduced_infectious_V ) ' \
f'(* infectious_undet_As_{grp} reduced_infectious_As ) ' \
f'(* infectious_det_symp_{grp} reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_{grp} reduced_inf_of_det_cases)' \
f'(* infectious_det_symp_V_{grp} reduced_infectious_V reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_V_{grp} reduced_infectious_V reduced_inf_of_det_cases)' \
f') N_{grp} )' \
f'))\n'
reaction_str_Ib = f'\n(reaction exposure_{grp} ' \
f'(S_V::{grp}) (E_V::{grp}) ' \
f'(* Ki_{grp} S_V::{grp} ' \
f'(/ ' \
f'(+ infectious_undet_symp_{grp}' \
f'(* (+ infectious_undet_symp_V_{grp} infectious_undet_As_V_{grp} ) reduced_infectious_V ) ' \
f'(* infectious_undet_As_{grp} reduced_infectious_As ) ' \
f'(* infectious_det_symp_{grp} reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_{grp} reduced_inf_of_det_cases)' \
f'(* infectious_det_symp_V_{grp} reduced_infectious_V reduced_inf_of_det_cases) ' \
f'(* infectious_det_AsP_V_{grp} reduced_infectious_V reduced_inf_of_det_cases)' \
f') N_{grp} )' \
f'))\n'
if 'vaccine' in self.add_interventions:
reaction_str_I = f'(reaction vaccination_{grp} (S::{grp}) (S_V::{grp}) (* Kv_{grp} S::{grp}))\n'
reaction_str_I = reaction_str_I + reaction_str_Ia + reaction_str_Ib
reaction_str_III = f'(reaction recovery_H1_{grp} (H1::{grp}) (RH1::{grp}) (* Kr_h{grp} H1::{grp}))\n' \
f'(reaction recovery_C2_{grp} (C2::{grp}) (H2post::{grp}) (* Kr_c{grp} C2::{grp}))\n' \
f'(reaction recovery_H2post_{grp} (H2post::{grp}) (RC2::{grp}) (* Kr_hc H2post::{grp}))\n' \
f'(reaction recovery_H1_det3_{grp} (H1_det3::{grp}) (RH1_det3::{grp}) (* Kr_h{grp} H1_det3::{grp}))\n' \
f'(reaction recovery_C2_det3_{grp} (C2_det3::{grp}) (H2post_det3::{grp}) (* Kr_c{grp} C2_det3::{grp}))\n' \
f'(reaction recovery_H2post_det3_{grp} (H2post_det3::{grp}) (RC2_det3::{grp}) (* Kr_hc H2post_det3::{grp}))\n'
expand_base_str = f'(reaction infection_asymp_undet_{grp} (E::{grp}) (As::{grp}) (* Kl E::{grp} (- 1 d_As)))\n' \
f'(reaction infection_asymp_det_{grp} (E::{grp}) (As_det1::{grp}) (* Kl E::{grp} d_As))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P::{grp}) (* Ks E::{grp} (- 1 d_P)))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P_det::{grp}) (* Ks E::{grp} d_P))\n' \
f'(reaction mild_symptomatic_undet_{grp} (P::{grp}) (Sym::{grp}) (* Ksym P::{grp} (- 1 d_Sym)))\n' \
f'(reaction mild_symptomatic_det_{grp} (P::{grp}) (Sym_det2::{grp}) (* Ksym P::{grp} d_Sym))\n' \
f'(reaction severe_symptomatic_undet_{grp} (P::{grp}) (Sys::{grp}) (* Ksys P::{grp} (- 1 d_Sys)))\n' \
f'(reaction severe_symptomatic_det_{grp} (P::{grp}) (Sys_det3::{grp}) (* Ksys P::{grp} d_Sys))\n' \
f'(reaction mild_symptomatic_det_{grp} (P_det::{grp}) (Sym_det2::{grp}) (* Ksym P_det::{grp}))\n' \
f'(reaction severe_symptomatic_det_{grp} (P_det::{grp}) (Sys_det3::{grp}) (* Ksys P_det::{grp} ))\n' \
f'(reaction hospitalization_1_{grp} (Sys::{grp}) (H1::{grp}) (* Kh1 Sys::{grp}))\n' \
f'(reaction hospitalization_2_{grp} (Sys::{grp}) (H2pre::{grp}) (* Kh2 Sys::{grp}))\n' \
f'(reaction hospitalization_3_{grp} (Sys::{grp}) (H3::{grp}) (* Kh3 Sys::{grp}))\n' \
f'(reaction critical_2_{grp} (H2pre::{grp}) (C2::{grp}) (* Kc H2pre::{grp}))\n' \
f'(reaction critical_3_{grp} (H3::{grp}) (C3::{grp}) (* Kc H3::{grp}))\n' \
f'(reaction deaths_{grp} (C3::{grp}) (D3::{grp}) (* Km C3::{grp}))\n' \
f'(reaction hospitalization_1_det_{grp} (Sys_det3::{grp}) (H1_det3::{grp}) (* Kh1 Sys_det3::{grp}))\n' \
f'(reaction hospitalization_2_det_{grp} (Sys_det3::{grp}) (H2pre_det3::{grp}) (* Kh2 Sys_det3::{grp}))\n' \
f'(reaction hospitalization_3_det_{grp} (Sys_det3::{grp}) (H3_det3::{grp}) (* Kh3 Sys_det3::{grp}))\n' \
f'(reaction critical_2_det2_{grp} (H2pre_det3::{grp}) (C2_det3::{grp}) (* Kc H2pre_det3::{grp}))\n' \
f'(reaction critical_3_det2_{grp} (H3_det3::{grp}) (C3_det3::{grp}) (* Kc H3_det3::{grp}))\n' \
f'(reaction deaths_det3_{grp} (C3_det3::{grp}) (D3_det3::{grp}) (* Km C3_det3::{grp}))\n' \
f'(reaction recovery_As_{grp} (As::{grp}) (RAs::{grp}) (* Kr_a As::{grp}))\n' \
f'(reaction recovery_As_det_{grp} (As_det1::{grp}) (RAs_det1::{grp}) (* Kr_a As_det1::{grp}))\n' \
f'(reaction recovery_Sym_{grp} (Sym::{grp}) (RSym::{grp}) (* Kr_m Sym::{grp}))\n' \
f'(reaction recovery_Sym_det2_{grp} (Sym_det2::{grp}) (RSym_det2::{grp}) (* Kr_m Sym_det2::{grp}))\n'
expand_testDelay_SymSys_str = f'(reaction infection_asymp_undet_{grp} (E::{grp}) (As::{grp}) (* Kl E::{grp} (- 1 d_As)))\n' \
f'(reaction infection_asymp_det_{grp} (E::{grp}) (As_det1::{grp}) (* Kl E::{grp} d_As))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P::{grp}) (* Ks E::{grp}))\n' \
f'; developing symptoms - same time to symptoms as in master emodl\n' \
f'(reaction mild_symptomatic_{grp} (P::{grp}) (Sym_preD::{grp}) (* Ksym P::{grp}))\n' \
f'(reaction severe_symptomatic_{grp} (P::{grp}) (Sys_preD::{grp}) (* Ksys P::{grp}))\n' \
f'; never detected \n' \
f'(reaction mild_symptomatic_undet_{grp} (Sym_preD::{grp}) (Sym::{grp}) (* Ksym_D Sym_preD::{grp} (- 1 d_Sym)))\n' \
f'(reaction severe_symptomatic_undet_{grp} (Sys_preD::{grp}) (Sys::{grp}) (* Ksys_D Sys_preD::{grp} (- 1 d_Sys)))\n' \
                                      f'; new detections - time to detection is subtracted from hospital time\n' \
f'(reaction mild_symptomatic_det_{grp} (Sym_preD::{grp}) (Sym_det2::{grp}) (* Ksym_D Sym_preD::{grp} d_Sym))\n' \
f'(reaction severe_symptomatic_det_{grp} (Sys_preD::{grp}) (Sys_det3::{grp}) (* Ksys_D Sys_preD::{grp} d_Sys))\n' \
f'(reaction hospitalization_1_{grp} (Sys::{grp}) (H1::{grp}) (* Kh1_D Sys::{grp}))\n' \
f'(reaction hospitalization_2_{grp} (Sys::{grp}) (H2pre::{grp}) (* Kh2_D Sys::{grp}))\n' \
f'(reaction hospitalization_3_{grp} (Sys::{grp}) (H3::{grp}) (* Kh3_D Sys::{grp}))\n' \
f'(reaction critical_2_{grp} (H2pre::{grp}) (C2::{grp}) (* Kc H2pre::{grp}))\n' \
f'(reaction critical_3_{grp} (H3::{grp}) (C3::{grp}) (* Kc H3::{grp}))\n' \
f'(reaction deaths_{grp} (C3::{grp}) (D3::{grp}) (* Km C3::{grp}))\n' \
f'(reaction hospitalization_1_det_{grp} (Sys_det3::{grp}) (H1_det3::{grp}) (* Kh1_D Sys_det3::{grp}))\n' \
f'(reaction hospitalization_2_det_{grp} (Sys_det3::{grp}) (H2pre_det3::{grp}) (* Kh2_D Sys_det3::{grp}))\n' \
f'(reaction hospitalization_3_det_{grp} (Sys_det3::{grp}) (H3_det3::{grp}) (* Kh3_D Sys_det3::{grp}))\n' \
f'(reaction critical_2_det2_{grp} (H2pre_det3::{grp}) (C2_det3::{grp}) (* Kc H2pre_det3::{grp}))\n' \
f'(reaction critical_3_det2_{grp} (H3_det3::{grp}) (C3_det3::{grp}) (* Kc H3_det3::{grp}))\n' \
f'(reaction deaths_det3_{grp} (C3_det3::{grp}) (D3_det3::{grp}) (* Km C3_det3::{grp}))\n' \
f'(reaction recovery_As_{grp} (As::{grp}) (RAs::{grp}) (* Kr_a As::{grp}))\n' \
f'(reaction recovery_As_det_{grp} (As_det1::{grp}) (RAs_det1::{grp}) (* Kr_a As_det1::{grp}))\n' \
f'(reaction recovery_Sym_{grp} (Sym::{grp}) (RSym::{grp}) (* Kr_m_D Sym::{grp}))\n' \
f'(reaction recovery_Sym_det2_{grp} (Sym_det2::{grp}) (RSym_det2::{grp}) (* Kr_m_D Sym_det2::{grp}))\n'
expand_testDelay_AsSymSys_str = f'(reaction infection_asymp_det_{grp} (E::{grp}) (As_preD::{grp}) (* Kl E::{grp}))\n' \
f'(reaction infection_asymp_undet_{grp} (As_preD::{grp}) (As::{grp}) (* Kl_D As_preD::{grp} (- 1 d_As)))\n' \
f'(reaction infection_asymp_det_{grp} (As_preD::{grp}) (As_det1::{grp}) (* Kl_D As_preD::{grp} d_As))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P::{grp}) (* Ks E::{grp} (- 1 d_P)))\n' \
f'(reaction presymptomatic_{grp} (E::{grp}) (P_det::{grp}) (* Ks E::{grp} d_P))\n' \
f'; developing symptoms - same time to symptoms as in master emodl\n' \
f'(reaction mild_symptomatic_{grp} (P::{grp}) (Sym_preD::{grp}) (* Ksym P::{grp}))\n' \
f'(reaction severe_symptomatic_{grp} (P::{grp}) (Sys_preD::{grp}) (* Ksys P::{grp}))\n' \
f'; never detected\n' \
f'(reaction mild_symptomatic_undet_{grp} (Sym_preD::{grp}) (Sym::{grp}) (* Ksym_D Sym_preD::{grp} (- 1 d_Sym)))\n' \
f'(reaction severe_symptomatic_undet_{grp} (Sys_preD::{grp}) (Sys::{grp}) (* Ksys_D Sys_preD::{grp} (- 1 d_Sys)))\n' \
f'; new detections - time to detection is subtracted from hospital time\n' \
f'(reaction mild_symptomatic_det_{grp} (Sym_preD::{grp}) (Sym_det2a::{grp}) (* Ksym_D Sym_preD::{grp} d_Sym))\n' \
f'(reaction severe_symptomatic_det_{grp} (Sys_preD::{grp}) (Sys_det3a::{grp}) (* Ksys_D Sys_preD::{grp} d_Sys))\n' \
f'; developing symptoms - already detected, same time to symptoms as in master emodl\n' \
f'(reaction mild_symptomatic_det_{grp} (P_det::{grp}) (Sym_det2b::{grp}) (* Ksym P_det::{grp}))\n' \
f'(reaction severe_symptomatic_det_{grp} (P_det::{grp}) (Sys_det3b::{grp}) (* Ksys P_det::{grp} ))\n' \
f'(reaction hospitalization_1_{grp} (Sys::{grp}) (H1::{grp}) (* Kh1_D Sys::{grp}))\n' \
f'(reaction hospitalization_2_{grp} (Sys::{grp}) (H2pre::{grp}) (* Kh2_D Sys::{grp}))\n' \
f'(reaction hospitalization_3_{grp} (Sys::{grp}) (H3::{grp}) (* Kh3_D Sys::{grp}))\n' \
f'(reaction critical_2_{grp} (H2pre::{grp}) (C2::{grp}) (* Kc H2pre::{grp}))\n' \
f'(reaction critical_3_{grp} (H3::{grp}) (C3::{grp}) (* Kc H3::{grp}))\n' \
f'(reaction deaths_{grp} (C3::{grp}) (D3::{grp}) (* Km C3::{grp}))\n' \
f'(reaction hospitalization_1_det_{grp} (Sys_det3a::{grp}) (H1_det3::{grp}) (* Kh1_D Sys_det3a::{grp}))\n' \
f'(reaction hospitalization_2_det_{grp} (Sys_det3a::{grp}) (H2pre_det3::{grp}) (* Kh2_D Sys_det3a::{grp}))\n' \
f'(reaction hospitalization_3_det_{grp} (Sys_det3a::{grp}) (H3_det3::{grp}) (* Kh3_D Sys_det3a::{grp}))\n' \
f'(reaction hospitalization_1_det_{grp} (Sys_det3b::{grp}) (H1_det3::{grp}) (* Kh1 Sys_det3b::{grp}))\n' \
f'(reaction hospitalization_2pre_det_{grp} (Sys_det3b::{grp}) (H2pre_det3::{grp}) (* Kh2 Sys_det3b::{grp}))\n' \
f'(reaction hospitalization_3_det_{grp} (Sys_det3b::{grp}) (H3_det3::{grp}) (* Kh3 Sys_det3b::{grp}))\n' \
f'(reaction critical_2_det2_{grp} (H2pre_det3::{grp}) (C2_det3::{grp}) (* Kc H2pre_det3::{grp}))\n' \
f'(reaction critical_3_det2_{grp} (H3_det3::{grp}) (C3_det3::{grp}) (* Kc H3_det3::{grp}))\n' \
f'(reaction deaths_det3_{grp} (C3_det3::{grp}) (D3_det3::{grp}) (* Km C3_det3::{grp}))\n' \
f'(reaction recovery_As_{grp} (As::{grp}) (RAs::{grp}) (* Kr_a_D As::{grp}))\n' \
f'(reaction recovery_As_det_{grp} (As_det1::{grp}) (RAs_det1::{grp}) (* Kr_a_D As_det1::{grp}))\n' \
f'(reaction recovery_Sym_{grp} (Sym::{grp}) (RSym::{grp}) (* Kr_m_D Sym::{grp}))\n' \
f'(reaction recovery_Sym_det2a_{grp} (Sym_det2a::{grp}) (RSym_det2::{grp}) (* Kr_m_D Sym_det2a::{grp}))\n' \
f'(reaction recovery_Sym_det2b_{grp} (Sym_det2b::{grp}) (RSym_det2::{grp}) (* Kr_m Sym_det2b::{grp}))\n'
        if self.expandModel is None:
            reaction_str = expand_base_str + reaction_str_III
        elif self.expandModel in ("SymSys", "uniform"):
            reaction_str = expand_testDelay_SymSys_str + reaction_str_III
        elif self.expandModel == 'AsSymSys':
            reaction_str = expand_testDelay_AsSymSys_str + reaction_str_III
if 'vaccine' in self.add_interventions:
reaction_str_V = reaction_str.replace(f'_{grp}',f'_V_{grp}')
reaction_str_V = reaction_str_V.replace(f'::{grp}', f'_V::{grp}')
reaction_str = reaction_str + reaction_str_V
"""Custom adjustments - not automated/integrated yet"""
reaction_str = reaction_str.replace('_V_V', '_V')
reaction_str = reaction_str.replace('Ki_V', 'Ki')
reaction_str = reaction_str.replace('N_V', 'N')
"""Vaccinated-population specific parameters"""
reaction_str = reaction_str.replace('Kl E_V::', 'KlV E_V::')
reaction_str = reaction_str.replace('Ks E_V::', 'KsV E_V::')
reaction_str = reaction_str.replace('Ksym P_V::', 'KsymV P_V::')
reaction_str = reaction_str.replace('Ksys P_V::', 'KsysV P_V::')
reaction_str = reaction_str.replace('Ksym P_det_V::', 'KsymV P_det_V::')
reaction_str = reaction_str.replace('Ksys P_det_V::', 'KsysV P_det_V::')
reaction_str = reaction_str_I + reaction_str
return reaction_str
def write_time_varying_parameter(self, total_string):
"""Time varying parameter that have been fitted to data, or informed by local data.
Parameters and corresponding sub-functions:
- fraction_critical: `write_frac_crit_change`
- fraction_dead: `write_fraction_dead_change`
- dSys: `write_dSys_change`
- d_Sym: `write_d_Sym_P_As_change`
- dP_As: `write_d_Sym_P_As_change`
- Ki (monthly multipliers): `write_ki_multiplier_change`
- recovery_time_crit: `write_recovery_time_crit_change`
- recovery_time_hosp: `write_recovery_time_hosp_change`
        All functions take the required argument nchanges, which defines the number of updates.
        The defaults are set within each function and currently need to be edited manually.
"""
def write_frac_crit_change(nchanges):
n_frac_crit_change = range(1, nchanges+1)
frac_crit_change_observe = '(observe fraction_severe_t fraction_severe)\n(observe frac_crit_t fraction_critical)\n'
frac_crit_change_timeevent = ''.join([f'(time-event frac_crit_adjust{i} @crit_time_{i}@ '
f'('
f'(fraction_critical @fraction_critical_change{i}@) '
f'(fraction_hospitalized (- 1 (+ fraction_critical fraction_dead))) '
f'(Kh1 (/ fraction_hospitalized time_to_hospitalization)) '
f'(Kh2 (/ fraction_critical time_to_hospitalization )) '
f'(Kh1_D (/ fraction_hospitalized (- time_to_hospitalization time_D_Sys))) '
f'(Kh2_D (/ fraction_critical (- time_to_hospitalization time_D_Sys)))'
f')'
f')'
f'\n' for i in n_frac_crit_change])
return frac_crit_change_observe + frac_crit_change_timeevent
def write_fraction_dead_change(nchanges):
n_fraction_dead_change = range(1, nchanges+1)
fraction_dead_change_observe = '(observe fraction_dead_t fraction_dead)\n' \
'(observe fraction_hospitalized_t fraction_hospitalized)\n'
fraction_dead_change_timeevent = ''.join([f'(time-event fraction_dead_adjust2 @fraction_dead_time_{i}@ '
f'('
f'(fraction_dead @fraction_dead_change{i}@) '
f'(fraction_hospitalized (- 1 (+ fraction_critical fraction_dead))) '
f'(Kh1 (/ fraction_hospitalized time_to_hospitalization)) '
f'(Kh2 (/ fraction_critical time_to_hospitalization )) '
f'(Kh1_D (/ fraction_hospitalized (- time_to_hospitalization time_D_Sys))) '
f'(Kh2_D (/ fraction_critical (- time_to_hospitalization time_D_Sys)))'
f')'
f')'
f' \n' for i in n_fraction_dead_change])
return fraction_dead_change_observe + fraction_dead_change_timeevent
def write_dSys_change(nchanges):
n_dSys_change = range(1, nchanges+1)
dSys_change_observe = '(observe d_Sys_t d_Sys)\n'
dSys_change_timeevent = ''.join([f'(time-event dSys_change{i} @d_Sys_change_time_{i}@ '
f'((d_Sys @d_Sys_incr{i}@))'
f')'
f'\n' for i in n_dSys_change])
return dSys_change_observe + dSys_change_timeevent
def write_ki_multiplier_change(nchanges,fit_param):
n_ki_multiplier = ['3a','3b','3c'] + list(range(4, nchanges+1))
ki_multiplier_change_str = ''
for grp in self.grpList:
temp_str_param = ''.join([f'(param Ki_red{i}_{grp} '
f'(* Ki_{grp} @ki_multiplier_{i}_{grp}@)'
f')'
f'\n' for i in n_ki_multiplier])
temp_str_timeevent = ''.join([f'(time-event ki_multiplier_change_{i} @ki_multiplier_time_{i}@ '
f'((Ki_{grp} Ki_red{i}_{grp}))'
f')'
f'\n' for i in n_ki_multiplier])
if 'ki_multiplier' in fit_param:
i = fit_param.split('_')[-1]
temp_str_param = temp_str_param.replace(f'@ki_multiplier_{i}_{grp}@', f'(* @ki_multiplier_{i}_{grp}@ @scalingfactor@)')
ki_multiplier_change_str = ki_multiplier_change_str + temp_str_param + temp_str_timeevent
return ki_multiplier_change_str
def write_d_Sym_P_As_change(nchanges):
d_Sym_P_As_change_observe = '(observe d_Sym_t d_Sym)\n' \
'(observe d_P_t d_P)\n' \
'(observe d_As_t d_As)\n'
n_d_PAs_changes = range(1,nchanges+1)
d_Sym_P_As_change_param = ''.join([f'(param d_PAs_change{i} '
f'(/ @d_Sym_change{i}@ dSym_dAsP_ratio)'
f')'
f'\n' for i in n_d_PAs_changes])
d_Sym_P_As_change_timeevent = ''.join([f'(time-event d_Sym_change{i} @d_Sym_change_time_{i}@ '
f'('
f'(d_Sym @d_Sym_change{i}@) ' \
f'(d_P d_PAs_change1) ' \
f'(d_As d_PAs_change{i}))'
f')'
f'\n' for i in n_d_PAs_changes])
return d_Sym_P_As_change_observe + d_Sym_P_As_change_param + d_Sym_P_As_change_timeevent
def write_recovery_time_crit_change(nchanges):
n_recovery_time_crit_change = range(1,nchanges+1)
recovery_time_crit_change = ''
for grp in self.grpList:
grpout = covidModel.sub(grp)
recovery_time_crit_change_param = f'(param recovery_time_crit_{grp} recovery_time_crit)\n' \
f'(param Kr_c{grp} (/ 1 recovery_time_crit_{grp}))\n' \
f'(observe recovery_time_crit_t_{grpout} recovery_time_crit_{grp})' \
f'\n'
recovery_time_crit_change_timeevent = ''.join([f'(time-event LOS_ICU_change_{i} @recovery_time_crit_change_time_{i}_{grp}@ '
f'('
f'(recovery_time_crit_{grp} @recovery_time_crit_change{i}_{grp}@) '
f'(Kr_c{grp} '
f'(/ 1 @recovery_time_crit_change{i}_{grp}@))'
f')'
f')'
f'\n' for i in n_recovery_time_crit_change])
recovery_time_crit_change = recovery_time_crit_change + \
recovery_time_crit_change_param + \
recovery_time_crit_change_timeevent
return recovery_time_crit_change
def write_recovery_time_hosp_change(nchanges):
n_recovery_time_hosp_change = range(1, nchanges + 1)
recovery_time_hosp_change = ''
for grp in self.grpList:
grpout = covidModel.sub(grp)
recovery_time_hosp_change_param = f'(param recovery_time_hosp_{grp} recovery_time_hosp)\n' \
f'(param Kr_h{grp} (/ 1 recovery_time_hosp_{grp}))\n' \
f'(observe recovery_time_hosp_t_{grpout} recovery_time_hosp_{grp})' \
f'\n'
recovery_time_hosp_change_timeevent = ''.join(
[f'(time-event LOS_nonICU_change_{i} @recovery_time_hosp_change_time_{i}_{grp}@ '
f'('
f'(recovery_time_hosp_{grp} @recovery_time_hosp_change{i}_{grp}@) '
f'(Kr_h{grp} (/ 1 @recovery_time_hosp_change{i}_{grp}@))'
f')'
f')'
f'\n' for i in n_recovery_time_hosp_change])
recovery_time_hosp_change = recovery_time_hosp_change + recovery_time_hosp_change_param + recovery_time_hosp_change_timeevent
return recovery_time_hosp_change
config_dic = covidModel.get_configs(key ='time_varying_parameter', config_file='intervention_emodl_config.yaml')
param_update_string = write_ki_multiplier_change(nchanges=config_dic['n_ki_multiplier'], fit_param = self.fit_param) + \
write_dSys_change(nchanges=config_dic['n_dSys_change']) + \
write_d_Sym_P_As_change(nchanges=config_dic['n_d_Sym_P_As_change']) + \
write_frac_crit_change(nchanges=config_dic['n_frac_crit_change']) + \
write_fraction_dead_change(nchanges=config_dic['n_fraction_dead_change']) + \
write_recovery_time_crit_change(nchanges=config_dic['n_recovery_time_crit_change']) + \
write_recovery_time_hosp_change(nchanges=config_dic['n_recovery_time_hosp_change'])
total_string = total_string.replace(';[TIMEVARYING_PARAMETERS]', param_update_string)
return total_string
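    # Note on write_time_varying_parameter: the emodl template passed in as total_string
    # is expected to contain a literal ';[TIMEVARYING_PARAMETERS]' placeholder, which the
    # generated parameter and time-event block replaces.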
def get_intervention_dates(intervention_param,scen):
"""intervention dates"""
n_gradual_steps = intervention_param['n_gradual_steps']
config_dic_dates = covidModel.get_configs(key ='time_parameters', config_file='extendedcobey_200428.yaml') #FIXME args.masterconfig
#FIXME more flexible read in and return of dates for any number of scenarios
#for i in range(1,nscenarios)
intervention_start = pd.to_datetime(config_dic_dates[f'{scen}_start']['function_kwargs']['dates'])
intervention_scaleupend = pd.to_datetime(config_dic_dates[f'{scen}_scaleupend']['function_kwargs']['dates'])
#intervention_end = pd.to_datetime(config_dic_dates[f'{scen}_end']['function_kwargs']['dates'])
if n_gradual_steps > 1 and intervention_scaleupend < pd.Timestamp('2090-01-01') :
date_freq = (intervention_scaleupend - intervention_start) /(n_gradual_steps-1)
intervention_dates = pd.date_range(start=intervention_start,end=intervention_scaleupend, freq=date_freq).tolist()
else:
n_gradual_steps = 1
intervention_dates = [intervention_start]
return n_gradual_steps, intervention_dates
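    # Sketch of get_intervention_dates (dates assumed): with n_gradual_steps = 4, an
    # intervention start of 2020-06-01 and a scale-up end of 2020-06-30, date_freq is
    # (29 days / 3) and four roughly evenly spaced timestamps are returned; with a single
    # step, or a sentinel scale-up end in 2090 or later, only the start date is returned.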
def write_interventions(self, total_string):
""" Write interventions
Interventions defined in sub-functions:
- bvariant: `write_bvariant`
- intervention_stop: `write_intervention_stop`
- transmission_increase: `write_transmission_increase`
- rollback: `write_rollback`
- gradual_reopening: `write_gradual_reopening`
"""
""" Get intervention configurations """
intervention_param = covidModel.get_configs(key ='interventions', config_file=self.intervention_config)
def write_vaccine_generic():
emodl_str = ';COVID-19 vaccine scenario\n'
emodl_param_initial = '(param Kv 0)\n(observe daily_vaccinated Kv)\n'
csvfile = intervention_param['vaccination_csv']
if csvfile != "":
df = pd.read_csv(os.path.join("./experiment_configs", 'input_csv', csvfile))
intervention_dates = list(df['Date'].values)
intervention_effectsizes = list(df['daily_cov'].values)
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event vaccination_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ((Kv {intervention_effectsizes[i-1]})))\n'
emodl_timeevents = emodl_timeevents + temp_str
else:
n_gradual_steps, intervention_dates = covidModel.get_intervention_dates(intervention_param,scen='vaccine')
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event vaccination_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ((Kv (* @vacc_daily_cov@ {(1 / (len(intervention_dates)) * i)}) )))\n'
emodl_timeevents = emodl_timeevents + temp_str
emodl_str = emodl_str + emodl_param_initial + emodl_timeevents
return emodl_str
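        # Example of one time-event emitted by write_vaccine_generic when a vaccination
        # csv is supplied (timestep and daily coverage value assumed):
        #   (time-event vaccination_change1 245 ((Kv 0.0025)))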
def write_vaccine():
emodl_str = ';COVID-19 vaccine scenario\n'
csvfile = intervention_param['vaccination_csv']
df = pd.read_csv(os.path.join("./experiment_configs", 'input_csv', csvfile))
df['Date'] = pd.to_datetime(df['date'])
emodl_str_grp = ""
for grp in self.grpList:
grp_num = grp.replace('EMS_','')
df_grp = df[df['covid_region']==int(grp_num)]
emodl_param_initial = f'(param Kv_{grp} 0)\n' \
f'(observe n_daily_vaccinated_{grp} (* Kv_{grp} S::{grp} ))\n'
intervention_dates = list(df_grp['Date'].values) + [max(df_grp['Date']) + pd.Timedelta(1,'days')]
intervention_effectsizes = list(df_grp['daily_first_vacc_perc'].values) + [0]
#intervention_effectsizes = list(df_grp['daily_first_vacc'].values) + [0]
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event daily_vaccinations_{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ((Kv_{grp} {intervention_effectsizes[i-1]})))\n'
emodl_timeevents = emodl_timeevents + temp_str
emodl_str_grp = emodl_str_grp + emodl_param_initial + emodl_timeevents
del df_grp
"""Adjust fraction severe"""
df = pd.read_csv(os.path.join(git_dir,"experiment_configs", 'input_csv', 'vaccination_fractionSevere_adjustment_IL.csv'))
df['Date'] = pd.to_datetime(df['date'])
intervention_dates = df['Date'].unique()
fraction_severe_notV = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f"(time-event fraction_severe_changeV_{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} (" \
f"(fraction_severe (- @fraction_severe@ (* (- @fraction_severe@ (* @fraction_severe@ reduced_fraction_Sys_notV)) {df['persons_above65_first_vaccinated_perc'][i-1]}))) " \
"(Ksys (* fraction_severe (/ 1 time_to_symptoms))) " \
"(Ksym (* (- 1 fraction_severe) (/ 1 time_to_symptoms)))))\n"
fraction_severe_notV = fraction_severe_notV + temp_str
emodl_str = fraction_severe_notV + emodl_str + emodl_str_grp
return emodl_str
def write_bvariant():
emodl_str = ';COVID-19 bvariant scenario\n'
csvfile = intervention_param['bvariant_csv']
if csvfile != "":
df = pd.read_csv(os.path.join("./experiment_configs", 'input_csv', csvfile))
intervention_dates = list(df['Date'].values)
fracinfect = list(df['variant_freq'].values)
fracinfect_timevent = ''.join([f'(time-event bvariant_fracinfect {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} '
f'((bvariant_fracinfect {fracinfect[i - 1]})))\n'
for i, date in enumerate(intervention_dates, 1)])
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event ki_bvariant_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ('
temp_str = temp_str + ''.join([f' (Ki_{grp} ( + Ki_{grp} (* (* Ki_{grp} 0.5) (* @bvariant_fracinfect@ {fracinfect[i - 1]} ))))' for grp in self.grpList])
temp_str = temp_str + f'))\n'
emodl_timeevents = emodl_timeevents + temp_str
else:
n_gradual_steps, intervention_dates = covidModel.get_intervention_dates(intervention_param,scen='bvariant')
fracinfect_timevent = ''.join([f'(time-event bvariant_fracinfect {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)}'
f' ((bvariant_fracinfect (* @bvariant_fracinfect@ '
f'{(1 / (len(intervention_dates)) * i)})))'
f')\n' for i, date in enumerate(intervention_dates, 1)])
emodl_param = ''.join([ f'(param Ki_bvariant_initial_{grp} 0)\n'
f'(time-event ki_bvariant_initial {covidModel.DateToTimestep(pd.Timestamp(intervention_dates[0])-pd.Timedelta(2,"days"), self.startdate)} ('
f'(Ki_bvariant_initial_{grp} Ki_{grp})'
f'))\n ' for grp in self.grpList])
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event ki_bvariant_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ('
temp_str = temp_str + ''.join([f' (Ki_{grp} ( + Ki_bvariant_initial_{grp} (* (* Ki_bvariant_initial_{grp} @bvariant_infectivity@) (* @bvariant_fracinfect@ {(1 / (len(intervention_dates)) * i)} ))))' for grp in self.grpList])
temp_str = temp_str + f'))\n'
emodl_timeevents = emodl_timeevents + temp_str
bvariant_infectivity = emodl_param + emodl_timeevents
"""keep track of fracinfect, and use for update symptom development reactions"""
fracinfect_str = '(param bvariant_fracinfect 0)\n' \
'(observe bvariant_fracinfect_t bvariant_fracinfect)\n' + fracinfect_timevent
"""fraction severe adjustment over time"""
frac_severe_timevent = ''.join([f'(time-event fraction_severe_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} '
f'('
f'(fraction_severe (+ '
f'(* @fraction_severe@ (- 1 bvariant_fracinfect)) '
f'(* fraction_severeB bvariant_fracinfect ) '
f')) '
f'(Ksys ( * fraction_severe (/ 1 time_to_symptoms))) '
f'(Ksym ( * (- 1 fraction_severe)(/ 1 time_to_symptoms)))'
f')'
f')\n' for i, date in enumerate(intervention_dates, 1)])
frac_severe_str = '(param fraction_severeB (* @fraction_severe@ @bvariant_severity@))\n' + frac_severe_timevent
if 'vaccine' in self.add_interventions:
"""fraction severe adjustment over time"""
frac_severeV_timevent = ''.join([f'(time-event fraction_severe_V_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} '
f'('
f'(fraction_severe_V (+ '
f'(* @fraction_severe@ @reduced_fraction_Sys@ (- 1 bvariant_fracinfect)) '
f'(* fraction_severeB @reduced_fraction_Sys@ bvariant_fracinfect ) '
f')) '
f'(KsysV ( * fraction_severe_V (/ 1 time_to_symptoms))) '
f'(KsymV ( * (- 1 fraction_severe_V)(/ 1 time_to_symptoms)))'
f')'
f')\n' for i, date in enumerate(intervention_dates, 1)])
frac_severeV_str = '(observe fraction_severe_V_t fraction_severe_V)\n' + frac_severeV_timevent
frac_severe_str = frac_severe_str + frac_severeV_str
emodl_str = emodl_str + bvariant_infectivity + fracinfect_str + frac_severe_str
return emodl_str
def write_rollback():
emodl_str = ';COVID-19 rollback scenario\n'
rollback_regionspecific = intervention_param['rollback_regionspecific']
csvfile = intervention_param['rollback_csv']
if csvfile != "":
df = pd.read_csv(os.path.join("./experiment_configs", 'input_csv', csvfile))
intervention_dates = list(df['Date'].values)
perc_rollback = list(df['perc_reopen'].values)
else:
n_gradual_steps, intervention_dates = covidModel.get_intervention_dates(intervention_param,scen='rollback')
perc_rollback = ['@rollback_multiplier@' for _ in range(len(intervention_dates))]
emodl_param = ''.join([ f'(param Ki_rollback_initial_{grp} 0)\n'
f'(time-event ki_rollback_initial_ {covidModel.DateToTimestep(pd.Timestamp(intervention_dates[0])-pd.Timedelta(2,"days"), self.startdate)} ('
f'(Ki_rollback_initial_{grp} Ki_{grp})'
f'))\n ' for grp in self.grpList])
emodl_timeevents = ''
for i, date in enumerate(intervention_dates, 1):
temp_str = f'(time-event ki_rollback_change{i} {covidModel.DateToTimestep(pd.Timestamp(date), self.startdate)} ('
temp_str = temp_str + ''.join([f' (Ki_{grp} (- Ki_rollback_initial_{grp} (* {perc_rollback[i - 1]} Ki_rollback_initial_{grp})))' for grp in self.grpList ])
temp_str = temp_str + f'))\n'
emodl_timeevents = emodl_timeevents + temp_str
emodl_str = emodl_str + emodl_param + emodl_timeevents
return emodl_str
def write_triggeredrollback():
emodl_str = ';COVID-19 triggeredrollback scenario\n'
trigger_channel = intervention_param['trigger_channel']
n_gradual_steps, intervention_dates = covidModel.get_intervention_dates(intervention_param, scen='triggeredrollback')
emodl_timeevents = ''.join([f'(param time_of_trigger_{grp} 10000)\n'
f'(state-event rollbacktrigger_{grp} '
f'(and (> time {covidModel.DateToTimestep(pd.Timestamp(intervention_dates[0]),self.startdate)}) '
f'(> {trigger_channel}_{grp} (* {covidModel.get_trigger(grp,trigger_channel)} @capacity_multiplier@))'
f') '
f'((time_of_trigger_{grp} time))'
f')\n'
f'(func time_since_trigger_{grp} (- time time_of_trigger_{grp}))\n'
f'(state-event apply_rollback_{grp} '
f'(> (- time_since_trigger_{grp} @trigger_delay_days@) 0) ('
f'(Ki_{grp} (* Ki_{grp} @rollback_multiplier@)) '
f'))\n'
f'(observe triggertime_{covidModel.sub(grp)} time_of_trigger_{grp})\n' for grp in self.grpList])
emodl_str = emodl_str + emodl_timeevents
return emodl_str
def write_reopen():
emodl_str = ';COVID-19 reopen scenario\n'
reopen_regionspecific = intervention_param['reopen_regionspecific']
reopen_relative_to_initial = intervention_param['reopen_relative_to_initial']
csvfile = intervention_param['reopen_csv']
if csvfile != "":
df = pd.read_csv(os.path.join("./experiment_configs", 'input_csv', csvfile))
intervention_dates = list(df['Date'].values)
perc_reopen = list(df['perc_reopen'].values)
else:
n_gradual_steps, intervention_dates = covidModel.get_intervention_dates(intervention_param,scen='reopen')
perc_reopen = ['@reopen_multiplier@' for _ in range(len(intervention_dates))]
emodl_param = ''.join([ f'(param Ki_reopen_initial_{grp} 0)\n'
f'(time-event ki_reopen_initial_ {covidModel.DateToTimestep(pd.Timestamp(intervention_dates[0])- | pd.Timedelta(2,"days") | pandas.Timedelta |
import pickle
import random
import string
import warnings
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
from scipy import stats
import linearmodels
from linearmodels.shared.exceptions import missing_warning
from linearmodels.shared.hypotheses import (
InapplicableTestStatistic,
InvalidTestStatistic,
WaldTestStatistic,
)
from linearmodels.shared.io import add_star, format_wide
from linearmodels.shared.linalg import has_constant, inv_sqrth
from linearmodels.shared.utility import AttrDict, ensure_unique_column, panel_to_frame
MISSING_PANEL = "Panel" not in dir(pd)
def test_missing_warning():
missing = np.zeros(500, dtype=bool)
with warnings.catch_warnings(record=True) as w:
missing_warning(missing)
assert len(w) == 0
missing[0] = True
with warnings.catch_warnings(record=True) as w:
missing_warning(missing)
assert len(w) == 1
original = linearmodels.WARN_ON_MISSING
linearmodels.WARN_ON_MISSING = False
with warnings.catch_warnings(record=True) as w:
missing_warning(missing)
assert len(w) == 0
linearmodels.WARN_ON_MISSING = original
def test_hasconstant():
x = np.random.randn(100, 3)
hc, loc = has_constant(x)
assert bool(hc) is False
assert loc is None
x[:, 0] = 1
hc, loc = has_constant(x)
assert hc is True
assert loc == 0
x[:, 0] = 2
hc, loc = has_constant(x)
assert hc is True
assert loc == 0
x[::2, 0] = 0
x[:, 1] = 1
x[1::2, 1] = 0
hc, loc = has_constant(x)
assert hc is True
def test_wald_statistic():
ts = WaldTestStatistic(1.0, "_NULL_", 1, name="_NAME_")
assert str(hex(id(ts))) in ts.__repr__()
assert "_NULL_" in str(ts)
assert ts.stat == 1.0
assert ts.df == 1
assert ts.df_denom is None
assert ts.dist_name == "chi2(1)"
assert isinstance(ts.critical_values, dict)
assert_allclose(1 - stats.chi2.cdf(1.0, 1), ts.pval)
ts = WaldTestStatistic(1.0, "_NULL_", 1, 1000, name="_NAME_")
assert ts.df == 1
assert ts.df_denom == 1000
assert ts.dist_name == "F(1,1000)"
assert_allclose(1 - stats.f.cdf(1.0, 1, 1000), ts.pval)
def test_invalid_test_statistic():
ts = InvalidTestStatistic("_REASON_", name="_NAME_")
assert str(hex(id(ts))) in ts.__repr__()
assert "_REASON_" in str(ts)
assert np.isnan(ts.pval)
assert ts.critical_values is None
def test_inapplicable_test_statistic():
ts = InapplicableTestStatistic(reason="_REASON_", name="_NAME_")
assert str(hex(id(ts))) in ts.__repr__()
assert "_REASON_" in str(ts)
assert np.isnan(ts.pval)
assert ts.critical_values is None
ts = InapplicableTestStatistic()
assert "not applicable" in str(ts)
def test_inv_sqrth():
x = np.random.randn(1000, 10)
xpx = x.T @ x
invsq = inv_sqrth(xpx)
prod = invsq @ xpx @ invsq - np.eye(10)
assert_allclose(1 + prod, np.ones((10, 10)))
def test_ensure_unique_column():
df = pd.DataFrame({"a": [0, 1, 0], "b": [1.0, 0.0, 1.0]})
out = ensure_unique_column("a", df)
assert out == "_a_"
out = ensure_unique_column("c", df)
assert out == "c"
out = ensure_unique_column("a", df, "=")
assert out == "=a="
df["_a_"] = -1
out = ensure_unique_column("a", df)
assert out == "__a__"
def test_attr_dict():
ad = AttrDict()
ad["one"] = "one"
ad[1] = 1
ad[("a", 2)] = ("a", 2)
assert list(ad.keys()) == ["one", 1, ("a", 2)]
assert len(ad) == 3
plk = pickle.dumps(ad)
pad = pickle.loads(plk)
assert list(pad.keys()) == ["one", 1, ("a", 2)]
assert len(pad) == 3
ad2 = ad.copy()
assert list(ad2.keys()) == list(ad.keys())
assert ad.get("one", None) == "one"
assert ad.get("two", False) is False
k, v = ad.popitem()
assert k == "one"
assert v == "one"
items = ad.items()
assert (1, 1) in items
assert (("a", 2), ("a", 2)) in items
assert len(items) == 2
values = ad.values()
assert 1 in values
assert ("a", 2) in values
assert len(values) == 2
ad2 = AttrDict()
ad2[1] = 3
ad2["one"] = "one"
ad2["a"] = "a"
ad.update(ad2)
assert ad[1] == 3
assert "a" in ad
ad.__str__()
with pytest.raises(AttributeError):
ad.__private_dict__ = None
with pytest.raises(AttributeError):
ad.some_other_key
with pytest.raises(KeyError):
ad["__private_dict__"] = None
del ad[1]
assert 1 not in ad.keys()
ad.new_value = "new_value"
assert "new_value" in ad.keys()
assert ad.new_value == ad["new_value"]
for key in ad.keys():
if isinstance(key, str):
assert key in dir(ad)
new_value = ad.pop("new_value")
assert new_value == "new_value"
del ad.one
assert "one" not in ad.keys()
ad.clear()
assert list(ad.keys()) == []
def test_format_wide():
k = 26
inputs = [chr(65 + i) * (20 + i) for i in range(k)]
out = format_wide(inputs, 80)
assert max([len(v) for v in out]) <= 80
out = format_wide(["a"], 80)
assert out == [["a"]]
def test_panel_to_midf():
x = np.random.standard_normal((3, 7, 100))
df = panel_to_frame(x, list(range(3)), list(range(7)), list(range(100)))
mi = pd.MultiIndex.from_product([list(range(7)), list(range(100))])
expected = pd.DataFrame(index=mi, columns=[0, 1, 2])
for i in range(3):
expected[i] = x[i].ravel()
expected.index.names = ["major", "minor"]
pd.testing.assert_frame_equal(df, expected)
expected2 = expected.copy()
expected2 = expected2.sort_index(level=[1, 0])
expected2.index = expected2.index.swaplevel(0, 1)
expected2.index.names = ["major", "minor"]
df2 = panel_to_frame(x, list(range(3)), list(range(7)), list(range(100)), True)
pd.testing.assert_frame_equal(df2, expected2)
entities = list(
map(
"".join,
[
[random.choice(string.ascii_lowercase) for __ in range(10)]
for _ in range(100)
],
)
)
times = pd.date_range("1999-12-31", freq="A-DEC", periods=7)
var_names = ["x.{0}".format(i) for i in range(1, 4)]
df3 = panel_to_frame(x, var_names, times, entities, True)
mi = pd.MultiIndex.from_product([times, entities])
expected3 = pd.DataFrame(index=mi, columns=var_names)
for i in range(1, 4):
expected3["x.{0}".format(i)] = x[i - 1].ravel()
expected3.index = expected3.index.swaplevel(0, 1)
mi = pd.MultiIndex.from_product([entities, times])
expected3 = expected3.loc[mi]
expected3.index.names = ["major", "minor"]
| pd.testing.assert_frame_equal(df3, expected3) | pandas.testing.assert_frame_equal |
import numpy as np
import pandas as pd
import sys
from tqdm import tqdm
import h5py
from sklearn.metrics.pairwise import cosine_similarity
import pkg_resources
import re
import itertools
import os
import matplotlib.pyplot as plt
from sys import stdout ### GET rid of later
from .context import context_composite, context96, context1536, context78, context83, context_composite96
COMPL = {"A":"T","T":"A","G":"C","C":"G"}
# ---------------------------------
# IOUtils
# ---------------------------------
def file_loader(x):
if x.endswith('.csv'):
return pd.read_csv(x, index_col=0)
elif x.endswith('.parquet'):
return pd.read_parquet(x)
else:
return | pd.read_csv(x, sep='\t', index_col=0) | pandas.read_csv |
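# Hypothetical usage of file_loader (file names assumed):
#   maf_df = file_loader('cohort_maf.tsv')       # tab-separated branch, first column as index
#   spectra_df = file_loader('spectra.parquet')  # parquet branch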
import random
import numpy as np
import pandas as pd
def remove_unlinked_triples(triples, linked_ents):
print("before removing unlinked triples:", len(triples))
new_triples = set()
for h, r, t in triples:
if h in linked_ents and t in linked_ents:
new_triples.add((h, r, t))
print("after removing unlinked triples:", len(new_triples))
return list(new_triples)
def enhance_triples(kg1, kg2, ents1, ents2):
assert len(ents1) == len(ents2)
print("before enhanced:", len(kg1.triples), len(kg2.triples))
enhanced_triples1, enhanced_triples2 = set(), set()
links1 = dict(zip(ents1, ents2))
links2 = dict(zip(ents2, ents1))
for h1, r1, t1 in kg1.triples:
h2 = links1.get(h1, None)
t2 = links1.get(t1, None)
if h2 is not None and t2 is not None and t2 not in kg2.out_related_ents_dict.get(h2, set()):
enhanced_triples2.add((h2, r1, t2))
for h2, r2, t2 in kg2.triples:
h1 = links2.get(h2, None)
t1 = links2.get(t2, None)
if h1 is not None and t1 is not None and t1 not in kg1.out_related_ents_dict.get(h1, set()):
enhanced_triples1.add((h1, r2, t1))
print("after enhanced:", len(enhanced_triples1), len(enhanced_triples2))
return enhanced_triples1, enhanced_triples2
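# Sketch of enhance_triples (entity/relation ids assumed): if e1 in kg1 is aligned with
# f1 in kg2 and t1 is aligned with t2, then a kg1 triple (e1, r, t1) contributes the
# swapped triple (f1, r, t2) to enhanced_triples2, unless kg2 already has an edge f1 -> t2;
# kg2 triples are mirrored into enhanced_triples1 in the same way.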
def generate_3hop_triples(kg, two_hop_triples, linked_ents=None):
two_triple_df = np.array([[tr[0], tr[1], tr[2]] for tr in two_hop_triples])
two_triple_df = pd.DataFrame(two_triple_df, columns=['h', 'r', 't'])
triples = kg.triples
if linked_ents is not None:
triples = remove_unlinked_triples(triples, linked_ents)
triple_df = np.array([[tr[0], tr[1], tr[2]] for tr in triples])
triple_df = pd.DataFrame(triple_df, columns=['h', 'r', 't'])
# print(triple_df)
two_hop_triple_df = pd.merge(two_triple_df, triple_df, left_on='t', right_on='h')
# print(two_hop_triple_df)
two_step_quadruples = set()
relation_patterns = dict()
for index, row in two_hop_triple_df.iterrows():
head = row["h_x"]
tail = row["t_y"]
r_x = row["r_x"]
r_y = row['r_y']
if tail not in kg.out_related_ents_dict.get(head, set()) and \
head not in kg.in_related_ents_dict.get(tail, set()):
relation_patterns[(r_x, r_y)] = relation_patterns.get((r_x, r_y), 0) + 1
two_step_quadruples.add((head, r_x, r_y, tail))
print("total 3-hop neighbors:", len(two_step_quadruples))
print("total 3-hop relation patterns:", len(relation_patterns))
relation_patterns = sorted(relation_patterns.items(), key=lambda x: x[1], reverse=True)
p = 0.05
num = int(p * len(relation_patterns))
selected_patterns = set()
# for i in range(20, num):
for i in range(5, len(relation_patterns)):
pattern = relation_patterns[i][0]
selected_patterns.add(pattern)
print("selected relation patterns:", len(selected_patterns))
two_step_triples = set()
for head, rx, ry, tail in two_step_quadruples:
if (rx, ry) in selected_patterns:
two_step_triples.add((head, 0, head))
two_step_triples.add((head, rx + ry, tail))
print("selected 3-hop neighbors:", len(two_step_triples))
return two_step_triples
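# Sketch of the collapse step above (ids assumed): a surviving quadruple
# (e1, r_x=5, r_y=7, e3) becomes the self-loop (e1, 0, e1) plus (e1, 12, e3),
# i.e. the two relation ids are summed to label the new long-range edge.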
def generate_2hop_triples(kg, linked_ents=None):
triples = kg.triples
if linked_ents is not None:
triples = remove_unlinked_triples(triples, linked_ents)
triple_df = np.array([[tr[0], tr[1], tr[2]] for tr in triples])
triple_df = pd.DataFrame(triple_df, columns=['h', 'r', 't'])
# print(triple_df)
two_hop_triple_df = | pd.merge(triple_df, triple_df, left_on='t', right_on='h') | pandas.merge |
import numpy as np
import pandas as pd
import hashlib
from collections import OrderedDict  # used by load_qiime_otu_assignment_file below
from pathlib import Path
from biom import parse_table
from biom import Table as BiomTable
from omicexperiment.util import parse_fasta, parse_fastq
def load_biom(biom_filepath):
with open(biom_filepath) as f:
t = parse_table(f)
return t
def is_biomtable_object(obj):
return isinstance(obj, BiomTable)
def biomtable_to_dataframe(biom_table_object):
_bt = biom_table_object
data = _bt.matrix_data.todense()
out = pd.SparseDataFrame(data, index=_bt.ids('observation'),
columns=_bt.ids('sample'))
return out.to_dense()
def biomtable_to_sparsedataframe(biom_table_object):
_bt = biom_table_object
m = _bt.matrix_data
data = [pd.SparseSeries(m[i].toarray().ravel()) for i in np.arange(m.shape[0])]
out = pd.SparseDataFrame(data, index=_bt.ids('observation'),
columns=_bt.ids('sample'))
return out
def load_biom_as_dataframe(biom_filepath):
t = load_biom(biom_filepath)
return biomtable_to_dataframe(t)
def load_fasta(fasta_filepath, calculate_sha1=False):
descs = []
seqs = []
for desc, seq in parse_fasta(fasta_filepath):
descs.append(desc)
seqs.append(seq)
fasta_df = pd.DataFrame({'description': descs, 'sequence': seqs}, columns=['description', 'sequence'])
del descs
del seqs
if calculate_sha1:
fasta_df['sha1'] = fasta_df['sequence'].apply(lambda x: hashlib.sha1(x.encode('utf-8')).hexdigest())
return fasta_df
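# Hypothetical call (file path assumed):
#   fasta_df = load_fasta('reads.fasta', calculate_sha1=True)
# returns a DataFrame with columns ['description', 'sequence', 'sha1'], one row per record.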
def load_fastq(fastq_filepath, calculate_sha1=False):
descs = []
seqs = []
quals = []
for desc, seq, qual in parse_fastq(fastq_filepath):
descs.append(desc)
seqs.append(seq)
quals.append(qual)
fastq_df = pd.DataFrame({'description': descs, 'sequence': seqs, 'qual': quals}, columns=['description', 'sequence', 'qual'])
del descs
del seqs
del quals
if calculate_sha1:
fastq_df['sha1'] = fastq_df['sequence'].apply(lambda x: hashlib.sha1(x.encode('utf-8')).hexdigest())
return fastq_df
def load_fasta_counts(fasta_filepath, sample_name=None):
fasta_df = load_fasta(fasta_filepath, calculate_sha1=True)
if sample_name is None:
counts = fasta_df['sha1'].value_counts().to_frame(name='count')
else:
counts = fasta_df['sha1'].value_counts().to_frame(name=sample_name)
fasta_df.drop('description', axis=1, inplace=True)
fasta_df.set_index('sha1', inplace=True)
joined_df = counts.join(fasta_df)
joined_df.index.name = 'sha1'
return joined_df.drop_duplicates()
def counts_table_from_fasta_files(fasta_filepaths, sample_names=None):
if sample_names is None:
sample_names = [None for i in range(len(fasta_filepaths))]
concated_df = None
seq_sha1_df = None
for fasta, sample_name in zip(fasta_filepaths, sample_names):
fasta_df = load_fasta_counts(fasta, fasta)
seq_sha1_df = pd.concat([seq_sha1_df, fasta_df['sequence']])
fasta_df.drop('sequence', axis=1, inplace=True)
concated_df = pd.concat([concated_df, fasta_df])
del fasta_df
concated_df = concated_df.fillna(0).groupby(level=0).sum()
concated_df.set_index(seq_sha1_df.drop_duplicates(), append=True, inplace=True)
return concated_df
def load_fasta_descriptions(fasta_filepath):
import hashlib
from skbio.parse.sequences import parse_fasta
descs = []
for desc, seq in parse_fasta(fasta_filepath):
descs.append(desc)
fasta_df = pd.DataFrame({'description': descs})
del descs
return fasta_df
def fasta_df_to_counts_table(fasta_df, desc_to_sampleid_func, index='sha1'):
fasta_df['sample'] = fasta_df['description'].apply(desc_to_sampleid_func)
if index == 'sha1' \
and 'sha1' not in fasta_df.columns:
fasta_df['sha1'] = fasta_df['sequence'].apply(lambda x: hashlib.sha1(x.encode('utf-8')).hexdigest())
pivoted = fasta_df.pivot_table(index=index, columns='sample', aggfunc='count', fill_value=0)
fasta_df.drop(['sample', 'sha1'], axis=1, inplace=True)
pivoted.columns = pivoted.columns.droplevel()
#sha1_seqs = pivoted.index.to_series().apply(lambda x: hashlib.sha1(x).hexdigest())
#sha1_seqs.name = 'sha1'
#pivoted.set_index(sha1_seqs, append=True, inplace=True)
return pivoted
def load_uc_file(uc_filepath):
columns = ['Type', 'Cluster', 'Size', 'Id', 'Strand', 'Qlo', 'Tlo', 'Alignment', 'Query', 'Target']
df = pd.read_csv(uc_filepath, names=columns, header=None, sep="\t")
df.rename(columns={'Query': 'observation', 'Cluster': 'cluster'}, inplace=True)
#check for size annotations and take them away
sizes_in_labels = df['observation'].apply(lambda x: ';size=' in x).any()
if sizes_in_labels:
df['observation'] = df['observation'].apply(lambda x: x.split(';size=')[0])
df = df[df['Type'] != 'C']
seeds = df[df['Type'] == 'S']
df_joined = pd.merge(df, seeds, on='cluster', suffixes=('', '_seed'), left_index=True)
df_joined.rename(columns={'observation_seed': 'seed'}, inplace=True)
df_joined.set_index('observation', drop=False, inplace=True)
return df_joined[['observation','cluster', 'seed']]
def load_swarm_otus_file(swarm_otus_filepath):
columns = ['amplicon_a', 'amplicon_b', 'differences', 'cluster', 'steps']
swarms_df = pd.read_csv(swarm_otus_filepath, \
names=columns, \
sep="\t")
duplicate_amplicons = swarms_df.drop_duplicates('amplicon_a')
duplicate_amplicons['amplicon_b'] = duplicate_amplicons['amplicon_a']
concat_df = pd.concat([swarms_df, duplicate_amplicons]).drop_duplicates('amplicon_b')
concat_df.rename(columns={'amplicon_b': 'observation'}, inplace=True)
concat_df.set_index('observation', drop=False, inplace=True)
return concat_df[['observation', 'cluster']].sort_values('cluster')
def load_qiime_otu_assignment_file(otu_assignment_filepath):
with open(otu_assignment_filepath) as f:
lines = f.readlines()
observation_list = []
otu_list = []
for l in lines:
splt = l.split()
otu_name = splt[0].strip()
observations = splt[1:]
for obs in observations:
obs = obs.strip()
observation_list.append(obs)
otu_list.append(otu_name)
observation_to_otu_dict = OrderedDict(observation=observation_list, otu=otu_list)
    del otu_list  # observation_list is kept; it is still needed as the DataFrame index below
df = | pd.DataFrame(observation_to_otu_dict, index=observation_list) | pandas.DataFrame |
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.datasets.multiclass_label_dataset import MulticlassLabelDataset
from aif360.metrics import ClassificationMetric
def test_generalized_entropy_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = | pd.DataFrame(data, columns=['feat', 'label']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 17:13:22 2018
@author: kitreatakataglushkoff
Kitrea's hand-written copied/adjusted version of the analyze_massredistribution.py,
which was last significantly edited Thursday July 18.
UPDATE - Oct 9, 2018 - Kitrea double-checked code, added some comments.
last updated Wed Nov 14 - to clean out bad data in the new large dataset.
UPDATE - March/April, 2020 (ongoing) - Zoe edited script to integrate
new parameters into the existing functions
"""
import pandas as pd
import pickle
import numpy as np
import os
import matplotlib.pyplot as plt
#from scipy.stats import median_absolute_deviation
#%% ===== FUNCTIONS =====
rgi_fp = os.getcwd() + '/../RGI/rgi60/00_rgi60_attribs/'
assert os.path.exists(rgi_fp), '01_rgi60_Alaska.csv'
def selectglaciersrgitable(glac_no=None,
rgi_regionsO1=None,
rgi_regionsO2=None,
rgi_glac_number=None,
rgi_fp=rgi_fp,
rgi_cols_drop=['GLIMSId','BgnDate','EndDate','Status','Connect','Linkages','Name'],
rgi_O1Id_colname='glacno',
rgi_glacno_float_colname='RGIId_float',
indexname='GlacNo'):
"""
Select all glaciers to be used in the model run according to the regions and glacier numbers defined by the RGI
glacier inventory. This function returns the rgi table associated with all of these glaciers.
glac_no : list of strings
list of strings of RGI glacier numbers (e.g., ['1.00001', '13.00001'])
rgi_regionsO1 : list of integers
list of integers of RGI order 1 regions (e.g., [1, 13])
rgi_regionsO2 : list of integers or 'all'
list of integers of RGI order 2 regions or simply 'all' for all the order 2 regions
rgi_glac_number : list of strings
list of RGI glacier numbers without the region (e.g., ['00001', '00002'])
Output: Pandas DataFrame of the glacier statistics for each glacier in the model run
(rows = GlacNo, columns = glacier statistics)
"""
if glac_no is not None:
glac_no_byregion = {}
rgi_regionsO1 = [int(i.split('.')[0]) for i in glac_no]
rgi_regionsO1 = list(set(rgi_regionsO1))
for region in rgi_regionsO1:
glac_no_byregion[region] = []
for i in glac_no:
region = i.split('.')[0]
glac_no_only = i.split('.')[1]
glac_no_byregion[int(region)].append(glac_no_only)
for region in rgi_regionsO1:
glac_no_byregion[region] = sorted(glac_no_byregion[region])
# Create an empty dataframe
rgi_regionsO1 = sorted(rgi_regionsO1)
glacier_table = pd.DataFrame()
for region in rgi_regionsO1:
if glac_no is not None:
rgi_glac_number = glac_no_byregion[region]
for i in os.listdir(rgi_fp):
if i.startswith(str(region).zfill(2)) and i.endswith('.csv'):
rgi_fn = i
print(rgi_fn)
try:
csv_regionO1 = pd.read_csv(rgi_fp + rgi_fn)
except:
csv_regionO1 = pd.read_csv(rgi_fp + rgi_fn, encoding='latin1')
# Populate glacer_table with the glaciers of interest
if rgi_regionsO2 == 'all' and rgi_glac_number == 'all':
print("All glaciers within region(s) %s are included in this model run." % (region))
if glacier_table.empty:
glacier_table = csv_regionO1
else:
glacier_table = pd.concat([glacier_table, csv_regionO1], axis=0)
elif rgi_regionsO2 != 'all' and rgi_glac_number == 'all':
print("All glaciers within subregion(s) %s in region %s are included in this model run." %
(rgi_regionsO2, region))
for regionO2 in rgi_regionsO2:
if glacier_table.empty:
glacier_table = csv_regionO1.loc[csv_regionO1['O2Region'] == regionO2]
else:
glacier_table = (pd.concat([glacier_table, csv_regionO1.loc[csv_regionO1['O2Region'] ==
regionO2]], axis=0))
else:
if len(rgi_glac_number) < 20:
print("%s glaciers in region %s are included in this model run: %s" % (len(rgi_glac_number), region,
rgi_glac_number))
else:
print("%s glaciers in region %s are included in this model run: %s and more" %
(len(rgi_glac_number), region, rgi_glac_number[0:50]))
rgiid_subset = ['RGI60-' + str(region).zfill(2) + '.' + x for x in rgi_glac_number]
rgiid_all = list(csv_regionO1.RGIId.values)
rgi_idx = [rgiid_all.index(x) for x in rgiid_subset]
if glacier_table.empty:
glacier_table = csv_regionO1.loc[rgi_idx]
else:
glacier_table = (pd.concat([glacier_table, csv_regionO1.loc[rgi_idx]],
axis=0))
glacier_table = glacier_table.copy()
# reset the index so that it is in sequential order (0, 1, 2, etc.)
glacier_table.reset_index(inplace=True)
# change old index to 'O1Index' to be easier to recall what it is
glacier_table.rename(columns={'index': 'O1Index'}, inplace=True)
# Record the reference date
glacier_table['RefDate'] = glacier_table['BgnDate']
# if there is an end date, then roughly average the year
enddate_idx = glacier_table.loc[(glacier_table['EndDate'] > 0), 'EndDate'].index.values
glacier_table.loc[enddate_idx,'RefDate'] = (
np.mean((glacier_table.loc[enddate_idx,['BgnDate', 'EndDate']].values / 10**4).astype(int),
axis=1).astype(int) * 10**4 + 9999)
# drop columns of data that is not being used
glacier_table.drop(rgi_cols_drop, axis=1, inplace=True)
# add column with the O1 glacier numbers
glacier_table[rgi_O1Id_colname] = (
glacier_table['RGIId'].str.split('.').apply(pd.Series).loc[:,1].astype(int))
glacier_table['rgino_str'] = [x.split('-')[1] for x in glacier_table.RGIId.values]
glacier_table[rgi_glacno_float_colname] = (np.array([np.str.split(glacier_table['RGIId'][x],'-')[1]
for x in range(glacier_table.shape[0])]).astype(float))
# set index name
glacier_table.index.name = indexname
print("This study is focusing on %s glaciers in region %s" % (glacier_table.shape[0], rgi_regionsO1))
return glacier_table
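# Hypothetical call (glacier numbers assumed):
#   main_glac_rgi = selectglaciersrgitable(glac_no=['1.00570', '1.22193'])
# loads the region-01 attribute csv from rgi_fp, keeps the two requested glaciers and
# returns their RGI attributes indexed by 'GlacNo', with added columns such as
# 'glacno', 'rgino_str' and 'RGIId_float'.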
def weighted_avg_and_std(values, weights):
"""
Return the weighted average and standard deviation.
values, weights -- Numpy ndarrays with the same shape.
"""
average = np.average(values, weights=weights)
# Fast and numerically precise:
variance = np.average((values-average)**2, weights=weights)
return average, variance**0.5
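# Quick worked check of weighted_avg_and_std: for values [1.0, 3.0] with weights [1.0, 3.0],
# average = (1*1 + 3*3) / 4 = 2.5 and variance = (1*(1-2.5)**2 + 3*(3-2.5)**2) / 4 = 0.75,
# so the function returns (2.5, 0.75**0.5) ~= (2.5, 0.866).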
def weighted_percentile(sorted_list, weights, percentile):
"""
Calculate weighted percentile of a sorted list
"""
assert percentile <= 1 or percentile >=0, 'Error: Percentile outside of 0-1'
weights_cumsum_norm_high = np.cumsum(weights) / np.sum(weights)
# print(weights_cumsum_norm_high)
weights_norm = weights / np.sum(weights)
weights_cumsum_norm_low = weights_cumsum_norm_high - weights_norm
# print(weights_cumsum_norm_low)
percentile_idx_high = np.where(weights_cumsum_norm_high >= percentile)[0][0]
# print(percentile_idx_high)
percentile_idx_low = np.where(weights_cumsum_norm_low <= percentile)[0][-1]
# print(percentile_idx_low)
if percentile_idx_low == percentile_idx_high:
value_percentile = sorted_list[percentile_idx_low]
else:
value_percentile = np.mean([sorted_list[percentile_idx_low], sorted_list[percentile_idx_high]])
return value_percentile
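# Quick worked check of weighted_percentile: for sorted_list = [1, 2, 3, 4] with equal
# weights [1, 1, 1, 1] and percentile = 0.5, the high/low cumulative weights give
# percentile_idx_high = 1 and percentile_idx_low = 2, so the function returns
# np.mean([3, 2]) = 2.5, the usual even-count median.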
def normalized_stats(norm_list):
# Merge norm_list to make array of all glaciers with same elevation normalization space
    max_length = len(max(norm_list,key=len)) # number of rows of the glacier with the most normalized values
# All data normalized: 1st column is normalized elev, the others are norm dhdt for each glacier
norm_all = np.zeros((max_length, len(norm_list)+1))
# First column is normalized elevation, pulled from the glac with most norm vals
norm_all[:,0] = max(norm_list,key=len)[:,0]
norm_all_area = norm_all.copy()
norm_elev_binsize = (norm_all_area[0,0] - norm_all_area[1,0])
# Loop through each glacier's normalized array (where col1 is elev_norm and col2 is norm dhdt)
for n in range(len(norm_list)):
norm_single = norm_list[n] # get one glacier at a time
# Fill in nan values for elev_norm of 0 and 1 with nearest neighbor
norm_single[0,1] = norm_single[np.where(~np.isnan(norm_single[:,1]))[0][0], 1]
norm_single[-1,1] = norm_single[np.where(~np.isnan(norm_single[:,1]))[0][-1], 1]
norm_single[0,2] = norm_single[np.where(~np.isnan(norm_single[:,2]))[0][0], 2]
norm_single[-1,2] = norm_single[np.where(~np.isnan(norm_single[:,2]))[0][-1], 2]
# Remove nan values
        norm_single = norm_single[np.where(~np.isnan(norm_single[:,2]))]  # ~ negates np.isnan, keeping only rows with non-NaN values in column 2
elev_single = norm_single[:,0]
dhdt_single = norm_single[:,1]
area_single = norm_single[:,2]
area_single_cumsum = np.cumsum(area_single)
#loop through each area value of the glacier, and add it and interpolate to add to the norm_all array.
for r in range(0, max_length):
# Find value need to interpolate to
norm_elev_value = norm_all_area[r,0]
norm_elev_lower = norm_elev_value - norm_elev_binsize/2
if norm_elev_lower <= 0:
norm_elev_lower = 0
# ----- AREA CALCULATION -----
if r == 0:
area_cumsum_upper = 0
if norm_elev_lower > 0:
# if r < max_length-1:
# print(r, norm_elev_value, norm_elev_value - norm_elev_binsize/2)
# Find index of value above it from dhdt_norm, which is a different size
upper_idx = np.where(elev_single == elev_single[elev_single >= norm_elev_lower].min())[0][0]
# Find index of value below it
# print(len(elev_single), max_length)
# print(elev_single, norm_elev_lower)
lower_idx = np.where(elev_single == elev_single[elev_single < norm_elev_lower].max())[0][0]
#get the two values, based on the indices
upper_elev = elev_single[upper_idx]
upper_value = area_single_cumsum[upper_idx]
lower_elev = elev_single[lower_idx]
lower_value = area_single_cumsum[lower_idx]
#Linearly Interpolate between two values, and plug in interpolated value into norm_all
area_cumsum_interp = (lower_value + (norm_elev_lower - lower_elev) / (upper_elev - lower_elev) *
(upper_value - lower_value))
else:
area_cumsum_interp = area_single_cumsum[-1]
# Calculate area within that bin
norm_all_area[r,n+1] = area_cumsum_interp - area_cumsum_upper
# Carry the cumulative area forward for the next bin
area_cumsum_upper = area_cumsum_interp
# ----- DH/DT CALCULATION -----
if r == 0:
# Put the first dhdt value into norm_all; n+1 because the first column holds the normalized elevations.
norm_all[r,n+1] = dhdt_single[0]
elif r == (max_length - 1):
# Put the last value into the last row of the glacier's 'stretched out' (interpolated) normalized curve.
norm_all[r,n+1] = dhdt_single[-1]
else:
# Find value need to interpolate to
norm_elev_value = norm_all[r,0] #go through each row in the elev (col1)
# Find index of value above it from dhdt_norm, which is a different size
upper_idx = np.where(elev_single == elev_single[elev_single >= norm_elev_value].min())[0][0]
# Find index of value below it
lower_idx = np.where(elev_single == elev_single[elev_single < norm_elev_value].max())[0][0]
#get the two values, based on the indices
upper_elev = elev_single[upper_idx]
upper_value = dhdt_single[upper_idx]
lower_elev = elev_single[lower_idx]
lower_value = dhdt_single[lower_idx]
#Linearly Interpolate between two values, and plug in interpolated value into norm_all
norm_all[r,n+1] = (lower_value + (norm_elev_value - lower_elev) / (upper_elev - lower_elev) *
(upper_value - lower_value))
# Compute mean and standard deviation
norm_all_stats = pd.DataFrame()
norm_all_stats['norm_elev'] = norm_all[:,0]
# DH/DT STATISTICS
norm_all_stats['norm_dhdt_mean'] = np.nanmean(norm_all[:,1:], axis=1)
norm_all_stats['norm_dhdt_med'] = np.nanmedian(norm_all[:,1:], axis=1)
norm_all_stats['norm_dhdt_std'] = np.nanstd(norm_all[:,1:], axis=1)
norm_all_stats['norm_dhdt_16perc'] = np.nanpercentile(norm_all[:,1:], 16, axis=1)
norm_all_stats['norm_dhdt_84perc'] = np.nanpercentile(norm_all[:,1:], 84, axis=1)
# AREA STATISTICS
norm_all_stats['norm_area'] = np.nansum(norm_all_area[:,1:], axis=1)
norm_all_stats['norm_area_perc'] = norm_all_stats['norm_area'] / norm_all_stats['norm_area'].sum() * 100
norm_all_stats['norm_area_perc_cumsum'] = np.cumsum(norm_all_stats['norm_area_perc'])
# area-weighted stats
norm_all_stats['norm_dhdt_mean_areaweighted'] = np.nan
norm_all_stats['norm_dhdt_med_areaweighted'] = np.nan
norm_all_stats['norm_dhdt_std_areaweighted'] = np.nan
norm_all_stats['norm_dhdt_16perc_areaweighted'] = np.nan
norm_all_stats['norm_dhdt_84perc_areaweighted'] = np.nan
for nrow in np.arange(0,norm_all.shape[0]):
# Select values
norm_values = norm_all[nrow,1:]
area_values = norm_all_area[nrow,1:]
# Sorted values
area_values_sorted = [x for _,x in sorted(zip(norm_values, area_values))]
norm_values_sorted = sorted(norm_values)
# Statistics
weighted_mean, weighted_std = weighted_avg_and_std(norm_values_sorted, area_values_sorted)
weighted_med = weighted_percentile(norm_values_sorted, area_values_sorted, 0.5)
weighted_16perc = weighted_percentile(norm_values_sorted, area_values_sorted, 0.16)
weighted_84perc = weighted_percentile(norm_values_sorted, area_values_sorted, 0.84)
# record stats
norm_all_stats.loc[nrow,'norm_dhdt_mean_areaweighted'] = weighted_mean
norm_all_stats.loc[nrow,'norm_dhdt_std_areaweighted'] = weighted_std
norm_all_stats.loc[nrow,'norm_dhdt_med_areaweighted'] = weighted_med
norm_all_stats.loc[nrow,'norm_dhdt_16perc_areaweighted'] = weighted_16perc
norm_all_stats.loc[nrow,'norm_dhdt_84perc_areaweighted'] = weighted_84perc
return norm_all_stats
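# Usage sketch (assumption inferred from the code above, not stated in the original script):
# each entry of norm_list is an (n_bins x 3) array whose columns are
# [normalized elevation (descending), normalized dhdt, bin area]. A minimal fabricated call:
# >>> glac1 = np.column_stack([np.linspace(1, 0, 5), np.linspace(0, -1, 5), np.full(5, 0.1)])
# >>> glac2 = np.column_stack([np.linspace(1, 0, 4), np.linspace(0, -0.5, 4), np.full(4, 0.2)])
# >>> stats_df = normalized_stats([glac1, glac2])
# >>> stats_df.columns[:3].tolist()  # ['norm_elev', 'norm_dhdt_mean', 'norm_dhdt_med']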
def pickle_data(fn, data):
"""Pickle data
Parameters
----------
fn : str
filename including filepath
data : list, etc.
data to be pickled
Returns
-------
.pkl file
saves .pkl file of the data
"""
with open(fn, 'wb') as f:
pickle.dump(data, f)
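# Minimal usage sketch (illustrative only; the filename is hypothetical). The inverse operation is
# a plain pickle.load, as done further below when cached datasets are reloaded:
# pickle_data('example_output.pkl', [1, 2, 3])
# with open('example_output.pkl', 'rb') as f:
#     reloaded = pickle.load(f)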
#%%
# TO-DO LIST:
print('\nTo-do list:\n - code Larsen! \n\n')
#%% ===== REGION AND GLACIER FILEPATH OPTIONS =====
# User defines regions of interest
group1 = ['01', '02', '09', '12', '13', '14', '15', '16', '17', '18']
group2 = ['03', '04']
group3 = ['05', '06', '07', '08', '10', '11']
all_regions = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18']
rois = all_regions
if 'davidrounce' in os.getcwd():
binnedcsv_fp = ('/Users/davidrounce/Documents/Dave_Rounce/DebrisGlaciers_WG/Melt_Intercomparison/output/' +
'mb_bins_all/csv/')
elif 'zoescrewvala' in os.getcwd():
binnedcsv_fp = '/Users/zoescrewvala/Documents/Alaska_REU_2019/mb_binned_data/'
else:
assert False, 'add correct binnedcsv_fp'
#for roi in rois:
# assert os.path.exists(rgi_fp), roi
# OPTION
option_plot_multipleglaciers_multiplethresholds = False
option_plot_multipleregions = True
# Columns to use for mass balance and dhdt (specify mean or median)
#dhdt_stat = 'mean'
dhdt_stat = 'med'
if dhdt_stat == 'mean':
mb_cn = 'mb_bin_mean_mwea'
dhdt_cn = 'dhdt_bin_mean_ma'
else:
mb_cn = 'mb_bin_med_mwea'
dhdt_cn = 'dhdt_bin_med_ma'
dhdt_max = 2.5
dhdt_min = -50
add_dc_classification_to_termtype = False
dc_perc_threshold = 5
# Quality control options
binsize = 50 # resample bins to remove noise
min_elevbins = 5 # minimum number of elevation bins
min_glac_area = 2 # minimum total glacier area size (km2) to consider (removes small glaciers)
perc_remove = 2.5 # percentage of glacier area to remove (1 means 1 - 99% are used); set to 0 to keep everything
min_bin_area_km2 = 0.02 # minimum binned area (km2) to remove everything else; set to 0 to keep everything
option_remove_surge_glac = True
option_remove_all_pos_dhdt = True
option_remove_dhdt_acc = True
option_remove_acc_lt_abl = True
# ===== PLOT OPTIONS =====
# Option to save figures
option_savefigs = True
fig_fp = binnedcsv_fp + '../figs/'
glacier_plots_transparency = 0.3
#%% Select Files
# Load file if it already exists
overwrite = False
pkl_fp = binnedcsv_fp + '../pickle_datasets/'
if not os.path.exists(pkl_fp):
os.makedirs(pkl_fp)
binnedcsv_all_fullfn = pkl_fp + 'binnedcsv_all.pkl'
main_glac_rgi_fullfn = pkl_fp + 'main_glac_rgi_all.pkl'
# Load pickle data if it exists
if os.path.exists(binnedcsv_all_fullfn) and not overwrite:
# Binnedcsv data
with open(binnedcsv_all_fullfn, 'rb') as f:
binnedcsv_all = pickle.load(f)
# Main_glac_rgi data
with open(main_glac_rgi_fullfn, 'rb') as f:
main_glac_rgi = pickle.load(f)
# Otherwise, process the data (all regions)
else:
print('redoing pickle datasets')
# Process all regions
rois = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18']
# Find files for analysis; create list of all binned filenames
binnedcsv_fullfns_all = []
rgiids_all = []
binnedcsv_fullfns_allrois = []
for roi in rois:
binnedcsv_fullfns_roi = []
rgiids_roi = []
if roi in ['13','14','15']:
roi_4fp = 'HMA'
else:
roi_4fp = roi
binnedcsv_fp_roi = binnedcsv_fp + roi_4fp + '/'
for i in os.listdir(binnedcsv_fp_roi):
if i.startswith(str(int(roi))) and i.endswith('_mb_bins.csv'):
rgiids_roi.append(i.split('_')[0])
binnedcsv_fullfns_roi.append(binnedcsv_fp_roi + i)
# Sorted files
binnedcsv_fullfns_roi = [x for _,x in sorted(zip(rgiids_roi, binnedcsv_fullfns_roi))]
rgiids_roi = sorted(rgiids_roi)
binnedcsv_fullfns_all.extend(binnedcsv_fullfns_roi)
binnedcsv_fullfns_allrois.append(binnedcsv_fullfns_roi)
rgiids_all.extend(rgiids_roi)
main_glac_rgi_all = selectglaciersrgitable(glac_no=rgiids_all)
b = main_glac_rgi_all.copy()
main_glac_rgi_all['binnedcsv_fullfn'] = binnedcsv_fullfns_all
main_glac_rgi_all['roi'] = [x.split('-')[1].split('.')[0] for x in main_glac_rgi_all.RGIId.values]
main_glac_rgi = main_glac_rgi_all[main_glac_rgi_all['Area'] > min_glac_area].copy()
main_glac_rgi.reset_index(drop=True, inplace=True)
# Add statistics for each glacier
main_glac_rgi['Zmean'] = np.nan
main_glac_rgi['PercDebris'] = np.nan
main_glac_rgi['HypsoIndex'] = np.nan
main_glac_rgi['AAR'] = np.nan
main_glac_rgi['Z_maxloss_norm'] = np.nan
main_glac_rgi['mb_abl_lt_acc'] = np.nan
main_glac_rgi['nbins'] = np.nan
main_glac_rgi['Size'] = np.nan
binnedcsv_all = []
for nglac, rgiid in enumerate(main_glac_rgi.rgino_str.values):
# for nglac, rgiid in enumerate(main_glac_rgi.rgino_str.values[0:1]):
if nglac%100 == 0:
print(nglac, rgiid)
binnedcsv_fullfn = main_glac_rgi.loc[nglac,'binnedcsv_fullfn']
binnedcsv = pd.read_csv(binnedcsv_fullfn)
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
# coding=utf-8
import collections
import pandas as pd
import tensorflow as tf
import _pickle as pickle
from absl import logging
from transformers import BertTokenizer
LABELS = []
class InputExample(object):
def __init__(self, text=None, labels=None):
# List of tokens
self.text = text
# List of labels
self.labels = labels
class InputFeatures(object):
def __init__(self, input_ids, input_mask, segment_ids, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
def _load_dataset(name):
dataset = {"text": [], "labels": []}
logging.info(name + ": " + str(tf.io.gfile.exists(name)))
with tf.io.gfile.GFile(name) as f:
words = []
labels = []
for line in f:
contents = line.strip()
tokens = contents.split(' ')
if contents.startswith("-DOCSTART-"):
continue
if len(tokens) == 2 or len(tokens) == 4:
words.append(tokens[0])
labels.append(tokens[-1])
else:
if len(contents) == 0 and len(words) > 0:
for l in labels:
if l not in LABELS:
LABELS.append(l)
dataset["text"].append(words)
dataset["labels"].append(labels)
words = []
labels = []
return pd.DataFrame.from_dict(dataset)
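# Minimal usage sketch (illustrative; the CoNLL-style file path below is hypothetical):
# train_df = _load_dataset('data/train.txt')
# Each row of the returned DataFrame holds one sentence, with the token list in 'text' and the
# tag list in 'labels'; the module-level LABELS list is populated as a side effect.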
# -*- coding: utf-8 -*-
"""
This module is EXPERIMENTAL, which means that tests are missing.
The reason is that the coastdat2 dataset is deprecated and will be replaced by
the OpenFred dataset from Helmholtz-Zentrum Geesthacht. It should work though.
This module is designed for the use with the coastdat2 weather data set
of the Helmholtz-Zentrum Geesthacht.
A description of the coastdat2 data set can be found here:
https://www.earth-syst-sci-data.net/6/147/2014/
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
# Python libraries
import os
import datetime
import logging
from collections import namedtuple
import calendar
# External libraries
import requests
import pandas as pd
import pvlib
from shapely.geometry import Point
from windpowerlib.wind_turbine import WindTurbine
# Internal modules
from reegis import tools
from reegis import feedin
from reegis import config as cfg
from reegis import powerplants as powerplants
from reegis import geometries
from reegis import bmwi
def download_coastdat_data(filename=None, year=None, url=None,
test_only=False, overwrite=True):
"""
Download coastdat data set from internet source.
Parameters
----------
filename : str
Full path with the filename, where the downloaded file will be stored.
year : int or None
Year of the weather data set. If a url is passed this value will be
ignored because it is used to create the default url.
url : str or None
Own url can be used if the default url does not work and one found an
alternative valid url.
test_only : bool
If True the url is tested but the file will not be downloaded
(default: False).
overwrite : bool
If True the file will be downloaded even if it already exist.
(default: True)
Returns
-------
str or None : If the url is valid the filename is returned otherwise None.
Examples
--------
>>> download_coastdat_data(year=2014, test_only=True)
'coastDat2_de_2014.h5'
>>> print(download_coastdat_data(url='https://osf.io/url', test_only=True))
None
>>> download_coastdat_data(filename='w14.hd5', year=2014) # doctest: +SKIP
"""
if url is None:
url_ids = cfg.get_dict("coastdat_url_id")
url_id = url_ids.get(str(year), None)
if url_id is not None:
url = cfg.get("coastdat", "basic_url").format(url_id=url_id)
if url is not None and not test_only:
response = requests.get(url, stream=True)
if response.status_code == 200:
msg = "Downloading the coastdat2 file of {0} from {1} ..."
logging.info(msg.format(year, url))
if filename is None:
headers = response.headers["Content-Disposition"]
filename = (
headers.split("; ")[1].split("=")[1].replace('"', "")
)
tools.download_file(filename, url, overwrite=overwrite)
return filename
else:
raise ValueError("URL not valid: {0}".format(url))
elif url is not None and test_only:
response = requests.get(url, stream=True)
if response.status_code == 200:
headers = response.headers["Content-Disposition"]
filename = headers.split("; ")[1].split("=")[1].replace('"', "")
else:
filename = None
return filename
else:
raise ValueError("No URL found for {0}".format(year))
def fetch_id_by_coordinates(latitude, longitude):
"""
Get nearest weather data set to a given location.
Parameters
----------
latitude : float
longitude : float
Returns
-------
int : coastdat id
Examples
--------
>>> fetch_id_by_coordinates(53.655119, 11.181475)
1132101
"""
coastdat_polygons = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
location = Point(longitude, latitude)
cid = coastdat_polygons[coastdat_polygons.contains(location)].index
if len(cid) == 0:
msg = "No id found for latitude {0} and longitude {1}."
logging.warning(msg.format(latitude, longitude))
return None
elif len(cid) == 1:
return cid[0]
def fetch_data_coordinates_by_id(coastdat_id):
"""
Returns the coordinates of the weather data set.
Parameters
----------
coastdat_id : int or str
ID of the coastdat weather data set
Returns
-------
namedtuple : Fields are latitude and longitude
Examples
--------
>>> location=fetch_data_coordinates_by_id(1132101)
>>> round(location.latitude, 3)
53.692
>>> round(location.longitude, 3)
11.351
"""
coord = namedtuple("weather_location", "latitude, longitude")
coastdat_polygons = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
c = coastdat_polygons.loc[int(coastdat_id)].geometry.centroid
return coord(latitude=c.y, longitude=c.x)
def fetch_coastdat_weather(year, coastdat_id):
"""
    Fetch one coastdat weather data set.
Parameters
----------
year : int
Year of the weather data set
coastdat_id : numeric
ID of the coastdat data set.
Returns
-------
pd.DataFrame : Weather data set.
Examples
--------
>>> coastdat_id=fetch_id_by_coordinates(53.655119, 11.181475)
>>> fetch_coastdat_weather(2014, coastdat_id)['v_wind'].mean().round(2)
4.39
"""
weather_file_name = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file_name):
download_coastdat_data(filename=weather_file_name, year=year)
key = "/A{0}".format(int(coastdat_id))
return pd.DataFrame(pd.read_hdf(weather_file_name, key))
def adapt_coastdat_weather_to_pvlib(weather, loc):
"""
Adapt the coastdat weather data sets to the needs of the pvlib.
Parameters
----------
weather : pandas.DataFrame
Coastdat2 weather data set.
loc : pvlib.location.Location
The coordinates of the weather data point.
Returns
-------
pandas.DataFrame : Adapted weather data set.
Examples
--------
>>> cd_id=1132101
>>> cd_weather=fetch_coastdat_weather(2014, cd_id)
>>> c=fetch_data_coordinates_by_id(cd_id)
>>> location=pvlib.location.Location(**getattr(c, '_asdict')())
>>> pv_weather=adapt_coastdat_weather_to_pvlib(cd_weather, location)
>>> 'ghi' in cd_weather.columns
False
>>> 'ghi' in pv_weather.columns
True
"""
w = pd.DataFrame(weather.copy())
w["temp_air"] = w.temp_air - 273.15
w["ghi"] = w.dirhi + w.dhi
clearskydni = loc.get_clearsky(w.index).dni
w["dni"] = pvlib.irradiance.dni(
w["ghi"],
w["dhi"],
pvlib.solarposition.get_solarposition(
w.index, loc.latitude, loc.longitude
).zenith,
clearsky_dni=clearskydni,
)
return w
def adapt_coastdat_weather_to_windpowerlib(weather, data_height):
"""
    Adapt the coastdat weather data sets to the needs of the windpowerlib.
Parameters
----------
weather : pandas.DataFrame
Coastdat2 weather data set.
data_height : dict
The data height for each weather data column.
Returns
-------
pandas.DataFrame : Adapted weather data set.
Examples
--------
>>> cd_id=1132101
>>> cd_weather=fetch_coastdat_weather(2014, cd_id)
>>> data_height=cfg.get_dict('coastdat_data_height')
>>> wind_weather=adapt_coastdat_weather_to_windpowerlib(
... cd_weather, data_height)
>>> cd_weather.columns.nlevels
1
>>> wind_weather.columns.nlevels
2
"""
weather = pd.DataFrame(weather.copy())
cols = {
"v_wind": "wind_speed",
"z0": "roughness_length",
"temp_air": "temperature",
}
weather.rename(columns=cols, inplace=True)
dh = [(key, data_height[key]) for key in weather.columns]
weather.columns = pd.MultiIndex.from_tuples(dh)
return weather
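# Note: the adapted frame above carries a two-level column index of
# (name, height) tuples, e.g. ('wind_speed', 10) if the configured data
# height for that column is 10 m (the height value here is an illustrative
# assumption; the real values come from 'coastdat_data_height' in the ini).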
def normalised_feedin_for_each_data_set(
year, wind=True, solar=True, overwrite=False
):
"""
Loop over all weather data sets (regions) and calculate a normalised time
series for each data set with the given parameters of the power plants.
This file could be more elegant and shorter but it will be rewritten soon
with the new feedinlib features.
    Parameters
    ----------
    year : int
The year of the weather data set to use.
wind : boolean
Set to True if you want to create wind feed-in time series.
solar : boolean
Set to True if you want to create solar feed-in time series.
Returns
-------
"""
# Get coordinates of the coastdat data points.
data_points = pd.read_csv(
os.path.join(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_centroid"),
),
index_col="gid",
)
pv_sets = None
wind_sets = None
# Open coastdat-weather data hdf5 file for the given year or try to
# download it if the file is not found.
weather_file_name = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file_name):
download_coastdat_data(year=year, filename=weather_file_name)
weather = pd.HDFStore(weather_file_name, mode="r")
# Fetch coastdat data heights from ini file.
data_height = cfg.get_dict("coastdat_data_height")
# Create basic file and path pattern for the resulting files
coastdat_path = os.path.join(cfg.get("paths_pattern", "coastdat"))
feedin_file = os.path.join(
coastdat_path, cfg.get("feedin", "file_pattern")
)
# Fetch coastdat region-keys from weather file.
key_file_path = coastdat_path.format(year="", type="")[:-2]
key_file = os.path.join(key_file_path, "coastdat_keys.csv")
if not os.path.isfile(key_file):
coastdat_keys = weather.keys()
if not os.path.isdir(key_file_path):
os.makedirs(key_file_path)
| pd.Series(coastdat_keys) | pandas.Series |
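# A minimal usage sketch for normalised_feedin_for_each_data_set above
# (assumes a complete reegis configuration with valid 'paths' and 'coastdat'
# sections; the year is illustrative):
#
#     normalised_feedin_for_each_data_set(2014, wind=True, solar=True)
#
# This downloads the coastdat2 HDF5 file for 2014 if it is missing and then
# loops over all weather regions to create normalised feed-in time series.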
import re
import os
import pandas as pd
import numpy as np
def readRuns(parallel_runs_harddisk, time_step, NO_ITERATIONS):
"""
Read eplusmtr files in a folder and combine them in one dataframe, formatted based on time_step.
:param parallel_runs_harddisk: location of eplusmtr.csv files.
:param time_step: resample to 'month' or 'year'.
:param NO_ITERATIONS: number of files to include in new dataframe.
:return:
"""
def strip_columns(df):
"""
Rename columns of loaded eplusmtr file
        - get rid of the name "Thermal Zone"
- delete any unwanted symbols
- strip blank spaces
- split by ':'
- delete word "Interior"
"""
cols = df.columns.tolist()
for i, v in enumerate(cols): # rename columns
if 'THERMAL ZONE' in cols[i]:
rest = cols[i].split(':', 1)[1]
rest = ''.join(cols[i].partition('ZONE:')[-1:])
rest = re.sub("([(\[]).*?([)\]])", "\g<1>\g<2>", rest) # remove text within symbols
rest = re.sub("[(\[].*?[)\]]", "", rest) # remove symbols
rest = rest.strip() # remove leading and trailing spaces
elif ':' not in cols[i]:
rest = cols[i]
rest = re.sub("([(\[]).*?([)\]])", "\g<1>\g<2>", rest) # remove text within symbols
rest = re.sub("[(\[].*?[)\]]", "", rest) # remove symbols
rest = rest.strip() # remove leading and trailing spaces
else:
rest = cols[i].split(':', 1)[0] # removes all characters after ':'
rest = rest.replace('Interior', '')
df.rename(columns={cols[i]: rest}, inplace=True)
return df
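    # For example, the helper above maps the (illustrative, assumed) meter
    # column 'InteriorLights:Electricity [J](Hourly)' through its final
    # branch: it is cut at the first ':' to 'InteriorLights' and, after
    # dropping 'Interior', renamed to 'Lights'.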
def read_eplusout(df):
"""
Read an eplusmtr.csv file and manipulate index and content
- delete design days
- set index
- remove last row
- displace index to align with measured data time range.
"""
if df.shape[0] < 10000: # hourly
df = df[48:] # exclude 2 design days
rng = pd.date_range(start='09/01/2016 01:00:00', end='09/01/2017', freq='H')
df = df.set_index(rng)
            df = df[:-1]  # remove the last row because it is in the next year.
else:
df = df[48 * 4:]
rng = pd.date_range(start='09/01/2016 00:15:00', end='09/01/2017', freq='15Min')
df = df.set_index(rng)
df.index = df.index - | pd.DateOffset(hours=-.75) | pandas.DateOffset |
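# A minimal usage sketch for readRuns above (folder path and iteration count
# are illustrative assumptions):
#
#     runs = readRuns('parallel_runs', time_step='month', NO_ITERATIONS=100)
#
# Every eplusmtr.csv file in the folder is cleaned with strip_columns,
# re-indexed with read_eplusout and combined into one dataframe at the
# requested resolution ('month' or 'year').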
'''Check the datasets for simulation'''
import os
from basis.file import downloadDatasets
existing_datasets = os.path.exists("haikou-experiments/datasets")
if existing_datasets == False:
print("Downloading datasets...")
print("If failed, you can download them from https://drive.google.com/file/d/1yi3aNhB6xc1vjsWX5pq9eb5rSyDiyeRw/view?usp=sharing")
downloadDatasets()
'''import neccessary dependency'''
import random
import simpy
import time
import pandas as pd
import datetime
import csv
import numpy as np
from basis.schedule import Schedule
from basis.setting import MAX_SEARCH_LAYERS,MAX_DETOUR_LENGTH
from basis.setting import WAITING_TIME,PERIODS_MINUTES,getSpeed,SPEED
from basis.time_periods import ALL_PERIODS,TIME_DICS,PERIODS
from basis.edges import ALL_EDGES,ALL_EDGES_DIC
from basis.vertexes import ALL_VERTEXES
from basis.neighbor import ALL_NEIGHBOR_EDGES
from basis.assistant import getID
import progressbar
ALL_PASSENGERS = {}
EDGES_TO_CUSTOMERS = [[] for _ in range(len(ALL_EDGES))] # The customers existing in each edge
RANDOM_SEED = 30
class CarpoolSimulation(object):
def __init__(self, env, max_time):
self.begin_time = time.strftime("%m-%d %H:%M:%S", time.localtime())
self.overall_success = 0
self.schedule_by_history = True
self.env = env
self.max_time = max_time
self.all_OD = False
self.COMBINED_OD = True
self.tendency = 1 # Proportion of passengers who choose carpooling
self.possibleOD()
self.env.process(self.generateByHistory())
def possibleOD(self):
        '''Load all origin-destination pairs'''
ODs_df = | pd.read_csv("haikou-experiments/network/ODs_combined.csv") | pandas.read_csv |
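# A minimal sketch of how the simulation above is typically driven with
# simpy (the run length is an illustrative assumption):
#
#     random.seed(RANDOM_SEED)
#     env = simpy.Environment()
#     simulation = CarpoolSimulation(env, max_time=24 * 3600)
#     env.run(until=simulation.max_time)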
import tensorflow as tf
import numpy as np
import logging
import matplotlib.pyplot as plt
import json
import os
import rnn
from reactions import QuadraticEval, ConstraintQuadraticEval, RealReaction
from logger import get_handlers
from collections import namedtuple
from sklearn.metrics.pairwise import euclidean_distances
import pandas as pd
NUM_DIMENSIONS = 3
logging.basicConfig(level=logging.INFO, handlers=get_handlers())
logger = logging.getLogger()
state_space = | pd.read_csv('EtNH3Istateset.csv') | pandas.read_csv |
import copy
import logging
import pickle
import re
from datetime import datetime
from unittest.mock import patch
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Integer,
LatLong,
NaturalLanguage,
Ordinal,
PostalCode,
SubRegionCode
)
import featuretools as ft
from featuretools.entityset import EntitySet
from featuretools.entityset.entityset import LTI_COLUMN_NAME, WW_SCHEMA_KEY
from featuretools.tests.testing_utils import get_df_tags, to_pandas
from featuretools.utils.gen_utils import Library, import_or_none
from featuretools.utils.spark_utils import pd_to_spark_clean
ps = import_or_none('pyspark.pandas')
def test_normalize_time_index_as_additional_column(es):
error_text = "Not moving signup_date as it is the base time index column. Perhaps, move the column to the copy_columns."
with pytest.raises(ValueError, match=error_text):
assert "signup_date" in es["customers"].columns
es.normalize_dataframe(base_dataframe_name='customers',
new_dataframe_name='cancellations',
index='cancel_reason',
make_time_index='signup_date',
additional_columns=['signup_date'],
copy_columns=[])
def test_normalize_time_index_as_copy_column(es):
assert "signup_date" in es["customers"].columns
es.normalize_dataframe(base_dataframe_name='customers',
new_dataframe_name='cancellations',
index='cancel_reason',
make_time_index='signup_date',
additional_columns=[],
copy_columns=['signup_date'])
assert 'signup_date' in es['customers'].columns
assert es['customers'].ww.time_index == 'signup_date'
assert 'signup_date' in es['cancellations'].columns
assert es['cancellations'].ww.time_index == 'signup_date'
def test_normalize_time_index_as_copy_column_new_time_index(es):
assert "signup_date" in es["customers"].columns
es.normalize_dataframe(base_dataframe_name='customers',
new_dataframe_name='cancellations',
index='cancel_reason',
make_time_index=True,
additional_columns=[],
copy_columns=['signup_date'])
assert 'signup_date' in es['customers'].columns
assert es['customers'].ww.time_index == 'signup_date'
assert 'first_customers_time' in es['cancellations'].columns
assert 'signup_date' not in es['cancellations'].columns
assert es['cancellations'].ww.time_index == 'first_customers_time'
def test_normalize_time_index_as_copy_column_no_time_index(es):
assert "signup_date" in es["customers"].columns
es.normalize_dataframe(base_dataframe_name='customers',
new_dataframe_name='cancellations',
index='cancel_reason',
make_time_index=False,
additional_columns=[],
copy_columns=['signup_date'])
assert 'signup_date' in es['customers'].columns
assert es['customers'].ww.time_index == 'signup_date'
assert 'signup_date' in es['cancellations'].columns
assert es['cancellations'].ww.time_index is None
def test_cannot_re_add_relationships_that_already_exists(es):
warn_text = "Not adding duplicate relationship: " + str(es.relationships[0])
before_len = len(es.relationships)
rel = es.relationships[0]
with pytest.warns(UserWarning, match=warn_text):
es.add_relationship(relationship=rel)
with pytest.warns(UserWarning, match=warn_text):
es.add_relationship(rel._parent_dataframe_name, rel._parent_column_name,
rel._child_dataframe_name, rel._child_column_name)
after_len = len(es.relationships)
assert before_len == after_len
def test_add_relationships_convert_type(es):
for r in es.relationships:
parent_df = es[r.parent_dataframe.ww.name]
child_df = es[r.child_dataframe.ww.name]
assert parent_df.ww.index == r._parent_column_name
assert 'foreign_key' in r.child_column.ww.semantic_tags
assert str(parent_df[r._parent_column_name].dtype) == str(child_df[r._child_column_name].dtype)
def test_add_relationship_diff_param_logical_types(es):
ordinal_1 = Ordinal(order=[0, 1, 2, 3, 4, 5, 6])
ordinal_2 = Ordinal(order=[0, 1, 2, 3, 4, 5])
es['sessions'].ww.set_types(logical_types={'id': ordinal_1})
log_2_df = es['log'].copy()
log_logical_types = {
'id': Integer,
'session_id': ordinal_2,
'product_id': Categorical(),
'datetime': Datetime,
'value': Double,
'value_2': Double,
'latlong': LatLong,
'latlong2': LatLong,
'zipcode': PostalCode,
'countrycode': CountryCode,
'subregioncode': SubRegionCode,
'value_many_nans': Double,
'priority_level': Ordinal(order=[0, 1, 2]),
'purchased': Boolean,
'comments': NaturalLanguage,
'url': URL,
'email_address': EmailAddress
}
log_semantic_tags = {
'session_id': 'foreign_key',
'product_id': 'foreign_key'
}
assert set(log_logical_types) == set(log_2_df.columns)
es.add_dataframe(dataframe_name='log2',
dataframe=log_2_df,
index='id',
logical_types=log_logical_types,
semantic_tags=log_semantic_tags,
time_index='datetime')
assert 'log2' in es.dataframe_dict
assert es['log2'].ww.schema is not None
assert isinstance(es['log2'].ww.logical_types['session_id'], Ordinal)
assert isinstance(es['sessions'].ww.logical_types['id'], Ordinal)
assert es['sessions'].ww.logical_types['id'] != es['log2'].ww.logical_types['session_id']
warning_text = 'Logical type Ordinal for child column session_id does not match parent '\
'column id logical type Ordinal. There is a conflict between the parameters. '\
'Changing child logical type to match parent.'
with pytest.warns(UserWarning, match=warning_text):
es.add_relationship(u'sessions', 'id', 'log2', 'session_id')
assert isinstance(es['log2'].ww.logical_types['product_id'], Categorical)
assert isinstance(es['products'].ww.logical_types['id'], Categorical)
def test_add_relationship_different_logical_types_same_dtype(es):
log_2_df = es['log'].copy()
log_logical_types = {
'id': Integer,
'session_id': Integer,
'product_id': CountryCode,
'datetime': Datetime,
'value': Double,
'value_2': Double,
'latlong': LatLong,
'latlong2': LatLong,
'zipcode': PostalCode,
'countrycode': CountryCode,
'subregioncode': SubRegionCode,
'value_many_nans': Double,
'priority_level': Ordinal(order=[0, 1, 2]),
'purchased': Boolean,
'comments': NaturalLanguage,
'url': URL,
'email_address': EmailAddress,
}
log_semantic_tags = {
'session_id': 'foreign_key',
'product_id': 'foreign_key'
}
assert set(log_logical_types) == set(log_2_df.columns)
es.add_dataframe(dataframe_name='log2',
dataframe=log_2_df,
index='id',
logical_types=log_logical_types,
semantic_tags=log_semantic_tags,
time_index='datetime')
assert 'log2' in es.dataframe_dict
assert es['log2'].ww.schema is not None
assert isinstance(es['log2'].ww.logical_types['product_id'], CountryCode)
assert isinstance(es['products'].ww.logical_types['id'], Categorical)
warning_text = 'Logical type CountryCode for child column product_id does not match parent column id logical type Categorical. Changing child logical type to match parent.'
with pytest.warns(UserWarning, match=warning_text):
es.add_relationship(u'products', 'id', 'log2', 'product_id')
assert isinstance(es['log2'].ww.logical_types['product_id'], Categorical)
assert isinstance(es['products'].ww.logical_types['id'], Categorical)
assert 'foreign_key' in es['log2'].ww.semantic_tags['product_id']
def test_add_relationship_different_compatible_dtypes(es):
log_2_df = es['log'].copy()
log_logical_types = {
'id': Integer,
'session_id': Datetime,
'product_id': Categorical,
'datetime': Datetime,
'value': Double,
'value_2': Double,
'latlong': LatLong,
'latlong2': LatLong,
'zipcode': PostalCode,
'countrycode': CountryCode,
'subregioncode': SubRegionCode,
'value_many_nans': Double,
'priority_level': Ordinal(order=[0, 1, 2]),
'purchased': Boolean,
'comments': NaturalLanguage,
'url': URL,
'email_address': EmailAddress,
}
log_semantic_tags = {
'session_id': 'foreign_key',
'product_id': 'foreign_key'
}
assert set(log_logical_types) == set(log_2_df.columns)
es.add_dataframe(dataframe_name='log2',
dataframe=log_2_df,
index='id',
logical_types=log_logical_types,
semantic_tags=log_semantic_tags,
time_index='datetime')
assert 'log2' in es.dataframe_dict
assert es['log2'].ww.schema is not None
assert isinstance(es['log2'].ww.logical_types['session_id'], Datetime)
assert isinstance(es['customers'].ww.logical_types['id'], Integer)
warning_text = 'Logical type Datetime for child column session_id does not match parent column id logical type Integer. Changing child logical type to match parent.'
with pytest.warns(UserWarning, match=warning_text):
es.add_relationship(u'customers', 'id', 'log2', 'session_id')
assert isinstance(es['log2'].ww.logical_types['session_id'], Integer)
assert isinstance(es['customers'].ww.logical_types['id'], Integer)
def test_add_relationship_errors_child_v_index(es):
new_df = es['log'].ww.copy()
new_df.ww._schema.name = 'log2'
es.add_dataframe(dataframe=new_df)
to_match = "Unable to add relationship because child column 'id' in 'log2' is also its index"
with pytest.raises(ValueError, match=to_match):
es.add_relationship('log', 'id', 'log2', 'id')
def test_add_relationship_empty_child_convert_dtype(es):
relationship = ft.Relationship(es, "sessions", "id", "log", "session_id")
empty_log_df = pd.DataFrame(columns=es['log'].columns)
if es.dataframe_type == Library.DASK.value:
empty_log_df = dd.from_pandas(empty_log_df, npartitions=2)
elif es.dataframe_type == Library.SPARK.value:
empty_log_df = ps.from_pandas(empty_log_df)
es.add_dataframe(empty_log_df, 'log')
assert len(es['log']) == 0
# session_id will be Unknown logical type with dtype string
assert es['log']['session_id'].dtype == 'string'
es.relationships.remove(relationship)
assert(relationship not in es.relationships)
es.add_relationship(relationship=relationship)
assert es['log']['session_id'].dtype == 'int64'
def test_add_relationship_with_relationship_object(es):
relationship = ft.Relationship(es, "sessions", "id", "log", "session_id")
es.add_relationship(relationship=relationship)
assert relationship in es.relationships
def test_add_relationships_with_relationship_object(es):
relationships = [ft.Relationship(es, "sessions", "id", "log", "session_id")]
es.add_relationships(relationships)
assert relationships[0] in es.relationships
def test_add_relationship_error(es):
relationship = ft.Relationship(es, "sessions", "id", "log", "session_id")
error_message = "Cannot specify dataframe and column name values and also supply a Relationship"
with pytest.raises(ValueError, match=error_message):
es.add_relationship(parent_dataframe_name="sessions", relationship=relationship)
def test_query_by_values_returns_rows_in_given_order():
data = pd.DataFrame({
"id": [1, 2, 3, 4, 5],
"value": ["a", "c", "b", "a", "a"],
"time": [1000, 2000, 3000, 4000, 5000]
})
es = ft.EntitySet()
es = es.add_dataframe(dataframe=data, dataframe_name="test",
index="id", time_index="time",
logical_types={"value": "Categorical"})
query = es.query_by_values('test', ['b', 'a'], column_name='value')
assert np.array_equal(query['id'], [1, 3, 4, 5])
def test_query_by_values_secondary_time_index(es):
end = np.datetime64(datetime(2011, 10, 1))
all_instances = [0, 1, 2]
result = es.query_by_values('customers', all_instances, time_last=end)
result = to_pandas(result, index='id')
for col in ["cancel_date", "cancel_reason"]:
nulls = result.loc[all_instances][col].isnull() == [False, True, True]
assert nulls.all(), "Some instance has data it shouldn't for column %s" % col
def test_query_by_id(es):
df = to_pandas(es.query_by_values('log', instance_vals=[0]))
assert df['id'].values[0] == 0
def test_query_by_single_value(es):
df = to_pandas(es.query_by_values('log', instance_vals=0))
assert df['id'].values[0] == 0
def test_query_by_df(es):
instance_df = pd.DataFrame({'id': [1, 3], 'vals': [0, 1]})
df = to_pandas(es.query_by_values('log', instance_vals=instance_df))
assert np.array_equal(df['id'], [1, 3])
def test_query_by_id_with_time(es):
df = es.query_by_values(
dataframe_name='log',
instance_vals=[0, 1, 2, 3, 4],
time_last=datetime(2011, 4, 9, 10, 30, 2 * 6))
df = to_pandas(df)
if es.dataframe_type == Library.SPARK.value:
# Spark doesn't maintain order
df = df.sort_values('id')
assert list(df['id'].values) == [0, 1, 2]
def test_query_by_column_with_time(es):
df = es.query_by_values(
dataframe_name='log',
instance_vals=[0, 1, 2], column_name='session_id',
time_last=datetime(2011, 4, 9, 10, 50, 0))
df = to_pandas(df)
true_values = [
i * 5 for i in range(5)] + [i * 1 for i in range(4)] + [0]
if es.dataframe_type == Library.SPARK.value:
# Spark doesn't maintain order
df = df.sort_values('id')
assert list(df['id'].values) == list(range(10))
assert list(df['value'].values) == true_values
def test_query_by_column_with_no_lti_and_training_window(es):
match = "Using training_window but last_time_index is not set for dataframe customers"
with pytest.warns(UserWarning, match=match):
df = es.query_by_values(
dataframe_name='customers',
instance_vals=[0, 1, 2], column_name='cohort',
time_last=datetime(2011, 4, 11),
training_window='3d')
df = to_pandas(df)
assert list(df['id'].values) == [1]
assert list(df['age'].values) == [25]
def test_query_by_column_with_lti_and_training_window(es):
es.add_last_time_indexes()
df = es.query_by_values(
dataframe_name='customers',
instance_vals=[0, 1, 2], column_name='cohort',
time_last=datetime(2011, 4, 11),
training_window='3d')
# Account for different ordering between pandas and dask/spark
df = to_pandas(df).reset_index(drop=True).sort_values('id')
assert list(df['id'].values) == [0, 1, 2]
assert list(df['age'].values) == [33, 25, 56]
def test_query_by_indexed_column(es):
df = es.query_by_values(
dataframe_name='log',
instance_vals=['taco clock'],
column_name='product_id')
# Account for different ordering between pandas and dask/spark
df = to_pandas(df).reset_index(drop=True).sort_values('id')
assert list(df['id'].values) == [15, 16]
@pytest.fixture
def pd_df():
return pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'c']})
@pytest.fixture
def dd_df(pd_df):
return dd.from_pandas(pd_df, npartitions=2)
@pytest.fixture
def spark_df(pd_df):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
return ps.from_pandas(pd_df)
@pytest.fixture(params=['pd_df', 'dd_df', 'spark_df'])
def df(request):
return request.getfixturevalue(request.param)
def test_check_columns_and_dataframe(df):
# matches
logical_types = {'id': Integer,
'category': Categorical}
es = EntitySet(id='test')
es.add_dataframe(df, dataframe_name='test_dataframe', index='id',
logical_types=logical_types)
assert isinstance(es.dataframe_dict['test_dataframe'].ww.logical_types['category'], Categorical)
assert es.dataframe_dict['test_dataframe'].ww.semantic_tags['category'] == {'category'}
def test_make_index_any_location(df):
logical_types = {'id': Integer,
'category': Categorical}
es = EntitySet(id='test')
es.add_dataframe(dataframe_name='test_dataframe',
index='id1',
make_index=True,
logical_types=logical_types,
dataframe=df)
if es.dataframe_type != Library.PANDAS.value:
assert es.dataframe_dict['test_dataframe'].columns[-1] == 'id1'
else:
assert es.dataframe_dict['test_dataframe'].columns[0] == 'id1'
assert es.dataframe_dict['test_dataframe'].ww.index == 'id1'
def test_replace_dataframe_and_create_index(es):
expected_idx_col = [0, 1, 2]
df = pd.DataFrame({'ints': [3, 4, 5], 'category': ['a', 'b', 'a']})
if es.dataframe_type == Library.DASK.value:
df = dd.from_pandas(df, npartitions=2)
elif es.dataframe_type == Library.SPARK.value:
expected_idx_col = [0, 2, 1]
df = ps.from_pandas(df)
needs_idx_df = df.copy()
logical_types = {'ints': Integer,
'category': Categorical}
es.add_dataframe(dataframe=df,
dataframe_name='test_df',
index='id',
make_index=True,
logical_types=logical_types)
assert es['test_df'].ww.index == 'id'
# DataFrame that needs the index column added
assert 'id' not in needs_idx_df.columns
es.replace_dataframe('test_df', needs_idx_df)
assert es['test_df'].ww.index == 'id'
assert all(expected_idx_col == to_pandas(es['test_df']['id']))
def test_replace_dataframe_created_index_present(es):
original_idx_col = [100, 1, 2]
df = pd.DataFrame({'ints': [3, 4, 5], 'category': ['a', 'b', 'a']})
if es.dataframe_type == Library.DASK.value:
df = dd.from_pandas(df, npartitions=2)
elif es.dataframe_type == Library.SPARK.value:
original_idx_col = [100, 2, 1]
df = ps.from_pandas(df)
logical_types = {'ints': Integer,
'category': Categorical}
es.add_dataframe(dataframe=df,
dataframe_name='test_df',
index='id',
make_index=True,
logical_types=logical_types)
# DataFrame that already has the index column
has_idx_df = es['test_df'].replace({0: 100})
if es.dataframe_type == Library.PANDAS.value:
has_idx_df.set_index('id', drop=False, inplace=True)
assert 'id' in has_idx_df.columns
es.replace_dataframe('test_df', has_idx_df)
assert es['test_df'].ww.index == 'id'
assert all(original_idx_col == to_pandas(es['test_df']['id']))
def test_index_any_location(df):
logical_types = {'id': Integer,
'category': Categorical}
es = EntitySet(id='test')
es.add_dataframe(dataframe_name='test_dataframe',
index='category',
logical_types=logical_types,
dataframe=df)
assert es.dataframe_dict['test_dataframe'].columns[1] == 'category'
assert es.dataframe_dict['test_dataframe'].ww.index == 'category'
def test_extra_column_type(df):
# more columns
logical_types = {'id': Integer,
'category': Categorical,
'category2': Categorical}
error_text = re.escape("logical_types contains columns that are not present in dataframe: ['category2']")
with pytest.raises(LookupError, match=error_text):
es = EntitySet(id='test')
es.add_dataframe(dataframe_name='test_dataframe',
index='id',
logical_types=logical_types, dataframe=df)
def test_add_parent_not_index_column(es):
error_text = "Parent column 'language' is not the index of dataframe régions"
with pytest.raises(AttributeError, match=error_text):
es.add_relationship(u'régions', 'language', 'customers', u'région_id')
@pytest.fixture
def pd_df2():
return pd.DataFrame({'category': [1, 2, 3], 'category2': ['1', '2', '3']})
@pytest.fixture
def dd_df2(pd_df2):
return dd.from_pandas(pd_df2, npartitions=2)
@pytest.fixture
def spark_df2(pd_df2):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
return ps.from_pandas(pd_df2)
@pytest.fixture(params=['pd_df2', 'dd_df2', 'spark_df2'])
def df2(request):
return request.getfixturevalue(request.param)
def test_none_index(df2):
es = EntitySet(id='test')
copy_df = df2.copy()
copy_df.ww.init(name='test_dataframe')
error_msg = 'Cannot add Woodwork DataFrame to EntitySet without index'
with pytest.raises(ValueError, match=error_msg):
es.add_dataframe(dataframe=copy_df)
warn_text = "Using first column as index. To change this, specify the index parameter"
with pytest.warns(UserWarning, match=warn_text):
es.add_dataframe(dataframe_name='test_dataframe',
logical_types={'category': 'Categorical'},
dataframe=df2)
assert es['test_dataframe'].ww.index == 'category'
assert es['test_dataframe'].ww.semantic_tags['category'] == {'index'}
assert isinstance(es['test_dataframe'].ww.logical_types['category'], Categorical)
@pytest.fixture
def pd_df3():
return pd.DataFrame({'category': [1, 2, 3]})
@pytest.fixture
def dd_df3(pd_df3):
return dd.from_pandas(pd_df3, npartitions=2)
@pytest.fixture
def spark_df3(pd_df3):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
return ps.from_pandas(pd_df3)
@pytest.fixture(params=['pd_df3', 'dd_df3', 'spark_df3'])
def df3(request):
return request.getfixturevalue(request.param)
def test_unknown_index(df3):
warn_text = "index id not found in dataframe, creating new integer column"
es = EntitySet(id='test')
with pytest.warns(UserWarning, match=warn_text):
es.add_dataframe(dataframe_name='test_dataframe',
dataframe=df3,
index='id',
logical_types={'category': 'Categorical'})
assert es['test_dataframe'].ww.index == 'id'
assert list(to_pandas(es['test_dataframe']['id'], sort_index=True)) == list(range(3))
def test_doesnt_remake_index(df):
logical_types = {'id': 'Integer', 'category': 'Categorical'}
error_text = "Cannot make index: column with name id already present"
with pytest.raises(RuntimeError, match=error_text):
es = EntitySet(id='test')
es.add_dataframe(dataframe_name='test_dataframe',
index='id',
make_index=True,
dataframe=df,
logical_types=logical_types)
def test_bad_time_index_column(df3):
logical_types = {'category': 'Categorical'}
error_text = "Specified time index column `time` not found in dataframe"
with pytest.raises(LookupError, match=error_text):
es = EntitySet(id='test')
es.add_dataframe(dataframe_name='test_dataframe',
dataframe=df3,
time_index='time',
logical_types=logical_types)
@pytest.fixture
def pd_df4():
df = pd.DataFrame({'id': [0, 1, 2],
'category': ['a', 'b', 'a'],
'category_int': [1, 2, 3],
'ints': ['1', '2', '3'],
'floats': ['1', '2', '3.0']})
df["category_int"] = df["category_int"].astype("category")
return df
@pytest.fixture
def dd_df4(pd_df4):
return dd.from_pandas(pd_df4, npartitions=2)
@pytest.fixture
def spark_df4(pd_df4):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
return ps.from_pandas(pd_to_spark_clean(pd_df4))
@pytest.fixture(params=['pd_df4', 'dd_df4', 'spark_df4'])
def df4(request):
return request.getfixturevalue(request.param)
def test_converts_dtype_on_init(df4):
logical_types = {'id': Integer,
'ints': Integer,
'floats': Double}
if not isinstance(df4, pd.DataFrame):
logical_types['category'] = Categorical
logical_types['category_int'] = Categorical
es = EntitySet(id='test')
df4.ww.init(name='test_dataframe', index='id', logical_types=logical_types)
es.add_dataframe(dataframe=df4)
df = es['test_dataframe']
assert df['ints'].dtype.name == 'int64'
assert df['floats'].dtype.name == 'float64'
    # this is inferred from the pandas dtype
df = es["test_dataframe"]
assert isinstance(df.ww.logical_types['category_int'], Categorical)
def test_converts_dtype_after_init(df4):
category_dtype = 'category'
if ps and isinstance(df4, ps.DataFrame):
category_dtype = 'string'
df4["category"] = df4["category"].astype(category_dtype)
if not isinstance(df4, pd.DataFrame):
logical_types = {'id': Integer,
'category': Categorical,
'category_int': Categorical,
'ints': Integer,
'floats': Double}
else:
logical_types = None
es = EntitySet(id='test')
es.add_dataframe(dataframe_name='test_dataframe', index='id',
dataframe=df4, logical_types=logical_types)
df = es['test_dataframe']
df.ww.set_types(logical_types={'ints': 'Integer'})
assert isinstance(df.ww.logical_types['ints'], Integer)
assert df['ints'].dtype == 'int64'
df.ww.set_types(logical_types={'ints': 'Categorical'})
assert isinstance(df.ww.logical_types['ints'], Categorical)
assert df['ints'].dtype == category_dtype
df.ww.set_types(logical_types={'ints': Ordinal(order=[1, 2, 3])})
assert df.ww.logical_types['ints'] == Ordinal(order=[1, 2, 3])
assert df['ints'].dtype == category_dtype
df.ww.set_types(logical_types={'ints': 'NaturalLanguage'})
assert isinstance(df.ww.logical_types['ints'], NaturalLanguage)
assert df['ints'].dtype == 'string'
def test_warns_no_typing(df4):
es = EntitySet(id='test')
if not isinstance(df4, pd.DataFrame):
msg = 'Performing type inference on Dask or Spark DataFrames may be computationally intensive. Specify logical types for each column to speed up EntitySet initialization.'
with pytest.warns(UserWarning, match=msg):
es.add_dataframe(dataframe_name='test_dataframe', index='id',
dataframe=df4)
else:
es.add_dataframe(dataframe_name='test_dataframe', index='id',
dataframe=df4)
assert 'test_dataframe' in es.dataframe_dict
@pytest.fixture
def pd_datetime1():
times = pd.date_range('1/1/2011', periods=3, freq='H')
time_strs = times.strftime('%Y-%m-%d')
return pd.DataFrame({'id': [0, 1, 2], 'time': time_strs})
@pytest.fixture
def dd_datetime1(pd_datetime1):
return dd.from_pandas(pd_datetime1, npartitions=2)
@pytest.fixture
def spark_datetime1(pd_datetime1):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
return ps.from_pandas(pd_datetime1)
@pytest.fixture(params=['pd_datetime1', 'dd_datetime1', 'spark_datetime1'])
def datetime1(request):
return request.getfixturevalue(request.param)
def test_converts_datetime(datetime1):
# string converts to datetime correctly
# This test fails without defining logical types.
    # EntitySet infers that the time column should be a numeric type
logical_types = {'id': Integer,
'time': Datetime}
es = EntitySet(id='test')
es.add_dataframe(
dataframe_name='test_dataframe',
index='id',
time_index="time",
logical_types=logical_types,
dataframe=datetime1)
pd_col = to_pandas(es['test_dataframe']['time'])
assert isinstance(es['test_dataframe'].ww.logical_types['time'], Datetime)
assert type(pd_col[0]) == pd.Timestamp
@pytest.fixture
def pd_datetime2():
datetime_format = "%d-%m-%Y"
actual = pd.Timestamp('Jan 2, 2011')
time_strs = [actual.strftime(datetime_format)] * 3
return pd.DataFrame(
{'id': [0, 1, 2], 'time_format': time_strs, 'time_no_format': time_strs})
@pytest.fixture
def dd_datetime2(pd_datetime2):
return dd.from_pandas(pd_datetime2, npartitions=2)
@pytest.fixture
def spark_datetime2(pd_datetime2):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
return ps.from_pandas(pd_datetime2)
@pytest.fixture(params=['pd_datetime2', 'dd_datetime2', 'spark_datetime2'])
def datetime2(request):
return request.getfixturevalue(request.param)
def test_handles_datetime_format(datetime2):
# check if we load according to the format string
# pass in an ambiguous date
datetime_format = "%d-%m-%Y"
actual = pd.Timestamp('Jan 2, 2011')
logical_types = {'id': Integer,
'time_format': (Datetime(datetime_format=datetime_format)),
'time_no_format': Datetime}
es = EntitySet(id='test')
es.add_dataframe(
dataframe_name='test_dataframe',
index='id',
logical_types=logical_types,
dataframe=datetime2)
col_format = to_pandas(es['test_dataframe']['time_format'])
col_no_format = to_pandas(es['test_dataframe']['time_no_format'])
# without formatting pandas gets it wrong
assert (col_no_format != actual).all()
# with formatting we correctly get jan2
assert (col_format == actual).all()
def test_handles_datetime_mismatch():
# can't convert arbitrary strings
df = pd.DataFrame({'id': [0, 1, 2], 'time': ['a', 'b', 'tomorrow']})
logical_types = {'id': Integer,
'time': Datetime}
error_text = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_text):
es = EntitySet(id='test')
es.add_dataframe(df, dataframe_name='test_dataframe', index='id',
time_index='time', logical_types=logical_types)
def test_dataframe_init(es):
df = pd.DataFrame({'id': ['0', '1', '2'],
'time': [datetime(2011, 4, 9, 10, 31, 3 * i)
for i in range(3)],
'category': ['a', 'b', 'a'],
'number': [4, 5, 6]})
if es.dataframe_type == Library.DASK.value:
df = dd.from_pandas(df, npartitions=2)
elif es.dataframe_type == Library.SPARK.value:
df = ps.from_pandas(df)
logical_types = {'time': Datetime}
if not isinstance(df, pd.DataFrame):
extra_logical_types = {
'id': Categorical,
'category': Categorical,
'number': Integer
}
logical_types.update(extra_logical_types)
es.add_dataframe(df.copy(), dataframe_name='test_dataframe', index='id',
time_index='time', logical_types=logical_types)
if isinstance(df, dd.DataFrame):
df_shape = (df.shape[0].compute(), df.shape[1])
else:
df_shape = df.shape
if es.dataframe_type == Library.DASK.value:
es_df_shape = (es['test_dataframe'].shape[0].compute(), es['test_dataframe'].shape[1])
else:
es_df_shape = es['test_dataframe'].shape
assert es_df_shape == df_shape
assert es['test_dataframe'].ww.index == 'id'
assert es['test_dataframe'].ww.time_index == 'time'
assert set([v for v in es['test_dataframe'].ww.columns]) == set(df.columns)
assert es['test_dataframe']['time'].dtype == df['time'].dtype
if es.dataframe_type == Library.SPARK.value:
assert set(es['test_dataframe']['id'].to_list()) == set(df['id'].to_list())
else:
assert set(es['test_dataframe']['id']) == set(df['id'])
@pytest.fixture
def pd_bad_df():
return pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 3: ['a', 'b', 'c']})
@pytest.fixture
def dd_bad_df(pd_bad_df):
return dd.from_pandas(pd_bad_df, npartitions=2)
@pytest.fixture(params=['pd_bad_df', 'dd_bad_df'])
def bad_df(request):
return request.getfixturevalue(request.param)
# Skip for Spark, which automatically converts non-str column names to str
def test_nonstr_column_names(bad_df):
if isinstance(bad_df, dd.DataFrame):
pytest.xfail('Dask DataFrames cannot handle integer column names')
es = ft.EntitySet(id='Failure')
error_text = r"All column names must be strings \(Columns \[3\] are not strings\)"
with pytest.raises(ValueError, match=error_text):
es.add_dataframe(dataframe_name='str_cols',
dataframe=bad_df,
index='a')
bad_df.ww.init()
with pytest.raises(ValueError, match=error_text):
es.add_dataframe(dataframe_name='str_cols',
dataframe=bad_df)
def test_sort_time_id():
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s")[::-1]})
es = EntitySet("test", dataframes={"t": (transactions_df.copy(), "id", "transaction_time")})
assert es['t'] is not transactions_df
times = list(es["t"].transaction_time)
assert times == sorted(list(transactions_df.transaction_time))
def test_already_sorted_parameter():
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"transaction_time": [datetime(2014, 4, 6),
datetime(
2012, 4, 8),
datetime(
2012, 4, 8),
datetime(
2013, 4, 8),
datetime(
2015, 4, 8),
datetime(2016, 4, 9)]})
es = EntitySet(id='test')
es.add_dataframe(transactions_df.copy(),
dataframe_name='t',
index='id',
time_index="transaction_time",
already_sorted=True)
assert es['t'] is not transactions_df
times = list(es["t"].transaction_time)
assert times == list(transactions_df.transaction_time)
def test_concat_not_inplace(es):
first_es = copy.deepcopy(es)
for df in first_es.dataframes:
new_df = df.loc[[], :]
first_es.replace_dataframe(df.ww.name, new_df)
second_es = copy.deepcopy(es)
# set the data description
first_es.metadata
new_es = first_es.concat(second_es)
assert new_es == es
assert new_es._data_description is None
assert first_es._data_description is not None
def test_concat_inplace(es):
first_es = copy.deepcopy(es)
second_es = copy.deepcopy(es)
for df in first_es.dataframes:
new_df = df.loc[[], :]
first_es.replace_dataframe(df.ww.name, new_df)
# set the data description
es.metadata
es.concat(first_es, inplace=True)
assert second_es == es
assert es._data_description is None
def test_concat_with_lti(es):
first_es = copy.deepcopy(es)
for df in first_es.dataframes:
if first_es.dataframe_type == Library.SPARK.value:
# Spark cannot compute last time indexes on an empty Dataframe
new_df = df.head(1)
else:
new_df = df.loc[[], :]
first_es.replace_dataframe(df.ww.name, new_df)
second_es = copy.deepcopy(es)
first_es.add_last_time_indexes()
second_es.add_last_time_indexes()
es.add_last_time_indexes()
new_es = first_es.concat(second_es)
assert new_es == es
first_es['stores'].ww.pop(LTI_COLUMN_NAME)
first_es['stores'].ww.metadata.pop('last_time_index')
second_es['stores'].ww.pop(LTI_COLUMN_NAME)
second_es['stores'].ww.metadata.pop('last_time_index')
assert not first_es.__eq__(es, deep=False)
assert not second_es.__eq__(es, deep=False)
assert LTI_COLUMN_NAME not in first_es['stores']
assert LTI_COLUMN_NAME not in second_es['stores']
new_es = first_es.concat(second_es)
assert new_es.__eq__(es, deep=True)
    # stores will get its last time index re-added because it has children whose lti will be calculated
assert LTI_COLUMN_NAME in new_es['stores']
def test_concat_errors(es):
# entitysets are not equal
copy_es = copy.deepcopy(es)
copy_es['customers'].ww.pop('phone_number')
error = "Entitysets must have the same dataframes, relationships"\
", and column names"
with pytest.raises(ValueError, match=error):
es.concat(copy_es)
def test_concat_sort_index_with_time_index(pd_es):
# only pandas dataframes sort on the index and time index
es1 = copy.deepcopy(pd_es)
es1.replace_dataframe(dataframe_name='customers', df=pd_es['customers'].loc[[0, 1], :], already_sorted=True)
es2 = copy.deepcopy(pd_es)
es2.replace_dataframe(dataframe_name='customers', df=pd_es['customers'].loc[[2], :], already_sorted=True)
combined_es_order_1 = es1.concat(es2)
combined_es_order_2 = es2.concat(es1)
assert list(combined_es_order_1['customers'].index) == [2, 0, 1]
assert list(combined_es_order_2['customers'].index) == [2, 0, 1]
assert combined_es_order_1.__eq__(pd_es, deep=True)
assert combined_es_order_2.__eq__(pd_es, deep=True)
assert combined_es_order_2.__eq__(combined_es_order_1, deep=True)
def test_concat_sort_index_without_time_index(pd_es):
# Sorting is only performed on DataFrames with time indices
es1 = copy.deepcopy(pd_es)
es1.replace_dataframe(dataframe_name='products', df=pd_es['products'].iloc[[0, 1, 2], :], already_sorted=True)
es2 = copy.deepcopy(pd_es)
es2.replace_dataframe(dataframe_name='products', df=pd_es['products'].iloc[[3, 4, 5], :], already_sorted=True)
combined_es_order_1 = es1.concat(es2)
combined_es_order_2 = es2.concat(es1)
# order matters when we don't sort
assert list(combined_es_order_1['products'].index) == ['Haribo sugar-free gummy bears',
'car',
'toothpaste',
'brown bag',
'coke zero',
'taco clock']
assert list(combined_es_order_2['products'].index) == ['brown bag',
'coke zero',
'taco clock',
'Haribo sugar-free gummy bears',
'car',
'toothpaste'
]
assert combined_es_order_1.__eq__(pd_es, deep=True)
assert not combined_es_order_2.__eq__(pd_es, deep=True)
assert combined_es_order_2.__eq__(pd_es, deep=False)
assert not combined_es_order_2.__eq__(combined_es_order_1, deep=True)
def test_concat_with_make_index(es):
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
if es.dataframe_type == Library.DASK.value:
df = dd.from_pandas(df, npartitions=2)
elif es.dataframe_type == Library.SPARK.value:
df = ps.from_pandas(df)
logical_types = {'id': Categorical,
'category': Categorical}
es.add_dataframe(dataframe=df,
dataframe_name='test_df',
index='id1',
make_index=True,
logical_types=logical_types)
es_1 = copy.deepcopy(es)
es_2 = copy.deepcopy(es)
assert es.__eq__(es_1, deep=True)
assert es.__eq__(es_2, deep=True)
# map of what rows to take from es_1 and es_2 for each dataframe
emap = {
'log': [list(range(10)) + [14, 15, 16], list(range(10, 14)) + [15, 16]],
'sessions': [[0, 1, 2], [1, 3, 4, 5]],
'customers': [[0, 2], [1, 2]],
'test_df': [[0, 1], [0, 2]],
}
for i, _es in enumerate([es_1, es_2]):
for df_name, rows in emap.items():
df = _es[df_name]
_es.replace_dataframe(dataframe_name=df_name, df=df.loc[rows[i]])
assert es.__eq__(es_1, deep=False)
assert es.__eq__(es_2, deep=False)
if es.dataframe_type == Library.PANDAS.value:
assert not es.__eq__(es_1, deep=True)
assert not es.__eq__(es_2, deep=True)
old_es_1 = copy.deepcopy(es_1)
old_es_2 = copy.deepcopy(es_2)
es_3 = es_1.concat(es_2)
assert old_es_1.__eq__(es_1, deep=True)
assert old_es_2.__eq__(es_2, deep=True)
assert es_3.__eq__(es, deep=True)
@pytest.fixture
def pd_transactions_df():
return pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"card_id": [1, 2, 1, 3, 4, 5],
"transaction_time": [10, 12, 13, 20, 21, 20],
"fraud": [True, False, False, False, True, True]})
@pytest.fixture
def dd_transactions_df(pd_transactions_df):
return dd.from_pandas(pd_transactions_df, npartitions=3)
@pytest.fixture
def spark_transactions_df(pd_transactions_df):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
return ps.from_pandas(pd_transactions_df)
@pytest.fixture(params=['pd_transactions_df', 'dd_transactions_df', 'spark_transactions_df'])
def transactions_df(request):
return request.getfixturevalue(request.param)
def test_set_time_type_on_init(transactions_df):
# create cards dataframe
cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
if isinstance(transactions_df, dd.DataFrame):
cards_df = dd.from_pandas(cards_df, npartitions=3)
if ps and isinstance(transactions_df, ps.DataFrame):
cards_df = ps.from_pandas(cards_df)
if not isinstance(transactions_df, pd.DataFrame):
cards_logical_types = {'id': Categorical}
transactions_logical_types = {
'id': Integer,
'card_id': Categorical,
'transaction_time': Integer,
'fraud': Boolean
}
else:
cards_logical_types = None
transactions_logical_types = None
dataframes = {
"cards": (cards_df, "id", None, cards_logical_types),
"transactions": (transactions_df, "id", "transaction_time", transactions_logical_types)
}
relationships = [("cards", "id", "transactions", "card_id")]
es = EntitySet("fraud", dataframes, relationships)
# assert time_type is set
assert es.time_type == 'numeric'
def test_sets_time_when_adding_dataframe(transactions_df):
accounts_df = pd.DataFrame({"id": [3, 4, 5],
"signup_date": [datetime(2002, 5, 1),
datetime(2006, 3, 20),
datetime(2011, 11, 11)]})
accounts_df_string = pd.DataFrame({"id": [3, 4, 5],
"signup_date": ["element",
"exporting",
"editable"]})
if isinstance(transactions_df, dd.DataFrame):
accounts_df = dd.from_pandas(accounts_df, npartitions=2)
if ps and isinstance(transactions_df, ps.DataFrame):
accounts_df = ps.from_pandas(accounts_df)
if not isinstance(transactions_df, pd.DataFrame):
accounts_logical_types = {'id': Categorical, 'signup_date': Datetime}
transactions_logical_types = {
'id': Integer,
'card_id': Categorical,
'transaction_time': Integer,
'fraud': Boolean
}
else:
accounts_logical_types = None
transactions_logical_types = None
# create empty entityset
es = EntitySet("fraud")
# assert it's not set
assert getattr(es, "time_type", None) is None
# add dataframe
es.add_dataframe(transactions_df,
dataframe_name="transactions",
index="id",
time_index="transaction_time",
logical_types=transactions_logical_types)
# assert time_type is set
assert es.time_type == 'numeric'
# add another dataframe
es.normalize_dataframe("transactions",
"cards",
"card_id",
make_time_index=True)
# assert time_type unchanged
assert es.time_type == 'numeric'
# add wrong time type dataframe
error_text = "accounts time index is Datetime type which differs from other entityset time indexes"
with pytest.raises(TypeError, match=error_text):
es.add_dataframe(accounts_df,
dataframe_name="accounts",
index="id",
time_index="signup_date",
logical_types=accounts_logical_types)
# add non time type as time index, only valid for pandas
if isinstance(transactions_df, pd.DataFrame):
error_text = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_text):
es.add_dataframe(accounts_df_string,
dataframe_name="accounts",
index="id",
time_index="signup_date")
def test_secondary_time_index_no_primary_time_index(es):
es['products'].ww.set_types(logical_types={'rating': 'Datetime'})
assert es['products'].ww.time_index is None
error = 'Cannot set secondary time index on a DataFrame that has no primary time index.'
with pytest.raises(ValueError, match=error):
es.set_secondary_time_index('products', {'rating': ['url']})
assert 'secondary_time_index' not in es['products'].ww.metadata
assert es['products'].ww.time_index is None
def test_set_non_valid_time_index_type(es):
error_text = 'Time index column must be a Datetime or numeric column.'
with pytest.raises(TypeError, match=error_text):
es['log'].ww.set_time_index('purchased')
def test_checks_time_type_setting_secondary_time_index(es):
# entityset is timestamp time type
assert es.time_type == Datetime
# add secondary index that is timestamp type
new_2nd_ti = {'upgrade_date': ['upgrade_date', 'favorite_quote'],
'cancel_date': ['cancel_date', 'cancel_reason']}
es.set_secondary_time_index("customers", new_2nd_ti)
assert es.time_type == Datetime
# add secondary index that is numeric type
new_2nd_ti = {'age': ['age', 'loves_ice_cream']}
error_text = "customers time index is numeric type which differs from other entityset time indexes"
with pytest.raises(TypeError, match=error_text):
es.set_secondary_time_index("customers", new_2nd_ti)
# add secondary index that is non-time type
new_2nd_ti = {'favorite_quote': ['favorite_quote', 'loves_ice_cream']}
error_text = 'customers time index not recognized as numeric or datetime'
with pytest.raises(TypeError, match=error_text):
es.set_secondary_time_index("customers", new_2nd_ti)
# add mismatched pair of secondary time indexes
new_2nd_ti = {'upgrade_date': ['upgrade_date', 'favorite_quote'],
'age': ['age', 'loves_ice_cream']}
error_text = "customers time index is numeric type which differs from other entityset time indexes"
with pytest.raises(TypeError, match=error_text):
es.set_secondary_time_index("customers", new_2nd_ti)
# create entityset with numeric time type
cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
transactions_df = pd.DataFrame({
"id": [1, 2, 3, 4, 5, 6],
"card_id": [1, 2, 1, 3, 4, 5],
"transaction_time": [10, 12, 13, 20, 21, 20],
"fraud_decision_time": [11, 14, 15, 21, 22, 21],
"transaction_city": ["City A"] * 6,
"transaction_date": [datetime(1989, 2, i) for i in range(1, 7)],
"fraud": [True, False, False, False, True, True]
})
dataframes = {
"cards": (cards_df, "id"),
"transactions": (transactions_df, "id", "transaction_time")
}
relationships = [("cards", "id", "transactions", "card_id")]
card_es = EntitySet("fraud", dataframes, relationships)
assert card_es.time_type == 'numeric'
# add secondary index that is numeric time type
new_2nd_ti = {'fraud_decision_time': ['fraud_decision_time', 'fraud']}
card_es.set_secondary_time_index("transactions", new_2nd_ti)
assert card_es.time_type == 'numeric'
# add secondary index that is timestamp type
new_2nd_ti = {'transaction_date': ['transaction_date', 'fraud']}
error_text = "transactions time index is Datetime type which differs from other entityset time indexes"
with pytest.raises(TypeError, match=error_text):
card_es.set_secondary_time_index("transactions", new_2nd_ti)
# add secondary index that is non-time type
new_2nd_ti = {'transaction_city': ['transaction_city', 'fraud']}
error_text = 'transactions time index not recognized as numeric or datetime'
with pytest.raises(TypeError, match=error_text):
card_es.set_secondary_time_index("transactions", new_2nd_ti)
# add mixed secondary time indexes
new_2nd_ti = {'transaction_city': ['transaction_city', 'fraud'],
'fraud_decision_time': ['fraud_decision_time', 'fraud']}
with pytest.raises(TypeError, match=error_text):
card_es.set_secondary_time_index("transactions", new_2nd_ti)
# add bool secondary time index
error_text = 'transactions time index not recognized as numeric or datetime'
with pytest.raises(TypeError, match=error_text):
card_es.set_secondary_time_index("transactions", {'fraud': ['fraud']})
def test_normalize_dataframe(es):
error_text = "'additional_columns' must be a list, but received type.*"
with pytest.raises(TypeError, match=error_text):
es.normalize_dataframe('sessions', 'device_types', 'device_type',
additional_columns='log')
error_text = "'copy_columns' must be a list, but received type.*"
with pytest.raises(TypeError, match=error_text):
es.normalize_dataframe('sessions', 'device_types', 'device_type',
copy_columns='log')
es.normalize_dataframe('sessions', 'device_types', 'device_type',
additional_columns=['device_name'],
make_time_index=False)
assert len(es.get_forward_relationships('sessions')) == 2
assert es.get_forward_relationships(
'sessions')[1].parent_dataframe.ww.name == 'device_types'
assert 'device_name' in es['device_types'].columns
assert 'device_name' not in es['sessions'].columns
assert 'device_type' in es['device_types'].columns
def test_normalize_dataframe_add_index_as_column(es):
error_text = "Not adding device_type as both index and column in additional_columns"
with pytest.raises(ValueError, match=error_text):
es.normalize_dataframe('sessions', 'device_types', 'device_type',
additional_columns=['device_name', 'device_type'],
make_time_index=False)
error_text = "Not adding device_type as both index and column in copy_columns"
with pytest.raises(ValueError, match=error_text):
es.normalize_dataframe('sessions', 'device_types', 'device_type',
copy_columns=['device_name', 'device_type'],
make_time_index=False)
def test_normalize_dataframe_new_time_index_in_base_dataframe_error_check(es):
error_text = "'make_time_index' must be a column in the base dataframe"
with pytest.raises(ValueError, match=error_text):
es.normalize_dataframe(base_dataframe_name='customers',
new_dataframe_name='cancellations',
index='cancel_reason',
make_time_index="non-existent")
def test_normalize_dataframe_new_time_index_in_column_list_error_check(es):
error_text = "'make_time_index' must be specified in 'additional_columns' or 'copy_columns'"
with pytest.raises(ValueError, match=error_text):
es.normalize_dataframe(base_dataframe_name='customers',
new_dataframe_name='cancellations',
index='cancel_reason',
make_time_index='cancel_date')
def test_normalize_dataframe_new_time_index_copy_success_check(es):
es.normalize_dataframe(base_dataframe_name='customers',
new_dataframe_name='cancellations',
index='cancel_reason',
make_time_index='cancel_date',
additional_columns=[],
copy_columns=['cancel_date'])
def test_normalize_dataframe_new_time_index_additional_success_check(es):
es.normalize_dataframe(base_dataframe_name='customers',
new_dataframe_name='cancellations',
index='cancel_reason',
make_time_index='cancel_date',
additional_columns=['cancel_date'],
copy_columns=[])
@pytest.fixture
def pd_normalize_es():
df = pd.DataFrame({
"id": [0, 1, 2, 3],
"A": [5, 4, 2, 3],
'time': [datetime(2020, 6, 3), (datetime(2020, 3, 12)), datetime(2020, 5, 1), datetime(2020, 4, 22)]
})
es = ft.EntitySet("es")
return es.add_dataframe(
dataframe_name="data",
dataframe=df,
index="id")
@pytest.fixture
def dd_normalize_es(pd_normalize_es):
es = ft.EntitySet(id=pd_normalize_es.id)
dd_df = dd.from_pandas(pd_normalize_es['data'], npartitions=2)
dd_df.ww.init(schema=pd_normalize_es['data'].ww.schema)
es.add_dataframe(dataframe=dd_df)
return es
@pytest.fixture
def spark_normalize_es(pd_normalize_es):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
es = ft.EntitySet(id=pd_normalize_es.id)
spark_df = ps.from_pandas(pd_normalize_es['data'])
spark_df.ww.init(schema=pd_normalize_es['data'].ww.schema)
es.add_dataframe(dataframe=spark_df)
return es
@pytest.fixture(params=['pd_normalize_es', 'dd_normalize_es', 'spark_normalize_es'])
def normalize_es(request):
return request.getfixturevalue(request.param)
def test_normalize_time_index_from_none(normalize_es):
assert normalize_es['data'].ww.time_index is None
normalize_es.normalize_dataframe(base_dataframe_name='data',
new_dataframe_name='normalized',
index='A',
make_time_index='time',
copy_columns=['time'])
assert normalize_es['normalized'].ww.time_index == 'time'
df = normalize_es['normalized']
# only pandas sorts by time index
if isinstance(df, pd.DataFrame):
assert df['time'].is_monotonic_increasing
def test_raise_error_if_duplicate_additional_columns_passed(es):
error_text = "'additional_columns' contains duplicate columns. All columns must be unique."
with pytest.raises(ValueError, match=error_text):
es.normalize_dataframe('sessions', 'device_types', 'device_type',
additional_columns=['device_name', 'device_name'])
def test_raise_error_if_duplicate_copy_columns_passed(es):
error_text = "'copy_columns' contains duplicate columns. All columns must be unique."
with pytest.raises(ValueError, match=error_text):
es.normalize_dataframe('sessions', 'device_types', 'device_type',
copy_columns=['device_name', 'device_name'])
def test_normalize_dataframe_copies_logical_types(es):
es['log'].ww.set_types(logical_types={'value': Ordinal(order=[0.0, 1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 14.0, 15.0, 20.0])})
assert isinstance(es['log'].ww.logical_types['value'], Ordinal)
assert len(es['log'].ww.logical_types['value'].order) == 10
assert isinstance(es['log'].ww.logical_types['priority_level'], Ordinal)
assert len(es['log'].ww.logical_types['priority_level'].order) == 3
es.normalize_dataframe('log', 'values_2', 'value_2',
additional_columns=['priority_level'],
copy_columns=['value'],
make_time_index=False)
assert len(es.get_forward_relationships('log')) == 3
assert es.get_forward_relationships(
'log')[2].parent_dataframe.ww.name == 'values_2'
assert 'priority_level' in es['values_2'].columns
assert 'value' in es['values_2'].columns
assert 'priority_level' not in es['log'].columns
assert 'value' in es['log'].columns
assert 'value_2' in es['values_2'].columns
assert isinstance(es['values_2'].ww.logical_types['priority_level'], Ordinal)
assert len(es['values_2'].ww.logical_types['priority_level'].order) == 3
assert isinstance(es['values_2'].ww.logical_types['value'], Ordinal)
assert len(es['values_2'].ww.logical_types['value'].order) == 10
# sorting not supported in Dask, Spark
def test_make_time_index_keeps_original_sorting():
trips = {
'trip_id': [999 - i for i in range(1000)],
'flight_time': [datetime(1997, 4, 1) for i in range(1000)],
'flight_id': [1 for i in range(350)] + [2 for i in range(650)]
}
order = [i for i in range(1000)]
df = pd.DataFrame.from_dict(trips)
es = EntitySet('flights')
es.add_dataframe(dataframe=df,
dataframe_name="trips",
index="trip_id",
time_index='flight_time')
assert (es['trips']['trip_id'] == order).all()
es.normalize_dataframe(base_dataframe_name="trips",
new_dataframe_name="flights",
index="flight_id",
make_time_index=True)
assert (es['trips']['trip_id'] == order).all()
def test_normalize_dataframe_new_time_index(es):
new_time_index = 'value_time'
es.normalize_dataframe('log', 'values', 'value',
make_time_index=True,
new_dataframe_time_index=new_time_index)
assert es['values'].ww.time_index == new_time_index
assert new_time_index in es['values'].columns
assert len(es['values'].columns) == 2
df = to_pandas(es['values'], sort_index=True)
assert df[new_time_index].is_monotonic_increasing
def test_normalize_dataframe_same_index(es):
transactions_df = pd.DataFrame({"id": [1, 2, 3],
"transaction_time": pd.date_range(start="10:00", periods=3, freq="10s"),
"first_df_time": [1, 2, 3]})
es = ft.EntitySet("example")
es.add_dataframe(dataframe_name="df",
index="id",
time_index="transaction_time",
dataframe=transactions_df)
error_text = "'index' must be different from the index column of the base dataframe"
with pytest.raises(ValueError, match=error_text):
es.normalize_dataframe(base_dataframe_name="df",
new_dataframe_name="new_dataframe",
index="id",
make_time_index=True)
def test_secondary_time_index(es):
es.normalize_dataframe('log', 'values', 'value',
make_time_index=True,
make_secondary_time_index={
'datetime': ['comments']},
new_dataframe_time_index="value_time",
new_dataframe_secondary_time_index='second_ti')
assert isinstance(es['values'].ww.logical_types['second_ti'], Datetime)
assert (es['values'].ww.semantic_tags['second_ti'] == set())
assert (es['values'].ww.metadata['secondary_time_index'] == {
'second_ti': ['comments', 'second_ti']})
def test_sizeof(es):
es.add_last_time_indexes()
total_size = 0
for df in es.dataframes:
total_size += df.__sizeof__()
assert es.__sizeof__() == total_size
def test_construct_without_id():
assert ft.EntitySet().id is None
def test_repr_without_id():
match = 'Entityset: None\n DataFrames:\n Relationships:\n No relationships'
assert repr(ft.EntitySet()) == match
def test_getitem_without_id():
error_text = 'DataFrame test does not exist in entity set'
with pytest.raises(KeyError, match=error_text):
ft.EntitySet()['test']
def test_metadata_without_id():
es = ft.EntitySet()
assert es.metadata.id is None
@pytest.fixture
def pd_datetime3():
return pd.DataFrame({'id': [0, 1, 2],
'ints': ['1', '2', '1']})
@pytest.fixture
def dd_datetime3(pd_datetime3):
return dd.from_pandas(pd_datetime3, npartitions=2)
@pytest.fixture
def spark_datetime3(pd_datetime3):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
return ps.from_pandas(pd_datetime3)
@pytest.fixture(params=['pd_datetime3', 'dd_datetime3', 'spark_datetime3'])
def datetime3(request):
return request.getfixturevalue(request.param)
def test_datetime64_conversion(datetime3):
df = datetime3
df["time"] = pd.Timestamp.now()
if ps and isinstance(df, ps.DataFrame):
df['time'] = df['time'].astype(np.datetime64)
else:
df["time"] = df["time"].dt.tz_localize("UTC")
if not isinstance(df, pd.DataFrame):
logical_types = {
'id': Integer,
'ints': Integer,
'time': Datetime
}
else:
logical_types = None
es = EntitySet(id='test')
es.add_dataframe(dataframe_name='test_dataframe',
index='id',
dataframe=df,
logical_types=logical_types)
es['test_dataframe'].ww.set_time_index('time')
assert es['test_dataframe'].ww.time_index == 'time'
@pytest.fixture
def pd_index_df():
return pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
"first_dataframe_time": [1, 2, 3, 5, 6, 6]})
@pytest.fixture
def dd_index_df(pd_index_df):
return dd.from_pandas(pd_index_df, npartitions=3)
@pytest.fixture
def spark_index_df(pd_index_df):
ps = pytest.importorskip('pyspark.pandas', reason="Spark not installed, skipping")
return ps.from_pandas(pd_index_df)
@pytest.fixture(params=['pd_index_df', 'dd_index_df', 'spark_index_df'])
def index_df(request):
return request.getfixturevalue(request.param)
def test_same_index_values(index_df):
if not isinstance(index_df, pd.DataFrame):
logical_types = {
'id': Integer,
'transaction_time': Datetime,
'first_dataframe_time': Integer
}
else:
logical_types = None
es = ft.EntitySet("example")
error_text = '"id" is already set as the index. An index cannot also be the time index.'
with pytest.raises(ValueError, match=error_text):
es.add_dataframe(dataframe_name="dataframe",
index="id",
time_index="id",
dataframe=index_df,
logical_types=logical_types)
es.add_dataframe(dataframe_name="dataframe",
index="id",
time_index="transaction_time",
dataframe=index_df,
logical_types=logical_types)
error_text = "time_index and index cannot be the same value, first_dataframe_time"
with pytest.raises(ValueError, match=error_text):
es.normalize_dataframe(base_dataframe_name="dataframe",
new_dataframe_name="new_dataframe",
index="first_dataframe_time",
make_time_index=True)
def test_use_time_index(index_df):
if not isinstance(index_df, pd.DataFrame):
bad_ltypes = {
'id': Integer,
'transaction_time': Datetime,
'first_dataframe_time': Integer
}
bad_semantic_tags = {'transaction_time': 'time_index'}
logical_types = {
'id': Integer,
'transaction_time': Datetime,
'first_dataframe_time': Integer
}
else:
bad_ltypes = {"transaction_time": Datetime}
bad_semantic_tags = {'transaction_time': 'time_index'}
logical_types = None
es = ft.EntitySet()
error_text = re.escape("Cannot add 'time_index' tag directly for column transaction_time. To set a column as the time index, use DataFrame.ww.set_time_index() instead.")
with pytest.raises(ValueError, match=error_text):
es.add_dataframe(dataframe_name="dataframe",
index="id",
logical_types=bad_ltypes,
semantic_tags=bad_semantic_tags,
dataframe=index_df)
es.add_dataframe(dataframe_name="dataframe",
index="id",
time_index="transaction_time",
logical_types=logical_types,
dataframe=index_df)
def test_normalize_with_datetime_time_index(es):
es.normalize_dataframe(base_dataframe_name="customers",
new_dataframe_name="cancel_reason",
index="cancel_reason",
make_time_index=False,
copy_columns=['signup_date', 'upgrade_date'])
assert isinstance(es['cancel_reason'].ww.logical_types['signup_date'], Datetime)
assert isinstance(es['cancel_reason'].ww.logical_types['upgrade_date'], Datetime)
def test_normalize_with_numeric_time_index(int_es):
int_es.normalize_dataframe(base_dataframe_name="customers",
new_dataframe_name="cancel_reason",
index="cancel_reason",
make_time_index=False,
copy_columns=['signup_date', 'upgrade_date'])
assert int_es['cancel_reason'].ww.semantic_tags['signup_date'] == {'numeric'}
def test_normalize_with_invalid_time_index(es):
error_text = 'Time index column must contain datetime or numeric values'
with pytest.raises(TypeError, match=error_text):
es.normalize_dataframe(base_dataframe_name="customers",
new_dataframe_name="cancel_reason",
index="cancel_reason",
copy_columns=['upgrade_date', 'favorite_quote'],
make_time_index='favorite_quote')
def test_entityset_init():
cards_df = | pd.DataFrame({"id": [1, 2, 3, 4, 5]}) | pandas.DataFrame |
# Perform a tolerance sweep by ESM-SSP with well-characterized errors, for an
# audience seeking to emulate from the full CMIP6 archive and for the
# ScenarioMIP approach, for all ESMs.
# For each ESM, loop over various tolerances and generate Ndraws GSAT
# trajectories, matching on archives with and without each target to
# characterize error (reproducible mode off so draws are different), and on the
# ScenarioMIP archive. Compare to the target ensemble via 4 metrics (E1 and E2
# on both Tgavs and jumps) and record errors for each draw and tolerance.
## TODO: comment out saving the GSATs.
## TODO: functionalize at least part of the analysis, or at least use a for-loop
## over the different SSP targets so that the code isn't so long and repetitive.
# Making a table of available runs X planned archives and looping over that would
# trim things down (see the approach used for the max-tolerance runs). The tolerance
# iteration could also be rewritten as a while loop that compares the current error
# to the previous one instead of calculating and saving everything. Writes and reads
# could move to a subdirectory to keep things tidier. Better yet, functionalize this
# script with ESM, tolerance, and Ndraws as arguments so the .sh wrapper just calls
# the function and dispatches each run to a different node.
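# Illustrative sketch only (not part of the original workflow): the while-loop idea
# above could look roughly like the hypothetical helper below, where
# `run_draws_and_score` stands in for "generate Ndraws trajectories at this
# tolerance and return the summary error"; it is not a real stitches function.
#
# def sweep_until_no_improvement(run_draws_and_score, tol=0.05, step=0.005, max_tol=0.22):
#     prev_err = float("inf")
#     while tol <= max_tol:
#         err = run_draws_and_score(tol)
#         if err >= prev_err:   # stop once the error no longer improves
#             return tol, prev_err
#         prev_err = err
#         tol = round(tol + step, 3)
#     return tol, prev_err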
# #############################################################################
# General setup
# #############################################################################
# Import packages
import pandas as pd
import numpy as np
import stitches
import pkg_resources
import os
from pathlib import Path
pd.set_option('display.max_columns', None)
OUTPUT_DIR = pkg_resources.resource_filename('stitches', 'data/created_data')
# OUTPUT_DIR = '/pic/projects/GCAM/stitches_pic/paper1_outputs'
# #############################################################################
# Experiment setup
# #############################################################################
# experiment parameters
tolerances = np.round(np.arange(0.05, 0.225, 0.005), 3)
Ndraws = 20
error_threshold = 0.1
# pangeo table of ESMs for reference
pangeo_path = pkg_resources.resource_filename('stitches', 'data/pangeo_table.csv')
pangeo_data = pd.read_csv(pangeo_path)
pangeo_data = pangeo_data[((pangeo_data['variable'] == 'tas') |
                           (pangeo_data['variable'] == 'pr') |
                           (pangeo_data['variable'] == 'psl')) &
                          (pangeo_data['domain'] == 'Amon')].copy()
# Keep only the runs that have data for all vars X all timesteps:
pangeo_good_ensembles = []
for name, group in pangeo_data.groupby(['model', 'experiment', 'ensemble']):
df = group.drop_duplicates().copy()
if len(df) == 3:
pangeo_good_ensembles.append(df)
del(df)
pangeo_good_ensembles = pd.concat(pangeo_good_ensembles)
pangeo_good_ensembles = pangeo_good_ensembles[['model', 'experiment', 'ensemble']].drop_duplicates().copy()
pangeo_good_ensembles = pangeo_good_ensembles.reset_index(drop=True).copy()
# won't use idealized runs
pangeo_good_ensembles = pangeo_good_ensembles[~((pangeo_good_ensembles['experiment'] == '1pctCO2') |
(pangeo_good_ensembles['experiment'] == 'abrupt-4xCO2')|
(pangeo_good_ensembles['experiment'] == 'ssp534-over')) ].reset_index(drop=True).copy()
# #############################################################################
# Load full archive and target data
# #############################################################################
# Load the full archive of all staggered windows, which we will be matching on
full_archive_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_archive_data = pd.read_csv(full_archive_path)
# Keep only the entries that appeared in pangeo_good_ensembles:
keys = ['model', 'experiment', 'ensemble']
i1 = full_archive_data.set_index(keys).index
i2 = pangeo_good_ensembles.set_index(keys).index
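# (membership test via shared MultiIndexes on the model/experiment/ensemble keys)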
full_archive_data = full_archive_data[i1.isin(i2)].copy()
del(i1)
del(i2)
# get list of ESMs that are both pangeo good ensembles and in archive
df1 = full_archive_data[['model', 'experiment', 'ensemble']].drop_duplicates()
d = pd.merge(df1, pangeo_good_ensembles.drop_duplicates(), how='inner')
esms = d.model.unique().copy()
del(df1)
del(d)
# Load the original archive without staggered windows, which we will draw
# the target trajectories from for matching
full_target_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_target_data = | pd.read_csv(full_target_path) | pandas.read_csv |
from datetime import datetime
import numpy as np
import pandas as pd
import scorecardpy as sc
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from definitions import SEED
from definitions import TARGET_NAME, REPORT_DIR
from definitions import LOGGER, USE_PRECALC
from src.data.download import download_data
from src.data.dataframes import get_train, get_test
from src.features.feature_extraction import prepare_dataset, woe_transform
from src.features.feature_selection import best_logreg_features
from src.models.parameter_selection import get_best_logreg_params
from src.models.parameter_selection import validate_logreg
from src.models.threshold_tuning import get_optimal_threshold
if __name__ == '__main__':
start_time = datetime.now()
# download data
LOGGER.info("Download data...")
download_data()
# Getting original dataset
LOGGER.info("Getting original dataset...")
train_df = get_train()
test_df = get_test()
# Feature extraction
    # Build new features and split the dataset into 2 parts: companies with and without a financial report
    LOGGER.info(
        "Prepare data. Build new features and split the dataset into 2 parts: companies with and without a financial report")
train_df_fr, train_df_nofr, test = prepare_dataset(train_df, test_df)
train_nofr, val_nofr = train_test_split(train_df_nofr, test_size=0.2, random_state=SEED)
train_fr, val_fr = train_test_split(train_df_fr, test_size=0.2, random_state=SEED)
# WOE-transformation
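    # (Weight-of-Evidence encoding replaces each binned feature value with the
    # log-odds of the target within that bin, a standard transformation for
    # scorecard-style logistic regression.)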
LOGGER.info("Calculate WOE-transformation...")
train_nofr_woe, val_nofr_woe, bins_nofr = woe_transform(train_nofr, val_nofr)
train_fr_woe, val_fr_woe, bins_fr = woe_transform(train_fr, val_fr)
# Feature selection
LOGGER.info("Calculate best features...")
best_features_nofr = best_logreg_features(train_nofr_woe.drop(TARGET_NAME, axis=1),
train_nofr_woe[TARGET_NAME],
use_precalc=USE_PRECALC,
financial_report=False)
LOGGER.info(f"Best features nofr is: {best_features_nofr}")
X_train_nofr = train_nofr_woe.drop(TARGET_NAME, axis=1)[best_features_nofr]
y_train_nofr = train_nofr_woe[TARGET_NAME]
X_val_nofr = val_nofr_woe.drop(TARGET_NAME, axis=1)[best_features_nofr]
y_val_nofr = val_nofr_woe[TARGET_NAME]
best_features_fr = best_logreg_features(train_fr_woe.drop(TARGET_NAME, axis=1),
train_fr_woe[TARGET_NAME],
use_precalc=USE_PRECALC,
financial_report=True)
LOGGER.info(f"Best features fr is: {best_features_fr}")
X_train_fr = train_fr_woe.drop(TARGET_NAME, axis=1)[best_features_fr]
y_train_fr = train_fr_woe[TARGET_NAME]
X_val_fr = val_fr_woe.drop(TARGET_NAME, axis=1)[best_features_fr]
y_val_fr = val_fr_woe[TARGET_NAME]
# Logreg hyperparameters tuning
LOGGER.info("Calculate best logreg parameters...")
param_grid = {
        'C': np.arange(0.05, 5, 0.05),  # C must be strictly positive for LogisticRegression
'class_weight': [None, 'balanced']
}
best_score_nofr, best_params_nofr = get_best_logreg_params(X_train_nofr,
y_train_nofr,
param_grid=param_grid,
use_precalc=USE_PRECALC,
financial_report=False
)
best_score_fr, best_params_fr = get_best_logreg_params(X_train_fr,
y_train_fr,
param_grid=param_grid,
use_precalc=USE_PRECALC,
financial_report=True
)
# Validation
LOGGER.info("Validate best models")
train_score_nofr, val_score_nofr = validate_logreg(X_train_nofr,
y_train_nofr,
X_val_nofr,
y_val_nofr,
params=best_params_nofr)
LOGGER.info(f"""Logreg for data without financial report:
Train score: {train_score_nofr:.4f}, validation score: {val_score_nofr:.4f}""")
train_score_fr, val_score_fr = validate_logreg(X_train_fr,
y_train_fr,
X_val_fr,
y_val_fr,
params=best_params_fr)
LOGGER.info(f"""Logreg for data with financial report:
Train score: {train_score_fr:.4f}, validation score: {val_score_fr:.4f}""")
# Threshold tuning
LOGGER.info("Calculate optimal thresholds.")
log_reg = LogisticRegression(random_state=SEED, **best_params_nofr)
log_reg.fit(X_train_nofr[best_features_nofr], y_train_nofr)
optimal_threshold_nofr = get_optimal_threshold(val_nofr_woe[best_features_nofr + [TARGET_NAME]],
log_reg,
financial_report=False)
LOGGER.info(f"Threshold for data without financial report: {optimal_threshold_nofr:.3f}")
log_reg = LogisticRegression(random_state=SEED, **best_params_fr)
log_reg.fit(X_train_fr[best_features_fr], y_train_fr)
optimal_threshold_fr = get_optimal_threshold(val_fr_woe[best_features_fr + [TARGET_NAME]],
log_reg,
financial_report=True)
LOGGER.info(f"Threshold for data with financial report: {optimal_threshold_fr:.3f}")
# Train model on all train data
data_nofr = | pd.concat([train_nofr_woe, val_nofr_woe]) | pandas.concat |
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - day of week to filter by as an integer string (1=Sunday), or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
print(' ')
print('Enter the city you want to analyze the data from the following cities:')
print('Chicago: 1')
print('New York: 2')
print('Washington: 3')
print(' ')
city = input('Please choose the city for which you would like to see the Statistics: ')
city = city.lower()
while True:
if city == '1' or city == 'chicago':
print("\n You have selected Chicago City! Okay Let's go further\n")
city = 'chicago'
break
elif city == '2' or city == 'new york':
print("\n You have selected New York City! Okay let's go further\n")
city= 'new york city'
break
elif city == '3' or city == 'washington':
print("\n You have selected Washington! Okay let's go further\n")
city= 'washington'
break
else:
print ('Enter the correct city you want to analyze the data')
city = input('Please choose the city for which you would like to see the Statistics: ')
city = city.lower()
#goto check
# TO DO: get user input for month (all, january, february, ... , june)
    month = input('\nWhich month? Please type the full month name.\n')
    while month.strip().lower() not in ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']:
        month = input('\nThat is not a valid month. Please type the full month name (e.g. January).\n')
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
day = input('\nWhich day? Please enter an integer (e.g., 1=sunday) \n')
while day.strip() not in ['1', '2', '3', '4', '5', '6', '7']:
day = input('\nWhich day? Please enter an integer (e.g., 1=sunday) \n')
print('-'*40)
return city, month.strip().lower(), day.lower().strip()
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - day of week to filter by as an integer string (1=Sunday), or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
print('\nLoading the data... .. .. ..\n')
df = pd.read_csv(CITY_DATA[city])
#extracting from Start Time
df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['day_of_week'] = df['Start Time'].dt.day_name()
df['month'] = df['Start Time'].dt.month
df["day_of_month"] = df["Start Time"].dt.day
print('Data loaded. Now computing statistics... \n')
    # Filter by month (month is a full month name; 'all' applies no month filter)
    if month != 'all':
        months = ['january', 'february', 'march', 'april', 'may', 'june', 'july',
                  'august', 'september', 'october', 'november', 'december']
        month = months.index(month) + 1
        df = df[df['month'] == month]
    # Filter by day of week (day is an integer string, 1=Sunday; 'all' applies no day filter)
    if day != 'all':
        days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
                'Thursday', 'Friday', 'Saturday']
        day_of_week = days[int(day) - 1]
        df = df[df['day_of_week'] == day_of_week]
    return df
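# Example usage of load_data (illustrative only; the argument values are hypothetical):
#   df = load_data('chicago', 'march', '2')   # Chicago, March, Mondays (day 2, with 1=Sunday)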
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# TO DO: display the most common month
print('\n Calculating the statistic.. most popular month ...')
m = df.month.mode()[0]
    months = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']
popular_month = months[m - 1].capitalize()
print(popular_month)
# TO DO: display the most common day of week
print('\nCalculating the statistic.. most popular day of the week..')
popular_day = df['day_of_week'].value_counts().reset_index()['index'][0]
print(popular_day)
# TO DO: display the most common start hour
print('\n Calculating the statistic.. most popular hour of the day ..')
df['hour'] = df['Start Time'].dt.hour
print(df.hour.mode()[0])
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# TO DO: display most commonly used start station
print("\n Calculating the statistic.. most popular start station..\n")
start_station = df['Start Station'].value_counts().reset_index()['index'][0]
print (start_station)
# TO DO: display most commonly used end station
print("\n Calculating the statistic.. most popular end station..\n")
end_station = df['End Station'].value_counts().reset_index()['index'][0]
print(end_station)
# TO DO: display most frequent combination of start station and end station trip
result = df[['Start Station', 'End Station']].groupby(['Start Station', 'End Station']).size().nlargest(1)
print('\n Calculating the statistic.. most popular trip from start to end is {}'.format(result))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating the statistic.. Trip Duration...\n')
start_time = time.time()
print('\n What was the total traveling done and what was the average time spent on each trip?')
df['End Time'] = | pd.to_datetime(df['End Time']) | pandas.to_datetime |
# license: Creative Commons License
# Title: Big data strategies seminar. Challenge 1. www.iaac.net
# Created by: <NAME>
#
# This work is licensed under a Creative Commons Attribution 4.0 International License.
# http://creativecommons.org/licenses/by/4.0/
# This script uses pandas for data management; for more information visit pandas.pydata.org/
# The guide for joins and merges: https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
# The options for scatterplots with seaborn: https://seaborn.pydata.org/generated/seaborn.scatterplot.html
#
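# Note (illustrative, not part of the original script): once loaded, the yearly frames
# below can be stacked into a single table before any joins, e.g.
#   irf_all = pd.concat([irf_2007, irf_2008, irf_2009], keys=[2007, 2008, 2009])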
import pandas as pd
from pandas import plotting
from shapely.geometry import Point
import matplotlib.pyplot as plt
import seaborn as sns
plotting.register_matplotlib_converters()
######################################################
# Read the different files starting with the last file
irf_2007 = pd.read_csv('../data/opendatabcn/2007_distribucio_territorial_renda_familiar.csv')
irf_2008 = pd.read_csv('../data/opendatabcn/2008_distribucio_territorial_renda_familiar.csv')
irf_2009 = pd.read_csv('../data/opendatabcn/2009_distribucio_territorial_renda_familiar.csv')
irf_2010 = pd.read_csv('../data/opendatabcn/2010_distribucio_territorial_renda_familiar.csv')
irf_2011 = pd.read_csv('../data/opendatabcn/2011_distribucio_territorial_renda_familiar.csv')
irf_2012 = pd.read_csv('../data/opendatabcn/2012_distribucio_territorial_renda_familiar.csv')
irf_2013 = pd.read_csv('../data/opendatabcn/2013_distribucio_territorial_renda_familiar.csv')
irf_2014 = pd.read_csv('../data/opendatabcn/2014_distribucio_territorial_renda_familiar.csv')
irf_2015 = pd.read_csv('../data/opendatabcn/2015_distribucio_territorial_renda_familiar.csv')
irf_2016 = | pd.read_csv('../data/opendatabcn/2016_distribucio_territorial_renda_familiar.csv') | pandas.read_csv |